code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# This script tests the contrast() function.
# contrast(input_img, intensity, output_img) adjusts the contrast of an image
# Input: input_img: string, path for the input image file
# intensity: int, intensity of contrast enhancement, between 0 and 10, defaults to 5.
# display: bool, display the enhanced image in an IDE, defaults to False.
# output_img: string, path for the output image file
# Output: an image file at the specified output path
import numpy as np
import pytest
import skimage.io
from picfixPy.contrast import contrast
# generate input image
# 3x3 RGB test fixture with fixed per-channel pixel values
test_img1 = np.array([[[1,55,255], [2,55,255] ,[3,100,5]],
                      [[1,55,255], [2,55,255], [3,100,5]],
                      [[1,55,255], [2,55,255], [3,100,5]]], dtype = 'uint8')
# write the fixture to disk; check_contrast=False silences skimage's
# low-contrast warning for this tiny synthetic image
skimage.io.imsave("picfixPy/test/test_img/contrast/test_img1.png",
                  test_img1, check_contrast=False)
# generate output image when intensity is 5
# (precomputed expected result of contrast() on test_img1 at intensity 5)
expected_img1 = np.array([[[0,7,255], [0,7,255] ,[0,81,0]],
                          [[0,7,255], [0,7,255], [0,81,0]],
                          [[0,7,255], [0,7,255], [0,81,0]]], dtype='uint8')
# test for implementation correctness
def test_zero_intensity():
    """With an intensity of 0 the output image must equal the input image."""
    contrast(
        "picfixPy/test/test_img/contrast/test_img1.png",
        0,
        False,
        "picfixPy/test/test_img/contrast/contrast.png",
    )
    result = skimage.io.imread("picfixPy/test/test_img/contrast/contrast.png")
    assert np.array_equal(result, test_img1), "Images should be indentical with 0 intensity."
def test_correct_contrast():
    """Intensity 5 must produce exactly the precomputed expected image."""
    contrast(
        "picfixPy/test/test_img/contrast/test_img1.png",
        5,
        False,
        "picfixPy/test/test_img/contrast/expected_img1.png",
    )
    result = skimage.io.imread("picfixPy/test/test_img/contrast/expected_img1.png")
    assert np.array_equal(result, expected_img1), "The image should be with adjusted contrast with an intensity of 5."
# test for exception handling
def test_input_string():
    """A non-string input path must raise AttributeError."""
    with pytest.raises(AttributeError):
        contrast(888, 5, False,
                 "picfixPy/test/test_img/contrast/contrast.png")
def test_valid_intensity():
    """An out-of-range (negative, non-integer) intensity must raise ValueError."""
    with pytest.raises(ValueError):
        contrast(
            "picfixPy/test/test_img/contrast/test_img1.png",
            -10.5, False,
            "picfixPy/test/test_img/contrast/contrast.png",
        )
def test_input_exist():
    """A missing input file must raise FileNotFoundError."""
    with pytest.raises(FileNotFoundError):
        contrast(
            "picfixPy/test/test_img/ffxiv/namazu.png",
            5, False,
            "picfixPy/test/test_img/contrast/contrast.png",
        )
def test_input_nonimage():
    """A non-image input file must raise OSError."""
    with pytest.raises(OSError):
        contrast(
            "picfixPy/test/test_img/contrast/test_img1.java",
            5, False,
            "picfixPy/test/test_img/contrast/contrast.png",
        )
def test_display_image():
    """contrast() with display=True must show the image without raising."""
    try:
        contrast("picfixPy/test/test_img/contrast/test_img1.png",
                 5,
                 True)
    except Exception:  # pragma: no cover
        # BUGFIX: pytest.fail() already raises Failed itself; the former
        # `raise pytest.fail(...)` wrapped a call that never returns.
        pytest.fail("Cannot display image, something went wrong.")
def test_output_nonimage():
    """A non-image output path must raise ValueError."""
    with pytest.raises(ValueError):
        contrast(
            "picfixPy/test/test_img/contrast/test_img1.png",
            5, False,
            "picfixPy/test/test_img/contrast/test_img2.java",
        )
def test_output_path_valid():
with pytest.raises(FileNotFoundError):
contrast("picfixPy/test/test_img/contrast/test_img1.png",
5,
False,
"beasttribe/namazu/dailies.png") | [
"pytest.fail",
"pytest.raises",
"numpy.array",
"numpy.array_equal",
"picfixPy.contrast.contrast"
] | [((597, 760), 'numpy.array', 'np.array', (['[[[1, 55, 255], [2, 55, 255], [3, 100, 5]], [[1, 55, 255], [2, 55, 255], [3,\n 100, 5]], [[1, 55, 255], [2, 55, 255], [3, 100, 5]]]'], {'dtype': '"""uint8"""'}), "([[[1, 55, 255], [2, 55, 255], [3, 100, 5]], [[1, 55, 255], [2, 55,\n 255], [3, 100, 5]], [[1, 55, 255], [2, 55, 255], [3, 100, 5]]], dtype=\n 'uint8')\n", (605, 760), True, 'import numpy as np\n'), ((962, 1111), 'numpy.array', 'np.array', (['[[[0, 7, 255], [0, 7, 255], [0, 81, 0]], [[0, 7, 255], [0, 7, 255], [0, 81,\n 0]], [[0, 7, 255], [0, 7, 255], [0, 81, 0]]]'], {'dtype': '"""uint8"""'}), "([[[0, 7, 255], [0, 7, 255], [0, 81, 0]], [[0, 7, 255], [0, 7, 255],\n [0, 81, 0]], [[0, 7, 255], [0, 7, 255], [0, 81, 0]]], dtype='uint8')\n", (970, 1111), True, 'import numpy as np\n'), ((1211, 1330), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(0)', '(False)', '"""picfixPy/test/test_img/contrast/contrast.png"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.png', 0, False,\n 'picfixPy/test/test_img/contrast/contrast.png')\n", (1219, 1330), False, 'from picfixPy.contrast import contrast\n'), ((1461, 1498), 'numpy.array_equal', 'np.array_equal', (['output_img', 'test_img1'], {}), '(output_img, test_img1)\n', (1475, 1498), True, 'import numpy as np\n'), ((1582, 1706), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(5)', '(False)', '"""picfixPy/test/test_img/contrast/expected_img1.png"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.png', 5, False,\n 'picfixPy/test/test_img/contrast/expected_img1.png')\n", (1590, 1706), False, 'from picfixPy.contrast import contrast\n'), ((1842, 1883), 'numpy.array_equal', 'np.array_equal', (['output_img', 'expected_img1'], {}), '(output_img, expected_img1)\n', (1856, 1883), True, 'import numpy as np\n'), ((2024, 2053), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (2037, 2053), 
False, 'import pytest\n'), ((2063, 2134), 'picfixPy.contrast.contrast', 'contrast', (['(888)', '(5)', '(False)', '"""picfixPy/test/test_img/contrast/contrast.png"""'], {}), "(888, 5, False, 'picfixPy/test/test_img/contrast/contrast.png')\n", (2071, 2134), False, 'from picfixPy.contrast import contrast\n'), ((2173, 2198), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2186, 2198), False, 'import pytest\n'), ((2208, 2331), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(-10.5)', '(False)', '"""picfixPy/test/test_img/contrast/contrast.png"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.png', -10.5, False,\n 'picfixPy/test/test_img/contrast/contrast.png')\n", (2216, 2331), False, 'from picfixPy.contrast import contrast\n'), ((2415, 2447), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (2428, 2447), False, 'import pytest\n'), ((2457, 2570), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/ffxiv/namazu.png"""', '(5)', '(False)', '"""picfixPy/test/test_img/contrast/contrast.png"""'], {}), "('picfixPy/test/test_img/ffxiv/namazu.png', 5, False,\n 'picfixPy/test/test_img/contrast/contrast.png')\n", (2465, 2570), False, 'from picfixPy.contrast import contrast\n'), ((2657, 2679), 'pytest.raises', 'pytest.raises', (['OSError'], {}), '(OSError)\n', (2670, 2679), False, 'import pytest\n'), ((2689, 2809), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.java"""', '(5)', '(False)', '"""picfixPy/test/test_img/contrast/contrast.png"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.java', 5, False,\n 'picfixPy/test/test_img/contrast/contrast.png')\n", (2697, 2809), False, 'from picfixPy.contrast import contrast\n'), ((2903, 2969), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(5)', '(True)'], {}), 
"('picfixPy/test/test_img/contrast/test_img1.png', 5, True)\n", (2911, 2969), False, 'from picfixPy.contrast import contrast\n'), ((3160, 3185), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (3173, 3185), False, 'import pytest\n'), ((3195, 3316), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(5)', '(False)', '"""picfixPy/test/test_img/contrast/test_img2.java"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.png', 5, False,\n 'picfixPy/test/test_img/contrast/test_img2.java')\n", (3203, 3316), False, 'from picfixPy.contrast import contrast\n'), ((3407, 3439), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (3420, 3439), False, 'import pytest\n'), ((3449, 3553), 'picfixPy.contrast.contrast', 'contrast', (['"""picfixPy/test/test_img/contrast/test_img1.png"""', '(5)', '(False)', '"""beasttribe/namazu/dailies.png"""'], {}), "('picfixPy/test/test_img/contrast/test_img1.png', 5, False,\n 'beasttribe/namazu/dailies.png')\n", (3457, 3553), False, 'from picfixPy.contrast import contrast\n'), ((3061, 3119), 'pytest.fail', 'pytest.fail', (['"""Cannot display image, something went wrong."""'], {}), "('Cannot display image, something went wrong.')\n", (3072, 3119), False, 'import pytest\n')] |
"""Class for Measurement Data."""
import sys
import os
import numpy as np
import math
import time
import warnings
from scipy.spatial import distance
import scipy.fftpack as fftp
from scipy import signal
def zerocrossings(data):
    """Count the number of sign changes in *data*.

    Zeros are counted as negative so that np.sign(0) == 0 cannot mask
    a crossing.

    :param data: Data (np.array)
    :return: Number of zero crossings
    :rtype: int
    """
    raw = np.sign(data)
    signs = np.where(raw == 0, -1, raw)
    return int(np.count_nonzero(np.diff(signs)))
def zcr(data):
    """
    Return the Zero Crossing Rate.

    Zeros are treated as negative (np.sign(0) yields 0) so they cannot
    hide a crossing. The count is normalized by the number of samples.

    :param data: Data
    :type data: list
    :return: Zero Crossing Rate
    :rtype: float
    """
    raw = np.sign(data)
    raw[raw == 0] = -1
    crossings = np.count_nonzero(np.diff(raw))
    return crossings / len(data)
def rms(data, axis=None):
    """
    Return the RMS value of the data.

    :param data: Data (1-D np.array)
    :type data: list
    :param axis: Currently not in use
    :type axis: None
    :return: Root Mean Square of the data
    :rtype: float
    """
    # sqrt of the mean of the squares via a single dot product
    return np.sqrt(np.dot(data, data) / data.size)
def normalizedZCR(data, samplingRate, LINE_FREQUENCY=50.0):
    r"""
    Return the normalized Zero Crossing Rate.

    Normalized by the expected value of 2 crossings per line period
    (2*50 per second in a 50Hz network), scaled to the covered duration,
    so 1.0 means the nominal crossing count. The rate is computed both on
    the raw signal and on a mean-free copy (to compensate a DC offset),
    and the maximum of the two is returned.

    TODO: if the data is not symmetric, the mean is not a good estimate

    :param data: Data
    :type data: list
    :param samplingRate: Numbers of data samples per second
    :type samplingRate: int
    :param LINE_FREQUENCY: 50 or 60 Hz, default=50
    :type LINE_FREQUENCY: float
    :return: Normalized Zero Crossing Rate
    :rtype: float
    """
    offset = np.mean(data)
    meanfree_signs = np.sign(data - offset)
    raw_signs = np.sign(data)
    # np.sign(0) is 0; count zeros as negative so they cannot hide a crossing
    meanfree_signs[meanfree_signs == 0] = -1
    raw_signs[raw_signs == 0] = -1
    # expected crossings: two per line period over the covered duration
    samplesPerPhase = int(samplingRate / LINE_FREQUENCY)
    expectedZeroCrossings = len(data) / samplesPerPhase * 2
    zcrRaw = len(np.where(np.diff(raw_signs))[0]) / expectedZeroCrossings
    zcrMeanfree = len(np.where(np.diff(meanfree_signs))[0]) / expectedZeroCrossings
    return max(zcrRaw, zcrMeanfree)
def calcFullPowers(voltage, current, samplingRate, LINE_FREQUENCY=50.0):
    """
    Calculate Active, Reactive and Apparent power from voltage and current.

    Powers are computed over a sliding window of one line period, giving
    one value per input sample (minus one window).

    :param voltage: Voltage in volt
    :type voltage: list or np.array
    :param current: Current in milli ampere
    :type current: list or np.array
    :param samplingRate: Samplingrate for phase calculation
    :type samplingRate: int
    :param LINE_FREQUENCY: 50 or 60 Hz, default=50
    :type LINE_FREQUENCY: float
    :return: Active, Reactive and Apparent Power with 50Hz samplingrate
    :rtype: Tuple
    """
    window = int(samplingRate / LINE_FREQUENCY)
    p, q, s = [], [], []
    for start in range(len(voltage) - window):
        u = np.array(voltage[start:start + window])
        cur = np.array(current[start:start + window])
        # active power; the 0.001 factor converts mA to A
        active = 0.001 * np.mean(u * cur)
        p.append(active if active > 0 else 0)
        # apparent power from the two RMS values
        urms = np.sqrt(u.dot(u) / u.size)
        irms = np.sqrt(cur.dot(cur) / cur.size)
        s.append(0.001 * urms * irms)
        # reactive power from the power triangle: q^2 = s^2 - p^2
        q.append(math.sqrt(abs(s[-1] * s[-1] - p[-1] * p[-1])))
    return p, q, s
def calcPowers(voltage, current, samplingRate, upsamplingMethod=None, LINE_FREQUENCY=50):
    """
    Calculate Active, Reactive and Apparent power from voltage and current.

    :param voltage: Voltage in volt
    :type voltage: np.array
    :param current: Current in milli ampere
    :type current: np.array
    :param samplingRate: Samplingrate for phase calculation
    :type samplingRate: int
    :param upsamplingMethod: If final data should be same samplingrate as input data, default=None
                             One in ["linear","repeat"]
    :type upsamplingMethod: str
    :param LINE_FREQUENCY: 50 or 60 Hz, default=50
    :type LINE_FREQUENCY: float
    :return: Active, Reactive and Apparent Power as np.arrays
    :rtype: Tuple
    """
    # samples per line period; powers are computed once per period
    sfos = int(samplingRate / LINE_FREQUENCY)
    numPoints = len(voltage)
    # truncate to a whole number of periods so the data can be reshaped
    end = int(math.floor(numPoints / sfos)) * sfos
    v = voltage[:end]
    c = current[:end]
    # instantaneous power per sample; 0.001 converts mA to A
    momentary = 0.001 * np.array(v * c)
    # bring down to one value per line period by averaging
    momentary = momentary.reshape((-1, sfos))
    p = np.mean(momentary, axis=1)
    v = v.reshape((-1, sfos))
    i = c.reshape((-1, sfos))
    # per-period RMS via a row-wise dot product
    vrms = np.sqrt(np.einsum('ij,ij->i', v, v) / sfos)
    irms = np.sqrt(np.einsum('ij,ij->i', i, i) / sfos)
    # current is in mA
    s = 0.001 * vrms * irms
    # power triangle: q^2 = s^2 - p^2
    q = np.sqrt(np.abs(s * s - p * p))
    # optionally upsample the per-period powers back to the input rate
    if upsamplingMethod is not None:
        if upsamplingMethod == "linear":
            # BUGFIX: np.linspace requires an integer sample count; the
            # former float expression raises TypeError on modern numpy.
            x = np.linspace(0, end / samplingRate, int(round(end / samplingRate * LINE_FREQUENCY)))
            x_new = np.linspace(0, end / samplingRate, end)
            s = np.interp(x_new, x, s)
            p = np.interp(x_new, x, p)
            q = np.interp(x_new, x, q)
        elif upsamplingMethod == "repeat":
            s = np.repeat(s, sfos)
            p = np.repeat(p, sfos)
            q = np.repeat(q, sfos)
        if end != numPoints:
            # pad the truncated tail with the last computed value
            s = np.append(s, np.repeat(s[-1], numPoints - end))
            p = np.append(p, np.repeat(p[-1], numPoints - end))
            q = np.append(q, np.repeat(q[-1], numPoints - end))
    return p, q, s
def lowpass(data, fs, order, fc):
    """Apply a zero-phase Butterworth low-pass filter to *data*.

    :param data: input signal
    :param fs: sampling rate in Hz
    :param order: filter order
    :param fc: cutoff frequency (-3 dB) in Hz
    :return: filtered signal as a list
    """
    # butter() expects the cutoff normalized to the Nyquist frequency
    normalized_cut = fc / (0.5 * fs)
    b, a = signal.butter(order, normalized_cut, btype='lowpass')
    # filtfilt runs forward and backward -> zero phase distortion
    return list(signal.filtfilt(b, a, data))
def index2Freq(i, sampleRate, nFFT):
    """
    Return the frequency for a given FFT index.

    :param i: Index
    :type i: int
    :param sampleRate: Numbers of data samples per second
    :type sampleRate: int
    :param nFFT: Length of fourier transform
    :type nFFT: int
    :return: Frequency at the given FFT index
    :rtype: float
    """
    bin_width = sampleRate / (nFFT * 2)
    return i * bin_width
def freq2Index(freq, sampleRate, nFFT):
    """
    Return the FFT index for a given frequency.

    :param freq: Frequency, of which the bin index should be returned
    :type freq: int
    :param sampleRate: Numbers of data samples per second
    :type sampleRate: int
    :param nFFT: Length of fourier transform
    :type nFFT: int
    :return: FFT index of the given frequency
    :rtype: int
    """
    bin_width = sampleRate / (nFFT * 2)
    # round to 3 decimals first to absorb float noise, then truncate to int
    return int(round(freq / bin_width, 3))
def fftBinsForFreqs(freqs, sample_rate, data):
    """
    Return the fft bin(s) (value) corresponding to given frequency.

    :param freqs: Frequencies, of which the bins should be returned
    :type freqs: list
    :param sample_rate: Numbers of data samples per second
    :type sample_rate: int
    :param data: Fourier transform
    :type data: list
    :return: Bins corresponding to given frequencies
    :rtype: list
    """
    # IDIOM: former loop variable `bin` shadowed the builtin of the same
    # name; freq2Index already returns an int, so no extra cast is needed.
    return [data[freq2Index(freq, sample_rate, len(data))] for freq in freqs]
def fftBinIndexesForFreqs(freqs, sample_rate, data):
    """
    Return the fft bin(s) (index) corresponding to given frequencies.

    :param freqs: Frequencies, of which the bins should be returned
    :type freqs: list
    :param sample_rate: Numbers of data samples per second
    :type sample_rate: int
    :param data: Fourier transform
    :type data: list
    :return: Bins that correspond to the given frequencies
    :rtype: list
    """
    # We can only reconstruct half the samplingRate.
    # IDIOM: former loop variable `bin` shadowed the builtin of the same name.
    return [freq2Index(freq, sample_rate, len(data)) for freq in freqs]
def fft2(data, nfft):
    """
    Calculate the fft of signal 'data'.

    The fft is computed using numpy.fft.fft with orthonormal scaling;
    only the magnitude of the first half of the spectrum is returned.

    :param data: Data
    :type data: list
    :param nfft: Length of fourier transform
    :type nfft: int
    :return: Transformed input
    :rtype: list
    """
    if len(data) > nfft:
        print("Warning: FFT size should be larger than data size.")
    spectrum = np.fft.fft(data, norm="ortho", n=nfft)[:nfft // 2] / nfft
    return abs(spectrum)
def fft(data, nfft):
    """
    Calculate the fft of signal 'data'.

    The fft is computed using scipy.fftpack.fft; only the magnitude of
    the first half of the spectrum, scaled by nfft, is returned.

    :param data: Data
    :type data: list
    :param nfft: Length of fourier transform
    :type nfft: int
    :return: Transformed input
    :rtype: list
    """
    if len(data) > nfft:
        print("Warning: size should be larger than data size.")
    spectrum = fftp.fft(data, n=nfft)[:nfft // 2] / nfft
    return abs(spectrum)
def goertzel(samples, sample_rate, freqRanges):
    """
    Implement the Goertzel algorithm.
    Implementation of the Goertzel algorithm, useful for calculating
    individual terms of a discrete Fourier transform. Result are firstly
    the actual frequencies calculated and secondly the coefficients for
    each of those frequencies `(real part, imag part, power)`. For simple
    spectral analysis, the power is usually enough.
    :param samples: Windowed one-dimensional signal
    :type samples: list
    :param sample_rate: Original rate the signal is sampled at
    :type sample_rate: int
    :param freqRanges: Ranges of frequencies that are meant to be computed
    :type freqRanges: list of tuples
    :return: The calculated frequencies and the coefficients (as 3-tuple)
    :rtype: list, list
    :Example:
    Calculating frequencies in ranges [400, 500] and [1000, 1100]
    of a windowed signal sampled at 44100 Hz.
    ``freqs, results = goertzel(some_samples, 44100,[(400, 500),
    (1000, 1100)])``
    """
    window_size = len(samples)
    # frequency resolution in Hz, and as a fraction of the sampling rate
    f_step = sample_rate / float(window_size)
    f_step_normalized = 1.0 / window_size
    # Calculate all the DFT bins we have to compute to include frequencies
    # in `freqs`.
    bins = set()
    for f_range in freqRanges:
        f_start, f_end = f_range
        # round() absorbs float noise before flooring/ceiling to bin indices
        k_start = int(math.floor(round(f_start / f_step, 3)))
        k_end = int(math.ceil(round(f_end / f_step, 3)))
        if k_end > window_size - 1:
            raise ValueError('frequency out of range %s' % k_end)
        # NOTE(review): range() excludes k_end, so the bin at f_end itself
        # is not computed -- confirm whether the upper bound is meant to be
        # inclusive.
        bins = bins.union(range(k_start, k_end))
    # For all the bins, calculate the DFT term
    n_range = range(0, window_size)
    freqs = []
    results = []
    for k in bins:
        # Bin frequency and coefficients for the computation
        f = k * f_step_normalized
        w_real = 2.0 * math.cos(2.0 * math.pi * f)
        w_imag = math.sin(2.0 * math.pi * f)
        # Doing the calculation on the whole sample
        # (second-order recurrence; d1/d2 carry the two previous states)
        d1, d2 = 0.0, 0.0
        for n in n_range:
            y = samples[n] + w_real * d1 - d2
            d2, d1 = d1, y
        # Storing results `(real part, imag part, power)`
        results.append((
            0.5 * w_real * d1 - d2, w_imag * d1,
            d2**2 + d1**2 - w_real * d1 * d2)
        )
        freqs.append(f * sample_rate)
    return freqs, results
def absDist(v1, v2):
    """
    Return absolut distance between two scalar values.

    Equivalent to the sign-based formulation: for equal signs
    ``||v1| - |v2|| == |v1 - v2|`` and for opposite signs (zeros counting
    as "opposite") ``|v1| + |v2| == |v1 - v2|``.

    :param v1: First value
    :type v1: float
    :param v2: Second vector
    :type v2: float
    :return: The absolute distance
    :rtype: float
    """
    return abs(v1 - v2)
DEBUG = False
def euclideanDistance(vec1, vec2):
    r"""
    Calculate the euclidean distance of two given (feature) vectors.
    .. math::
        ||\vec{v_1} - \vec{v_2}|| = \sqrt{\sum_{K=1}^{N} (vec_{1,k} - vec_{2,k})^2}
    :param vec1: First vector
    :type vec1: list
    :param vec2: Second vector
    :type vec2: list
    :return: The euclidean distance
    :rtype: float
    """
    # element-wise difference, then its L2 norm
    difference = np.subtract(vec1, vec2)
    return np.linalg.norm(difference)
def quadraticDistance(vec1, vec2):
    r"""
    Calculate the quadratic distance of two given (feature) vectors.
    .. math::
        \sum_{K=1}^{N} (vec_{1,k} - vec_{2,k})^2
    :param vec1: First vector
    :type vec1: list
    :param vec2: Second vector
    :type vec2: list
    :return: The quadratic distance
    :rtype: float
    """
    return sum((a - b) ** 2 for a, b in zip(vec1, vec2))
def manhattan_distance(vec1, vec2):
    r"""
    Return the manhattan distance between two vectors.
    .. math::
        \sum_{K=1}^{N} |vec_{1,k} - vec_{2,k}|
    :param vec1: First vector
    :type vec1: list
    :param vec2: Second vector
    :type vec2: list
    :return: The manhattan distance
    :rtype: float
    """
    return sum([abs(x - y) for x, y in zip(vec1, vec2)])
def compareSine(sine1, sine2, hard=True):
    """
    Compare two sinewaves and return true if similar enough.
    :param sine1: First sine
    :type sine1: list
    :param sine2: Second sine
    :type sine2: list
    :param hard: Sets the threshold to 0.01 if True, 0.02 if False
    :type hard: bool
    :return: True if the sinewaves are similar enough
    :rtype: bool
    """
    # Compare them two
    if len(sine1) != len(sine2):
        warnings.warn("Sinewaves need equal length for comparison")
        return False
    # NOTE(review): assigning to `rms` makes the name local to this
    # function, so the right-hand call `rms(sine1)` raises
    # UnboundLocalError before the module-level rms() can be reached; the
    # later `rms(sine2)` would likewise call a float. Rename the local
    # (e.g. `rms1`) to fix.
    rms = rms(sine1)
    # calculate euclidean distance
    dst = distance.euclidean(sine1, sine2)/(len(sine1)*rms)
    rmsDst = absDist(rms, rms(sine2))
    meanDst = absDist(np.mean(sine1), np.mean(sine2))
    if hard is True:
        dstThreshold = 0.01
    else:
        dstThreshold = 0.02
    # thresholds scale with the signal level, with fixed lower bounds
    meanDstThreshold = max(rms*0.075, 5)
    rmsDstThreshold = max(rms*0.0075, 10)
    # similar only if all three distance measures are below threshold
    if (dst < dstThreshold and meanDst < meanDstThreshold and
            rmsDst < rmsDstThreshold):
        return True
    else:
return False | [
"numpy.abs",
"scipy.spatial.distance.euclidean",
"scipy.signal.filtfilt",
"numpy.fft.fft",
"math.floor",
"numpy.einsum",
"math.sin",
"scipy.fftpack.fft",
"numpy.mean",
"numpy.array",
"math.cos",
"numpy.linspace",
"numpy.sign",
"numpy.interp",
"warnings.warn",
"numpy.diff",
"scipy.sig... | [((242, 255), 'numpy.sign', 'np.sign', (['data'], {}), '(data)\n', (249, 255), True, 'import numpy as np\n'), ((576, 589), 'numpy.sign', 'np.sign', (['data'], {}), '(data)\n', (583, 589), True, 'import numpy as np\n'), ((2070, 2083), 'numpy.mean', 'np.mean', (['data'], {}), '(data)\n', (2077, 2083), True, 'import numpy as np\n'), ((2096, 2116), 'numpy.sign', 'np.sign', (['(data - mean)'], {}), '(data - mean)\n', (2103, 2116), True, 'import numpy as np\n'), ((2128, 2141), 'numpy.sign', 'np.sign', (['data'], {}), '(data)\n', (2135, 2141), True, 'import numpy as np\n'), ((5038, 5064), 'numpy.mean', 'np.mean', (['momentary'], {'axis': '(1)'}), '(momentary, axis=1)\n', (5045, 5064), True, 'import numpy as np\n'), ((6288, 6330), 'scipy.signal.butter', 'signal.butter', (['order', 'cut'], {'btype': '"""lowpass"""'}), "(order, cut, btype='lowpass')\n", (6301, 6330), False, 'from scipy import signal\n'), ((3390, 3427), 'numpy.array', 'np.array', (['voltage[start:start + sfos]'], {}), '(voltage[start:start + sfos])\n', (3398, 3427), True, 'import numpy as np\n'), ((3438, 3475), 'numpy.array', 'np.array', (['current[start:start + sfos]'], {}), '(current[start:start + sfos])\n', (3446, 3475), True, 'import numpy as np\n'), ((4640, 4668), 'math.floor', 'math.floor', (['(numPoints / sfos)'], {}), '(numPoints / sfos)\n', (4650, 4668), False, 'import math\n'), ((4786, 4813), 'numpy.array', 'np.array', (['(v[:end] * c[:end])'], {}), '(v[:end] * c[:end])\n', (4794, 4813), True, 'import numpy as np\n'), ((5354, 5375), 'numpy.abs', 'np.abs', (['(s * s - p * p)'], {}), '(s * s - p * p)\n', (5360, 5375), True, 'import numpy as np\n'), ((6391, 6424), 'scipy.signal.filtfilt', 'signal.filtfilt', (['lp_b', 'lp_a', 'data'], {}), '(lp_b, lp_a, data)\n', (6406, 6424), False, 'from scipy import signal\n'), ((11428, 11455), 'math.sin', 'math.sin', (['(2.0 * math.pi * f)'], {}), '(2.0 * math.pi * f)\n', (11436, 11455), False, 'import math\n'), ((12139, 12150), 'numpy.sign', 
'np.sign', (['v1'], {}), '(v1)\n', (12146, 12150), True, 'import numpy as np\n'), ((12154, 12165), 'numpy.sign', 'np.sign', (['v2'], {}), '(v2)\n', (12161, 12165), True, 'import numpy as np\n'), ((13969, 14028), 'warnings.warn', 'warnings.warn', (['"""Sinewaves need equal length for comparison"""'], {}), "('Sinewaves need equal length for comparison')\n", (13982, 14028), False, 'import warnings\n'), ((14116, 14148), 'scipy.spatial.distance.euclidean', 'distance.euclidean', (['sine1', 'sine2'], {}), '(sine1, sine2)\n', (14134, 14148), False, 'from scipy.spatial import distance\n'), ((14226, 14240), 'numpy.mean', 'np.mean', (['sine1'], {}), '(sine1)\n', (14233, 14240), True, 'import numpy as np\n'), ((14242, 14256), 'numpy.mean', 'np.mean', (['sine2'], {}), '(sine2)\n', (14249, 14256), True, 'import numpy as np\n'), ((3495, 3509), 'numpy.mean', 'np.mean', (['(u * i)'], {}), '(u * i)\n', (3502, 3509), True, 'import numpy as np\n'), ((5187, 5214), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'v', 'v'], {}), "('ij,ij->i', v, v)\n", (5196, 5214), True, 'import numpy as np\n'), ((5240, 5267), 'numpy.einsum', 'np.einsum', (['"""ij,ij->i"""', 'i', 'i'], {}), "('ij,ij->i', i, i)\n", (5249, 5267), True, 'import numpy as np\n'), ((5497, 5568), 'numpy.linspace', 'np.linspace', (['(0)', '(end / samplingRate)', '(end / samplingRate * LINE_FREQUENCY)'], {}), '(0, end / samplingRate, end / samplingRate * LINE_FREQUENCY)\n', (5508, 5568), True, 'import numpy as np\n'), ((5583, 5622), 'numpy.linspace', 'np.linspace', (['(0)', '(end / samplingRate)', 'end'], {}), '(0, end / samplingRate, end)\n', (5594, 5622), True, 'import numpy as np\n'), ((5637, 5659), 'numpy.interp', 'np.interp', (['x_new', 'x', 's'], {}), '(x_new, x, s)\n', (5646, 5659), True, 'import numpy as np\n'), ((5676, 5698), 'numpy.interp', 'np.interp', (['x_new', 'x', 'p'], {}), '(x_new, x, p)\n', (5685, 5698), True, 'import numpy as np\n'), ((5715, 5737), 'numpy.interp', 'np.interp', (['x_new', 'x', 'q'], {}), 
'(x_new, x, q)\n', (5724, 5737), True, 'import numpy as np\n'), ((8996, 9031), 'numpy.fft.fft', 'np.fft.fft', (['data'], {'norm': '"""ortho"""', 'n': 'N'}), "(data, norm='ortho', n=N)\n", (9006, 9031), True, 'import numpy as np\n'), ((9455, 9477), 'scipy.fftpack.fft', 'fftp.fft', (['data'], {'n': 'nfft'}), '(data, n=nfft)\n', (9463, 9477), True, 'import scipy.fftpack as fftp\n'), ((11383, 11410), 'math.cos', 'math.cos', (['(2.0 * math.pi * f)'], {}), '(2.0 * math.pi * f)\n', (11391, 11410), False, 'import math\n'), ((12718, 12732), 'numpy.array', 'np.array', (['vec1'], {}), '(vec1)\n', (12726, 12732), True, 'import numpy as np\n'), ((12733, 12747), 'numpy.array', 'np.array', (['vec2'], {}), '(vec2)\n', (12741, 12747), True, 'import numpy as np\n'), ((382, 396), 'numpy.diff', 'np.diff', (['signs'], {}), '(signs)\n', (389, 396), True, 'import numpy as np\n'), ((5797, 5815), 'numpy.repeat', 'np.repeat', (['s', 'sfos'], {}), '(s, sfos)\n', (5806, 5815), True, 'import numpy as np\n'), ((5832, 5850), 'numpy.repeat', 'np.repeat', (['p', 'sfos'], {}), '(p, sfos)\n', (5841, 5850), True, 'import numpy as np\n'), ((5867, 5885), 'numpy.repeat', 'np.repeat', (['q', 'sfos'], {}), '(q, sfos)\n', (5876, 5885), True, 'import numpy as np\n'), ((5944, 5977), 'numpy.repeat', 'np.repeat', (['s[-1]', '(numPoints - end)'], {}), '(s[-1], numPoints - end)\n', (5953, 5977), True, 'import numpy as np\n'), ((6006, 6039), 'numpy.repeat', 'np.repeat', (['p[-1]', '(numPoints - end)'], {}), '(p[-1], numPoints - end)\n', (6015, 6039), True, 'import numpy as np\n'), ((6068, 6101), 'numpy.repeat', 'np.repeat', (['q[-1]', '(numPoints - end)'], {}), '(q[-1], numPoints - end)\n', (6077, 6101), True, 'import numpy as np\n'), ((716, 730), 'numpy.diff', 'np.diff', (['signs'], {}), '(signs)\n', (723, 730), True, 'import numpy as np\n'), ((2438, 2453), 'numpy.diff', 'np.diff', (['signs2'], {}), '(signs2)\n', (2445, 2453), True, 'import numpy as np\n'), ((2508, 2522), 'numpy.diff', 'np.diff', (['signs'], 
{}), '(signs)\n', (2515, 2522), True, 'import numpy as np\n')] |
# quantum machine learning: classification problem
import numpy as np
import matplotlib.pyplot as plt
from qiskit import BasicAer
from qiskit.circuit.library import ZZFeatureMap
from qiskit.aqua import QuantumInstance
from qiskit.aqua.algorithms import QSVM, SklearnSVM
from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name
from qiskit.ml.datasets import ad_hoc_data, sample_ad_hoc_data
# parameters
feature_dim = 2
training_dataset_size = 20
testing_dataset_size = 10
shots = 10000        # measurement shots per circuit execution
random_seed = 10598  # fixed seed for reproducible simulator results
# setup training data (synthetic "ad hoc" dataset shipped with qiskit)
# NOTE: the trailing comma in the unpacking target is redundant but harmless
sample_total, training_input, test_input, class_labels, = ad_hoc_data(
    training_size=training_dataset_size,
    test_size=testing_dataset_size,
    n=feature_dim,
    gap=0.3,
    plot_data=False )
# extra held-out datapoints used for the prediction demo below
extra_test_data = sample_ad_hoc_data(sample_total, testing_dataset_size, n=feature_dim)
datapoints, class_to_label = split_dataset_to_data_and_labels(extra_test_data)
print(class_to_label)
# setup backend, feature map, and plugged into quantum support vector machine
backend = BasicAer.get_backend('qasm_simulator')
feature_map = ZZFeatureMap(feature_dimension=feature_dim, reps=2, entanglement='linear')
qsvm = QSVM(feature_map, training_input, test_input, datapoints[0])
qsvm.random_seed = random_seed
quantum_instance = QuantumInstance(backend, shots=shots, seed_simulator=random_seed, seed_transpiler=random_seed)
# train/evaluate the QSVM and report accuracy plus per-point predictions
result = qsvm.run(quantum_instance)
print(f'Testing success ratio: {result["testing_accuracy"]}')
print()
print('Prediction from datapoints set:')
print(f' ground truth: {map_label_to_class_name(datapoints[1], qsvm.label_to_class)}')
print(f' prediction: {result["predicted_classes"]}')
predicted_labels = result["predicted_labels"]
print(f' success rate: {100*np.count_nonzero(predicted_labels == datapoints[1])/len(predicted_labels)}%')
# prints kernel matrix
print("kernel matrix during the trainings:")
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix), interpolation='nearest', origin='upper', cmap='bone_r')
plt.show()
# compare to classical SVM (same data, scikit-learn based kernel SVM)
result = SklearnSVM(training_input, test_input, datapoints[0]).run()
print(f'Testing success ratio: {result["testing_accuracy"]}')
print()
print('Prediction from datapoints set:')
# NOTE(review): reuses qsvm.label_to_class for the classical result --
# confirm both models share the same label mapping
print(f' ground truth: {map_label_to_class_name(datapoints[1], qsvm.label_to_class)}')
print(f' prediction: {result["predicted_classes"]}')
predicted_labels = result["predicted_labels"]
print(f' success rate: {100*np.count_nonzero(predicted_labels == datapoints[1])/len(predicted_labels)}%')
kernel_matrix = result['kernel_matrix_training']
img = plt.imshow(np.asmatrix(kernel_matrix), interpolation='nearest', origin='upper', cmap='bone_r')
plt.show() | [
"qiskit.aqua.utils.map_label_to_class_name",
"matplotlib.pyplot.show",
"numpy.count_nonzero",
"qiskit.ml.datasets.ad_hoc_data",
"qiskit.aqua.algorithms.SklearnSVM",
"qiskit.aqua.QuantumInstance",
"qiskit.BasicAer.get_backend",
"numpy.asmatrix",
"qiskit.aqua.algorithms.QSVM",
"qiskit.aqua.utils.spl... | [((622, 748), 'qiskit.ml.datasets.ad_hoc_data', 'ad_hoc_data', ([], {'training_size': 'training_dataset_size', 'test_size': 'testing_dataset_size', 'n': 'feature_dim', 'gap': '(0.3)', 'plot_data': '(False)'}), '(training_size=training_dataset_size, test_size=\n testing_dataset_size, n=feature_dim, gap=0.3, plot_data=False)\n', (633, 748), False, 'from qiskit.ml.datasets import ad_hoc_data, sample_ad_hoc_data\n'), ((789, 858), 'qiskit.ml.datasets.sample_ad_hoc_data', 'sample_ad_hoc_data', (['sample_total', 'testing_dataset_size'], {'n': 'feature_dim'}), '(sample_total, testing_dataset_size, n=feature_dim)\n', (807, 858), False, 'from qiskit.ml.datasets import ad_hoc_data, sample_ad_hoc_data\n'), ((888, 937), 'qiskit.aqua.utils.split_dataset_to_data_and_labels', 'split_dataset_to_data_and_labels', (['extra_test_data'], {}), '(extra_test_data)\n', (920, 937), False, 'from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name\n'), ((1049, 1087), 'qiskit.BasicAer.get_backend', 'BasicAer.get_backend', (['"""qasm_simulator"""'], {}), "('qasm_simulator')\n", (1069, 1087), False, 'from qiskit import BasicAer\n'), ((1102, 1176), 'qiskit.circuit.library.ZZFeatureMap', 'ZZFeatureMap', ([], {'feature_dimension': 'feature_dim', 'reps': '(2)', 'entanglement': '"""linear"""'}), "(feature_dimension=feature_dim, reps=2, entanglement='linear')\n", (1114, 1176), False, 'from qiskit.circuit.library import ZZFeatureMap\n'), ((1184, 1244), 'qiskit.aqua.algorithms.QSVM', 'QSVM', (['feature_map', 'training_input', 'test_input', 'datapoints[0]'], {}), '(feature_map, training_input, test_input, datapoints[0])\n', (1188, 1244), False, 'from qiskit.aqua.algorithms import QSVM, SklearnSVM\n'), ((1295, 1393), 'qiskit.aqua.QuantumInstance', 'QuantumInstance', (['backend'], {'shots': 'shots', 'seed_simulator': 'random_seed', 'seed_transpiler': 'random_seed'}), '(backend, shots=shots, seed_simulator=random_seed,\n 
seed_transpiler=random_seed)\n', (1310, 1393), False, 'from qiskit.aqua import QuantumInstance\n'), ((2054, 2064), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2062, 2064), True, 'import matplotlib.pyplot as plt\n'), ((2723, 2733), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2731, 2733), True, 'import matplotlib.pyplot as plt\n'), ((1970, 1996), 'numpy.asmatrix', 'np.asmatrix', (['kernel_matrix'], {}), '(kernel_matrix)\n', (1981, 1996), True, 'import numpy as np\n'), ((2639, 2665), 'numpy.asmatrix', 'np.asmatrix', (['kernel_matrix'], {}), '(kernel_matrix)\n', (2650, 2665), True, 'import numpy as np\n'), ((2103, 2156), 'qiskit.aqua.algorithms.SklearnSVM', 'SklearnSVM', (['training_input', 'test_input', 'datapoints[0]'], {}), '(training_input, test_input, datapoints[0])\n', (2113, 2156), False, 'from qiskit.aqua.algorithms import QSVM, SklearnSVM\n'), ((1563, 1622), 'qiskit.aqua.utils.map_label_to_class_name', 'map_label_to_class_name', (['datapoints[1]', 'qsvm.label_to_class'], {}), '(datapoints[1], qsvm.label_to_class)\n', (1586, 1622), False, 'from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name\n'), ((2300, 2359), 'qiskit.aqua.utils.map_label_to_class_name', 'map_label_to_class_name', (['datapoints[1]', 'qsvm.label_to_class'], {}), '(datapoints[1], qsvm.label_to_class)\n', (2323, 2359), False, 'from qiskit.aqua.utils import split_dataset_to_data_and_labels, map_label_to_class_name\n'), ((1757, 1808), 'numpy.count_nonzero', 'np.count_nonzero', (['(predicted_labels == datapoints[1])'], {}), '(predicted_labels == datapoints[1])\n', (1773, 1808), True, 'import numpy as np\n'), ((2494, 2545), 'numpy.count_nonzero', 'np.count_nonzero', (['(predicted_labels == datapoints[1])'], {}), '(predicted_labels == datapoints[1])\n', (2510, 2545), True, 'import numpy as np\n')] |
import os
import cv2
import json
import numpy as np
import _pickle as cPickle
from ..base_dataset import Base_dataset
from ..common import visualize
from .define import MpiiPart,MpiiColor
from .format import PoseInfo
from .prepare import prepare_dataset
from .generate import generate_train_data,generate_eval_data
def init_dataset(config):
    """Factory that builds and returns an :class:`MPII_dataset` from *config*."""
    return MPII_dataset(config)
class MPII_dataset(Base_dataset):
    '''a dataset class specified for mpii dataset, provides uniform APIs'''
    def __init__(self, config, input_kpt_cvter=None, output_kpt_cvter=None, dataset_filter=None):
        """Configure paths, part/color definitions and keypoint converters.

        Parameters
        ----------
        config : object
            Configuration object exposing ``config.data.*`` attributes.
        input_kpt_cvter : callable, optional
            Converts user keypoints into the MPII order; identity if None.
        output_kpt_cvter : callable, optional
            Converts MPII keypoints into the user order; identity if None.
        dataset_filter : callable, optional
            Predicate used to filter annotations when generating data.
        """
        super().__init__(config, input_kpt_cvter, output_kpt_cvter)
        # basic data configure
        self.official_flag = config.data.official_flag
        self.dataset_type = config.data.dataset_type
        self.dataset_path = config.data.dataset_path
        self.vis_dir = config.data.vis_dir
        # annotation/image paths are filled in by prepare_dataset()
        self.annos_path = None
        self.images_path = None
        self.parts = MpiiPart
        self.colors = MpiiColor
        # default to identity converters so callers may always invoke them
        if input_kpt_cvter is None:
            input_kpt_cvter = lambda x: x
        if output_kpt_cvter is None:
            output_kpt_cvter = lambda x: x
        self.input_kpt_cvter = input_kpt_cvter
        self.output_kpt_cvter = output_kpt_cvter
        self.dataset_filter = dataset_filter

    def visualize(self, vis_num=10):
        '''visualize annotations of the train dataset

        visualize the annotation points in the image to help understand and check annotation
        the visualized image will be saved in the "data_vis_dir" of the corresponding model directory(specified by model name).
        the visualized annotations are from the train dataset.

        Parameters
        ----------
        arg1 : Int
            An integer indicates how many images with their annotations are going to be visualized.

        Returns
        -------
        None
        '''
        train_dataset = self.get_train_dataset()
        # the module-level `visualize` helper is resolved here, not this method
        visualize(self.vis_dir, vis_num, train_dataset, self.parts, self.colors, dataset_name="mpii")

    def get_parts(self):
        # enum describing the MPII keypoint layout
        return self.parts

    def get_colors(self):
        # per-part drawing colors used by visualize()
        return self.colors

    def get_dataset_type(self):
        return self.dataset_type

    def prepare_dataset(self):
        '''download,extract, and reformat the dataset
        the official dataset is in .mat format, format it into json format automaticly.

        Parameters
        ----------
        None

        Returns
        -------
        None
        '''
        # module-level prepare_dataset(); populates annotation and image paths
        self.train_annos_path, self.val_annos_path, self.images_path = prepare_dataset(self.dataset_path)

    def generate_train_data(self):
        # training samples, with user keypoints converted into MPII order
        return generate_train_data(self.images_path, self.train_annos_path, self.dataset_filter, self.input_kpt_cvter)

    def generate_eval_data(self):
        # evaluation samples from the (automatically split) validation set
        return generate_eval_data(self.images_path, self.val_annos_path, self.dataset_filter)

    def set_input_kpt_cvter(self, input_kpt_cvter):
        self.input_kpt_cvter = input_kpt_cvter

    def set_output_kpt_cvter(self, output_kpt_cvter):
        self.output_kpt_cvter = output_kpt_cvter

    def get_input_kpt_cvter(self):
        return self.input_kpt_cvter

    def get_output_kpt_cvter(self):
        return self.output_kpt_cvter

    def official_eval(self, pd_json, eval_dir="./eval_dir"):
        '''providing official evaluation of MPII dataset

        output model metrics of PCHs on mpii evaluation dataset(split automaticly)

        Parameters
        ----------
        arg1 : String
            A string path of the json file in the same format of cocoeval annotation file(person_keypoints_val2017.json)
            which contains predicted results. one can refer the evaluation pipeline of models for generation procedure of this json file.
        arg2 : String
            A string path indicates where the result json file which contains MPII PCH metrics of various keypoint saves.

        Returns
        -------
        None
        '''
        # format predict result in dict: image_id -> list of predicted annotations,
        # with keypoints repacked from a flat [x,y,s,...] list into a 2 x n_parts array
        pd_anns = pd_json["annotations"]
        pd_dict = {}
        for pd_ann in pd_anns:
            image_id = pd_ann["image_id"]
            kpt_list = np.array(pd_ann["keypoints"])
            x = kpt_list[0::3][np.newaxis, ...]
            y = kpt_list[1::3][np.newaxis, ...]
            pd_ann["keypoints"] = np.concatenate([x, y], axis=0)
            if(image_id not in pd_dict):
                pd_dict[image_id] = []
            pd_dict[image_id].append(pd_ann)
        # format ground truth the same way; visibility flags are binarized
        metas = PoseInfo(self.images_path, self.val_annos_path, dataset_filter=self.dataset_filter).metas
        gt_dict = {}
        for meta in metas:
            gt_ann_list = meta.to_anns_list()
            for gt_ann in gt_ann_list:
                kpt_list = np.array(gt_ann["keypoints"])
                x = kpt_list[0::3][np.newaxis, ...]
                y = kpt_list[1::3][np.newaxis, ...]
                vis_list = np.array(gt_ann["vis"])
                vis_list = np.where(vis_list > 0, 1, 0)
                gt_ann["keypoints"] = np.concatenate([x, y], axis=0)
                gt_ann["vis"] = vis_list
            gt_dict[meta.image_id] = gt_ann_list
        all_pd_kpts = []
        all_gt_kpts = []
        all_gt_vis = []
        all_gt_headbbxs = []
        # match kpt into order for PCK calculation: greedily assign each
        # prediction (highest score first) to its nearest unmatched gt person
        for image_id in pd_dict.keys():
            # sort pd_anns by score (descending)
            pd_img_anns = np.array(pd_dict[image_id])
            sort_idx = np.argsort([-pd_img_ann["score"] for pd_img_ann in pd_img_anns])
            pd_img_anns = pd_img_anns[sort_idx]
            gt_img_anns = gt_dict[image_id]
            # start to match pd and gt anns; -1 marks an unmatched gt person
            match_pd_ids = np.full(shape=len(gt_img_anns), fill_value=-1)
            for pd_id, pd_img_ann in enumerate(pd_img_anns):
                pd_kpts = pd_img_ann["keypoints"]
                match_id = -1
                match_dist = np.inf
                for gt_id, gt_img_ann in enumerate(gt_img_anns):
                    # gt person already matched
                    if(match_pd_ids[gt_id] != -1):
                        continue
                    gt_kpts = gt_img_ann["keypoints"]
                    gt_vis = gt_img_ann["vis"]
                    # thorax and pelvis (indices 6:8) are excluded from matching
                    vis_mask = np.ones(shape=gt_vis.shape)
                    vis_mask[6:8] = 0
                    vis_num = np.sum(gt_vis)
                    if(vis_num == 0):
                        continue
                    # mean euclidean distance over visible, unmasked joints
                    dist = np.sum(np.linalg.norm((pd_kpts - gt_kpts) * gt_vis * vis_mask, axis=0)) / vis_num
                    if(dist < match_dist):
                        match_dist = dist
                        match_id = gt_id
                if(match_id != -1):
                    match_pd_ids[match_id] = pd_id
            # add kpts to the list by the matched order
            for gt_id, gt_img_ann in enumerate(gt_img_anns):
                all_gt_kpts.append(gt_img_ann["keypoints"])
                all_gt_vis.append(gt_img_ann["vis"])
                all_gt_headbbxs.append(gt_img_ann["headbbx"])
                match_pd_id = match_pd_ids[gt_id]
                if(match_pd_id != -1):
                    all_pd_kpts.append(pd_img_anns[match_pd_id]["keypoints"])
                # not detected: count as all-zero prediction
                else:
                    all_pd_kpts.append(np.zeros_like(all_gt_kpts[-1]))
        # calculate pchk
        # input shape:
        # shape kpts 2*n_pos*val_num
        # shape vis n_pos*val_num
        # shape headbbxs(x,y,w,h) 4*val_num
        # shape all_dist n_pos*val_num
        # shape headsize val_num
        print(f"evaluating over {len(pd_dict.keys())} images and {len(all_gt_kpts)} people")
        all_pd_kpts = np.array(all_pd_kpts).transpose([1, 2, 0])
        all_gt_kpts = np.array(all_gt_kpts).transpose([1, 2, 0])
        all_gt_vis = np.array(all_gt_vis).transpose([1, 0])
        all_gt_headbbxs = np.array(all_gt_headbbxs).transpose([1, 0])
        all_gt_headsize = np.linalg.norm(all_gt_headbbxs[2:4, :], axis=0)  # [2:4] correspond to w,h
        # normalize joint distances by head size (the "h" of PCKh)
        all_dist = np.linalg.norm(all_pd_kpts - all_gt_kpts, axis=0) / all_gt_headsize
        jnt_vis_num = np.sum(all_gt_vis, axis=1)
        PCKh = 100.0 * np.sum(all_dist <= 0.5, axis=1) / jnt_vis_num
        # calculate pchk_all over thresholds 0.0 .. 0.5
        rng = np.arange(0, 0.5 + 0.1, 0.1)
        pckAll = np.zeros((len(rng), len(self.parts)))
        for r in range(0, len(rng)):
            threshold = rng[r]
            pckAll[r] = 100.0 * np.sum(all_dist <= threshold, axis=1) / jnt_vis_num
        # calculate mean
        PCKh_mask = np.ma.array(PCKh, mask=False)
        PCKh_mask.mask[6:8] = True  # ignore thorax and pevis
        jnt_count = np.ma.array(jnt_vis_num, mask=False)
        jnt_count.mask[6:8] = True  # ignore thorax and pevis
        jnt_ratio = jnt_count / np.sum(jnt_count).astype(np.float64)
        result_dict = {
            "Head": PCKh[MpiiPart.Headtop.value],
            "Shoulder": 0.5 * (PCKh[MpiiPart.LShoulder.value] + PCKh[MpiiPart.RShoulder.value]),
            "Elbow": 0.5 * (PCKh[MpiiPart.LElbow.value] + PCKh[MpiiPart.RElbow.value]),
            "Wrist": 0.5 * (PCKh[MpiiPart.LWrist.value] + PCKh[MpiiPart.RWrist.value]),
            "Hip": 0.5 * (PCKh[MpiiPart.LHip.value] + PCKh[MpiiPart.RHip.value]),
            "Knee": 0.5 * (PCKh[MpiiPart.LKnee.value] + PCKh[MpiiPart.RKnee.value]),
            "Ankle": 0.5 * (PCKh[MpiiPart.LAnkle.value] + PCKh[MpiiPart.RAnkle.value]),
            "Mean": np.sum(PCKh_mask * jnt_ratio),
            "Mean@0.1": np.mean(np.sum(pckAll[1:, :] * jnt_ratio, axis=1))
        }
        print("\tresult-PCKh:")
        for key in result_dict.keys():
            print(f"\t{key}: {result_dict[key]}")
        # ensure the output directory exists and close the file handle properly
        # (the previous json.dump(..., open(...)) leaked the file object)
        os.makedirs(eval_dir, exist_ok=True)
        result_path = os.path.join(eval_dir, "result.json")
        with open(result_path, "w") as result_file:
            json.dump(result_dict, result_file)
        return result_dict
| [
"numpy.zeros_like",
"numpy.sum",
"numpy.ones",
"numpy.argsort",
"numpy.ma.array",
"numpy.where",
"numpy.arange",
"numpy.linalg.norm",
"numpy.array",
"os.path.join",
"numpy.concatenate"
] | [((7841, 7888), 'numpy.linalg.norm', 'np.linalg.norm', (['all_gt_headbbxs[2:4, :]'], {'axis': '(0)'}), '(all_gt_headbbxs[2:4, :], axis=0)\n', (7855, 7888), True, 'import numpy as np\n'), ((8012, 8038), 'numpy.sum', 'np.sum', (['all_gt_vis'], {'axis': '(1)'}), '(all_gt_vis, axis=1)\n', (8018, 8038), True, 'import numpy as np\n'), ((8140, 8168), 'numpy.arange', 'np.arange', (['(0)', '(0.5 + 0.1)', '(0.1)'], {}), '(0, 0.5 + 0.1, 0.1)\n', (8149, 8168), True, 'import numpy as np\n'), ((8406, 8435), 'numpy.ma.array', 'np.ma.array', (['PCKh'], {'mask': '(False)'}), '(PCKh, mask=False)\n', (8417, 8435), True, 'import numpy as np\n'), ((8522, 8558), 'numpy.ma.array', 'np.ma.array', (['jnt_vis_num'], {'mask': '(False)'}), '(jnt_vis_num, mask=False)\n', (8533, 8558), True, 'import numpy as np\n'), ((9577, 9614), 'os.path.join', 'os.path.join', (['eval_dir', '"""result.json"""'], {}), "(eval_dir, 'result.json')\n", (9589, 9614), False, 'import os\n'), ((4179, 4208), 'numpy.array', 'np.array', (["pd_ann['keypoints']"], {}), "(pd_ann['keypoints'])\n", (4187, 4208), True, 'import numpy as np\n'), ((4331, 4361), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {'axis': '(0)'}), '([x, y], axis=0)\n', (4345, 4361), True, 'import numpy as np\n'), ((5391, 5418), 'numpy.array', 'np.array', (['pd_dict[image_id]'], {}), '(pd_dict[image_id])\n', (5399, 5418), True, 'import numpy as np\n'), ((5440, 5506), 'numpy.argsort', 'np.argsort', (["[(-pd_img_ann['score']) for pd_img_ann in pd_img_anns]"], {}), "([(-pd_img_ann['score']) for pd_img_ann in pd_img_anns])\n", (5450, 5506), True, 'import numpy as np\n'), ((7929, 7978), 'numpy.linalg.norm', 'np.linalg.norm', (['(all_pd_kpts - all_gt_kpts)'], {'axis': '(0)'}), '(all_pd_kpts - all_gt_kpts, axis=0)\n', (7943, 7978), True, 'import numpy as np\n'), ((9324, 9353), 'numpy.sum', 'np.sum', (['(PCKh_mask * jnt_ratio)'], {}), '(PCKh_mask * jnt_ratio)\n', (9330, 9353), True, 'import numpy as np\n'), ((4768, 4797), 'numpy.array', 'np.array', 
(["gt_ann['keypoints']"], {}), "(gt_ann['keypoints'])\n", (4776, 4797), True, 'import numpy as np\n'), ((4921, 4944), 'numpy.array', 'np.array', (["gt_ann['vis']"], {}), "(gt_ann['vis'])\n", (4929, 4944), True, 'import numpy as np\n'), ((4970, 4998), 'numpy.where', 'np.where', (['(vis_list > 0)', '(1)', '(0)'], {}), '(vis_list > 0, 1, 0)\n', (4978, 4998), True, 'import numpy as np\n'), ((5031, 5061), 'numpy.concatenate', 'np.concatenate', (['[x, y]'], {'axis': '(0)'}), '([x, y], axis=0)\n', (5045, 5061), True, 'import numpy as np\n'), ((7591, 7612), 'numpy.array', 'np.array', (['all_pd_kpts'], {}), '(all_pd_kpts)\n', (7599, 7612), True, 'import numpy as np\n'), ((7652, 7673), 'numpy.array', 'np.array', (['all_gt_kpts'], {}), '(all_gt_kpts)\n', (7660, 7673), True, 'import numpy as np\n'), ((7712, 7732), 'numpy.array', 'np.array', (['all_gt_vis'], {}), '(all_gt_vis)\n', (7720, 7732), True, 'import numpy as np\n'), ((7774, 7799), 'numpy.array', 'np.array', (['all_gt_headbbxs'], {}), '(all_gt_headbbxs)\n', (7782, 7799), True, 'import numpy as np\n'), ((8057, 8088), 'numpy.sum', 'np.sum', (['(all_dist <= 0.5)'], {'axis': '(1)'}), '(all_dist <= 0.5, axis=1)\n', (8063, 8088), True, 'import numpy as np\n'), ((9385, 9426), 'numpy.sum', 'np.sum', (['(pckAll[1:, :] * jnt_ratio)'], {'axis': '(1)'}), '(pckAll[1:, :] * jnt_ratio, axis=1)\n', (9391, 9426), True, 'import numpy as np\n'), ((6196, 6223), 'numpy.ones', 'np.ones', ([], {'shape': 'gt_vis.shape'}), '(shape=gt_vis.shape)\n', (6203, 6223), True, 'import numpy as np\n'), ((6288, 6302), 'numpy.sum', 'np.sum', (['gt_vis'], {}), '(gt_vis)\n', (6294, 6302), True, 'import numpy as np\n'), ((8315, 8352), 'numpy.sum', 'np.sum', (['(all_dist <= threshold)'], {'axis': '(1)'}), '(all_dist <= threshold, axis=1)\n', (8321, 8352), True, 'import numpy as np\n'), ((8656, 8673), 'numpy.sum', 'np.sum', (['jnt_count'], {}), '(jnt_count)\n', (8662, 8673), True, 'import numpy as np\n'), ((7218, 7248), 'numpy.zeros_like', 'np.zeros_like', 
(['all_gt_kpts[-1]'], {}), '(all_gt_kpts[-1])\n', (7231, 7248), True, 'import numpy as np\n'), ((6404, 6467), 'numpy.linalg.norm', 'np.linalg.norm', (['((pd_kpts - gt_kpts) * gt_vis * vis_mask)'], {'axis': '(0)'}), '((pd_kpts - gt_kpts) * gt_vis * vis_mask, axis=0)\n', (6418, 6467), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from operators import spin_loop_correlator
class SpinLoopCorrelatorTest(tf.test.TestCase):
  """Checks find_states() for both value- and one-hot-encoded spin chains."""

  def _check_find_states(self, i, j, chain, expected_states, expected_coeffs,
                         add_sign=False):
    """Builds a correlator, runs find_states and verifies the result."""
    if add_sign:
      correlator = spin_loop_correlator.SpinLoopCorrelator(i, j, add_sign=True)
    else:
      correlator = spin_loop_correlator.SpinLoopCorrelator(i, j)
    states, coeffs = correlator.find_states(np.array(chain))
    expected = np.array(expected_states)
    self.assertEqual(states.shape, expected.shape)
    self.assertEqual(coeffs.shape, (1,))
    np.testing.assert_equal(states, expected)
    np.testing.assert_equal(coeffs, expected_coeffs)

  def test_find_states_value_encoding(self):
    chain = [0, 1, 3, 3, 4]
    # Without add_sign the coefficient is always +1.
    self._check_find_states(1, 3, chain, [[0, 3, 1, 3, 4]], 1.0)
    self._check_find_states(3, 1, chain, [[4, 0, 3, 1, 3]], 1.0)
    # With add_sign the same permutations pick up a -1 coefficient.
    self._check_find_states(1, 3, chain, [[0, 3, 1, 3, 4]], -1.0,
                            add_sign=True)
    self._check_find_states(3, 1, chain, [[4, 0, 3, 1, 3]], -1.0,
                            add_sign=True)

  def test_find_states_onehot_encoding(self):
    chain = [[1, 0, 0, 0, 0],
             [0, 1, 0, 0, 0],
             [0, 0, 0, 1, 0],
             [0, 0, 0, 1, 0],
             [0, 0, 0, 0, 1]]
    permuted_13 = [[[1, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 0, 0, 0, 1]]]
    permuted_31 = [[[0, 0, 0, 0, 1],
                    [1, 0, 0, 0, 0],
                    [0, 0, 0, 1, 0],
                    [0, 1, 0, 0, 0],
                    [0, 0, 0, 1, 0]]]
    # Without add_sign the coefficient is always +1.
    self._check_find_states(1, 3, chain, permuted_13, 1.0)
    self._check_find_states(3, 1, chain, permuted_31, 1.0)
    # With add_sign the same permutations pick up a -1 coefficient.
    self._check_find_states(1, 3, chain, permuted_13, -1.0, add_sign=True)
    self._check_find_states(3, 1, chain, permuted_31, -1.0, add_sign=True)
# Run the test cases via the TensorFlow test runner when executed as a script.
if __name__ == "__main__":
  tf.test.main()
| [
"tensorflow.test.main",
"operators.spin_loop_correlator.SpinLoopCorrelator",
"numpy.array",
"numpy.testing.assert_equal"
] | [((5067, 5081), 'tensorflow.test.main', 'tf.test.main', ([], {}), '()\n', (5079, 5081), True, 'import tensorflow as tf\n'), ((321, 366), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(1)', '(3)'], {}), '(1, 3)\n', (360, 366), False, 'from operators import spin_loop_correlator\n'), ((616, 652), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(1.0)'], {}), '(coeffs, 1.0)\n', (639, 652), True, 'import numpy as np\n'), ((680, 725), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(3)', '(1)'], {}), '(3, 1)\n', (719, 725), False, 'from operators import spin_loop_correlator\n'), ((975, 1011), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(1.0)'], {}), '(coeffs, 1.0)\n', (998, 1011), True, 'import numpy as np\n'), ((1039, 1099), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(1)', '(3)'], {'add_sign': '(True)'}), '(1, 3, add_sign=True)\n', (1078, 1099), False, 'from operators import spin_loop_correlator\n'), ((1415, 1452), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(-1.0)'], {}), '(coeffs, -1.0)\n', (1438, 1452), True, 'import numpy as np\n'), ((1480, 1540), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(3)', '(1)'], {'add_sign': '(True)'}), '(3, 1, add_sign=True)\n', (1519, 1540), False, 'from operators import spin_loop_correlator\n'), ((1856, 1893), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(-1.0)'], {}), '(coeffs, -1.0)\n', (1879, 1893), True, 'import numpy as np\n'), ((1969, 2014), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(1)', '(3)'], {}), '(1, 3)\n', (2008, 2014), False, 'from operators import spin_loop_correlator\n'), ((2637, 2673), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', 
'(1.0)'], {}), '(coeffs, 1.0)\n', (2660, 2673), True, 'import numpy as np\n'), ((2701, 2746), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(3)', '(1)'], {}), '(3, 1)\n', (2740, 2746), False, 'from operators import spin_loop_correlator\n'), ((3369, 3405), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(1.0)'], {}), '(coeffs, 1.0)\n', (3392, 3405), True, 'import numpy as np\n'), ((3433, 3493), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(1)', '(3)'], {'add_sign': '(True)'}), '(1, 3, add_sign=True)\n', (3472, 3493), False, 'from operators import spin_loop_correlator\n'), ((4182, 4219), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(-1.0)'], {}), '(coeffs, -1.0)\n', (4205, 4219), True, 'import numpy as np\n'), ((4247, 4307), 'operators.spin_loop_correlator.SpinLoopCorrelator', 'spin_loop_correlator.SpinLoopCorrelator', (['(3)', '(1)'], {'add_sign': '(True)'}), '(3, 1, add_sign=True)\n', (4286, 4307), False, 'from operators import spin_loop_correlator\n'), ((4996, 5033), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['coeffs', '(-1.0)'], {}), '(coeffs, -1.0)\n', (5019, 5033), True, 'import numpy as np\n'), ((420, 445), 'numpy.array', 'np.array', (['[0, 1, 3, 3, 4]'], {}), '([0, 1, 3, 3, 4])\n', (428, 445), True, 'import numpy as np\n'), ((579, 606), 'numpy.array', 'np.array', (['[[0, 3, 1, 3, 4]]'], {}), '([[0, 3, 1, 3, 4]])\n', (587, 606), True, 'import numpy as np\n'), ((779, 804), 'numpy.array', 'np.array', (['[0, 1, 3, 3, 4]'], {}), '([0, 1, 3, 3, 4])\n', (787, 804), True, 'import numpy as np\n'), ((938, 965), 'numpy.array', 'np.array', (['[[4, 0, 3, 1, 3]]'], {}), '([[4, 0, 3, 1, 3]])\n', (946, 965), True, 'import numpy as np\n'), ((1219, 1244), 'numpy.array', 'np.array', (['[0, 1, 3, 3, 4]'], {}), '([0, 1, 3, 3, 4])\n', (1227, 1244), True, 'import numpy as np\n'), ((1378, 1405), 'numpy.array', 
'np.array', (['[[0, 3, 1, 3, 4]]'], {}), '([[0, 3, 1, 3, 4]])\n', (1386, 1405), True, 'import numpy as np\n'), ((1660, 1685), 'numpy.array', 'np.array', (['[0, 1, 3, 3, 4]'], {}), '([0, 1, 3, 3, 4])\n', (1668, 1685), True, 'import numpy as np\n'), ((1819, 1846), 'numpy.array', 'np.array', (['[[4, 0, 3, 1, 3]]'], {}), '([[4, 0, 3, 1, 3]])\n', (1827, 1846), True, 'import numpy as np\n'), ((2068, 2168), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1]])\n', (2076, 2168), True, 'import numpy as np\n'), ((2326, 2428), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]]'], {}), '([[[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, \n 0], [0, 0, 0, 0, 1]]])\n', (2334, 2428), True, 'import numpy as np\n'), ((2800, 2900), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1]])\n', (2808, 2900), True, 'import numpy as np\n'), ((3058, 3160), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 1], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0]]]'], {}), '([[[0, 0, 0, 0, 1], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, \n 0], [0, 0, 0, 1, 0]]])\n', (3066, 3160), True, 'import numpy as np\n'), ((3613, 3713), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1]])\n', (3621, 3713), True, 'import numpy as np\n'), ((3871, 3973), 'numpy.array', 'np.array', (['[[[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]]'], {}), '([[[1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 
0, 0], [0, 0, 0, 1, \n 0], [0, 0, 0, 0, 1]]])\n', (3879, 3973), True, 'import numpy as np\n'), ((4427, 4527), 'numpy.array', 'np.array', (['[[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0], [0, 0,\n 0, 0, 1]]'], {}), '([[1, 0, 0, 0, 0], [0, 1, 0, 0, 0], [0, 0, 0, 1, 0], [0, 0, 0, 1, 0\n ], [0, 0, 0, 0, 1]])\n', (4435, 4527), True, 'import numpy as np\n'), ((4685, 4787), 'numpy.array', 'np.array', (['[[[0, 0, 0, 0, 1], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, 0], [0, 0,\n 0, 1, 0]]]'], {}), '([[[0, 0, 0, 0, 1], [1, 0, 0, 0, 0], [0, 0, 0, 1, 0], [0, 1, 0, 0, \n 0], [0, 0, 0, 1, 0]]])\n', (4693, 4787), True, 'import numpy as np\n')] |
import numpy as np
import scipy as sp
from optimize import GibbsOrientationSampler, Likelihood
class CryoEMSampler(GibbsOrientationSampler):
    """Orientation sampler operating on the centered Fourier transform.

    Every real-space volume is mapped to fftshift(fftn(x)) before being
    handed to the parent sampler; the gradient is mapped back to real space
    with the inverse pair.
    """

    def energy(self, x):
        # Parent energy is defined in (shifted) Fourier space.
        return super(CryoEMSampler, self).energy(np.fft.fftshift(np.fft.fftn(x)))

    def gradient(self, x):
        # Differentiate in Fourier space, then transform back to real space.
        ft_grad = super(CryoEMSampler, self).gradient(np.fft.fftshift(np.fft.fftn(x)))
        return np.fft.ifftshift(np.fft.ifftn(ft_grad))

    def update_rotations(self, x):
        # Rotation updates are likewise performed on the Fourier transform.
        return super(CryoEMSampler, self).update_rotations(np.fft.fftshift(np.fft.fftn(x)))
class CryoEMGaussianLikelihood(Likelihood):
    """Gaussian (least-squares) likelihood for cryo-EM image data.

    The negative log-likelihood of observed *data* given predicted *theta*
    is 0.5 * k * ||data - theta||^2 - n * log(k), where k is the noise
    precision and n the number of data points.
    """

    def __init__(self, k=1., n=1, mask=None):
        """
        Parameters
        ----------
        k : float
            Precision (inverse variance) of the Gaussian noise model.
        n : int
            Number of data points entering the partition-function term.
        mask : optional
            Forwarded unchanged to the base Likelihood class.
        """
        super(CryoEMGaussianLikelihood, self).__init__(n, mask)
        # np.float was a deprecated alias for the builtin float and was
        # removed in NumPy 1.24; use the builtin instead.
        self._k = float(k)

    def _energy(self, theta, data):
        """Return the negative log-likelihood of *data* given *theta*."""
        chi = data - theta
        E = 0.5 * self._k * chi.dot(chi)
        # Partition function
        E -= self._n * np.log(self._k)
        return E

    def _gradient(self, theta, data):
        """Return (energy, gradient w.r.t. *theta*)."""
        diff = data - theta
        energy = 0.5 * self._k * diff.dot(diff)
        # Partition function
        energy -= self._n * np.log(self._k)
        grad = -self._k * diff
        return energy, grad
# Ad-hoc gradient-check script: compares analytic gradients of the likelihood
# and of the sampler against finite-difference approximations on phantom data.
if __name__ == "__main__":
    import pylab as plt
    import seaborn as sns
    import scipy.ndimage
    import time
    from xfel.numeric.quadrature import GaussSO3Quadrature, ChebyshevSO3Quadrature
    import os
    from xfel.grid.interpolation_matrix import compute_slice_interpolation_matrix, get_image_to_sparse_projection
    from xfel.grid.optimize import GaussianLikelihood
    # grid resolution (voxels per side), quadrature order and radial cutoff
    resolution = 32
    order = 3
    rad = 0.99
    q = ChebyshevSO3Quadrature(order)
    # m: number of quadrature rotations
    m = len(q.R)
    from xfel.io import mrc
    from xfel.grid.interpolation_matrix import compute_slice_interpolation_matrix, get_image_to_sparse_projection
    from scipy.ndimage import zoom
    # NOTE(review): paths are hard-coded to the author's home directory
    ground_truth = mrc.read(os.path.expanduser("~/projects/xfel/data/phantom/phantom.mrc"))[0]
    # downsample the 128^3 phantom to the working resolution
    gt = ground_truth = zoom(ground_truth, resolution/128.)
    data = mrc.read(os.path.expanduser("~/projects/gmm-rec/examples/phantom/coarse_input.mrc"))[0]
    data = data.swapaxes(0,2)
    # projection operator (central slices) and image->sparse-vector map
    proj = compute_slice_interpolation_matrix(q.R, resolution, radius_cutoff=rad)
    image_to_vector = get_image_to_sparse_projection(resolution, rad)
    # centered Fourier transforms of the observed images, then sparsified
    ft_data = np.array([np.fft.fftshift(np.fft.fftn(d)) for d in data])
    ft_data_sparse = np.array([image_to_vector.dot(ft_data[i,:,:].ravel())
                               for i in range(ft_data.shape[0])])
    ll = CryoEMGaussianLikelihood()
    # synthetic "observed" image: axis-sum projection of the ground truth
    d = gt.sum(-1)
    fd = np.fft.fftshift(np.fft.fftn(d))
    fd = image_to_vector.dot(fd.ravel())
    # central slices of the ground-truth volume, one row per rotation
    slices = proj.dot(np.fft.fftshift(np.fft.fftn(gt)).ravel())
    slices = slices.reshape((m,-1))
    # finite-difference check of the likelihood gradient (complex step eps)
    x0 = slices[0]
    e0 = ll.energy(slices[0], fd)
    grad = ll.gradient(slices[0], fd)[1]
    grad_numeric = np.zeros_like(grad)
    eps = 1e-4j
    for i in range(x0.size):
        x0[i] += eps
        grad_numeric[i] = (ll.energy(x0, fd) - e0)/eps
        x0[i] -= eps
    # NOTE(review): bare `raise` deliberately aborts the script here (it raises
    # RuntimeError since no exception is active); remove it to run the sampler
    # gradient check below.
    raise
    # Currently I assume that the complex part of the gradient is of
    # NOTE(review): the comment above is truncated in the source — presumably
    # it claimed the imaginary part of the gradient is negligible; confirm.
    sampler = GibbsOrientationSampler(likelihood=ll,
                                    projection=proj,
                                    quadrature=q,
                                    data=ft_data_sparse)
    x0 = np.fft.fftshift(np.fft.fftn(gt)).ravel() * 1.
    sampler.update_rotations(x0)
    e0 = sampler.energy(x0)
    grad = sampler.gradient(x0)
    # finite-difference check of the sampler gradient (real step eps)
    grad_num = np.zeros_like(grad)
    eps = 1e-6
    for i in range(x0.size):
        x0[i] += eps
        grad_num[i] = (sampler.energy(x0) - e0)/eps
        x0[i] -= eps
| [
"numpy.zeros_like",
"numpy.log",
"xfel.numeric.quadrature.ChebyshevSO3Quadrature",
"optimize.GibbsOrientationSampler",
"numpy.fft.fftn",
"numpy.float",
"scipy.ndimage.zoom",
"numpy.fft.ifftn",
"xfel.grid.interpolation_matrix.compute_slice_interpolation_matrix",
"os.path.expanduser",
"xfel.grid.i... | [((1736, 1765), 'xfel.numeric.quadrature.ChebyshevSO3Quadrature', 'ChebyshevSO3Quadrature', (['order'], {}), '(order)\n', (1758, 1765), False, 'from xfel.numeric.quadrature import GaussSO3Quadrature, ChebyshevSO3Quadrature\n'), ((2081, 2119), 'scipy.ndimage.zoom', 'zoom', (['ground_truth', '(resolution / 128.0)'], {}), '(ground_truth, resolution / 128.0)\n', (2085, 2119), False, 'from scipy.ndimage import zoom\n'), ((2259, 2329), 'xfel.grid.interpolation_matrix.compute_slice_interpolation_matrix', 'compute_slice_interpolation_matrix', (['q.R', 'resolution'], {'radius_cutoff': 'rad'}), '(q.R, resolution, radius_cutoff=rad)\n', (2293, 2329), False, 'from xfel.grid.interpolation_matrix import compute_slice_interpolation_matrix, get_image_to_sparse_projection\n'), ((2353, 2400), 'xfel.grid.interpolation_matrix.get_image_to_sparse_projection', 'get_image_to_sparse_projection', (['resolution', 'rad'], {}), '(resolution, rad)\n', (2383, 2400), False, 'from xfel.grid.interpolation_matrix import compute_slice_interpolation_matrix, get_image_to_sparse_projection\n'), ((2959, 2978), 'numpy.zeros_like', 'np.zeros_like', (['grad'], {}), '(grad)\n', (2972, 2978), True, 'import numpy as np\n'), ((3218, 3313), 'optimize.GibbsOrientationSampler', 'GibbsOrientationSampler', ([], {'likelihood': 'll', 'projection': 'proj', 'quadrature': 'q', 'data': 'ft_data_sparse'}), '(likelihood=ll, projection=proj, quadrature=q, data=\n ft_data_sparse)\n', (3241, 3313), False, 'from optimize import GibbsOrientationSampler, Likelihood\n'), ((3589, 3608), 'numpy.zeros_like', 'np.zeros_like', (['grad'], {}), '(grad)\n', (3602, 3608), True, 'import numpy as np\n'), ((766, 777), 'numpy.float', 'np.float', (['k'], {}), '(k)\n', (774, 777), True, 'import numpy as np\n'), ((2688, 2702), 'numpy.fft.fftn', 'np.fft.fftn', (['d'], {}), '(d)\n', (2699, 2702), True, 'import numpy as np\n'), ((200, 214), 'numpy.fft.fftn', 'np.fft.fftn', (['x'], {}), '(x)\n', (211, 214), True, 'import numpy as 
np\n'), ((326, 340), 'numpy.fft.fftn', 'np.fft.fftn', (['x'], {}), '(x)\n', (337, 340), True, 'import numpy as np\n'), ((428, 444), 'numpy.fft.ifftn', 'np.fft.ifftn', (['dE'], {}), '(dE)\n', (440, 444), True, 'import numpy as np\n'), ((511, 525), 'numpy.fft.fftn', 'np.fft.fftn', (['x'], {}), '(x)\n', (522, 525), True, 'import numpy as np\n'), ((980, 995), 'numpy.log', 'np.log', (['self._k'], {}), '(self._k)\n', (986, 995), True, 'import numpy as np\n'), ((1210, 1225), 'numpy.log', 'np.log', (['self._k'], {}), '(self._k)\n', (1216, 1225), True, 'import numpy as np\n'), ((1990, 2052), 'os.path.expanduser', 'os.path.expanduser', (['"""~/projects/xfel/data/phantom/phantom.mrc"""'], {}), "('~/projects/xfel/data/phantom/phantom.mrc')\n", (2008, 2052), False, 'import os\n'), ((2138, 2212), 'os.path.expanduser', 'os.path.expanduser', (['"""~/projects/gmm-rec/examples/phantom/coarse_input.mrc"""'], {}), "('~/projects/gmm-rec/examples/phantom/coarse_input.mrc')\n", (2156, 2212), False, 'import os\n'), ((2442, 2456), 'numpy.fft.fftn', 'np.fft.fftn', (['d'], {}), '(d)\n', (2453, 2456), True, 'import numpy as np\n'), ((2783, 2798), 'numpy.fft.fftn', 'np.fft.fftn', (['gt'], {}), '(gt)\n', (2794, 2798), True, 'import numpy as np\n'), ((3450, 3465), 'numpy.fft.fftn', 'np.fft.fftn', (['gt'], {}), '(gt)\n', (3461, 3465), True, 'import numpy as np\n')] |
from hashlib import md5
from urllib.request import urlopen
import numpy as np
from .abc import Embedding
class NonLSHEmbedding(Embedding):
    """Deterministic pseudo-random embedding (no locality-sensitive hashing).

    The MD5 digest of the raw bytes seeds a PRNG, so identical content is
    always mapped to the same L2-normalized Gaussian random vector.
    """

    def __init__(self, dim: int):
        self.__dim = dim

    def get_dim(self) -> int:
        # dimensionality of the produced vectors
        return self.__dim

    def get_version(self) -> str:
        # version string encodes the dimension so changing it invalidates caches
        return f'aptl3/r-{self.__dim:d}'

    def transform(self, *, url: str = None, data: bytes = None) -> np.ndarray:
        # Fetch the raw bytes from *url* when *data* is not given directly.
        if data is None:
            with urlopen(url=url) as response:
                data = response.read(-1)
        # Derive a deterministic PRNG seed from the content digest.
        seed = int.from_bytes(md5(data).digest(), 'little', signed=False)
        generator = np.random.Generator(np.random.SFC64(seed=seed))
        vec = generator.normal(0, 1, size=[self.__dim])
        vec /= np.sum(vec**2)**0.5  # L2 normalization
        return vec
| [
"numpy.random.SFC64",
"hashlib.md5",
"numpy.sum",
"urllib.request.urlopen"
] | [((531, 536), 'hashlib.md5', 'md5', ([], {}), '()\n', (534, 536), False, 'from hashlib import md5\n'), ((682, 708), 'numpy.random.SFC64', 'np.random.SFC64', ([], {'seed': 'seed'}), '(seed=seed)\n', (697, 708), True, 'import numpy as np\n'), ((771, 785), 'numpy.sum', 'np.sum', (['(v ** 2)'], {}), '(v ** 2)\n', (777, 785), True, 'import numpy as np\n'), ((462, 478), 'urllib.request.urlopen', 'urlopen', ([], {'url': 'url'}), '(url=url)\n', (469, 478), False, 'from urllib.request import urlopen\n')] |
# -*- coding: utf-8 -*-
"""
=============================
OT for image color adaptation
=============================
This example presents a way of transferring colors between two images
with Optimal Transport as introduced in [6]
[6] Ferradans, S., Papadakis, N., Peyré, G., & Aujol, J.-F. (2014).
Regularized discrete optimal transport.
SIAM Journal on Imaging Sciences, 7(3), 1853-1882.
"""
# Authors: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: MIT License
# sphinx_gallery_thumbnail_number = 2
import os
from pathlib import Path
import numpy as np
from matplotlib import pyplot as plt
import ot
rng = np.random.RandomState(42)
def im2mat(img):
    """Flatten an (H, W, C) image into an (H*W, C) matrix, one pixel per row."""
    height, width, channels = img.shape[0], img.shape[1], img.shape[2]
    return img.reshape((height * width, channels))
def mat2im(X, shape):
    """Reshape a (n_pixels, n_channels) matrix back into an image of *shape*."""
    return np.reshape(X, shape)
def minmax(img):
    """Clip image values to the valid [0, 1] intensity range."""
    return np.clip(img, a_min=0, a_max=1)
##############################################################################
# Generate data
# -------------
# Loading images
# NOTE(review): '__file__' here is a literal string, so the path is resolved
# relative to the current working directory, not this script's location.
this_file = os.path.realpath('__file__')
data_path = os.path.join(Path(this_file).parent.parent.parent, 'data')
# Images are converted to float64 in [0, 1) by dividing by 256.
I1 = plt.imread(os.path.join(data_path, 'ocean_day.jpg')).astype(np.float64) / 256
I2 = plt.imread(os.path.join(data_path, 'ocean_sunset.jpg')).astype(np.float64) / 256
# One row per pixel, one column per color channel.
X1 = im2mat(I1)
X2 = im2mat(I2)
# training samples
# The transport plan is fitted on a random subset of pixels to keep the
# optimal-transport problem tractable.
nb = 500
idx1 = rng.randint(X1.shape[0], size=(nb,))
idx2 = rng.randint(X2.shape[0], size=(nb,))
Xs = X1[idx1, :]
Xt = X2[idx2, :]
##############################################################################
# Plot original image
# -------------------
plt.figure(1, figsize=(6.4, 3))
plt.subplot(1, 2, 1)
plt.imshow(I1)
plt.axis('off')
plt.title('Image 1')
plt.subplot(1, 2, 2)
plt.imshow(I2)
plt.axis('off')
plt.title('Image 2')
##############################################################################
# Scatter plot of colors
# ----------------------
# Each sampled pixel is plotted in the (channel 0, channel 2) plane, i.e.
# (Red, Blue), colored by its own RGB value.
plt.figure(2, figsize=(6.4, 3))
plt.subplot(1, 2, 1)
plt.scatter(Xs[:, 0], Xs[:, 2], c=Xs)
plt.axis([0, 1, 0, 1])
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 1')
plt.subplot(1, 2, 2)
plt.scatter(Xt[:, 0], Xt[:, 2], c=Xt)
plt.axis([0, 1, 0, 1])
plt.xlabel('Red')
plt.ylabel('Blue')
plt.title('Image 2')
plt.tight_layout()
##############################################################################
# Instantiate the different transport algorithms and fit them
# -----------------------------------------------------------
# EMDTransport
ot_emd = ot.da.EMDTransport()
ot_emd.fit(Xs=Xs, Xt=Xt)
# SinkhornTransport
ot_sinkhorn = ot.da.SinkhornTransport(reg_e=1e-1)
ot_sinkhorn.fit(Xs=Xs, Xt=Xt)
# prediction between images (using out of sample prediction as in [6])
# transform/inverse_transform map all pixels of each image through the
# coupling fitted on the 500-pixel subsamples, one per direction.
transp_Xs_emd = ot_emd.transform(Xs=X1)
transp_Xt_emd = ot_emd.inverse_transform(Xt=X2)
transp_Xs_sinkhorn = ot_sinkhorn.transform(Xs=X1)
transp_Xt_sinkhorn = ot_sinkhorn.inverse_transform(Xt=X2)
# Clamp to [0, 1] and reshape each transported pixel matrix back to an image.
I1t = minmax(mat2im(transp_Xs_emd, I1.shape))
I2t = minmax(mat2im(transp_Xt_emd, I2.shape))
I1te = minmax(mat2im(transp_Xs_sinkhorn, I1.shape))
I2te = minmax(mat2im(transp_Xt_sinkhorn, I2.shape))
##############################################################################
# Plot new images
# ---------------
# Top row: image 1 (original, EMD-adapted, Sinkhorn-adapted);
# bottom row: image 2, same layout.
plt.figure(3, figsize=(8, 4))
plt.subplot(2, 3, 1)
plt.imshow(I1)
plt.axis('off')
plt.title('Image 1')
plt.subplot(2, 3, 2)
plt.imshow(I1t)
plt.axis('off')
plt.title('Image 1 Adapt')
plt.subplot(2, 3, 3)
plt.imshow(I1te)
plt.axis('off')
plt.title('Image 1 Adapt (reg)')
plt.subplot(2, 3, 4)
plt.imshow(I2)
plt.axis('off')
plt.title('Image 2')
plt.subplot(2, 3, 5)
plt.imshow(I2t)
plt.axis('off')
plt.title('Image 2 Adapt')
plt.subplot(2, 3, 6)
plt.imshow(I2te)
plt.axis('off')
plt.title('Image 2 Adapt (reg)')
plt.tight_layout()
plt.show()
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.show",
"os.path.join",
"matplotlib.pyplot.imshow",
"os.path.realpath",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"numpy.random.RandomState",
"numpy.clip",
"matplotlib.pyplot.figure",
"pathlib.Path",
"ot.d... | [((610, 635), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (631, 635), True, 'import numpy as np\n'), ((1071, 1099), 'os.path.realpath', 'os.path.realpath', (['"""__file__"""'], {}), "('__file__')\n", (1087, 1099), False, 'import os\n'), ((1652, 1683), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {'figsize': '(6.4, 3)'}), '(1, figsize=(6.4, 3))\n', (1662, 1683), True, 'from matplotlib import pyplot as plt\n'), ((1685, 1705), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1696, 1705), True, 'from matplotlib import pyplot as plt\n'), ((1706, 1720), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I1'], {}), '(I1)\n', (1716, 1720), True, 'from matplotlib import pyplot as plt\n'), ((1721, 1736), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1729, 1736), True, 'from matplotlib import pyplot as plt\n'), ((1737, 1757), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 1"""'], {}), "('Image 1')\n", (1746, 1757), True, 'from matplotlib import pyplot as plt\n'), ((1759, 1779), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1770, 1779), True, 'from matplotlib import pyplot as plt\n'), ((1780, 1794), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I2'], {}), '(I2)\n', (1790, 1794), True, 'from matplotlib import pyplot as plt\n'), ((1795, 1810), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1803, 1810), True, 'from matplotlib import pyplot as plt\n'), ((1811, 1831), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 2"""'], {}), "('Image 2')\n", (1820, 1831), True, 'from matplotlib import pyplot as plt\n'), ((1964, 1995), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {'figsize': '(6.4, 3)'}), '(2, figsize=(6.4, 3))\n', (1974, 1995), True, 'from matplotlib import pyplot as plt\n'), ((1997, 2017), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (2008, 
2017), True, 'from matplotlib import pyplot as plt\n'), ((2018, 2055), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Xs[:, 0]', 'Xs[:, 2]'], {'c': 'Xs'}), '(Xs[:, 0], Xs[:, 2], c=Xs)\n', (2029, 2055), True, 'from matplotlib import pyplot as plt\n'), ((2056, 2078), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (2064, 2078), True, 'from matplotlib import pyplot as plt\n'), ((2079, 2096), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Red"""'], {}), "('Red')\n", (2089, 2096), True, 'from matplotlib import pyplot as plt\n'), ((2097, 2115), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Blue"""'], {}), "('Blue')\n", (2107, 2115), True, 'from matplotlib import pyplot as plt\n'), ((2116, 2136), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 1"""'], {}), "('Image 1')\n", (2125, 2136), True, 'from matplotlib import pyplot as plt\n'), ((2138, 2158), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (2149, 2158), True, 'from matplotlib import pyplot as plt\n'), ((2159, 2196), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Xt[:, 0]', 'Xt[:, 2]'], {'c': 'Xt'}), '(Xt[:, 0], Xt[:, 2], c=Xt)\n', (2170, 2196), True, 'from matplotlib import pyplot as plt\n'), ((2197, 2219), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (2205, 2219), True, 'from matplotlib import pyplot as plt\n'), ((2220, 2237), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Red"""'], {}), "('Red')\n", (2230, 2237), True, 'from matplotlib import pyplot as plt\n'), ((2238, 2256), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Blue"""'], {}), "('Blue')\n", (2248, 2256), True, 'from matplotlib import pyplot as plt\n'), ((2257, 2277), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 2"""'], {}), "('Image 2')\n", (2266, 2277), True, 'from matplotlib import pyplot as plt\n'), ((2278, 2296), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2294, 2296), True, 'from matplotlib 
import pyplot as plt\n'), ((2527, 2547), 'ot.da.EMDTransport', 'ot.da.EMDTransport', ([], {}), '()\n', (2545, 2547), False, 'import ot\n'), ((2608, 2642), 'ot.da.SinkhornTransport', 'ot.da.SinkhornTransport', ([], {'reg_e': '(0.1)'}), '(reg_e=0.1)\n', (2631, 2642), False, 'import ot\n'), ((3259, 3288), 'matplotlib.pyplot.figure', 'plt.figure', (['(3)'], {'figsize': '(8, 4)'}), '(3, figsize=(8, 4))\n', (3269, 3288), True, 'from matplotlib import pyplot as plt\n'), ((3290, 3310), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(1)'], {}), '(2, 3, 1)\n', (3301, 3310), True, 'from matplotlib import pyplot as plt\n'), ((3311, 3325), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I1'], {}), '(I1)\n', (3321, 3325), True, 'from matplotlib import pyplot as plt\n'), ((3326, 3341), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3334, 3341), True, 'from matplotlib import pyplot as plt\n'), ((3342, 3362), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 1"""'], {}), "('Image 1')\n", (3351, 3362), True, 'from matplotlib import pyplot as plt\n'), ((3364, 3384), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(2)'], {}), '(2, 3, 2)\n', (3375, 3384), True, 'from matplotlib import pyplot as plt\n'), ((3385, 3400), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I1t'], {}), '(I1t)\n', (3395, 3400), True, 'from matplotlib import pyplot as plt\n'), ((3401, 3416), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3409, 3416), True, 'from matplotlib import pyplot as plt\n'), ((3417, 3443), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 1 Adapt"""'], {}), "('Image 1 Adapt')\n", (3426, 3443), True, 'from matplotlib import pyplot as plt\n'), ((3445, 3465), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(3)'], {}), '(2, 3, 3)\n', (3456, 3465), True, 'from matplotlib import pyplot as plt\n'), ((3466, 3482), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I1te'], {}), '(I1te)\n', (3476, 3482), True, 'from 
matplotlib import pyplot as plt\n'), ((3483, 3498), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3491, 3498), True, 'from matplotlib import pyplot as plt\n'), ((3499, 3531), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 1 Adapt (reg)"""'], {}), "('Image 1 Adapt (reg)')\n", (3508, 3531), True, 'from matplotlib import pyplot as plt\n'), ((3533, 3553), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (3544, 3553), True, 'from matplotlib import pyplot as plt\n'), ((3554, 3568), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I2'], {}), '(I2)\n', (3564, 3568), True, 'from matplotlib import pyplot as plt\n'), ((3569, 3584), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3577, 3584), True, 'from matplotlib import pyplot as plt\n'), ((3585, 3605), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 2"""'], {}), "('Image 2')\n", (3594, 3605), True, 'from matplotlib import pyplot as plt\n'), ((3607, 3627), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(5)'], {}), '(2, 3, 5)\n', (3618, 3627), True, 'from matplotlib import pyplot as plt\n'), ((3628, 3643), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I2t'], {}), '(I2t)\n', (3638, 3643), True, 'from matplotlib import pyplot as plt\n'), ((3644, 3659), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3652, 3659), True, 'from matplotlib import pyplot as plt\n'), ((3660, 3686), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 2 Adapt"""'], {}), "('Image 2 Adapt')\n", (3669, 3686), True, 'from matplotlib import pyplot as plt\n'), ((3688, 3708), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(3)', '(6)'], {}), '(2, 3, 6)\n', (3699, 3708), True, 'from matplotlib import pyplot as plt\n'), ((3709, 3725), 'matplotlib.pyplot.imshow', 'plt.imshow', (['I2te'], {}), '(I2te)\n', (3719, 3725), True, 'from matplotlib import pyplot as plt\n'), ((3726, 3741), 'matplotlib.pyplot.axis', 'plt.axis', 
(['"""off"""'], {}), "('off')\n", (3734, 3741), True, 'from matplotlib import pyplot as plt\n'), ((3742, 3774), 'matplotlib.pyplot.title', 'plt.title', (['"""Image 2 Adapt (reg)"""'], {}), "('Image 2 Adapt (reg)')\n", (3751, 3774), True, 'from matplotlib import pyplot as plt\n'), ((3775, 3793), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (3791, 3793), True, 'from matplotlib import pyplot as plt\n'), ((3795, 3805), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3803, 3805), True, 'from matplotlib import pyplot as plt\n'), ((909, 927), 'numpy.clip', 'np.clip', (['img', '(0)', '(1)'], {}), '(img, 0, 1)\n', (916, 927), True, 'import numpy as np\n'), ((1125, 1140), 'pathlib.Path', 'Path', (['this_file'], {}), '(this_file)\n', (1129, 1140), False, 'from pathlib import Path\n'), ((1188, 1228), 'os.path.join', 'os.path.join', (['data_path', '"""ocean_day.jpg"""'], {}), "(data_path, 'ocean_day.jpg')\n", (1200, 1228), False, 'import os\n'), ((1271, 1314), 'os.path.join', 'os.path.join', (['data_path', '"""ocean_sunset.jpg"""'], {}), "(data_path, 'ocean_sunset.jpg')\n", (1283, 1314), False, 'import os\n')] |
import numpy as np
import h5py
import os
class FileHelper(object):
    """Thin convenience wrapper around an HDF5 file.

    Provides read access to nodes and a way to store raw files as
    variable-length uint8 datasets.
    """

    def __init__(self, db):
        # db: path of the HDF5 file to open (h5py's default mode).
        self.db = h5py.File(db)

    def get(self, path=None):
        """Return the whole HDF5 file, or the node at ``path`` if given.

        The original class defined two ``get`` methods; Python has no
        overloading, so the zero-argument variant was silently shadowed
        and ``get()`` raised a TypeError. Merging them with an optional
        ``path`` keeps both call forms working.
        """
        if path is None:
            return self.db
        return self.db[path]

    def list(self, path):
        """Return the child names of the group at ``path``."""
        return [key for key in self.db[path].keys()]

    def store_file(self, path, store_path):
        """Store the raw bytes of the file at ``path`` under ``store_path``."""
        # Close the input handle deterministically (it was leaked before).
        with open(path, 'rb') as fh:
            raw = fh.read()
        try:
            dt = h5py.special_dtype(vlen=np.dtype('uint8'))
            temp = self.db.create_dataset(store_path, (1,), dtype=dt)
        except Exception:
            # Dataset already exists: overwrite it in place.
            temp = self.db[store_path]
        # np.fromstring is deprecated; np.frombuffer is the supported
        # equivalent for binary data.
        temp[0] = np.frombuffer(raw, dtype='uint8')

    def store_from_folder(self, path, save_path=""):
        """Store every entry of directory ``path`` under the ``save_path`` prefix."""
        for entry in os.listdir(os.fsencode(path)):
            filename = os.fsdecode(entry)
            self.store_file(path + filename, save_path + filename)
| [
"h5py.File",
"os.fsdecode",
"numpy.dtype",
"os.fsencode",
"os.listdir"
] | [((116, 129), 'h5py.File', 'h5py.File', (['db'], {}), '(db)\n', (125, 129), False, 'import h5py\n'), ((733, 750), 'os.fsencode', 'os.fsencode', (['path'], {}), '(path)\n', (744, 750), False, 'import os\n'), ((771, 792), 'os.listdir', 'os.listdir', (['directory'], {}), '(directory)\n', (781, 792), False, 'import os\n'), ((817, 834), 'os.fsdecode', 'os.fsdecode', (['file'], {}), '(file)\n', (828, 834), False, 'import os\n'), ((439, 456), 'numpy.dtype', 'np.dtype', (['"""uint8"""'], {}), "('uint8')\n", (447, 456), True, 'import numpy as np\n')] |
import numpy as np
import csv
import sys
import math
# This script counts the voxel occupancy for any boxed and binned data structure.
### Parameters/ data
### Parameters/ data
box_size = 1  # (box dimensions) ex: 9x9x9
voxel_size = 9  # per the input/output file naming ("..._9A")
center_index = math.ceil(box_size / 2) - 1  # index of the central voxel along each axis
output_path = "../data/output/box_analysis/box_size_" + str(box_size) + "_voxel_size_" + str(voxel_size) + ".csv"
input_path = "../data/input/boxes_s" + str(box_size) + "_" + str(voxel_size) + "A/boxes_train.npy"
#===================================================================================================
# Main
#===================================================================================================
# loading data (preboxes)
# prebox structure: [x_index (0-(box_size-1)), y_index, z_index, aa_index (0-19)] EX: [3,0,8,19]
preboxes = np.load(input_path, allow_pickle=True).tolist()
total_voxels = box_size**3  # total voxels per box
# voxel_counts[x, y, z, b] = number of amino acids falling in voxel (x, y, z) of box b
voxel_counts = np.zeros((box_size, box_size, box_size, len(preboxes)))
for i, prebox in enumerate(preboxes):
    for ind_set in prebox:
        x, y, z = ind_set[0], ind_set[1], ind_set[2]
        voxel_counts[x, y, z, i] += 1
# Maximum occupancy over all voxels of all boxes. Vectorized instead of the
# original quadruple Python loop; initial=0 reproduces the old max_occ = 0
# result when there are no boxes.
max_occ = int(voxel_counts.max(initial=0))
# Center voxel count/density for each box.
center_count = voxel_counts[center_index, center_index, center_index, :].tolist()
'''
structure of count_summary:
occupancy: [0, 1, 2, 3, 4 ... max_occ]
prebox_1:  [50, 40, 6, 7, 0 ] <- how many voxels with 0aa, 1aa, 3aa... in prebox_1
prebox_2:  [80, 20, 4, 8, 0]
prebox_3:  [10, 70, 6, 5, 0]
'''
count_summary = np.zeros((max_occ + 1, len(preboxes)))
for box in range(len(preboxes)):
    # Histogram of occupancies for this box in one C-level pass.
    occupancies = voxel_counts[:, :, :, box].ravel().astype(int)
    count_summary[:, box] = np.bincount(occupancies, minlength=max_occ + 1)
# creating a CSV file:
with open(output_path, 'w', newline='') as file:
    writer = csv.writer(file)
    # adding header to CSV
    header = ["box_size", "voxel_size", "total_voxels", "center_density"]
    header.extend(str(i) for i in range(max_occ + 1))
    writer.writerow(header)
    # appending data to CSV, one row per box
    for i in range(len(preboxes)):
        row = [box_size, voxel_size, total_voxels, center_count[i]]
        row.extend(int(count_summary[j][i]) for j in range(max_occ + 1))
        writer.writerow(row)
print("Finished making csv.")
| [
"numpy.load",
"csv.writer",
"math.ceil"
] | [((229, 252), 'math.ceil', 'math.ceil', (['(box_size / 2)'], {}), '(box_size / 2)\n', (238, 252), False, 'import math\n'), ((2339, 2355), 'csv.writer', 'csv.writer', (['file'], {}), '(file)\n', (2349, 2355), False, 'import csv\n'), ((716, 754), 'numpy.load', 'np.load', (['input_path'], {'allow_pickle': '(True)'}), '(input_path, allow_pickle=True)\n', (723, 754), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import copy
import os
import shutil
import tempfile
import zipfile
import numpy as np
import pandas as pd
import pyarrow as pa
import torch
from sklearn.preprocessing import StandardScaler
from pysurvival import utils
from pysurvival.utils._functions import _get_time_buckets
class BaseModel(object):
    """ Base class for all estimators in pysurvival. It should not be used on
        its own.
    """

    def __init__(self, auto_scaler=True):
        # Creating a scikit-learn scaler to standardize inputs, if requested
        self.auto_scaler = auto_scaler
        if self.auto_scaler:
            self.scaler = StandardScaler()
        else:
            self.scaler = None

        # Creating a place holder for the time axis
        self.times = [0.]

        # Creating the model's name
        self.__repr__()

    def __repr__(self):
        """ Creates the representation of the Object """
        self.name = self.__class__.__name__
        return self.name

    def save(self, path_file):
        """ Save the model components:
            * the parameters of the model (parameters)
            * the PyTorch model itself (model) if it exists
            And compress them into a zip file

            Parameters
            ----------
            * path_file, str
                address of the file where the model will be saved
        """

        # Ensuring the file has the proper name
        folder_name = os.path.dirname(path_file) + '/'
        file_name = os.path.basename(path_file)
        if not file_name.endswith('.zip'):
            file_name += '.zip'

        # Checking if the folder is accessible
        if not os.access(folder_name, os.W_OK):
            error_msg = '{} is not an accessible directory.'.format(folder_name)
            raise OSError(error_msg)

        # Changing the format of scaler parameters if they exist
        temp_scaler = copy.deepcopy(self.__dict__.get('scaler'))
        if temp_scaler is not None:
            self.__dict__['scaler'] = temp_scaler.__dict__

        # Collecting the model parameters (everything except the torch model)
        parameters_to_save = {k: v for k, v in self.__dict__.items()
                              if k != 'model'}

        # The components are written into a private temporary directory
        # instead of the current working directory (the original clobbered
        # any local files named 'parameters'/'model' and raced with
        # concurrent saves). The archive member names are unchanged, so the
        # on-disk format stays fully backward-compatible.
        temp_dir = tempfile.mkdtemp()
        try:
            elements_to_save = []

            # Serializing the parameters
            # NOTE(review): pyarrow.serialize is deprecated in recent pyarrow
            # releases; kept here to preserve the existing archive format.
            param_path = os.path.join(temp_dir, 'parameters')
            with open(param_path, 'wb') as f:
                f.write(pa.serialize(parameters_to_save).to_buffer())
            elements_to_save.append(param_path)

            # Saving the torch model if it exists
            if 'model' in self.__dict__.keys():
                model_path = os.path.join(temp_dir, 'model')
                torch.save(self.model, model_path)
                elements_to_save.append(model_path)

            # Compressing the elements to save into a zip archive
            full_path = folder_name + file_name
            print('Saving the model to disk as {}'.format(full_path))
            with zipfile.ZipFile(full_path, 'w') as myzip:
                for temp_file in elements_to_save:
                    # arcname drops the temp-dir prefix, matching the
                    # original archive layout ('parameters', 'model')
                    myzip.write(temp_file,
                                arcname=os.path.basename(temp_file))
        finally:
            # Erasing temp files, even if the save failed midway
            shutil.rmtree(temp_dir, ignore_errors=True)

            # Restore the scaler
            if temp_scaler is not None:
                self.scaler = StandardScaler()
                self.__dict__['scaler'] = copy.deepcopy(temp_scaler)

    def load(self, path_file):
        """ Load the model components from a .zip file:
            * the parameters of the model (.params)
            * the PyTorch model itself (.model) if it exists

            Parameters
            ----------
            * path_file, str
                address of the file where the model will be loaded from
        """

        # Ensuring the file has the proper name
        folder_name = os.path.dirname(path_file) + '/'
        file_name = os.path.basename(path_file)
        if not file_name.endswith('.zip'):
            file_name += '.zip'

        # Opening the '.zip' file
        full_path = folder_name + file_name
        print('Loading the model from {}'.format(full_path))

        # Creating a temp folder for the extracted members
        temp_folder = tempfile.mkdtemp() + '/'
        try:
            # The ZipFile handle is now closed deterministically; the
            # original opened a second handle (input_zip) and leaked it.
            with zipfile.ZipFile(path_file, 'r') as zip_ref:
                zip_ref.extractall(temp_folder)

                for member_name in zip_ref.namelist():
                    # Loading the parameters
                    if 'parameters' in member_name.lower():
                        content = zip_ref.read('parameters')
                        self.__dict__ = copy.deepcopy(pa.deserialize(content))

                        # If a scaler was available, then rebuild it too
                        temp_scaler = copy.deepcopy(
                            self.__dict__.get('scaler'))
                        if temp_scaler is not None:
                            self.scaler = StandardScaler()
                            self.scaler.__dict__ = temp_scaler

                    # Loading the PyTorch model
                    if 'model' in member_name.lower():
                        self.model = torch.load(temp_folder + 'model')
        finally:
            # Erasing every extracted temp file; the original also leaked
            # the temporary directory itself.
            shutil.rmtree(temp_folder, ignore_errors=True)

    def get_time_buckets(self, extra_timepoint=False):
        """ Creating the time buckets based on the times axis such that
            the k-th time bin is [ t(k-1), t(k) ] in the time axis.
        """

        # Checking if the time axis has already been created
        if self.times is None or len(self.times) <= 1:
            error = 'The time axis needs to be created before'
            error += ' using the method get_time_buckets.'
            raise AttributeError(error)

        # Creating the base time buckets
        time_buckets = _get_time_buckets(self.times)

        # Adding an additional element if specified
        if extra_timepoint:
            time_buckets += [(time_buckets[-1][1],
                              time_buckets[-1][1] * 1.01)]
        self.time_buckets = time_buckets

    def predict_hazard(self, x, t=None, **kwargs):
        """ Predicts the hazard function h(t, x)

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns
            -------
            * `hazard`: **numpy.ndarray** --
                array-like representing the prediction of the hazard function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating hazard, density, survival
        hazard, density, survival = self.predict(x, t, **kwargs)
        return hazard

    def predict_density(self, x, t=None, **kwargs):
        """ Predicts the density function d(t, x)

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns
            -------
            * `density`: **numpy.ndarray** --
                array-like representing the prediction of density function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating hazard, density, survival
        hazard, density, survival = self.predict(x, t, **kwargs)
        return density

    def predict_survival(self, x, t=None, **kwargs):
        """ Predicts the survival function S(t, x)

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns
            -------
            * `survival`: **numpy.ndarray** --
                array-like representing the prediction of the survival
                function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating hazard, density, survival
        hazard, density, survival = self.predict(x, t, **kwargs)
        return survival

    def predict_cdf(self, x, t=None, **kwargs):
        """ Predicts the cumulative density function F(t, x) = 1 - S(t, x)

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns
            -------
            * `cdf`: **numpy.ndarray** --
                array-like representing the prediction of the cumulative
                density function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating survival and cdf
        survival = self.predict_survival(x, t, **kwargs)
        cdf = 1. - survival
        return cdf

    def predict_cumulative_hazard(self, x, t=None, **kwargs):
        """ Predicts the cumulative hazard function H(t, x), the cumulative
            sum of the hazard over the time axis.

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            * `t`: **double** *(default=None)* --
                time at which the prediction should be performed.
                If None, then return the function for all available t.

            Returns
            -------
            * `cumulative_hazard`: **numpy.ndarray** --
                array-like representing the prediction of the
                cumulative_hazard function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating hazard/cumulative_hazard
        hazard = self.predict_hazard(x, t, **kwargs)
        cumulative_hazard = np.cumsum(hazard, 1)
        return cumulative_hazard

    def predict_risk(self, x, **kwargs):
        """ Predicts the Risk Score/Mortality function for all t,
            R(x) = sum( cumsum(hazard(t, x)) )
            According to Random survival forests from Ishwaran et al
            https://arxiv.org/pdf/0811.1645.pdf

            Parameters
            ----------
            * `x` : **array-like** *shape=(n_samples, n_features)* --
                array-like representing the datapoints.
                x should not be standardized before, the model
                will take care of it

            Returns
            -------
            * `risk_score`: **numpy.ndarray** --
                array-like representing the prediction of Risk Score function
        """

        # Checking if the data has the right format
        x = utils.check_data(x)

        # Calculating cumulative_hazard/risk
        cumulative_hazard = self.predict_cumulative_hazard(x, None, **kwargs)
        risk_score = np.sum(cumulative_hazard, 1)
        return risk_score
| [
"pysurvival.utils.check_data",
"os.remove",
"copy.deepcopy",
"numpy.sum",
"sklearn.preprocessing.StandardScaler",
"zipfile.ZipFile",
"os.path.basename",
"pyarrow.deserialize",
"os.path.dirname",
"torch.load",
"torch.save",
"numpy.cumsum",
"tempfile.mkdtemp",
"pysurvival.utils._functions._g... | [((1561, 1588), 'os.path.basename', 'os.path.basename', (['path_file'], {}), '(path_file)\n', (1577, 1588), False, 'import os\n'), ((3963, 3990), 'os.path.basename', 'os.path.basename', (['path_file'], {}), '(path_file)\n', (3979, 3990), False, 'import os\n'), ((6137, 6166), 'pysurvival.utils._functions._get_time_buckets', '_get_time_buckets', (['self.times'], {}), '(self.times)\n', (6154, 6166), False, 'from pysurvival.utils._functions import _get_time_buckets\n'), ((7216, 7235), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (7232, 7235), False, 'from pysurvival import utils\n'), ((8221, 8240), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (8237, 8240), False, 'from pysurvival import utils\n'), ((9232, 9251), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (9248, 9251), False, 'from pysurvival import utils\n'), ((10272, 10291), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (10288, 10291), False, 'from pysurvival import utils\n'), ((11342, 11361), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (11358, 11361), False, 'from pysurvival import utils\n'), ((11495, 11515), 'numpy.cumsum', 'np.cumsum', (['hazard', '(1)'], {}), '(hazard, 1)\n', (11504, 11515), True, 'import numpy as np\n'), ((12368, 12387), 'pysurvival.utils.check_data', 'utils.check_data', (['x'], {}), '(x)\n', (12384, 12387), False, 'from pysurvival import utils\n'), ((12537, 12565), 'numpy.sum', 'np.sum', (['cumulative_hazard', '(1)'], {}), '(cumulative_hazard, 1)\n', (12543, 12565), True, 'import numpy as np\n'), ((644, 660), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (658, 660), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1507, 1533), 'os.path.dirname', 'os.path.dirname', (['path_file'], {}), '(path_file)\n', (1522, 1533), False, 'import os\n'), ((1732, 1763), 'os.access', 
'os.access', (['folder_name', 'os.W_OK'], {}), '(folder_name, os.W_OK)\n', (1741, 1763), False, 'import os\n'), ((2551, 2583), 'pyarrow.serialize', 'pa.serialize', (['parameters_to_save'], {}), '(parameters_to_save)\n', (2563, 2583), True, 'import pyarrow as pa\n'), ((2799, 2830), 'torch.save', 'torch.save', (['self.model', '"""model"""'], {}), "(self.model, 'model')\n", (2809, 2830), False, 'import torch\n'), ((3023, 3054), 'zipfile.ZipFile', 'zipfile.ZipFile', (['full_path', '"""w"""'], {}), "(full_path, 'w')\n", (3038, 3054), False, 'import zipfile\n'), ((3245, 3265), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (3254, 3265), False, 'import os\n'), ((3362, 3378), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3376, 3378), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3418, 3444), 'copy.deepcopy', 'copy.deepcopy', (['temp_scaler'], {}), '(temp_scaler)\n', (3431, 3444), False, 'import copy\n'), ((3909, 3935), 'os.path.dirname', 'os.path.dirname', (['path_file'], {}), '(path_file)\n', (3924, 3935), False, 'import os\n'), ((4270, 4288), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (4286, 4288), False, 'import tempfile\n'), ((4361, 4392), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_file', '"""r"""'], {}), "(path_file, 'r')\n", (4376, 4392), False, 'import zipfile\n'), ((4473, 4499), 'zipfile.ZipFile', 'zipfile.ZipFile', (['path_file'], {}), '(path_file)\n', (4488, 4499), False, 'import zipfile\n'), ((5528, 5548), 'os.remove', 'os.remove', (['temp_file'], {}), '(temp_file)\n', (5537, 5548), False, 'import os\n'), ((5304, 5337), 'torch.load', 'torch.load', (["(temp_folder + 'model')"], {}), "(temp_folder + 'model')\n", (5314, 5337), False, 'import torch\n'), ((4806, 4829), 'pyarrow.deserialize', 'pa.deserialize', (['content'], {}), '(content)\n', (4820, 4829), True, 'import pyarrow as pa\n'), ((5117, 5133), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (5131, 
5133), False, 'from sklearn.preprocessing import StandardScaler\n')] |
import pytest
from arrayviews import xnd_xnd_as
xnd = pytest.importorskip("xnd")
try:
import pyarrow as pa
except ImportError:
pa = None
try:
import pandas as pd
except ImportError:
pd = None
try:
import numpy as np
except ImportError:
np = None
# Skip markers: each optional backend is only exercised when its package
# imported successfully above (otherwise the module object is None).
pyarrowtest = pytest.mark.skipif(
    pa is None,
    reason="requires the pyarrow package")
pandastest = pytest.mark.skipif(
    pd is None,
    reason="requires the pandas package")
numpytest = pytest.mark.skipif(
    np is None,
    reason="requires the numpy package")
@pyarrowtest
def test_pyarrow_array():
    """A pyarrow.Array view of an xnd array shares the underlying buffer."""
    source = xnd.xnd([1, 2, 3, 4, 5])
    view = xnd_xnd_as.pyarrow_array(source)
    reference = pa.array([1, 2, 3, 4, 5])
    assert view.to_pylist() == reference.to_pylist()
    # Mutations of the xnd array must be visible through the view.
    source[1] = 999
    assert view[1] == 999
@pyarrowtest
def test_pyarrow_array_with_null():
    """Optional (null) values are unsupported: the view must raise.

    The original test kept assertions after the ``pytest.raises`` block
    that referenced ``pa_arr``; since the call inside the block raises,
    that name is never bound and the trailing code could only fail with
    a NameError. The unreachable assertions are removed.
    """
    xd_arr = xnd.xnd([1, 2, None, 4, 5])
    with pytest.raises(
            NotImplementedError,
            match="pyarrow.Array view of xnd.xnd with optional values"):
        xnd_xnd_as.pyarrow_array(xd_arr)
@pandastest
def test_pandas_series():
    """A pandas.Series view of an xnd array shares the underlying buffer."""
    source = xnd.xnd([1, 2, 3, 4, 5])
    view = xnd_xnd_as.pandas_series(source)
    reference = pd.Series([1, 2, 3, 4, 5])
    assert (view == reference).all()
    # Mutations of the xnd array must be visible through the view.
    source[1] = 999
    assert view[1] == 999
@pandastest
def test_pandas_series_with_null():
    """Optional (null) values are unsupported: the view must raise.

    The original test kept assertions after the ``pytest.raises`` block
    that referenced ``pd_ser``; since the call inside the block raises,
    that name is never bound and the trailing code could only fail with
    a NameError. The unreachable assertions are removed.
    """
    xd_arr = xnd.xnd([1, 2, None, 4, 5])
    with pytest.raises(
            NotImplementedError,
            match="pandas.Series view of xnd.xnd with optional values"):
        xnd_xnd_as.pandas_series(xd_arr)
@numpytest
def test_numpy_ndarray():
    """A numpy.ndarray view of an xnd container shares its buffer."""
    source = xnd.xnd([1, 2, 3, 4, 5])
    view = xnd_xnd_as.numpy_ndarray(source)
    np.testing.assert_array_equal(view, np.array([1, 2, 3, 4, 5]))
    # A write through xnd must be visible through the ndarray view.
    source[1] = 999
    assert view[1] == 999
@numpytest
def test_numpy_ndarray_with_null():
    """Optional (null) values are rejected by the numpy view."""
    xd_arr = xnd.xnd([1, 2, None, 4, 5])
    with pytest.raises(
            NotImplementedError,
            match="numpy.ndarray view of xnd.xnd with optional values"):
        xnd_xnd_as.numpy_ndarray(xd_arr)
    # The view is never created (the call above must raise), so the checks
    # that previously followed the 'with' block dereferenced an unbound name
    # and could only fail with NameError; they have been removed.
| [
"pytest.importorskip",
"numpy.testing.assert_array_equal",
"arrayviews.xnd_xnd_as.numpy_ndarray",
"pytest.raises",
"arrayviews.xnd_xnd_as.pyarrow_array",
"pytest.mark.skipif",
"pandas.Series",
"numpy.array",
"arrayviews.xnd_xnd_as.pandas_series",
"pyarrow.array",
"pyarrow.float64"
] | [((55, 81), 'pytest.importorskip', 'pytest.importorskip', (['"""xnd"""'], {}), "('xnd')\n", (74, 81), False, 'import pytest\n'), ((288, 357), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pa is None)'], {'reason': '"""requires the pyarrow package"""'}), "(pa is None, reason='requires the pyarrow package')\n", (306, 357), False, 'import pytest\n'), ((380, 448), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(pd is None)'], {'reason': '"""requires the pandas package"""'}), "(pd is None, reason='requires the pandas package')\n", (398, 448), False, 'import pytest\n'), ((470, 537), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(np is None)'], {'reason': '"""requires the numpy package"""'}), "(np is None, reason='requires the numpy package')\n", (488, 537), False, 'import pytest\n'), ((639, 671), 'arrayviews.xnd_xnd_as.pyarrow_array', 'xnd_xnd_as.pyarrow_array', (['xd_arr'], {}), '(xd_arr)\n', (663, 671), False, 'from arrayviews import xnd_xnd_as\n'), ((694, 719), 'pyarrow.array', 'pa.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (702, 719), True, 'import pyarrow as pa\n'), ((1388, 1420), 'arrayviews.xnd_xnd_as.pandas_series', 'xnd_xnd_as.pandas_series', (['xd_arr'], {}), '(xd_arr)\n', (1412, 1420), False, 'from arrayviews import xnd_xnd_as\n'), ((1443, 1469), 'pandas.Series', 'pd.Series', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (1452, 1469), True, 'import pandas as pd\n'), ((2106, 2138), 'arrayviews.xnd_xnd_as.numpy_ndarray', 'xnd_xnd_as.numpy_ndarray', (['xd_arr'], {}), '(xd_arr)\n', (2130, 2138), False, 'from arrayviews import xnd_xnd_as\n'), ((2161, 2186), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5]'], {}), '([1, 2, 3, 4, 5])\n', (2169, 2186), True, 'import numpy as np\n'), ((2191, 2245), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['np_arr', 'expected_np_arr'], {}), '(np_arr, expected_np_arr)\n', (2220, 2245), True, 'import numpy as np\n'), ((930, 1029), 'pytest.raises', 'pytest.raises', 
(['NotImplementedError'], {'match': '"""pyarrow.Array view of xnd.xnd with optional values"""'}), "(NotImplementedError, match=\n 'pyarrow.Array view of xnd.xnd with optional values')\n", (943, 1029), False, 'import pytest\n'), ((1068, 1100), 'arrayviews.xnd_xnd_as.pyarrow_array', 'xnd_xnd_as.pyarrow_array', (['xd_arr'], {}), '(xd_arr)\n', (1092, 1100), False, 'from arrayviews import xnd_xnd_as\n'), ((1664, 1763), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""pandas.Series view of xnd.xnd with optional values"""'}), "(NotImplementedError, match=\n 'pandas.Series view of xnd.xnd with optional values')\n", (1677, 1763), False, 'import pytest\n'), ((1802, 1834), 'arrayviews.xnd_xnd_as.pandas_series', 'xnd_xnd_as.pandas_series', (['xd_arr'], {}), '(xd_arr)\n', (1826, 1834), False, 'from arrayviews import xnd_xnd_as\n'), ((1861, 1890), 'pandas.Series', 'pd.Series', (['[1, 2, None, 4, 5]'], {}), '([1, 2, None, 4, 5])\n', (1870, 1890), True, 'import pandas as pd\n'), ((2394, 2493), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {'match': '"""numpy.ndarray view of xnd.xnd with optional values"""'}), "(NotImplementedError, match=\n 'numpy.ndarray view of xnd.xnd with optional values')\n", (2407, 2493), False, 'import pytest\n'), ((2532, 2564), 'arrayviews.xnd_xnd_as.numpy_ndarray', 'xnd_xnd_as.numpy_ndarray', (['xd_arr'], {}), '(xd_arr)\n', (2556, 2564), False, 'from arrayviews import xnd_xnd_as\n'), ((2591, 2621), 'numpy.array', 'np.array', (['[1, 2, np.nan, 4, 5]'], {}), '([1, 2, np.nan, 4, 5])\n', (2599, 2621), True, 'import numpy as np\n'), ((2630, 2684), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['np_arr', 'expected_np_arr'], {}), '(np_arr, expected_np_arr)\n', (2659, 2684), True, 'import numpy as np\n'), ((1161, 1173), 'pyarrow.float64', 'pa.float64', ([], {}), '()\n', (1171, 1173), True, 'import pyarrow as pa\n')] |
import cv2
import numpy as np
import os
import SimpleITK as sitk
def load_entire_resolution(dir):
    """Reassemble a 3D volume stored on disk as a grid of tiff blocks.

    Expected layout: ``dir/<y folder>/<x folder>/<z tiff files>``; the blocks
    are stacked back together with ``np.block``.

    Parameters
    ----------
    dir : str
        Root directory of one resolution level.

    Returns
    -------
    numpy.ndarray
        The assembled volume.
    """
    # when only low level resolution is provided, we have to assume that the
    # image is evenly spliced; if blended with max resolution, it may be able
    # to work with the unevenly sliced
    # Entries are sorted so that block order is deterministic (plain
    # os.listdir order is filesystem-dependent and would scramble np.block).
    # NOTE(review): sorting is lexicographic; confirm the coordinate suffixes
    # in the folder/file names are zero-padded.
    y_folders = sorted(d for d in os.listdir(dir)
                       if os.path.isdir(os.path.join(dir, d)))
    ensemble_array = []
    for y_folder in y_folders:
        y_dir = os.path.join(dir, y_folder)
        x_folders = sorted(d for d in os.listdir(y_dir)
                           if os.path.isdir(os.path.join(y_dir, d)))
        x_array = []
        for x_folder in x_folders:
            x_dir = os.path.join(y_dir, x_folder)
            z_array = []
            for z_file in sorted(os.listdir(x_dir)):
                z_path = os.path.join(x_dir, z_file)
                img = sitk.ReadImage(z_path)
                # SimpleITK presumably yields (z, y, x); transpose to put the
                # slice axis last — TODO confirm against the writer.
                arr = sitk.GetArrayFromImage(img).transpose([1, 2, 0])
                z_array.append(arr)
            x_array.append(z_array)
        ensemble_array.append(x_array)
    return np.block(ensemble_array)
"SimpleITK.ReadImage",
"SimpleITK.GetArrayFromImage",
"os.path.split",
"os.path.join",
"os.listdir",
"numpy.block"
] | [((1614, 1638), 'numpy.block', 'np.block', (['ensemble_array'], {}), '(ensemble_array)\n', (1622, 1638), True, 'import numpy as np\n'), ((188, 206), 'os.path.split', 'os.path.split', (['dir'], {}), '(dir)\n', (201, 206), False, 'import os\n'), ((814, 841), 'os.path.join', 'os.path.join', (['dir', 'y_folder'], {}), '(dir, y_folder)\n', (826, 841), False, 'import os\n'), ((673, 688), 'os.listdir', 'os.listdir', (['dir'], {}), '(dir)\n', (683, 688), False, 'import os\n'), ((1068, 1097), 'os.path.join', 'os.path.join', (['y_dir', 'x_folder'], {}), '(y_dir, x_folder)\n', (1080, 1097), False, 'import os\n'), ((706, 726), 'os.path.join', 'os.path.join', (['dir', 'd'], {}), '(dir, d)\n', (718, 726), False, 'import os\n'), ((918, 935), 'os.listdir', 'os.listdir', (['y_dir'], {}), '(y_dir)\n', (928, 935), False, 'import os\n'), ((1298, 1325), 'os.path.join', 'os.path.join', (['x_dir', 'z_file'], {}), '(x_dir, z_file)\n', (1310, 1325), False, 'import os\n'), ((1398, 1420), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['z_path'], {}), '(z_path)\n', (1412, 1420), True, 'import SimpleITK as sitk\n'), ((953, 975), 'os.path.join', 'os.path.join', (['y_dir', 'd'], {}), '(y_dir, d)\n', (965, 975), False, 'import os\n'), ((1180, 1197), 'os.listdir', 'os.listdir', (['x_dir'], {}), '(x_dir)\n', (1190, 1197), False, 'import os\n'), ((1443, 1470), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['img'], {}), '(img)\n', (1465, 1470), True, 'import SimpleITK as sitk\n')] |
#! /usr/bin/env python
import sys
import numpy as np
from matplotlib import pyplot as plt
def get_xy(fname):
    """Parse a .xvg file and return its two data columns.

    The first 18 lines (the xvg header) are skipped; every remaining line
    with exactly two whitespace-separated tokens contributes one (x, y) pair.

    Parameters
    ----------
    fname : str
        Path to the .xvg file.

    Returns
    -------
    tuple of (list of float, list of float)
        The x and y columns.
    """
    x = []
    y = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(fname, "r") as f:
        for i, raw in enumerate(f):
            # skip the fixed-size xvg header
            if i < 18:
                continue
            # split() ignores a trailing newline, so the last line parses
            # correctly even without one (the original unconditionally sliced
            # off the final character, corrupting such a line).
            cols = raw.split()
            if len(cols) == 2:
                x.append(float(cols[0]))
                y.append(float(cols[1]))
    return x, y
def ave(poss):
    """Return the element-wise mean over the first axis of *poss*.

    Parameters
    ----------
    poss : sequence of sequences of float
        Rows to be averaged together.

    Returns
    -------
    list
        ``np.average(poss, axis=0)`` converted to a plain Python list.
    """
    # Leftover debug print of the input shape and the dead ``poss_ave = []``
    # assignment were removed.
    return np.average(np.array(poss), axis=0).tolist()
if __name__ == "__main__":
argvs = sys.argv
dir_list=["0_31410", "0_31414", "0_31418"]
for dir in dir_list:
x, y = get_xy(dir+"/protein_gpu/equil_n1/rmsd.xvg")
oname=dir+"_rmsd.png"
plt.figure()
plt.xlabel(r'Time [ns]')
plt.ylabel(r'RMSD [nm]')
plt.plot(x, y)
plt.savefig(oname)
| [
"matplotlib.pyplot.plot",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.savefig"
] | [((792, 804), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (802, 804), True, 'from matplotlib import pyplot as plt\n'), ((813, 836), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time [ns]"""'], {}), "('Time [ns]')\n", (823, 836), True, 'from matplotlib import pyplot as plt\n'), ((846, 869), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""RMSD [nm]"""'], {}), "('RMSD [nm]')\n", (856, 869), True, 'from matplotlib import pyplot as plt\n'), ((879, 893), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {}), '(x, y)\n', (887, 893), True, 'from matplotlib import pyplot as plt\n'), ((902, 920), 'matplotlib.pyplot.savefig', 'plt.savefig', (['oname'], {}), '(oname)\n', (913, 920), True, 'from matplotlib import pyplot as plt\n'), ((447, 461), 'numpy.array', 'np.array', (['poss'], {}), '(poss)\n', (455, 461), True, 'import numpy as np\n'), ((511, 525), 'numpy.array', 'np.array', (['poss'], {}), '(poss)\n', (519, 525), True, 'import numpy as np\n')] |
import cv2
import numpy as np
def image_resize(image, width=None, height=None, inter=cv2.INTER_AREA):
    """Resize *image* to a target width or height, preserving aspect ratio.

    Exactly one of *width* / *height* should normally be given; if both are
    None the image is returned unchanged, and if both are given the width
    takes precedence (matching the original behaviour).

    Parameters
    ----------
    image : numpy.ndarray
        Source image.
    width, height : int or None
        Desired output width or height in pixels.
    inter : int
        OpenCV interpolation flag.

    Returns
    -------
    numpy.ndarray
        The resized image (or the input when no size was requested).
    """
    orig_h, orig_w = image.shape[:2]
    # nothing requested -> nothing to do
    if width is None and height is None:
        return image
    if width is None:
        # scale so the output height matches, width follows the aspect ratio
        scale = height / float(orig_h)
        new_size = (int(orig_w * scale), height)
    else:
        # scale so the output width matches, height follows the aspect ratio
        scale = width / float(orig_w)
        new_size = (width, int(orig_h * scale))
    return cv2.resize(image, new_size, interpolation=inter)
def load_sticker(path: str) -> np.ndarray:
    """Load an image file with its alpha channel, converted to RGBA order."""
    raw = cv2.imread(path, cv2.IMREAD_UNCHANGED)
    return cv2.cvtColor(raw, cv2.COLOR_BGR2RGBA)
def overlay_transparent(background, overlay, x, y):
    """Alpha-blend *overlay* onto *background* at top-left corner (x, y).

    The overlay is cropped to the background bounds; an overlay without an
    alpha channel is treated as fully opaque. The background is modified in
    place and also returned.

    Parameters
    ----------
    background : numpy.ndarray
        Target image, shape (H, W, 3).
    overlay : numpy.ndarray
        Image to paste, shape (h, w, 3) or (h, w, 4).
    x, y : int
        Column / row of the overlay's top-left corner.

    Returns
    -------
    numpy.ndarray
        The (mutated) background.
    """
    bg_h, bg_w = background.shape[:2]
    # completely outside the frame: nothing to draw
    if x >= bg_w or y >= bg_h:
        return background

    ov_h, ov_w = overlay.shape[:2]
    # crop the overlay to what fits inside the background
    if x + ov_w > bg_w:
        ov_w = bg_w - x
        overlay = overlay[:, :ov_w]
    if y + ov_h > bg_h:
        ov_h = bg_h - y
        overlay = overlay[:ov_h]

    # synthesize a fully-opaque alpha channel when none is present
    if overlay.shape[2] < 4:
        alpha = np.ones((overlay.shape[0], overlay.shape[1], 1),
                       dtype=overlay.dtype) * 255
        overlay = np.concatenate([overlay, alpha], axis=2)

    rgb = overlay[..., :3]
    mask = overlay[..., 3:] / 255.0
    region = background[y:y + ov_h, x:x + ov_w]
    background[y:y + ov_h, x:x + ov_w] = (1.0 - mask) * region + mask * rgb
    return background
| [
"cv2.cvtColor",
"cv2.imread",
"numpy.ones",
"cv2.resize"
] | [((823, 866), 'cv2.resize', 'cv2.resize', (['image', 'dim'], {'interpolation': 'inter'}), '(image, dim, interpolation=inter)\n', (833, 866), False, 'import cv2\n'), ((977, 1015), 'cv2.imread', 'cv2.imread', (['path', 'cv2.IMREAD_UNCHANGED'], {}), '(path, cv2.IMREAD_UNCHANGED)\n', (987, 1015), False, 'import cv2\n'), ((1030, 1071), 'cv2.cvtColor', 'cv2.cvtColor', (['sticker', 'cv2.COLOR_BGR2RGBA'], {}), '(sticker, cv2.COLOR_BGR2RGBA)\n', (1042, 1071), False, 'import cv2\n'), ((1681, 1750), 'numpy.ones', 'np.ones', (['(overlay.shape[0], overlay.shape[1], 1)'], {'dtype': 'overlay.dtype'}), '((overlay.shape[0], overlay.shape[1], 1), dtype=overlay.dtype)\n', (1688, 1750), True, 'import numpy as np\n')] |
# Program 18a: Generating a multifractal image.
# Save the image.
# See Figure 18.1(b).
import numpy as np
import matplotlib.pyplot as plt
from skimage import exposure, io, img_as_uint

# Multiplicative-cascade weights (they sum to 1).
p1, p2, p3, p4 = 0.3, 0.4, 0.25, 0.05
p = [[p1, p2], [p3, p4]]

# Iterate the cascade: each step doubles the grid size, filling the four
# quadrants with the previous grid scaled by the corresponding weight.
# (A dead ``M.tolist()`` call, whose result was discarded, was removed.)
for k in range(1, 9, 1):
    M = np.zeros([2 ** (k + 1), 2 ** (k + 1)])
    for i in range(2**k):
        for j in range(2**k):
            M[i][j] = p1 * p[i][j]
            M[i][j + 2**k] = p2 * p[i][j]
            M[i + 2**k][j] = p3 * p[i][j]
            M[i + 2**k][j + 2**k] = p4 * p[i][j]
    p = M

# Plot the multifractal image (gamma-adjusted for visibility).
M = exposure.adjust_gamma(M, 0.2)
plt.imshow(M, cmap='gray', interpolation='nearest')

# Save the image as a portable network graphics (png) image.
im = np.array(M, dtype='float64')
im = exposure.rescale_intensity(im, out_range='float')
im = img_as_uint(im)
io.imsave('Multifractal.png', im)
io.show()
| [
"skimage.exposure.adjust_gamma",
"skimage.img_as_uint",
"matplotlib.pyplot.imshow",
"skimage.exposure.rescale_intensity",
"skimage.io.show",
"numpy.zeros",
"numpy.array",
"skimage.io.imsave"
] | [((607, 636), 'skimage.exposure.adjust_gamma', 'exposure.adjust_gamma', (['M', '(0.2)'], {}), '(M, 0.2)\n', (628, 636), False, 'from skimage import exposure, io, img_as_uint\n'), ((637, 688), 'matplotlib.pyplot.imshow', 'plt.imshow', (['M'], {'cmap': '"""gray"""', 'interpolation': '"""nearest"""'}), "(M, cmap='gray', interpolation='nearest')\n", (647, 688), True, 'import matplotlib.pyplot as plt\n'), ((756, 784), 'numpy.array', 'np.array', (['M'], {'dtype': '"""float64"""'}), "(M, dtype='float64')\n", (764, 784), True, 'import numpy as np\n'), ((790, 839), 'skimage.exposure.rescale_intensity', 'exposure.rescale_intensity', (['im'], {'out_range': '"""float"""'}), "(im, out_range='float')\n", (816, 839), False, 'from skimage import exposure, io, img_as_uint\n'), ((845, 860), 'skimage.img_as_uint', 'img_as_uint', (['im'], {}), '(im)\n', (856, 860), False, 'from skimage import exposure, io, img_as_uint\n'), ((861, 894), 'skimage.io.imsave', 'io.imsave', (['"""Multifractal.png"""', 'im'], {}), "('Multifractal.png', im)\n", (870, 894), False, 'from skimage import exposure, io, img_as_uint\n'), ((895, 904), 'skimage.io.show', 'io.show', ([], {}), '()\n', (902, 904), False, 'from skimage import exposure, io, img_as_uint\n'), ((283, 321), 'numpy.zeros', 'np.zeros', (['[2 ** (k + 1), 2 ** (k + 1)]'], {}), '([2 ** (k + 1), 2 ** (k + 1)])\n', (291, 321), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
#import math
#import matplotlib.pyplot as plt
class Planet():
    """
    The class called Planet is initialised with constants appropriate
    for the given target planet, including the atmospheric density profile
    and other constants
    """
    def __init__(self, atmos_func='exponential', atmos_filename=None,
                 Cd=1., Ch=0.1, Q=1e7, Cl=1e-3, alpha=0.3, Rp=6371e3,
                 g=9.81, H=8000., rho0=1.2):
        """
        Set up the initial parameters and constants for the target planet
        Parameters
        ----------
        atmos_func : string, optional
            Function which computes atmospheric density, rho, at altitude, z.
            Default is the exponential function ``rho = rho0 exp(-z/H)``.
            Options are ``exponential``, ``tabular``, ``constant`` and ``mars``
        atmos_filename : string, optional
            If ``atmos_func`` = ``'tabular'``, then set the filename of the table
            to be read in here.
        Cd : float, optional
            The drag coefficient
        Ch : float, optional
            The heat transfer coefficient
        Q : float, optional
            The heat of ablation (J/kg)
        Cl : float, optional
            Lift coefficient
        alpha : float, optional
            Dispersion coefficient
        Rp : float, optional
            Planet radius (m)
        rho0 : float, optional
            Air density at zero altitude (kg/m^3)
        g : float, optional
            Surface gravity (m/s^2)
        H : float, optional
            Atmospheric scale height (m)
        Raises
        ------
        NotImplementedError
            If ``atmos_func`` is not one of the recognised options.
        Returns
        -------
        None
        """
        # Input constants
        self.Cd = Cd
        self.Ch = Ch
        self.Q = Q
        self.Cl = Cl
        self.alpha = alpha
        self.Rp = Rp
        self.g = g
        self.H = H
        self.rho0 = rho0
        # self.rhoa is set to a callable z -> density for the chosen model.
        if atmos_func == 'exponential':
            self.rhoa = lambda z: self.rho0 * np.exp(-z/self.H)
        elif atmos_func == 'tabular':
            # atmos_filename="../data/AltitudeDensityTable.csv"
            assert atmos_filename is not None
            # Table columns: altitude (m), density at that altitude, and a
            # per-row scale height used to interpolate between rows.
            data = pd.read_csv(atmos_filename, skiprows=6, delimiter=' ', names = ['Altitude', 'Density', 'Height'])
            def limitz(z):
                # Rows are spaced every 10 m; clamp the index to the table
                # range (0 .. 8600, i.e. 0 m .. 86 km).
                roundedz=int(z/10)
                if z < 0:
                    roundedz=0
                elif z> 86000:
                    roundedz=8600
                # Exponential interpolation between tabulated rows.
                return data.Density[roundedz]*np.exp((data.Altitude[roundedz]-z)/(data.Height[roundedz]))
            self.rhoa = lambda z: limitz(z)
            #self.rhoa = lambda z: data.Density[int(z/10)]*np.exp((data.Altitude[int(z/10)]-z)/(data.Height[int(z/10)]))
        elif atmos_func == 'mars':
            # Piecewise temperature profile (K) as a function of altitude (m).
            def T(altitudez):
                if altitudez < 7000.:
                    return 242.1-0.000998*altitudez
                else:
                    return 249.7-0.00222*altitudez
            self.rhoa = lambda z: 0.699*np.exp(-0.00009*z)/(0.1921*T(z)) #p in kPa
        elif atmos_func == 'constant':
            self.rhoa = lambda x: rho0 #rho0
        else:
            print('''Choose from 'exponential', 'tabular', 'mars' or 'constant'.''')
            raise NotImplementedError
    #analytical v
    def r2A(self, radius):
        '''
        calculate cross-sectional area (m^2) from given radius (m)
        '''
        return np.pi*radius**2
    def rrho2m(self, radius, density):
        '''
        calculate mass (kg) of a sphere from given radius (m) & density (kg/m^3)
        '''
        return (4./3.)*np.pi*radius**3*density
    def vz(self, radius, velocity, density, strength, angle, z, degree=True):
        '''
        ANALYTICAL SOLUTION!
        Calculating velocity from altitude.
        ----------input-----------
        radius, asteroid radius in m
        velocity, initial speed, m/s
        density, asteroid density in kg/m**3
        strength, unused here (kept for a uniform signature)
        angle, angle of injection
        z, altitude in metres, usually an array
        degree, whether the angle is given in degrees (True) or radians
        ----------output-----------
        speed in m/s, usually an array
        '''
        if degree:
            angle_rad = angle*np.pi/180.
        else:
            angle_rad = angle
        #print(angle)
        return velocity*np.exp(-self.H*self.Cd*self.rho0*np.exp(-z/self.H)*\
            self.r2A(radius)/(2*self.rrho2m(radius, density)*np.sin(angle_rad)))
    #analytical dvdz
    def dvdz(self, radius, velocity, density, strength, angle, z, degree=True):
        '''
        ANALYTICAL SOLUTION!
        Derivative of the analytical velocity with respect to altitude.
        ----------input-----------
        same arguments as ``vz``
        ----------output-----------
        change in speed per unit altitude in /s, usually an array
        '''
        if degree:
            angle_rad = angle*np.pi/180.
        else:
            angle_rad = angle
        #print(angle)
        return self.vz(radius, velocity, density, strength, angle, z, degree=False)*\
            self.Cd*self.rho0*np.exp(-z/self.H)*self.r2A(radius)/(2*self.rrho2m(radius,density)*np.sin(angle_rad))
    #analytical dedz
    def dedz(self, radius, velocity, density, strength, angle, z, degree = True):
        '''
        ANALYTICAL SOLUTION!
        Kinetic energy lost per unit altitude, from the analytical velocity.
        ----------input-----------
        same arguments as ``vz``
        ----------output-----------
        change in energy per unit length in kT/km, usually an array
        '''
        if degree:
            angle_rad=angle*np.pi/180.
        else:
            angle_rad=angle
        #print(angle)
        return self.dvdz(radius, velocity, density, strength, angle_rad, z, degree=False)*\
            self.vz(radius, velocity, density, strength, angle_rad, z, degree=False)*self.rrho2m(radius, density)/4.184e9 #J/m to kT/km
    def deg2rad(self, angle):#function which converts from degrees to radians
        return np.pi*angle/180.
    def fun(self, t, state, strength):
        """
        RHS function for impact system
        Evaluates the time-derivative of the 6-component state vector for the
        atmospheric-entry ODE system; fragmentation spreading (dr/dt) is only
        active once ram pressure exceeds the asteroid strength.
        """
        f = np.zeros_like(state)
        # unpack the state vector, which is:
        # velocity, density*volume, angle, init_altitude, x, radius
        velo, mass, theta, my_z, my_x, radius = state
        #rhoa = self.rho0 * np.exp(-z/self.H)
        rhom = 3000
        #V = (4*np.pi*r**3)/3#calculate volume to get the mass from density
        A = np.pi*radius**2#calculate cross sectional area
        f[0] = ((-self.Cd*self.rhoa(my_z)*A*velo**2)/(2*mass)) + self.g*np.sin(theta) #dv/dt
        f[1] = (-self.Ch*self.rhoa(my_z)*A*velo**3)/(2*self.Q) #dm/dt
        f[2] = (self.g*np.cos(theta))/velo-(self.Cl*self.rhoa(my_z)*A*velo)/(2*mass)-\
            (velo*np.cos(theta))/(self.Rp+my_z) #dtheta/dt
        f[3] = -velo*np.sin(theta) #dz/dt
        f[4] = (velo*np.cos(theta))/(1+ (my_z)/self.Rp) #dx/dt
        # fragmentation: spread radius only when ram pressure >= strength
        if self.rhoa(my_z)*velo**2 >= strength:
            f[5] = (7/2*self.alpha* self.rhoa(my_z)/rhom)**0.5 * velo #dr/dt if needed
        else:
            f[5] = 0
        return f
    def imp_eu(self, fun, radius, velocity, density, angle, strength,
               init_altitude, dt ,t_max,t0, x):
        """
        the improved Euler function from the lecture
        we have modified u to include all 5 of our variables
        Integrates ``fun`` from t0 until t_max, or until velocity, mass or
        altitude drops to zero; returns (states, times) as numpy arrays.
        """
        volume = 4.*np.pi*radius**3/3.
        my_u = np.array([velocity, density*volume, angle, init_altitude, x, radius])
        time = np.array(t0)
        u_all = [[velocity, density*volume, angle, init_altitude, x, radius]]
        t_all = [t0]
        while (time < t_max) and (my_u[0] > 0) and (my_u[1] > 0) and (my_u[3] > 0):
            # Heun / improved-Euler step: predictor then trapezoidal corrector.
            ue = my_u + dt*fun(time, my_u, strength)
            my_u = my_u + 0.5*dt* (fun(time, my_u, strength) + fun(time + dt, ue, strength))
            u_all.append(my_u)
            time = time + dt
            t_all.append(time)
        return np.array(u_all), np.array(t_all)
    def impact(self, radius, velocity, density, strength, angle,
               init_altitude = 100e3, dt = 0.1, radians = False):
        """
        Solve the system of differential equations for a given impact event.
        Also calculates the kinetic energy lost per unit altitude and
        analyses the result to determine the outcome of the impact.
        Parameters
        ----------
        radius : float
            The radius of the asteroid in meters
        velocity : float
            The entry speed of the asteroid in meters/second
        density : float
            The density of the asteroid in kg/m^3
        strength : float
            The strength of the asteroid (i.e., the ram pressure above which
            fragmentation and spreading occurs) in N/m^2 (Pa)
        angle : float
            The initial trajectory angle of the asteroid to the horizontal
            By default, input is in degrees. If 'radians' is set to True, the
            input should be in radians
        init_altitude : float, optional
            Initial altitude in m
        dt : float, optional
            The output timestep, in s
        radians : logical, optional
            Whether angles should be given in degrees or radians. Default=False
            Angles returned in the DataFrame will have the same units as the
            input
        Returns
        -------
        Result : DataFrame
            A pandas DataFrame containing the solution to the system.
            Includes the following columns:
            ``velocity``, ``mass``, ``angle``, ``altitude``,
            ``distance``, ``radius``, ``time``, ``dedz``
        outcome : Dict
            dictionary with details of airburst and/or cratering event.
            For an airburst, this will contain the following keys:
            ``burst_peak_dedz``, ``burst_altitude``, ``burst_total_ke_lost``.
            For a cratering event, this will contain the following keys:
            ``impact_time``, ``impact_mass``, ``impact_speed``.
            All events should also contain an entry with the key ``outcome``,
            which should contain one of the following strings:
            ``Airburst``, ``Cratering`` or ``Airburst and cratering``
        """
        # Pipeline: integrate the entry, append dedz, then classify outcome.
        res1 = self.solve_atmospheric_entry(radius, velocity, density, strength, angle,init_altitude, dt,radians)
        res2 = self.calculate_energy(res1)
        res3 = self.analyse_outcome(res2)
        return res2, res3
    def solve_atmospheric_entry(
            self, radius, velocity, density, strength, angle,
            init_altitude=100e3, dt=0.05, radians=False):
        """
        Solve the system of differential equations for a given impact scenario
        Parameters
        ----------
        radius : float
            The radius of the asteroid in meters
        velocity : float
            The entry speed of the asteroid in meters/second
        density : float
            The density of the asteroid in kg/m^3
        strength : float
            The strength of the asteroid (i.e., the ram pressure above which
            fragmentation and spreading occurs) in N/m^2 (Pa)
        angle : float
            The initial trajectory angle of the asteroid to the horizontal
            By default, input is in degrees. If 'radians' is set to True, the
            input should be in radians
        init_altitude : float, optional
            Initial altitude in m
        dt : float, optional
            The output timestep, in s
        radians : logical, optional
            Whether angles should be given in degrees or radians. Default=False
            Angles returned in the DataFrame will have the same units as the
            input
        Returns
        -------
        Result : DataFrame
            A pandas DataFrame containing the solution to the system.
            Includes the following columns:
            ``velocity``, ``mass``, ``angle``, ``altitude``,
            ``distance``, ``radius``, ``time``
        """
        if not radians:
            angle = self.deg2rad(angle)
        variables, times = self.imp_eu(self.fun, radius, velocity, density, angle, strength,
                                       init_altitude, dt, t_max=1e6, t0=0, x=0)
        # Unpack the state history column-by-column.
        velocity = variables[:, 0]
        mass = variables[:, 1]
        angle = variables[:, 2]
        init_altitude = variables[:, 3]
        x = variables[:, 4]
        radius = variables[:, 5]
        # Angle is converted back to degrees for the output DataFrame.
        return pd.DataFrame({'velocity': velocity,
                             'mass': mass,
                             'angle': (angle*180/np.pi),
                             'altitude': init_altitude,
                             'distance': x,
                             'radius': radius,
                             'time': times})
    def calculate_energy(self, result):
        """
        Function to calculate the kinetic energy lost per unit altitude in
        kilotons TNT per km, for a given solution.
        Parameters
        ----------
        result : DataFrame
            A pandas DataFrame with columns for the velocity, mass, angle,
            altitude, horizontal distance and radius as a function of time
        Returns
        -------
        Result : DataFrame
            Returns the DataFrame with additional column ``dedz`` which is the
            kinetic energy lost per unit altitude
        """
        # Finite-difference dE/dz between consecutive rows; 4.184e9 converts
        # J/m to kT TNT per km.
        energy = 0.5 * result['mass'] * result['velocity']**2
        dedz = energy.diff(-1)/result.altitude.diff(-1)
        dedz = dedz/(4.184*1e9)
        # Work on a copy so the caller's DataFrame is not mutated.
        result = result.copy()
        result.insert(len(result.columns),
                      'dedz', dedz)
        return result
    def analyse_outcome(self, result):
        """
        Inspect a prefound solution to calculate the impact and airburst stats
        Parameters
        ----------
        result : DataFrame
            pandas DataFrame with velocity, mass, angle, altitude, horizontal
            distance, radius and dedz as a function of time
        Returns
        -------
        outcome : Dict
            dictionary with details of airburst and/or cratering event.
            For an airburst, this will contain the following keys:
            ``burst_peak_dedz``, ``burst_altitude``, ``burst_total_ke_lost``.
            For a cratering event, this will contain the following keys:
            ``impact_time``, ``impact_mass``, ``impact_speed``.
            All events should also contain an entry with the key ``outcome``,
            which should contain one of the following strings:
            ``Airburst``, ``Cratering`` or ``Airburst and cratering``
        """
        # Enter your code here to process the result DataFrame and
        # populate the outcome dictionary.
        # The burst point is taken as the row of maximum energy deposition.
        peak_dedz = result.dedz.max()
        p_of_burst = result.dedz.idxmax()
        burst_altitude = result['altitude'][p_of_burst]
        # Kinetic energy lost between entry and the burst point, in kT TNT.
        total_ke_altitude = (0.5 * result['mass'][0] * result['velocity'][0]**2 -\
            0.5 * result['mass'][p_of_burst] * result['velocity'][p_of_burst]**2)/(4.184e12) #kT
        impact_time = result['time'][p_of_burst]
        impact_mass = result['mass'][p_of_burst]
        impact_speed = result['velocity'][p_of_burst]
        outcome = {}
        # Classify by burst altitude: > 5 km airburst only, 0-5 km both,
        # otherwise a ground impact (cratering).
        if burst_altitude > 5000:
            outcome['outcome'] = 'Airburst'
            outcome['burst_peak_dedz'] = peak_dedz
            outcome['burst_altitude'] = burst_altitude
            outcome['burst_total_ke_lost'] = total_ke_altitude
        elif burst_altitude > 0 and burst_altitude < 5000:
            outcome['outcome'] = 'Airburst and cratering'
            outcome['burst_peak_dedz'] = peak_dedz
            outcome['burst_altitude'] = burst_altitude
            outcome['burst_total_ke_lost'] = total_ke_altitude
        else:
            outcome['outcome'] = 'Cratering'
            outcome['impact_time'] = impact_time
            outcome['impact_mass'] = impact_mass
            outcome['impact_speed'] = impact_speed
        return outcome
| [
"pandas.DataFrame",
"numpy.zeros_like",
"pandas.read_csv",
"numpy.sin",
"numpy.array",
"numpy.exp",
"numpy.cos"
] | [((7382, 7402), 'numpy.zeros_like', 'np.zeros_like', (['state'], {}), '(state)\n', (7395, 7402), True, 'import numpy as np\n'), ((8695, 8766), 'numpy.array', 'np.array', (['[velocity, density * volume, angle, init_altitude, x, radius]'], {}), '([velocity, density * volume, angle, init_altitude, x, radius])\n', (8703, 8766), True, 'import numpy as np\n'), ((8780, 8792), 'numpy.array', 'np.array', (['t0'], {}), '(t0)\n', (8788, 8792), True, 'import numpy as np\n'), ((13720, 13884), 'pandas.DataFrame', 'pd.DataFrame', (["{'velocity': velocity, 'mass': mass, 'angle': angle * 180 / np.pi,\n 'altitude': init_altitude, 'distance': x, 'radius': radius, 'time': times}"], {}), "({'velocity': velocity, 'mass': mass, 'angle': angle * 180 / np\n .pi, 'altitude': init_altitude, 'distance': x, 'radius': radius, 'time':\n times})\n", (13732, 13884), True, 'import pandas as pd\n'), ((8113, 8126), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (8119, 8126), True, 'import numpy as np\n'), ((9242, 9257), 'numpy.array', 'np.array', (['u_all'], {}), '(u_all)\n', (9250, 9257), True, 'import numpy as np\n'), ((9259, 9274), 'numpy.array', 'np.array', (['t_all'], {}), '(t_all)\n', (9267, 9274), True, 'import numpy as np\n'), ((2172, 2271), 'pandas.read_csv', 'pd.read_csv', (['atmos_filename'], {'skiprows': '(6)', 'delimiter': '""" """', 'names': "['Altitude', 'Density', 'Height']"}), "(atmos_filename, skiprows=6, delimiter=' ', names=['Altitude',\n 'Density', 'Height'])\n", (2183, 2271), True, 'import pandas as pd\n'), ((5904, 5921), 'numpy.sin', 'np.sin', (['angle_rad'], {}), '(angle_rad)\n', (5910, 5921), True, 'import numpy as np\n'), ((7852, 7865), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (7858, 7865), True, 'import numpy as np\n'), ((8155, 8168), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (8161, 8168), True, 'import numpy as np\n'), ((1986, 2005), 'numpy.exp', 'np.exp', (['(-z / self.H)'], {}), '(-z / self.H)\n', (1992, 2005), True, 'import numpy as 
np\n'), ((5838, 5857), 'numpy.exp', 'np.exp', (['(-z / self.H)'], {}), '(-z / self.H)\n', (5844, 5857), True, 'import numpy as np\n'), ((8051, 8064), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (8057, 8064), True, 'import numpy as np\n'), ((2513, 2574), 'numpy.exp', 'np.exp', (['((data.Altitude[roundedz] - z) / data.Height[roundedz])'], {}), '((data.Altitude[roundedz] - z) / data.Height[roundedz])\n', (2519, 2574), True, 'import numpy as np\n'), ((4699, 4716), 'numpy.sin', 'np.sin', (['angle_rad'], {}), '(angle_rad)\n', (4705, 4716), True, 'import numpy as np\n'), ((7969, 7982), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (7975, 7982), True, 'import numpy as np\n'), ((4599, 4618), 'numpy.exp', 'np.exp', (['(-z / self.H)'], {}), '(-z / self.H)\n', (4605, 4618), True, 'import numpy as np\n'), ((3007, 3025), 'numpy.exp', 'np.exp', (['(-9e-05 * z)'], {}), '(-9e-05 * z)\n', (3013, 3025), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import norm
from matplotlib import pyplot as plt
from scipy.optimize import linear_sum_assignment as optimise
from scipy.stats import linregress
import copy
try:
from openbabel.openbabel import OBConversion, OBMol, OBAtomAtomIter, OBMolAtomIter
except ImportError:
from openbabel import *
from scipy.stats import gaussian_kde
def AssignCarbon(NMRData, Isomers, settings):
    """Assign experimental carbon peaks to each isomer's calculated shifts.

    Runs the iterative assignment against the experimental carbon data and
    stores, for every isomer, the experimental peak matched to each labelled
    shift in ``isomer.Cexp`` (empty string where no peak was assigned).
    Returns the (mutated) Isomers sequence.
    """
    carbon = NMRData.carbondata
    for isomer in Isomers:
        shifts, peaks, labels, scaled = iterative_assignment(
            carbon["exppeaks"],
            carbon["xdata"],
            carbon["ydata"],
            isomer.Cshifts, isomer.Clabels)
        # One experimental slot per calculated shift, '' when unassigned.
        isomer.Cexp = [''] * len(isomer.Cshifts)
        for lbl, pk in zip(labels, peaks):
            isomer.Cexp[isomer.Clabels.index(lbl)] = pk
    return Isomers
def iterative_assignment(picked_peaks, spectral_xdata_ppm, total_spectral_ydata, calculated_shifts, C_labels):
    """Iteratively assign experimental 13C peaks to calculated shifts.

    Two assignment rounds are run: round 0 scales the calculated shifts
    with the fixed external (empirical) linear scaling, round 1 rescales
    them by linear regression against the round-0 assignment.  Each round
    builds a probability matrix from shift/peak differences (Gaussian
    error model) times amplitude-based peak weights, then solves the
    matching with scipy's linear_sum_assignment.  A final pass reassigns
    shifts whose locally best peak changed ("bias"), then removes crossed
    assignments.

    Args:
        picked_peaks: indices of picked peaks into spectral_xdata_ppm.
        spectral_xdata_ppm: ppm axis of the experimental spectrum.
        total_spectral_ydata: experimental intensities on the same axis.
        calculated_shifts: calculated 13C shifts for one isomer.
        C_labels: carbon labels parallel to calculated_shifts.

    Returns:
        (assigned_shifts, assigned_peaks, assigned_labels, scaled_shifts)
        ordered by the original C_labels; unassigned entries are ''.
    """
    calculated_shifts = np.array(calculated_shifts)
    original_C_labels = np.array(C_labels)
    # Work in ascending-shift order; labels are reordered to match.
    s = np.argsort(np.array(calculated_shifts))
    calculated_shifts = calculated_shifts[s]
    scaled_shifts = copy.copy(calculated_shifts)
    C_labels = original_C_labels[s]
    exp_peaks = spectral_xdata_ppm[picked_peaks]
    new_assigned_peaks = []
    new_assigned_shifts = []
    for lnum in range(0, 2):
        if lnum == 0:
            # Round 0: fixed external scaling, tight error model.
            scaled_shifts = external_scale_carbon_shifts(calculated_shifts)
            scaled_mu = 0
            scaled_std = 2.486068603518297
            copy_calc_shifts = copy.copy(calculated_shifts)
        elif lnum == 1:
            # Round 1: rescale by regression on the round-0 assignment;
            # wider std tolerates the looser internal fit.
            old_assigned_shifts = copy.copy(new_assigned_shifts)
            old_assigned_peaks = copy.copy(new_assigned_peaks)
            scaled_shifts, slope, intercept = internal_scale_carbon_shifts(old_assigned_shifts, old_assigned_peaks,
                                                                          calculated_shifts)
            scaled_mu = 0
            scaled_std = 10
            copy_calc_shifts = copy.copy(calculated_shifts)
        ####calculate difference matrix
        diff_matrix = np.zeros((len(calculated_shifts), len(exp_peaks)))
        for ind1, i in enumerate(scaled_shifts):
            for ind2, j in enumerate(exp_peaks):
                diff_matrix[ind1, ind2] = j - i
        ####find any errors larger than 10 ppm and nans
        ####calculate pos matirx
        pos_matrix = carbon_probabilities(diff_matrix, scaled_mu, scaled_std)
        pos_matrix[abs(diff_matrix) >= 10] = 0
        pos_matrix[np.isnan(pos_matrix)] = 0
        ####calculate amp matrix
        amp_matrix = amp_kde(total_spectral_ydata, picked_peaks, pos_matrix, calculated_shifts)
        ####duplicate the pos matrix along the horizontal to allow multiple assignment weighting
        pos_matrixc = copy.copy(pos_matrix)
        for d in range(0, len(calculated_shifts) - 1):
            pos_matrix = np.hstack((pos_matrix, pos_matrixc))
        ####calculate the probability matrix
        # Geometric mean of position and amplitude probabilities.
        prob_matrix = (pos_matrix * amp_matrix) ** 0.5
        ####check for any shifts that have zero probabilites for all peaks
        b = np.where(np.sum(prob_matrix, 1) == 0)
        prob_matrix = np.delete(prob_matrix, b, 0)
        unassignable_shifts = calculated_shifts[b]
        copy_calc_shifts = np.delete(copy_calc_shifts, b)
        copy_labels = np.delete(C_labels, b)
        ####do the assignment
        # Hungarian algorithm on cost = 1 - probability.
        vertical_ind, horizontal_ind = optimise(1 - prob_matrix)
        # Map duplicated columns back onto the real peak indices.
        horizontal_ind = horizontal_ind % len(picked_peaks)
        opt_peaks = exp_peaks[horizontal_ind]
        opt_shifts = copy_calc_shifts[vertical_ind]
        opt_labels = copy_labels[vertical_ind]
        ####do some sorting
        so = np.argsort(opt_shifts)
        new_assigned_peaks = opt_peaks[so]
        new_assigned_shifts = opt_shifts[so]
        new_assigned_labels = opt_labels[so]
    ############################
    # in the third round only reassign shifts that have had a change of bias
    old_assigned_shifts = copy.copy(new_assigned_shifts)
    old_assigned_peaks = copy.copy(new_assigned_peaks)
    new_assigned_shifts = copy.copy(new_assigned_shifts)
    new_assigned_peaks = copy.copy(new_assigned_peaks)
    new_assigned_labels = copy.copy(new_assigned_labels)
    bias_weights = []
    # find unassigned peaks
    # ampdivide counts how many shifts share each experimental peak.
    ampdivide = np.zeros(len(picked_peaks))
    peak_amps = total_spectral_ydata[picked_peaks]
    reassign_shifts_ind = []
    for i in old_assigned_peaks:
        w = np.where(exp_peaks == i)
        ampdivide[w] += 1
    c = 0
    for shift, peak in zip(old_assigned_shifts, old_assigned_peaks):
        # find where peaks are within 20 ppm window
        w = np.where((exp_peaks < peak + 10) & (exp_peaks > peak - 10))[0]
        if len(w) > 0:
            # find maximum peak height within this window - when taking into account how many times the peak has already been assigned
            # find amplitude of peak given how many times it has been assigned
            assigned_amp = (peak_amps[exp_peaks == peak] / ampdivide[exp_peaks == peak])[0]
            # find amplitude of max peak in the 20 ppm window given how many times it would be assigned if the current shift was assigned to it as well
            div_amps = peak_amps / (ampdivide + 1)
            pi = np.where(exp_peaks == peak)
            div_amps[pi] = peak_amps[pi] / ampdivide[pi]
            max_window_amp = np.max(div_amps[w])
            ratio = max_window_amp / assigned_amp
            if ratio > 1:
                # A taller (per-assignment) peak exists nearby: mark for reassignment.
                bias_weights.append(ratio)
                reassign_shifts_ind.append(c)
        c += 1
    ####reassign the shifts with a bias above zero in order of bias to peak within ten ppm with largest unassigned amplitude
    bias_weights = np.array(bias_weights)
    reassign_shifts = np.array(old_assigned_shifts)[reassign_shifts_ind]
    s = np.argsort(bias_weights)
    reassign_shifts = reassign_shifts[s]
    reassign_shifts_ind = np.array(reassign_shifts_ind)[s]
    for shift, ind in zip(reassign_shifts, reassign_shifts_ind):
        # find peak this shift is assigned to
        p = new_assigned_peaks[ind]
        pi = np.where(exp_peaks == p)
        new_peak_amps = peak_amps / (ampdivide + 1)
        new_peak_amps[pi] = peak_amps[pi] / (ampdivide[pi])
        # find peaks within 10 ppm
        w = np.where((exp_peaks < p + 10) & (
                exp_peaks > p - 10))[0]
        if len(w) > 0:
            assigned_peak = exp_peaks[w[np.argmax(new_peak_amps[w])]]
            new_assigned_peaks[ind] = assigned_peak
        # recalculate estimated peak heights
        ampdivide = np.zeros(len(picked_peaks))
        for i in new_assigned_peaks:
            w = np.where(exp_peaks == i)
            ampdivide[w] += 1
    #############################
    # remove cross assignments
    new_assigned_shifts, new_assigned_peaks, new_assigned_labels = removecrossassignments(new_assigned_peaks,
                                                                                           new_assigned_shifts,
                                                                                           new_assigned_labels)
    #### sortoutput wrt original H labels
    assigned_labels = []
    assigned_shifts = []
    assigned_peaks = []
    for label in original_C_labels:
        wh = np.where(new_assigned_labels == label)[0]
        assigned_labels.append(label)
        if len(wh) > 0:
            assigned_shifts.append(new_assigned_shifts[wh[0]])
            assigned_peaks.append(new_assigned_peaks[wh[0]])
        else:
            # Label was dropped as unassignable: emit empty strings.
            assigned_shifts.append('')
            assigned_peaks.append('')
    return assigned_shifts, assigned_peaks, assigned_labels, scaled_shifts
def external_scale_carbon_shifts(calculated_shifts):
    """Apply the fixed empirical linear scaling to calculated 13C shifts."""
    slope = 0.9601578792266342
    intercept = -1.2625604390657088
    return calculated_shifts * slope + intercept
def internal_scale_carbon_shifts(assigned_shifts, assigned_peaks, calculated_shifts):
    """Least-squares rescale of calculated shifts onto the assigned peaks.

    Fits peak = slope * shift + intercept over the current assignment and
    applies that line to all calculated shifts.

    Returns:
        (scaled_shifts, slope, intercept)
    """
    fit = linregress(assigned_shifts, assigned_peaks)
    rescaled = calculated_shifts * fit.slope + fit.intercept
    return rescaled, fit.slope, fit.intercept
def amp_weighting(total_spectral_ydata, picked_peaks, prob_matrix, shifts, steep_weights):
    """Build a per-shift amplitude weight matrix via a sigmoid threshold.

    Peak amplitudes are normalised to [0, 1]; the sigmoid threshold is set
    so roughly len(shifts) of the tallest peaks lie above it.  Each shift
    row uses a sigmoid steepness scaled by its entry in steep_weights.

    NOTE(review): this function also draws diagnostic plots and calls
    plt.show(), which blocks in interactive backends — side effects to be
    aware of if calling it from a pipeline.
    """
    peak_amps = total_spectral_ydata[picked_peaks]
    peak_amps = peak_amps
    # Normalise amplitudes to a maximum of 1.
    peak_amps = peak_amps / np.max(peak_amps)
    duplicated_amps = copy.copy(peak_amps)
    # Tile amplitudes horizontally, damping each extra copy by 0.25**(d+1)
    # so multiply-assigned peaks are penalised.
    for d in range(0, len(shifts) - 1):
        duplicated_amps = np.hstack((duplicated_amps, peak_amps * (0.25) ** (d + 1)))
    samps = np.sort(peak_amps)
    # Threshold: amplitude of the len(shifts)-th tallest peak (or smallest).
    thresh = samps[max([len(samps) - len(shifts), 0])]
    def weight_curve(x, thresh, steep):
        # Logistic weighting centred on thresh.
        y = 1 / (1 + np.exp(-steep * (x - thresh)))
        return y
    steep = np.std(peak_amps)
    x = np.linspace(0, 1, 1000)
    # Diagnostic plots of two candidate steepness values.
    plt.plot(x, weight_curve(x, thresh, 100 * steep))
    plt.plot(x, weight_curve(x, thresh, 400 * steep))
    amp_matrix = np.zeros((len(shifts), len(duplicated_amps)))
    for i in range(0, len(amp_matrix[:, 0])):
        amp_matrix[i, :] = weight_curve(duplicated_amps, thresh, steep * 100 ** steep_weights[i])
    plt.plot(peak_amps, weight_curve(peak_amps, thresh, 100 * steep), 'o', color='C0')
    plt.plot(peak_amps, weight_curve(peak_amps, thresh, 400 * steep), 'co', color='C1')
    plt.show()
    return amp_matrix
def amp_kde(total_spectral_ydata, picked_peaks, prob_matrix, shifts):
    """Weight peaks by amplitude group found via KDE of the amplitudes.

    A Gaussian KDE of the normalised peak amplitudes is partitioned at the
    local minima of its second derivative; peaks falling between adjacent
    minima form a group.  Taller groups get larger weights (up to 1), and
    the matrix is tiled horizontally (damped per copy) and vertically to
    match the duplicated probability matrix, then row-normalised.

    Returns:
        2-D weight matrix with len(shifts) rows, rows summing to 1.
    """
    peak_amps = total_spectral_ydata[picked_peaks]
    peak_amps = peak_amps
    # Normalise amplitudes to a maximum of 1.
    peak_amps = peak_amps / np.max(peak_amps)
    x = np.linspace(0, 1, 1000)
    kde = gaussian_kde(peak_amps)
    y = kde.evaluate(x)
    # Locate local maxima of the second derivative (boundaries between
    # amplitude clusters); +2 compensates for np.diff shortening the array.
    ddy = np.diff(y, 2)
    ddy1 = np.roll(ddy, 1)
    ddyn1 = np.roll(ddy, -1)
    w = np.where((ddy[1:-1] > ddy1[1:-1]) & (ddy[1:-1] > ddyn1[1:-1]))[0] + 2
    # add zero and one values
    if w[0] != 0:
        w = np.hstack((0, w))
    if w[-1] != len(ddy) - 2:
        w = np.hstack((w, len(y) - 1))
    minima = x[w]
    i = 0
    # Assign each peak to the amplitude interval (group) it falls into.
    groups = np.zeros(len(peak_amps))
    number_in_group = []
    for m, m1 in zip(minima[:-1], minima[1:]):
        w = np.where((peak_amps > m) & (peak_amps <= m1))[0]
        groups[w] = i
        number_in_group.append(len(w))
        i += 1
    groups = groups.astype(int)
    # convert group numbers to weights
    # Reverse cumulative count: taller groups divide len(shifts) by fewer peaks.
    cumsum = np.cumsum(number_in_group[::-1])[::-1]
    weight_values = len(shifts) / cumsum
    weight_values /= np.max(weight_values)
    peak_weights = weight_values[groups]
    duplicated_weights = copy.copy(peak_weights)
    # do multiple assignment weights
    for d in range(0, len(shifts) - 1):
        duplicated_weights = np.hstack(
            (duplicated_weights, peak_weights * (0.125 ** (np.max(groups) - groups + 1)) ** (d + 1)))
    duplicated_weightsc = copy.copy(duplicated_weights)
    # duplicate along vertical
    for d in range(0, len(shifts) - 1):
        duplicated_weights = np.vstack((duplicated_weights, duplicated_weightsc))
    # renormalise
    for i in range(duplicated_weights.shape[0]):
        duplicated_weights[i, :] = duplicated_weights[i, :] / np.sum(duplicated_weights[i, :])
    return duplicated_weights
def multiple_assignment_weighting(prob_matrix):
    """Tile the probability matrix horizontally, damping copy i by 1/(i+1).

    One extra copy is appended per row of the input (shifts are rows,
    peaks are columns), allowing a peak to be matched multiple times at
    a decreasing probability.
    """
    base = copy.copy(prob_matrix)
    for copy_idx in range(base.shape[0]):
        prob_matrix = np.hstack((prob_matrix, base * (1 / (copy_idx + 1))))
    return prob_matrix
def carbon_probabilities(diff_matrix, scaled_mu, scaled_std):
    """Gaussian error probabilities, peak-normalised then row-normalised.

    Each difference is scored by a normal pdf divided by the pdf at the
    mean (so a zero error scores 1), then every row is normalised to sum
    to 1.
    """
    peak_density = norm.pdf(scaled_mu, scaled_mu, scaled_std)
    probs = norm.pdf(diff_matrix, scaled_mu, scaled_std) / peak_density
    row_totals = probs.sum(axis=1, keepdims=True)
    return probs / row_totals
def simulate_spectrum(spectral_xdata_ppm, calc_shifts):
    """Return the summed Lorentzian lineshape for all calculated shifts.

    NOTE(review): this definition is shadowed by the 4-argument
    ``simulate_spectrum`` defined immediately below, so this version is
    effectively dead code.
    """
    y = np.zeros(len(spectral_xdata_ppm))
    for shift in calc_shifts:
        # width 0.001 ppm, amplitude 0.2 per shift
        y += lorentzian(spectral_xdata_ppm, 0.001, shift, 0.2)
    return y
def simulate_spectrum(spectral_xdata_ppm, calc_shifts, assigned_peaks, set_exp):
    """Plot one Lorentzian per calculated shift, coloured by its assigned peak.

    Each curve is offset by +1.05 for display; the colour index follows the
    position of the matching experimental peak in ``set_exp``.

    NOTE(review): redefines (shadows) the 2-argument ``simulate_spectrum``
    above; draws on the current matplotlib axes and returns None.
    """
    for ind, shift in enumerate(calc_shifts):
        exp_p = assigned_peaks[ind]
        ind2 = set_exp.index(exp_p)
        y = lorentzian(spectral_xdata_ppm, 0.001, shift, 0.2)
        plt.plot(spectral_xdata_ppm, y + 1.05, color='C' + str(ind2))
def simulate_calc_data(spectral_xdata_ppm, calculated_locations, simulated_ydata):
    """Simulate the calculated spectrum as a sum of narrow Gaussians.

    One Gaussian (sigma = 0.002 ppm) is placed at each calculated peak
    location, and the sum is rescaled so its maximum equals the maximum
    of the experimental ``simulated_ydata``.
    """
    curve = np.zeros(len(spectral_xdata_ppm))
    for loc in calculated_locations:
        curve += np.exp(-0.5 * ((spectral_xdata_ppm - loc) / 0.002) ** 2)
    # Match the simulated maximum to the experimental maximum.
    return curve * (np.amax(simulated_ydata) / np.amax(curve))
def lorentzian(p, w, p0, A):
    """Lorentzian lineshape of amplitude A, centre p0, width w, at point(s) p."""
    u = (p0 - p) / (w / 2)
    return A / (1 + u * u)
def remove_iodine(sdffile, lbls, shifts):
    """Drop carbons directly bonded to iodine from labels and shift lists.

    Reads ``<sdffile>.sdf`` with OpenBabel, finds every carbon (Z=6) with
    an iodine (Z=53) neighbour, and removes the matching entry from
    ``lbls`` and from every per-conformer list in ``shifts``; both inputs
    are mutated in place and also returned.
    """
    f = sdffile + '.sdf'
    obconversion = OBConversion()
    obconversion.SetInFormat("sdf")
    obmol = OBMol()
    obconversion.ReadFile(obmol, f)
    CI = []
    for atom in OBMolAtomIter(obmol):
        if atom.GetAtomicNum() == 6:
            for NbrAtom in OBAtomAtomIter(atom):
                if (NbrAtom.GetAtomicNum() == 53):
                    # OpenBabel indices are 0-based; labels are 1-based (C1, C2, ...).
                    CI.append('C' + str(atom.GetIndex() + 1))
    # remove these carbons
    for C in CI:
        ind = lbls.index(C)
        lbls.remove(C)
        # Drop the same position from every shift list (one per conformer).
        for l in shifts:
            l.pop(ind)
    return lbls, shifts
def removecrossassignments(exp, calc, labels):
    """Resolve crossed assignments so peak order follows shift order.

    Sorts the calculated shifts in descending order (reordering ``exp``
    and ``labels`` to match), then repeatedly swaps the first pair of
    assigned experimental peaks that is out of order (difference rounded
    to 1 decimal place is negative) until the assigned peaks are
    monotonically non-increasing too.

    Args:
        exp: assigned experimental peaks (array-like).
        calc: calculated shifts, parallel to exp.
        labels: carbon labels, parallel to exp.

    Returns:
        (calc, expcopy, labels): shifts sorted descending, the de-crossed
        peaks, and the correspondingly reordered labels.
    """
    # Sort shifts in descending order, carrying peaks and labels along.
    order = np.argsort(calc)[::-1]
    calc = calc[order]
    exp = exp[order]
    labels = labels[order]
    expcopy = np.array(exp)
    while True:
        # Upper-triangular pairwise differences of assigned peaks;
        # a negative entry means assignments i < j are crossed.
        swapm = np.zeros([len(calc), len(calc)])
        for i, Hi in enumerate(expcopy):
            for j, Hj in enumerate(expcopy):
                if i > j:
                    swapm[i, j] = 0
                else:
                    swapm[i, j] = round(Hi - Hj, 1)
        w = np.argwhere(swapm < 0)
        # BUG FIX: was `if len(w > 0):` — a misplaced parenthesis that only
        # worked because the comparison array has the same length as w.
        if len(w) > 0:
            # Swap the first crossed pair of assigned peaks.
            expcopy[w[0]] = expcopy[w[0][::-1]]
        else:
            break
    return calc, expcopy, labels
"openbabel.openbabel.OBMolAtomIter",
"numpy.sum",
"numpy.argmax",
"openbabel.openbabel.OBConversion",
"numpy.isnan",
"numpy.argsort",
"openbabel.openbabel.OBMol",
"numpy.exp",
"numpy.std",
"numpy.cumsum",
"numpy.max",
"scipy.stats.linregress",
"numpy.linspace",
"openbabel.openbabel.OBAtomA... | [((1101, 1128), 'numpy.array', 'np.array', (['calculated_shifts'], {}), '(calculated_shifts)\n', (1109, 1128), True, 'import numpy as np\n'), ((1154, 1172), 'numpy.array', 'np.array', (['C_labels'], {}), '(C_labels)\n', (1162, 1172), True, 'import numpy as np\n'), ((1289, 1317), 'copy.copy', 'copy.copy', (['calculated_shifts'], {}), '(calculated_shifts)\n', (1298, 1317), False, 'import copy\n'), ((4209, 4239), 'copy.copy', 'copy.copy', (['new_assigned_shifts'], {}), '(new_assigned_shifts)\n', (4218, 4239), False, 'import copy\n'), ((4266, 4295), 'copy.copy', 'copy.copy', (['new_assigned_peaks'], {}), '(new_assigned_peaks)\n', (4275, 4295), False, 'import copy\n'), ((4323, 4353), 'copy.copy', 'copy.copy', (['new_assigned_shifts'], {}), '(new_assigned_shifts)\n', (4332, 4353), False, 'import copy\n'), ((4380, 4409), 'copy.copy', 'copy.copy', (['new_assigned_peaks'], {}), '(new_assigned_peaks)\n', (4389, 4409), False, 'import copy\n'), ((4437, 4467), 'copy.copy', 'copy.copy', (['new_assigned_labels'], {}), '(new_assigned_labels)\n', (4446, 4467), False, 'import copy\n'), ((5977, 5999), 'numpy.array', 'np.array', (['bias_weights'], {}), '(bias_weights)\n', (5985, 5999), True, 'import numpy as np\n'), ((6083, 6107), 'numpy.argsort', 'np.argsort', (['bias_weights'], {}), '(bias_weights)\n', (6093, 6107), True, 'import numpy as np\n'), ((8245, 8288), 'scipy.stats.linregress', 'linregress', (['assigned_shifts', 'assigned_peaks'], {}), '(assigned_shifts, assigned_peaks)\n', (8255, 8288), False, 'from scipy.stats import linregress\n'), ((8634, 8654), 'copy.copy', 'copy.copy', (['peak_amps'], {}), '(peak_amps)\n', (8643, 8654), False, 'import copy\n'), ((8795, 8813), 'numpy.sort', 'np.sort', (['peak_amps'], {}), '(peak_amps)\n', (8802, 8813), True, 'import numpy as np\n'), ((8995, 9012), 'numpy.std', 'np.std', (['peak_amps'], {}), '(peak_amps)\n', (9001, 9012), True, 'import numpy as np\n'), ((9022, 9045), 'numpy.linspace', 'np.linspace', 
(['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (9033, 9045), True, 'import numpy as np\n'), ((9544, 9554), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9552, 9554), True, 'from matplotlib import pyplot as plt\n'), ((9784, 9807), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(1000)'], {}), '(0, 1, 1000)\n', (9795, 9807), True, 'import numpy as np\n'), ((9819, 9842), 'scipy.stats.gaussian_kde', 'gaussian_kde', (['peak_amps'], {}), '(peak_amps)\n', (9831, 9842), False, 'from scipy.stats import gaussian_kde\n'), ((9879, 9892), 'numpy.diff', 'np.diff', (['y', '(2)'], {}), '(y, 2)\n', (9886, 9892), True, 'import numpy as np\n'), ((9905, 9920), 'numpy.roll', 'np.roll', (['ddy', '(1)'], {}), '(ddy, 1)\n', (9912, 9920), True, 'import numpy as np\n'), ((9934, 9950), 'numpy.roll', 'np.roll', (['ddy', '(-1)'], {}), '(ddy, -1)\n', (9941, 9950), True, 'import numpy as np\n'), ((10653, 10674), 'numpy.max', 'np.max', (['weight_values'], {}), '(weight_values)\n', (10659, 10674), True, 'import numpy as np\n'), ((10743, 10766), 'copy.copy', 'copy.copy', (['peak_weights'], {}), '(peak_weights)\n', (10752, 10766), False, 'import copy\n'), ((11015, 11044), 'copy.copy', 'copy.copy', (['duplicated_weights'], {}), '(duplicated_weights)\n', (11024, 11044), False, 'import copy\n'), ((11506, 11528), 'copy.copy', 'copy.copy', (['prob_matrix'], {}), '(prob_matrix)\n', (11515, 11528), False, 'import copy\n'), ((13214, 13228), 'openbabel.openbabel.OBConversion', 'OBConversion', ([], {}), '()\n', (13226, 13228), False, 'from openbabel.openbabel import OBConversion, OBMol, OBAtomAtomIter, OBMolAtomIter\n'), ((13277, 13284), 'openbabel.openbabel.OBMol', 'OBMol', ([], {}), '()\n', (13282, 13284), False, 'from openbabel.openbabel import OBConversion, OBMol, OBAtomAtomIter, OBMolAtomIter\n'), ((13351, 13371), 'openbabel.openbabel.OBMolAtomIter', 'OBMolAtomIter', (['obmol'], {}), '(obmol)\n', (13364, 13371), False, 'from openbabel.openbabel import OBConversion, OBMol, OBAtomAtomIter, 
OBMolAtomIter\n'), ((13991, 14004), 'numpy.array', 'np.array', (['exp'], {}), '(exp)\n', (13999, 14004), True, 'import numpy as np\n'), ((1193, 1220), 'numpy.array', 'np.array', (['calculated_shifts'], {}), '(calculated_shifts)\n', (1201, 1220), True, 'import numpy as np\n'), ((2985, 3006), 'copy.copy', 'copy.copy', (['pos_matrix'], {}), '(pos_matrix)\n', (2994, 3006), False, 'import copy\n'), ((3377, 3405), 'numpy.delete', 'np.delete', (['prob_matrix', 'b', '(0)'], {}), '(prob_matrix, b, 0)\n', (3386, 3405), True, 'import numpy as np\n'), ((3486, 3516), 'numpy.delete', 'np.delete', (['copy_calc_shifts', 'b'], {}), '(copy_calc_shifts, b)\n', (3495, 3516), True, 'import numpy as np\n'), ((3540, 3562), 'numpy.delete', 'np.delete', (['C_labels', 'b'], {}), '(C_labels, b)\n', (3549, 3562), True, 'import numpy as np\n'), ((3634, 3659), 'scipy.optimize.linear_sum_assignment', 'optimise', (['(1 - prob_matrix)'], {}), '(1 - prob_matrix)\n', (3642, 3659), True, 'from scipy.optimize import linear_sum_assignment as optimise\n'), ((3912, 3934), 'numpy.argsort', 'np.argsort', (['opt_shifts'], {}), '(opt_shifts)\n', (3922, 3934), True, 'import numpy as np\n'), ((4693, 4717), 'numpy.where', 'np.where', (['(exp_peaks == i)'], {}), '(exp_peaks == i)\n', (4701, 4717), True, 'import numpy as np\n'), ((6023, 6052), 'numpy.array', 'np.array', (['old_assigned_shifts'], {}), '(old_assigned_shifts)\n', (6031, 6052), True, 'import numpy as np\n'), ((6177, 6206), 'numpy.array', 'np.array', (['reassign_shifts_ind'], {}), '(reassign_shifts_ind)\n', (6185, 6206), True, 'import numpy as np\n'), ((6374, 6398), 'numpy.where', 'np.where', (['(exp_peaks == p)'], {}), '(exp_peaks == p)\n', (6382, 6398), True, 'import numpy as np\n'), ((8593, 8610), 'numpy.max', 'np.max', (['peak_amps'], {}), '(peak_amps)\n', (8599, 8610), True, 'import numpy as np\n'), ((8722, 8779), 'numpy.hstack', 'np.hstack', (['(duplicated_amps, peak_amps * 0.25 ** (d + 1))'], {}), '((duplicated_amps, peak_amps * 0.25 ** (d + 
1)))\n', (8731, 8779), True, 'import numpy as np\n'), ((9757, 9774), 'numpy.max', 'np.max', (['peak_amps'], {}), '(peak_amps)\n', (9763, 9774), True, 'import numpy as np\n'), ((10092, 10109), 'numpy.hstack', 'np.hstack', (['(0, w)'], {}), '((0, w))\n', (10101, 10109), True, 'import numpy as np\n'), ((10550, 10582), 'numpy.cumsum', 'np.cumsum', (['number_in_group[::-1]'], {}), '(number_in_group[::-1])\n', (10559, 10582), True, 'import numpy as np\n'), ((11147, 11199), 'numpy.vstack', 'np.vstack', (['(duplicated_weights, duplicated_weightsc)'], {}), '((duplicated_weights, duplicated_weightsc))\n', (11156, 11199), True, 'import numpy as np\n'), ((11597, 11645), 'numpy.hstack', 'np.hstack', (['(prob_matrix, pmcopy * (1 / (i + 1)))'], {}), '((prob_matrix, pmcopy * (1 / (i + 1))))\n', (11606, 11645), True, 'import numpy as np\n'), ((11753, 11797), 'scipy.stats.norm.pdf', 'norm.pdf', (['diff_matrix', 'scaled_mu', 'scaled_std'], {}), '(diff_matrix, scaled_mu, scaled_std)\n', (11761, 11797), False, 'from scipy.stats import norm\n'), ((11800, 11842), 'scipy.stats.norm.pdf', 'norm.pdf', (['scaled_mu', 'scaled_mu', 'scaled_std'], {}), '(scaled_mu, scaled_mu, scaled_std)\n', (11808, 11842), False, 'from scipy.stats import norm\n'), ((12758, 12815), 'numpy.exp', 'np.exp', (['(-0.5 * ((spectral_xdata_ppm - peak) / 0.002) ** 2)'], {}), '(-0.5 * ((spectral_xdata_ppm - peak) / 0.002) ** 2)\n', (12764, 12815), True, 'import numpy as np\n'), ((12872, 12896), 'numpy.amax', 'np.amax', (['simulated_ydata'], {}), '(simulated_ydata)\n', (12879, 12896), True, 'import numpy as np\n'), ((12899, 12928), 'numpy.amax', 'np.amax', (['simulated_calc_ydata'], {}), '(simulated_calc_ydata)\n', (12906, 12928), True, 'import numpy as np\n'), ((13842, 13858), 'numpy.argsort', 'np.argsort', (['calc'], {}), '(calc)\n', (13852, 13858), True, 'import numpy as np\n'), ((14317, 14339), 'numpy.argwhere', 'np.argwhere', (['(swapm < 0)'], {}), '(swapm < 0)\n', (14328, 14339), True, 'import numpy as np\n'), 
((1697, 1725), 'copy.copy', 'copy.copy', (['calculated_shifts'], {}), '(calculated_shifts)\n', (1706, 1725), False, 'import copy\n'), ((2707, 2727), 'numpy.isnan', 'np.isnan', (['pos_matrix'], {}), '(pos_matrix)\n', (2715, 2727), True, 'import numpy as np\n'), ((3088, 3124), 'numpy.hstack', 'np.hstack', (['(pos_matrix, pos_matrixc)'], {}), '((pos_matrix, pos_matrixc))\n', (3097, 3124), True, 'import numpy as np\n'), ((4892, 4951), 'numpy.where', 'np.where', (['((exp_peaks < peak + 10) & (exp_peaks > peak - 10))'], {}), '((exp_peaks < peak + 10) & (exp_peaks > peak - 10))\n', (4900, 4951), True, 'import numpy as np\n'), ((5511, 5538), 'numpy.where', 'np.where', (['(exp_peaks == peak)'], {}), '(exp_peaks == peak)\n', (5519, 5538), True, 'import numpy as np\n'), ((5627, 5646), 'numpy.max', 'np.max', (['div_amps[w]'], {}), '(div_amps[w])\n', (5633, 5646), True, 'import numpy as np\n'), ((6562, 6615), 'numpy.where', 'np.where', (['((exp_peaks < p + 10) & (exp_peaks > p - 10))'], {}), '((exp_peaks < p + 10) & (exp_peaks > p - 10))\n', (6570, 6615), True, 'import numpy as np\n'), ((6932, 6956), 'numpy.where', 'np.where', (['(exp_peaks == i)'], {}), '(exp_peaks == i)\n', (6940, 6956), True, 'import numpy as np\n'), ((7559, 7597), 'numpy.where', 'np.where', (['(new_assigned_labels == label)'], {}), '(new_assigned_labels == label)\n', (7567, 7597), True, 'import numpy as np\n'), ((9960, 10022), 'numpy.where', 'np.where', (['((ddy[1:-1] > ddy1[1:-1]) & (ddy[1:-1] > ddyn1[1:-1]))'], {}), '((ddy[1:-1] > ddy1[1:-1]) & (ddy[1:-1] > ddyn1[1:-1]))\n', (9968, 10022), True, 'import numpy as np\n'), ((10335, 10380), 'numpy.where', 'np.where', (['((peak_amps > m) & (peak_amps <= m1))'], {}), '((peak_amps > m) & (peak_amps <= m1))\n', (10343, 10380), True, 'import numpy as np\n'), ((11331, 11363), 'numpy.sum', 'np.sum', (['duplicated_weights[i, :]'], {}), '(duplicated_weights[i, :])\n', (11337, 11363), True, 'import numpy as np\n'), ((11934, 11959), 'numpy.sum', 'np.sum', 
(['prob_matrix[i, :]'], {}), '(prob_matrix[i, :])\n', (11940, 11959), True, 'import numpy as np\n'), ((13439, 13459), 'openbabel.openbabel.OBAtomAtomIter', 'OBAtomAtomIter', (['atom'], {}), '(atom)\n', (13453, 13459), False, 'from openbabel.openbabel import OBConversion, OBMol, OBAtomAtomIter, OBMolAtomIter\n'), ((1786, 1816), 'copy.copy', 'copy.copy', (['new_assigned_shifts'], {}), '(new_assigned_shifts)\n', (1795, 1816), False, 'import copy\n'), ((1851, 1880), 'copy.copy', 'copy.copy', (['new_assigned_peaks'], {}), '(new_assigned_peaks)\n', (1860, 1880), False, 'import copy\n'), ((2179, 2207), 'copy.copy', 'copy.copy', (['calculated_shifts'], {}), '(calculated_shifts)\n', (2188, 2207), False, 'import copy\n'), ((3325, 3347), 'numpy.sum', 'np.sum', (['prob_matrix', '(1)'], {}), '(prob_matrix, 1)\n', (3331, 3347), True, 'import numpy as np\n'), ((8933, 8962), 'numpy.exp', 'np.exp', (['(-steep * (x - thresh))'], {}), '(-steep * (x - thresh))\n', (8939, 8962), True, 'import numpy as np\n'), ((6700, 6727), 'numpy.argmax', 'np.argmax', (['new_peak_amps[w]'], {}), '(new_peak_amps[w])\n', (6709, 6727), True, 'import numpy as np\n'), ((10945, 10959), 'numpy.max', 'np.max', (['groups'], {}), '(groups)\n', (10951, 10959), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
import argparse
import seaborn as sns
import os
import matplotlib.pyplot as plt
import ptitprince as pt
from matplotlib.patches import PathPatch
sns.set(style="whitegrid", font_scale=1)
def get_parameters():
    """Build and parse the command-line arguments for this script.

    All three path arguments are mandatory; argparse exits with an error
    message if any is missing.
    """
    parser = argparse.ArgumentParser(
        description='Generate figure for spine generic dataset')
    for flags, helptext in (
            (("-ir", "--path-input-results"), "Path to results.csv"),
            (("-ip", "--path-input-participants"), "Path to participants.tsv"),
            (("-o", "--path-output"), "Path to save images")):
        parser.add_argument(*flags, help=helptext, required=True)
    return parser.parse_args()
def adjust_box_widths(g, fac):
    # From https://github.com/mwaskom/seaborn/issues/1076#issuecomment-634541579
    """
    Adjust the widths of a seaborn-generated boxplot.

    Shrinks every box (PathPatch) in every axes of figure ``g`` to ``fac``
    times its current width around its centre, and moves the matching
    median lines to the new box edges.  Mutates the artists in place.
    """
    # iterating through Axes instances
    for ax in g.axes:
        # iterating through axes artists:
        for c in ax.get_children():
            # searching for PathPatches
            if isinstance(c, PathPatch):
                # getting current width of box:
                p = c.get_path()
                verts = p.vertices
                # last vertex closes the path; exclude it from min/max
                verts_sub = verts[:-1]
                xmin = np.min(verts_sub[:, 0])
                xmax = np.max(verts_sub[:, 0])
                xmid = 0.5*(xmin+xmax)
                xhalf = 0.5*(xmax - xmin)
                # setting new width of box
                xmin_new = xmid-fac*xhalf
                xmax_new = xmid+fac*xhalf
                verts_sub[verts_sub[:, 0] == xmin, 0] = xmin_new
                verts_sub[verts_sub[:, 0] == xmax, 0] = xmax_new
                # setting new width of median line
                for l in ax.lines:
                    if not l.get_xdata().size == 0:
                        # a line spanning exactly [xmin, xmax] belongs to this box
                        if np.all(np.equal(l.get_xdata(), [xmin, xmax])):
                            l.set_xdata([xmin_new, xmax_new])
def generate_figure(data_in, column, path_output):
    """Save a raincloud plot of ``column`` grouped by manufacturer.

    Draws a ptitprince RainCloud (half-violin + boxplot + scatter) of
    ``data_in[column]`` with one hue per value of the ``Manufacturer``
    column, then writes it to ``<path_output>/figure_<column>`` at 300 dpi.
    """
    # Hue Input for Subgroups
    dx = np.ones(len(data_in[column]))
    dy = column
    dhue = "Manufacturer"
    ort = "v"
    # dodge blue, limegreen, red
    colors = [ "#1E90FF", "#32CD32","#FF0000" ]
    pal = colors
    # bandwidth of the half-violin KDE
    sigma = .2
    f, ax = plt.subplots(figsize=(4, 6))
    ax = pt.RainCloud(x=dx, y=dy, hue=dhue, data=data_in, palette=pal, bw=sigma,
                      width_viol=.5, ax=ax, orient=ort, alpha=.4, dodge=True, width_box=.35,
                      box_showmeans=True,
                      box_meanprops={"marker":"^", "markerfacecolor":"black", "markeredgecolor":"black", "markersize":"10"},
                      box_notch=True)
    f.gca().invert_xaxis()
    #adjust boxplot width
    adjust_box_widths(f, 0.4)
    plt.xlabel(column)
    # remove ylabel
    plt.ylabel('')
    # hide xtick
    plt.tick_params(
        axis='x',
        which='both',
        bottom=False,
        top=False,
        labelbottom=False)
    # plt.legend(title="Line", loc='upper left', handles=handles[::-1])
    plt.savefig(os.path.join(path_output, 'figure_' + column), bbox_inches='tight', dpi=300)
def main(path_input_results, path_input_participants, path_output):
    """Merge manufacturer info into the results table and generate figures.

    Reads results.csv and participants.tsv, copies each subject's
    manufacturer from the participants table into a 'Manufacturer' column
    of the results table, then renders the SNR_single and Contrast
    raincloud figures into ``path_output`` (created if missing).
    """
    if not os.path.isdir(path_output):
        os.makedirs(path_output)
    results = pd.read_csv(path_input_results, sep=",")
    participants = pd.read_csv(path_input_participants, encoding="ISO-8859-1", sep="\t")
    # Copy the manufacturer of every subject present in the results table.
    for subj in results['Subject'].tolist():
        part_idx = participants[participants['participant_id'] == subj].index
        res_idx = results[results['Subject'] == subj].index
        results.loc[res_idx, 'Manufacturer'] = participants.loc[part_idx]['manufacturer'].values[0]
    for column in ('SNR_single', 'Contrast'):
        generate_figure(results, column, path_output)
if __name__ == "__main__":
    # Script entry point: parse the CLI paths and generate both figures.
    args = get_parameters()
    main(args.path_input_results, args.path_input_participants, args.path_output)
| [
"argparse.ArgumentParser",
"os.path.join",
"os.makedirs",
"pandas.read_csv",
"os.path.isdir",
"matplotlib.pyplot.subplots",
"numpy.min",
"numpy.max",
"ptitprince.RainCloud",
"matplotlib.pyplot.tick_params",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"seaborn.set"
] | [((185, 225), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""', 'font_scale': '(1)'}), "(style='whitegrid', font_scale=1)\n", (192, 225), True, 'import seaborn as sns\n'), ((263, 348), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generate figure for spine generic dataset"""'}), "(description='Generate figure for spine generic dataset'\n )\n", (286, 348), False, 'import argparse\n'), ((2444, 2472), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(4, 6)'}), '(figsize=(4, 6))\n', (2456, 2472), True, 'import matplotlib.pyplot as plt\n'), ((2483, 2788), 'ptitprince.RainCloud', 'pt.RainCloud', ([], {'x': 'dx', 'y': 'dy', 'hue': 'dhue', 'data': 'data_in', 'palette': 'pal', 'bw': 'sigma', 'width_viol': '(0.5)', 'ax': 'ax', 'orient': 'ort', 'alpha': '(0.4)', 'dodge': '(True)', 'width_box': '(0.35)', 'box_showmeans': '(True)', 'box_meanprops': "{'marker': '^', 'markerfacecolor': 'black', 'markeredgecolor': 'black',\n 'markersize': '10'}", 'box_notch': '(True)'}), "(x=dx, y=dy, hue=dhue, data=data_in, palette=pal, bw=sigma,\n width_viol=0.5, ax=ax, orient=ort, alpha=0.4, dodge=True, width_box=\n 0.35, box_showmeans=True, box_meanprops={'marker': '^',\n 'markerfacecolor': 'black', 'markeredgecolor': 'black', 'markersize':\n '10'}, box_notch=True)\n", (2495, 2788), True, 'import ptitprince as pt\n'), ((2940, 2958), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['column'], {}), '(column)\n', (2950, 2958), True, 'import matplotlib.pyplot as plt\n'), ((2983, 2997), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['""""""'], {}), "('')\n", (2993, 2997), True, 'import matplotlib.pyplot as plt\n'), ((3019, 3106), 'matplotlib.pyplot.tick_params', 'plt.tick_params', ([], {'axis': '"""x"""', 'which': '"""both"""', 'bottom': '(False)', 'top': '(False)', 'labelbottom': '(False)'}), "(axis='x', which='both', bottom=False, top=False,\n labelbottom=False)\n", (3034, 3106), True, 'import matplotlib.pyplot as plt\n'), ((3478, 3518), 
'pandas.read_csv', 'pd.read_csv', (['path_input_results'], {'sep': '""","""'}), "(path_input_results, sep=',')\n", (3489, 3518), True, 'import pandas as pd\n'), ((3550, 3619), 'pandas.read_csv', 'pd.read_csv', (['path_input_participants'], {'encoding': '"""ISO-8859-1"""', 'sep': '"""\t"""'}), "(path_input_participants, encoding='ISO-8859-1', sep='\\t')\n", (3561, 3619), True, 'import pandas as pd\n'), ((3232, 3277), 'os.path.join', 'os.path.join', (['path_output', "('figure_' + column)"], {}), "(path_output, 'figure_' + column)\n", (3244, 3277), False, 'import os\n'), ((3390, 3416), 'os.path.isdir', 'os.path.isdir', (['path_output'], {}), '(path_output)\n', (3403, 3416), False, 'import os\n'), ((3426, 3450), 'os.makedirs', 'os.makedirs', (['path_output'], {}), '(path_output)\n', (3437, 3450), False, 'import os\n'), ((1453, 1476), 'numpy.min', 'np.min', (['verts_sub[:, 0]'], {}), '(verts_sub[:, 0])\n', (1459, 1476), True, 'import numpy as np\n'), ((1500, 1523), 'numpy.max', 'np.max', (['verts_sub[:, 0]'], {}), '(verts_sub[:, 0])\n', (1506, 1523), True, 'import numpy as np\n')] |
import os
import numpy as np
import scipy.stats as stats
import matplotlib
# If there is $DISPLAY, display the plot
if os.name == 'posix' and "DISPLAY" not in os.environ:
matplotlib.use('Agg')
import matplotlib.pyplot as plt # noqa: E402
import glob
import pathlib
def plotgraph(n=100, agent='SimForgAgentWith', site='50-50'):
    """Plot median food-collected and dead-agent curves with IQR bands.

    Loads all simulation runs for (n, agent, site), plots the per-step
    median of the food and dead-agent percentages with shaded
    25th-75th-percentile bands, and saves the figure as PNG under
    /tmp/swarm/data/experiments/<n>/<agent>.

    NOTE(review): the output filename is nadir + site + 'foraging.png'
    with no path separator before the site — presumably intentional,
    but worth confirming.
    """
    # folders = pathlib.Path(folder).glob("1616*")
    # # print(folders)
    # flist = []
    # data = []
    # for f in folders:
    #     flist = [p for p in pathlib.Path(f).iterdir() if p.is_file()]
    #     _, _, d = np.genfromtxt(flist[0], autostrip=True, unpack=True, delimiter='|')
    #     data.append(d)
    # # print(flist)
    # data = np.array(data)
    # print(data.shape)
    # data = read_data_n_agent(n=n, agent=agent)
    dataf, datad = read_data_n_agent_site(n=n, agent=agent, site=site)
    # print(data.shape)
    # Per-timestep median and quartiles across runs (axis 0 = runs).
    medianf = np.quantile(dataf, 0.5, axis=0)
    q1f = np.quantile(dataf, 0.25, axis=0)
    q3f = np.quantile(dataf, 0.75, axis=0)
    mediand = np.quantile(datad, 0.5, axis=0)
    q1d = np.quantile(datad, 0.25, axis=0)
    q3d = np.quantile(datad, 0.75, axis=0)
    # print(median.shape, q1.shape, q3.shape)
    color = [
        'forestgreen', 'indianred',
        'gold', 'tomato', 'royalblue']
    colorshade = [
        'springgreen', 'lightcoral',
        'khaki', 'lightsalmon', 'deepskyblue']
    plt.style.use('fivethirtyeight')
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    xvalues = range(dataf.shape[1])
    ax1.plot(
        xvalues, medianf, color=color[0],
        linewidth=1.0, label='Food')
    ax1.fill_between(
        xvalues, q3f, q1f,
        color=colorshade[0], alpha=0.3)
    ax1.plot(
        xvalues, mediand, color=color[1],
        linewidth=1.0, label='Dead Agents')
    ax1.fill_between(
        xvalues, q3d, q1d,
        color=colorshade[1], alpha=0.3)
    plt.title('Foraging')
    # ax1.legend(title='$\it{m}$')
    ax1.set_xlabel('Steps')
    ax1.set_ylabel('%')
    # ax1.set_xticks(
    #     np.linspace(0, data.shape[-1], 5))
    plt.legend()
    plt.tight_layout()
    # fig.savefig(
    #     '/tmp/goal/data/experiments/' + pname + '.pdf')
    maindir = '/tmp/swarm/data/experiments/'
    nadir = os.path.join(maindir, str(n), agent)
    fig.savefig(
        nadir + str(site) + 'foraging' + '.png')
    plt.close(fig)
def read_data_n_agent(n=100, agent='SimForgAgentWith'):
    """Load the foraging trace from every simulation run for (n, agent).

    Scans /tmp/swarm/data/experiments/<n>/<agent>/*ForagingSim*/ and reads
    the third pipe-delimited column of the first file found in each run
    folder.  Returns a 2-D array with one row per run.
    """
    expdir = pathlib.Path('/tmp/swarm/data/experiments') / str(n) / agent
    rows = []
    for rundir in expdir.glob("*ForagingSim*"):
        files = [entry for entry in pathlib.Path(rundir).iterdir() if entry.is_file()]
        columns = np.genfromtxt(files[0], autostrip=True, unpack=True, delimiter='|')
        rows.append(columns[2])
    return np.array(rows)
def read_data_n_agent_site(n=100, agent='SimForgAgentWith', site='5050'):
    """Collect food and dead-agent curves for one site configuration.

    Reads every ``*ForagingSim*`` run folder under
    ``/tmp/swarm/data/experiments/<n>/<agent>/<site>``.  Each run file is
    pipe-delimited with four columns; the third (food) and fourth (dead
    agents) are kept.  Runs whose folder contains no file are skipped.

    Parameters
    ----------
    n : int
        Swarm size (sub-directory name).
    agent : str
        Agent behaviour name (sub-directory name).
    site : str
        Site-location string (sub-directory name).

    Returns
    -------
    tuple of numpy.ndarray
        ``(food, dead)``, one row per successfully parsed run.
    """
    base = os.path.join('/tmp/swarm/data/experiments/', str(n), agent, site)
    food_rows = []
    dead_rows = []
    for run_dir in pathlib.Path(base).glob("*ForagingSim*"):
        run_files = [entry for entry in pathlib.Path(run_dir).iterdir()
                     if entry.is_file()]
        try:
            _, _, food, dead = np.genfromtxt(run_files[0], autostrip=True,
                                          unpack=True, delimiter='|')
        except IndexError:
            # Empty run folder: run_files[0] does not exist.
            continue
        food_rows.append(food)
        dead_rows.append(dead)
    return np.array(food_rows), np.array(dead_rows)
def boxplot(agent='SimForgAgentWith'):
    """Box-plot final foraging percentage vs. swarm size for one agent type.

    For each swarm size the last time-step of every run (collected via
    ``read_data_n_agent``) is drawn as one box.  The figure is saved as
    ``/tmp/swarm/data/experiments/agentscomp<agent>.png``.

    Parameters
    ----------
    agent : str
        Agent behaviour name used to locate the experiment data.
    """
    # Final (steady-state) value of every run, one list entry per swarm size.
    data = [read_data_n_agent(n, agent)[:, -1] for n in [100, 200, 300, 400]]
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1)
    # One facecolor per box, in plotting order.
    colordict = {
        0: 'forestgreen',
        1: 'indianred',
        2: 'gold',
        3: 'tomato',
        4: 'royalblue'}
    labels = [100, 200, 300, 400]
    medianprops = dict(linewidth=2.5, color='firebrick')
    meanprops = dict(linewidth=2.5, color='#ff7f0e')
    bp1 = ax1.boxplot(
        data, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp1['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax1.legend(zip(bp1['boxes']), labels, fontsize="small",
               loc="upper right", title='no. of agents')
    ax1.set_xticklabels(labels)
    ax1.set_xlabel('No. of agents ')
    ax1.set_ylabel('Foraging Percentage')
    ax1.set_title('Swarm Foraging')
    plt.tight_layout()
    maindir = '/tmp/swarm/data/experiments/'
    fname = 'agentscomp' + agent
    fig.savefig(os.path.join(maindir, fname + '.png'))
    plt.close(fig)
def boxplotsiteloc(agent='SimForgAgentWith', site='5050'):
    """Box-plot foraging %, dead-agent count and dead-agent % vs. swarm size.

    Three stacked sub-plots for one agent type and one site location:
    the final foraging percentage, the number of dead agents, and dead
    agents as a percentage of the swarm.  The figure is written under
    ``/tmp/swarm/data/experiments/``.

    Parameters
    ----------
    agent : str
        Agent behaviour name.
    site : str
        Site-location string (e.g. ``'5050'``) identifying the experiment.
    """
    agents = [50, 100, 200, 300, 400]
    print(agent, site)
    # Final time-step of every run, one list entry per swarm size.
    dataf = [read_data_n_agent_site(n, agent, site=site)[0][:, -1] for n in agents]
    datad = [read_data_n_agent_site(n, agent, site=site)[1][:, -1] for n in agents]
    # Dead agents expressed as a percentage of the swarm size.
    datadp = [(datad[i] / agents[i]) * 100 for i in range(len(agents))]
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    # One facecolor per box, in plotting order.
    colordict = {
        0: 'forestgreen',
        1: 'indianred',
        2: 'gold',
        3: 'tomato',
        4: 'royalblue',
        5: 'orchid',
        6: 'olivedrab',
        7: 'peru',
        8: 'linen'}
    labels = ['50', '100', '200', '300', '400']
    medianprops = dict(linewidth=2.5, color='firebrick')
    meanprops = dict(linewidth=2.5, color='#ff7f0e')
    bp1 = ax1.boxplot(
        dataf, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp1['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax1.legend(zip(bp1['boxes']), labels, fontsize="small",
               loc="upper right", title='Agent Size')
    ax1.set_xticklabels(labels)
    ax1.set_xlabel('Agent size')
    ax1.set_ylabel('Foraging Percentage')
    ax1.set_title('Swarm Foraging with distance ' + site[-2:])
    ax2 = fig.add_subplot(3, 1, 2)
    bp2 = ax2.boxplot(
        datad, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp2['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax2.set_xticklabels(labels)
    ax2.set_xlabel('Agent size')
    ax2.set_ylabel('No. Dead Agents')
    ax3 = fig.add_subplot(3, 1, 3)
    bp3 = ax3.boxplot(
        datadp, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp3['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax3.set_xticklabels(labels)
    ax3.set_xlabel('Agent size')
    ax3.set_ylabel('Dead Agents %')
    plt.tight_layout()
    maindir = '/tmp/swarm/data/experiments/'
    # NOTE(review): os.path.join(maindir, str(50)) has no trailing separator,
    # so the file saved is '50<agent><site>agentsitecomp.png' inside the
    # experiments directory — presumably intentional; confirm.
    nadir = os.path.join(maindir, str(50))
    fig.savefig(
        nadir + agent + site + 'agentsitecomp' + '.png')
    plt.close(fig)
def boxplotallsites(agent='SimForgAgentWith'):
    """Box-plot foraging performance pooled over all eight site locations.

    For each swarm size, the final-step foraging percentage, dead-agent
    count and dead-agent percentage of all runs from every site location
    are stacked together and drawn as one box per swarm size, in three
    stacked sub-plots.  The figure is written under
    ``/tmp/swarm/data/experiments/50/``.

    Parameters
    ----------
    agent : str
        Agent behaviour name.
    """
    sites = ['-3131', '31-31', '3131', '-5151', '51-51', '5151', '-9191', '91-91']
    agents = [50, 100, 200, 300, 400]
    datasf = []
    datasd = []
    datasdp = []
    for n in agents:
        # Final time-step of every run at every site for this swarm size.
        dataf = [read_data_n_agent_site(n, agent, site=site)[0][:, -1] for site in sites]
        datad = [read_data_n_agent_site(n, agent, site=site)[1][:, -1] for site in sites]
        # Dead agents expressed as a percentage of the swarm size.
        datadp = [(d / n) * 100.0 for d in datad]
        datasf.append(np.hstack(dataf))
        datasd.append(np.hstack(datad))
        datasdp.append(np.hstack(datadp))
    fig = plt.figure()
    ax1 = fig.add_subplot(3, 1, 1)
    # One facecolor per box, in plotting order.
    colordict = {
        0: 'forestgreen',
        1: 'indianred',
        2: 'gold',
        3: 'tomato',
        4: 'royalblue',
        5: 'orchid',
        6: 'olivedrab',
        7: 'peru',
        8: 'linen'}
    labels = ['50', '100', '200', '300', '400']
    medianprops = dict(linewidth=2.5, color='firebrick')
    meanprops = dict(linewidth=2.5, color='#ff7f0e')
    bp1 = ax1.boxplot(
        datasf, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp1['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax1.legend(zip(bp1['boxes']), labels, fontsize="small",
               loc="upper right", title='Agent Size')
    ax1.set_xticklabels(labels)
    ax1.set_xlabel('Agent size')
    ax1.set_ylabel('Foraging Percentage')
    ax2 = fig.add_subplot(3, 1, 2)
    bp2 = ax2.boxplot(
        datasd, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp2['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax2.set_xticklabels(labels)
    ax2.set_xlabel('Agent size')
    ax2.set_ylabel('No. Dead Agents')
    ax3 = fig.add_subplot(3, 1, 3)
    bp3 = ax3.boxplot(
        datasdp, 0, 'gD', showmeans=True, meanline=True,
        patch_artist=True, medianprops=medianprops,
        meanprops=meanprops)
    for patch, color in zip(bp3['boxes'], colordict.values()):
        patch.set_facecolor(color)
    ax3.set_xticklabels(labels)
    ax3.set_xlabel('Agent size')
    ax3.set_ylabel('Dead Agents %')
    plt.tight_layout()
    maindir = '/tmp/swarm/data/experiments/'
    nadir = os.path.join(maindir, str(50), agent)
    fig.savefig(
        nadir + 'agentallsitecomp' + '.png')
    plt.close(fig)
def main():
    """Entry point: pooled all-site box plots for both foraging agent types.

    Other analyses (per-site ``plotgraph``, ``boxplot``,
    ``boxplotsiteloc``) can be wired in here as needed; only the pooled
    all-site comparison is run by default.
    """
    atype = ['SimForgAgentWith', 'SimForgAgentWithout']
    for a in atype:
        boxplotallsites(a)
if __name__ == '__main__':
main() | [
"matplotlib.pyplot.title",
"numpy.quantile",
"matplotlib.pyplot.close",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"numpy.hstack",
"matplotlib.pyplot.style.use",
"matplotlib.use",
"matplotlib.pyplot.figure",
"numpy.array",
"pathlib.Path",
"matplotlib.pyplot.tight_layout"
] | [((176, 197), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (190, 197), False, 'import matplotlib\n'), ((887, 918), 'numpy.quantile', 'np.quantile', (['dataf', '(0.5)'], {'axis': '(0)'}), '(dataf, 0.5, axis=0)\n', (898, 918), True, 'import numpy as np\n'), ((929, 961), 'numpy.quantile', 'np.quantile', (['dataf', '(0.25)'], {'axis': '(0)'}), '(dataf, 0.25, axis=0)\n', (940, 961), True, 'import numpy as np\n'), ((972, 1004), 'numpy.quantile', 'np.quantile', (['dataf', '(0.75)'], {'axis': '(0)'}), '(dataf, 0.75, axis=0)\n', (983, 1004), True, 'import numpy as np\n'), ((1020, 1051), 'numpy.quantile', 'np.quantile', (['datad', '(0.5)'], {'axis': '(0)'}), '(datad, 0.5, axis=0)\n', (1031, 1051), True, 'import numpy as np\n'), ((1062, 1094), 'numpy.quantile', 'np.quantile', (['datad', '(0.25)'], {'axis': '(0)'}), '(datad, 0.25, axis=0)\n', (1073, 1094), True, 'import numpy as np\n'), ((1105, 1137), 'numpy.quantile', 'np.quantile', (['datad', '(0.75)'], {'axis': '(0)'}), '(datad, 0.75, axis=0)\n', (1116, 1137), True, 'import numpy as np\n'), ((1380, 1412), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""fivethirtyeight"""'], {}), "('fivethirtyeight')\n", (1393, 1412), True, 'import matplotlib.pyplot as plt\n'), ((1423, 1435), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1433, 1435), True, 'import matplotlib.pyplot as plt\n'), ((1883, 1904), 'matplotlib.pyplot.title', 'plt.title', (['"""Foraging"""'], {}), "('Foraging')\n", (1892, 1904), True, 'import matplotlib.pyplot as plt\n'), ((2064, 2076), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2074, 2076), True, 'import matplotlib.pyplot as plt\n'), ((2081, 2099), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (2097, 2099), True, 'import matplotlib.pyplot as plt\n'), ((2342, 2356), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (2351, 2356), True, 'import matplotlib.pyplot as plt\n'), ((2806, 2820), 'numpy.array', 
'np.array', (['data'], {}), '(data)\n', (2814, 2820), True, 'import numpy as np\n'), ((3469, 3484), 'numpy.array', 'np.array', (['dataf'], {}), '(dataf)\n', (3477, 3484), True, 'import numpy as np\n'), ((3497, 3512), 'numpy.array', 'np.array', (['datad'], {}), '(datad)\n', (3505, 3512), True, 'import numpy as np\n'), ((3787, 3799), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3797, 3799), True, 'import matplotlib.pyplot as plt\n'), ((5030, 5048), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5046, 5048), True, 'import matplotlib.pyplot as plt\n'), ((5221, 5235), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (5230, 5235), True, 'import matplotlib.pyplot as plt\n'), ((5688, 5700), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5698, 5700), True, 'import matplotlib.pyplot as plt\n'), ((8188, 8206), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8204, 8206), True, 'import matplotlib.pyplot as plt\n'), ((8504, 8518), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8513, 8518), True, 'import matplotlib.pyplot as plt\n'), ((9229, 9241), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9239, 9241), True, 'import matplotlib.pyplot as plt\n'), ((11734, 11752), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (11750, 11752), True, 'import matplotlib.pyplot as plt\n'), ((12046, 12060), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (12055, 12060), True, 'import matplotlib.pyplot as plt\n'), ((2704, 2771), 'numpy.genfromtxt', 'np.genfromtxt', (['flist[0]'], {'autostrip': '(True)', 'unpack': '(True)', 'delimiter': '"""|"""'}), "(flist[0], autostrip=True, unpack=True, delimiter='|')\n", (2717, 2771), True, 'import numpy as np\n'), ((2523, 2542), 'pathlib.Path', 'pathlib.Path', (['nadir'], {}), '(nadir)\n', (2535, 2542), False, 'import pathlib\n'), ((3027, 3046), 'pathlib.Path', 'pathlib.Path', (['nadir'], 
{}), '(nadir)\n', (3039, 3046), False, 'import pathlib\n'), ((3271, 3338), 'numpy.genfromtxt', 'np.genfromtxt', (['flist[0]'], {'autostrip': '(True)', 'unpack': '(True)', 'delimiter': '"""|"""'}), "(flist[0], autostrip=True, unpack=True, delimiter='|')\n", (3284, 3338), True, 'import numpy as np\n'), ((9119, 9135), 'numpy.hstack', 'np.hstack', (['dataf'], {}), '(dataf)\n', (9128, 9135), True, 'import numpy as np\n'), ((9159, 9175), 'numpy.hstack', 'np.hstack', (['datad'], {}), '(datad)\n', (9168, 9175), True, 'import numpy as np\n'), ((9200, 9217), 'numpy.hstack', 'np.hstack', (['datadp'], {}), '(datadp)\n', (9209, 9217), True, 'import numpy as np\n'), ((2644, 2659), 'pathlib.Path', 'pathlib.Path', (['f'], {}), '(f)\n', (2656, 2659), False, 'import pathlib\n'), ((3164, 3179), 'pathlib.Path', 'pathlib.Path', (['f'], {}), '(f)\n', (3176, 3179), False, 'import pathlib\n')] |
import logging
import os
import errno
import datetime as dt
import progressbar
import numpy as np
from .neuralnet import NeuralNetDetector, NeuralNetTriage
from .preprocess.detect import threshold_detection
from .preprocess.filter import whitening_matrix, whitening, butterworth
from .preprocess.score import get_score_pca, get_pca_suff_stat, get_pca_projection
from .preprocess.waveform import get_waveforms
from .preprocess.standarize import standarize, sd
from .util import deprecated
@deprecated('Use function in preprocess module, see examples/preprocess.py')
class Preprocessor(object):
    """Batch-wise preprocessing pipeline for a raw multichannel recording.

    Reads a binary recording in batches (with zero-padded edge buffers),
    filters, standardizes, whitens and detects spikes, writing the
    whitened data to a temporary file along the way.
    """
    # root: the absolute path to the location of the file
    # filename: name of the recording file which is binary
    # r_type: type of recording (int16, float, etc.)
    # nChan: number of channels
    # memory_allowance: maximum main memory allowance
    def __init__(self, config):
        """Store *config* and make sure the tmp working directory exists.

        Parameters
        ----------
        config : object
            Configuration object; attributes such as root, filename,
            dtype, dsize, nChan, batch_size, BUFF, nBatches and the
            detection settings are read throughout this class.
        """
        self.config = config
        # initialize file handler
        self.File = None
        self.WFile = None
        # make tmp directory if not exist
        try:
            os.makedirs(os.path.join(config.root, 'tmp'))
        except OSError as exception:
            # only ignore "already exists"; re-raise anything else
            if exception.errno != errno.EEXIST:
                raise
        self.logger = logging.getLogger(__name__)
    def openWFile(self, opt):
        """Open the whitened-recording file tmp/wrec.bin with mode *opt*."""
        self.WFile = open(os.path.join(
            self.config.root, 'tmp', 'wrec.bin'), opt)
    def closeWFile(self):
        """Close the whitened-recording file if it is open."""
        if self.WFile == None:
            return
        else:
            self.WFile.close()
            self.WFile = None
    # opens the binary file to create a buffer
    def openFile(self):
        """(Re)open the raw recording file for binary reading."""
        self.closeFile()
        self.File = open(os.path.join(
            self.config.root, self.config.filename), 'rb')
    def closeFile(self):
        """Close the raw recording file if it is open."""
        if self.File == None:
            return
        else:
            self.File.close()
            self.File = None
    # loads a chunk of binary data into memory
    # offset should be in terms of timesamples
    def load(self, offset, length):
        """Read *length* time samples starting at sample *offset*.

        Returns
        -------
        numpy.ndarray
            Array of shape (length, nChan) decoded with the configured
            dtype.
        """
        dsize = self.config.dsize
        self.File.seek(offset*dsize*self.config.nChan)
        rec = self.File.read(dsize*self.config.nChan*length)
        # NOTE(review): np.fromstring is deprecated in favour of
        # np.frombuffer; behaviour here is unchanged but worth migrating.
        rec = np.fromstring(rec, dtype=self.config.dtype)
        rec = rec.reshape(length, self.config.nChan)
        return rec
    # chunk should be in C x T format
    # format: format of the recording to be saved
    #   's': standard flattened
    #   't': temporally readable
    def save(self, fid, chunk, _format='s'):
        """Write *chunk* to the open file object *fid* in the configured dtype.

        's' flattens row-major; any other value transposes first so the
        data is stored channel-major.
        """
        if _format == 's':
            chunk = chunk.reshape(chunk.shape[0]*chunk.shape[1])
            chunk.astype(self.config.dtype).tofile(fid)
        else:
            chunk = chunk.transpose().reshape(chunk.shape[0]*chunk.shape[1])
            chunk.astype(self.config.dtype).tofile(fid)
    def addZeroBuffer(self, rec, buffSize, option):
        """Pad *rec* with *buffSize* rows of zeros.

        option 0 prepends, 1 appends, 2 pads both ends; any other value
        falls through and returns None.
        """
        buff = np.zeros((buffSize, rec.shape[1]))
        if option == 0:
            return np.append(buff, rec, axis=0)
        elif option == 1:
            return np.append(rec, buff, axis=0)
        elif option == 2:
            return np.concatenate((buff, rec, buff), axis=0)
    def process(self):
        """Run the whole preprocessing pipeline over the recording.

        Processes the recording batch by batch (each batch zero-padded
        with BUFF samples at the edges), accumulating spike indexes and
        scores; when the neural-net detector is not used, spike scores
        are computed afterwards by PCA projection over the whitened
        recording written to tmp/wrec.bin.

        Returns
        -------
        tuple
            (score, spike_index_clear, spike_index_collision).
        """
        # r: reading, f: filtering, s: standardization, d: detection, p: pca
        # w: whitening, e: extracting and saving waveform
        startTime = dt.datetime.now()
        Time = {'r': 0, 'f': 0, 's': 0, 'd': 0, 'w': 0, 'b': 0, 'e': 0}
        # load neural net detector if necessary:
        if self.config.detctionMethod == 'nn':
            self.nnDetector = NeuralNetDetector(self.config)
            self.proj = self.nnDetector.load_w_ae()
            self.nnTriage = NeuralNetTriage(self.config)
        self.openFile()
        self.openWFile('wb')
        batch_size = self.config.batch_size
        BUFF = self.config.BUFF
        nBatches = self.config.nBatches
        nPortion = self.config.nPortion
        residual = self.config.residual
        # initialize output variables
        get_score = 1
        spike_index_clear = None
        spike_index_collision = None
        score = None
        pca_suff_stat = None
        spikes_per_channel = None
        self.logger.info("Preprocessing the data in progress...")
        bar = progressbar.ProgressBar(maxval=nBatches)
        for i in range(0, nBatches):
            # reading data; every batch is padded to batch_size+2*BUFF rows,
            # with real neighbouring data where available and zeros at the
            # recording edges (and after the residual of the last batch)
            _b = dt.datetime.now()
            if nBatches == 1:
                rec = self.load(0, batch_size)
                rec = self.addZeroBuffer(rec, BUFF, 2)
            elif i == 0:
                rec = self.load(i*batch_size, batch_size+BUFF)
                rec = self.addZeroBuffer(rec, BUFF, 0)
            elif i < nBatches-1:
                rec = self.load(i*batch_size-BUFF, batch_size+2*BUFF)
            elif residual == 0:
                rec = self.load(i*batch_size-BUFF, batch_size+BUFF)
                rec = self.addZeroBuffer(rec, BUFF, 1)
            else:
                rec = self.load(i*batch_size-BUFF, residual+BUFF)
                rec = self.addZeroBuffer(rec, BUFF+(batch_size-residual), 1)
            Time['r'] += (dt.datetime.now()-_b).total_seconds()
            # scores are only computed for the first nPortion+1 batches
            if i > nPortion:
                get_score = 0
            (si_clr_batch, score_batch,
             si_col_batch, pss_batch,
             spc_batch, Time) = self.batch_process(rec, get_score,
                                                   BUFF, Time)
            # spike time w.r.t. to the whole recording
            si_clr_batch[:,0] = si_clr_batch[:,0] + i*batch_size - BUFF
            si_col_batch[:,0] = si_col_batch[:,0] + i*batch_size - BUFF
            if i == 0:
                spike_index_clear = si_clr_batch
                spike_index_collision = si_col_batch
                score = score_batch
                pca_suff_stat = pss_batch
                spikes_per_channel = spc_batch
            else:
                spike_index_clear = np.vstack((spike_index_clear,
                                               si_clr_batch))
                spike_index_collision = np.vstack((spike_index_collision,
                                                   si_col_batch))
                if get_score == 1:
                    score = np.concatenate((score, score_batch), axis = 0)
                pca_suff_stat += pss_batch
                spikes_per_channel += spc_batch
            bar.update(i+1)
        self.closeFile()
        self.closeWFile()
        # without the neural net, scores come from a PCA projection fitted
        # on the accumulated sufficient statistics
        if self.config.detctionMethod != 'nn':
            _b = dt.datetime.now()
            rot = get_pca_projection(pca_suff_stat, spikes_per_channel,
                                     self.config.nFeat, self.config.neighChannels)
            score = get_score_pca(spike_index_clear, rot,
                                  self.config.neighChannels,
                                  self.config.geom,
                                  self.config.batch_size,
                                  self.config.BUFF,
                                  self.config.nBatches,
                                  os.path.join(self.config.root, 'tmp', 'wrec.bin'),
                                  self.config.scaleToSave)
            Time['e'] += (dt.datetime.now()-_b).total_seconds()
        # timing
        currentTime = dt.datetime.now()
        self.logger.info("Preprocessing done in {0} seconds.".format(
            (currentTime-startTime).seconds))
        self.logger.info("\treading data:\t{0} seconds".format(Time['r']))
        self.logger.info("\tfiltering:\t{0} seconds".format(Time['f']))
        self.logger.info("\tstandardization:\t{0} seconds".format(Time['s']))
        self.logger.info("\tdetection:\t{0} seconds".format(Time['d']))
        self.logger.info("\twhitening:\t{0} seconds".format(Time['w']))
        self.logger.info("\tsaving recording:\t{0} seconds".format(Time['b']))
        self.logger.info("\tgetting waveforms:\t{0} seconds".format(Time['e']))
        bar.finish()
        return score, spike_index_clear, spike_index_collision
    def batch_process(self, rec, get_score, BUFF, Time):
        """Preprocess one zero-padded batch of the recording.

        Filters, standardizes, detects spikes (neural net or threshold),
        whitens and writes the batch to the whitened-recording file.

        Returns
        -------
        tuple
            (spike_index_clear, score, spike_index_collision,
            pca_suff_stat, spikes_per_channel, Time).
        """
        # filter recording
        if self.config.doFilter == 1:
            _b = dt.datetime.now()
            rec = butterworth(rec, self.config.filterLow,
                              self.config.filterHighFactor,
                              self.config.filterOrder,
                              self.config.srate)
            Time['f'] += (dt.datetime.now()-_b).total_seconds()
        # standardize recording
        _b = dt.datetime.now()
        if not hasattr(self, 'sd'):
            # estimate the noise s.d. once, from the first batch seen
            self.sd = sd(rec, self.config.srate)
        rec = standarize(rec, self.sd)
        Time['s'] += (dt.datetime.now()-_b).total_seconds()
        # detect spikes
        _b = dt.datetime.now()
        if self.config.detctionMethod == 'nn':
            spike_index = self.nnDetector.get_spikes(rec)
        else:
            spike_index = threshold_detection(rec,
                                                self.config.neighChannels,
                                                self.config.spikeSize,
                                                self.config.stdFactor)
        # From Peter: When the recording is too long, I load them by
        # little chunk by chunk (chunk it time-wise). But I also add
        # some buffer. If the detected spike time is in the buffer,
        # i remove that because it will be detected in another chunk
        spike_index = spike_index[np.logical_and(spike_index[:, 0] > BUFF,
                                   spike_index[:, 0] < (rec.shape[0] - BUFF))]
        Time['d'] += (dt.datetime.now()-_b).total_seconds()
        # get whitening matrix per batch or once in total
        if self.config.doWhitening == 1:
            _b = dt.datetime.now()
            if self.config.whitenBatchwise or not hasattr(self, 'Q'):
                self.Q = whitening_matrix(rec, self.config.neighChannels,
                                          self.config.spikeSize)
            rec = whitening(rec, self.Q)
            Time['w'] += (dt.datetime.now()-_b).total_seconds()
        _b = dt.datetime.now()
        # persist the (whitened) batch, scaled to the storage dtype
        self.save(self.WFile, rec*self.config.scaleToSave)
        Time['b'] += (dt.datetime.now()-_b).total_seconds()
        _b = dt.datetime.now()
        if get_score == 0:
            # if we are not calculating scores for this minibatch, every spike
            # is considered a collision and will be handled during deconvolution
            spike_index_clear = np.zeros((0,2), 'int32')
            score = None
            spike_index_collision = spike_index
            pca_suff_stat = 0
            spikes_per_channel = 0
        elif self.config.detctionMethod == 'nn':
            # with nn, get scores and triage bad ones
            (spike_index_clear, score,
             spike_index_collision) = get_waveforms(rec,
                                                   spike_index,
                                                   self.proj,
                                                   self.config.neighChannels,
                                                   self.config.geom,
                                                   self.nnTriage,
                                                   self.config.nnThreshdoldCol)
            # since we already have scores, no need to calculate sufficient
            # statistics for pca
            pca_suff_stat = 0
            spikes_per_channel = 0
        elif self.config.detctionMethod == 'threshold':
            # every spike is considered a clear spike as no triage is done
            spike_index_clear = spike_index
            score = None
            spike_index_collision = np.zeros((0,2), 'int32')
            # get sufficient statistics for pca if we don't have projection matrix
            pca_suff_stat, spikes_per_channel = get_pca_suff_stat(rec, spike_index,
                                                                 self.config.spikeSize)
        Time['e'] += (dt.datetime.now()-_b).total_seconds()
        return (spike_index_clear, score, spike_index_collision,
                pca_suff_stat, spikes_per_channel, Time)
    def getTemplates(self, spikeTrain, R):
        """Average waveform templates around each spike of *spikeTrain*.

        Parameters
        ----------
        spikeTrain : numpy.ndarray
            (n_spikes, 2) array of [spike time, template id].
        R : int
            Half window size (in samples) around each spike time.

        Returns
        -------
        numpy.ndarray
            (K, 2R+1, nChan) mean waveform per template id.
        """
        K = np.amax(spikeTrain[:, 1])+1
        batch_size = self.config.batch_size
        BUFF = np.max( (self.config.BUFF, R) )
        nBatches = self.config.nBatches
        nPortion = self.config.nPortion
        residual = self.config.residual
        self.openFile()
        summedTemplatesBig = np.zeros((K, 2*R+1, self.config.nChan))
        ndata = np.zeros(K)
        for i in range(0, nBatches):
            # spikes whose time falls in this batch
            spt = spikeTrain[np.logical_and(
                spikeTrain[:, 0] >= i*batch_size, spikeTrain[:, 0] < (i+1)*batch_size)]
            # reading data; spike times are shifted into batch-local
            # coordinates to match the buffered batch layout
            if nBatches == 1:
                rec = self.load(0, batch_size)
                rec = self.addZeroBuffer(rec, BUFF, 2)
                spt[:, 0] = spt[:, 0] + BUFF
            elif i == 0:
                rec = self.load(i*batch_size, batch_size+BUFF)
                rec = self.addZeroBuffer(rec, BUFF, 0)
                spt[:, 0] = spt[:, 0] - i*batch_size + BUFF
            elif i < nBatches-1:
                rec = self.load(i*batch_size-BUFF, batch_size+2*BUFF)
                spt[:, 0] = spt[:, 0] - i*batch_size
            elif residual == 0:
                rec = self.load(i*batch_size-BUFF, batch_size+BUFF)
                rec = self.addZeroBuffer(rec, BUFF, 1)
                spt[:, 0] = spt[:, 0] - i*batch_size
            else:
                rec = self.load(i*batch_size-BUFF, residual+BUFF)
                rec = self.addZeroBuffer(rec, BUFF+(batch_size-residual), 1)
                spt[:, 0] = spt[:, 0] - i*batch_size
            # filter recording
            if self.config.doFilter == 1:
                rec = butterworth(rec, self.config.filterLow,
                                  self.config.filterHighFactor,
                                  self.config.filterOrder,
                                  self.config.srate)
            # standardize recording
            if not hasattr(self, 'sd'):
                # robust s.d. estimate (MAD / 0.6745) from a window of up
                # to 5 seconds taken from the middle of the batch
                small_t = int(np.min((int(self.config.srate*5), rec.shape[0]))/2)
                mid_T = int(np.ceil(rec.shape[0]/2))
                rec_temp = rec[np.arange(mid_T-small_t, mid_T+small_t)]
                self.sd = np.median(np.abs(rec_temp), 0)/0.6745
            rec = np.divide(rec, self.sd)
            # accumulate the windowed waveform into the spike's template
            for j in range(spt.shape[0]):
                summedTemplatesBig[
                    spt[j, 1]] += rec[spt[j, 0]+np.arange(-R, R+1)]
                ndata[spt[j, 1]] += 1
        self.closeFile()
        return summedTemplatesBig/ndata[:, np.newaxis, np.newaxis]
| [
"numpy.divide",
"numpy.abs",
"os.path.join",
"numpy.logical_and",
"numpy.concatenate",
"numpy.ceil",
"numpy.zeros",
"numpy.amax",
"numpy.append",
"numpy.max",
"numpy.arange",
"numpy.vstack",
"progressbar.ProgressBar",
"datetime.datetime.now",
"numpy.fromstring",
"logging.getLogger"
] | [((1246, 1273), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1263, 1273), False, 'import logging\n'), ((2190, 2233), 'numpy.fromstring', 'np.fromstring', (['rec'], {'dtype': 'self.config.dtype'}), '(rec, dtype=self.config.dtype)\n', (2203, 2233), True, 'import numpy as np\n'), ((2881, 2915), 'numpy.zeros', 'np.zeros', (['(buffSize, rec.shape[1])'], {}), '((buffSize, rec.shape[1]))\n', (2889, 2915), True, 'import numpy as np\n'), ((3329, 3346), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (3344, 3346), True, 'import datetime as dt\n'), ((4241, 4281), 'progressbar.ProgressBar', 'progressbar.ProgressBar', ([], {'maxval': 'nBatches'}), '(maxval=nBatches)\n', (4264, 4281), False, 'import progressbar\n'), ((7208, 7225), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (7223, 7225), True, 'import datetime as dt\n'), ((8459, 8476), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8474, 8476), True, 'import datetime as dt\n'), ((8701, 8718), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8716, 8718), True, 'import datetime as dt\n'), ((10036, 10053), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (10051, 10053), True, 'import datetime as dt\n'), ((10225, 10242), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (10240, 10242), True, 'import datetime as dt\n'), ((12280, 12309), 'numpy.max', 'np.max', (['(self.config.BUFF, R)'], {}), '((self.config.BUFF, R))\n', (12286, 12309), True, 'import numpy as np\n'), ((12486, 12529), 'numpy.zeros', 'np.zeros', (['(K, 2 * R + 1, self.config.nChan)'], {}), '((K, 2 * R + 1, self.config.nChan))\n', (12494, 12529), True, 'import numpy as np\n'), ((12542, 12553), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (12550, 12553), True, 'import numpy as np\n'), ((1331, 1380), 'os.path.join', 'os.path.join', (['self.config.root', '"""tmp"""', '"""wrec.bin"""'], {}), "(self.config.root, 'tmp', 'wrec.bin')\n", (1343, 1380), 
False, 'import os\n'), ((1674, 1726), 'os.path.join', 'os.path.join', (['self.config.root', 'self.config.filename'], {}), '(self.config.root, self.config.filename)\n', (1686, 1726), False, 'import os\n'), ((2959, 2987), 'numpy.append', 'np.append', (['buff', 'rec'], {'axis': '(0)'}), '(buff, rec, axis=0)\n', (2968, 2987), True, 'import numpy as np\n'), ((4364, 4381), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (4379, 4381), True, 'import datetime as dt\n'), ((6451, 6468), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (6466, 6468), True, 'import datetime as dt\n'), ((8109, 8126), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8124, 8126), True, 'import datetime as dt\n'), ((9393, 9478), 'numpy.logical_and', 'np.logical_and', (['(spike_index[:, 0] > BUFF)', '(spike_index[:, 0] < rec.shape[0] - BUFF)'], {}), '(spike_index[:, 0] > BUFF, spike_index[:, 0] < rec.shape[0] -\n BUFF)\n', (9407, 9478), True, 'import numpy as np\n'), ((9687, 9704), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (9702, 9704), True, 'import datetime as dt\n'), ((10464, 10489), 'numpy.zeros', 'np.zeros', (['(0, 2)', '"""int32"""'], {}), "((0, 2), 'int32')\n", (10472, 10489), True, 'import numpy as np\n'), ((12192, 12217), 'numpy.amax', 'np.amax', (['spikeTrain[:, 1]'], {}), '(spikeTrain[:, 1])\n', (12199, 12217), True, 'import numpy as np\n'), ((14394, 14417), 'numpy.divide', 'np.divide', (['rec', 'self.sd'], {}), '(rec, self.sd)\n', (14403, 14417), True, 'import numpy as np\n'), ((1082, 1114), 'os.path.join', 'os.path.join', (['config.root', '"""tmp"""'], {}), "(config.root, 'tmp')\n", (1094, 1114), False, 'import os\n'), ((3033, 3061), 'numpy.append', 'np.append', (['rec', 'buff'], {'axis': '(0)'}), '(rec, buff, axis=0)\n', (3042, 3061), True, 'import numpy as np\n'), ((5930, 5974), 'numpy.vstack', 'np.vstack', (['(spike_index_clear, si_clr_batch)'], {}), '((spike_index_clear, si_clr_batch))\n', (5939, 5974), True, 'import numpy 
as np\n'), ((6035, 6083), 'numpy.vstack', 'np.vstack', (['(spike_index_collision, si_col_batch)'], {}), '((spike_index_collision, si_col_batch))\n', (6044, 6083), True, 'import numpy as np\n'), ((6994, 7043), 'os.path.join', 'os.path.join', (['self.config.root', '"""tmp"""', '"""wrec.bin"""'], {}), "(self.config.root, 'tmp', 'wrec.bin')\n", (7006, 7043), False, 'import os\n'), ((12622, 12718), 'numpy.logical_and', 'np.logical_and', (['(spikeTrain[:, 0] >= i * batch_size)', '(spikeTrain[:, 0] < (i + 1) * batch_size)'], {}), '(spikeTrain[:, 0] >= i * batch_size, spikeTrain[:, 0] < (i + \n 1) * batch_size)\n', (12636, 12718), True, 'import numpy as np\n'), ((3107, 3148), 'numpy.concatenate', 'np.concatenate', (['(buff, rec, buff)'], {'axis': '(0)'}), '((buff, rec, buff), axis=0)\n', (3121, 3148), True, 'import numpy as np\n'), ((6167, 6211), 'numpy.concatenate', 'np.concatenate', (['(score, score_batch)'], {'axis': '(0)'}), '((score, score_batch), axis=0)\n', (6181, 6211), True, 'import numpy as np\n'), ((8625, 8642), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8640, 8642), True, 'import datetime as dt\n'), ((9531, 9548), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (9546, 9548), True, 'import datetime as dt\n'), ((10173, 10190), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (10188, 10190), True, 'import datetime as dt\n'), ((11711, 11736), 'numpy.zeros', 'np.zeros', (['(0, 2)', '"""int32"""'], {}), "((0, 2), 'int32')\n", (11719, 11736), True, 'import numpy as np\n'), ((11974, 11991), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (11989, 11991), True, 'import datetime as dt\n'), ((14215, 14240), 'numpy.ceil', 'np.ceil', (['(rec.shape[0] / 2)'], {}), '(rec.shape[0] / 2)\n', (14222, 14240), True, 'import numpy as np\n'), ((14271, 14314), 'numpy.arange', 'np.arange', (['(mid_T - small_t)', '(mid_T + small_t)'], {}), '(mid_T - small_t, mid_T + small_t)\n', (14280, 14314), True, 'import numpy as np\n'), 
((5102, 5119), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (5117, 5119), True, 'import datetime as dt\n'), ((7130, 7147), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (7145, 7147), True, 'import datetime as dt\n'), ((8375, 8392), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (8390, 8392), True, 'import datetime as dt\n'), ((9984, 10001), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (9999, 10001), True, 'import datetime as dt\n'), ((14348, 14364), 'numpy.abs', 'np.abs', (['rec_temp'], {}), '(rec_temp)\n', (14354, 14364), True, 'import numpy as np\n'), ((14545, 14565), 'numpy.arange', 'np.arange', (['(-R)', '(R + 1)'], {}), '(-R, R + 1)\n', (14554, 14565), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Integration tests for solidspy
"""
import numpy as np
from scipy.sparse.linalg import eigsh
import solidspy.postprocesor as pos
import solidspy.assemutil as ass
import solidspy.solutil as sol
def test_4_elements():
    """2×2 mesh with uniaxial load"""
    # Node table [id, x, y] and constraints (-1 marks a fixed DOF).
    nodes = np.array([[0, 0, 0], [1, 2, 0], [2, 2, 2],
                      [3, 0, 2], [4, 1, 0], [5, 2, 1],
                      [6, 1, 2], [7, 0, 1], [8, 1, 1]])
    cons = np.array([[0, -1], [0, -1], [0, 0], [0, 0], [-1, -1],
                     [0, 0], [0, 0], [0, 0], [0, 0]])
    eles = np.array([[0, 1, 0, 0, 4, 8, 7],
                     [1, 1, 0, 4, 1, 5, 8],
                     [2, 1, 0, 7, 8, 6, 3],
                     [3, 1, 0, 8, 5, 2, 6]])
    loads = np.array([[3, 0, 1], [6, 0, 2], [2, 0, 1]])
    mater = np.array([[1.0, 0.3]])

    # Assemble the global system, solve, and scatter back to all nodes.
    assem_op, bc_array, neq = ass.DME(cons, eles)
    stiff, _ = ass.assembler(eles, mater, nodes, neq, assem_op)
    rhs = ass.loadasem(loads, bc_array, neq)
    disp = sol.static_sol(stiff, rhs)
    disp_complete = pos.complete_disp(bc_array, nodes, disp)

    # Known analytic displacement field for this load case.
    expected = np.array([[0.6, 0.0], [-0.6, 0.0], [-0.6, 4.0],
                         [0.6, 4.0], [0.0, 0.0], [-0.6, 2.0],
                         [0.0, 4.0], [0.6, 2.0], [0.0, 2.0]])
    assert np.allclose(disp_complete, expected)
def test_2_elements():
    """2x1 mesh cantilever beam"""
    # Node table [id, x, y]; the left edge (nodes 0 and 3) is fully fixed.
    nodes = np.array([[0, 0, 0], [1, 1, 0], [2, 2, 0],
                      [3, 0, 1], [4, 1, 1], [5, 2, 1]])
    cons = np.array([[-1, -1], [0, 0], [0, 0],
                     [-1, -1], [0, 0], [0, 0]])
    eles = np.array([[0, 1, 0, 0, 1, 4, 3],
                     [1, 1, 0, 1, 2, 5, 4]])
    # Downward tip load split over the two free-end nodes.
    loads = np.array([[2, 0, -0.5], [5, 0, -0.5]])
    mater = np.array([[1.0, 0.3]])

    # Assemble the global system, solve, and scatter back to all nodes.
    assem_op, bc_array, neq = ass.DME(cons, eles)
    stiff, _ = ass.assembler(eles, mater, nodes, neq, assem_op)
    rhs = ass.loadasem(loads, bc_array, neq)
    disp = sol.static_sol(stiff, rhs)
    disp_complete = pos.complete_disp(bc_array, nodes, disp)

    # Reference solution (original values scaled by 1/45).
    expected = np.array([[0, 0], [-273, -390], [-364, -1144],
                         [0, 0], [273, -390], [364, -1144]]) / 45
    assert np.allclose(disp_complete, expected)
def test_beams():
    """Beams with axial force"""
    # Analytic problem
    # Node table [id, x, y]; both end nodes are fully constrained (-1 in
    # every DOF column), the middle node is free.
    nodes = np.array([
        [0, 0.0, 0.0],
        [1, 0.0, 6.0],
        [2, 4.0, 6.0]])
    cons = np.array([
        [-1, -1, -1],
        [0, 0, 0],
        [-1, -1, -1]])
    # Material row -- presumably [Young's modulus, inertia, area]; confirm
    # against the solidspy docs.
    mats = np.array([[200e9, 1.33e-4, 0.04]])
    # Element rows [id, element type, material, node i, node j].
    elements = np.array([
        [0, 8, 0, 0, 1],
        [1, 8, 0, 1, 2]])
    # Load (force x, force y, moment -- assumed; verify) applied at node 1.
    loads = np.array([
        [1, -12000, -24000, -6000]])
    # Assemble with 3 DOF per node and solve the dense system.
    assem_op, bc_array, neq = ass.DME(cons, elements, ndof_node=3)
    stiff, _ = ass.assembler(elements, mats, nodes, neq, assem_op,
                           sparse=False)
    load_vec = ass.loadasem(loads, bc_array, neq, ndof_node=3)
    solution = sol.static_sol(stiff, load_vec)
    # Rounded reference values, hence the loose 10% tolerance.
    solution_analytic = np.array([-6.29e-6, -1.695e-5, -0.13e-3])
    assert np.allclose(solution, solution_analytic, rtol=1e-1)
def test_eigs_truss():
    """Eigenvalues of a bar"""
    nnodes = 513
    # Nodes spread uniformly over [0, pi]; column 0 is the node id.
    nodes = np.zeros((nnodes, 3))
    nodes[:, 0] = np.arange(nnodes)
    nodes[:, 1] = np.linspace(0, np.pi, nnodes)
    # -1 marks a constrained DOF: second DOF everywhere, first DOF at both
    # ends of the bar.
    cons = np.zeros((nnodes, 2))
    cons[:, 1] = -1
    cons[0, 0] = -1
    cons[-1, 0] = -1
    mats = np.array([[1.0, 1.0, 1.0]])
    # One type-6 element between each pair of neighboring nodes.
    elements = np.zeros((nnodes - 1, 5), dtype=int)
    elements[:, 0] = np.arange(nnodes - 1)
    elements[:, 1] = 6
    elements[:, 3] = np.arange(nnodes - 1)
    elements[:, 4] = np.arange(1, nnodes)

    assem_op, bc_array, neq = ass.DME(cons, elements)
    stiff, mass = ass.assembler(elements, mats, nodes, neq, assem_op)
    # Smallest-magnitude eigenvalues of the generalized problem K x = w M x;
    # they should match 1**2 ... 6**2.
    vals, _ = eigsh(stiff, M=mass, which="SM")
    assert np.allclose(vals, np.linspace(1, 6, 6)**2, rtol=1e-2)
def test_eigs_beam():
    """Eigenvalues of a cantilever beam"""
    nnodes = 10
    # Nodes spread uniformly over [0, pi]; column 0 is the node id.
    x = np.linspace(0, np.pi, nnodes)
    nodes = np.zeros((nnodes, 3))
    nodes[:, 0] = range(nnodes)
    nodes[:, 1] = x
    # -1 marks a constrained DOF: clamp all DOF of the root node and the
    # first DOF of every node.
    cons = np.zeros((nnodes, 3))
    cons[0, :] = -1
    cons[:, 0] = -1
    mats = np.array([[1.0, 1.0, 1.0, 1.0]])
    # One type-7 element between each pair of neighboring nodes:
    # rows are [id, element type, material, node i, node j].
    elements = np.zeros((nnodes - 1, 5 ), dtype=int)
    elements[:, 0] = range(nnodes - 1)
    elements[:, 1] = 7
    elements[:, 3] = range(nnodes - 1)
    elements[:, 4] = range(1, nnodes)
    assem_op, bc_array, neq = ass.DME(cons, elements, ndof_node=3)
    stiff, mass = ass.assembler(elements, mats, nodes, neq, assem_op)
    # Smallest-magnitude eigenvalues of the generalized problem K x = w M x.
    vals, _ = eigsh(stiff, M=mass, which="SM")
    # Reference values compare against the fourth root of the eigenvalues
    # (hence vals**0.25 below) -- standard cantilever-beam frequency roots.
    vals_analytic = np.array([0.596864162694467, 1.49417561427335,
                              2.50024694616670, 3.49998931984744,
                              4.50000046151508, 5.49999998005609])
    assert np.allclose(vals**0.25, vals_analytic, rtol=1e-2)
| [
"solidspy.assemutil.assembler",
"solidspy.assemutil.DME",
"numpy.allclose",
"solidspy.solutil.static_sol",
"numpy.zeros",
"solidspy.assemutil.loadasem",
"scipy.sparse.linalg.eigsh",
"numpy.array",
"numpy.linspace",
"solidspy.postprocesor.complete_disp"
] | [((296, 409), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 2, 0], [2, 2, 2], [3, 0, 2], [4, 1, 0], [5, 2, 1], [6, 1, 2\n ], [7, 0, 1], [8, 1, 1]]'], {}), '([[0, 0, 0], [1, 2, 0], [2, 2, 2], [3, 0, 2], [4, 1, 0], [5, 2, 1],\n [6, 1, 2], [7, 0, 1], [8, 1, 1]])\n', (304, 409), True, 'import numpy as np\n'), ((526, 617), 'numpy.array', 'np.array', (['[[0, -1], [0, -1], [0, 0], [0, 0], [-1, -1], [0, 0], [0, 0], [0, 0], [0, 0]]'], {}), '([[0, -1], [0, -1], [0, 0], [0, 0], [-1, -1], [0, 0], [0, 0], [0, 0\n ], [0, 0]])\n', (534, 617), True, 'import numpy as np\n'), ((733, 840), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 4, 8, 7], [1, 1, 0, 4, 1, 5, 8], [2, 1, 0, 7, 8, 6, 3], [3, 1,\n 0, 8, 5, 2, 6]]'], {}), '([[0, 1, 0, 0, 4, 8, 7], [1, 1, 0, 4, 1, 5, 8], [2, 1, 0, 7, 8, 6, \n 3], [3, 1, 0, 8, 5, 2, 6]])\n', (741, 840), True, 'import numpy as np\n'), ((897, 940), 'numpy.array', 'np.array', (['[[3, 0, 1], [6, 0, 2], [2, 0, 1]]'], {}), '([[3, 0, 1], [6, 0, 2], [2, 0, 1]])\n', (905, 940), True, 'import numpy as np\n'), ((990, 1012), 'numpy.array', 'np.array', (['[[1.0, 0.3]]'], {}), '([[1.0, 0.3]])\n', (998, 1012), True, 'import numpy as np\n'), ((1043, 1062), 'solidspy.assemutil.DME', 'ass.DME', (['cons', 'eles'], {}), '(cons, eles)\n', (1050, 1062), True, 'import solidspy.assemutil as ass\n'), ((1078, 1126), 'solidspy.assemutil.assembler', 'ass.assembler', (['eles', 'mater', 'nodes', 'neq', 'assem_op'], {}), '(eles, mater, nodes, neq, assem_op)\n', (1091, 1126), True, 'import solidspy.assemutil as ass\n'), ((1142, 1176), 'solidspy.assemutil.loadasem', 'ass.loadasem', (['loads', 'bc_array', 'neq'], {}), '(loads, bc_array, neq)\n', (1154, 1176), True, 'import solidspy.assemutil as ass\n'), ((1188, 1219), 'solidspy.solutil.static_sol', 'sol.static_sol', (['stiff', 'load_vec'], {}), '(stiff, load_vec)\n', (1202, 1219), True, 'import solidspy.solutil as sol\n'), ((1240, 1280), 'solidspy.postprocesor.complete_disp', 'pos.complete_disp', (['bc_array', 'nodes', 'disp'], {}), 
'(bc_array, nodes, disp)\n', (1257, 1280), True, 'import solidspy.postprocesor as pos\n'), ((1301, 1427), 'numpy.array', 'np.array', (['[[0.6, 0.0], [-0.6, 0.0], [-0.6, 4.0], [0.6, 4.0], [0.0, 0.0], [-0.6, 2.0],\n [0.0, 4.0], [0.6, 2.0], [0.0, 2.0]]'], {}), '([[0.6, 0.0], [-0.6, 0.0], [-0.6, 4.0], [0.6, 4.0], [0.0, 0.0], [-\n 0.6, 2.0], [0.0, 4.0], [0.6, 2.0], [0.0, 2.0]])\n', (1309, 1427), True, 'import numpy as np\n'), ((1544, 1585), 'numpy.allclose', 'np.allclose', (['disp_complete', 'disp_analytic'], {}), '(disp_complete, disp_analytic)\n', (1555, 1585), True, 'import numpy as np\n'), ((1658, 1734), 'numpy.array', 'np.array', (['[[0, 0, 0], [1, 1, 0], [2, 2, 0], [3, 0, 1], [4, 1, 1], [5, 2, 1]]'], {}), '([[0, 0, 0], [1, 1, 0], [2, 2, 0], [3, 0, 1], [4, 1, 1], [5, 2, 1]])\n', (1666, 1734), True, 'import numpy as np\n'), ((1819, 1881), 'numpy.array', 'np.array', (['[[-1, -1], [0, 0], [0, 0], [-1, -1], [0, 0], [0, 0]]'], {}), '([[-1, -1], [0, 0], [0, 0], [-1, -1], [0, 0], [0, 0]])\n', (1827, 1881), True, 'import numpy as np\n'), ((1966, 2022), 'numpy.array', 'np.array', (['[[0, 1, 0, 0, 1, 4, 3], [1, 1, 0, 1, 2, 5, 4]]'], {}), '([[0, 1, 0, 0, 1, 4, 3], [1, 1, 0, 1, 2, 5, 4]])\n', (1974, 2022), True, 'import numpy as np\n'), ((2060, 2098), 'numpy.array', 'np.array', (['[[2, 0, -0.5], [5, 0, -0.5]]'], {}), '([[2, 0, -0.5], [5, 0, -0.5]])\n', (2068, 2098), True, 'import numpy as np\n'), ((2136, 2158), 'numpy.array', 'np.array', (['[[1.0, 0.3]]'], {}), '([[1.0, 0.3]])\n', (2144, 2158), True, 'import numpy as np\n'), ((2189, 2208), 'solidspy.assemutil.DME', 'ass.DME', (['cons', 'eles'], {}), '(cons, eles)\n', (2196, 2208), True, 'import solidspy.assemutil as ass\n'), ((2224, 2272), 'solidspy.assemutil.assembler', 'ass.assembler', (['eles', 'mater', 'nodes', 'neq', 'assem_op'], {}), '(eles, mater, nodes, neq, assem_op)\n', (2237, 2272), True, 'import solidspy.assemutil as ass\n'), ((2288, 2322), 'solidspy.assemutil.loadasem', 'ass.loadasem', (['loads', 'bc_array', 
'neq'], {}), '(loads, bc_array, neq)\n', (2300, 2322), True, 'import solidspy.assemutil as ass\n'), ((2334, 2365), 'solidspy.solutil.static_sol', 'sol.static_sol', (['stiff', 'load_vec'], {}), '(stiff, load_vec)\n', (2348, 2365), True, 'import solidspy.solutil as sol\n'), ((2386, 2426), 'solidspy.postprocesor.complete_disp', 'pos.complete_disp', (['bc_array', 'nodes', 'disp'], {}), '(bc_array, nodes, disp)\n', (2403, 2426), True, 'import solidspy.postprocesor as pos\n'), ((2626, 2667), 'numpy.allclose', 'np.allclose', (['disp_complete', 'disp_analytic'], {}), '(disp_complete, disp_analytic)\n', (2637, 2667), True, 'import numpy as np\n'), ((2761, 2816), 'numpy.array', 'np.array', (['[[0, 0.0, 0.0], [1, 0.0, 6.0], [2, 4.0, 6.0]]'], {}), '([[0, 0.0, 0.0], [1, 0.0, 6.0], [2, 4.0, 6.0]])\n', (2769, 2816), True, 'import numpy as np\n'), ((2853, 2902), 'numpy.array', 'np.array', (['[[-1, -1, -1], [0, 0, 0], [-1, -1, -1]]'], {}), '([[-1, -1, -1], [0, 0, 0], [-1, -1, -1]])\n', (2861, 2902), True, 'import numpy as np\n'), ((2951, 2995), 'numpy.array', 'np.array', (['[[200000000000.0, 0.000133, 0.04]]'], {}), '([[200000000000.0, 0.000133, 0.04]])\n', (2959, 2995), True, 'import numpy as np\n'), ((3001, 3045), 'numpy.array', 'np.array', (['[[0, 8, 0, 0, 1], [1, 8, 0, 1, 2]]'], {}), '([[0, 8, 0, 0, 1], [1, 8, 0, 1, 2]])\n', (3009, 3045), True, 'import numpy as np\n'), ((3075, 3113), 'numpy.array', 'np.array', (['[[1, -12000, -24000, -6000]]'], {}), '([[1, -12000, -24000, -6000]])\n', (3083, 3113), True, 'import numpy as np\n'), ((3153, 3189), 'solidspy.assemutil.DME', 'ass.DME', (['cons', 'elements'], {'ndof_node': '(3)'}), '(cons, elements, ndof_node=3)\n', (3160, 3189), True, 'import solidspy.assemutil as ass\n'), ((3205, 3270), 'solidspy.assemutil.assembler', 'ass.assembler', (['elements', 'mats', 'nodes', 'neq', 'assem_op'], {'sparse': '(False)'}), '(elements, mats, nodes, neq, assem_op, sparse=False)\n', (3218, 3270), True, 'import solidspy.assemutil as ass\n'), ((3315, 
3362), 'solidspy.assemutil.loadasem', 'ass.loadasem', (['loads', 'bc_array', 'neq'], {'ndof_node': '(3)'}), '(loads, bc_array, neq, ndof_node=3)\n', (3327, 3362), True, 'import solidspy.assemutil as ass\n'), ((3378, 3409), 'solidspy.solutil.static_sol', 'sol.static_sol', (['stiff', 'load_vec'], {}), '(stiff, load_vec)\n', (3392, 3409), True, 'import solidspy.solutil as sol\n'), ((3434, 3477), 'numpy.array', 'np.array', (['[-6.29e-06, -1.695e-05, -0.00013]'], {}), '([-6.29e-06, -1.695e-05, -0.00013])\n', (3442, 3477), True, 'import numpy as np\n'), ((3487, 3537), 'numpy.allclose', 'np.allclose', (['solution', 'solution_analytic'], {'rtol': '(0.1)'}), '(solution, solution_analytic, rtol=0.1)\n', (3498, 3537), True, 'import numpy as np\n'), ((3625, 3654), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'nnodes'], {}), '(0, np.pi, nnodes)\n', (3636, 3654), True, 'import numpy as np\n'), ((3667, 3688), 'numpy.zeros', 'np.zeros', (['(nnodes, 3)'], {}), '((nnodes, 3))\n', (3675, 3688), True, 'import numpy as np\n'), ((3752, 3773), 'numpy.zeros', 'np.zeros', (['(nnodes, 2)'], {}), '((nnodes, 2))\n', (3760, 3773), True, 'import numpy as np\n'), ((3847, 3874), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0]])\n', (3855, 3874), True, 'import numpy as np\n'), ((3890, 3926), 'numpy.zeros', 'np.zeros', (['(nnodes - 1, 5)'], {'dtype': 'int'}), '((nnodes - 1, 5), dtype=int)\n', (3898, 3926), True, 'import numpy as np\n'), ((4102, 4125), 'solidspy.assemutil.DME', 'ass.DME', (['cons', 'elements'], {}), '(cons, elements)\n', (4109, 4125), True, 'import solidspy.assemutil as ass\n'), ((4144, 4195), 'solidspy.assemutil.assembler', 'ass.assembler', (['elements', 'mats', 'nodes', 'neq', 'assem_op'], {}), '(elements, mats, nodes, neq, assem_op)\n', (4157, 4195), True, 'import solidspy.assemutil as ass\n'), ((4215, 4247), 'scipy.sparse.linalg.eigsh', 'eigsh', (['stiff'], {'M': 'mass', 'which': '"""SM"""'}), "(stiff, M=mass, which='SM')\n", (4220, 4247), False, 
'from scipy.sparse.linalg import eigsh\n'), ((4410, 4439), 'numpy.linspace', 'np.linspace', (['(0)', 'np.pi', 'nnodes'], {}), '(0, np.pi, nnodes)\n', (4421, 4439), True, 'import numpy as np\n'), ((4452, 4473), 'numpy.zeros', 'np.zeros', (['(nnodes, 3)'], {}), '((nnodes, 3))\n', (4460, 4473), True, 'import numpy as np\n'), ((4537, 4558), 'numpy.zeros', 'np.zeros', (['(nnodes, 3)'], {}), '((nnodes, 3))\n', (4545, 4558), True, 'import numpy as np\n'), ((4610, 4642), 'numpy.array', 'np.array', (['[[1.0, 1.0, 1.0, 1.0]]'], {}), '([[1.0, 1.0, 1.0, 1.0]])\n', (4618, 4642), True, 'import numpy as np\n'), ((4658, 4694), 'numpy.zeros', 'np.zeros', (['(nnodes - 1, 5)'], {'dtype': 'int'}), '((nnodes - 1, 5), dtype=int)\n', (4666, 4694), True, 'import numpy as np\n'), ((4870, 4906), 'solidspy.assemutil.DME', 'ass.DME', (['cons', 'elements'], {'ndof_node': '(3)'}), '(cons, elements, ndof_node=3)\n', (4877, 4906), True, 'import solidspy.assemutil as ass\n'), ((4925, 4976), 'solidspy.assemutil.assembler', 'ass.assembler', (['elements', 'mats', 'nodes', 'neq', 'assem_op'], {}), '(elements, mats, nodes, neq, assem_op)\n', (4938, 4976), True, 'import solidspy.assemutil as ass\n'), ((4996, 5028), 'scipy.sparse.linalg.eigsh', 'eigsh', (['stiff'], {'M': 'mass', 'which': '"""SM"""'}), "(stiff, M=mass, which='SM')\n", (5001, 5028), False, 'from scipy.sparse.linalg import eigsh\n'), ((5049, 5172), 'numpy.array', 'np.array', (['[0.596864162694467, 1.49417561427335, 2.5002469461667, 3.49998931984744, \n 4.50000046151508, 5.49999998005609]'], {}), '([0.596864162694467, 1.49417561427335, 2.5002469461667, \n 3.49998931984744, 4.50000046151508, 5.49999998005609])\n', (5057, 5172), True, 'import numpy as np\n'), ((5241, 5292), 'numpy.allclose', 'np.allclose', (['(vals ** 0.25)', 'vals_analytic'], {'rtol': '(0.01)'}), '(vals ** 0.25, vals_analytic, rtol=0.01)\n', (5252, 5292), True, 'import numpy as np\n'), ((2454, 2541), 'numpy.array', 'np.array', (['[[0, 0], [-273, -390], [-364, -1144], [0, 0], 
[273, -390], [364, -1144]]'], {}), '([[0, 0], [-273, -390], [-364, -1144], [0, 0], [273, -390], [364, -\n 1144]])\n', (2462, 2541), True, 'import numpy as np\n'), ((4277, 4297), 'numpy.linspace', 'np.linspace', (['(1)', '(6)', '(6)'], {}), '(1, 6, 6)\n', (4288, 4297), True, 'import numpy as np\n')] |
import numpy as np
import glob
import os
import matplotlib.pyplot as plt

# Quick-look script: load the full-band response sweeps saved under one
# smurf output folder and plot each sweep's magnitude, overlaid with the
# mean response of all sweeps.

#fileroot = '/home/common/data/cpu-b000-hp01/cryo_data/data2/20180920/' + \
#    '1537461840/outputs'
fileroot = '/data/smurf_data/20181107/1541621474/outputs'

files = glob.glob(os.path.join(fileroot, '*freq_full_band_resp.txt'))
#files = np.array(['/home/common/data/cpu-b000-hp01/cryo_data/data2/20180920/1537461840/outputs/1537461850_freq_full_band_resp.txt'])

n_files = len(files)
# Use the first file's frequency axis and sort it once; the same ordering is
# applied to every response -- assumes all sweeps share the axis, TODO confirm.
freq = np.loadtxt(files[0])
idx = np.argsort(freq)
freq = freq[idx]
n_pts = len(freq)

resp = np.zeros((n_files, n_pts), dtype=complex)
for i, f in enumerate(files):
    print('loading data from {}'.format(f))
    # Companion files hold the real/imaginary parts; swap the filename tag.
    resp[i] = (np.loadtxt(f.replace('freq', 'real')) + \
        1.j*np.loadtxt(f.replace('freq', 'imag')))[idx]

# 3x3 grid of subplots -- NOTE(review): silently assumes at most 9 files.
fig, ax = plt.subplots(3,3, sharex=True, sharey=True, figsize=(12,9))
resp_mean = np.mean(resp, axis=0)
cm = plt.get_cmap('viridis')  # NOTE(review): unused below
for i in np.arange(n_files):
    y = i//3
    x = i%3
    # Log-scale magnitude of this sweep plus the mean response for reference.
    ax[y,x].semilogy(freq, np.abs(resp[i]))
    ax[y,x].set_title('Att {}'.format(i*3))
    ax[y,x].plot(freq, np.abs(resp_mean))
plt.tight_layout()

#import pysmurf
#S = pysmurf.SmurfControl()
#grad_loc = S.find_peak(freq, resp)
#fig, ax = plt.subplots(1)
#ax.plot(freq, np.abs(resp), '-bD', markevery=grad_loc)
#ax.set_yscale('log')
| [
"matplotlib.pyplot.tight_layout",
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"os.path.join",
"numpy.zeros",
"numpy.argsort",
"numpy.mean",
"numpy.arange",
"numpy.loadtxt",
"matplotlib.pyplot.subplots"
] | [((466, 486), 'numpy.loadtxt', 'np.loadtxt', (['files[0]'], {}), '(files[0])\n', (476, 486), True, 'import numpy as np\n'), ((493, 509), 'numpy.argsort', 'np.argsort', (['freq'], {}), '(freq)\n', (503, 509), True, 'import numpy as np\n'), ((553, 594), 'numpy.zeros', 'np.zeros', (['(n_files, n_pts)'], {'dtype': 'complex'}), '((n_files, n_pts), dtype=complex)\n', (561, 594), True, 'import numpy as np\n'), ((794, 855), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {'sharex': '(True)', 'sharey': '(True)', 'figsize': '(12, 9)'}), '(3, 3, sharex=True, sharey=True, figsize=(12, 9))\n', (806, 855), True, 'import matplotlib.pyplot as plt\n'), ((866, 887), 'numpy.mean', 'np.mean', (['resp'], {'axis': '(0)'}), '(resp, axis=0)\n', (873, 887), True, 'import numpy as np\n'), ((893, 916), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis"""'], {}), "('viridis')\n", (905, 916), True, 'import matplotlib.pyplot as plt\n'), ((926, 944), 'numpy.arange', 'np.arange', (['n_files'], {}), '(n_files)\n', (935, 944), True, 'import numpy as np\n'), ((1102, 1120), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1118, 1120), True, 'import matplotlib.pyplot as plt\n'), ((252, 302), 'os.path.join', 'os.path.join', (['fileroot', '"""*freq_full_band_resp.txt"""'], {}), "(fileroot, '*freq_full_band_resp.txt')\n", (264, 302), False, 'import os\n'), ((998, 1013), 'numpy.abs', 'np.abs', (['resp[i]'], {}), '(resp[i])\n', (1004, 1013), True, 'import numpy as np\n'), ((1082, 1099), 'numpy.abs', 'np.abs', (['resp_mean'], {}), '(resp_mean)\n', (1088, 1099), True, 'import numpy as np\n')] |
# assemble infer result
# -*- coding: utf-8 -*-
#
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License
# You may obtain a copy of the License at
#
# http://www.apache.org/license/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOOUT WARRANTIES OR CONDITIONS OF ANY KIND,either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
import codecs
import argparse
import evaluation_utils
import numpy as np
# Command-line interface.  The original description/help text was copy-pasted
# from a BLEU-score script and did not match what this tool does.
parser = argparse.ArgumentParser(
    description='Assemble per-sample infer result files into one text file')
parser.add_argument('--source_dir', type=str, default='../result_Files',
                    help='folder containing the per-sample infer result files')
parser.add_argument('--assemble_file', type=str,
                    default='../result_Files/infer_assemble',
                    help='path of the assembled output file')
def assemble_infer_result(source_dir, target_file):
    """Merge per-sample infer output files into a single text file.

    Each file in ``source_dir`` named ``<prefix>_<index>_output0.bin`` holds
    int32 values; the values of sample ``index`` are written, space
    separated, as line ``index`` of ``target_file``.

    Args:
        source_dir: folder containing the per-sample ``*_output0.bin`` files.
        target_file: path of the assembled text file to write.
    """
    # Snapshot and filter the directory listing once.  The original code
    # called os.listdir() three times (including inside the loop, after the
    # target file was opened), so writing the target into source_dir would
    # change the count mid-run.
    bin_files = [name for name in os.listdir(source_dir)
                 if name.endswith("_output0.bin")]
    print("====len listdir:", len(bin_files))
    # All outputs share a common prefix; strip "<index>_output0.bin" from any
    # one file name to recover it.
    file_seg_str = "_".join(bin_files[0].split("_")[:-2])
    print("====file_seg_str:", file_seg_str)
    with open(target_file, "w") as f:
        for index in range(len(bin_files)):
            file_name = "{}_{}_output0.bin".format(file_seg_str, index)
            # np.fromfile already returns a flat array; no need for the
            # hard-coded reshape((80)) -- this now works for any length.
            values = np.fromfile(os.path.join(source_dir, file_name),
                                 np.int32)
            # Original output format: every value followed by one space,
            # then a newline.
            line = "".join(str(v) + " " for v in values)
            f.write(line + "\n")
    print("Program hit the end successfully")
if __name__ == "__main__":
    # Assemble every per-sample result under source_dir into one text file.
    cli_args = parser.parse_args()
    assemble_infer_result(cli_args.source_dir, cli_args.assemble_file)
| [
"os.listdir",
"os.path.join",
"argparse.ArgumentParser",
"numpy.fromfile"
] | [((744, 807), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Calculate the BLEU score"""'}), "(description='Calculate the BLEU score')\n", (767, 807), False, 'import argparse\n'), ((1123, 1145), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (1133, 1145), False, 'import os\n'), ((1449, 1479), 'os.path.join', 'os.path.join', (['source_dir', 'file'], {}), '(source_dir, file)\n', (1461, 1479), False, 'import os\n'), ((1330, 1352), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (1340, 1352), False, 'import os\n'), ((1176, 1198), 'os.listdir', 'os.listdir', (['source_dir'], {}), '(source_dir)\n', (1186, 1198), False, 'import os\n'), ((1503, 1540), 'numpy.fromfile', 'np.fromfile', (['file_full_name', 'np.int32'], {}), '(file_full_name, np.int32)\n', (1514, 1540), True, 'import numpy as np\n')] |
"""
Example to show how to draw text using OpenCV
"""
# Import required packages:
import cv2
import numpy as np
import matplotlib.pyplot as plt
def show_with_matplotlib(img, title):
    """Displays an OpenCV (BGR) image with matplotlib.

    matplotlib expects RGB, so the channel order is reversed before showing.
    """
    rgb_view = img[:, :, ::-1]
    plt.imshow(rgb_view)
    plt.title(title)
    plt.show()
# Dictionary containing some colors (BGR tuples, as OpenCV expects):
colors = {'blue': (255, 0, 0), 'green': (0, 255, 0), 'red': (0, 0, 255), 'yellow': (0, 255, 255),
          'magenta': (255, 0, 255), 'cyan': (255, 255, 0), 'white': (255, 255, 255), 'black': (0, 0, 0),
          'gray': (125, 125, 125), 'rand': np.random.randint(0, high=256, size=(3,)).tolist(),
          'dark_gray': (50, 50, 50), 'light_gray': (220, 220, 220)}
# We create the canvas to draw: 120 x 512 pixels, 3 channels, uint8 (8-bit unsigned integers)
image = np.zeros((120, 512, 3), dtype="uint8")
# If you want another background color you can do the following:
# image[:] = colors['light_gray']
# Fill the canvas with white (the zeros above would leave it black):
image.fill(255)
# Draw the same text with the three line types cv2 offers
# (4-connected, 8-connected, and anti-aliased):
cv2.putText(image, 'Mastering OpenCV4 with Python', (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2,
            cv2.LINE_4)
cv2.putText(image, 'Mastering OpenCV4 with Python', (10, 70), cv2.FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2,
            cv2.LINE_8)
cv2.putText(image, 'Mastering OpenCV4 with Python', (10, 110), cv2.FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2,
            cv2.LINE_AA)
# Show image:
show_with_matplotlib(image, 'cv2.putText()')
| [
"matplotlib.pyplot.title",
"cv2.putText",
"matplotlib.pyplot.show",
"matplotlib.pyplot.imshow",
"numpy.zeros",
"numpy.random.randint"
] | [((958, 996), 'numpy.zeros', 'np.zeros', (['(120, 512, 3)'], {'dtype': '"""uint8"""'}), "((120, 512, 3), dtype='uint8')\n", (966, 996), True, 'import numpy as np\n'), ((1148, 1275), 'cv2.putText', 'cv2.putText', (['image', '"""Mastering OpenCV4 with Python"""', '(10, 30)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', "colors['red']", '(2)', 'cv2.LINE_4'], {}), "(image, 'Mastering OpenCV4 with Python', (10, 30), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2, cv2.LINE_4)\n", (1159, 1275), False, 'import cv2\n'), ((1283, 1410), 'cv2.putText', 'cv2.putText', (['image', '"""Mastering OpenCV4 with Python"""', '(10, 70)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', "colors['red']", '(2)', 'cv2.LINE_8'], {}), "(image, 'Mastering OpenCV4 with Python', (10, 70), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2, cv2.LINE_8)\n", (1294, 1410), False, 'import cv2\n'), ((1418, 1547), 'cv2.putText', 'cv2.putText', (['image', '"""Mastering OpenCV4 with Python"""', '(10, 110)', 'cv2.FONT_HERSHEY_SIMPLEX', '(0.9)', "colors['red']", '(2)', 'cv2.LINE_AA'], {}), "(image, 'Mastering OpenCV4 with Python', (10, 110), cv2.\n FONT_HERSHEY_SIMPLEX, 0.9, colors['red'], 2, cv2.LINE_AA)\n", (1429, 1547), False, 'import cv2\n'), ((347, 366), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img_RGB'], {}), '(img_RGB)\n', (357, 366), True, 'import matplotlib.pyplot as plt\n'), ((371, 387), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (380, 387), True, 'import matplotlib.pyplot as plt\n'), ((392, 402), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (400, 402), True, 'import matplotlib.pyplot as plt\n'), ((688, 729), 'numpy.random.randint', 'np.random.randint', (['(0)'], {'high': '(256)', 'size': '(3,)'}), '(0, high=256, size=(3,))\n', (705, 729), True, 'import numpy as np\n')] |
from kneuralnet.nn import NeuralNet
import pytest
import numpy as np
# Truth tables: the first two columns are inputs, the last is the target.
data = np.array([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])  # XOR
X = data[:, :-1].copy()
Y = data[:, -1:].copy()

nand = np.array([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]])  # NAND
nX = nand[:, :-1].copy()
nY = nand[:, -1:].copy()
def test_xor_11():
    """XOR(1, 1) should round to 0."""
    net = NeuralNet(Y, X, layers=[4, 1])
    net.learn()
    prediction = net.predict(np.array([1, 1]))
    assert round(prediction['yhat'][0]) == 0.0
def test_xor_00():
    """XOR(0, 0) should round to 0."""
    net = NeuralNet(Y, X, layers=[4, 1])
    net.learn()
    prediction = net.predict(np.array([0, 0]))
    assert round(prediction['yhat'][0]) == 0.0
def test_xor_10():
    """XOR(1, 0) should round to 1."""
    net = NeuralNet(Y, X, layers=[4, 1])
    net.learn()
    prediction = net.predict(np.array([1, 0]))
    assert round(prediction['yhat'][0]) == 1.0
def test_xor_01():
    """XOR(0, 1) should round to 1."""
    net = NeuralNet(Y, X, layers=[4, 1])
    net.learn()
    prediction = net.predict(np.array([0, 1]))
    assert round(prediction['yhat'][0]) == 1.0
def test_xor_01_b1():
    """XOR(0, 1) should round to 1 even with a bias unit."""
    net = NeuralNet(Y, X, layers=[4, 1], bias=1)
    net.learn()
    prediction = net.predict(np.array([0, 1]))
    assert round(prediction['yhat'][0]) == 1.0
def test_nand_01():
    """NAND(0, 1) should round to 1."""
    net = NeuralNet(nY, nX, layers=[3, 1], bias=1)
    net.learn()
    prediction = net.predict(np.array([0, 1]))
    assert round(prediction['yhat'][0]) == 1.0
def test_nand_11():
    """NAND(1, 1) should round to 0."""
    net = NeuralNet(nY, nX, layers=[3, 1], bias=1)
    net.learn()
    prediction = net.predict(np.array([1, 1]))
    assert round(prediction['yhat'][0]) == 0.0
| [
"kneuralnet.nn.NeuralNet",
"numpy.array"
] | [((78, 132), 'numpy.array', 'np.array', (['[[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 0, 0], [0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (86, 132), True, 'import numpy as np\n'), ((180, 203), 'numpy.array', 'np.array', (['data[:, 0:-1]'], {}), '(data[:, 0:-1])\n', (188, 203), True, 'import numpy as np\n'), ((240, 294), 'numpy.array', 'np.array', (['[[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]]'], {}), '([[0, 0, 1], [0, 1, 1], [1, 0, 1], [1, 1, 0]])\n', (248, 294), True, 'import numpy as np\n'), ((343, 366), 'numpy.array', 'np.array', (['nand[:, 0:-1]'], {}), '(nand[:, 0:-1])\n', (351, 366), True, 'import numpy as np\n'), ((207, 230), 'numpy.array', 'np.array', (['[data[:, -1]]'], {}), '([data[:, -1]])\n', (215, 230), True, 'import numpy as np\n'), ((371, 394), 'numpy.array', 'np.array', (['[nand[:, -1]]'], {}), '([nand[:, -1]])\n', (379, 394), True, 'import numpy as np\n'), ((425, 455), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['Y', 'X'], {'layers': '[4, 1]'}), '(Y, X, layers=[4, 1])\n', (434, 455), False, 'from kneuralnet.nn import NeuralNet\n'), ((578, 608), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['Y', 'X'], {'layers': '[4, 1]'}), '(Y, X, layers=[4, 1])\n', (587, 608), False, 'from kneuralnet.nn import NeuralNet\n'), ((731, 761), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['Y', 'X'], {'layers': '[4, 1]'}), '(Y, X, layers=[4, 1])\n', (740, 761), False, 'from kneuralnet.nn import NeuralNet\n'), ((884, 914), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['Y', 'X'], {'layers': '[4, 1]'}), '(Y, X, layers=[4, 1])\n', (893, 914), False, 'from kneuralnet.nn import NeuralNet\n'), ((1040, 1078), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['Y', 'X'], {'layers': '[4, 1]', 'bias': '(1)'}), '(Y, X, layers=[4, 1], bias=1)\n', (1049, 1078), False, 'from kneuralnet.nn import NeuralNet\n'), ((1202, 1242), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['nY', 'nX'], {'layers': '[3, 1]', 'bias': '(1)'}), '(nY, nX, layers=[3, 1], bias=1)\n', (1211, 1242), False, 'from kneuralnet.nn 
import NeuralNet\n'), ((1366, 1406), 'kneuralnet.nn.NeuralNet', 'NeuralNet', (['nY', 'nX'], {'layers': '[3, 1]', 'bias': '(1)'}), '(nY, nX, layers=[3, 1], bias=1)\n', (1375, 1406), False, 'from kneuralnet.nn import NeuralNet\n'), ((491, 507), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (499, 507), True, 'import numpy as np\n'), ((644, 660), 'numpy.array', 'np.array', (['[0, 0]'], {}), '([0, 0])\n', (652, 660), True, 'import numpy as np\n'), ((797, 813), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (805, 813), True, 'import numpy as np\n'), ((950, 966), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (958, 966), True, 'import numpy as np\n'), ((1114, 1130), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1122, 1130), True, 'import numpy as np\n'), ((1278, 1294), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (1286, 1294), True, 'import numpy as np\n'), ((1442, 1458), 'numpy.array', 'np.array', (['[1, 1]'], {}), '([1, 1])\n', (1450, 1458), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""steering: calculates whether to brake, or where to turn, and publishes it.
Uses ackerman's steering to determine where to turn.
Subscribes:
world_model Image calculated model of movability around the car
lidar_model Image model of drivable (obstacle-free) area in front of car
Publishes:
auto_car_pose CarPose with command (whether to brake, where to turn)
tentacle_frame Image visualization of CarPose, for mission_control
"""
from __future__ import division
# TODO(irapha): remove this line when #126 is fixed.
import cv2
import numpy as np
import rospy
from buzzmobile.msg import CarPose
from cv_bridge import CvBridge
from image_utils import draw_pose_viz, create_tentacle_mask
from sensor_msgs.msg import Image
g = {} # globals: mutable module state shared between ROS callbacks
g['lidar_model'] = None  # latest drivable-area image from lidar (cv2 mono8)
bridge = CvBridge()  # converts between ROS Image messages and cv2 arrays
# Publishers: the steering command and a debug visualization frame.
pub = rospy.Publisher('auto_car_pose', CarPose, queue_size=1)
TENTACLE_PUB = rospy.Publisher('tentacle_frame', Image, queue_size=1)
# Tunables pulled from the ROS parameter server.
PIXELS_PER_M = rospy.get_param('pixels_per_m')
HEIGHT = rospy.get_param('image_height')
WIDTH = rospy.get_param('image_width')
MAX_ANGLE = rospy.get_param('max_steering_angle', 1.0)  # radians
TRAVEL_DISTANCE = rospy.get_param('travel_distance')  # meters per tentacle step
NUM_POINTS = rospy.get_param('num_points_in_tentacle')
WHEEL_BASE = rospy.get_param('wheel_base')
ANGLE_MULTIPLIER = rospy.get_param('angle_multiplier')
BRAKING_DISTANCE = rospy.get_param('braking_distance')  # meters
THRESHHOLD = rospy.get_param('braking_score_threshhold')
BUZZMOBILE_WIDTH = rospy.get_param('buzzmobile_width')
MAX_SPEED = rospy.get_param('max_speed')
# Filled circle of radius BRAKING_DISTANCE (in pixels) around the car's
# position at the bottom-center of the image; limits the braking check to
# the immediate future.
IMMEDIATE_FUTURE_MASK = np.zeros((HEIGHT, WIDTH), np.uint8)
cv2.circle(IMMEDIATE_FUTURE_MASK, (WIDTH//2, HEIGHT),
        int(BRAKING_DISTANCE * PIXELS_PER_M), [255, 255, 255], -1)
def steer(ros_world_model):
    """Performs best path and brake calculations, and publishes a car_pose.

    Callback for the world_model topic: picks the best-scoring tentacle
    through the drivable-area image, decides whether to brake from the
    cached lidar model, then publishes the CarPose command plus a
    visualization frame for mission_control.
    """
    # convert RosImage to cv2 (squeeze drops any singleton channel axis)
    world_frame = np.squeeze(bridge.imgmsg_to_cv2(ros_world_model, 'mono8'))
    # pick tentacle starting from the bottom-center of the frame
    height, width = world_frame.shape
    points, angle = pick_tentacle(width//2, height, world_frame)
    pose = CarPose()
    pose.mode = 'auto'
    # check our path for obstacles; a braking pose carries no angle/velocity
    if should_brake(points, g['lidar_model']):
        pose.brake = True
    else:
        pose.angle = angle
        pose.velocity = MAX_SPEED
    # publish carpose
    pub.publish(pose)
    # publish drawn tentacle -- presumably draw_pose_viz draws on world_frame
    # in place, since the frame is converted and published right after
    draw_pose_viz(pose, world_frame, points, (BRAKING_DISTANCE * PIXELS_PER_M))
    tentacle_frame = bridge.cv2_to_imgmsg(world_frame)
    TENTACLE_PUB.publish(tentacle_frame)
def set_lidar_model(new_lidar_model):
    """Callback: cache the latest lidar model as a cv2 mono8 image."""
    g['lidar_model'] = bridge.imgmsg_to_cv2(new_lidar_model, 'mono8')
def should_brake(points, lidar_model):
    """Returns whether we should brake, given best path and obstacle model.

    Scores the portion of the lidar (drivable-area) model that lies inside
    both the chosen tentacle's swath and the immediate-future circle; brakes
    when the drivable fraction falls below THRESHHOLD.
    """
    if lidar_model is None:
        rospy.loginfo('braking because no lidar image received.')
        return True
    tentacle_mask = create_tentacle_mask(points, HEIGHT, WIDTH,
            BUZZMOBILE_WIDTH, PIXELS_PER_M)
    immediate_path_mask = cv2.bitwise_and(IMMEDIATE_FUTURE_MASK, tentacle_mask)
    lidar_model_path = cv2.bitwise_and(lidar_model, lidar_model,
            mask=immediate_path_mask)
    # must use np.sum. cv2 uses ints to store pixels, so sum(sum()) overflows.
    denominator = float(np.sum(np.sum(immediate_path_mask)))
    if denominator == 0.0:
        # BUG FIX: with an empty mask the original 0/0 division produced
        # NaN, and 'nan < THRESHHOLD' is False -- so the car would NOT
        # brake precisely when it had no drivable path to evaluate.
        # Mirror score_tentacle's guard and brake instead.
        rospy.logerr('immediate path mask has no pixels; braking')
        return True
    score = np.sum(np.sum(lidar_model_path)) / denominator
    return score < THRESHHOLD
def turning_radius(steering_angle):
    """Returns the turning radius for a steering angle (rad).

    A zero angle means driving straight, i.e. an infinite radius.
    """
    driving_straight = steering_angle == 0
    return float('inf') if driving_straight \
        else abs(WHEEL_BASE / np.tan(steering_angle))
def ackerman_step(x_0, y_0, heading, steering_angle):
    """Single ackerman's steering pose estimation step.

    Advances the pose (x_0, y_0, heading) along an arc of constant
    steering_angle (radians) for TRAVEL_DISTANCE; positions are in pixel
    coordinates, hence the PIXELS_PER_M scaling.
    Returns the final x, y, and the new heading.
    """
    if steering_angle == 0:
        # Straight-line case. NOTE(review): only heading == 0 moves +y;
        # every other heading moves -y -- appears to assume headings of 0
        # or -pi (callers pass -pi), confirm before reusing elsewhere.
        y = y_0 + (PIXELS_PER_M * TRAVEL_DISTANCE) * (1 if heading == 0 else -1)
        return x_0, y, heading
    radius = turning_radius(steering_angle)
    # Arc angle swept while traveling TRAVEL_DISTANCE on this radius,
    # measured from the current heading.
    travel_angle = TRAVEL_DISTANCE / radius + heading
    x = x_0 + PIXELS_PER_M * radius * (
        np.cos(heading) - np.cos(travel_angle)) * np.sign(steering_angle)
    y = y_0 + PIXELS_PER_M * radius * (
        np.sin(travel_angle) - np.sin(heading))
    return x, y, travel_angle
def project_tentacle(x_0, y_0, heading, steering_angle,
        num_points):
    """Returns expected future positions using ackerman's steering.

    Iteratively computes num_points + 1 expected positions (in pixel
    coordinates), starting at (x_0, y_0), given a constant steering_angle
    (in radians) and TRAVEL_DISTANCE (in meters). Each returned point is
    rounded to integers, but the projection continues from the unrounded
    position so no error accumulates from rounding.
    """
    path = []
    x, y = x_0, y_0
    for _ in range(num_points + 1):
        x, y, heading = ackerman_step(x, y, heading, steering_angle)
        path.append((int(round(x)), int(round(y))))
    return path
def score_tentacle(points, frame):
    """Returns a tentacle's score from 0 to 1.

    Masks the frame with the tentacle's points, sums the masked pixels and
    normalizes by the sum of the mask itself.
    """
    mask = create_tentacle_mask(points, HEIGHT, WIDTH,
                                BUZZMOBILE_WIDTH, PIXELS_PER_M)
    # np.sum avoids the int overflow sum(sum()) would hit on cv2 images.
    denom = np.sum(np.sum(mask))
    if denom == 0.0:
        rospy.logerr('Mask has no pixels')
        return 0.0
    masked_frame = cv2.bitwise_and(frame, frame, mask=mask)
    # np.sum avoids the int overflow sum(sum()) would hit on cv2 images.
    return np.sum(np.sum(masked_frame)) / denom
def pick_tentacle(x_0, y_0, frame):
    """Returns (points, angle) of the tentacle with the highest score.

    Scores tentacles at evenly spaced steering angles in both turn
    directions with score_tentacle; ties are broken by the smaller-magnitude
    angle. The returned angle carries the sign of the winning branch.
    """
    # BUG FIX: np.linspace requires an integer `num`; the raw product can be
    # a float (e.g. MAX_ANGLE defaults to 1.0).
    angles = np.linspace(0.0, MAX_ANGLE, int(MAX_ANGLE * ANGLE_MULTIPLIER))
    best_score = -1
    best_points = []
    best_angle = 0
    for angle in angles:
        # One straight tentacle at 0, otherwise one tentacle per direction.
        candidates = [angle] if angle == 0 else [angle, -angle]
        for signed_angle in candidates:
            points = project_tentacle(x_0, y_0, -np.pi, signed_angle, NUM_POINTS)
            score = score_tentacle(points, frame)
            if score > best_score or (score == best_score
                    and abs(signed_angle) < abs(best_angle)):
                best_score = score
                best_points = points
                # BUG FIX: previously the unsigned loop angle was stored, so
                # a winning -angle (opposite-turn) tentacle reported +angle.
                best_angle = signed_angle
    return best_points, best_angle
def steering_node():
    """Initializes steering node.

    Subscribes to the world model (drives steering via ``steer``) and the
    lidar model (updates the obstacle model via ``set_lidar_model``), then
    spins forever handing control to ROS callbacks.
    """
    rospy.init_node('steering', anonymous=True)
    rospy.Subscriber('world_model', Image, steer)
    rospy.Subscriber('lidar_model', Image, set_lidar_model)
    rospy.spin()  # block; all work happens in the subscriber callbacks
if __name__ == '__main__': steering_node()  # start the ROS steering node when run as a script
| [
"rospy.logerr",
"rospy.Subscriber",
"numpy.sum",
"cv2.bitwise_and",
"numpy.sin",
"image_utils.draw_pose_viz",
"image_utils.create_tentacle_mask",
"numpy.tan",
"rospy.init_node",
"numpy.linspace",
"buzzmobile.msg.CarPose",
"rospy.loginfo",
"numpy.cos",
"cv_bridge.CvBridge",
"numpy.zeros",... | [((814, 824), 'cv_bridge.CvBridge', 'CvBridge', ([], {}), '()\n', (822, 824), False, 'from cv_bridge import CvBridge\n'), ((831, 886), 'rospy.Publisher', 'rospy.Publisher', (['"""auto_car_pose"""', 'CarPose'], {'queue_size': '(1)'}), "('auto_car_pose', CarPose, queue_size=1)\n", (846, 886), False, 'import rospy\n'), ((902, 956), 'rospy.Publisher', 'rospy.Publisher', (['"""tentacle_frame"""', 'Image'], {'queue_size': '(1)'}), "('tentacle_frame', Image, queue_size=1)\n", (917, 956), False, 'import rospy\n'), ((973, 1004), 'rospy.get_param', 'rospy.get_param', (['"""pixels_per_m"""'], {}), "('pixels_per_m')\n", (988, 1004), False, 'import rospy\n'), ((1014, 1045), 'rospy.get_param', 'rospy.get_param', (['"""image_height"""'], {}), "('image_height')\n", (1029, 1045), False, 'import rospy\n'), ((1054, 1084), 'rospy.get_param', 'rospy.get_param', (['"""image_width"""'], {}), "('image_width')\n", (1069, 1084), False, 'import rospy\n'), ((1097, 1139), 'rospy.get_param', 'rospy.get_param', (['"""max_steering_angle"""', '(1.0)'], {}), "('max_steering_angle', 1.0)\n", (1112, 1139), False, 'import rospy\n'), ((1158, 1192), 'rospy.get_param', 'rospy.get_param', (['"""travel_distance"""'], {}), "('travel_distance')\n", (1173, 1192), False, 'import rospy\n'), ((1206, 1247), 'rospy.get_param', 'rospy.get_param', (['"""num_points_in_tentacle"""'], {}), "('num_points_in_tentacle')\n", (1221, 1247), False, 'import rospy\n'), ((1261, 1290), 'rospy.get_param', 'rospy.get_param', (['"""wheel_base"""'], {}), "('wheel_base')\n", (1276, 1290), False, 'import rospy\n'), ((1310, 1345), 'rospy.get_param', 'rospy.get_param', (['"""angle_multiplier"""'], {}), "('angle_multiplier')\n", (1325, 1345), False, 'import rospy\n'), ((1365, 1400), 'rospy.get_param', 'rospy.get_param', (['"""braking_distance"""'], {}), "('braking_distance')\n", (1380, 1400), False, 'import rospy\n'), ((1414, 1457), 'rospy.get_param', 'rospy.get_param', (['"""braking_score_threshhold"""'], {}), 
"('braking_score_threshhold')\n", (1429, 1457), False, 'import rospy\n'), ((1477, 1512), 'rospy.get_param', 'rospy.get_param', (['"""buzzmobile_width"""'], {}), "('buzzmobile_width')\n", (1492, 1512), False, 'import rospy\n'), ((1525, 1553), 'rospy.get_param', 'rospy.get_param', (['"""max_speed"""'], {}), "('max_speed')\n", (1540, 1553), False, 'import rospy\n'), ((1581, 1616), 'numpy.zeros', 'np.zeros', (['(HEIGHT, WIDTH)', 'np.uint8'], {}), '((HEIGHT, WIDTH), np.uint8)\n', (1589, 1616), True, 'import numpy as np\n'), ((2090, 2099), 'buzzmobile.msg.CarPose', 'CarPose', ([], {}), '()\n', (2097, 2099), False, 'from buzzmobile.msg import CarPose\n'), ((2382, 2455), 'image_utils.draw_pose_viz', 'draw_pose_viz', (['pose', 'world_frame', 'points', '(BRAKING_DISTANCE * PIXELS_PER_M)'], {}), '(pose, world_frame, points, BRAKING_DISTANCE * PIXELS_PER_M)\n', (2395, 2455), False, 'from image_utils import draw_pose_viz, create_tentacle_mask\n'), ((2978, 3053), 'image_utils.create_tentacle_mask', 'create_tentacle_mask', (['points', 'HEIGHT', 'WIDTH', 'BUZZMOBILE_WIDTH', 'PIXELS_PER_M'], {}), '(points, HEIGHT, WIDTH, BUZZMOBILE_WIDTH, PIXELS_PER_M)\n', (2998, 3053), False, 'from image_utils import draw_pose_viz, create_tentacle_mask\n'), ((3092, 3145), 'cv2.bitwise_and', 'cv2.bitwise_and', (['IMMEDIATE_FUTURE_MASK', 'tentacle_mask'], {}), '(IMMEDIATE_FUTURE_MASK, tentacle_mask)\n', (3107, 3145), False, 'import cv2\n'), ((3169, 3236), 'cv2.bitwise_and', 'cv2.bitwise_and', (['lidar_model', 'lidar_model'], {'mask': 'immediate_path_mask'}), '(lidar_model, lidar_model, mask=immediate_path_mask)\n', (3184, 3236), False, 'import cv2\n'), ((5257, 5332), 'image_utils.create_tentacle_mask', 'create_tentacle_mask', (['points', 'HEIGHT', 'WIDTH', 'BUZZMOBILE_WIDTH', 'PIXELS_PER_M'], {}), '(points, HEIGHT, WIDTH, BUZZMOBILE_WIDTH, PIXELS_PER_M)\n', (5277, 5332), False, 'from image_utils import draw_pose_viz, create_tentacle_mask\n'), ((5604, 5653), 'cv2.bitwise_and', 'cv2.bitwise_and', 
(['frame', 'frame'], {'mask': 'tentacle_mask'}), '(frame, frame, mask=tentacle_mask)\n', (5619, 5653), False, 'import cv2\n'), ((6049, 6106), 'numpy.linspace', 'np.linspace', (['(0.0)', 'MAX_ANGLE', '(MAX_ANGLE * ANGLE_MULTIPLIER)'], {}), '(0.0, MAX_ANGLE, MAX_ANGLE * ANGLE_MULTIPLIER)\n', (6060, 6106), True, 'import numpy as np\n'), ((6868, 6911), 'rospy.init_node', 'rospy.init_node', (['"""steering"""'], {'anonymous': '(True)'}), "('steering', anonymous=True)\n", (6883, 6911), False, 'import rospy\n'), ((6916, 6961), 'rospy.Subscriber', 'rospy.Subscriber', (['"""world_model"""', 'Image', 'steer'], {}), "('world_model', Image, steer)\n", (6932, 6961), False, 'import rospy\n'), ((6966, 7021), 'rospy.Subscriber', 'rospy.Subscriber', (['"""lidar_model"""', 'Image', 'set_lidar_model'], {}), "('lidar_model', Image, set_lidar_model)\n", (6982, 7021), False, 'import rospy\n'), ((7026, 7038), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (7036, 7038), False, 'import rospy\n'), ((2879, 2936), 'rospy.loginfo', 'rospy.loginfo', (['"""braking because no lidar image received."""'], {}), "('braking because no lidar image received.')\n", (2892, 2936), False, 'import rospy\n'), ((5457, 5478), 'numpy.sum', 'np.sum', (['tentacle_mask'], {}), '(tentacle_mask)\n', (5463, 5478), True, 'import numpy as np\n'), ((5522, 5556), 'rospy.logerr', 'rospy.logerr', (['"""Mask has no pixels"""'], {}), "('Mask has no pixels')\n", (5534, 5556), False, 'import rospy\n'), ((3376, 3400), 'numpy.sum', 'np.sum', (['lidar_model_path'], {}), '(lidar_model_path)\n', (3382, 3400), True, 'import numpy as np\n'), ((3673, 3695), 'numpy.tan', 'np.tan', (['steering_angle'], {}), '(steering_angle)\n', (3679, 3695), True, 'import numpy as np\n'), ((4199, 4222), 'numpy.sign', 'np.sign', (['steering_angle'], {}), '(steering_angle)\n', (4206, 4222), True, 'import numpy as np\n'), ((5761, 5789), 'numpy.sum', 'np.sum', (['tentacle_score_image'], {}), '(tentacle_score_image)\n', (5767, 5789), True, 'import numpy as 
np\n'), ((3430, 3457), 'numpy.sum', 'np.sum', (['immediate_path_mask'], {}), '(immediate_path_mask)\n', (3436, 3457), True, 'import numpy as np\n'), ((4275, 4295), 'numpy.sin', 'np.sin', (['travel_angle'], {}), '(travel_angle)\n', (4281, 4295), True, 'import numpy as np\n'), ((4298, 4313), 'numpy.sin', 'np.sin', (['heading'], {}), '(heading)\n', (4304, 4313), True, 'import numpy as np\n'), ((4157, 4172), 'numpy.cos', 'np.cos', (['heading'], {}), '(heading)\n', (4163, 4172), True, 'import numpy as np\n'), ((4175, 4195), 'numpy.cos', 'np.cos', (['travel_angle'], {}), '(travel_angle)\n', (4181, 4195), True, 'import numpy as np\n')] |
"""
Manually defined network - directly by ndarray or with function.
The data structure to generate and manage connections between neurons.
Contains generation, arithmetic and get operations.
Updates are handled in spikey.snn.Synapse objects.
"""
import numpy as np
from spikey.module import Key
from spikey.snn.weight.template import Weight
class Manual(Weight):
    """
    Manually defined connection matrix, given directly as an ndarray or
    produced by a generator function.

    Data structure to generate and manage connections between neurons.
    Contains generation, arithmetic and get operations; updates are handled
    in spikey.snn.Synapse objects.

    .. note::
        Weight._matrix must be a masked ndarray with fill_value=0 while
        Weight.matrix is a simple ndarray.

    Parameters
    ----------
    kwargs: dict
        Dictionary with values for each key in NECESSARY_KEYS.
    """

    NECESSARY_KEYS = Weight.extend_keys(
        [
            Key("matrix", "ndarray/func Matrix to use/generate."),
        ]
    )

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        matrix = self._matrix
        # A callable generates the matrix from this Weight instance; a list
        # of per-layer ndarrays is merged into one feedforward matrix.
        if callable(matrix):
            matrix = matrix(self)
        elif isinstance(matrix, list) and isinstance(matrix[0], np.ndarray):
            matrix = self._convert_feedforward(matrix)
        if hasattr(matrix, "mask"):
            # Already masked: normalize the fill value, then copy defensively.
            matrix.fill_value = 0
            matrix = np.ma.copy(matrix)
        else:
            # Mask out zero entries -- they represent absent synapses.
            matrix = np.ma.array(matrix, mask=(matrix == 0), fill_value=0)
        self._matrix = np.ma.clip(matrix, 0, self._max_weight)
        self._assert_matrix_shape(self._matrix, key="matrix")
| [
"numpy.ma.copy",
"numpy.ma.array",
"spikey.module.Key",
"numpy.ma.clip"
] | [((2421, 2445), 'numpy.ma.copy', 'np.ma.copy', (['self._matrix'], {}), '(self._matrix)\n', (2431, 2445), True, 'import numpy as np\n'), ((2469, 2514), 'numpy.ma.clip', 'np.ma.clip', (['self._matrix', '(0)', 'self._max_weight'], {}), '(self._matrix, 0, self._max_weight)\n', (2479, 2514), True, 'import numpy as np\n'), ((1795, 1848), 'spikey.module.Key', 'Key', (['"""matrix"""', '"""ndarray/func Matrix to use/generate."""'], {}), "('matrix', 'ndarray/func Matrix to use/generate.')\n", (1798, 1848), False, 'from spikey.module import Key\n'), ((2248, 2311), 'numpy.ma.array', 'np.ma.array', (['self._matrix'], {'mask': '(self._matrix == 0)', 'fill_value': '(0)'}), '(self._matrix, mask=self._matrix == 0, fill_value=0)\n', (2259, 2311), True, 'import numpy as np\n')] |
import numpy as np
from numpy import ndarray
from sklearn.neighbors import KNeighborsRegressor
__all__ = ["GridSampler"]
class GridSampler(object):
    def __init__(self, data: ndarray, longitude: ndarray, latitude: ndarray, k: int = 5):
        """
        Fit a k-nearest-neighbour interpolator over scattered grid points.

        :param data: dbz values (radar reflectivity), 1-D
        :param longitude: longitudes, 1-D
        :param latitude: latitudes, 1-D
        :param k: number of neighbours to use, must be > 1

        All parameters must be one-dimensional arrays.
        """
        assert data.ndim == 1 and longitude.ndim == 1 and latitude.ndim == 1 and k > 1
        longitude, self.lon_avg, self.lon_std = _normalize(longitude)
        latitude, self.lat_avg, self.lat_std = _normalize(latitude)
        inputs = np.dstack([longitude, latitude])[0]
        self.knn = KNeighborsRegressor(n_neighbors=k, weights="distance")
        # KNN cannot handle NaNs; replace missing radar values with 0.
        # BUG FIX: previously this wrote into the caller's array in place
        # (`data[np.isnan(data)] = 0`), silently mutating it.
        data = np.nan_to_num(data, nan=0.0)
        self.knn.fit(inputs, data)

    def map_data(self, lon: ndarray, lat: ndarray) -> ndarray:
        """Interpolate fitted values on a lon/lat grid; output keeps lon's shape."""
        shape = lon.shape
        # Standardize with the statistics captured at fit time.
        lon = (lon.ravel() - self.lon_avg) / self.lon_std
        lat = (lat.ravel() - self.lat_avg) / self.lat_std
        inputs = np.dstack([lon, lat])[0]
        return self.knn.predict(inputs).reshape(shape)
def _normalize(data: ndarray):
avg = data.mean()
std = data.std()
data = (data - avg) / std
return data, avg, std
| [
"numpy.dstack",
"sklearn.neighbors.KNeighborsRegressor",
"numpy.isnan"
] | [((883, 937), 'sklearn.neighbors.KNeighborsRegressor', 'KNeighborsRegressor', ([], {'n_neighbors': 'k', 'weights': '"""distance"""'}), "(n_neighbors=k, weights='distance')\n", (902, 937), False, 'from sklearn.neighbors import KNeighborsRegressor\n'), ((828, 860), 'numpy.dstack', 'np.dstack', (['[longitude, latitude]'], {}), '([longitude, latitude])\n', (837, 860), True, 'import numpy as np\n'), ((951, 965), 'numpy.isnan', 'np.isnan', (['data'], {}), '(data)\n', (959, 965), True, 'import numpy as np\n'), ((1293, 1314), 'numpy.dstack', 'np.dstack', (['[lon, lat]'], {}), '([lon, lat])\n', (1302, 1314), True, 'import numpy as np\n')] |
def read(filename, path='dataset/'):
    """Load the comma-separated data file at ``path + filename`` as a float array."""
    import numpy as np
    full_path = path + filename
    return np.loadtxt(full_path, delimiter=',')
def center(data):
    '''
    Subtract the mean of column 2 from that column, in place,
    and return the removed mean.
    '''
    import numpy as np
    col_mean = np.mean(data[:, 2])
    data[:, 2] -= col_mean
    return col_mean
def data_to_list(N, M, data):
    '''
    data -> rows of (user, item, score).
    Return a list of [item, score] pairs per user and
    a list of [user, score] pairs per item,
    indexed by id (ids run from 0 to N / M inclusive).
    '''
    user_list = [[] for _ in range(N + 1)]
    item_list = [[] for _ in range(M + 1)]
    for row in data:
        u_id, i_id, score = int(row[0]), int(row[1]), row[2]
        user_list[u_id].append([i_id, score])
        item_list[i_id].append([u_id, score])
    return user_list, item_list
def plot(x, y, xlabel, ylabel, title, color):
    """Draw x vs y as a colored line with circle markers, label the axes, and show."""
    import matplotlib.pyplot as plt
    plt.plot(x, y, color=color)
    plt.plot(x, y, color + 'o')  # overlay circle markers in the same color
    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(ylabel)
    plt.show()
def edge_list_to_adj_list(MAX_ID, edge_list, LIMIT=-1):
    '''
    Build adjacency lists from an edge list.

    MAX_ID -> maximum node id; edges touching a larger id are dropped
    LIMIT -> maximum outdegree per node; -1 (default) means no limit
    return G, G^T as lists of neighbour lists indexed by node id
    '''
    # (removed an unused local `import numpy as np` -- nothing here uses numpy)
    adj = [[] for _ in range(MAX_ID + 1)]
    adjT = [[] for _ in range(MAX_ID + 1)]
    for edge in edge_list:
        [u, v] = edge
        u, v = int(u), int(v)
        # Skip out-of-range nodes and respect the outdegree cap on u.
        if max(u, v) <= MAX_ID and (LIMIT == -1 or len(adj[u]) < LIMIT):
            adj[u].append(v)
            adjT[v].append(u)
    return adj, adjT
| [
"matplotlib.pyplot.title",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"numpy.mean",
"numpy.loadtxt",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel"
] | [((71, 113), 'numpy.loadtxt', 'np.loadtxt', (['(path + filename)'], {'delimiter': '""","""'}), "(path + filename, delimiter=',')\n", (81, 113), True, 'import numpy as np\n'), ((218, 237), 'numpy.mean', 'np.mean', (['data[:, 2]'], {}), '(data[:, 2])\n', (225, 237), True, 'import numpy as np\n'), ((861, 888), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y'], {'color': 'color'}), '(x, y, color=color)\n', (869, 888), True, 'import matplotlib.pyplot as plt\n'), ((893, 920), 'matplotlib.pyplot.plot', 'plt.plot', (['x', 'y', "(color + 'o')"], {}), "(x, y, color + 'o')\n", (901, 920), True, 'import matplotlib.pyplot as plt\n'), ((923, 939), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (932, 939), True, 'import matplotlib.pyplot as plt\n'), ((944, 962), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['ylabel'], {}), '(ylabel)\n', (954, 962), True, 'import matplotlib.pyplot as plt\n'), ((967, 985), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['xlabel'], {}), '(xlabel)\n', (977, 985), True, 'import matplotlib.pyplot as plt\n'), ((990, 1000), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (998, 1000), True, 'import matplotlib.pyplot as plt\n')] |
import numpy as np
import pandas as pd
from scipy.special import factorial
from itertools import combinations, permutations, chain
from warnings import warn
class Kruskals(object):
    """
    Class to run William Kruskal's algorithm

    Parameters
    ----------
    ndarr : numpy.ndarray (dtype: float/int)
        non-aggregated 2-dimensional array containing
        independent variables on the vertical axis and (usually)
        respondent level data on the horizontal axis
    arr : numpy.ndarray (dtype: float/int)
        1-dimensional array of the dependent variable associated with ndarr
    """
    def __init__(self, ndarr, arr, i_vars=None):
        # Drop rows where the dependent variable is NaN; the same row filter
        # is applied to the independent variables to keep them aligned.
        self._arr = arr[~np.isnan(arr)]
        self._ndarr = ndarr[~np.isnan(arr)]
        self._driver_score = None  # lazy cache, filled by driver_score()
        self._i_vars = i_vars
        if self._arr.shape[0] < arr.shape[0]:
            warn("NaN values have been removed from the dependent variable")
        if i_vars is not None and len(i_vars) != ndarr.shape[1]:
            raise ValueError("driver labels: {}, not sufficient for ndarray of shape {}".format(i_vars, ndarr.shape))

    @staticmethod
    def from_pandas_df(df, i_vars, d_var):
        """
        Helper method to pre-process a pandas data frame in order to run Kruskal's algorithm
        analysis

        Parameters
        ----------
        df : pandas.DataFrame
            the dataframe with the dependent and independent variables in which
            to slice from
        i_vars : array-like
            list of the column names for the independent variables
        d_var : string
            the name of the dependent variable in the dataframe
        """
        ind_df = df[i_vars]
        ind_values = ind_df.values
        dep_values = df[d_var].values
        return Kruskals(ind_values, dep_values, i_vars)

    def driver_score_to_series(self, directional=False, percentage=False):
        """
        Returns the driver score for each variable in the independent set
        as a pandas series
        """
        series = pd.Series(self.driver_score(directional, percentage), index=self._i_vars)
        series.name = 'score'
        series.index.name = 'driver'
        return series

    def driver_score(self, directional=False, percentage=False):
        """
        Calculate the driver score for all independent variables.

        The result is cached on first call. `directional` multiplies each
        score by the sign of its simple correlation with the dependent
        variable; `percentage` rescales scores to sum to 100 in absolute value.
        """
        if self._driver_score is None:
            ind_c, m_ij, m_ijm = self.generate_diff(self._ndarr, self._arr)
            # Weight the pairwise term by the number of pairs per driver.
            m_ij_row_mean = np.nanmean(m_ij, axis=1) * (ind_c - 1)
            # Number of triple terms per driver: (n-1)! / (2 * (n-3)!)
            fact = factorial(ind_c - 1) / (2 * factorial(ind_c - 3))
            m_ijm_row_mean = np.nanmean(m_ijm, axis=1) * fact
            self._driver_score = (m_ij_row_mean + m_ijm_row_mean) / ((ind_c - 1) + fact)
            self._driver_score = np.nan_to_num(self._driver_score)
        driver_score = self._driver_score
        if directional:
            driver_score = driver_score * np.apply_along_axis(self.correlation_coef, 0, self._ndarr, self._arr)
        if percentage:
            return driver_score / np.fabs(driver_score).sum() * 100
        else:
            return driver_score

    def percentage(self, directional=False):
        """ Distance as a relative percentage (deprecated) """
        warn("percentage() has been deprecated, please use driver_score(percentage=True)")
        return self.driver_score(directional) / np.fabs(self.driver_score(directional)).sum() * 100

    def generate_diff(self, ndarr, arr):
        """
        Internal method to calculate the partial correlation squared between
        the independent and the dependent variables.

        Returns (number of independent variables, pairwise partial r^2
        matrix, triple partial r^2 matrix).
        """
        l = ndarr.shape[1]
        # Covariance of (x_j, y, x_i) for every ordered pair of drivers.
        cov_ij = tuple(
            np.cov(np.array([ndarr[:,j], arr, ndarr[:,i]]))
            for j, i in permutations(range(l), 2)
        )
        if len(cov_ij) == 0:
            # Fewer than two independent variables: nothing to partial out.
            return (l, np.empty((1, l-1)) * np.nan, np.empty((l,1)) * np.nan)
        pinv_ij = np.linalg.pinv(cov_ij, hermitian=True)
        # Squared partial correlation from the inverse covariance matrix:
        # r^2 = p01^2 / (p00 * p11).
        pcor_ij = ((pinv_ij[:, 0, 1] * pinv_ij[:, 0, 1]) / (pinv_ij[:, 0, 0] * pinv_ij[:, 1, 1]))
        # Same, controlling for two other drivers (j and m) at once.
        cov_mij = [
            np.cov(np.array([ndarr[:,i], arr, ndarr[:,j], ndarr[:, m]]))
            for i, j in permutations(range(l), 2)
            for m in range(j+1, l) if m != i
        ]
        if len(cov_mij) == 0:
            return (l, pcor_ij.reshape(-1, l-1), np.empty((l,1)) * np.nan)
        pinv_mij = np.linalg.pinv(cov_mij, hermitian=True)
        pcor_mij = ((pinv_mij[:, 0, 1] * pinv_mij[:, 0, 1]) / (pinv_mij[:, 0, 0] * pinv_mij[:, 1, 1]))
        return (l, pcor_ij.reshape(-1, l-1), pcor_mij.reshape(l, -1))

    @staticmethod
    def correlation_coef(ind, dep):
        # Sign (+1/-1) of the simple correlation between one driver and y.
        return 1 if np.corrcoef(ind, dep)[0][1] >= 0 else -1
| [
"scipy.special.factorial",
"numpy.nan_to_num",
"numpy.empty",
"numpy.corrcoef",
"numpy.isnan",
"numpy.apply_along_axis",
"numpy.fabs",
"numpy.array",
"warnings.warn",
"numpy.linalg.pinv",
"numpy.nanmean"
] | [((3276, 3368), 'warnings.warn', 'warn', (['"""percentage() has been deprecated, please use driver_score(percentage=True)"""'], {}), "(\n 'percentage() has been deprecated, please use driver_score(percentage=True)'\n )\n", (3280, 3368), False, 'from warnings import warn\n'), ((3952, 3990), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov_ij'], {'hermitian': '(True)'}), '(cov_ij, hermitian=True)\n', (3966, 3990), True, 'import numpy as np\n'), ((4414, 4453), 'numpy.linalg.pinv', 'np.linalg.pinv', (['cov_mij'], {'hermitian': '(True)'}), '(cov_mij, hermitian=True)\n', (4428, 4453), True, 'import numpy as np\n'), ((893, 957), 'warnings.warn', 'warn', (['"""NaN values have been removed from the dependent variable"""'], {}), "('NaN values have been removed from the dependent variable')\n", (897, 957), False, 'from warnings import warn\n'), ((2823, 2856), 'numpy.nan_to_num', 'np.nan_to_num', (['self._driver_score'], {}), '(self._driver_score)\n', (2836, 2856), True, 'import numpy as np\n'), ((711, 724), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (719, 724), True, 'import numpy as np\n'), ((755, 768), 'numpy.isnan', 'np.isnan', (['arr'], {}), '(arr)\n', (763, 768), True, 'import numpy as np\n'), ((2531, 2555), 'numpy.nanmean', 'np.nanmean', (['m_ij'], {'axis': '(1)'}), '(m_ij, axis=1)\n', (2541, 2555), True, 'import numpy as np\n'), ((2589, 2609), 'scipy.special.factorial', 'factorial', (['(ind_c - 1)'], {}), '(ind_c - 1)\n', (2598, 2609), False, 'from scipy.special import factorial\n'), ((2668, 2693), 'numpy.nanmean', 'np.nanmean', (['m_ijm'], {'axis': '(1)'}), '(m_ijm, axis=1)\n', (2678, 2693), True, 'import numpy as np\n'), ((2965, 3034), 'numpy.apply_along_axis', 'np.apply_along_axis', (['self.correlation_coef', '(0)', 'self._ndarr', 'self._arr'], {}), '(self.correlation_coef, 0, self._ndarr, self._arr)\n', (2984, 3034), True, 'import numpy as np\n'), ((4129, 4183), 'numpy.array', 'np.array', (['[ndarr[:, i], arr, ndarr[:, j], ndarr[:, m]]'], {}), 
'([ndarr[:, i], arr, ndarr[:, j], ndarr[:, m]])\n', (4137, 4183), True, 'import numpy as np\n'), ((2617, 2637), 'scipy.special.factorial', 'factorial', (['(ind_c - 3)'], {}), '(ind_c - 3)\n', (2626, 2637), False, 'from scipy.special import factorial\n'), ((3724, 3765), 'numpy.array', 'np.array', (['[ndarr[:, j], arr, ndarr[:, i]]'], {}), '([ndarr[:, j], arr, ndarr[:, i]])\n', (3732, 3765), True, 'import numpy as np\n'), ((3878, 3898), 'numpy.empty', 'np.empty', (['(1, l - 1)'], {}), '((1, l - 1))\n', (3886, 3898), True, 'import numpy as np\n'), ((3907, 3923), 'numpy.empty', 'np.empty', (['(l, 1)'], {}), '((l, 1))\n', (3915, 3923), True, 'import numpy as np\n'), ((4368, 4384), 'numpy.empty', 'np.empty', (['(l, 1)'], {}), '((l, 1))\n', (4376, 4384), True, 'import numpy as np\n'), ((4703, 4724), 'numpy.corrcoef', 'np.corrcoef', (['ind', 'dep'], {}), '(ind, dep)\n', (4714, 4724), True, 'import numpy as np\n'), ((3092, 3113), 'numpy.fabs', 'np.fabs', (['driver_score'], {}), '(driver_score)\n', (3099, 3113), True, 'import numpy as np\n')] |
import math
import os
import random
import cv2 as cv
import numpy as np
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import tensorflow as tf
import tfrecord_creator
from config import im_size, unknown_code, fg_path, bg_path, a_path, num_valid, valid_ratio
from utils import safe_crop, parse_args, maybe_random_interp
# NOTE(review): `global` at module scope is a no-op; left byte-for-byte.
global args
args = parse_args()  # command-line configuration for the data generator
num_fgs = 431  # matches the number of entries read from training_fg_names.txt -- TODO confirm
num_bgs_per_fg = 100
num_bgs = num_fgs * num_bgs_per_fg
# Data augmentation and normalization for training
# Just normalization for validation
data_transforms = {
    'train': transforms.Compose([
        transforms.ColorJitter(brightness=0.125, contrast=0.125, saturation=0.125),
        transforms.ToTensor(),
        # ImageNet channel means / standard deviations
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'valid': transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
def return_raw_image(dataset):
    """Decode the raw JPEG bytes of every record in *dataset*; return the images."""
    return [tf.image.decode_jpeg(record['image'].numpy()) for record in dataset]
fg_dataset = tfrecord_creator.read("fg", "./data/tfrecord/")
bg_dataset = tfrecord_creator.read("bg", "./data/tfrecord/")
a_dataset  = tfrecord_creator.read("a",  "./data/tfrecord/")
# Materialize the TFRecord iterators so records can be indexed by position
# in get_raw() below.
fg_dataset = list(fg_dataset)
bg_dataset = list(bg_dataset)
a_dataset  = list(a_dataset)
# fg_raw = return_raw_image(fg_dataset)
# bg_raw = return_raw_image(bg_dataset)
# a_raw  = return_raw_image(a_dataset)
def get_raw(type_of_dataset, count):
    """Fetch record *count* from the named dataset ('fg', 'bg' or 'a')
    and decode its JPEG payload to an ndarray.

    Alpha masks ('a') are decoded with channels=0, i.e. whatever channel
    count is stored in the JPEG; fg/bg are forced to 3 channels.
    """
    if type_of_dataset == 'fg':
        raw, channels = fg_dataset[count]['image'], 3
    elif type_of_dataset == 'bg':
        raw, channels = bg_dataset[count]['image'], 3
    else:
        raw, channels = a_dataset[count]['image'], 0
    decoded = tf.image.decode_jpeg(raw, channels=channels)
    return np.asarray(decoded)
# 3x3 elliptical structuring element (module-level; note gen_trimap builds its own).
kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (3, 3))
# Ordered foreground/background file name lists; indices in composite names
# (e.g. "12_345") refer to positions in these lists.
with open('Combined_Dataset/Training_set/training_fg_names.txt') as f:
    fg_files = f.read().splitlines()
with open('Combined_Dataset/Training_set/training_bg_names.txt') as f:
    bg_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_fg_names.txt') as f:
    fg_test_files = f.read().splitlines()
with open('Combined_Dataset/Test_set/test_bg_names.txt') as f:
    bg_test_files = f.read().splitlines()
def get_alpha(name):
    """Load the training alpha mask whose foreground index prefixes *name*."""
    fg_index = int(name.split("_")[0])
    mask_path = os.path.join('data/mask', fg_files[fg_index])
    return cv.imread(mask_path, 0)  # 0 -> load as grayscale
def get_alpha_test(name):
    """Load the test alpha mask whose foreground index prefixes *name*."""
    fg_index = int(name.split("_")[0])
    mask_path = os.path.join('data/mask_test', fg_test_files[fg_index])
    return cv.imread(mask_path, 0)  # 0 -> load as grayscale
def composite4(fg, bg, a, w, h):
    """Alpha-composite foreground over a random (w, h) crop of the background.

    fg: (h, w, C) foreground image; bg: background, grayscale or color, at
    least (h, w); a: (h, w) alpha mask in [0, 255]. Returns the uint8
    composite plus the (unchanged) alpha and float32 fg/bg used.
    """
    fg = np.array(fg, np.float32)
    bg_h, bg_w = bg.shape[:2]
    # Pick a random crop origin when the background is larger than needed.
    x = np.random.randint(0, bg_w - w) if bg_w > w else 0
    y = np.random.randint(0, bg_h - h) if bg_h > h else 0
    # BUG FIX: the original reshaped a 2-dim bg to (h, w, 1) BEFORE cropping,
    # which raises for grayscale backgrounds larger than (h, w). Crop first;
    # the reshape below adds the channel axis either way.
    bg = np.array(bg[y:y + h, x:x + w], np.float32)
    bg = np.reshape(bg, (h, w, -1))  # grayscale becomes (h, w, 1)
    fg = np.reshape(fg, (h, w, -1))
    alpha = np.zeros((h, w, 1), np.float32)
    alpha[:, :, 0] = a / 255.
    im = alpha * fg + (1 - alpha) * bg
    im = im.astype(np.uint8)
    return im, a, fg, bg
def process(fcount, bcount):
    """Build one composite from foreground *fcount* and background *bcount*.

    Upscales the background (cubic) when it is smaller than the foreground,
    then delegates to composite4.
    """
    fg = get_raw("fg", fcount)
    alpha = get_raw("a", fcount)
    alpha = np.reshape(alpha, alpha.shape[:2])  # drop any channel axis
    h, w = fg.shape[:2]
    bg = get_raw("bg", bcount)
    bh, bw = bg.shape[:2]
    ratio = max(w / bw, h / bh)
    if ratio > 1:
        new_size = (math.ceil(bw * ratio), math.ceil(bh * ratio))
        bg = cv.resize(src=bg, dsize=new_size, interpolation=cv.INTER_CUBIC)
    return composite4(fg, bg, alpha, w, h)
def gen_trimap(alpha):
    """Generate a trimap (0 = bg, 128 = unknown, 255 = fg) from an alpha mask
    using a randomly sized kernel and a random number of dilate/erode passes.
    """
    k_size = random.choice(range(1, 5))
    iterations = np.random.randint(1, 20)
    kernel = cv.getStructuringElement(cv.MORPH_ELLIPSE, (k_size, k_size))
    # BUG FIX: pass iterations by keyword -- the third positional argument of
    # cv.dilate/cv.erode is `dst`, not `iterations`.
    dilated = cv.dilate(alpha, kernel, iterations=iterations)
    eroded = cv.erode(alpha, kernel, iterations=iterations)
    trimap = np.zeros(alpha.shape)
    trimap.fill(128)
    trimap[eroded >= 255] = 255  # confidently foreground
    trimap[dilated <= 0] = 0     # confidently background
    return trimap
# Randomly crop (image, trimap) pairs centered on pixels in the unknown regions.
def random_choice(trimap, crop_size=(320, 320)):
    """Return the top-left (x, y) of a crop centered on a random unknown pixel.

    Falls back to (0, 0) when the trimap contains no unknown pixels.
    """
    crop_height, crop_width = crop_size
    unknown_ys, unknown_xs = np.where(trimap == unknown_code)
    num_unknowns = len(unknown_ys)
    if num_unknowns == 0:
        return 0, 0
    pick = np.random.choice(range(num_unknowns))
    center_x = unknown_xs[pick]
    center_y = unknown_ys[pick]
    return (max(0, center_x - int(crop_width / 2)),
            max(0, center_y - int(crop_height / 2)))
def _composite_fg(alpha, fg, idx):
    """Composite a second foreground/alpha pair underneath (fg, alpha) and
    return the merged (img, alpha).

    NOTE(review): idx2 is hard-coded to 15 and the `idx` parameter is unused
    -- presumably this should pick a random foreground different from idx;
    confirm the intent. (Blocking debug cv.imshow/cv.waitKey calls removed:
    they stalled the data pipeline waiting for a keypress.)
    """
    idx2 = 15
    alpha2 = get_raw("a", idx2)
    alpha2 = np.reshape(alpha2, (alpha2.shape[0], alpha2.shape[1]))
    fg2 = get_raw("fg", idx2)
    h, w = alpha.shape
    fg2 = cv.resize(fg2, (w, h), interpolation=maybe_random_interp(cv.INTER_NEAREST))
    alpha2 = cv.resize(alpha2, (w, h), interpolation=maybe_random_interp(cv.INTER_NEAREST))
    # Merged coverage: transparent only where both alphas are transparent.
    alpha_tmp = 1 - (1 - alpha / 255.0) * (1 - alpha2 / 255.0)
    # BUG FIX: `img` was previously unbound when no compositing occurred,
    # raising NameError at the return below.
    img = fg
    if np.any(alpha_tmp < 1):
        img, alpha, _, _ = composite4(fg, fg2, alpha, w, h)
        alpha = alpha_tmp * 255.0
    return img, alpha
def split_name():
    """Shuffle foreground indices and split them into train/valid partitions
    according to valid_ratio. Returns (train_indices, valid_indices).
    """
    indices = list(range(num_fgs))
    np.random.shuffle(indices)
    cut = math.ceil(num_fgs - num_fgs * valid_ratio)
    train_indices = indices[:cut]
    print(len(train_indices))
    valid_indices = indices[cut:]
    return train_indices, valid_indices
if __name__ == "__main__":
    # Visual smoke test of the composition pipeline. Each cv.imshow blocks
    # until a key is pressed; [:,:,::-1] flips RGB -> BGR for OpenCV display.
    img, alpha, fg, bg = process(2, 19)
    h, w = alpha.shape
    cv.imshow("fg", fg[:,:,::-1].astype(np.uint8))
    cv.waitKey(0)
    cv.destroyAllWindows()
    # Merge a second foreground underneath the first.
    img, alpha = _composite_fg(alpha, img, 2)
    cv.imshow("fg", fg[:,:,::-1].astype(np.uint8))
    cv.waitKey(0)
    cv.destroyAllWindows()
    cv.imshow("combination", img[:,:,::-1].astype(np.uint8))
    cv.waitKey(0)
    cv.destroyAllWindows()
    # Re-composite the merged foreground over the background, resize
    # everything to the working resolution, and display the result.
    img, alpha, fg, bg = composite4(img, bg, alpha, w, h)
    interpolation = maybe_random_interp(cv.INTER_NEAREST)
    img = cv.resize(img, (640, 640), interpolation=interpolation)
    fg = cv.resize(fg, (640, 640), interpolation=interpolation)
    alpha = cv.resize(alpha, (640, 640), interpolation=interpolation)
    bg = cv.resize(bg, (640, 640), interpolation=interpolation)
    cv.imshow("with background", img[:,:,::-1].astype(np.uint8))
    cv.waitKey(0)
    cv.destroyAllWindows()
| [
"numpy.random.randint",
"cv2.erode",
"torchvision.transforms.Normalize",
"os.path.join",
"utils.maybe_random_interp",
"cv2.dilate",
"numpy.reshape",
"cv2.destroyAllWindows",
"numpy.random.shuffle",
"cv2.resize",
"math.ceil",
"cv2.waitKey",
"numpy.asarray",
"tfrecord_creator.read",
"torch... | [((380, 392), 'utils.parse_args', 'parse_args', ([], {}), '()\n', (390, 392), False, 'from utils import safe_crop, parse_args, maybe_random_interp\n'), ((1223, 1270), 'tfrecord_creator.read', 'tfrecord_creator.read', (['"""fg"""', '"""./data/tfrecord/"""'], {}), "('fg', './data/tfrecord/')\n", (1244, 1270), False, 'import tfrecord_creator\n'), ((1284, 1331), 'tfrecord_creator.read', 'tfrecord_creator.read', (['"""bg"""', '"""./data/tfrecord/"""'], {}), "('bg', './data/tfrecord/')\n", (1305, 1331), False, 'import tfrecord_creator\n'), ((1345, 1391), 'tfrecord_creator.read', 'tfrecord_creator.read', (['"""a"""', '"""./data/tfrecord/"""'], {}), "('a', './data/tfrecord/')\n", (1366, 1391), False, 'import tfrecord_creator\n'), ((2009, 2059), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(3, 3)'], {}), '(cv.MORPH_ELLIPSE, (3, 3))\n', (2033, 2059), True, 'import cv2 as cv\n'), ((1909, 1954), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['temp'], {'channels': 'channels'}), '(temp, channels=channels)\n', (1929, 1954), True, 'import tensorflow as tf\n'), ((1966, 1982), 'numpy.asarray', 'np.asarray', (['temp'], {}), '(temp)\n', (1976, 1982), True, 'import numpy as np\n'), ((2585, 2616), 'os.path.join', 'os.path.join', (['"""data/mask"""', 'name'], {}), "('data/mask', name)\n", (2597, 2616), False, 'import os\n'), ((2629, 2651), 'cv2.imread', 'cv.imread', (['filename', '(0)'], {}), '(filename, 0)\n', (2638, 2651), True, 'import cv2 as cv\n'), ((2778, 2814), 'os.path.join', 'os.path.join', (['"""data/mask_test"""', 'name'], {}), "('data/mask_test', name)\n", (2790, 2814), False, 'import os\n'), ((2827, 2849), 'cv2.imread', 'cv.imread', (['filename', '(0)'], {}), '(filename, 0)\n', (2836, 2849), True, 'import cv2 as cv\n'), ((2911, 2935), 'numpy.array', 'np.array', (['fg', 'np.float32'], {}), '(fg, np.float32)\n', (2919, 2935), True, 'import numpy as np\n'), ((3173, 3215), 'numpy.array', 'np.array', (['bg[y:y + h, x:x + 
w]', 'np.float32'], {}), '(bg[y:y + h, x:x + w], np.float32)\n', (3181, 3215), True, 'import numpy as np\n'), ((3225, 3251), 'numpy.reshape', 'np.reshape', (['bg', '(h, w, -1)'], {}), '(bg, (h, w, -1))\n', (3235, 3251), True, 'import numpy as np\n'), ((3259, 3285), 'numpy.reshape', 'np.reshape', (['fg', '(h, w, -1)'], {}), '(fg, (h, w, -1))\n', (3269, 3285), True, 'import numpy as np\n'), ((3296, 3327), 'numpy.zeros', 'np.zeros', (['(h, w, 1)', 'np.float32'], {}), '((h, w, 1), np.float32)\n', (3304, 3327), True, 'import numpy as np\n'), ((3550, 3589), 'numpy.reshape', 'np.reshape', (['a', '(a.shape[0], a.shape[1])'], {}), '(a, (a.shape[0], a.shape[1]))\n', (3560, 3589), True, 'import numpy as np\n'), ((4016, 4040), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4033, 4040), True, 'import numpy as np\n'), ((4054, 4114), 'cv2.getStructuringElement', 'cv.getStructuringElement', (['cv.MORPH_ELLIPSE', '(k_size, k_size)'], {}), '(cv.MORPH_ELLIPSE, (k_size, k_size))\n', (4078, 4114), True, 'import cv2 as cv\n'), ((4129, 4165), 'cv2.dilate', 'cv.dilate', (['alpha', 'kernel', 'iterations'], {}), '(alpha, kernel, iterations)\n', (4138, 4165), True, 'import cv2 as cv\n'), ((4179, 4214), 'cv2.erode', 'cv.erode', (['alpha', 'kernel', 'iterations'], {}), '(alpha, kernel, iterations)\n', (4187, 4214), True, 'import cv2 as cv\n'), ((4228, 4249), 'numpy.zeros', 'np.zeros', (['alpha.shape'], {}), '(alpha.shape)\n', (4236, 4249), True, 'import numpy as np\n'), ((4549, 4581), 'numpy.where', 'np.where', (['(trimap == unknown_code)'], {}), '(trimap == unknown_code)\n', (4557, 4581), True, 'import numpy as np\n'), ((5047, 5101), 'numpy.reshape', 'np.reshape', (['alpha2', '(alpha2.shape[0], alpha2.shape[1])'], {}), '(alpha2, (alpha2.shape[0], alpha2.shape[1]))\n', (5057, 5101), True, 'import numpy as np\n'), ((5201, 5214), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (5211, 5214), True, 'import cv2 as cv\n'), ((5223, 5245), 
'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (5243, 5245), True, 'import cv2 as cv\n'), ((5538, 5559), 'numpy.any', 'np.any', (['(alpha_tmp < 1)'], {}), '(alpha_tmp < 1)\n', (5544, 5559), True, 'import numpy as np\n'), ((5745, 5769), 'numpy.random.shuffle', 'np.random.shuffle', (['names'], {}), '(names)\n', (5762, 5769), True, 'import numpy as np\n'), ((5788, 5830), 'math.ceil', 'math.ceil', (['(num_fgs - num_fgs * valid_ratio)'], {}), '(num_fgs - num_fgs * valid_ratio)\n', (5797, 5830), False, 'import math\n'), ((6117, 6130), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (6127, 6130), True, 'import cv2 as cv\n'), ((6135, 6157), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (6155, 6157), True, 'import cv2 as cv\n'), ((6259, 6272), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (6269, 6272), True, 'import cv2 as cv\n'), ((6277, 6299), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (6297, 6299), True, 'import cv2 as cv\n'), ((6365, 6378), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (6375, 6378), True, 'import cv2 as cv\n'), ((6383, 6405), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (6403, 6405), True, 'import cv2 as cv\n'), ((6484, 6521), 'utils.maybe_random_interp', 'maybe_random_interp', (['cv.INTER_NEAREST'], {}), '(cv.INTER_NEAREST)\n', (6503, 6521), False, 'from utils import safe_crop, parse_args, maybe_random_interp\n'), ((6532, 6587), 'cv2.resize', 'cv.resize', (['img', '(640, 640)'], {'interpolation': 'interpolation'}), '(img, (640, 640), interpolation=interpolation)\n', (6541, 6587), True, 'import cv2 as cv\n'), ((6597, 6651), 'cv2.resize', 'cv.resize', (['fg', '(640, 640)'], {'interpolation': 'interpolation'}), '(fg, (640, 640), interpolation=interpolation)\n', (6606, 6651), True, 'import cv2 as cv\n'), ((6664, 6721), 'cv2.resize', 'cv.resize', (['alpha', '(640, 640)'], {'interpolation': 'interpolation'}), '(alpha, (640, 640), 
interpolation=interpolation)\n', (6673, 6721), True, 'import cv2 as cv\n'), ((6731, 6785), 'cv2.resize', 'cv.resize', (['bg', '(640, 640)'], {'interpolation': 'interpolation'}), '(bg, (640, 640), interpolation=interpolation)\n', (6740, 6785), True, 'import cv2 as cv\n'), ((6855, 6868), 'cv2.waitKey', 'cv.waitKey', (['(0)'], {}), '(0)\n', (6865, 6868), True, 'import cv2 as cv\n'), ((6873, 6895), 'cv2.destroyAllWindows', 'cv.destroyAllWindows', ([], {}), '()\n', (6893, 6895), True, 'import cv2 as cv\n'), ((1111, 1142), 'tensorflow.image.decode_jpeg', 'tf.image.decode_jpeg', (['image_raw'], {}), '(image_raw)\n', (1131, 1142), True, 'import tensorflow as tf\n'), ((3005, 3035), 'numpy.random.randint', 'np.random.randint', (['(0)', '(bg_w - w)'], {}), '(0, bg_w - w)\n', (3022, 3035), True, 'import numpy as np\n'), ((3075, 3105), 'numpy.random.randint', 'np.random.randint', (['(0)', '(bg_h - h)'], {}), '(0, bg_h - h)\n', (3092, 3105), True, 'import numpy as np\n'), ((3140, 3165), 'numpy.reshape', 'np.reshape', (['bg', '(h, w, 1)'], {}), '(bg, (h, w, 1))\n', (3150, 3165), True, 'import numpy as np\n'), ((614, 688), 'torchvision.transforms.ColorJitter', 'transforms.ColorJitter', ([], {'brightness': '(0.125)', 'contrast': '(0.125)', 'saturation': '(0.125)'}), '(brightness=0.125, contrast=0.125, saturation=0.125)\n', (636, 688), False, 'from torchvision import transforms\n'), ((698, 719), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (717, 719), False, 'from torchvision import transforms\n'), ((729, 795), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (749, 795), False, 'from torchvision import transforms\n'), ((847, 868), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (866, 868), False, 'from torchvision import transforms\n'), ((878, 944), 'torchvision.transforms.Normalize', 
'transforms.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (898, 944), False, 'from torchvision import transforms\n'), ((5324, 5361), 'utils.maybe_random_interp', 'maybe_random_interp', (['cv.INTER_NEAREST'], {}), '(cv.INTER_NEAREST)\n', (5343, 5361), False, 'from utils import safe_crop, parse_args, maybe_random_interp\n'), ((5420, 5457), 'utils.maybe_random_interp', 'maybe_random_interp', (['cv.INTER_NEAREST'], {}), '(cv.INTER_NEAREST)\n', (5439, 5457), False, 'from utils import safe_crop, parse_args, maybe_random_interp\n'), ((3817, 3838), 'math.ceil', 'math.ceil', (['(bw * ratio)'], {}), '(bw * ratio)\n', (3826, 3838), False, 'import math\n'), ((3840, 3861), 'math.ceil', 'math.ceil', (['(bh * ratio)'], {}), '(bh * ratio)\n', (3849, 3861), False, 'import math\n')] |
import json
import time

import falcon
import numpy as np
import pandas
import scipy
import scipy.stats

import backend
import utils
class MedianAbsoluteDeviation(object):
    """
    A timeseries is anomalous if the deviation of its latest datapoint with
    respect to the median is X times larger than the median of deviations.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        """Run the detector, honouring an optional 'deviation_threshold' param."""
        threshold = req.get_param_as_int("deviation_threshold",
                                         required=False)
        if threshold:
            verdict = self._work(timeseries, tidx, vidx, threshold)
        else:
            verdict = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(verdict)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1, deviation_threshold=6):
        """Return True when the latest point's deviation from the median is more
        than `deviation_threshold` times the median absolute deviation."""
        values = pandas.Series([point[vidx] for point in timeseries])
        median = values.median()
        deviations = np.abs(values - median)
        median_deviation = deviations.median()
        # A zero median deviation makes the statistic infinite and the test
        # hyper-sensitive, so play it safe and report "not anomalous".
        if median_deviation == 0:
            return False
        statistic = deviations.iat[-1] / median_deviation
        # Completely arbitrary... triggers when the latest deviation is
        # `deviation_threshold` (default 6) times the typical deviation.
        return bool(statistic > deviation_threshold)
class Grubbs(object):
    """
    A timeseries is anomalous if the Z score is greater than the Grubb's score.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        result = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1):
        """Grubbs' test: flag the series when the z-score of the tail average
        exceeds the critical Grubbs score for the series length."""
        # np.array/np.std replace the deprecated/removed scipy.array/scipy.std
        # NumPy aliases.
        series = np.array([x[vidx] for x in timeseries])
        stdDev = np.std(series)
        mean = np.mean(series)
        # BUG FIX: `_tail_avg` is defined in the utils module; the bare name
        # raised NameError at runtime.
        tail_average = utils._tail_avg(timeseries, tidx, vidx)
        z_score = (tail_average - mean) / stdDev
        len_series = len(series)
        threshold = scipy.stats.t.isf(0.05 / (2 * len_series), len_series - 2)
        threshold_squared = threshold * threshold
        grubbs_score = ((len_series - 1) / np.sqrt(len_series)) * \
            np.sqrt(threshold_squared / (len_series - 2 + threshold_squared))
        # bool() keeps the result JSON-serializable (numpy.bool_ is not).
        return bool(z_score > grubbs_score)
class FirstHourAverage(object):
    """
    Calculate the simple average over one hour, FULL_DURATION seconds ago.
    A timeseries is anomalous if the average of the last three datapoints
    are outside of three standard deviations of this value.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        """Run the detector, honouring an optional 'full_duration' param."""
        full_duration = req.get_param_as_int("full_duration",
                                             required=False)
        if full_duration:
            result = self._work(timeseries, tidx, vidx, full_duration)
        else:
            result = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1, full_duration=86400):
        """Compare the tail average to the mean +/- 3 std of the first hour
        of the `full_duration`-second window."""
        # BUG FIX: `time` is the module object, so `time()` raised TypeError;
        # the wall clock is time.time().
        last_hour_threshold = time.time() - (full_duration - 3600)
        series = pandas.Series(
            [x[vidx] for x in timeseries if x[tidx] < last_hour_threshold])
        mean = series.mean()
        stdDev = series.std()
        # BUG FIX: `_tail_avg` lives in utils; the bare name raised NameError.
        t = utils._tail_avg(timeseries, tidx, vidx)
        # bool() keeps the result JSON-serializable (numpy.bool_ is not).
        return bool(abs(t - mean) > 3 * stdDev)
class StddevFromAverage(object):
    """
    A timeseries is anomalous if the absolute value of the average of the
    latest three datapoints minus the moving average is greater than three
    standard deviations of the average. This does not exponentially
    weight the MA and so is better for detecting anomalies with respect
    to the entire series.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        result = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1):
        """Flag the series when the tail average is more than three standard
        deviations away from the overall mean."""
        series = pandas.Series([x[vidx] for x in timeseries])
        mean = series.mean()
        stdDev = series.std()
        # BUG FIX: `tail_avg` was an undefined name (NameError); the helper is
        # utils._tail_avg and needs the column indices.
        t = utils._tail_avg(timeseries, tidx, vidx)
        # bool() keeps the result JSON-serializable (numpy.bool_ is not).
        return bool(abs(t - mean) > 3 * stdDev)
class StddevFromMovingAverage(object):
    """
    A timeseries is anomalous if the absolute value of the average of
    the latest three datapoint minus the moving average is greater than
    three standard deviations of the moving average. This is better for
    finding anomalies with respect to the short term trends.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        """Run the detector, honouring an optional 'com' (center of mass) param."""
        com = req.get_param_as_int("com",
                                   required=False)
        if com:
            result = self._work(timeseries, tidx, vidx, com)
        else:
            result = self._work(timeseries, tidx=tidx, vidx=vidx)
        # BUG FIX: a stray duplicate `result = self._work(...)` call here
        # recomputed the result with the default `com`, silently discarding a
        # client-supplied value.
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1, com=50):
        """Flag the series when the last value deviates from the exponentially
        weighted moving average by more than three EW standard deviations."""
        series = pandas.Series([x[vidx] for x in timeseries])
        # pandas.stats.moments was removed from pandas; Series.ewm is the
        # modern equivalent with the same defaults (adjust=True, unbiased std).
        expAverage = series.ewm(com=com).mean()
        stdDev = series.ewm(com=com).std()
        # bool() keeps the result JSON-serializable (numpy.bool_ is not).
        return bool(abs(series.iat[-1] - expAverage.iat[-1]) > 3 * stdDev.iat[-1])
class MeanSubtractionCumulation(object):
    """
    A timeseries is anomalous if the value of the next datapoint in the
    series is farther than three standard deviations out in cumulative terms
    after subtracting the mean from each data point.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        """Run the detector, honouring an optional 'com' query parameter."""
        com = req.get_param_as_int("com",
                                   required=False)
        if com:
            result = self._work(timeseries, tidx, vidx, com)
        else:
            result = self._work(timeseries, tidx=tidx, vidx=vidx)
        # BUG FIX: a stray duplicate `result = self._work(...)` call here
        # recomputed the result with the default `com`, silently discarding a
        # client-supplied value.
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1, com=15):
        """Flag the series when, after subtracting the mean of all earlier
        points, the last value lies beyond three standard deviations.

        `com` is kept for interface compatibility; the exponentially weighted
        average it configured was computed but never used (and relied on the
        removed pandas.stats.moments API), so it has been dropped.
        """
        series = pandas.Series([x[vidx] if x[vidx] else 0 for x in timeseries])
        series = series - series[0:len(series) - 1].mean()
        stdDev = series[0:len(series) - 1].std()
        # bool() keeps the result JSON-serializable (numpy.bool_ is not).
        return bool(abs(series.iat[-1]) > 3 * stdDev)
class LeastSquares(object):
    """
    A timeseries is anomalous if the average of the last three datapoints
    on a projected least squares model is greater than three sigma.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        result = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1):
        """Fit y = m*x + c by least squares and flag the series when the mean
        of the last three residuals exceeds three standard deviations of all
        residuals (with guards against a degenerate, near-perfect fit)."""
        x = np.array([t[tidx] for t in timeseries])
        y = np.array([t[vidx] for t in timeseries])
        A = np.vstack([x, np.ones(len(x))]).T
        # BUG FIX: the original solved the same system twice and kept an unused
        # `residual` local; solve once and reuse. rcond=None selects numpy's
        # recommended cutoff and silences the FutureWarning.
        m, c = np.linalg.lstsq(A, y, rcond=None)[0]
        errors = [value - (m * x[i] + c) for i, value in enumerate(y)]
        if len(errors) < 3:
            return False
        # np.std replaces the removed scipy.std alias (population std, ddof=0).
        std_dev = np.std(errors)
        t = (errors[-1] + errors[-2] + errors[-3]) / 3
        # The round() guards suppress triggers when the fit is essentially exact.
        return bool(abs(t) > std_dev * 3 and round(std_dev) != 0 and round(t) != 0)
class HistogramBins(object):
    """
    A timeseries is anomalous if the average of the last three datapoints falls
    into a histogram bin with less than 20 datapoints you'll need to tweak
    that number depending on your data.
    Returns: the size of the bin which contains the tail_avg. Smaller bin size
    means more anomalous.
    """

    def on_get(self, req, resp):
        """GET handler: fetch the timeseries from the backend and evaluate it."""
        timeseries, tidx, vidx = utils.backend_retreival(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def on_post(self, req, resp):
        """POST handler: read the timeseries from the request and evaluate it."""
        timeseries, tidx, vidx = utils._non_backend_call(req)
        self._run(req, resp, timeseries, tidx, vidx)

    def _run(self, req, resp, timeseries, tidx, vidx):
        """Run the detector, honouring an optional 'bins' query parameter."""
        bins = req.get_param_as_int("bins", required=False)
        if bins:
            result = self._work(timeseries, tidx=tidx, vidx=vidx, bins=bins)
        else:
            result = self._work(timeseries, tidx=tidx, vidx=vidx)
        resp.body = json.dumps(result)
        resp.status = falcon.HTTP_200

    def _work(self, timeseries, tidx=0, vidx=1, bins=15):
        """Return True when the tail average lands in a histogram bin that
        holds 20 or fewer datapoints (a sparse bin means anomalous)."""
        # np.array replaces the deprecated/removed scipy.array alias.
        series = np.array([x[vidx] for x in timeseries])
        t = utils._tail_avg(timeseries, tidx, vidx)
        # `edges` keeps the bin boundaries without shadowing the `bins` param.
        counts, edges = np.histogram(series, bins=bins)
        for index, bin_size in enumerate(counts):
            if bin_size <= 20:
                # Is it in the first bin? (closed on the left)
                if index == 0:
                    if t <= edges[0]:
                        return True
                # Is it in the current half-open bin [edges[i], edges[i+1])?
                elif t >= edges[index] and t < edges[index + 1]:
                    return True
        return False
| [
"utils.backend_retreival",
"pandas.stats.moments.ewma",
"utils._tail_avg",
"numpy.abs",
"numpy.linalg.lstsq",
"time",
"pandas.stats.moments.ewmstd",
"scipy.stats.t.isf",
"json.dumps",
"numpy.histogram",
"numpy.mean",
"numpy.array",
"pandas.Series",
"scipy.array",
"numpy.sqrt",
"utils._... | [((388, 416), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (411, 416), False, 'import utils\n'), ((539, 567), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (562, 567), False, 'import utils\n'), ((1030, 1048), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (1040, 1048), False, 'import json\n'), ((1178, 1222), 'pandas.Series', 'pandas.Series', (['[x[vidx] for x in timeseries]'], {}), '([x[vidx] for x in timeseries])\n', (1191, 1222), False, 'import pandas\n'), ((1277, 1300), 'numpy.abs', 'np.abs', (['(series - median)'], {}), '(series - median)\n', (1283, 1300), True, 'import numpy as np\n'), ((2043, 2071), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (2066, 2071), False, 'import utils\n'), ((2194, 2222), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (2217, 2222), False, 'import utils\n'), ((2416, 2434), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (2426, 2434), False, 'import json\n'), ((2541, 2583), 'scipy.array', 'scipy.array', (['[x[vidx] for x in timeseries]'], {}), '([x[vidx] for x in timeseries])\n', (2552, 2583), False, 'import scipy\n'), ((2601, 2618), 'scipy.std', 'scipy.std', (['series'], {}), '(series)\n', (2610, 2618), False, 'import scipy\n'), ((2634, 2649), 'numpy.mean', 'np.mean', (['series'], {}), '(series)\n', (2641, 2649), True, 'import numpy as np\n'), ((2809, 2867), 'scipy.stats.t.isf', 'scipy.stats.t.isf', (['(0.05 / (2 * len_series))', '(len_series - 2)'], {}), '(0.05 / (2 * len_series), len_series - 2)\n', (2826, 2867), False, 'import scipy\n'), ((3428, 3456), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (3451, 3456), False, 'import utils\n'), ((3579, 3607), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (3602, 3607), False, 'import utils\n'), ((4040, 4058), 'json.dumps', 'json.dumps', (['result'], 
{}), '(result)\n', (4050, 4058), False, 'import json\n'), ((4248, 4325), 'pandas.Series', 'pandas.Series', (['[x[vidx] for x in timeseries if x[tidx] < last_hour_threshold]'], {}), '([x[vidx] for x in timeseries if x[tidx] < last_hour_threshold])\n', (4261, 4325), False, 'import pandas\n'), ((4923, 4951), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (4946, 4951), False, 'import utils\n'), ((5074, 5102), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (5097, 5102), False, 'import utils\n'), ((5296, 5314), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (5306, 5314), False, 'import json\n'), ((5421, 5465), 'pandas.Series', 'pandas.Series', (['[x[vidx] for x in timeseries]'], {}), '([x[vidx] for x in timeseries])\n', (5434, 5465), False, 'import pandas\n'), ((6001, 6029), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (6024, 6029), False, 'import utils\n'), ((6152, 6180), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (6175, 6180), False, 'import utils\n'), ((6626, 6644), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (6636, 6644), False, 'import json\n'), ((6759, 6803), 'pandas.Series', 'pandas.Series', (['[x[vidx] for x in timeseries]'], {}), '([x[vidx] for x in timeseries])\n', (6772, 6803), False, 'import pandas\n'), ((6825, 6867), 'pandas.stats.moments.ewma', 'pandas.stats.moments.ewma', (['series'], {'com': 'com'}), '(series, com=com)\n', (6850, 6867), False, 'import pandas\n'), ((6885, 6929), 'pandas.stats.moments.ewmstd', 'pandas.stats.moments.ewmstd', (['series'], {'com': 'com'}), '(series, com=com)\n', (6912, 6929), False, 'import pandas\n'), ((7337, 7365), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (7360, 7365), False, 'import utils\n'), ((7488, 7516), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (7511, 7516), False, 'import 
utils\n'), ((7962, 7980), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (7972, 7980), False, 'import json\n'), ((8095, 8159), 'pandas.Series', 'pandas.Series', (['[(x[vidx] if x[vidx] else 0) for x in timeseries]'], {}), '([(x[vidx] if x[vidx] else 0) for x in timeseries])\n', (8108, 8159), False, 'import pandas\n'), ((8287, 8329), 'pandas.stats.moments.ewma', 'pandas.stats.moments.ewma', (['series'], {'com': 'com'}), '(series, com=com)\n', (8312, 8329), False, 'import pandas\n'), ((8635, 8663), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (8658, 8663), False, 'import utils\n'), ((8786, 8814), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (8809, 8814), False, 'import utils\n'), ((9008, 9026), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (9018, 9026), False, 'import json\n'), ((9128, 9167), 'numpy.array', 'np.array', (['[t[tidx] for t in timeseries]'], {}), '([t[tidx] for t in timeseries])\n', (9136, 9167), True, 'import numpy as np\n'), ((9180, 9219), 'numpy.array', 'np.array', (['[t[vidx] for t in timeseries]'], {}), '([t[vidx] for t in timeseries])\n', (9188, 9219), True, 'import numpy as np\n'), ((9284, 9305), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {}), '(A, y)\n', (9299, 9305), True, 'import numpy as np\n'), ((9615, 9632), 'scipy.std', 'scipy.std', (['errors'], {}), '(errors)\n', (9624, 9632), False, 'import scipy\n'), ((10183, 10211), 'utils.backend_retreival', 'utils.backend_retreival', (['req'], {}), '(req)\n', (10206, 10211), False, 'import utils\n'), ((10334, 10362), 'utils._non_backend_call', 'utils._non_backend_call', (['req'], {}), '(req)\n', (10357, 10362), False, 'import utils\n'), ((10729, 10747), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (10739, 10747), False, 'import json\n'), ((10863, 10905), 'scipy.array', 'scipy.array', (['[x[vidx] for x in timeseries]'], {}), '([x[vidx] for x in timeseries])\n', (10874, 10905), 
False, 'import scipy\n'), ((10918, 10957), 'utils._tail_avg', 'utils._tail_avg', (['timeseries', 'tidx', 'vidx'], {}), '(timeseries, tidx, vidx)\n', (10933, 10957), False, 'import utils\n'), ((10970, 11001), 'numpy.histogram', 'np.histogram', (['series'], {'bins': 'bins'}), '(series, bins=bins)\n', (10982, 11001), True, 'import numpy as np\n'), ((2997, 3062), 'numpy.sqrt', 'np.sqrt', (['(threshold_squared / (len_series - 2 + threshold_squared))'], {}), '(threshold_squared / (len_series - 2 + threshold_squared))\n', (3004, 3062), True, 'import numpy as np\n'), ((4199, 4205), 'time', 'time', ([], {}), '()\n', (4203, 4205), False, 'import time\n'), ((9351, 9372), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'y'], {}), '(A, y)\n', (9366, 9372), True, 'import numpy as np\n'), ((2960, 2979), 'numpy.sqrt', 'np.sqrt', (['len_series'], {}), '(len_series)\n', (2967, 2979), True, 'import numpy as np\n')] |
import numpy as np
import math
from dataset_specifications.dataset import Dataset
class Mixture2DSet(Dataset):
    """Unconditional mixture of 6 isotropic 2D Gaussians on the unit circle."""

    def __init__(self):
        super().__init__()
        self.name = "mixture_2d"
        # Not conditional, so every x input is simply 0; only y is random.
        self.y_dim = 2
        self.std = 0.15

    def sample_ys(self, xs):
        """Draw one mixture sample for each row of `xs`."""
        n_samples = xs.shape[0]
        # Choose a component uniformly; component k is centred at angle k*60 deg.
        chosen = np.random.randint(6, size=n_samples)
        thetas = (math.pi / 3.0) * chosen
        centers = np.stack((np.cos(thetas), np.sin(thetas)), axis=1)
        # Isotropic Gaussian noise around the selected centre.
        perturbation = np.random.randn(n_samples, 2)
        return centers + self.std * perturbation

    def sample(self, n):
        """Return an (n, 3) array of [x, y1, y2] rows, with x fixed at 0."""
        xs = np.zeros((n, 1))
        ys = self.sample_ys(xs)
        return np.concatenate((xs, ys), axis=1)
| [
"numpy.random.randn",
"numpy.zeros",
"numpy.random.randint",
"numpy.sin",
"numpy.cos",
"numpy.concatenate"
] | [((446, 474), 'numpy.random.randint', 'np.random.randint', (['(6)'], {'size': 'n'}), '(6, size=n)\n', (463, 474), True, 'import numpy as np\n'), ((633, 654), 'numpy.random.randn', 'np.random.randn', (['n', '(2)'], {}), '(n, 2)\n', (648, 654), True, 'import numpy as np\n'), ((776, 792), 'numpy.zeros', 'np.zeros', (['(n, 1)'], {}), '((n, 1))\n', (784, 792), True, 'import numpy as np\n'), ((839, 871), 'numpy.concatenate', 'np.concatenate', (['(xs, ys)'], {'axis': '(1)'}), '((xs, ys), axis=1)\n', (853, 871), True, 'import numpy as np\n'), ((575, 589), 'numpy.cos', 'np.cos', (['angles'], {}), '(angles)\n', (581, 589), True, 'import numpy as np\n'), ((591, 605), 'numpy.sin', 'np.sin', (['angles'], {}), '(angles)\n', (597, 605), True, 'import numpy as np\n')] |
import time
import traceback
import unittest.mock
from concurrent.futures import ThreadPoolExecutor
from typing import Callable, List, Optional, Tuple, TypeVar, cast
import ipywidgets
import ipywidgets as widgets
import numpy as np
import pytest
import traitlets
import react_ipywidgets as react
from react_ipywidgets.core import component, use_effect
from . import logging # noqa: F401
from . import bqplot
from . import ipyvuetify as v
from . import ipywidgets as w
T = TypeVar("T")


def first(container: List[T]) -> T:
    """Return the first element of `container` (IndexError when empty)."""
    head = container[0]
    return head
def clear():
    """Reset the global ipywidgets registry so widget counts start from zero."""
    widgets.Widget.widgets = dict()
def count():
    """Return how many ipywidgets Widget instances are currently registered."""
    registry = widgets.Widget.widgets
    return len(registry)
# components used for testing
@component
def MyComponent():
    """Minimal test component that renders a single Button element."""
    return w.Button()
@react.component
def ButtonComponentFunction(**kwargs):
    """Component wrapper that forwards all keyword arguments to a Button."""
    return w.Button(**kwargs)
@react.component
def ButtonNumber(value):
    """Button labelled with `value`, held in component state (tests state reuse)."""
    # Seed local state from the prop; the setter is intentionally unused.
    value, set_value = react.use_state(value)
    return w.Button(description=str(value))
@react.component
def ButtonNumber2(value):
    """Second copy of ButtonNumber, used to test state reuse across components."""
    # Seed local state from the prop; the setter is intentionally unused.
    value, set_value = react.use_state(value)
    return w.Button(description=str(value))
@react.component
def Container(children=[]):
    """Wrap the given child elements in an HBox."""
    # NOTE(review): mutable default is tolerated here because it is never mutated.
    return w.HBox(children=children)
@pytest.fixture(autouse=True)
def cleanup_guard():
    """Fail any test that leaks ipywidgets Widget instances.

    Snapshots the global widget registry before the test runs and asserts
    that no newly-registered widgets remain afterwards.
    """
    before = set(widgets.Widget.widgets)
    yield
    after = set(widgets.Widget.widgets)
    leftover = after - before
    if leftover:
        # Resolve ids to widget objects so the assertion message is informative.
        leftover_widgets = [widgets.Widget.widgets[k] for k in leftover]
        assert not leftover_widgets
        # raise RuntimeError(f"{leftover_widgets}")
# @pytest.fixture(params=["ButtonComponentWidget", "ButtonComponentFunction"])
@pytest.fixture(params=["ButtonComponentWidget"])
def ButtonComponent(request):
    """Parametrized fixture resolving the param name to a button component/widget."""
    return dict(ButtonComponentWidget=w.Button, ButtonComponentFunction=ButtonComponentFunction)[request.param]
def test_internals():
    """Inspect the reconciler's context tree after rendering a nested component."""
    @react.component
    def Child():
        return w.VBox()
    @react.component
    def App():
        return Child().key("child")  # type: ignore
    app = App()
    # Expected tree after render:
    # root_context:
    #    element: App
    #    children:
    #       - '/':
    widget, rc = react.render_fixed(app, handle_error=False)
    assert rc.context_root.root_element == app
    assert list(rc.context_root.children_next) == []
    assert list(rc.context_root.children) == ["App/"]
    app_context = rc.context_root.children["App/"]
    assert list(app_context.children_next) == []
    assert list(app_context.children) == ["child"]
    assert app_context.invoke_element is app
    rc.close()
    # assert app_context.children["child"].invoke_element is app_context.elements["child"]
    # assert list(rc.context_root.children["/"].children["child"].invoke_element) == ["child"]
def test_create_element():
    """Creating an Element allocates no widgets; rendering it does."""
    clear()
    button: react.core.Element[widgets.Button] = react.core.Element[widgets.Button](MyComponent)
    # Element construction alone must not instantiate any widget.
    assert count() == 0
    hbox, rc = react.render(button, handle_error=False)
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Button)
    assert count() == 2 + 3  # button + button layout + button style
    rc.close()
def test_monkey_patch():
    """The patched `.element()` classmethod on widgets renders like a component."""
    button = widgets.Button.element(description="Hi")
    hbox, rc = react.render(button)
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Button)
    assert hbox.children[0].description == "Hi"
    rc.close()
def test_component_function():
    """A component can switch the widget type it returns; widgets are swapped on re-render."""
    clear()
    assert count() == 0
    @react.component
    def button_or_label(make_button):
        if make_button:
            return w.Button(description="Button")
        else:
            return w.Label(description="Label")
    hbox, rc = react.render(button_or_label(make_button=False))
    assert count() == 2 + 3  # label + label layout + label style
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Label)
    assert hbox.children[0].description == "Label"
    rc.close()
    # Closing the render context must clean up every widget it created.
    assert count() == 0
    hbox, rc = react.render(button_or_label(make_button=True), hbox, "children")
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Button)
    assert hbox.children[0].description == "Button"
    assert count() == 3  # button + label
    rc.close()
def test_state_simple():
    """use_state updates flow back into the same widget instance on re-render."""
    clear()
    @react.component
    def slider_text():
        value, set_value = react.use_state(0.0)
        description, set_description = react.use_state("Not changed")
        def on_value(value: float):
            set_description(f"Value = {value}")
            set_value(value)
        return w.FloatSlider(value=value, on_value=on_value, description=description)
    hbox, rc = react.render(slider_text())
    assert count() == 2 + 3
    assert len(hbox.children) == 1
    slider = hbox.children[0]
    assert isinstance(slider, widgets.FloatSlider)
    assert slider.description == "Not changed"
    assert slider.value == 0
    # Changing the widget value triggers on_value -> state change -> re-render.
    slider.value = 1
    # we should update, not replace
    assert slider is hbox.children[0]
    assert slider.description == "Value = 1.0"
    assert count() == 2 + 3
    rc.close()
def test_restore_default():
    """Re-rendering with a trait's default value restores that default on the widget."""
    @react.component
    def Slider(value):
        return w.IntSlider(value=value, description=f"Value {value}")
    slider, rc = react.render_fixed(Slider(2))
    assert slider.description == "Value 2"
    assert slider.value == 2
    # 0 is IntSlider's default value; the widget must still be updated to it.
    rc.render(Slider(0))
    assert slider.description == "Value 0"
    assert slider.value == 0
    rc.render(Slider(2))
    assert slider.description == "Value 2"
    assert slider.value == 2
    rc.close()
    # now start with the default
    slider, rc = react.render_fixed(Slider(0))
    assert slider.description == "Value 0"
    assert slider.value == 0
    rc.render(Slider(2))
    assert slider.description == "Value 2"
    assert slider.value == 2
    rc.render(Slider(0))
    assert slider.description == "Value 0"
    assert slider.value == 0
    rc.close()
def test_state_complicated():
    """Calling a state setter during render still converges to a stable result."""
    @react.component
    def slider_text():
        value, set_value = react.use_state(0.0)
        description, set_description = react.use_state("Initial value")
        # Setter invoked during the render pass itself, not from a callback.
        set_description(f"Value = {value}")
        def on_value(value: float):
            set_value(value)
            set_description(f"Value = {value}")
        return w.FloatSlider(value=value, on_value=on_value, description=description)
    slider, rc = react.render_fixed(slider_text())
    assert slider.description == "Value = 0.0"
    assert slider.value == 0
    slider.value = 1
    assert slider.description == "Value = 1.0"
    rc.close()
def test_state_outside():
    """An externally owned widget can drive component state via use_state_widget."""
    clear()
    checkbox = widgets.Checkbox(value=False, description="Show button?")
    assert count() == 3
    @react.component
    def button_or_label(checkbox):
        show_button = react.use_state_widget(checkbox, "value")
        if show_button:
            return w.Button(description="Button")
        else:
            return w.Label(description="Label")
    el = button_or_label(checkbox=checkbox)
    hbox, rc = react.render(el)
    assert count() == 3 + 2 + 3  # checkbox, box, label
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Label)
    assert hbox.children[0].description == "Label"
    # Toggling the external checkbox should swap the label for a button.
    before = dict(ipywidgets.Widget.widgets)
    checkbox.value = True
    after = dict(ipywidgets.Widget.widgets)
    diff = set(after) - set(before)
    extra = list(diff)
    assert count() == 3 + 2 + 3  # similar
    assert len(extra) == 3
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Button)
    assert hbox.children[0].description == "Button"
    # Toggling back swaps the button for a fresh label.
    before = dict(ipywidgets.Widget.widgets)
    checkbox.value = False
    after = dict(ipywidgets.Widget.widgets)
    diff = set(after) - set(before)
    extra = list(diff)
    assert len(extra) == 3
    assert count() == 3 + 2 + 3
    assert len(hbox.children) == 1
    assert isinstance(hbox.children[0], widgets.Label)
    assert hbox.children[0].description == "Label"
    rc.close()
    # The externally created checkbox is ours to clean up, not the renderer's.
    checkbox.layout.close()
    checkbox.style.close()
    checkbox.close()
def test_children():
    """Growing and shrinking a child list reconciles widgets without leaks."""
    clear()
    # hbox = widgets.HBox()
    # slider = widgets.IntSlider(value=2, description="How many buttons?")
    @react.component
    def buttons():
        buttons, set_buttons = react.use_state(2)
        with w.HBox() as main:
            _ = w.IntSlider(value=buttons, on_value=set_buttons, description="How many buttons?")
            _ = [w.Button(description=f"Button {i}") for i in range(buttons)]
        return main
    hbox, rc = react.render_fixed(buttons())
    slider = hbox.children[0]
    # hbox + slider: 2 + 3
    assert count() == 2 + 3 + 2 * 3  # added 2 buttons
    assert len(hbox.children) == 1 + 2
    assert isinstance(hbox.children[1], widgets.Button)
    assert isinstance(hbox.children[2], widgets.Button)
    assert hbox.children[1] != hbox.children[2]
    assert hbox.children[1].description == "Button 0"
    assert hbox.children[2].description == "Button 1"
    slider.value = 3
    assert len(hbox.children) == 1 + 3
    assert count() == 2 + 3 + 2 * 3 + 3  # added 1 button
    assert isinstance(hbox.children[1], widgets.Button)
    assert isinstance(hbox.children[2], widgets.Button)
    assert isinstance(hbox.children[3], widgets.Button)
    assert hbox.children[1] != hbox.children[2]
    assert hbox.children[1] != hbox.children[3]
    assert hbox.children[2] != hbox.children[3]
    assert hbox.children[1].description == "Button 0"
    assert hbox.children[2].description == "Button 1"
    assert hbox.children[3].description == "Button 2"
    slider.value = 2
    assert count() == 2 + 3 + 2 * 3  # nothing added, just 1 unused
    # TODO: what do we do with pool?
    # assert len(rc.pool) == 1  # which is added to the pool
    assert len(hbox.children) == 1 + 2
    assert isinstance(hbox.children[1], widgets.Button)
    assert isinstance(hbox.children[2], widgets.Button)
    rc.close()
def test_display():
    """react.display renders an element and records the render context in last_rc."""
    slider = widgets.IntSlider(value=2, description="How many buttons?")
    @react.component
    def buttons(slider):
        buttons = react.use_state_widget(slider, "value")
        return w.Button(description=f"Button {buttons}")
    react.display(buttons(slider))
    assert react.core.local.last_rc is not None
    react.core.local.last_rc.close()
    # clean up the externally created slider (and its style/layout sub-widgets)
    slider.style.close()
    slider.layout.close()
    slider.close()
def test_box():
    """Rendering into an existing HBox via a named trait ("children") reconciles correctly."""
    hbox = widgets.HBox()
    slider = widgets.IntSlider(value=2, description="How many buttons?")
    assert count() == 2 + 3
    @react.component
    def buttons(slider):
        buttons = react.use_state_widget(slider, "value")
        return w.VBox(children=[w.Button(description=f"Button {i}") for i in range(buttons)])
    el = buttons(slider)
    hbox, rc = react.render(el, hbox, "children")
    assert count() == 2 + 3 + 2 + 2 * 3  # add vbox and 2 buttons
    assert len(hbox.children[0].children) == 2
    assert isinstance(hbox.children[0].children[0], widgets.Button)
    assert isinstance(hbox.children[0].children[1], widgets.Button)
    slider.value = 3
    rc.force_update()
    assert count() == 2 + 3 + 2 + 2 * 3 + 3  # add 1 button
    assert len(hbox.children[0].children) == 3
    assert isinstance(hbox.children[0].children[0], widgets.Button)
    assert isinstance(hbox.children[0].children[1], widgets.Button)
    assert isinstance(hbox.children[0].children[2], widgets.Button)
    slider.value = 2
    rc.force_update()
    assert count() == 2 + 3 + 2 + 2 * 3  # should clean up
    assert len(hbox.children[0].children) == 2
    assert isinstance(hbox.children[0].children[0], widgets.Button)
    assert isinstance(hbox.children[0].children[1], widgets.Button)
    rc.close()
    slider.style.close()
    slider.layout.close()
    slider.close()
def test_shared_instance(ButtonComponent):
    """The same element used twice renders as a single shared widget instance."""
    checkbox = widgets.Checkbox(value=True, description="Share button")
    @react.component
    def Buttons(checkbox):
        share = react.use_state_widget(checkbox, "value", "share")
        if share:
            # one element referenced twice -> both children point at one widget
            button_shared = ButtonComponent(description="Button shared", tooltip="shared")
            return w.VBox(children=[button_shared, button_shared])
        else:
            return w.VBox(children=[ButtonComponent(description=f"Button {i}") for i in range(2)])
    hbox, rc = react.render(Buttons(checkbox))
    vbox = hbox.children[0]
    assert vbox.children[0] is vbox.children[1]
    assert vbox.children[0].description == "Button shared"
    assert vbox.children[0].tooltip == "shared"
    checkbox.value = False
    rc.force_update()
    # distinct elements now -> distinct widgets, tooltip reset to its default
    assert vbox.children[0] is not vbox.children[1]
    assert vbox.children[0].description == "Button 0"
    assert vbox.children[1].description == "Button 1"
    assert vbox.children[0].tooltip == ""
    assert vbox.children[1].tooltip == ""
    rc.close()
    checkbox.style.close()
    checkbox.layout.close()
    checkbox.close()
def test_shared_instance_via_component(ButtonComponent):
    """A shared element passed through two component instances still maps to one widget."""
    @react.component
    def Child(button):
        return button
    @react.component
    def Buttons():
        button = ButtonComponent(description="Button shared")
        return w.VBox(children=[Child(button), Child(button)])
    vbox, rc = react.render_fixed(Buttons())
    assert vbox.children[0].description == "Button shared"
    assert vbox.children[1].description == "Button shared"
    assert vbox.children[0] is vbox.children[1]
    rc.close()
def test_bqplot():
    """A bqplot figure rerenders in place: updating data creates no new widgets."""
    clear()
    exponent = widgets.FloatSlider(min=0.1, max=2, value=1.0, description="Exponent")
    assert count() == 3
    @react.component
    def Plot(exponent, x, y):
        exponent_value = react.use_state_widget(exponent, "value")
        x = x**exponent_value
        x_scale = bqplot.LinearScale(allow_padding=False)
        y_scale = bqplot.LinearScale(allow_padding=False)
        lines = bqplot.Lines(x=x, y=y, scales={"x": x_scale, "y": y_scale}, stroke_width=3, colors=["red"], display_legend=True, labels=["Line chart"])
        x_axis = bqplot.Axis(scale=x_scale)
        y_axis = bqplot.Axis(scale=y_scale)
        axes = [x_axis, y_axis]
        return bqplot.Figure(axes=axes, marks=[lines], scale_x=x_scale, scale_y=y_scale)
    x = np.arange(4)
    y = x**exponent.value
    hbox, rc = react.render(Plot(exponent, x, y))
    widgets_initial = count()
    figure = hbox.children[0]
    import bqplot as bq  # type: ignore
    assert isinstance(figure.axes[0], bq.Axis)
    assert figure.marks[0].x.tolist() == x.tolist()
    # scales/axes must be distinct objects, but shared between axis and mark
    assert figure.axes[0] is not figure.axes[1]
    assert figure.axes[0].scale is not figure.axes[1].scale
    assert figure.axes[0].scale is figure.marks[0].scales["x"]
    before = dict(ipywidgets.Widget.widgets)
    exponent.value = 2
    after = dict(ipywidgets.Widget.widgets)
    diff = set(after) - set(before)
    extra = list(diff)
    assert extra == []
    before = dict(ipywidgets.Widget.widgets)
    assert count() == widgets_initial  # nothing should be recreated
    # figure = box.children[0]
    assert figure.marks[0].x.tolist() == (x**2).tolist()
    exponent.style.close()
    exponent.layout.close()
    exponent.close()
    rc.close()
def test_use_effect():
    """use_effect attaches a click handler to the real widget via get_widget."""
    @react.component
    def Button2():
        clicks, set_clicks = react.use_state(0)
        def add_event_handler():
            # resolve the real widget behind the element inside the effect
            button: widgets.Button = react.core.get_widget(button_el)
            def handler(change):
                set_clicks(clicks + 1)
            button.on_click(handler)
            # cleanup: detach, so handlers do not accumulate across renders
            return lambda: button.on_click(handler, remove=True)
        react.use_effect(add_event_handler)
        button_el = w.Button(description=f"Clicked {clicks} times")
        return button_el
    hbox, rc = react.render(Button2())
    assert count() == 2 + 3  # hbox + button
    button = hbox.children[0]
    assert button.description == "Clicked 0 times"
    button.click()
    # exactly one handler despite the rerender after set_clicks
    assert len(button._click_handlers.callbacks) == 1
    assert button.description == "Clicked 1 times"
    rc.close()
def test_use_effect_no_deps():
    """Without a dependency list the effect reruns (and cleans up) on every render."""
    calls = 0
    cleanups = 0
    @react.component
    def TestNoDeps(a, b):
        def test_effect():
            def cleanup():
                nonlocal cleanups
                cleanups += 1
            nonlocal calls
            calls += 1
            return cleanup
        react.use_effect(test_effect)
        return w.Button()
    hbox, rc = react.render(TestNoDeps(a=1, b=1))
    assert calls == 1
    assert cleanups == 0
    rc.render(TestNoDeps(a=1, b=1), hbox)
    # same args, but no deps list -> effect runs again, previous one cleaned up
    assert calls == 2
    assert cleanups == 1
    rc.close()
def test_use_effect_once():
    """With an empty deps list the effect runs once, after the final render pass."""
    calls = 0
    cleanups = 0
    counters_seen = []
    @react.component
    def TestNoDeps(a, b):
        counter, set_counter = react.use_state(0)
        def test_effect():
            def cleanup():
                nonlocal cleanups
                cleanups += 1
            nonlocal calls
            calls += 1
            # we should only be executed after the last render
            # when counter is 1
            counters_seen.append(counter)
            return cleanup
        # this forces a rerender, but the use_effect should
        # still be called just once
        if counter == 0:
            set_counter(1)
        react.use_effect(test_effect, [])
        return w.Button()
    hbox, rc = react.render(TestNoDeps(a=1, b=1))
    assert calls == 1
    assert cleanups == 0
    assert counters_seen == [1]
    rc.render(TestNoDeps(a=1, b=1), hbox)
    assert calls == 1
    assert cleanups == 0
    rc.close()
def test_use_effect_deps():
    """The effect reruns only when a value in its dependency list changes."""
    calls = 0
    cleanups = 0
    @react.component
    def TestNoDeps(a, b):
        def test_effect():
            def cleanup():
                nonlocal cleanups
                cleanups += 1
            nonlocal calls
            calls += 1
            return cleanup
        react.use_effect(test_effect, [a, b])
        return w.Button()
    hbox, rc = react.render(TestNoDeps(a=1, b=1))
    assert calls == 1
    assert cleanups == 0
    rc.render(TestNoDeps(a=1, b=1), hbox)
    # deps unchanged -> no rerun
    assert calls == 1
    assert cleanups == 0
    rc.render(TestNoDeps(a=1, b=2), hbox)
    # b changed -> cleanup old effect, run new one
    assert calls == 2
    assert cleanups == 1
    rc.close()
@react.component
def ButtonClicks(**kwargs):
    """Reusable test component: a button counting its own clicks; kwargs forwarded to w.Button."""
    clicks, set_clicks = react.use_state(0)
    return w.Button(description=f"Clicked {clicks} times", on_click=lambda: set_clicks(clicks + 1), **kwargs)
def test_use_button():
    """The on_click argument wires up exactly one handler and updates the label."""
    hbox, rc = react.render(ButtonClicks())
    assert count() == 2 + 3  # hbox + button
    button = hbox.children[0]
    assert button.description == "Clicked 0 times"
    button.click()
    assert len(button._click_handlers.callbacks) == 1
    assert button.description == "Clicked 1 times"
    rc.close()
def test_key_widget():
    """Keyed widget elements keep their widget instances when siblings reorder."""
    set_reverse = None
    @react.component
    def Buttons():
        nonlocal set_reverse
        reverse, set_reverse = react.use_state(False)
        with w.VBox() as main:
            if reverse:
                widgets.IntSlider.element(value=4).key("slider")
                widgets.Button.element(description="Hi").key("btn")
            else:
                widgets.Button.element(description="Hi").key("btn")
                widgets.IntSlider.element(value=4).key("slider")
        return main
    box = react.make(Buttons(), handle_error=False)
    assert react.core.local.last_rc
    rc = react.core.local.last_rc
    assert set_reverse is not None
    button1, slider1 = box.children[0].children
    assert isinstance(button1, widgets.Button)
    assert isinstance(slider1, widgets.IntSlider)
    set_reverse(True)
    rc.force_update()
    slider2, button2 = box.children[0].children
    assert isinstance(button2, widgets.Button)
    assert isinstance(slider2, widgets.IntSlider)
    # same keys -> same underlying widgets, just reordered
    assert button1 is button2
    assert slider1 is slider2
    assert react.core.local.last_rc
    rc.close()
def test_key_root():
    """A .key() applied to the root element must not break rendering."""
    @react.component
    def Buttons():
        return widgets.Button.element(description="Hi").key("btn")
    container = react.make(Buttons(), handle_error=False)
    root_widget = container.children[0]
    assert isinstance(root_widget, widgets.Button)
    assert react.core.local.last_rc
    react.core.local.last_rc.close()
def test_key_component_function():
    """Keyed component instances keep their hook state while being reordered/removed."""
    @react.component
    def ButtonClicks(nr, **kwargs):
        clicks, set_clicks = react.use_state(0)
        return w.Button(description=f"{nr}: Clicked {clicks} times", on_click=lambda: set_clicks(clicks + 1), **kwargs)
    @react.component
    def Buttons():
        count, set_count = react.use_state(3)
        reverse, set_reverse = react.use_state(False)
        slider = w.IntSlider(value=count, description="How many?", on_value=set_count)
        checkbox = w.Checkbox(value=reverse, on_value=lambda x: set_reverse(x), description="Reverse?")
        buttons = [ButtonClicks(i).key(f"button-{i}") for i in range(count)]
        if reverse:
            buttons = buttons[::-1]
        buttons_box = w.VBox(children=buttons)
        return w.HBox(children=[slider, checkbox, buttons_box])
    box = react.make(Buttons(), handle_error=False)
    assert react.core.local.last_rc
    rc = react.core.local.last_rc
    slider, checkbox, buttons = box.children[0].children
    assert buttons.children[0].description == "0: Clicked 0 times"
    assert buttons.children[1].description == "1: Clicked 0 times"
    assert buttons.children[2].description == "2: Clicked 0 times"
    buttons.children[0].click()
    rc.force_update()
    assert buttons.children[0].description == "0: Clicked 1 times"
    assert buttons.children[1].description == "1: Clicked 0 times"
    assert buttons.children[2].description == "2: Clicked 0 times"
    # reversing moves widgets but the click counts follow their keys
    checkbox.value = True
    rc.force_update()
    assert buttons.children[0].description == "2: Clicked 0 times"
    assert buttons.children[1].description == "1: Clicked 0 times"
    assert buttons.children[2].description == "0: Clicked 1 times"
    slider.value = 2
    rc.force_update()
    assert buttons.children[0].description == "1: Clicked 0 times"
    assert buttons.children[1].description == "0: Clicked 1 times"
    # NOTE(review): "2" reappears with "Clicked 0 times" — state of removed keyed
    # components apparently resets on re-add (consistent with test_state_leak below)
    slider.value = 3
    rc.force_update()
    assert buttons.children[0].description == "2: Clicked 0 times"
    assert buttons.children[1].description == "1: Clicked 0 times"
    assert buttons.children[2].description == "0: Clicked 1 times"
    rc.close()
def test_key_collision():
    """Two siblings with the same key must raise a KeyError mentioning 'Duplicate'."""
    @react.component
    def Child():
        return w.Button()
    @react.component
    def Test():
        with w.HBox() as main:
            Child().key("collide")  # type: ignore
            Child().key("collide")  # type: ignore
        return main
    with pytest.raises(KeyError, match="Duplicate"):
        hbox, _rc = react.render_fixed(Test(), handle_error=False)
def test_vue():
    """ipyvuetify event wiring: use_event attaches exactly one click handler."""
    @react.component
    def Single():
        clicks, set_clicks = react.use_state(0)
        btn = v.Btn(children=[f"Clicks {clicks}"])
        v.use_event(btn, "click", lambda *_ignore: set_clicks(clicks + 1))
        return btn
    btn, rc = react.render_fixed(Single())
    btn.fire_event("click", {})
    assert btn.children[0] == "Clicks 1"
    # rerender must not stack a second handler
    assert len(btn._event_handlers_map["click"].callbacks) == 1
    rc.force_update()
    rc.close()
def test_interactive():
    """component_interactive builds controls (an IntSlider for `count`) plus output."""
    @react.component_interactive(count=3)
    def f(count):
        with w.HBox() as main:
            [w.Button(description=f"Button {i}") for i in range(count)]
        return main
    control = f.children[0]
    slider = control.children[0]
    assert isinstance(slider, widgets.IntSlider)
    box = f.children[1]
    hbox = box.children[0]
    button0 = hbox.children[0]
    assert button0.description == "Button 0"
    assert react.core.local.last_rc is not None
    react.core.local.last_rc.close()
    # manually tear down the interactive controls and their sub-widgets
    for widget in control.children:
        if hasattr(widget, "layout"):
            widget.layout.close()
        if hasattr(widget, "style"):
            widget.style.close()
        widget.close()
    control.close()
    control.layout.close()
    assert react.core._last_interactive_vbox is not None
    react.core._last_interactive_vbox.layout.close()
    react.core._last_interactive_vbox.close()
def test_use_reducer():
    """use_reducer: dispatching actions drives state through the reducer."""
    def click_reducer(state, action):
        if action == "increment":
            state = state + 1
        return state
    @react.component
    def Button():
        clicks, dispatch = react.use_reducer(click_reducer, 0)
        return w.Button(description=f"Clicked {clicks} times", on_click=lambda: dispatch("increment"))
    hbox, rc = react.render(Button())
    button = hbox.children[0]
    assert button.description == "Clicked 0 times"
    button.click()
    assert button.description == "Clicked 1 times"
    button.click()
    assert button.description == "Clicked 2 times"
    rc.close()
def test_context():
    """create_context/provide/use_context: state and dispatch reach nested children."""
    # context value type: (clicks, dispatch) or None before a provider exists
    v = cast(Optional[Tuple[int, Callable[[str], None]]], None)
    store_context = react.create_context(v)
    def click_reducer(state: int, action: str):
        if action == "increment":
            state = state + 1
        return state
    @react.component
    def SubChild2():
        # consumes the context two levels below the provider
        clicks, _dispatch = react.use_context(store_context)
        return w.Button(description=f"Child2: Clicked {clicks} times")
    @react.component
    def Child2():
        return SubChild2()
    @react.component
    def Child1():
        clicks, dispatch = react.use_context(store_context)
        return w.Button(description=f"Child1: Clicked {clicks} times", on_click=lambda: dispatch("increment"))
    @react.component
    def App():
        clicks, dispatch = react.use_reducer(click_reducer, 0)
        store_context.provide((clicks, dispatch))
        with w.HBox() as main:
            Child1()
            Child2()
        return main
    hbox, rc = react.render_fixed(App())
    button1 = hbox.children[0]
    button2 = hbox.children[1]
    assert button1.description == "Child1: Clicked 0 times"
    assert button2.description == "Child2: Clicked 0 times"
    button1.click()
    assert button1.description == "Child1: Clicked 1 times"
    assert button2.description == "Child2: Clicked 1 times"
    button2.click()  # not attached
    assert button1.description == "Child1: Clicked 1 times"
    assert button2.description == "Child2: Clicked 1 times"
    button1.click()
    assert button1.description == "Child1: Clicked 2 times"
    assert button2.description == "Child2: Clicked 2 times"
    rc.close()
def test_memo():
    """use_memo (call form and decorator form) recomputes only when its args change."""
    calls_ab = 0
    calls_ac = 0
    @react.component
    def TestMemo(a, b, c):
        def expensive_ab(i, j):
            nonlocal calls_ab
            calls_ab += 1
            return i + j
        # decorator form: args become the memo dependencies
        @react.use_memo
        def expensive_ac(i, j):
            nonlocal calls_ac
            calls_ac += 1
            return i + j * 2
        # call form: explicit args/kwargs as dependencies
        x = react.use_memo(expensive_ab, args=[a], kwargs={"j": b})
        y = expensive_ac(a, c)
        return w.Label(value=f"{x} - {y}")
    label, rc = react.render_fixed(TestMemo(a=1, b=2, c=3))
    assert calls_ab == 1
    assert calls_ac == 1
    assert label.value == "3 - 7"
    rc.render(TestMemo(a=1, b=20, c=3))
    assert calls_ab == 2
    assert calls_ac == 1
    assert label.value == "21 - 7"
    rc.render(TestMemo(a=1, b=20, c=30))
    assert calls_ab == 2
    assert calls_ac == 2
    assert label.value == "21 - 61"
    rc.render(TestMemo(a=10, b=20, c=30))
    assert calls_ab == 3
    assert calls_ac == 3
    assert label.value == "30 - 70"
    rc.close()
def test_container_context_simple():
    """Elements created inside a `with` container block become its children."""
    @react.component
    def ContainerContext():
        with w.HBox() as box:
            w.Label(value="in container")
            w.Button(description="button")
        return box
    box, rc = react.render_fixed(ContainerContext())
    assert len(box.children) == 2
    assert box.children[0]
    rc.close()
def test_container_context_bqplot():
    """Inside a container context, only the Figure (not scales/axes/marks) becomes a child."""
    @react.component
    def ContainerContext(exponent=1.2):
        x = np.arange(4)
        y = x**1.2
        with w.HBox() as box:
            x_scale = bqplot.LinearScale(allow_padding=False)
            y_scale = bqplot.LinearScale(allow_padding=False)
            lines = bqplot.Lines(x=x, y=y, scales={"x": x_scale, "y": y_scale}, stroke_width=3, colors=["red"], display_legend=True, labels=["Line chart"])
            x_axis = bqplot.Axis(scale=x_scale)
            y_axis = bqplot.Axis(scale=y_scale)
            axes = [x_axis, y_axis]
            bqplot.Figure(axes=axes, marks=[lines], scale_x=x_scale, scale_y=y_scale)
        return box
    box, rc = react.render_fixed(ContainerContext())
    assert len(box.children) == 1
    rc.close()
def test_get_widget():
    """get_widget inside an effect resolves each element to its rendered widget."""
    button1 = None
    button2 = None
    @component
    def Multiple():
        def get_widgets():
            nonlocal button1
            nonlocal button2
            button1 = react.core.get_widget(button1el)
            button2 = react.core.get_widget(button2el)
        use_effect(get_widgets)
        with w.VBox() as main:
            button1el = w.Button(description="1")
            button2el = w.Button(description="2")
        return main
    box, rc = react.render_fixed(Multiple())
    assert box.children[0] is button1
    assert box.children[1] is button2
    rc.close()
def test_get_widget_multi_render():
    """get_widget still resolves correctly when the component rerenders several times."""
    button = None
    @component
    def Multiple():
        value, set_value = react.use_state(0)
        def get_widgets():
            nonlocal button
            button = react.core.get_widget(button_el)
        use_effect(get_widgets, [])
        # force a few extra render passes before the effect fires
        if value < 3:
            set_value(value + 1)
        with w.VBox() as main:
            button_el = w.Button(description="1")
        return main
    box, rc = react.render_fixed(Multiple())
    assert box.children[0] is button
    rc.close()
def test_on_argument():
    # since we have special treatment with on_, lets test special cases
    # like an argument with the same name
    """A component argument named on_* is passed through as a plain callable."""
    @component
    def Test(on_test):
        on_test("hi")
        return w.Button()
    mock = unittest.mock.Mock()
    box, rc = react.render_fixed(Test(on_test=mock))
    mock.assert_called()
    rc.close()
def test_on_trait():
    # similar to an argument, but now a trait that happens to start with on_
    """A widget trait starting with on_ is set as a trait, not treated as an event."""
    class SomeWidget(widgets.Button):
        on_hover = traitlets.traitlets.Callable(None, allow_none=True)
    mock = unittest.mock.Mock()
    widget, rc = react.render_fixed(SomeWidget.element(on_hover=mock))
    assert widget.on_hover is mock
    # mock.assert_called()
    rc.close()
def test_other_event_handlers():
    # previously, we used unobserve_all, which removed event handlers not attached via on_ arguments
    """Rerendering must not remove observers that user code attached directly."""
    mock = unittest.mock.Mock()
    @component
    def Test():
        value, set_value = react.use_state("hi")
        text = w.Text(value=value, on_value=set_value)
        def add_my_own_event_handler():
            widget = react.get_widget(text)
            def event_handler(change):
                mock(change.new)
            # react is not aware of this event handler, and should not
            # remove it
            widget.observe(event_handler, "value")
            return lambda: None
        use_effect(add_my_own_event_handler, [])  # only add it once
        return text
    text, rc = react.render_fixed(Test())
    # first time, it's always ok
    text.value = "hallo"
    mock.assert_called()
    mock.reset_mock()
    # second time, we executed the render loop again, did we remove the event handler?
    text.value = "ola"
    mock.assert_called()
    rc.close()
def test_state_leak_different_components():
    """Swapping between two component types resets the detached component's state."""
    @react.component
    def SliderComponent():
        value, set_value = react.use_state(1)
        return w.IntSlider(value=value, on_value=set_value)
    @react.component
    def OtherComponent():
        value, set_value = react.use_state(10)
        return w.IntText(value=value, on_value=set_value)
    set_show_other = None
    @react.component
    def Test():
        nonlocal set_show_other
        show_other, set_show_other = react.use_state(False)
        with w.VBox() as main:
            if show_other:
                OtherComponent()
            else:
                SliderComponent()
        return main
    test, rc = react.render_fixed(Test())
    assert set_show_other is not None
    assert test.children[0].value == 1
    set_show_other(True)
    rc.force_update()
    assert test.children[0].value == 10
    test.children[0].value = 11
    set_show_other(False)
    rc.force_update()
    assert test.children[0].value == 1
    test.children[0].value = 2
    set_show_other(True)
    rc.force_update()
    # the state should be reset, if the component was detached
    assert test.children[0].value == 10
    set_show_other(False)
    rc.force_update()
    assert test.children[0].value == 1
    rc.close()
@pytest.mark.skipif(not react.core.DEBUG, reason="only works in debug mode")
def test_exceptions():
    """In debug mode a bad keyword argument yields a traceback naming the component."""
    @react.component
    def Test():
        return w.Button()
    try:
        test, _rc = react.render_fixed(Test(non_existing_arg=1))  # type: ignore
    except TypeError as e:
        formatted = traceback.format_tb(e.__traceback__)
        # debug mode injects synthetic frames pointing at the component and its args
        assert "Test" in formatted[3]
        assert "non_existing_arg" in formatted[3]
    else:
        assert False, "no error occurred"
@pytest.mark.skipif(not react.core.DEBUG, reason="only works in debug mode")
def test_exceptions_debug_in_render_function():
    """Errors raised inside a render function show both parent and child frames."""
    @react.component
    def Child():
        # the stacktrace should include the next line
        a = non_existing  # type: ignore # noqa
        return w.Button()
    @react.component
    def Test():
        return Child()  # but also this line, which is not an actual part of the stack trace
    try:
        test, rc = react.render_fixed(Test())  # type: ignore
    except NameError as e:
        formatted = traceback.format_tb(e.__traceback__)
        assert "Child" in formatted[3]
        assert "non_existing" in formatted[4]
    else:
        assert False, "no error occurred"
@pytest.mark.skipif(not react.core.DEBUG, reason="only works in debug mode")
def test_exceptions_debug_in_consolidate():
    """Errors during widget reconciliation (after a state update) keep useful frames."""
    set_value = None
    @react.component
    def Child():
        nonlocal set_value
        value, set_value = react.use_state("label")
        return w.Button(description=value)  # we'd like to see this line
    @react.component
    def Test():
        return Child()
    test, rc = react.render_fixed(Test())  # type: ignore
    assert set_value is not None
    try:
        # 1 is not a valid description (must be str) -> fails in consolidate
        set_value(1)  # type: ignore
    except Exception as e:
        formatted = traceback.format_tb(e.__traceback__)
        assert "Child" in formatted[3]
        assert "Button" in formatted[4]
    rc.close()
def test_mime_bundle():
    """The mime_bundle given to the component decorator is exposed on the element."""
    @react.component(mime_bundle={"text/plain": "text"})
    def Test(a=1):
        return w.Button()
    bundle = Test().mime_bundle
    assert bundle["text/plain"] == "text"
def test_use_state_with_function():
    """set_state also accepts an updater function that receives the previous value."""
    @react.component
    def ButtonClick(label="Hi"):
        clicks, set_clicks = react.use_state(0)
        def increment(previous):
            return previous + 1
        return w.Button(description=f"{label}: Clicked {clicks} times", on_click=lambda: set_clicks(increment))
    button, rc = react.render_fixed(ButtonClick())
    button.click()
    assert button.description == "Hi: Clicked 1 times"
    rc.close()
def test_use_ref():
    """use_ref hands back the identical object on every render."""
    seen = None
    @react.component
    def ButtonClick(label="Hi"):
        nonlocal seen
        clicks, set_clicks = react.use_state(0)
        ref = react.use_ref({"hi": 1})
        seen = ref.current
        return w.Button(description=f"{label}: Clicked {clicks} times", on_click=lambda: set_clicks(clicks + 1))
    button, rc = react.render_fixed(ButtonClick())
    button.click()
    first_seen = seen
    button.click()
    # a second render must yield the very same ref.current object
    assert seen is first_seen
    rc.close()
def test_render_many_consolidate_once():
    """Many chained rerenders consolidate into a single widget update."""
    set_value = None
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state(0)
        if value >= 5 and value < 15:  # force 10 renders
            set_value(value + 1)
        return w.IntSlider(value=value)
    mock = unittest.mock.Mock()
    slider, rc = react.render_fixed(Test())
    assert set_value is not None
    slider.observe(lambda change: mock(change.new), "value")
    set_value(4)
    mock.assert_called_once_with(4)
    set_value(5)
    assert slider.value == 15
    # even though we had many renders (from 5 to 15), we only reconsolidated once (i.e. 1 extra call)
    mock.assert_has_calls([unittest.mock.call(4), unittest.mock.call(15)])
    rc.close()
def test_recover_exception(capsys):
    """After a render raises, resetting the state renders successfully again."""
    set_fail = None
    @react.component
    def Test():
        nonlocal set_fail
        fail, set_fail = react.use_state(False)
        if fail:
            raise Exception("fail")
        return w.IntSlider()
    # with pytest.raises(Exception):
    slider, rc = react.render_fixed(Test())
    assert set_fail is not None
    with pytest.raises(Exception, match="fail"):
        set_fail(True)
    # recover: the component renders again and still produces a real slider
    set_fail(False)
    rc.close()
    assert isinstance(slider, ipywidgets.IntSlider)
def test_state_get():
    """state_get snapshots hook state (live view) and initial_state restores it."""
    set_value = None
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state(0)
        return w.IntSlider(value=value)
    slider, rc = react.render_fixed(Test())
    assert set_value is not None
    state = rc.state_get()
    assert state == {"children": {"Test/": {"state": {"0": 0}}}, "state": {}}
    set_value(42)
    # the returned dict reflects the update (live view, not a deep copy)
    assert state == {"children": {"Test/": {"state": {"0": 42}}}, "state": {}}
    assert slider.value == 42
    rc.close()
    # a fresh render seeded with the snapshot starts at the stored value
    box = widgets.VBox()
    hbox, rc = react.render(Test(), box, initial_state=state, handle_error=False)
    assert box.children[0].value == 42
    rc.close()
def test_cleanup():
    """Removing and re-adding unkeyed children: survivors keep state, newcomers start fresh."""
    set_count = None
    @react.component
    def Test():
        nonlocal set_count
        count, set_count = react.use_state(1)
        with w.VBox() as main:
            for i in range(count):
                ButtonClicks()
        return main
    # with pytest.raises(Exception):
    box, rc = react.render_fixed(Test())
    assert set_count is not None
    buttons = box.children
    assert len(buttons) == 1
    buttons[0].click()
    assert buttons[0].description == "Clicked 1 times"
    set_count(2)
    rc.force_update()
    buttons = box.children
    assert len(buttons) == 2
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].description == "Clicked 0 times"
    buttons[1].click()
    buttons[1].click()
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].description == "Clicked 2 times"
    set_count(1)
    rc.force_update()
    buttons = box.children
    assert len(buttons) == 1
    assert buttons[0].description == "Clicked 1 times"
    # re-adding the second button: its previous state was cleaned up
    set_count(2)
    rc.force_update()
    buttons = box.children
    assert len(buttons) == 2
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].description == "Clicked 0 times"
    rc.close()
# same idea as ButtonClicks, but a structurally different component
# (used below to test switching between component types)
@react.component
def ButtonClicks2():
    """Click-counting button wrapped with extra sibling widgets and a nested ButtonClicks3."""
    clicks, set_clicks = react.use_state(0)
    with w.VBox() as main:
        w.Label()
        w.Text()
        w.Button(description=f"Clicked {clicks} times", on_click=lambda: set_clicks(clicks + 1))
        w.VBox(children=[ButtonClicks3()])
        w.VBox(children=[w.Checkbox()])
    return main
@react.component
def ButtonClicks3():
    """Minimal click-counting button inside a VBox (nested inside ButtonClicks2)."""
    clicks, set_clicks = react.use_state(0)
    with w.VBox() as main:
        w.Button(description=f"Clicked {clicks} times", on_click=lambda: set_clicks(clicks + 1))
    return main
def test_insert_no_key():
    """Inserting siblings WITHOUT keys shifts positions, so later siblings lose state."""
    set_insert = None
    @react.component
    def Test():
        nonlocal set_insert
        insert, set_insert = react.use_state(False)
        with w.VBox() as main:
            ButtonClicks()
            if insert:
                ButtonClicks2()
                w.Text()
            ButtonClicks3()
        return main
    # with pytest.raises(Exception):
    box, rc = react.render_fixed(Test(), handle_error=False)
    assert set_insert is not None
    # rc.force_update()
    buttons = box.children
    assert len(buttons) == 2
    buttons[0].click()
    buttons[1].children[0].click()
    buttons[1].children[0].click()
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[0].description == "Clicked 2 times"
    rc.force_update()
    set_insert(True)
    buttons = box.children
    assert len(buttons) == 4
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 0 times"
    # ButtonClicks3 lost its clicks: without keys it was matched by position
    assert buttons[3].children[0].description == "Clicked 0 times"
    buttons[1].children[2].click()
    buttons[1].children[2].click()
    buttons[1].children[2].click()
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 3 times"
    assert buttons[3].children[0].description == "Clicked 0 times"
    rc.force_update()
    set_insert(False)
    buttons = box.children
    assert len(buttons) == 2
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[0].description == "Clicked 0 times"
    rc.force_update()
    set_insert(True)
    buttons = box.children
    assert len(buttons) == 4
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 0 times"
    assert buttons[3].children[0].description == "Clicked 0 times"
    rc.force_update()
    rc.close()
def test_insert_with_key():
    """Inserting siblings WITH keys preserves the state of existing keyed components."""
    set_insert = None
    @react.component
    def Test():
        nonlocal set_insert
        insert, set_insert = react.use_state(False)
        with w.VBox() as main:
            ButtonClicks().key("1")  # type: ignore
            if insert:
                ButtonClicks2().key("2")  # type: ignore
            ButtonClicks3().key("3")  # type: ignore
        return main
    # with pytest.raises(Exception):
    box, rc = react.render_fixed(Test())
    assert set_insert is not None
    rc.force_update()
    buttons = box.children
    assert len(buttons) == 2
    buttons[0].click()
    buttons[1].children[0].click()
    buttons[1].children[0].click()
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[0].description == "Clicked 2 times"
    rc.force_update()
    set_insert(True)
    buttons = box.children
    assert len(buttons) == 3
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 0 times"
    # unlike the unkeyed case, ButtonClicks3 (key "3") keeps its 2 clicks
    assert buttons[2].children[0].description == "Clicked 2 times"
    buttons[1].children[2].click()
    buttons[1].children[2].click()
    buttons[1].children[2].click()
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 3 times"
    assert buttons[2].children[0].description == "Clicked 2 times"
    rc.force_update()
    set_insert(False)
    buttons = box.children
    assert len(buttons) == 2
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[0].description == "Clicked 2 times"
    rc.force_update()
    set_insert(True)
    buttons = box.children
    assert len(buttons) == 3
    assert buttons[0].description == "Clicked 1 times"
    assert buttons[1].children[2].description == "Clicked 0 times"
    assert buttons[2].children[0].description == "Clicked 2 times"
    rc.force_update()
    rc.close()
def test_vue_orphan_not_close():
    """Closing a render context must not close a VueTemplate's shared template widget."""
    import ipyvue
    class MyTemplate(ipyvue.VueTemplate):
        template_file = (__file__, "test.vue")
    @react.component
    def Test():
        return MyTemplate.element()
    box, rc = react.render(Test())
    template = box.children[0].template
    rc.close()
    # make sure we can render after close (close should not close the template widget)
    box2, rc2 = react.render(Test())
    rc2.close()
    template.close()
@pytest.mark.parametrize("in_container", [False, True])
def test_switch_component(in_container):
    """Switching the rendered component type (optionally wrapped in a container) works both ways."""
    @react.component
    def Child1():
        with Container() as main:
            w.Button(description="1")
        return main
    @react.component
    def Child2():
        with Container() as main:
            ButtonNumber(2)
        return main
    @react.component
    def Child3():
        with Container() as main:
            ButtonNumber(3)
        return main
    set_value = None
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state(1)
        # index 0 is unused; value selects which child component to render
        children = [None, Child1, Child2, Child3]
        component = children[value]
        assert component is not None
        if in_container:
            return Container(children=[component()])
        else:
            return component()
    box, rc = react.render(Test())
    def get_description():
        # the extra wrapping container adds one nesting level
        if in_container:
            button = box.children[0].children[0].children[0]
        else:
            button = box.children[0].children[0]
        return button.description
    assert get_description() == "1"
    assert set_value is not None
    # set_value(2)
    # assert get_description() == "2"
    rc.force_update()
    set_value(3)
    rc.force_update()
    assert get_description() == "3"
    set_value(1)
    rc.force_update()
    assert get_description() == "1"
    set_value(3)
    rc.force_update()
    assert get_description() == "3"
    set_value(2)
    rc.force_update()
    assert get_description() == "2"
    set_value(1)
    rc.force_update()
    assert get_description() == "1"
    rc.close()
def test_switch_simple():
    """Switching the container's single child between widget types rerenders cleanly."""
    set_value = None
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state(True)
        if value:
            return Container(children=[w.Button(description="button")])
        else:
            return Container(children=[w.IntSlider(description="slider")])
    box, rc = react.render(Test())
    assert set_value is not None
    assert box.children[0].children[0].description == "button"
    rc.force_update()
    set_value(False)
    rc.force_update()
    assert box.children[0].children[0].description == "slider"
    rc.force_update()
    rc.close()
def test_switch_widget_and_component():
    """Switching between a plain widget element and a component must run the
    component's mount effect exactly once and its cleanup on unmount."""
    set_value = None
    effect = unittest.mock.Mock()
    cleanup = unittest.mock.Mock()
    @react.component
    def Child():
        def my_effect():
            effect()
            return cleanup  # returned callable runs when Child unmounts
        use_effect(my_effect, [])  # empty deps: run once per mount
        return w.Button(description="child")
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state(True)
        with Container() as main:
            if value:
                w.Button(description="widget")  # plain widget-element branch
            else:
                Child()  # component branch (carries effect/cleanup)
        return main
    box, rc = react.render_fixed(Test())
    assert set_value is not None
    assert box.children[0].description == "widget"
    rc.force_update()
    # widget -> component: mount effect fires once, no cleanup yet
    set_value(False)
    effect.assert_called_once()
    cleanup.assert_not_called()
    rc.force_update()
    # force_update must not re-run the mount effect
    effect.assert_called_once()
    cleanup.assert_not_called()
    assert box.children[0].description == "child"
    # component -> widget: Child unmounts, cleanup fires exactly once
    set_value(True)
    assert box.children[0].description == "widget"
    cleanup.assert_called_once()
    rc.force_update()
    cleanup.assert_called_once()
    rc.force_update()
    rc.close()
def test_switch_component_key():
    """Changing a component's .key() must unmount the old instance (cleanup)
    and mount a fresh one (effect), even though the component type is unchanged."""
    set_value = None
    effect = unittest.mock.Mock()
    cleanup = unittest.mock.Mock()
    @react.component
    def Child(value):
        def my_effect():
            effect()
            return cleanup  # runs when this keyed instance unmounts
        use_effect(my_effect, [])  # empty deps: once per mount
        return w.Button(description=value)
    @react.component
    def Test():
        nonlocal set_value
        value, set_value = react.use_state("1")
        with Container() as main:
            Child(value=value).key(value)  # key change forces a remount
        return main
    box, rc = react.render_fixed(Test())
    assert set_value is not None
    assert box.children[0].description == "1"
    effect.assert_called_once()
    cleanup.assert_not_called()
    rc.force_update()
    # key "1" -> "2": old instance cleaned up, new instance's effect runs
    set_value("2")
    assert box.children[0].description == "2"
    effect.assert_has_calls([unittest.mock.call(), unittest.mock.call()])
    cleanup.assert_called_once()
    rc.force_update()
    effect.assert_has_calls([unittest.mock.call(), unittest.mock.call()])
    cleanup.assert_called_once()
    # key "2" -> "3": third mount effect, second cleanup
    set_value("3")
    assert box.children[0].description == "3"
    effect.assert_has_calls([unittest.mock.call(), unittest.mock.call(), unittest.mock.call()])
    cleanup.assert_has_calls([unittest.mock.call(), unittest.mock.call()])
    rc.force_update()
    effect.assert_has_calls([unittest.mock.call(), unittest.mock.call(), unittest.mock.call()])
    cleanup.assert_has_calls([unittest.mock.call(), unittest.mock.call()])
    rc.force_update()
    rc.close()
    # closing the context unmounts the last instance -> third cleanup call
    cleanup.assert_has_calls([unittest.mock.call(), unittest.mock.call(), unittest.mock.call()])
def test_render_twice_different_element():
    """A state change issued *during* render (re-entrant set_action) must settle
    on the widget produced by the final render pass."""
    set_action = None
    @react.component
    def Test():
        nonlocal set_action
        action, set_action = react.use_state(0)
        if action == 0:
            return w.Button()
        elif action == 1:
            # render float slider, and text in the same render phase
            set_action(2)  # re-entrant state change while rendering
            return w.FloatSlider()
        else:
            return w.Text()
    box = react.make(Test())
    assert isinstance(box.children[0], widgets.Button)
    assert set_action is not None
    # action 1 immediately schedules action 2, so the FloatSlider is never observed
    set_action(1)
    assert isinstance(box.children[0], widgets.Text)
    # react.make() stashes its render context in a thread-local; close it explicitly
    assert react.core.local.last_rc is not None
    react.core.local.last_rc.close()
def test_multithreaded_support():
    """Render many components concurrently: the render context is thread-local,
    so parallel renders on separate threads must not interfere with each other."""
    N = 1000
    @react.component
    def Test(i):
        value, set_value = react.use_state(10)
        # trigger a few calls to set_value which uses the thread local
        # render context
        if value == 10:
            # a bit of sleep so we get real concurrency
            time.sleep(0.001)
            set_value(5)
        if value == 5:
            time.sleep(0.001)
            set_value(1)
        return w.Button(description=str(i))
    def worker(i):
        # each worker renders its own tree on its own thread; the description
        # check proves renders did not get crossed between threads
        button, rc = react.render_fixed(Test(i))
        assert button.description == str(i)
        return button, rc
    pool = ThreadPoolExecutor(max_workers=N)
    results = pool.map(worker, range(100))
    for _button, rc in results:
        rc = cast(react.core._RenderContext, rc)
        rc.close()
| [
"react_ipywidgets.get_widget",
"typing.cast",
"react_ipywidgets.use_state_widget",
"react_ipywidgets.create_context",
"traceback.format_tb",
"react_ipywidgets.core.component",
"react_ipywidgets.core._last_interactive_vbox.close",
"react_ipywidgets.core.get_widget",
"pytest.mark.skipif",
"numpy.ara... | [((477, 489), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (484, 489), False, 'from typing import Callable, List, Optional, Tuple, TypeVar, cast\n'), ((1235, 1263), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (1249, 1263), False, 'import pytest\n'), ((1666, 1714), 'pytest.fixture', 'pytest.fixture', ([], {'params': "['ButtonComponentWidget']"}), "(params=['ButtonComponentWidget'])\n", (1680, 1714), False, 'import pytest\n'), ((32922, 32997), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not react.core.DEBUG)'], {'reason': '"""only works in debug mode"""'}), "(not react.core.DEBUG, reason='only works in debug mode')\n", (32940, 32997), False, 'import pytest\n'), ((33402, 33477), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not react.core.DEBUG)'], {'reason': '"""only works in debug mode"""'}), "(not react.core.DEBUG, reason='only works in debug mode')\n", (33420, 33477), False, 'import pytest\n'), ((34119, 34194), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not react.core.DEBUG)'], {'reason': '"""only works in debug mode"""'}), "(not react.core.DEBUG, reason='only works in debug mode')\n", (34137, 34194), False, 'import pytest\n'), ((44148, 44202), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""in_container"""', '[False, True]'], {}), "('in_container', [False, True])\n", (44171, 44202), False, 'import pytest\n'), ((920, 942), 'react_ipywidgets.use_state', 'react.use_state', (['value'], {}), '(value)\n', (935, 942), True, 'import react_ipywidgets as react\n'), ((1081, 1103), 'react_ipywidgets.use_state', 'react.use_state', (['value'], {}), '(value)\n', (1096, 1103), True, 'import react_ipywidgets as react\n'), ((2147, 2190), 'react_ipywidgets.render_fixed', 'react.render_fixed', (['app'], {'handle_error': '(False)'}), '(app, handle_error=False)\n', (2165, 2190), True, 'import react_ipywidgets as react\n'), ((2922, 2962), 'react_ipywidgets.render', 'react.render', (['button'], 
{'handle_error': '(False)'}), '(button, handle_error=False)\n', (2934, 2962), True, 'import react_ipywidgets as react\n'), ((3178, 3218), 'ipywidgets.Button.element', 'widgets.Button.element', ([], {'description': '"""Hi"""'}), "(description='Hi')\n", (3200, 3218), True, 'import ipywidgets as widgets\n'), ((3234, 3254), 'react_ipywidgets.render', 'react.render', (['button'], {}), '(button)\n', (3246, 3254), True, 'import react_ipywidgets as react\n'), ((6671, 6728), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(False)', 'description': '"""Show button?"""'}), "(value=False, description='Show button?')\n", (6687, 6728), True, 'import ipywidgets as widgets\n'), ((7070, 7086), 'react_ipywidgets.render', 'react.render', (['el'], {}), '(el)\n', (7082, 7086), True, 'import react_ipywidgets as react\n'), ((10043, 10102), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'value': '(2)', 'description': '"""How many buttons?"""'}), "(value=2, description='How many buttons?')\n", (10060, 10102), True, 'import ipywidgets as widgets\n'), ((10353, 10385), 'react_ipywidgets.core.local.last_rc.close', 'react.core.local.last_rc.close', ([], {}), '()\n', (10383, 10385), True, 'import react_ipywidgets as react\n'), ((10485, 10499), 'ipywidgets.HBox', 'widgets.HBox', ([], {}), '()\n', (10497, 10499), True, 'import ipywidgets as widgets\n'), ((10513, 10572), 'ipywidgets.IntSlider', 'widgets.IntSlider', ([], {'value': '(2)', 'description': '"""How many buttons?"""'}), "(value=2, description='How many buttons?')\n", (10530, 10572), True, 'import ipywidgets as widgets\n'), ((10841, 10875), 'react_ipywidgets.render', 'react.render', (['el', 'hbox', '"""children"""'], {}), "(el, hbox, 'children')\n", (10853, 10875), True, 'import react_ipywidgets as react\n'), ((11911, 11967), 'ipywidgets.Checkbox', 'widgets.Checkbox', ([], {'value': '(True)', 'description': '"""Share button"""'}), "(value=True, description='Share button')\n", (11927, 11967), True, 'import ipywidgets as 
widgets\n'), ((13556, 13626), 'ipywidgets.FloatSlider', 'widgets.FloatSlider', ([], {'min': '(0.1)', 'max': '(2)', 'value': '(1.0)', 'description': '"""Exponent"""'}), "(min=0.1, max=2, value=1.0, description='Exponent')\n", (13575, 13626), True, 'import ipywidgets as widgets\n'), ((14286, 14298), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (14295, 14298), True, 'import numpy as np\n'), ((18359, 18377), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (18374, 18377), True, 'import react_ipywidgets as react\n'), ((20255, 20287), 'react_ipywidgets.core.local.last_rc.close', 'react.core.local.last_rc.close', ([], {}), '()\n', (20285, 20287), True, 'import react_ipywidgets as react\n'), ((23352, 23388), 'react_ipywidgets.component_interactive', 'react.component_interactive', ([], {'count': '(3)'}), '(count=3)\n', (23379, 23388), True, 'import react_ipywidgets as react\n'), ((23820, 23852), 'react_ipywidgets.core.local.last_rc.close', 'react.core.local.last_rc.close', ([], {}), '()\n', (23850, 23852), True, 'import react_ipywidgets as react\n'), ((24162, 24210), 'react_ipywidgets.core._last_interactive_vbox.layout.close', 'react.core._last_interactive_vbox.layout.close', ([], {}), '()\n', (24208, 24210), True, 'import react_ipywidgets as react\n'), ((24215, 24256), 'react_ipywidgets.core._last_interactive_vbox.close', 'react.core._last_interactive_vbox.close', ([], {}), '()\n', (24254, 24256), True, 'import react_ipywidgets as react\n'), ((24917, 24972), 'typing.cast', 'cast', (['Optional[Tuple[int, Callable[[str], None]]]', 'None'], {}), '(Optional[Tuple[int, Callable[[str], None]]], None)\n', (24921, 24972), False, 'from typing import Callable, List, Optional, Tuple, TypeVar, cast\n'), ((24993, 25016), 'react_ipywidgets.create_context', 'react.create_context', (['v'], {}), '(v)\n', (25013, 25016), True, 'import react_ipywidgets as react\n'), ((34859, 34910), 'react_ipywidgets.component', 'react.component', ([], {'mime_bundle': 
"{'text/plain': 'text'}"}), "(mime_bundle={'text/plain': 'text'})\n", (34874, 34910), True, 'import react_ipywidgets as react\n'), ((37804, 37818), 'ipywidgets.VBox', 'widgets.VBox', ([], {}), '()\n', (37816, 37818), True, 'import ipywidgets as widgets\n'), ((39315, 39333), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (39330, 39333), True, 'import react_ipywidgets as react\n'), ((39657, 39675), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (39672, 39675), True, 'import react_ipywidgets as react\n'), ((49875, 49907), 'react_ipywidgets.core.local.last_rc.close', 'react.core.local.last_rc.close', ([], {}), '()\n', (49905, 49907), True, 'import react_ipywidgets as react\n'), ((50548, 50581), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': 'N'}), '(max_workers=N)\n', (50566, 50581), False, 'from concurrent.futures import ThreadPoolExecutor\n'), ((4378, 4398), 'react_ipywidgets.use_state', 'react.use_state', (['(0.0)'], {}), '(0.0)\n', (4393, 4398), True, 'import react_ipywidgets as react\n'), ((4438, 4468), 'react_ipywidgets.use_state', 'react.use_state', (['"""Not changed"""'], {}), "('Not changed')\n", (4453, 4468), True, 'import react_ipywidgets as react\n'), ((6065, 6085), 'react_ipywidgets.use_state', 'react.use_state', (['(0.0)'], {}), '(0.0)\n', (6080, 6085), True, 'import react_ipywidgets as react\n'), ((6125, 6157), 'react_ipywidgets.use_state', 'react.use_state', (['"""Initial value"""'], {}), "('Initial value')\n", (6140, 6157), True, 'import react_ipywidgets as react\n'), ((6832, 6873), 'react_ipywidgets.use_state_widget', 'react.use_state_widget', (['checkbox', '"""value"""'], {}), "(checkbox, 'value')\n", (6854, 6873), True, 'import react_ipywidgets as react\n'), ((8350, 8368), 'react_ipywidgets.use_state', 'react.use_state', (['(2)'], {}), '(2)\n', (8365, 8368), True, 'import react_ipywidgets as react\n'), ((10168, 10207), 'react_ipywidgets.use_state_widget', 
'react.use_state_widget', (['slider', '"""value"""'], {}), "(slider, 'value')\n", (10190, 10207), True, 'import react_ipywidgets as react\n'), ((10666, 10705), 'react_ipywidgets.use_state_widget', 'react.use_state_widget', (['slider', '"""value"""'], {}), "(slider, 'value')\n", (10688, 10705), True, 'import react_ipywidgets as react\n'), ((12033, 12083), 'react_ipywidgets.use_state_widget', 'react.use_state_widget', (['checkbox', '"""value"""', '"""share"""'], {}), "(checkbox, 'value', 'share')\n", (12055, 12083), True, 'import react_ipywidgets as react\n'), ((13728, 13769), 'react_ipywidgets.use_state_widget', 'react.use_state_widget', (['exponent', '"""value"""'], {}), "(exponent, 'value')\n", (13750, 13769), True, 'import react_ipywidgets as react\n'), ((15331, 15349), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (15346, 15349), True, 'import react_ipywidgets as react\n'), ((15639, 15674), 'react_ipywidgets.use_effect', 'react.use_effect', (['add_event_handler'], {}), '(add_event_handler)\n', (15655, 15674), True, 'import react_ipywidgets as react\n'), ((16391, 16420), 'react_ipywidgets.use_effect', 'react.use_effect', (['test_effect'], {}), '(test_effect)\n', (16407, 16420), True, 'import react_ipywidgets as react\n'), ((16812, 16830), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (16827, 16830), True, 'import react_ipywidgets as react\n'), ((17323, 17356), 'react_ipywidgets.use_effect', 'react.use_effect', (['test_effect', '[]'], {}), '(test_effect, [])\n', (17339, 17356), True, 'import react_ipywidgets as react\n'), ((17931, 17968), 'react_ipywidgets.use_effect', 'react.use_effect', (['test_effect', '[a, b]'], {}), '(test_effect, [a, b])\n', (17947, 17968), True, 'import react_ipywidgets as react\n'), ((18972, 18994), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (18987, 18994), True, 'import react_ipywidgets as react\n'), ((20411, 20429), 'react_ipywidgets.use_state', 
'react.use_state', (['(0)'], {}), '(0)\n', (20426, 20429), True, 'import react_ipywidgets as react\n'), ((20618, 20636), 'react_ipywidgets.use_state', 'react.use_state', (['(3)'], {}), '(3)\n', (20633, 20636), True, 'import react_ipywidgets as react\n'), ((20668, 20690), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (20683, 20690), True, 'import react_ipywidgets as react\n'), ((22742, 22784), 'pytest.raises', 'pytest.raises', (['KeyError'], {'match': '"""Duplicate"""'}), "(KeyError, match='Duplicate')\n", (22755, 22784), False, 'import pytest\n'), ((22939, 22957), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (22954, 22957), True, 'import react_ipywidgets as react\n'), ((24473, 24508), 'react_ipywidgets.use_reducer', 'react.use_reducer', (['click_reducer', '(0)'], {}), '(click_reducer, 0)\n', (24490, 24508), True, 'import react_ipywidgets as react\n'), ((25222, 25254), 'react_ipywidgets.use_context', 'react.use_context', (['store_context'], {}), '(store_context)\n', (25239, 25254), True, 'import react_ipywidgets as react\n'), ((25460, 25492), 'react_ipywidgets.use_context', 'react.use_context', (['store_context'], {}), '(store_context)\n', (25477, 25492), True, 'import react_ipywidgets as react\n'), ((25668, 25703), 'react_ipywidgets.use_reducer', 'react.use_reducer', (['click_reducer', '(0)'], {}), '(click_reducer, 0)\n', (25685, 25703), True, 'import react_ipywidgets as react\n'), ((26892, 26947), 'react_ipywidgets.use_memo', 'react.use_memo', (['expensive_ab'], {'args': '[a]', 'kwargs': "{'j': b}"}), "(expensive_ab, args=[a], kwargs={'j': b})\n", (26906, 26947), True, 'import react_ipywidgets as react\n'), ((28029, 28041), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (28038, 28041), True, 'import numpy as np\n'), ((29015, 29038), 'react_ipywidgets.core.use_effect', 'use_effect', (['get_widgets'], {}), '(get_widgets)\n', (29025, 29038), False, 'from react_ipywidgets.core import component, 
use_effect\n'), ((29446, 29464), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (29461, 29464), True, 'import react_ipywidgets as react\n'), ((29584, 29611), 'react_ipywidgets.core.use_effect', 'use_effect', (['get_widgets', '[]'], {}), '(get_widgets, [])\n', (29594, 29611), False, 'from react_ipywidgets.core import component, use_effect\n'), ((30375, 30426), 'traitlets.traitlets.Callable', 'traitlets.traitlets.Callable', (['None'], {'allow_none': '(True)'}), '(None, allow_none=True)\n', (30403, 30426), False, 'import traitlets\n'), ((30835, 30856), 'react_ipywidgets.use_state', 'react.use_state', (['"""hi"""'], {}), "('hi')\n", (30850, 30856), True, 'import react_ipywidgets as react\n'), ((31258, 31298), 'react_ipywidgets.core.use_effect', 'use_effect', (['add_my_own_event_handler', '[]'], {}), '(add_my_own_event_handler, [])\n', (31268, 31298), False, 'from react_ipywidgets.core import component, use_effect\n'), ((31758, 31776), 'react_ipywidgets.use_state', 'react.use_state', (['(1)'], {}), '(1)\n', (31773, 31776), True, 'import react_ipywidgets as react\n'), ((31912, 31931), 'react_ipywidgets.use_state', 'react.use_state', (['(10)'], {}), '(10)\n', (31927, 31931), True, 'import react_ipywidgets as react\n'), ((32124, 32146), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (32139, 32146), True, 'import react_ipywidgets as react\n'), ((34353, 34377), 'react_ipywidgets.use_state', 'react.use_state', (['"""label"""'], {}), "('label')\n", (34368, 34377), True, 'import react_ipywidgets as react\n'), ((35144, 35162), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (35159, 35162), True, 'import react_ipywidgets as react\n'), ((35630, 35648), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (35645, 35648), True, 'import react_ipywidgets as react\n'), ((35663, 35687), 'react_ipywidgets.use_ref', 'react.use_ref', (["{'hi': 1}"], {}), "({'hi': 1})\n", (35676, 35687), 
True, 'import react_ipywidgets as react\n'), ((36134, 36152), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (36149, 36152), True, 'import react_ipywidgets as react\n'), ((36894, 36916), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (36909, 36916), True, 'import react_ipywidgets as react\n'), ((37122, 37160), 'pytest.raises', 'pytest.raises', (['Exception'], {'match': '"""fail"""'}), "(Exception, match='fail')\n", (37135, 37160), False, 'import pytest\n'), ((37409, 37427), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (37424, 37427), True, 'import react_ipywidgets as react\n'), ((38090, 38108), 'react_ipywidgets.use_state', 'react.use_state', (['(1)'], {}), '(1)\n', (38105, 38108), True, 'import react_ipywidgets as react\n'), ((39961, 39983), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (39976, 39983), True, 'import react_ipywidgets as react\n'), ((41878, 41900), 'react_ipywidgets.use_state', 'react.use_state', (['(False)'], {}), '(False)\n', (41893, 41900), True, 'import react_ipywidgets as react\n'), ((44733, 44751), 'react_ipywidgets.use_state', 'react.use_state', (['(1)'], {}), '(1)\n', (44748, 44751), True, 'import react_ipywidgets as react\n'), ((45925, 45946), 'react_ipywidgets.use_state', 'react.use_state', (['(True)'], {}), '(True)\n', (45940, 45946), True, 'import react_ipywidgets as react\n'), ((46677, 46702), 'react_ipywidgets.core.use_effect', 'use_effect', (['my_effect', '[]'], {}), '(my_effect, [])\n', (46687, 46702), False, 'from react_ipywidgets.core import component, use_effect\n'), ((46840, 46861), 'react_ipywidgets.use_state', 'react.use_state', (['(True)'], {}), '(True)\n', (46855, 46861), True, 'import react_ipywidgets as react\n'), ((47847, 47872), 'react_ipywidgets.core.use_effect', 'use_effect', (['my_effect', '[]'], {}), '(my_effect, [])\n', (47857, 47872), False, 'from react_ipywidgets.core import component, 
use_effect\n'), ((48008, 48028), 'react_ipywidgets.use_state', 'react.use_state', (['"""1"""'], {}), "('1')\n", (48023, 48028), True, 'import react_ipywidgets as react\n'), ((49361, 49379), 'react_ipywidgets.use_state', 'react.use_state', (['(0)'], {}), '(0)\n', (49376, 49379), True, 'import react_ipywidgets as react\n'), ((50023, 50042), 'react_ipywidgets.use_state', 'react.use_state', (['(10)'], {}), '(10)\n', (50038, 50042), True, 'import react_ipywidgets as react\n'), ((50671, 50706), 'typing.cast', 'cast', (['react.core._RenderContext', 'rc'], {}), '(react.core._RenderContext, rc)\n', (50675, 50706), False, 'from typing import Callable, List, Optional, Tuple, TypeVar, cast\n'), ((15421, 15453), 'react_ipywidgets.core.get_widget', 'react.core.get_widget', (['button_el'], {}), '(button_el)\n', (15442, 15453), True, 'import react_ipywidgets as react\n'), ((28918, 28950), 'react_ipywidgets.core.get_widget', 'react.core.get_widget', (['button1el'], {}), '(button1el)\n', (28939, 28950), True, 'import react_ipywidgets as react\n'), ((28973, 29005), 'react_ipywidgets.core.get_widget', 'react.core.get_widget', (['button2el'], {}), '(button2el)\n', (28994, 29005), True, 'import react_ipywidgets as react\n'), ((29542, 29574), 'react_ipywidgets.core.get_widget', 'react.core.get_widget', (['button_el'], {}), '(button_el)\n', (29563, 29574), True, 'import react_ipywidgets as react\n'), ((30974, 30996), 'react_ipywidgets.get_widget', 'react.get_widget', (['text'], {}), '(text)\n', (30990, 30996), True, 'import react_ipywidgets as react\n'), ((33222, 33258), 'traceback.format_tb', 'traceback.format_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (33241, 33258), False, 'import traceback\n'), ((33942, 33978), 'traceback.format_tb', 'traceback.format_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (33961, 33978), False, 'import traceback\n'), ((34697, 34733), 'traceback.format_tb', 'traceback.format_tb', (['e.__traceback__'], {}), '(e.__traceback__)\n', (34716, 
34733), False, 'import traceback\n'), ((44986, 44997), 'react_ipywidgets.core.component', 'component', ([], {}), '()\n', (44995, 44997), False, 'from react_ipywidgets.core import component, use_effect\n'), ((50231, 50248), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (50241, 50248), False, 'import time\n'), ((50309, 50326), 'time.sleep', 'time.sleep', (['(0.001)'], {}), '(0.001)\n', (50319, 50326), False, 'import time\n'), ((20035, 20075), 'ipywidgets.Button.element', 'widgets.Button.element', ([], {'description': '"""Hi"""'}), "(description='Hi')\n", (20057, 20075), True, 'import ipywidgets as widgets\n'), ((19066, 19100), 'ipywidgets.IntSlider.element', 'widgets.IntSlider.element', ([], {'value': '(4)'}), '(value=4)\n', (19091, 19100), True, 'import ipywidgets as widgets\n'), ((19131, 19171), 'ipywidgets.Button.element', 'widgets.Button.element', ([], {'description': '"""Hi"""'}), "(description='Hi')\n", (19153, 19171), True, 'import ipywidgets as widgets\n'), ((19217, 19257), 'ipywidgets.Button.element', 'widgets.Button.element', ([], {'description': '"""Hi"""'}), "(description='Hi')\n", (19239, 19257), True, 'import ipywidgets as widgets\n'), ((19285, 19319), 'ipywidgets.IntSlider.element', 'widgets.IntSlider.element', ([], {'value': '(4)'}), '(value=4)\n', (19310, 19319), True, 'import ipywidgets as widgets\n'), ((44939, 44950), 'react_ipywidgets.core.component', 'component', ([], {}), '()\n', (44948, 44950), False, 'from react_ipywidgets.core import component, use_effect\n')] |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : data_preprocessing.py
@Time : 2021/07/13 09:54:03
@Author : searobbersandduck
@Version : 1.0
@Contact : <EMAIL>
@License : (C)Copyright 2020-2021, MIT
@Desc : None
'''
# here put the import lib
import os
import sys
ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(ROOT)
from tqdm import tqdm
from glob import glob
import shutil
import SimpleITK as sitk
import numpy as np
import shutil
from external_lib.MedCommon.utils.data_io_utils import DataIO
from external_lib.MedCommon.utils.image_postprocessing_utils import ImagePostProcessingUtils
from external_lib.MedCommon.utils.mask_bounding_utils import MaskBoundingUtils
from external_lib.MedCommon.utils.mask_utils import MaskUtils
from external_lib.MedCommon.utils.image_show_utils import ImageShowUtils
# Patient IDs of cases that failed somewhere in the preprocessing pipeline,
# kept so they can be excluded from later steps.
# NOTE(review): the failure reason per case was not recorded, and '4409402'
# appears twice in this list — confirm whether the duplicate is intentional.
error_list = [
    '4409402',
    '4623558',
    '4825713',
    '5007511',
    '5101568',
    '1237062',
    '1285440',
    '1305155',
    '1397046',
    '1445543',
    '1902661',
    '1935168',
    '2094368',
    '2182657',
    '2504214',
    '2602401',
    '2670896',
    '2693475',
    '2813431',
    '2835569',
    '2999343',
    '3772244',
    '3839904',
    '3869885',
    '3949254',
    '3998837',
    '4185501',
    '4303024',
    '4381668',
    '4409402',
    '4440093',
    '4465419',
    '4577418',
    '4587103',
    '4479986',
    '4455178',
    '4238407',
    '4285331',
    '4503839',
    '4597717',
    '4634411',
    '4669297',
    '4981609'
]
def step_1_check_folder_format(root, out_root):
    '''
    Copy each patient's CTA and DWI series into a cleaned directory layout.

    root = '/data/medical/brain/gan/hospital_6_multi_classified/CTA2DWI-多中心-20201102'
    out_root = '/data/medical/brain/gan/cta2dwi_multi_classified'
    note:
        1. Check that every case contains at least a CTA and a DWI series;
           cases without both are reported.
        2. Copy the data to ``out_root`` keeping only the CTA and DWI folders;
           when numbered variants exist (CTA1/CTA2, DWI1/DWI2) the preferred
           one is copied and renamed to CTA / DWI.

    Expected input layout (class folder / hospital folder / patient id):
        .
        ├── CTA阴性(108例)
        │   ├── 六院-CTA阴性(69)
        │   └── ...
        └── 阳性-闭塞(188例)
            ├── 六院-DWI闭塞病例(105)
            └── ...
    with each patient folder containing CTA and DWI sub-folders, e.g.:
        ├── 101878640
        │   ├── CTA
        │   └── DWI
    '''
    pn_roots = os.listdir(root)

    def _pick_modality_pair(modalities):
        # Prefer the unnumbered folder; fall back to the '1' then '2' variant.
        pairs = []
        if 'CTA' in modalities:
            pairs.append('CTA')
        elif 'CTA1' in modalities:
            pairs.append('CTA1')
        elif 'CTA2' in modalities:
            pairs.append('CTA2')
        if 'DWI' in modalities:
            pairs.append('DWI')
        elif 'DWI1' in modalities:
            pairs.append('DWI1')
        elif 'DWI2' in modalities:
            pairs.append('DWI2')
        return pairs

    for pn_root in pn_roots:
        pn_path = os.path.join(root, pn_root)
        for hospital_name in os.listdir(pn_path):
            hospital_path = os.path.join(pn_path, hospital_name)
            for pid in tqdm(os.listdir(hospital_path)):
                try:
                    # patient ids are 7-character folder names; skip anything else
                    if len(pid) != 7:
                        continue
                    pid_path = os.path.join(hospital_path, pid)
                    modalities = os.listdir(pid_path)
                    pairs_modalities = _pick_modality_pair(modalities)
                    for m in pairs_modalities:
                        src_path = os.path.join(pid_path, m)
                        dst_path = src_path.replace(root, out_root)
                        # strip the trailing digit so CTA1/CTA2 -> CTA, DWI1/DWI2 -> DWI
                        if dst_path.endswith('1') or dst_path.endswith('2'):
                            dst_path = dst_path[:-1]
                        os.makedirs(os.path.dirname(dst_path), exist_ok=True)
                        shutil.copytree(src_path, dst_path)
                        # bug fix: the original '{o}' named placeholder with
                        # positional args raised KeyError on every copy
                        print('copy from {} to {}'.format(src_path, dst_path))
                    # report cases that do not have both CTA and DWI
                    if len(pairs_modalities) != 2:
                        print(pid_path)
                except Exception as e:
                    # best-effort batch run: report the failing case and continue
                    print('Error case:\t', pid, e)
def convert_dcm_to_nii(in_root, out_root):
    '''
    Convert each patient's CTA and DWI DICOM series into .nii.gz volumes.

    ``in_root`` holds one folder per patient id, each with CTA and DWI
    sub-folders of DICOM slices, e.g.:
    tree -L 1
    .
    ├── 1124013
    ├── 1140092
    ├── 1195207
    └── ...
    '''
    for pid in tqdm(os.listdir(in_root)):
        src_dir = os.path.join(in_root, pid)
        dst_dir = os.path.join(out_root, pid)
        os.makedirs(dst_dir, exist_ok=True)
        # CTA first, then DWI — same order and output names as before
        for modality in ('CTA', 'DWI'):
            series = DataIO.load_dicom_series(os.path.join(src_dir, modality))
            out_file = os.path.join(dst_dir, modality + '.nii.gz')
            sitk.WriteImage(series['sitk_image'], out_file)
def step_2_dcm_to_nii(in_root, out_root):
    """Run DICOM->NIfTI conversion over every hospital folder of every class folder."""
    for class_name in os.listdir(in_root):
        class_dir = os.path.join(in_root, class_name)
        for hospital in os.listdir(class_dir):
            convert_dcm_to_nii(os.path.join(class_dir, hospital), out_root)
def cerebral_parenchyma_segmentation_new_algo(
        data_root=None,
        out_dir = None
    ):
    """Segment the brain parenchyma on every patient's CTA and save image + mask.

    Reads ``<data_root>/<pid>/CTA.nii.gz`` and writes
    ``<out_dir>/<pid>/CTA/CTA.nii.gz`` plus ``CTA_MASK.nii.gz``.
    """
    import torch
    from external_lib.MedCommon.experiments.seg.brain.parenchyma.inference.inference import load_inference_opts
    from external_lib.MedCommon.segmentation.runner.train_seg import SegmentationTrainer

    opts = load_inference_opts()
    net = SegmentationTrainer.load_model(opts)
    net = torch.nn.DataParallel(net).cuda()
    net.eval()

    for pid in tqdm(os.listdir(data_root)):
        patient_dir = os.path.join(data_root, pid)
        if not os.path.isdir(patient_dir):
            print('patient path not exist!\t{}'.format(patient_dir))
            continue
        cta_file = os.path.join(patient_dir, 'CTA.nii.gz')
        if not os.path.isfile(cta_file):
            print('cta file not exist!\t{}'.format(cta_file))
            continue
        image, pred_mask = SegmentationTrainer.inference_one_case(net, cta_file, is_dcm=False)
        dst_dir = os.path.join(out_dir, pid, 'CTA')
        os.makedirs(dst_dir, exist_ok=True)
        sitk.WriteImage(image, os.path.join(dst_dir, 'CTA.nii.gz'))
        sitk.WriteImage(pred_mask, os.path.join(dst_dir, 'CTA_MASK.nii.gz'))
def step_3_3_segment_cerebral_parenchyma_connected_region(root_dir = '/data/medical/cardiac/cta2mbf/20201216/3.sorted_mask'):
    """Keep the largest connected component (label 1) of each CTA mask and fill holes.

    Reads ``<root_dir>/<pid>/CTA/CTA_MASK.nii.gz`` and writes
    ``CTA_MASK_connected.nii.gz`` next to it.
    """
    for pid in tqdm(os.listdir(root_dir)):
        patient_dir = os.path.join(root_dir, pid)
        if not os.path.isdir(patient_dir):
            continue
        cta_dir = os.path.join(patient_dir, 'CTA')
        src_mask_file = os.path.join(cta_dir, 'CTA_MASK.nii.gz')
        dst_mask_file = os.path.join(cta_dir, 'CTA_MASK_connected.nii.gz')
        try:
            if not os.path.isfile(src_mask_file):
                continue
            raw_mask = sitk.ReadImage(src_mask_file)
            refined = ImagePostProcessingUtils.get_maximal_connected_region_multilabel(raw_mask, mask_labels=[1])
            refined = MaskUtils.fill_hole(refined, radius=6)
            sitk.WriteImage(refined, dst_mask_file)
        except Exception as e:
            # report and continue with the remaining patients
            print(e)
            print('====> Error case:\t{}'.format(pid))
def extract_cta_cerebral_parenchyma_zlayers(
        cta_root,
        mask_root,
        out_root,
        cta_pattern = 'CTA/CTA.nii.gz',
        mask_pattern = 'CTA/DWI_BBOX_MASK.nii.gz'):
    """Crop each CTA and its mask to the z-slab covered by the mask, saving both.

    Writes ``<out_root>/<pid>/CTA.nii.gz`` and ``MASK.nii.gz``.
    """
    for pid in tqdm(os.listdir(mask_root)):
        image = sitk.ReadImage(os.path.join(cta_root, pid, cta_pattern))
        mask = sitk.ReadImage(os.path.join(mask_root, pid, mask_pattern))
        cropped_image, cropped_mask = MaskBoundingUtils.extract_target_area_by_mask_zboundary(image, mask)
        dst_dir = os.path.join(out_root, pid)
        os.makedirs(dst_dir, exist_ok=True)
        sitk.WriteImage(cropped_image, os.path.join(dst_dir, 'CTA.nii.gz'))
        sitk.WriteImage(cropped_mask, os.path.join(dst_dir, 'MASK.nii.gz'))
def extract_cta_cerebral_parenchyma(
        cta_root,
        mask_root,
        out_root,
        cta_pattern = 'CTA.nii.gz',
        mask_pattern = 'MASK.nii.gz',
        out_pattern = 'CTA_parenchyma.nii.gz'
    ):
    """Zero out everything outside the parenchyma mask (label 1) of each CTA.

    Voxels outside the mask are set to -1024; the result is written to
    ``<out_root>/<pid>/<out_pattern>``.
    """
    for pid in tqdm(os.listdir(mask_root)):
        try:
            image = sitk.ReadImage(os.path.join(cta_root, pid, cta_pattern))
            mask = sitk.ReadImage(os.path.join(mask_root, pid, mask_pattern))
            parenchyma = ImagePostProcessingUtils.extract_region_by_mask(image, mask, default_value=-1024, mask_label=1)
            sitk.WriteImage(parenchyma, os.path.join(out_root, pid, out_pattern))
        except Exception as e:
            # report and continue with the remaining patients
            print('====> Error case:\t{}'.format(pid))
            print(e)
def generate_dwi_bbox_mask(in_root, out_root, dwi_pattern='DWI.nii.gz', out_dwi_mask_pattern='DWI_BBOX_MASK.nii.gz'):
    '''
    Write an all-ones mask covering each DWI volume's full extent.

    For every patient under ``in_root`` this creates
    ``<out_root>/<pid>/<out_dwi_mask_pattern>`` with the same geometry as the
    DWI, and copies the DWI file next to it for later processing steps.

    Args:
        in_root: directory with one sub-folder per patient id containing the DWI.
        out_root: destination root; per-patient sub-folders are created as needed.
        dwi_pattern: filename of the DWI volume inside each patient folder.
        out_dwi_mask_pattern: filename for the generated bounding-box mask.
    '''
    for pid in tqdm(os.listdir(in_root)):
        try:
            dwi_file = os.path.join(in_root, pid, dwi_pattern)
            dwi_image = sitk.ReadImage(dwi_file)
            # SimpleITK reports size as (x, y, z) but numpy arrays are (z, y, x),
            # so reverse the order before allocating the array
            size = dwi_image.GetSize()[::-1]
            bbox_mask = sitk.GetImageFromArray(np.ones(size, dtype=np.uint8))
            bbox_mask.CopyInformation(dwi_image)
            out_sub_dir = os.path.join(out_root, pid)
            os.makedirs(out_sub_dir, exist_ok=True)
            out_mask_file = os.path.join(out_sub_dir, out_dwi_mask_pattern)
            sitk.WriteImage(bbox_mask, out_mask_file)
            # keep a copy of the DWI beside its mask for the following steps
            dst_file = os.path.join(out_sub_dir, os.path.basename(dwi_file))
            shutil.copyfile(dwi_file, dst_file)
            # (removed stray debug print left over from development)
        except Exception as e:
            # best-effort batch processing: report and move on to the next patient
            print(e)
            continue
def extract_dwi_cerebral_parenchyma(
        dwi_root, 
        mask_root, 
        out_root, 
        dwi_pattern = 'registried_dwi.nii.gz', 
        mask_pattern = 'MASK.nii.gz', 
        out_dwi_pattern = 'registried_dwi_parenchyma.nii.gz', 
        mask_label=1
    ):
    """Mask each registered DWI volume down to the brain parenchyma.

    Patients missing either the DWI file or the mask file are skipped
    silently.  After masking (background set to -1024), any remaining
    negative voxel values inside the region are also clamped to -1024 so
    they do not interfere with downstream processing.  Results go to
    ``out_root/<pid>/<out_dwi_pattern>``.
    """
    for pid in tqdm(os.listdir(dwi_root)):
        try:
            dwi_file = os.path.join(dwi_root, pid, dwi_pattern)
            mask_file = os.path.join(mask_root, pid, mask_pattern)
            # Skip patients for which either input is missing.
            if not (os.path.isfile(dwi_file) and os.path.isfile(mask_file)):
                continue
            dwi_image = DataIO.load_nii_image(dwi_file)['sitk_image']
            mask_image = DataIO.load_nii_image(mask_file)['sitk_image']
            extracted = ImagePostProcessingUtils.extract_region_by_mask(
                dwi_image, mask_image, default_value=-1024, mask_label=mask_label)
            # Clamp negative values inside the parenchyma to -1024.
            arr = sitk.GetArrayFromImage(extracted)
            arr[arr < 0] = -1024
            extracted = sitk.GetImageFromArray(arr)
            extracted.CopyInformation(dwi_image)
            out_sub_dir = os.path.join(out_root, pid)
            os.makedirs(out_sub_dir, exist_ok=True)
            sitk.WriteImage(extracted, os.path.join(out_sub_dir, out_dwi_pattern))
        except Exception as e:
            print(e)
            print('====> Error case:\t{}'.format(pid))
def merge_cerebral_parenchyma_mask_and_dwi_bbox(
        parenchyma_mask_root, 
        dwi_bbox_mask_root, 
        out_root, 
        parenchyma_mask_pattern='MASK.nii.gz', 
        dwi_mask_pattern='registried_dwi_bbox.nii.gz', 
        out_mask_pattern='final_mask.nii.gz'
    ):
    """Intersect the parenchyma mask with the registered DWI bbox mask.

    The two masks are multiplied voxel-wise, so the output keeps only the
    voxels present in both.  The merged uint8 mask inherits the geometry
    of the parenchyma mask and is written to
    ``out_root/<pid>/<out_mask_pattern>``.  Failures are logged per case.
    """
    for pid in tqdm(os.listdir(dwi_bbox_mask_root)):
        try:
            parenchyma_file = os.path.join(parenchyma_mask_root, pid, parenchyma_mask_pattern)
            bbox_file = os.path.join(dwi_bbox_mask_root, pid, dwi_mask_pattern)
            parenchyma_image = sitk.ReadImage(parenchyma_file)
            bbox_image = sitk.ReadImage(bbox_file)
            merged_arr = sitk.GetArrayFromImage(parenchyma_image) * sitk.GetArrayFromImage(bbox_image)
            merged_image = sitk.GetImageFromArray(np.array(merged_arr, np.uint8))
            merged_image.CopyInformation(parenchyma_image)
            out_sub_dir = os.path.join(out_root, pid)
            os.makedirs(out_sub_dir, exist_ok=True)
            sitk.WriteImage(merged_image, os.path.join(out_sub_dir, out_mask_pattern))
        except Exception as e:
            print(e)
            print('====> Error case:\t{}'.format(pid))
def copy_train_data(data_root, out_root, cta_pattern='fixed_cta.nii.gz'):
    """Copy cases with sufficient axial resolution into a training folder.

    Many cases have too low a resolution; only patients whose CTA has at
    least 100 slices along z are copied (whole patient folder) from
    ``data_root`` to ``out_root``.  The min/max z-size among the copied
    cases is reported at the end.
    """
    os.makedirs(out_root, exist_ok=True)
    min_z, max_z = 10000, 0
    for pid in tqdm(os.listdir(data_root)):
        cta_file = os.path.join(data_root, pid, cta_pattern)
        size = sitk.ReadImage(cta_file).GetSize()
        print('{}\t{}'.format(pid, size))
        if size[2] < 100:
            continue
        min_z = min(min_z, size[2])
        max_z = max(max_z, size[2])
        shutil.copytree(os.path.join(data_root, pid), os.path.join(out_root, pid))
    print('min z:\t{},\t\tmax z:\t{}'.format(min_z, max_z))
# 生成切面图像
def genereate_mpr_slice(data_root, out_root, 
        src_pattern='fixed_cta.nii.gz', 
        dst_pattern='registried_dwi.nii.gz'
    ):
    """Render MPR slice JPGs for each patient's src/dst volume pair.

    Source volumes are rendered with parameters (150, 50) and destination
    volumes with (400, 200); all images land directly in ``out_root`` with
    ``<pid>_src`` / ``<pid>_dst`` filename prefixes.  Failing cases are
    logged and skipped.
    """
    for pid in tqdm(os.listdir(data_root)):
        try:
            src_image_file = os.path.join(data_root, pid, src_pattern)
            dst_image_file = os.path.join(data_root, pid, dst_pattern)
            os.makedirs(out_root, exist_ok=True)
            src_image = sitk.ReadImage(src_image_file)
            dst_image = sitk.ReadImage(dst_image_file)
            ImageShowUtils.save_volume_to_mpr_jpg(src_image, out_root, 150, 50, '{}_src'.format(pid))
            ImageShowUtils.save_volume_to_mpr_jpg(dst_image, out_root, 400, 200, '{}_dst'.format(pid))
        except Exception as e:
            print('====> Error case:\t{}'.format(pid))
def data_preprocessing():
    """Pipeline driver for the multi-center CTA->DWI dataset.

    The earlier pipeline stages (dcm->nii conversion, parenchyma
    segmentation, cropping, bbox-mask generation, registration, parenchyma
    extraction and mask merging) are kept below as commented-out history.
    Currently only the final step runs: copying qualified cases into the
    training folder.
    """
    data_root = '/data/medical/brain/gan/cta2dwi_multi_classified'
    # step_2_dcm_to_nii(os.path.join(data_root, '0.ori'), 
    #     os.path.join(data_root, '3.sorted_nii'))
    # step 3 cerebral parenchyma segmentation
    # cerebral_parenchyma_segmentation_new_algo(
    #     os.path.join(data_root, '3.sorted_nii'), 
    #     os.path.join(data_root, '3.sorted_mask')
    # )
    # step_3_3_segment_cerebral_parenchyma_connected_region(
    #     os.path.join(data_root, '3.sorted_mask')
    # )
    # extract_cta_cerebral_parenchyma_zlayers(
    #     os.path.join(data_root, '3.sorted_mask'), 
    #     os.path.join(data_root, '3.sorted_mask'), 
    #     os.path.join(data_root, '4.cropped_nii')
    # )
    # generate_dwi_bbox_mask(
    #     os.path.join(data_root, '3.sorted_nii'), 
    #     os.path.join(data_root, '4.cropped_nii')
    # )
    # registration : run data_preprocessing_registration_dwi2cta.py
    # extract_dwi_cerebral_parenchyma(
    #     os.path.join(data_root, '4.registration_batch'), 
    #     os.path.join(data_root, '4.cropped_nii'), 
    #     os.path.join(data_root, '4.registration_batch')
    # )
    # merge_cerebral_parenchyma_mask_and_dwi_bbox(
    #     os.path.join(data_root, '4.cropped_nii'), 
    #     os.path.join(data_root, '4.registration_batch'), 
    #     os.path.join(data_root, '4.registration_batch')
    # )
    copy_train_data(
        os.path.join(data_root, '4.registration_batch'),
        os.path.join(data_root, '5.train_batch')
    )
def _convert_first_series_to_nii(series_root, out_file, extra_subdir=None):
    """Convert the first DICOM series folder under ``series_root`` to NIfTI.

    ``extra_subdir``, when given, is appended below the series-uid folder
    (the historical DWI layout stores slices one level deeper).
    """
    suid = os.listdir(series_root)[0]
    dcm_path = os.path.join(series_root, suid)
    if extra_subdir is not None:
        dcm_path = os.path.join(dcm_path, extra_subdir)
    image = DataIO.load_dicom_series(dcm_path)
    sitk.WriteImage(image['sitk_image'], out_file)

def convert_dcm_to_nii_history(in_root, out_root):
    """Convert historical per-patient NCCT/DWI DICOM folders to NIfTI.

    For each patient id under ``in_root``:
      * ``<pid>/NCCT/<suid>``        -> ``out_root/<pid>/CTA.nii.gz``
        (output name kept as CTA.nii.gz for downstream compatibility)
      * ``<pid>/DWI/<suid>/bxxx``    -> ``out_root/<pid>/DWI.nii.gz``
    Failing cases are logged and skipped.
    """
    pids = os.listdir(in_root)
    for pid in tqdm(pids):
        try:
            out_sub_root = os.path.join(out_root, pid)
            os.makedirs(out_sub_root, exist_ok=True)
            _convert_first_series_to_nii(
                os.path.join(in_root, pid, 'NCCT'),
                os.path.join(out_sub_root, 'CTA.nii.gz'))
            # DWI slices live one extra level down, in a 'bxxx' folder.
            _convert_first_series_to_nii(
                os.path.join(in_root, pid, 'DWI'),
                os.path.join(out_sub_root, 'DWI.nii.gz'),
                extra_subdir='bxxx')
        except Exception as e:
            print('====> Error case:\t', pid)
            # Log the actual failure reason instead of swallowing it.
            print(e)
            continue
# def data_preprocessing_batch1():
# data_root = '/data/medical/brain/gan/cta2dwi_history_pos'
# # data_root = '/data/medical/brain/gan/cta2dwi_history_neg'
# # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm'),
# # os.path.join(data_root, '3.sorted_nii'))
# # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm_neg'),
# # os.path.join(data_root, '3.sorted_nii'))
# # '''
# # deleted algo
# # '''
# # # step 3 cerebral parenchyma segmentation
# # # cerebral_parenchyma_segmentation_new_algo(
# # # os.path.join(data_root, '3.sorted_nii'),
# # # os.path.join(data_root, '3.sorted_mask')
# # # )
# # # step_3_3_segment_cerebral_parenchyma_connected_region(
# # # os.path.join(data_root, '3.sorted_mask')
# # # )
# # # extract_cta_cerebral_parenchyma_zlayers(
# # # os.path.join(data_root, '3.sorted_mask'),
# # # os.path.join(data_root, '3.sorted_mask'),
# # # os.path.join(data_root, '4.cropped_nii')
# # # )
# # # generate_dwi_bbox_mask(
# # # os.path.join(data_root, '3.sorted_nii'),
# # # os.path.join(data_root, '4.cropped_nii')
# # # )
# # generate_dwi_bbox_mask(
# # os.path.join(data_root, '3.sorted_nii'),
# # os.path.join(data_root, '3.sorted_mask')
# # )
# # extract_cta_cerebral_parenchyma(
# # os.path.join(data_root, '4.cropped_nii'),
# # os.path.join(data_root, '4.cropped_nii'),
# # os.path.join(data_root, '4.cropped_nii'),
# # )
# # registration : run data_preprocessing_registration_dwi2cta.py
# # extract_dwi_cerebral_parenchyma(
# # os.path.join(data_root, '4.registration_batch'),
# # os.path.join(data_root, '4.cropped_nii'),
# # os.path.join(data_root, '4.registration_batch')
# # )
# extract_dwi_cerebral_parenchyma(
# os.path.join(data_root, '4.registration_batch_2d'),
# os.path.join(data_root, '3.sorted_mask'),
# os.path.join(data_root, '4.registration_batch_2d'),
# mask_pattern='_brain_mask.nii.gz'
# )
# # merge_cerebral_parenchyma_mask_and_dwi_bbox(
# # os.path.join(data_root, '4.cropped_nii'),
# # os.path.join(data_root, '4.registration_batch'),
# # os.path.join(data_root, '4.registration_batch')
# # )
# # copy_train_data(
# # os.path.join(data_root, '4.registration_batch'),
# # os.path.join(data_root, '5.train_batch')
# # )
# # genereate_mpr_slice(
# # os.path.join(data_root, '5.train_batch'),
# # os.path.join(data_root, '6.mpr')
# # )
def data_preprocessing_batch1():
    """Pipeline driver for the historical positive-case CTA->DWI batch.

    Steps 1-4 (dcm->nii conversion, 2d segmentation, bbox-mask generation
    and registration) are recorded below as commented-out history; the
    active run performs steps 5-8 on the ``*_2d_parenchyma`` folders:
    parenchyma extraction, mask merging, copying qualified cases into the
    training folder, and MPR slice rendering.
    """
    data_root = '/data/medical/brain/gan/cta2dwi_history_pos'
    # data_root = '/data/medical/brain/gan/cta2dwi_history_neg'
    # step 1
    # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm'), 
    #     os.path.join(data_root, '3.sorted_nii'))
    # convert_dcm_to_nii_history(os.path.join(data_root, '0.raw_dcm_neg'), 
    #     os.path.join(data_root, '3.sorted_nii'))
    # step 2
    # segment by 2d algorithm
    # step 3
    # generate_dwi_bbox_mask(
    #     os.path.join(data_root, '3.sorted_nii'), 
    #     os.path.join(data_root, '3.sorted_mask')
    # )
    # step 4
    # registration : run data_preprocessing_registration_dwi2cta.py
    # step 5
    # extract_dwi_cerebral_parenchyma(
    #     os.path.join(data_root, '4.registration_batch_2d'), 
    #     os.path.join(data_root, '3.sorted_mask'), 
    #     os.path.join(data_root, '4.registration_batch_2d'), 
    #     mask_pattern='_brain_mask.nii.gz'
    # )
    # step 6
    # merge_cerebral_parenchyma_mask_and_dwi_bbox(
    #     os.path.join(data_root, '3.sorted_mask'), 
    #     os.path.join(data_root, '4.registration_batch_2d'), 
    #     os.path.join(data_root, '4.registration_batch_2d'), 
    #     parenchyma_mask_pattern='_brain_mask.nii.gz'
    # )
    # step 7
    # copy_train_data(
    #     os.path.join(data_root, '4.registration_batch_2d'), 
    #     os.path.join(data_root, '5.train_batch_2d')
    # )
    # step 8
    # genereate_mpr_slice(
    #     os.path.join(data_root, '5.train_batch_2d'), 
    #     os.path.join(data_root, '6.mpr_2d')
    # )
    # step 5
    extract_dwi_cerebral_parenchyma(
        os.path.join(data_root, '4.registration_batch_2d_parenchyma'),
        os.path.join(data_root, '3.sorted_mask'),
        os.path.join(data_root, '4.registration_batch_2d_parenchyma'),
        mask_pattern='_brain_mask.nii.gz'
    )
    # step 6
    merge_cerebral_parenchyma_mask_and_dwi_bbox(
        os.path.join(data_root, '3.sorted_mask'),
        os.path.join(data_root, '4.registration_batch_2d_parenchyma'),
        os.path.join(data_root, '4.registration_batch_2d_parenchyma'),
        parenchyma_mask_pattern='_brain_mask.nii.gz'
    )
    # step 7
    copy_train_data(
        os.path.join(data_root, '4.registration_batch_2d_parenchyma'),
        os.path.join(data_root, '5.train_batch_2d_parenchyma')
    )
    # step 8
    genereate_mpr_slice(
        os.path.join(data_root, '5.train_batch_2d_parenchyma'),
        os.path.join(data_root, '6.mpr_2d_parenchyma')
    )
def remove_error_pairs(data_root):
    """Delete the patient folders listed in the module-level ``error_list``.

    Every sub-folder of ``data_root`` whose name appears in ``error_list``
    is removed recursively; all other folders are left untouched.
    """
    for pid in tqdm(os.listdir(data_root)):
        if pid not in error_list:
            continue
        print('remove\t', pid)
        shutil.rmtree(os.path.join(data_root, pid))
if __name__ == '__main__':
    # Earlier entry points, kept for reference:
    # step_1_check_folder_format('/data/medical/brain/gan/hospital_6_multi_classified/CTA2DWI-多中心-20201102', 
    #     '/data/medical/brain/gan/cta2dwi_multi_classified')
    # data_preprocessing()
    # data_preprocessing_batch1()
    # remove_error_pairs('/data/medical/brain/gan/cta2dwi_all_2d_parenchyma/5.train_batch_2d_parenchyma')
    remove_error_pairs('/ssd/zhangwd/cta2mbf/cta2dwi_all_2d_parenchyma/5.train_batch_2d_parenchyma')
"external_lib.MedCommon.segmentation.runner.train_seg.SegmentationTrainer.load_model",
"numpy.ones",
"os.path.isfile",
"shutil.rmtree",
"os.path.join",
"external_lib.MedCommon.utils.mask_utils.MaskUtils.fill_hole",
"sys.path.append",
"external_lib.MedCommon.segmentation.runner.train_seg.SegmentationTr... | [((382, 403), 'sys.path.append', 'sys.path.append', (['ROOT'], {}), '(ROOT)\n', (397, 403), False, 'import sys\n'), ((2465, 2481), 'os.listdir', 'os.listdir', (['root'], {}), '(root)\n', (2475, 2481), False, 'import os\n'), ((5307, 5326), 'os.listdir', 'os.listdir', (['in_root'], {}), '(in_root)\n', (5317, 5326), False, 'import os\n'), ((5881, 5902), 'external_lib.MedCommon.experiments.seg.brain.parenchyma.inference.inference.load_inference_opts', 'load_inference_opts', ([], {}), '()\n', (5900, 5902), False, 'from external_lib.MedCommon.experiments.seg.brain.parenchyma.inference.inference import load_inference_opts\n'), ((5915, 5951), 'external_lib.MedCommon.segmentation.runner.train_seg.SegmentationTrainer.load_model', 'SegmentationTrainer.load_model', (['opts'], {}), '(opts)\n', (5945, 5951), False, 'from external_lib.MedCommon.segmentation.runner.train_seg import SegmentationTrainer\n'), ((8090, 8111), 'os.listdir', 'os.listdir', (['mask_root'], {}), '(mask_root)\n', (8100, 8111), False, 'import os\n'), ((8127, 8137), 'tqdm.tqdm', 'tqdm', (['pids'], {}), '(pids)\n', (8131, 8137), False, 'from tqdm import tqdm\n'), ((9004, 9025), 'os.listdir', 'os.listdir', (['mask_root'], {}), '(mask_root)\n', (9014, 9025), False, 'import os\n'), ((9041, 9051), 'tqdm.tqdm', 'tqdm', (['pids'], {}), '(pids)\n', (9045, 9051), False, 'from tqdm import tqdm\n'), ((13866, 13902), 'os.makedirs', 'os.makedirs', (['out_root'], {'exist_ok': '(True)'}), '(out_root, exist_ok=True)\n', (13877, 13902), False, 'import os\n'), ((17145, 17164), 'os.listdir', 'os.listdir', (['in_root'], {}), '(in_root)\n', (17155, 17164), False, 'import os\n'), ((17180, 17190), 'tqdm.tqdm', 'tqdm', (['pids'], {}), '(pids)\n', (17184, 17190), False, 'from tqdm import tqdm\n'), ((338, 363), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (353, 363), False, 'import os\n'), ((3088, 3115), 'os.path.join', 
'os.path.join', (['root', 'pn_root'], {}), '(root, pn_root)\n', (3100, 3115), False, 'import os\n'), ((3145, 3164), 'os.listdir', 'os.listdir', (['pn_path'], {}), '(pn_path)\n', (3155, 3164), False, 'import os\n'), ((4602, 4621), 'os.listdir', 'os.listdir', (['in_root'], {}), '(in_root)\n', (4612, 4621), False, 'import os\n'), ((4647, 4673), 'os.path.join', 'os.path.join', (['in_root', 'pid'], {}), '(in_root, pid)\n', (4659, 4673), False, 'import os\n'), ((4697, 4724), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (4709, 4724), False, 'import os\n'), ((4733, 4773), 'os.makedirs', 'os.makedirs', (['out_sub_root'], {'exist_ok': '(True)'}), '(out_sub_root, exist_ok=True)\n', (4744, 4773), False, 'import os\n'), ((4793, 4826), 'os.path.join', 'os.path.join', (['patient_path', '"""CTA"""'], {}), "(patient_path, 'CTA')\n", (4805, 4826), False, 'import os\n'), ((4847, 4881), 'external_lib.MedCommon.utils.data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['cta_path'], {}), '(cta_path)\n', (4871, 4881), False, 'from external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((4905, 4945), 'os.path.join', 'os.path.join', (['out_sub_root', '"""CTA.nii.gz"""'], {}), "(out_sub_root, 'CTA.nii.gz')\n", (4917, 4945), False, 'import os\n'), ((4954, 5008), 'SimpleITK.WriteImage', 'sitk.WriteImage', (["cta_image['sitk_image']", 'out_cta_file'], {}), "(cta_image['sitk_image'], out_cta_file)\n", (4969, 5008), True, 'import SimpleITK as sitk\n'), ((5029, 5062), 'os.path.join', 'os.path.join', (['patient_path', '"""DWI"""'], {}), "(patient_path, 'DWI')\n", (5041, 5062), False, 'import os\n'), ((5083, 5117), 'external_lib.MedCommon.utils.data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['dwi_path'], {}), '(dwi_path)\n', (5107, 5117), False, 'from external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((5141, 5181), 'os.path.join', 'os.path.join', (['out_sub_root', '"""DWI.nii.gz"""'], {}), "(out_sub_root, 
'DWI.nii.gz')\n", (5153, 5181), False, 'import os\n'), ((5190, 5244), 'SimpleITK.WriteImage', 'sitk.WriteImage', (["dwi_image['sitk_image']", 'out_dwi_file'], {}), "(dwi_image['sitk_image'], out_dwi_file)\n", (5205, 5244), True, 'import SimpleITK as sitk\n'), ((5346, 5376), 'os.path.join', 'os.path.join', (['in_root', 'pn_root'], {}), '(in_root, pn_root)\n', (5358, 5376), False, 'import os\n'), ((5406, 5425), 'os.listdir', 'os.listdir', (['pn_path'], {}), '(pn_path)\n', (5416, 5425), False, 'import os\n'), ((6038, 6059), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (6048, 6059), False, 'import os\n'), ((6081, 6109), 'os.path.join', 'os.path.join', (['data_root', 'pid'], {}), '(data_root, pid)\n', (6093, 6109), False, 'import os\n'), ((6256, 6292), 'os.path.join', 'os.path.join', (['pid_path', '"""CTA.nii.gz"""'], {}), "(pid_path, 'CTA.nii.gz')\n", (6268, 6292), False, 'import os\n'), ((6444, 6513), 'external_lib.MedCommon.segmentation.runner.train_seg.SegmentationTrainer.inference_one_case', 'SegmentationTrainer.inference_one_case', (['model', 'cta_file'], {'is_dcm': '(False)'}), '(model, cta_file, is_dcm=False)\n', (6482, 6513), False, 'from external_lib.MedCommon.segmentation.runner.train_seg import SegmentationTrainer\n'), ((6536, 6569), 'os.path.join', 'os.path.join', (['out_dir', 'pid', '"""CTA"""'], {}), "(out_dir, pid, 'CTA')\n", (6548, 6569), False, 'import os\n'), ((6578, 6617), 'os.makedirs', 'os.makedirs', (['out_cta_dir'], {'exist_ok': '(True)'}), '(out_cta_dir, exist_ok=True)\n', (6589, 6617), False, 'import os\n'), ((6641, 6680), 'os.path.join', 'os.path.join', (['out_cta_dir', '"""CTA.nii.gz"""'], {}), "(out_cta_dir, 'CTA.nii.gz')\n", (6653, 6680), False, 'import os\n'), ((6709, 6753), 'os.path.join', 'os.path.join', (['out_cta_dir', '"""CTA_MASK.nii.gz"""'], {}), "(out_cta_dir, 'CTA_MASK.nii.gz')\n", (6721, 6753), False, 'import os\n'), ((6763, 6799), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['image', 'out_cta_file'], {}), 
'(image, out_cta_file)\n', (6778, 6799), True, 'import SimpleITK as sitk\n'), ((6808, 6853), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['pred_mask', 'out_cta_mask_file'], {}), '(pred_mask, out_cta_mask_file)\n', (6823, 6853), True, 'import SimpleITK as sitk\n'), ((7075, 7095), 'os.listdir', 'os.listdir', (['root_dir'], {}), '(root_dir)\n', (7085, 7095), False, 'import os\n'), ((7117, 7144), 'os.path.join', 'os.path.join', (['root_dir', 'pid'], {}), '(root_dir, pid)\n', (7129, 7144), False, 'import os\n'), ((7225, 7254), 'os.path.join', 'os.path.join', (['pid_path', '"""CTA"""'], {}), "(pid_path, 'CTA')\n", (7237, 7254), False, 'import os\n'), ((7286, 7327), 'os.path.join', 'os.path.join', (['cta_root', '"""CTA_MASK.nii.gz"""'], {}), "(cta_root, 'CTA_MASK.nii.gz')\n", (7298, 7327), False, 'import os\n'), ((7351, 7402), 'os.path.join', 'os.path.join', (['cta_root', '"""CTA_MASK_connected.nii.gz"""'], {}), "(cta_root, 'CTA_MASK_connected.nii.gz')\n", (7363, 7402), False, 'import os\n'), ((8158, 8198), 'os.path.join', 'os.path.join', (['cta_root', 'pid', 'cta_pattern'], {}), '(cta_root, pid, cta_pattern)\n', (8170, 8198), False, 'import os\n'), ((8219, 8261), 'os.path.join', 'os.path.join', (['mask_root', 'pid', 'mask_pattern'], {}), '(mask_root, pid, mask_pattern)\n', (8231, 8261), False, 'import os\n'), ((8281, 8305), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['cta_file'], {}), '(cta_file)\n', (8295, 8305), True, 'import SimpleITK as sitk\n'), ((8324, 8349), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_file'], {}), '(mask_file)\n', (8338, 8349), True, 'import SimpleITK as sitk\n'), ((8380, 8454), 'external_lib.MedCommon.utils.mask_bounding_utils.MaskBoundingUtils.extract_target_area_by_mask_zboundary', 'MaskBoundingUtils.extract_target_area_by_mask_zboundary', (['in_image', 'in_mask'], {}), '(in_image, in_mask)\n', (8435, 8454), False, 'from external_lib.MedCommon.utils.mask_bounding_utils import MaskBoundingUtils\n'), ((8473, 8500), 'os.path.join', 
'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (8485, 8500), False, 'import os\n'), ((8509, 8544), 'os.makedirs', 'os.makedirs', (['out_dir'], {'exist_ok': '(True)'}), '(out_dir, exist_ok=True)\n', (8520, 8544), False, 'import os\n'), ((8570, 8605), 'os.path.join', 'os.path.join', (['out_dir', '"""CTA.nii.gz"""'], {}), "(out_dir, 'CTA.nii.gz')\n", (8582, 8605), False, 'import os\n'), ((8614, 8656), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_image', 'out_image_file'], {}), '(out_image, out_image_file)\n', (8629, 8656), True, 'import SimpleITK as sitk\n'), ((8681, 8717), 'os.path.join', 'os.path.join', (['out_dir', '"""MASK.nii.gz"""'], {}), "(out_dir, 'MASK.nii.gz')\n", (8693, 8717), False, 'import os\n'), ((8726, 8766), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_mask', 'out_mask_file'], {}), '(out_mask, out_mask_file)\n', (8741, 8766), True, 'import SimpleITK as sitk\n'), ((9798, 9817), 'os.listdir', 'os.listdir', (['in_root'], {}), '(in_root)\n', (9808, 9817), False, 'import os\n'), ((11014, 11034), 'os.listdir', 'os.listdir', (['dwi_root'], {}), '(dwi_root)\n', (11024, 11034), False, 'import os\n'), ((12552, 12582), 'os.listdir', 'os.listdir', (['dwi_bbox_mask_root'], {}), '(dwi_bbox_mask_root)\n', (12562, 12582), False, 'import os\n'), ((13955, 13976), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (13965, 13976), False, 'import os\n'), ((13998, 14039), 'os.path.join', 'os.path.join', (['data_root', 'pid', 'cta_pattern'], {}), '(data_root, pid, cta_pattern)\n', (14010, 14039), False, 'import os\n'), ((14060, 14084), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['cta_file'], {}), '(cta_file)\n', (14074, 14084), True, 'import SimpleITK as sitk\n'), ((14340, 14368), 'os.path.join', 'os.path.join', (['data_root', 'pid'], {}), '(data_root, pid)\n', (14352, 14368), False, 'import os\n'), ((14388, 14415), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (14400, 14415), False, 'import os\n'), 
((14424, 14459), 'shutil.copytree', 'shutil.copytree', (['src_file', 'dst_file'], {}), '(src_file, dst_file)\n', (14439, 14459), False, 'import shutil\n'), ((14689, 14710), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (14699, 14710), False, 'import os\n'), ((16968, 17015), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch"""'], {}), "(data_root, '4.registration_batch')\n", (16980, 17015), False, 'import os\n'), ((17030, 17070), 'os.path.join', 'os.path.join', (['data_root', '"""5.train_batch"""'], {}), "(data_root, '5.train_batch')\n", (17042, 17070), False, 'import os\n'), ((22577, 22638), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch_2d_parenchyma"""'], {}), "(data_root, '4.registration_batch_2d_parenchyma')\n", (22589, 22638), False, 'import os\n'), ((22649, 22689), 'os.path.join', 'os.path.join', (['data_root', '"""3.sorted_mask"""'], {}), "(data_root, '3.sorted_mask')\n", (22661, 22689), False, 'import os\n'), ((22700, 22761), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch_2d_parenchyma"""'], {}), "(data_root, '4.registration_batch_2d_parenchyma')\n", (22712, 22761), False, 'import os\n'), ((22883, 22923), 'os.path.join', 'os.path.join', (['data_root', '"""3.sorted_mask"""'], {}), "(data_root, '3.sorted_mask')\n", (22895, 22923), False, 'import os\n'), ((22934, 22995), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch_2d_parenchyma"""'], {}), "(data_root, '4.registration_batch_2d_parenchyma')\n", (22946, 22995), False, 'import os\n'), ((23006, 23067), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch_2d_parenchyma"""'], {}), "(data_root, '4.registration_batch_2d_parenchyma')\n", (23018, 23067), False, 'import os\n'), ((23176, 23237), 'os.path.join', 'os.path.join', (['data_root', '"""4.registration_batch_2d_parenchyma"""'], {}), "(data_root, '4.registration_batch_2d_parenchyma')\n", (23188, 23237), False, 'import os\n'), 
((23252, 23306), 'os.path.join', 'os.path.join', (['data_root', '"""5.train_batch_2d_parenchyma"""'], {}), "(data_root, '5.train_batch_2d_parenchyma')\n", (23264, 23306), False, 'import os\n'), ((23368, 23422), 'os.path.join', 'os.path.join', (['data_root', '"""5.train_batch_2d_parenchyma"""'], {}), "(data_root, '5.train_batch_2d_parenchyma')\n", (23380, 23422), False, 'import os\n'), ((23437, 23483), 'os.path.join', 'os.path.join', (['data_root', '"""6.mpr_2d_parenchyma"""'], {}), "(data_root, '6.mpr_2d_parenchyma')\n", (23449, 23483), False, 'import os\n'), ((23551, 23572), 'os.listdir', 'os.listdir', (['data_root'], {}), '(data_root)\n', (23561, 23572), False, 'import os\n'), ((23598, 23626), 'os.path.join', 'os.path.join', (['data_root', 'pid'], {}), '(data_root, pid)\n', (23610, 23626), False, 'import os\n'), ((23721, 23748), 'shutil.rmtree', 'shutil.rmtree', (['patient_path'], {}), '(patient_path)\n', (23734, 23748), False, 'import shutil\n'), ((3194, 3230), 'os.path.join', 'os.path.join', (['pn_path', 'hospital_name'], {}), '(pn_path, hospital_name)\n', (3206, 3230), False, 'import os\n'), ((5455, 5491), 'os.path.join', 'os.path.join', (['pn_path', 'hospital_name'], {}), '(pn_path, hospital_name)\n', (5467, 5491), False, 'import os\n'), ((5964, 5992), 'torch.nn.DataParallel', 'torch.nn.DataParallel', (['model'], {}), '(model)\n', (5985, 5992), False, 'import torch\n'), ((6125, 6148), 'os.path.isdir', 'os.path.isdir', (['pid_path'], {}), '(pid_path)\n', (6138, 6148), False, 'import os\n'), ((6308, 6332), 'os.path.isfile', 'os.path.isfile', (['cta_file'], {}), '(cta_file)\n', (6322, 6332), False, 'import os\n'), ((7160, 7183), 'os.path.isdir', 'os.path.isdir', (['pid_path'], {}), '(pid_path)\n', (7173, 7183), False, 'import os\n'), ((7432, 7459), 'os.path.isfile', 'os.path.isfile', (['in_cta_file'], {}), '(in_cta_file)\n', (7446, 7459), False, 'import os\n'), ((9089, 9129), 'os.path.join', 'os.path.join', (['cta_root', 'pid', 'cta_pattern'], {}), '(cta_root, 
pid, cta_pattern)\n', (9101, 9129), False, 'import os\n'), ((9154, 9196), 'os.path.join', 'os.path.join', (['mask_root', 'pid', 'mask_pattern'], {}), '(mask_root, pid, mask_pattern)\n', (9166, 9196), False, 'import os\n'), ((9220, 9260), 'os.path.join', 'os.path.join', (['out_root', 'pid', 'out_pattern'], {}), '(out_root, pid, out_pattern)\n', (9232, 9260), False, 'import os\n'), ((9284, 9308), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['cta_file'], {}), '(cta_file)\n', (9298, 9308), True, 'import SimpleITK as sitk\n'), ((9331, 9356), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_file'], {}), '(mask_file)\n', (9345, 9356), True, 'import SimpleITK as sitk\n'), ((9381, 9486), 'external_lib.MedCommon.utils.image_postprocessing_utils.ImagePostProcessingUtils.extract_region_by_mask', 'ImagePostProcessingUtils.extract_region_by_mask', (['in_image', 'in_mask'], {'default_value': '(-1024)', 'mask_label': '(1)'}), '(in_image, in_mask,\n default_value=-1024, mask_label=1)\n', (9428, 9486), False, 'from external_lib.MedCommon.utils.image_postprocessing_utils import ImagePostProcessingUtils\n'), ((9495, 9531), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_image', 'out_file'], {}), '(out_image, out_file)\n', (9510, 9531), True, 'import SimpleITK as sitk\n'), ((9856, 9895), 'os.path.join', 'os.path.join', (['in_root', 'pid', 'dwi_pattern'], {}), '(in_root, pid, dwi_pattern)\n', (9868, 9895), False, 'import os\n'), ((9920, 9944), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['dwi_file'], {}), '(dwi_file)\n', (9934, 9944), True, 'import SimpleITK as sitk\n'), ((10042, 10071), 'numpy.ones', 'np.ones', (['size'], {'dtype': 'np.uint8'}), '(size, dtype=np.uint8)\n', (10049, 10071), True, 'import numpy as np\n'), ((10096, 10133), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['bbox_mask_arr'], {}), '(bbox_mask_arr)\n', (10118, 10133), True, 'import SimpleITK as sitk\n'), ((10209, 10236), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', 
(10221, 10236), False, 'import os\n'), ((10249, 10288), 'os.makedirs', 'os.makedirs', (['out_sub_dir'], {'exist_ok': '(True)'}), '(out_sub_dir, exist_ok=True)\n', (10260, 10288), False, 'import os\n'), ((10317, 10364), 'os.path.join', 'os.path.join', (['out_sub_dir', 'out_dwi_mask_pattern'], {}), '(out_sub_dir, out_dwi_mask_pattern)\n', (10329, 10364), False, 'import os\n'), ((10377, 10418), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['bbox_mask', 'out_mask_file'], {}), '(bbox_mask, out_mask_file)\n', (10392, 10418), True, 'import SimpleITK as sitk\n'), ((10577, 10612), 'shutil.copyfile', 'shutil.copyfile', (['src_file', 'dst_file'], {}), '(src_file, dst_file)\n', (10592, 10612), False, 'import shutil\n'), ((11073, 11113), 'os.path.join', 'os.path.join', (['dwi_root', 'pid', 'dwi_pattern'], {}), '(dwi_root, pid, dwi_pattern)\n', (11085, 11113), False, 'import os\n'), ((11138, 11180), 'os.path.join', 'os.path.join', (['mask_root', 'pid', 'mask_pattern'], {}), '(mask_root, pid, mask_pattern)\n', (11150, 11180), False, 'import os\n'), ((11498, 11616), 'external_lib.MedCommon.utils.image_postprocessing_utils.ImagePostProcessingUtils.extract_region_by_mask', 'ImagePostProcessingUtils.extract_region_by_mask', (['dwi_image', 'mask_image'], {'default_value': '(-1024)', 'mask_label': 'mask_label'}), '(dwi_image, mask_image,\n default_value=-1024, mask_label=mask_label)\n', (11545, 11616), False, 'from external_lib.MedCommon.utils.image_postprocessing_utils import ImagePostProcessingUtils\n'), ((11689, 11732), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['extracted_dwi_image'], {}), '(extracted_dwi_image)\n', (11711, 11732), True, 'import SimpleITK as sitk\n'), ((11806, 11837), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['tmp_arr'], {}), '(tmp_arr)\n', (11828, 11837), True, 'import SimpleITK as sitk\n'), ((11924, 11951), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (11936, 11951), False, 'import os\n'), 
((11964, 12003), 'os.makedirs', 'os.makedirs', (['out_sub_dir'], {'exist_ok': '(True)'}), '(out_sub_dir, exist_ok=True)\n', (11975, 12003), False, 'import os\n'), ((12031, 12073), 'os.path.join', 'os.path.join', (['out_sub_dir', 'out_dwi_pattern'], {}), '(out_sub_dir, out_dwi_pattern)\n', (12043, 12073), False, 'import os\n'), ((12087, 12137), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['extracted_dwi_image', 'out_dwi_file'], {}), '(extracted_dwi_image, out_dwi_file)\n', (12102, 12137), True, 'import SimpleITK as sitk\n'), ((12633, 12697), 'os.path.join', 'os.path.join', (['parenchyma_mask_root', 'pid', 'parenchyma_mask_pattern'], {}), '(parenchyma_mask_root, pid, parenchyma_mask_pattern)\n', (12645, 12697), False, 'import os\n'), ((12731, 12786), 'os.path.join', 'os.path.join', (['dwi_bbox_mask_root', 'pid', 'dwi_mask_pattern'], {}), '(dwi_bbox_mask_root, pid, dwi_mask_pattern)\n', (12743, 12786), False, 'import os\n'), ((12823, 12859), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['parenchyma_mask_file'], {}), '(parenchyma_mask_file)\n', (12837, 12859), True, 'import SimpleITK as sitk\n'), ((12894, 12928), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['dwi_bbox_mask_file'], {}), '(dwi_bbox_mask_file)\n', (12908, 12928), True, 'import SimpleITK as sitk\n'), ((12963, 13008), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['parenchyma_mask_image'], {}), '(parenchyma_mask_image)\n', (12985, 13008), True, 'import SimpleITK as sitk\n'), ((13041, 13084), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['dwi_bbox_mask_image'], {}), '(dwi_bbox_mask_image)\n', (13063, 13084), True, 'import SimpleITK as sitk\n'), ((13185, 13220), 'numpy.array', 'np.array', (['merged_mask_arr', 'np.uint8'], {}), '(merged_mask_arr, np.uint8)\n', (13193, 13220), True, 'import numpy as np\n'), ((13253, 13292), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['merged_mask_arr'], {}), '(merged_mask_arr)\n', (13275, 13292), True, 'import SimpleITK as sitk\n'), ((13401, 
13428), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (13413, 13428), False, 'import os\n'), ((13441, 13480), 'os.makedirs', 'os.makedirs', (['out_sub_dir'], {'exist_ok': '(True)'}), '(out_sub_dir, exist_ok=True)\n', (13452, 13480), False, 'import os\n'), ((13509, 13552), 'os.path.join', 'os.path.join', (['out_sub_dir', 'out_mask_pattern'], {}), '(out_sub_dir, out_mask_pattern)\n', (13521, 13552), False, 'import os\n'), ((13566, 13615), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['merged_mask_image', 'out_mask_file'], {}), '(merged_mask_image, out_mask_file)\n', (13581, 13615), True, 'import SimpleITK as sitk\n'), ((14755, 14796), 'os.path.join', 'os.path.join', (['data_root', 'pid', 'src_pattern'], {}), '(data_root, pid, src_pattern)\n', (14767, 14796), False, 'import os\n'), ((14826, 14867), 'os.path.join', 'os.path.join', (['data_root', 'pid', 'dst_pattern'], {}), '(data_root, pid, dst_pattern)\n', (14838, 14867), False, 'import os\n'), ((14973, 15013), 'os.makedirs', 'os.makedirs', (['out_sub_root'], {'exist_ok': '(True)'}), '(out_sub_root, exist_ok=True)\n', (14984, 15013), False, 'import os\n'), ((15138, 15168), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['src_image_file'], {}), '(src_image_file)\n', (15152, 15168), True, 'import SimpleITK as sitk\n'), ((15193, 15223), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['dst_image_file'], {}), '(dst_image_file)\n', (15207, 15223), True, 'import SimpleITK as sitk\n'), ((15236, 15327), 'external_lib.MedCommon.utils.image_show_utils.ImageShowUtils.save_volume_to_mpr_jpg', 'ImageShowUtils.save_volume_to_mpr_jpg', (['src_image', 'out_sub_root', '(150)', '(50)', 'out_src_prefix'], {}), '(src_image, out_sub_root, 150, 50,\n out_src_prefix)\n', (15273, 15327), False, 'from external_lib.MedCommon.utils.image_show_utils import ImageShowUtils\n'), ((15336, 15428), 'external_lib.MedCommon.utils.image_show_utils.ImageShowUtils.save_volume_to_mpr_jpg', 'ImageShowUtils.save_volume_to_mpr_jpg', 
(['dst_image', 'out_sub_root', '(400)', '(200)', 'out_dst_prefix'], {}), '(dst_image, out_sub_root, 400, 200,\n out_dst_prefix)\n', (15373, 15428), False, 'from external_lib.MedCommon.utils.image_show_utils import ImageShowUtils\n'), ((17232, 17259), 'os.path.join', 'os.path.join', (['out_root', 'pid'], {}), '(out_root, pid)\n', (17244, 17259), False, 'import os\n'), ((17272, 17312), 'os.makedirs', 'os.makedirs', (['out_sub_root'], {'exist_ok': '(True)'}), '(out_sub_root, exist_ok=True)\n', (17283, 17312), False, 'import os\n'), ((17341, 17375), 'os.path.join', 'os.path.join', (['in_root', 'pid', '"""NCCT"""'], {}), "(in_root, pid, 'NCCT')\n", (17353, 17375), False, 'import os\n'), ((17446, 17478), 'os.path.join', 'os.path.join', (['patient_path', 'suid'], {}), '(patient_path, suid)\n', (17458, 17478), False, 'import os\n'), ((17503, 17537), 'external_lib.MedCommon.utils.data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['cta_path'], {}), '(cta_path)\n', (17527, 17537), False, 'from external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((17565, 17605), 'os.path.join', 'os.path.join', (['out_sub_root', '"""CTA.nii.gz"""'], {}), "(out_sub_root, 'CTA.nii.gz')\n", (17577, 17605), False, 'import os\n'), ((17618, 17672), 'SimpleITK.WriteImage', 'sitk.WriteImage', (["cta_image['sitk_image']", 'out_cta_file'], {}), "(cta_image['sitk_image'], out_cta_file)\n", (17633, 17672), True, 'import SimpleITK as sitk\n'), ((17701, 17734), 'os.path.join', 'os.path.join', (['in_root', 'pid', '"""DWI"""'], {}), "(in_root, pid, 'DWI')\n", (17713, 17734), False, 'import os\n'), ((17805, 17845), 'os.path.join', 'os.path.join', (['patient_path', 'suid', '"""bxxx"""'], {}), "(patient_path, suid, 'bxxx')\n", (17817, 17845), False, 'import os\n'), ((17870, 17904), 'external_lib.MedCommon.utils.data_io_utils.DataIO.load_dicom_series', 'DataIO.load_dicom_series', (['dwi_path'], {}), '(dwi_path)\n', (17894, 17904), False, 'from 
external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((17932, 17972), 'os.path.join', 'os.path.join', (['out_sub_root', '"""DWI.nii.gz"""'], {}), "(out_sub_root, 'DWI.nii.gz')\n", (17944, 17972), False, 'import os\n'), ((17985, 18039), 'SimpleITK.WriteImage', 'sitk.WriteImage', (["dwi_image['sitk_image']", 'out_dwi_file'], {}), "(dwi_image['sitk_image'], out_dwi_file)\n", (18000, 18039), True, 'import SimpleITK as sitk\n'), ((3272, 3297), 'os.listdir', 'os.listdir', (['hospital_path'], {}), '(hospital_path)\n', (3282, 3297), False, 'import os\n'), ((7487, 7514), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['in_cta_file'], {}), '(in_cta_file)\n', (7501, 7514), True, 'import SimpleITK as sitk\n'), ((7547, 7641), 'external_lib.MedCommon.utils.image_postprocessing_utils.ImagePostProcessingUtils.get_maximal_connected_region_multilabel', 'ImagePostProcessingUtils.get_maximal_connected_region_multilabel', (['in_mask'], {'mask_labels': '[1]'}), '(in_mask,\n mask_labels=[1])\n', (7611, 7641), False, 'from external_lib.MedCommon.utils.image_postprocessing_utils import ImagePostProcessingUtils\n'), ((7670, 7714), 'external_lib.MedCommon.utils.mask_utils.MaskUtils.fill_hole', 'MaskUtils.fill_hole', (['out_mask_sitk'], {'radius': '(6)'}), '(out_mask_sitk, radius=6)\n', (7689, 7714), False, 'from external_lib.MedCommon.utils.mask_utils import MaskUtils\n'), ((7731, 7775), 'SimpleITK.WriteImage', 'sitk.WriteImage', (['out_mask_sitk', 'out_cta_file'], {}), '(out_mask_sitk, out_cta_file)\n', (7746, 7775), True, 'import SimpleITK as sitk\n'), ((10537, 10563), 'os.path.basename', 'os.path.basename', (['src_file'], {}), '(src_file)\n', (10553, 10563), False, 'import os\n'), ((11200, 11224), 'os.path.isfile', 'os.path.isfile', (['dwi_file'], {}), '(dwi_file)\n', (11214, 11224), False, 'import os\n'), ((11270, 11295), 'os.path.isfile', 'os.path.isfile', (['mask_file'], {}), '(mask_file)\n', (11284, 11295), False, 'import os\n'), ((11346, 11377), 
'external_lib.MedCommon.utils.data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['dwi_file'], {}), '(dwi_file)\n', (11367, 11377), False, 'from external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((11417, 11449), 'external_lib.MedCommon.utils.data_io_utils.DataIO.load_nii_image', 'DataIO.load_nii_image', (['mask_file'], {}), '(mask_file)\n', (11438, 11449), False, 'from external_lib.MedCommon.utils.data_io_utils import DataIO\n'), ((17395, 17419), 'os.listdir', 'os.listdir', (['patient_path'], {}), '(patient_path)\n', (17405, 17419), False, 'import os\n'), ((17754, 17778), 'os.listdir', 'os.listdir', (['patient_path'], {}), '(patient_path)\n', (17764, 17778), False, 'import os\n'), ((3423, 3455), 'os.path.join', 'os.path.join', (['hospital_path', 'pid'], {}), '(hospital_path, pid)\n', (3435, 3455), False, 'import os\n'), ((3489, 3509), 'os.listdir', 'os.listdir', (['pid_path'], {}), '(pid_path)\n', (3499, 3509), False, 'import os\n'), ((3726, 3751), 'os.path.join', 'os.path.join', (['pid_path', 'm'], {}), '(pid_path, m)\n', (3738, 3751), False, 'import os\n'), ((4052, 4087), 'shutil.copytree', 'shutil.copytree', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (4067, 4087), False, 'import shutil\n'), ((3986, 4011), 'os.path.dirname', 'os.path.dirname', (['dst_path'], {}), '(dst_path)\n', (4001, 4011), False, 'import os\n')] |
"""
The MIT License (MIT)
Copyright (c) 2017 <NAME>
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import numpy as np
import scipy as scp
import logging
import shutil
import time
import traceback
from multiprocessing import Process
from mutils import json
# Route all log output to stdout with a timestamped format.
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                    level=logging.INFO,
                    stream=sys.stdout)
if __name__ == '__main__':
    # Smoke check when the module is run directly.
    logging.info("Hello World.")
def robust_training(model, restarts=5, subprocess=False):
    """Train ``model`` with automatic restarts after crashes.

    Parameters
    ----------
    model : object
        Model exposing ``epoch``, ``load_from_logdir()`` and ``fit()``.
    restarts : int
        Maximum number of consecutive crashes tolerated at one epoch.
    subprocess : bool
        If True, run each fit attempt in a separate process; otherwise
        rely on in-process exception handling.
    """
    runner = robust_training_subprocess if subprocess else robust_training_exceptions
    runner(model, restarts=restarts)
def robust_training_exceptions(model, restarts=5):
    """Run ``model.fit()``, restarting from the last checkpoint after crashes.

    Training is retried in-process: after any exception the model is
    reloaded from its logdir and fitting resumes.  If the model keeps
    crashing without making epoch progress it is retried at most
    ``restarts`` times before giving up.  A KeyboardInterrupt stops
    training immediately.

    Parameters
    ----------
    model : object
        Must provide ``epoch``, ``load_from_logdir()`` and ``fit()``.
    restarts : int
        Maximum number of consecutive crashes at the same epoch.
    """
    start_time = time.time()
    crash_count = 0
    crashed_epoch = 0
    crash_epoch_count = 0
    model.epoch = 0
    while True:
        try:
            model.load_from_logdir()
            logging.info("Starting training at epoch: {}".format(model.epoch))
            model.fit()
            break
        except KeyboardInterrupt:
            break
        # BUG FIX: was a bare ``except:``, which also swallowed SystemExit
        # and GeneratorExit; narrowed to Exception.
        except Exception:
            traceback.print_exc()
            print()
            crash_count += 1
            logging.warning("Training was KILLED, count: {}".format(
                crash_count))
            if crashed_epoch >= model.epoch or restarts == 0:
                # No epoch progress since the previous crash.
                crash_epoch_count += 1
                if crash_epoch_count >= restarts:
                    logging.info(
                        "Model crashed {} times at epoch {}. "
                        "Stopping training.".format(
                            restarts + 1, crashed_epoch))
                    break
            else:
                # Progress was made; remember the epoch and reset the counter.
                crashed_epoch = model.epoch
                crash_epoch_count = 0
    end_time = (time.time() - start_time) / 3600
    logging.info("Finished training in {} hours".format(end_time))
def robust_training_subprocess(model, restarts=5):
    """Run ``model.fit()`` in a child process, restarting after crashes.

    Each fit attempt runs in its own ``multiprocessing.Process``; a
    non-zero exit code counts as a crash and triggers a reload/restart.

    Parameters
    ----------
    model : object
        Must provide ``epoch``, ``load_from_logdir()`` and ``fit()``,
        and be safe to hand to ``multiprocessing.Process``.
    restarts : int
        Maximum number of consecutive crashes at the same epoch.
    """
    start_time = time.time()
    crash_count = 0
    crashed_epoch = 0
    crash_epoch_count = 0
    model.epoch = 0
    logging.warning("Run training in a seperate process."
                    " Make sure that your model supports multiprocessing or"
                    " deactivate robust training.")
    while True:
        # Reload the most recent checkpoint before each attempt.
        model.load_from_logdir()
        logging.info("Starting training at epoch: {}".format(model.epoch))
        p = Process(target=model.fit)
        p.start()
        p.join()
        # exit code 0 means fit() completed normally.
        if p.exitcode == 0:
            break
        else:
            # logging.info("Error: {}".format(sys.exc_info()[0]))
            # traceback.print_exc()
            crash_count += 1
            logging.warning("Training was KILLED, count: {}".format(
                crash_count))
            # NOTE(review): fit() runs in a child process, so updates to
            # model.epoch presumably only become visible here through
            # load_from_logdir() — confirm with the model implementation.
            if crashed_epoch >= model.epoch:
                crash_epoch_count += 1
                if crash_epoch_count >= restarts:
                    logging.info(
                        "Model crashed {} times at epoch {}. "
                        "Stopping training.".format(
                            restarts + 1, crashed_epoch))
                    break
            else:
                # Epoch advanced since the last crash; reset the counter.
                crashed_epoch = model.epoch
                crash_epoch_count = 0
    end_time = (time.time() - start_time) / 3600
    logging.info("Finished training in {} hours".format(end_time))
def set_gpus_to_use(args, gpus=None):
    """Configure CUDA_VISIBLE_DEVICES from an explicit id string or CLI args.

    ``gpus`` (if given) is applied first; ``args.gpus`` then takes
    precedence.  When neither is provided and the TV_USE_GPUS environment
    variable is set to 'force', the process exits with an error.
    """
    if gpus is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = gpus
    if args.gpus is not None:
        logging.info("GPUs are set to: %s", args.gpus)
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpus
        return
    if os.environ.get('TV_USE_GPUS') == 'force':
        logging.error('Please specify a GPU.')
        logging.error('Usage {} --gpus <ids>'.format(sys.argv[0]))
        exit(1)
def create_filewrite_handler(logging_file, mode='w'):
    """Attach a file handler to the root logger.

    A copy of all log output is additionally written to ``logging_file``;
    the containing directory is created if missing.

    Parameters
    ----------
    logging_file : string
        Destination file for the log copy.
    mode : string
        File open mode, 'w' by default.

    Returns
    -------
    logging.FileHandler
        The handler that was attached.
    """
    log_dir = os.path.dirname(logging_file)
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    handler = logging.FileHandler(logging_file, mode=mode)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(name)-3s %(levelname)-3s %(message)s'))
    handler.setLevel(logging.INFO)
    logging.getLogger('').addHandler(handler)
    return handler
def initialize_output_dir(cfg, cfg_file, output_dir, do_logging=True):
    """Prepare ``output_dir``: dump the config as JSON and mirror copy_dirs.

    The configuration dictionary is serialized to ``conf.json`` inside
    ``output_dir``, and every directory listed under ``cfg['copy_dirs']``
    (resolved relative to the config file's location) is copied there,
    replacing any previous copy.
    """
    if os.path.exists(output_dir):
        logging.warning("Path exists: {}".format(output_dir))
        logging.warning("Potentially overwriting existing model.")
    else:
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, 'conf.json'), 'w') as outfile:
        json.dump(cfg, outfile, indent=2, sort_keys=True)
    config_dir = os.path.dirname(os.path.realpath(cfg_file))
    for rel_dir in cfg['copy_dirs']:
        source = os.path.realpath(os.path.join(config_dir, rel_dir))
        target = os.path.join(output_dir, os.path.basename(rel_dir))
        if os.path.exists(target):
            shutil.rmtree(target)
        shutil.copytree(source, target)
    # Creating an additional logging saving the console outputs
    # into the training folder
    # if do_logging:
    #     logging_file = os.path.join(output_dir, "output.log")
    #     create_filewrite_handler(logging_file)
    return
class ExpoSmoother():
    """Exponential moving-average smoother for vectors of values."""

    def __init__(self, decay=0.9):
        # Running average; created lazily on the first update.
        self.weights = None
        self.decay = decay

    def update_weights(self, l):
        """Fold the new sample ``l`` into the running average and return it."""
        sample = np.array(l)
        if self.weights is None:
            self.weights = sample
        else:
            self.weights = self.decay * self.weights + (1 - self.decay) * sample
        return self.weights

    def get_weights(self):
        """Return the current smoothed values as a plain list."""
        return self.weights.tolist()
class MedianSmoother():
    """Running-median smoother over the last ``num_entries`` samples.

    ``update_weights`` appends one new sample per tracked value and
    returns the median of each value's trailing window.
    """

    def __init__(self, num_entries=50):
        # Per-value histories; created lazily on the first update.
        self.weights = None
        # BUG FIX: the window size was hard-coded to 50, silently
        # ignoring the num_entries argument.
        self.num = num_entries

    def update_weights(self, l):
        """Append sample vector ``l``; return the per-value trailing medians."""
        l = np.array(l).tolist()
        if self.weights is None:
            self.weights = [[i] for i in l]
        else:
            for i, w in enumerate(self.weights):
                w.append(l[i])
            # BUG FIX: the trim condition previously compared the NUMBER of
            # tracked values (len(self.weights), constant) against the
            # threshold, so histories grew without bound.  Compare the
            # history length instead to keep memory bounded.
            if self.weights and len(self.weights[0]) > 20 * self.num:
                self.weights = [w[-self.num:] for w in self.weights]
        return [np.median(w[-self.num:]) for w in self.weights]

    def get_weights(self):
        """Return the current trailing medians without adding a sample."""
        return [np.median(w[-self.num:]) for w in self.weights]
| [
"mutils.json.dump",
"logging.Formatter",
"shutil.rmtree",
"os.path.join",
"logging.error",
"traceback.print_exc",
"logging.FileHandler",
"logging.warning",
"os.path.dirname",
"os.path.exists",
"os.path.basename",
"numpy.median",
"os.path.realpath",
"os.makedirs",
"logging.basicConfig",
... | [((352, 463), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(levelname)s %(message)s"""', 'level': 'logging.INFO', 'stream': 'sys.stdout'}), "(format='%(asctime)s %(levelname)s %(message)s', level=\n logging.INFO, stream=sys.stdout)\n", (371, 463), False, 'import logging\n'), ((532, 560), 'logging.info', 'logging.info', (['"""Hello World."""'], {}), "('Hello World.')\n", (544, 560), False, 'import logging\n'), ((847, 858), 'time.time', 'time.time', ([], {}), '()\n', (856, 858), False, 'import time\n'), ((2221, 2232), 'time.time', 'time.time', ([], {}), '()\n', (2230, 2232), False, 'import time\n'), ((2328, 2474), 'logging.warning', 'logging.warning', (['"""Run training in a seperate process. Make sure that your model supports multiprocessing or deactivate robust training."""'], {}), "(\n 'Run training in a seperate process. Make sure that your model supports multiprocessing or deactivate robust training.'\n )\n", (2343, 2474), False, 'import logging\n'), ((4446, 4475), 'os.path.dirname', 'os.path.dirname', (['logging_file'], {}), '(logging_file)\n', (4461, 4475), False, 'import os\n'), ((4564, 4608), 'logging.FileHandler', 'logging.FileHandler', (['logging_file'], {'mode': 'mode'}), '(logging_file, mode=mode)\n', (4583, 4608), False, 'import logging\n'), ((4625, 4696), 'logging.Formatter', 'logging.Formatter', (['"""%(asctime)s %(name)-3s %(levelname)-3s %(message)s"""'], {}), "('%(asctime)s %(name)-3s %(levelname)-3s %(message)s')\n", (4642, 4696), False, 'import logging\n'), ((5156, 5193), 'os.path.join', 'os.path.join', (['output_dir', '"""conf.json"""'], {}), "(output_dir, 'conf.json')\n", (5168, 5193), False, 'import os\n'), ((2650, 2675), 'multiprocessing.Process', 'Process', ([], {'target': 'model.fit'}), '(target=model.fit)\n', (2657, 2675), False, 'from multiprocessing import Process\n'), ((4027, 4073), 'logging.info', 'logging.info', (['"""GPUs are set to: %s"""', 'args.gpus'], {}), "('GPUs are set to: %s', 
args.gpus)\n", (4039, 4073), False, 'import logging\n'), ((4487, 4513), 'os.path.exists', 'os.path.exists', (['target_dir'], {}), '(target_dir)\n', (4501, 4513), False, 'import os\n'), ((4523, 4546), 'os.makedirs', 'os.makedirs', (['target_dir'], {}), '(target_dir)\n', (4534, 4546), False, 'import os\n'), ((4938, 4964), 'os.path.exists', 'os.path.exists', (['output_dir'], {}), '(output_dir)\n', (4952, 4964), False, 'import os\n'), ((4974, 4997), 'os.makedirs', 'os.makedirs', (['output_dir'], {}), '(output_dir)\n', (4985, 4997), False, 'import os\n'), ((5078, 5136), 'logging.warning', 'logging.warning', (['"""Potentially overwriting existing model."""'], {}), "('Potentially overwriting existing model.')\n", (5093, 5136), False, 'import logging\n'), ((5246, 5295), 'mutils.json.dump', 'json.dump', (['cfg', 'outfile'], {'indent': '(2)', 'sort_keys': '(True)'}), '(cfg, outfile, indent=2, sort_keys=True)\n', (5255, 5295), False, 'from mutils import json\n'), ((5329, 5355), 'os.path.realpath', 'os.path.realpath', (['cfg_file'], {}), '(cfg_file)\n', (5345, 5355), False, 'import os\n'), ((5410, 5443), 'os.path.join', 'os.path.join', (['base_path', 'dir_name'], {}), '(base_path, dir_name)\n', (5422, 5443), False, 'import os\n'), ((5458, 5479), 'os.path.realpath', 'os.path.realpath', (['src'], {}), '(src)\n', (5474, 5479), False, 'import os\n'), ((5496, 5522), 'os.path.basename', 'os.path.basename', (['dir_name'], {}), '(dir_name)\n', (5512, 5522), False, 'import os\n'), ((5537, 5567), 'os.path.join', 'os.path.join', (['output_dir', 'name'], {}), '(output_dir, name)\n', (5549, 5567), False, 'import os\n'), ((5579, 5598), 'os.path.exists', 'os.path.exists', (['dst'], {}), '(dst)\n', (5593, 5598), False, 'import os\n'), ((5639, 5664), 'shutil.copytree', 'shutil.copytree', (['src', 'dst'], {}), '(src, dst)\n', (5654, 5664), False, 'import shutil\n'), ((2051, 2062), 'time.time', 'time.time', ([], {}), '()\n', (2060, 2062), False, 'import time\n'), ((3489, 3500), 'time.time', 
'time.time', ([], {}), '()\n', (3498, 3500), False, 'import time\n'), ((4787, 4808), 'logging.getLogger', 'logging.getLogger', (['""""""'], {}), "('')\n", (4804, 4808), False, 'import logging\n'), ((5612, 5630), 'shutil.rmtree', 'shutil.rmtree', (['dst'], {}), '(dst)\n', (5625, 5630), False, 'import shutil\n'), ((6151, 6162), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (6159, 6162), True, 'import numpy as np\n'), ((7105, 7129), 'numpy.median', 'np.median', (['w[-self.num:]'], {}), '(w[-self.num:])\n', (7114, 7129), True, 'import numpy as np\n'), ((1377, 1398), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (1396, 1398), False, 'import traceback\n'), ((3871, 3909), 'logging.error', 'logging.error', (['"""Please specify a GPU."""'], {}), "('Please specify a GPU.')\n", (3884, 3909), False, 'import logging\n'), ((6614, 6625), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (6622, 6625), True, 'import numpy as np\n'), ((6732, 6756), 'numpy.median', 'np.median', (['w[-self.num:]'], {}), '(w[-self.num:])\n', (6741, 6756), True, 'import numpy as np\n'), ((7013, 7037), 'numpy.median', 'np.median', (['w[-self.num:]'], {}), '(w[-self.num:])\n', (7022, 7037), True, 'import numpy as np\n'), ((6305, 6316), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (6313, 6316), True, 'import numpy as np\n')] |
from model.MSVR import MSVR
from model.utility import create_dataset,rmse
from sklearn.preprocessing import MinMaxScaler
import numpy as np
import argparse
# Mackey-Glass chaotic time series used as the benchmark dataset.
dataPath = 'data/MackeyGlass_t17.txt'
rawData = np.loadtxt(dataPath)
parser = argparse.ArgumentParser(
    description='MSVR for Time Series Forecasting')
# NOTE(review): default is 10 but the help text says "default: 1" — confirm
# which is intended.
parser.add_argument('-inputDim', type=int, default=10, metavar='N',
                    help='steps for prediction (default: 1)')
# Forecast horizon: number of future steps predicted jointly.
parser.add_argument('-outputH', type=int, default=2)
if __name__ == "__main__":
    opt = parser.parse_args()
    dim = opt.inputDim
    h = opt.outputH
    ts = rawData.reshape(-1)
    # First 2/3 of the series is used for training, the rest for testing.
    segmentation = int(len(ts)*2/3)
    dataset = create_dataset(ts,dim,h)
    # Scale every column into [-1, 1] before fitting the regressor.
    scaler = MinMaxScaler(feature_range=(-1, 1))
    dataset = scaler.fit_transform(dataset)
    # Last h columns are the multi-step targets; the rest are the inputs.
    X, Y = dataset[:, :(0 - h)], dataset[:, (0-h):]
    train_input = X[:segmentation, :]
    train_target = Y[:segmentation].reshape(-1, h)
    test_input = X[segmentation:, :]
    test_target = Y[segmentation:].reshape(-1, h)
    msvr = MSVR()
    msvr.fit(train_input,train_target)
    trainPred = msvr.predict(train_input)
    testPred = msvr.predict(test_input)
    # Report RMSE on both splits.
    trainMetric = rmse(train_target,trainPred)
    testMetric = rmse(test_target,testPred)
    print(trainMetric, testMetric)
| [
"model.utility.rmse",
"model.MSVR.MSVR",
"argparse.ArgumentParser",
"sklearn.preprocessing.MinMaxScaler",
"model.utility.create_dataset",
"numpy.loadtxt"
] | [((208, 228), 'numpy.loadtxt', 'np.loadtxt', (['dataPath'], {}), '(dataPath)\n', (218, 228), True, 'import numpy as np\n'), ((239, 310), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""MSVR for Time Series Forecasting"""'}), "(description='MSVR for Time Series Forecasting')\n", (262, 310), False, 'import argparse\n'), ((686, 712), 'model.utility.create_dataset', 'create_dataset', (['ts', 'dim', 'h'], {}), '(ts, dim, h)\n', (700, 712), False, 'from model.utility import create_dataset, rmse\n'), ((724, 759), 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {'feature_range': '(-1, 1)'}), '(feature_range=(-1, 1))\n', (736, 759), False, 'from sklearn.preprocessing import MinMaxScaler\n'), ((1044, 1050), 'model.MSVR.MSVR', 'MSVR', ([], {}), '()\n', (1048, 1050), False, 'from model.MSVR import MSVR\n'), ((1191, 1220), 'model.utility.rmse', 'rmse', (['train_target', 'trainPred'], {}), '(train_target, trainPred)\n', (1195, 1220), False, 'from model.utility import create_dataset, rmse\n'), ((1237, 1264), 'model.utility.rmse', 'rmse', (['test_target', 'testPred'], {}), '(test_target, testPred)\n', (1241, 1264), False, 'from model.utility import create_dataset, rmse\n')] |
# This script processes MIMIC-III dataset and builds a binary matrix or a count matrix depending on your input.
# The output matrix is a Numpy matrix of type float32, and suitable for training medGAN.
# Written by <NAME> (<EMAIL>), augmented by <NAME> (<EMAIL>)
# Usage: Put this script in the folder where the MIMIC-III CSV files are located, then execute the command below.
# python process_mimic.py ADMISSIONS.csv DIAGNOSES_ICD.csv <output file> <"binary"|"count">
# Note that the last argument "binary/count" determines whether you want to create a binary matrix or a count matrix.
from sklearn import preprocessing
from torch.utils.data import Dataset
import numpy as np
import pandas as pd
import torch
from datetime import datetime
def get_patient_matrix(admissionFile, diagnosisFile, binary_count):
    """Build a patient-by-diagnosis-code matrix from MIMIC-III CSV dumps.

    Parameters
    ----------
    admissionFile : str
        Path to ADMISSIONS.csv; column 1 is the patient id, column 2 the
        admission id, column 3 the admission timestamp.
    diagnosisFile : str
        Path to DIAGNOSES_ICD.csv; column 2 is the admission id, column 4
        the quoted ICD9 code.
    binary_count : str
        'binary' for a 0/1 presence matrix, 'count' for occurrence counts.

    Returns
    -------
    numpy.ndarray of float32, shape (num_patients, num_codes)

    Raises
    ------
    Exception
        If binary_count is neither 'binary' nor 'count'.
    """
    if binary_count != 'binary' and binary_count != 'count':
        raise Exception('You must choose either binary or count.')

    # BUG FIX: files were opened without a context manager and leaked on
    # any exception; use ``with`` so they are always closed.
    # NOTE(review): the naive line.split(',') breaks on quoted fields that
    # contain commas; fine for stock MIMIC-III dumps, but csv.reader would
    # be safer.
    pidAdmMap = {}   # patient id -> list of admission ids
    admDateMap = {}  # admission id -> admission datetime
    with open(admissionFile, 'r') as infd:
        infd.readline()  # skip header
        for line in infd:
            tokens = line.strip().split(',')
            pid = int(tokens[1])
            admId = int(tokens[2])
            admDateMap[admId] = datetime.strptime(tokens[3], '%Y-%m-%d %H:%M:%S')
            if pid in pidAdmMap: pidAdmMap[pid].append(admId)
            else: pidAdmMap[pid] = [admId]

    # Building admission-dxList mapping
    admDxMap = {}
    with open(diagnosisFile, 'r') as infd:
        infd.readline()  # skip header
        for line in infd:
            tokens = line.strip().split(',')
            admId = int(tokens[2])
            #dxStr = 'D_' + convert_to_icd9(tokens[4][1:-1]) ############## Uncomment this line and comment the line below, if you want to use the entire ICD9 digits.
            dxStr = 'D_' + convert_to_3digit_icd9(tokens[4][1:-1])
            if admId in admDxMap: admDxMap[admId].append(dxStr)
            else: admDxMap[admId] = [dxStr]

    # Patient id -> visits sorted chronologically; each visit is (date, codes).
    # NOTE(review): an admission without any diagnosis rows raises KeyError
    # here (same as the original behavior) — confirm input completeness.
    pidSeqMap = {}
    for pid, admIdList in pidAdmMap.items():
        sortedList = sorted([(admDateMap[admId], admDxMap[admId]) for admId in admIdList])
        pidSeqMap[pid] = sortedList

    # Flatten into parallel lists of patient ids, visit dates and code lists.
    pids = []
    dates = []
    seqs = []
    for pid, visits in pidSeqMap.items():
        pids.append(pid)
        seq = []
        date = []
        for visit in visits:
            date.append(visit[0])
            seq.append(visit[1])
        dates.append(date)
        seqs.append(seq)

    # Map each code string to a dense integer id.
    types = {}
    newSeqs = []
    for patient in seqs:
        newPatient = []
        for visit in patient:
            newVisit = []
            for code in visit:
                if code not in types:
                    types[code] = len(types)
                newVisit.append(types[code])
            newPatient.append(newVisit)
        newSeqs.append(newPatient)

    # Fill the patient-by-code matrix.
    numPatients = len(newSeqs)
    numCodes = len(types)
    matrix = np.zeros((numPatients, numCodes)).astype('float32')
    for i, patient in enumerate(newSeqs):
        for visit in patient:
            for code in visit:
                if binary_count == 'binary':
                    matrix[i][code] = 1.
                else:
                    matrix[i][code] += 1.
    return matrix
def convert_to_icd9(dxStr):
    """Insert the decimal point into a raw ICD9 code string.

    E-codes keep a 4-character prefix before the dot, all other codes a
    3-character prefix; strings no longer than the prefix are returned
    unchanged.
    """
    prefix_len = 4 if dxStr.startswith('E') else 3
    if len(dxStr) > prefix_len:
        return dxStr[:prefix_len] + '.' + dxStr[prefix_len:]
    return dxStr
def convert_to_3digit_icd9(dxStr):
    """Truncate a raw ICD9 code to its category prefix.

    E-codes keep 4 characters, all other codes 3; shorter strings are
    returned unchanged.
    """
    keep = 4 if dxStr.startswith('E') else 3
    return dxStr[:keep] if len(dxStr) > keep else dxStr
class MimicDataset(Dataset):
    """Torch dataset wrapping a patient-by-code matrix."""

    def __init__(self, matrix):
        # Keep the matrix as a tensor so rows can be served directly.
        self.data = torch.tensor(matrix)

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)

    def postprocess(self, data):
        """Threshold generated probabilities at 0.5 into hard 0/1 codes."""
        return (data > 0.5).type(torch.IntTensor)
def get_datasets(train_prop=0.6, validate_prop=0.2,
                 admission_file='ADMISSIONS.csv',
                 diagnosis_file='DIAGNOSES_ICD.csv',
                 binary_count='binary'):
    """Split the MIMIC patient matrix into full/train/validate/test datasets.

    Parameters
    ----------
    train_prop, validate_prop : float
        Fractions of patients assigned to the train and validation
        splits; the remainder becomes the test split.
    admission_file, diagnosis_file : str
        CSV paths forwarded to get_patient_matrix (previously hard-coded;
        defaults preserve the old behavior).
    binary_count : str
        'binary' or 'count' matrix mode.

    Returns
    -------
    tuple of MimicDataset
        (full, train, validate, test)
    """
    matrix = get_patient_matrix(admission_file, diagnosis_file, binary_count)
    end_of_train = int(train_prop * len(matrix))
    end_of_validate = int((train_prop + validate_prop) * len(matrix))
    full = MimicDataset(matrix)
    train = MimicDataset(matrix[:end_of_train])
    validate = MimicDataset(matrix[end_of_train:end_of_validate])
    test = MimicDataset(matrix[end_of_validate:])
    return full, train, validate, test
if __name__ == '__main__':
    # Smoke run: build the full dataset and report basic statistics.
    full, _, _, _ = get_datasets()
    print('Example: {}'.format(full[0]))
    print('Length of example: {}'.format(len(full[0])))
    print('Number of examples: {}'.format(len(full)))
| [
"datetime.datetime.strptime",
"numpy.zeros",
"torch.tensor"
] | [((1232, 1281), 'datetime.datetime.strptime', 'datetime.strptime', (['tokens[3]', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(tokens[3], '%Y-%m-%d %H:%M:%S')\n", (1249, 1281), False, 'from datetime import datetime\n'), ((4059, 4079), 'torch.tensor', 'torch.tensor', (['matrix'], {}), '(matrix)\n', (4071, 4079), False, 'import torch\n'), ((3185, 3218), 'numpy.zeros', 'np.zeros', (['(numPatients, numCodes)'], {}), '((numPatients, numCodes))\n', (3193, 3218), True, 'import numpy as np\n')] |
# Auxiliary routines
import numpy as np
def apply2DRotation(pointx, pointy, theta):
    """Rotate 2D point(s) by angle(s) ``theta``, in place.

    Two input forms are supported: python lists of x and y coordinates
    with a single scalar angle, or numpy arrays of coordinates with a
    matching array of per-point angles.  The coordinate containers are
    mutated in place and also returned as ``[pointx, pointy]``.
    """
    if isinstance(pointx, list) and isinstance(pointy, list):
        # One shared angle applied to every point.
        c, s = np.cos(theta), np.sin(theta)
        for i in range(len(pointx)):
            x, y = pointx[i], pointy[i]
            pointx[i] = c * x - s * y
            pointy[i] = s * x + c * y
        return [pointx, pointy]
    elif isinstance(pointx, np.ndarray) and isinstance(pointy, np.ndarray) and isinstance(theta, np.ndarray):
        # A separate angle for each point.
        for i in range(pointx.shape[0]):
            c, s = np.cos(theta[i]), np.sin(theta[i])
            x, y = pointx[i], pointy[i]
            pointx[i] = c * x - s * y
            pointy[i] = s * x + c * y
        return [pointx, pointy]
    else:
        # Any other combination of input types is rejected.
        raise Exception("invalid input")
def subsample(feet, model, states, controls, current_foots, time_sim, olddt, newdt):
    """Subsample states/CoP between MPC samples and build CoP plot bounds.

    Propagates each coarse state forward with the model matrices at the
    finer step ``newdt`` (olddt/newdt sub-steps per sample), and derives
    constant per-sample CoP constraint corridors around the footsteps.

    Returns [states, cop, subsampled_time, coarse_time, constraints].

    NOTE(review): assumes ``model.A/B/D`` return discrete-time system
    matrices for the given step and that ``states``/``controls`` are 2-D
    arrays of row vectors — confirm with the model class.
    """
    #placeholders for the results
    st = states[0, np.newaxis]
    cop = np.dot(model.D(newdt), st.T)
    for state, control in zip(states, controls[1:, :]):
        s = state[np.newaxis].T
        #subsample
        # Advance the state olddt/newdt times with the fine-step dynamics,
        # holding the coarse control constant over the interval.
        for i in xrange(0, int(olddt/newdt)):
            s = np.dot(model.A(newdt), s) + np.dot(model.B(newdt), control[np.newaxis].T)
            st = np.vstack((st, s.T))
            cop = np.hstack((cop, np.dot(model.D(newdt), s)))
    #new subsampled time vector
    tms = np.linspace(0, time_sim, cop.shape[1])
    #not subsampled time vector for plotting CoP constraints
    tm = np.linspace(0, time_sim, current_foots.shape[0])
    #compute the restrained CoP zone
    # Half of the smallest foot dimension projected on the diagonal.
    zone = (np.min(feet)/np.sqrt(2.))/2.
    CoP_ubounds = np.array([ zone, zone])
    CoP_lbounds = np.array([-zone, -zone])
    #[x+up, y+up, x+down, y+down] - we don't subsample constraints
    constraints = np.hstack((current_foots, current_foots))
    constraints[:, 0] = current_foots[:, 0] + CoP_ubounds[0]
    constraints[:, 1] = current_foots[:, 1] + CoP_ubounds[1]
    constraints[:, 2] = current_foots[:, 0] + CoP_lbounds[0]
    constraints[:, 3] = current_foots[:, 1] + CoP_lbounds[1]
    #return subsampled CoPs and states
    return [st, cop, tms, tm, constraints]
def generate_trajectories(state, current_foots, h_step, dt, save=True):
    """
    generate foot trajectories (x, y, z, theta - doubledots)
    for tracking with whole body controller
    output -> accelerations

    Each swing phase gets a cubic-derived acceleration profile between
    consecutive footholds (taken every 8th row of current_foots), plus a
    zero segment for the double-support phase; z rises to h_step and back.
    If ``save`` is True the flattened profiles and matching CoM columns
    of ``state`` are written to text files in the working directory.
    """
    #support states durations
    ss = 0.7   # single-support duration [s]
    tds = 0.1  # double-support duration [s]
    #collect values from feet coords
    x = current_foots[::8, 0]
    y = current_foots[::8, 1]
    theta = current_foots[::8, 2]
    #build time vector for x, y, z, theta
    time_nzero = np.linspace(0, ss, ss/dt)
    time_zero = np.linspace(0, tds, tds/dt)
    time_z = np.linspace(0, ss/2, (ss/2)/dt)
    pzero = np.poly1d([0])
    #first step handling
    #build polynomials
    # Acceleration of a cubic from x[0] to x[1] over ss seconds.
    pxdd = np.poly1d([(-12.0/(ss**3))*(x[1] - x[0]), (6.0/(ss**2))*(x[1] - x[0])])
    #y case
    # NOTE(review): y[1] - y[1] is identically zero — presumably
    # y[1] - y[0] was intended; confirm before relying on the y profile.
    pydd = np.poly1d([(-12.0/(ss**3))*(y[1] - y[1]), (6.0/(ss**2))*(y[1] - y[1])])
    #theta case
    ptdd = np.poly1d([(-12.0/(ss**3))*(theta[1] - theta[0]), (6.0/(ss**2))*(theta[1] - theta[0])])
    #z case
    # z goes 0 -> h_step over the first half of the step, then back down.
    pzdd1 = np.poly1d([(-12.0/((ss/2)**3))*(h_step - 0.0), (6.0/((ss/2)**2))*(h_step - 0.0)])
    pzdd2 = np.poly1d([(-12.0/((ss/2)**3))*(0.0 - h_step), (6.0/((ss/2)**2))*(0.0 - h_step)])
    #evaluate polynomials
    pyx = np.hstack((pxdd(time_nzero), pzero(time_zero)))
    pyy = np.hstack((pydd(time_nzero), pzero(time_zero)))
    pytheta = np.hstack((ptdd(time_nzero), pzero(time_zero)))
    pyz = np.hstack((pzdd1(time_z), pzdd2(time_z), pzero(time_zero)))
    # Remaining steps: each swing moves from foothold idx to idx+2
    # (the same foot's previous and next placement).
    for idx in xrange(x.shape[0]-2):
        #build polynomials
        pxdd = np.poly1d([(-12.0/(ss**3))*(x[idx+2] - x[idx]), (6.0/(ss**2))*(x[idx+2] - x[idx])])
        #y case
        pydd = np.poly1d([(-12.0/(ss**3))*(y[idx+2] - y[idx]), (6.0/(ss**2))*(y[idx+2] - y[idx])])
        #theta case
        ptdd = np.poly1d([(-12.0/(ss**3))*(theta[idx+2] - theta[idx]), (6.0/(ss**2))*(theta[idx+2] - theta[idx])])
        #z case
        pzdd1 = np.poly1d([(-12.0/((ss/2)**3))*(h_step - 0.0), (6.0/((ss/2)**2))*(h_step - 0.0)])
        pzdd2 = np.poly1d([(-12.0/((ss/2)**3))*(0.0 - h_step), (6.0/((ss/2)**2))*(0.0 - h_step)])
        #evaluate polynomials
        pyx = np.vstack((pyx, np.hstack((pxdd(time_nzero), pzero(time_zero)))))
        pyy = np.vstack((pyy, np.hstack((pydd(time_nzero), pzero(time_zero)))))
        pytheta = np.vstack((pytheta, np.hstack((ptdd(time_nzero), pzero(time_zero)))))
        pyz = np.vstack((pyz, np.hstack((pzdd1(time_z), pzdd2(time_z), pzero(time_zero)))))
    if save:
        #save stuff for whole body motion
        np.savetxt('fx.txt', pyx.ravel(), delimiter=' ')
        np.savetxt('fy.txt', pyy.ravel(), delimiter=' ')
        np.savetxt('fz.txt', pyz.ravel(), delimiter=' ')
        np.savetxt('ftheta.txt', pytheta.ravel(), delimiter=' ')
        # CoM x/y/theta columns trimmed to the trajectory length.
        np.savetxt('xcom.txt', state[:pyx.ravel().shape[0], 2], delimiter=' ')
        np.savetxt('ycom.txt', state[:pyx.ravel().shape[0], 5], delimiter=' ')
        np.savetxt('thetacom.txt', state[:pyx.ravel().shape[0], 8], delimiter=' ')
    return [pyx, pyy, pyz, pytheta]
| [
"numpy.poly1d",
"numpy.hstack",
"numpy.min",
"numpy.sin",
"numpy.array",
"numpy.linspace",
"numpy.cos",
"numpy.dot",
"numpy.vstack",
"numpy.sqrt"
] | [((1866, 1904), 'numpy.linspace', 'np.linspace', (['(0)', 'time_sim', 'cop.shape[1]'], {}), '(0, time_sim, cop.shape[1])\n', (1877, 1904), True, 'import numpy as np\n'), ((1975, 2023), 'numpy.linspace', 'np.linspace', (['(0)', 'time_sim', 'current_foots.shape[0]'], {}), '(0, time_sim, current_foots.shape[0])\n', (1986, 2023), True, 'import numpy as np\n'), ((2125, 2147), 'numpy.array', 'np.array', (['[zone, zone]'], {}), '([zone, zone])\n', (2133, 2147), True, 'import numpy as np\n'), ((2167, 2191), 'numpy.array', 'np.array', (['[-zone, -zone]'], {}), '([-zone, -zone])\n', (2175, 2191), True, 'import numpy as np\n'), ((2282, 2323), 'numpy.hstack', 'np.hstack', (['(current_foots, current_foots)'], {}), '((current_foots, current_foots))\n', (2291, 2323), True, 'import numpy as np\n'), ((3131, 3158), 'numpy.linspace', 'np.linspace', (['(0)', 'ss', '(ss / dt)'], {}), '(0, ss, ss / dt)\n', (3142, 3158), True, 'import numpy as np\n'), ((3173, 3202), 'numpy.linspace', 'np.linspace', (['(0)', 'tds', '(tds / dt)'], {}), '(0, tds, tds / dt)\n', (3184, 3202), True, 'import numpy as np\n'), ((3217, 3252), 'numpy.linspace', 'np.linspace', (['(0)', '(ss / 2)', '(ss / 2 / dt)'], {}), '(0, ss / 2, ss / 2 / dt)\n', (3228, 3252), True, 'import numpy as np\n'), ((3265, 3279), 'numpy.poly1d', 'np.poly1d', (['[0]'], {}), '([0])\n', (3274, 3279), True, 'import numpy as np\n'), ((3338, 3413), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (x[1] - x[0]), 6.0 / ss ** 2 * (x[1] - x[0])]'], {}), '([-12.0 / ss ** 3 * (x[1] - x[0]), 6.0 / ss ** 2 * (x[1] - x[0])])\n', (3347, 3413), True, 'import numpy as np\n'), ((3432, 3507), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (y[1] - y[1]), 6.0 / ss ** 2 * (y[1] - y[1])]'], {}), '([-12.0 / ss ** 3 * (y[1] - y[1]), 6.0 / ss ** 2 * (y[1] - y[1])])\n', (3441, 3507), True, 'import numpy as np\n'), ((3530, 3626), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (theta[1] - theta[0]), 6.0 / ss ** 2 * (theta[1] - theta[0])\n ]'], {}), 
'([-12.0 / ss ** 3 * (theta[1] - theta[0]), 6.0 / ss ** 2 * (theta[\n 1] - theta[0])])\n', (3539, 3626), True, 'import numpy as np\n'), ((3640, 3734), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / (ss / 2) ** 3 * (h_step - 0.0), 6.0 / (ss / 2) ** 2 * (h_step - 0.0)]'], {}), '([-12.0 / (ss / 2) ** 3 * (h_step - 0.0), 6.0 / (ss / 2) ** 2 * (\n h_step - 0.0)])\n', (3649, 3734), True, 'import numpy as np\n'), ((3733, 3827), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / (ss / 2) ** 3 * (0.0 - h_step), 6.0 / (ss / 2) ** 2 * (0.0 - h_step)]'], {}), '([-12.0 / (ss / 2) ** 3 * (0.0 - h_step), 6.0 / (ss / 2) ** 2 * (\n 0.0 - h_step)])\n', (3742, 3827), True, 'import numpy as np\n'), ((4179, 4274), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (x[idx + 2] - x[idx]), 6.0 / ss ** 2 * (x[idx + 2] - x[idx])\n ]'], {}), '([-12.0 / ss ** 3 * (x[idx + 2] - x[idx]), 6.0 / ss ** 2 * (x[idx +\n 2] - x[idx])])\n', (4188, 4274), True, 'import numpy as np\n'), ((4293, 4388), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (y[idx + 2] - y[idx]), 6.0 / ss ** 2 * (y[idx + 2] - y[idx])\n ]'], {}), '([-12.0 / ss ** 3 * (y[idx + 2] - y[idx]), 6.0 / ss ** 2 * (y[idx +\n 2] - y[idx])])\n', (4302, 4388), True, 'import numpy as np\n'), ((4411, 4522), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / ss ** 3 * (theta[idx + 2] - theta[idx]), 6.0 / ss ** 2 * (theta[\n idx + 2] - theta[idx])]'], {}), '([-12.0 / ss ** 3 * (theta[idx + 2] - theta[idx]), 6.0 / ss ** 2 *\n (theta[idx + 2] - theta[idx])])\n', (4420, 4522), True, 'import numpy as np\n'), ((4541, 4635), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / (ss / 2) ** 3 * (h_step - 0.0), 6.0 / (ss / 2) ** 2 * (h_step - 0.0)]'], {}), '([-12.0 / (ss / 2) ** 3 * (h_step - 0.0), 6.0 / (ss / 2) ** 2 * (\n h_step - 0.0)])\n', (4550, 4635), True, 'import numpy as np\n'), ((4639, 4733), 'numpy.poly1d', 'np.poly1d', (['[-12.0 / (ss / 2) ** 3 * (0.0 - h_step), 6.0 / (ss / 2) ** 2 * (0.0 - h_step)]'], {}), '([-12.0 / (ss / 2) ** 3 * (0.0 - h_step), 6.0 / (ss / 2) ** 2 * (\n 
0.0 - h_step)])\n', (4648, 4733), True, 'import numpy as np\n'), ((474, 494), 'numpy.dot', 'np.dot', (['r_mat', 'point'], {}), '(r_mat, point)\n', (480, 494), True, 'import numpy as np\n'), ((1745, 1765), 'numpy.vstack', 'np.vstack', (['(st, s.T)'], {}), '((st, s.T))\n', (1754, 1765), True, 'import numpy as np\n'), ((2079, 2091), 'numpy.min', 'np.min', (['feet'], {}), '(feet)\n', (2085, 2091), True, 'import numpy as np\n'), ((2092, 2104), 'numpy.sqrt', 'np.sqrt', (['(2.0)'], {}), '(2.0)\n', (2099, 2104), True, 'import numpy as np\n'), ((416, 454), 'numpy.array', 'np.array', (['[[pointx[idx], pointy[idx]]]'], {}), '([[pointx[idx], pointy[idx]]])\n', (424, 454), True, 'import numpy as np\n'), ((991, 1011), 'numpy.dot', 'np.dot', (['r_mat', 'point'], {}), '(r_mat, point)\n', (997, 1011), True, 'import numpy as np\n'), ((279, 292), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (285, 292), True, 'import numpy as np\n'), ((312, 325), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (318, 325), True, 'import numpy as np\n'), ((327, 340), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (333, 340), True, 'import numpy as np\n'), ((933, 971), 'numpy.array', 'np.array', (['[[pointx[idx], pointy[idx]]]'], {}), '([[pointx[idx], pointy[idx]]])\n', (941, 971), True, 'import numpy as np\n'), ((295, 308), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (301, 308), True, 'import numpy as np\n'), ((831, 849), 'numpy.cos', 'np.cos', (['theta[idx]'], {}), '(theta[idx])\n', (837, 849), True, 'import numpy as np\n'), ((874, 892), 'numpy.sin', 'np.sin', (['theta[idx]'], {}), '(theta[idx])\n', (880, 892), True, 'import numpy as np\n'), ((894, 912), 'numpy.cos', 'np.cos', (['theta[idx]'], {}), '(theta[idx])\n', (900, 912), True, 'import numpy as np\n'), ((852, 870), 'numpy.sin', 'np.sin', (['theta[idx]'], {}), '(theta[idx])\n', (858, 870), True, 'import numpy as np\n')] |
import json
import random
import collections
import torch
from torch.autograd import Variable
import torch.utils.data as Data
from torchvision.ops import box_iou
from config import opt
from utils import non_model
from make_dataset import train_Dataset, val_Dataset
from net import model_tools
import numpy as np
from tqdm import tqdm
import warnings
import resource
# Silence library warnings and raise the soft open-file limit so that the
# many DataLoader worker processes do not exhaust file descriptors; the hard
# limit (rlimit[1]) is left untouched.
warnings.filterwarnings("ignore")
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (2000, rlimit[1]))
def train(**kwargs):
    """Train a detection network over the configured cross-validation folds.

    CLI keyword arguments override the config loaded from
    ``../config/all.txt``.  For every fold index in ``opt.kidx`` this
    function seeds all RNGs, builds train/val data loaders, trains the
    network, evaluates detection mAP / precision / recall each epoch, and
    checkpoints the best model (plus a JSON dump of its FP/TP statistics).

    Parameters
    ----------
    **kwargs : dict
        Config overrides; parsed by ``non_model.read_kwargs``.
    """
    # stage 1: merge CLI overrides with the on-disk config.
    kwargs, data_info_dict = non_model.read_kwargs(kwargs)
    opt.load_config('../config/all.txt')
    config_dict = opt._spec(kwargs)
    # stage 2: prepare output folders for checkpoints and run info.
    save_model_folder = '../model/%s/' % (opt.path_key) + str(opt.net_idx) + '/'
    # info
    save_info_folder = '../info/%s/' % (opt.path_key) + str(opt.net_idx) + '/'
    non_model.make_path_folder(save_model_folder)
    non_model.make_path_folder(save_info_folder)
    with open(save_info_folder + 'config.json', 'w', encoding='utf-8') as json_file:
        json.dump(config_dict, json_file, ensure_ascii=False, indent=4)
    fold_list = data_info_dict['Train']
    for k in opt.kidx:
        # Seed every RNG and force deterministic cuDNN so each fold's run is
        # reproducible.
        GLOBAL_SEED = 2021
        random.seed(GLOBAL_SEED)
        np.random.seed(GLOBAL_SEED)
        torch.manual_seed(GLOBAL_SEED)
        torch.cuda.manual_seed(GLOBAL_SEED)
        torch.cuda.manual_seed_all(GLOBAL_SEED)
        torch.backends.cudnn.enabled = False
        torch.backends.cudnn.benchmark = False
        torch.backends.cudnn.deterministic = True
        data_gpu = opt.gpu_idx
        torch.cuda.set_device(data_gpu)
        net = model_tools.get_model()
        net = net.cuda()
        lr = opt.lr
        if opt.optim == 'SGD':
            optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, net.parameters()),
                                        lr=lr, weight_decay=opt.wd, momentum=0.9)
            print('================== SGD lr = %.6f ==================' % lr)
        elif opt.optim == 'AdamW':
            optimizer = torch.optim.AdamW(filter(lambda p: p.requires_grad, net.parameters()),
                                          lr=lr, weight_decay=opt.wd)
            print('================== AdamW lr = %.6f ==================' % lr)
        if opt.cos_lr:
            scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=opt.Tmax,
                                                                   eta_min=opt.lr / opt.lr_gap)
        else:
            scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=opt.patience)
        def set_seed(seed):
            # Re-seed every RNG for a DataLoader worker.
            random.seed(seed)
            np.random.seed(seed)
            torch.manual_seed(seed)
            torch.cuda.manual_seed(seed)
            torch.cuda.manual_seed_all(seed)
        GLOBAL_WORKER_ID = None
        def worker_init_fn(worker_id):
            # Give each DataLoader worker a distinct deterministic seed.
            global GLOBAL_WORKER_ID
            GLOBAL_WORKER_ID = worker_id
            set_seed(GLOBAL_SEED + worker_id)
        train_slice_list = fold_list[str(k)]['train']
        train_set = train_Dataset(train_slice_list)
        train_data_num = len(train_set.img_list)
        train_batch = Data.DataLoader(dataset=train_set, batch_size=opt.train_bs, shuffle=True,
                                        num_workers=opt.num_workers, worker_init_fn=worker_init_fn,
                                        drop_last=True, collate_fn=non_model.num_collate)
        print('load train data done, num =', train_data_num)
        val_slice_list = fold_list[str(k)]['val']
        val_set = val_Dataset(val_slice_list)
        val_data_num = len(val_set.img_list)
        val_batch = Data.DataLoader(dataset=val_set, batch_size=opt.val_bs, shuffle=False,
                                    num_workers=opt.test_num_workers, worker_init_fn=worker_init_fn)
        print('load val data done, num =', val_data_num)
        # return
        best_net = None
        epoch_save = 0
        best_metric = 0
        lr_change = 0
        loss_hist = collections.deque(maxlen=500)
        for e in range(opt.epoch):
            tmp_epoch = e + opt.start_epoch
            print('====================== Folder %s Epoch %s ========================' % (k, tmp_epoch))
            tmp_lr = optimizer.__getstate__()['param_groups'][0]['lr']
            if opt.cycle_r > 0:
                # Warm restarts: reset best-model tracking at every cycle start.
                if e % (2 * opt.Tmax) == 0:
                    best_net = None
                    best_metric_list = np.zeros((opt.label_length - 1))
                    best_metric = 0
                    min_loss = 10
            else:
                # Early stopping: no improvement for gap_epoch epochs, or the
                # LR has already been reduced twice.
                if tmp_epoch > epoch_save + opt.gap_epoch:
                    break
                if lr_change == 2:
                    break
            net.train()
            is_rcnn = opt.model[-4:] == 'rcnn'
            for i, return_list in tqdm(enumerate(train_batch)):
                case_name, x, y = return_list
                im = Variable(x.type(torch.FloatTensor).cuda())
                label = Variable(y.type(torch.FloatTensor).cuda())
                if e == 0 and i == 0:
                    print('input size:', im.shape)
                # forward
                if is_rcnn:
                    # RCNN-style models return a dict of partial losses.
                    loss_dict = net(im, label)
                    loss = sum(ls for ls in loss_dict.values())
                else:
                    classification_loss, regression_loss = net([im, label])
                    classification_loss = classification_loss.mean()
                    regression_loss = regression_loss.mean()
                    loss = classification_loss + regression_loss
                if bool(loss == 0):
                    continue
                optimizer.zero_grad()
                loss.backward()
                torch.nn.utils.clip_grad_norm_(net.parameters(), 0.1)
                optimizer.step()
                loss_hist.append(float(loss))
                if i % 50 == 0:
                    # BUG FIX: classification_loss / regression_loss only exist
                    # on the non-RCNN path; referencing them unconditionally
                    # raised NameError for RCNN models on the first logged step.
                    if is_rcnn:
                        print('Ep: {} | Iter: {} | Running loss: {:1.4f}'.format(
                            tmp_epoch, i, np.mean(loss_hist)))
                    else:
                        print(
                            'Ep: {} | Iter: {} | Cls loss: {:1.4f} | Reg loss: {:1.4f} | Running loss: {:1.4f}'.format(
                                tmp_epoch, i, float(classification_loss), float(regression_loss), np.mean(loss_hist)))
                if not is_rcnn:
                    del classification_loss
                    del regression_loss
            torch.cuda.empty_cache()
            # -------- Validation: collect detections and annotations --------
            net = net.eval()
            val_loss = 0
            data_length = val_data_num
            all_detections = [None for j in range(data_length)]
            all_annotations = [None for j in range(data_length)]
            with torch.no_grad():
                for i, return_list in tqdm(enumerate(val_batch)):
                    case_name, x, y = return_list
                    ##################### Get detections ######################
                    im = Variable(x.type(torch.FloatTensor).cuda())
                    if e == 0 and i == 0:
                        print('input size:', im.shape)
                    # forward
                    scores, labels, boxes = net(im)
                    scores = scores.detach().cpu().numpy()
                    labels = labels.detach().cpu().numpy()
                    boxes = boxes.detach().cpu().numpy()
                    indices = np.where(scores > opt.s_th)[0]
                    if indices.shape[0] > 0:
                        scores = scores[indices]
                        boxes = boxes[indices]
                        labels = labels[indices]
                        # find the order with which to sort the scores
                        scores_sort = np.argsort(-scores)[:opt.max_dets]
                        # select detections
                        image_boxes = boxes[scores_sort]
                        image_scores = scores[scores_sort]
                        image_labels = labels[scores_sort]
                        image_detections = np.concatenate(
                            [image_boxes, np.expand_dims(image_scores, axis=1), np.expand_dims(image_labels, axis=1)],
                            axis=1)
                        all_detections[i] = image_detections[:, :-1]
                    else:
                        all_detections[i] = np.zeros((0, 5))
                    ###########################################################
                    ##################### Get annotations #####################
                    annotations = y.detach().cpu().numpy()[0]
                    all_annotations[i] = annotations[:, :4]
                    ###########################################################
            # -------- Match detections to annotations at opt.iou_th --------
            false_positives = np.zeros((0,))
            true_positives = np.zeros((0,))
            scores = np.zeros((0,))
            num_annotations = 0.0
            for i in range(data_length):
                detections = all_detections[i]
                annotations = all_annotations[i]
                num_annotations += annotations.shape[0]
                detected_annotations = []
                for d in detections:
                    scores = np.append(scores, d[4])
                    if annotations.shape[0] == 0:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
                        continue
                    d_tensor = torch.tensor(d[:4][np.newaxis])
                    a_tensor = torch.tensor(annotations)
                    overlaps = box_iou(d_tensor, a_tensor).numpy()
                    assigned_annotation = np.argmax(overlaps, axis=1)
                    max_overlap = overlaps[0, assigned_annotation]
                    # Each annotation may only be matched once.
                    if max_overlap >= opt.iou_th and assigned_annotation not in detected_annotations:
                        false_positives = np.append(false_positives, 0)
                        true_positives = np.append(true_positives, 1)
                        detected_annotations.append(assigned_annotation)
                    else:
                        false_positives = np.append(false_positives, 1)
                        true_positives = np.append(true_positives, 0)
            if len(false_positives) == 0 and len(true_positives) == 0:
                print('No detection')
            else:
                # sort by score
                indices = np.argsort(-scores)
                scores = scores[indices]
                false_positives = false_positives[indices]
                true_positives = true_positives[indices]
                # compute false positives and true positives
                false_positives = np.cumsum(false_positives)
                true_positives = np.cumsum(true_positives)
                # compute recall and precision
                recall = true_positives / num_annotations
                precision = true_positives / np.maximum(true_positives + false_positives, np.finfo(np.float64).eps)
                # compute average precision
                average_precision = non_model.compute_ap(recall, precision)
                print('mAP: {}'.format(average_precision))
                print("Precision: ", precision[-1])
                print("Recall: ", recall[-1])
                if average_precision > best_metric:
                    # New best fold model: checkpoint it plus its statistics.
                    best_metric = average_precision
                    epoch_save = tmp_epoch
                    save_dict = {}
                    save_dict['net'] = net
                    save_dict['config_dict'] = config_dict
                    torch.save(save_dict, save_model_folder + 'K%s_%s_AP_%.4f_Pr_%.4f_Re_%.4f.pkl' %
                               (k, str(epoch_save).rjust(3, '0'), best_metric, precision[-1], recall[-1]))
                    info_dict = {
                        'fp': false_positives.tolist(),
                        'tp': true_positives.tolist(),
                        'score': scores.tolist(),
                        'anno': num_annotations
                    }
                    with open(save_info_folder + 'K%s_%s_AP_%.4f_Pr_%.4f_Re_%.4f.json' %
                              (k, str(epoch_save).rjust(3, '0'), best_metric, precision[-1], recall[-1]), 'w') as f:
                        json.dump(info_dict, f, indent=2)
                    del save_dict
                    del info_dict
                    print('====================== model save ========================')
            if opt.cos_lr == True:
                scheduler.step()
            else:
                scheduler.step(best_metric)
            before_lr = optimizer.__getstate__()['param_groups'][0]['lr']
            if before_lr != tmp_lr:
                # Scheduler reduced the LR; reset the early-stop counter.
                epoch_save = tmp_epoch
                lr_change += 1
                print('================== lr change to %.6f ==================' % before_lr)
        torch.cuda.empty_cache()
if __name__ == '__main__':
    # Expose this module's functions (notably train) as a CLI via Fire,
    # e.g. `python this_script.py train --lr=0.001`.
    import fire
    fire.Fire()
| [
"numpy.random.seed",
"numpy.argmax",
"numpy.argsort",
"utils.non_model.make_path_folder",
"numpy.mean",
"torch.no_grad",
"utils.non_model.read_kwargs",
"make_dataset.val_Dataset",
"collections.deque",
"net.model_tools.get_model",
"torchvision.ops.box_iou",
"torch.utils.data.DataLoader",
"res... | [((367, 400), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (390, 400), False, 'import warnings\n'), ((413, 455), 'resource.getrlimit', 'resource.getrlimit', (['resource.RLIMIT_NOFILE'], {}), '(resource.RLIMIT_NOFILE)\n', (431, 455), False, 'import resource\n'), ((456, 517), 'resource.setrlimit', 'resource.setrlimit', (['resource.RLIMIT_NOFILE', '(2000, rlimit[1])'], {}), '(resource.RLIMIT_NOFILE, (2000, rlimit[1]))\n', (474, 517), False, 'import resource\n'), ((584, 613), 'utils.non_model.read_kwargs', 'non_model.read_kwargs', (['kwargs'], {}), '(kwargs)\n', (605, 613), False, 'from utils import non_model\n'), ((618, 654), 'config.opt.load_config', 'opt.load_config', (['"""../config/all.txt"""'], {}), "('../config/all.txt')\n", (633, 654), False, 'from config import opt\n'), ((673, 690), 'config.opt._spec', 'opt._spec', (['kwargs'], {}), '(kwargs)\n', (682, 690), False, 'from config import opt\n'), ((881, 926), 'utils.non_model.make_path_folder', 'non_model.make_path_folder', (['save_model_folder'], {}), '(save_model_folder)\n', (907, 926), False, 'from utils import non_model\n'), ((931, 975), 'utils.non_model.make_path_folder', 'non_model.make_path_folder', (['save_info_folder'], {}), '(save_info_folder)\n', (957, 975), False, 'from utils import non_model\n'), ((12764, 12775), 'fire.Fire', 'fire.Fire', ([], {}), '()\n', (12773, 12775), False, 'import fire\n'), ((1069, 1132), 'json.dump', 'json.dump', (['config_dict', 'json_file'], {'ensure_ascii': '(False)', 'indent': '(4)'}), '(config_dict, json_file, ensure_ascii=False, indent=4)\n', (1078, 1132), False, 'import json\n'), ((1233, 1257), 'random.seed', 'random.seed', (['GLOBAL_SEED'], {}), '(GLOBAL_SEED)\n', (1244, 1257), False, 'import random\n'), ((1266, 1293), 'numpy.random.seed', 'np.random.seed', (['GLOBAL_SEED'], {}), '(GLOBAL_SEED)\n', (1280, 1293), True, 'import numpy as np\n'), ((1302, 1332), 'torch.manual_seed', 'torch.manual_seed', 
(['GLOBAL_SEED'], {}), '(GLOBAL_SEED)\n', (1319, 1332), False, 'import torch\n'), ((1341, 1376), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['GLOBAL_SEED'], {}), '(GLOBAL_SEED)\n', (1363, 1376), False, 'import torch\n'), ((1385, 1424), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['GLOBAL_SEED'], {}), '(GLOBAL_SEED)\n', (1411, 1424), False, 'import torch\n'), ((1607, 1638), 'torch.cuda.set_device', 'torch.cuda.set_device', (['data_gpu'], {}), '(data_gpu)\n', (1628, 1638), False, 'import torch\n'), ((1654, 1677), 'net.model_tools.get_model', 'model_tools.get_model', ([], {}), '()\n', (1675, 1677), False, 'from net import model_tools\n'), ((3108, 3139), 'make_dataset.train_Dataset', 'train_Dataset', (['train_slice_list'], {}), '(train_slice_list)\n', (3121, 3139), False, 'from make_dataset import train_Dataset, val_Dataset\n'), ((3211, 3403), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'train_set', 'batch_size': 'opt.train_bs', 'shuffle': '(True)', 'num_workers': 'opt.num_workers', 'worker_init_fn': 'worker_init_fn', 'drop_last': '(True)', 'collate_fn': 'non_model.num_collate'}), '(dataset=train_set, batch_size=opt.train_bs, shuffle=True,\n num_workers=opt.num_workers, worker_init_fn=worker_init_fn, drop_last=\n True, collate_fn=non_model.num_collate)\n', (3226, 3403), True, 'import torch.utils.data as Data\n'), ((3601, 3628), 'make_dataset.val_Dataset', 'val_Dataset', (['val_slice_list'], {}), '(val_slice_list)\n', (3612, 3628), False, 'from make_dataset import train_Dataset, val_Dataset\n'), ((3694, 3833), 'torch.utils.data.DataLoader', 'Data.DataLoader', ([], {'dataset': 'val_set', 'batch_size': 'opt.val_bs', 'shuffle': '(False)', 'num_workers': 'opt.test_num_workers', 'worker_init_fn': 'worker_init_fn'}), '(dataset=val_set, batch_size=opt.val_bs, shuffle=False,\n num_workers=opt.test_num_workers, worker_init_fn=worker_init_fn)\n', (3709, 3833), True, 'import torch.utils.data as Data\n'), ((4055, 4084), 
'collections.deque', 'collections.deque', ([], {'maxlen': '(500)'}), '(maxlen=500)\n', (4072, 4084), False, 'import collections\n'), ((2336, 2438), 'torch.optim.lr_scheduler.CosineAnnealingLR', 'torch.optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'T_max': 'opt.Tmax', 'eta_min': '(opt.lr / opt.lr_gap)'}), '(optimizer, T_max=opt.Tmax,\n eta_min=opt.lr / opt.lr_gap)\n', (2378, 2438), False, 'import torch\n'), ((2539, 2627), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer', '"""max"""'], {'patience': 'opt.patience'}), "(optimizer, 'max', patience=opt.\n patience)\n", (2581, 2627), False, 'import torch\n'), ((2664, 2681), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (2675, 2681), False, 'import random\n'), ((2694, 2714), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (2708, 2714), True, 'import numpy as np\n'), ((2727, 2750), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (2744, 2750), False, 'import torch\n'), ((2763, 2791), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['seed'], {}), '(seed)\n', (2785, 2791), False, 'import torch\n'), ((2804, 2836), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['seed'], {}), '(seed)\n', (2830, 2836), False, 'import torch\n'), ((6262, 6286), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (6284, 6286), False, 'import torch\n'), ((8551, 8565), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8559, 8565), True, 'import numpy as np\n'), ((8595, 8609), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8603, 8609), True, 'import numpy as np\n'), ((8631, 8645), 'numpy.zeros', 'np.zeros', (['(0,)'], {}), '((0,))\n', (8639, 8645), True, 'import numpy as np\n'), ((12689, 12713), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (12711, 12713), False, 'import torch\n'), ((6529, 6544), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (6542, 
6544), False, 'import torch\n'), ((10230, 10249), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (10240, 10249), True, 'import numpy as np\n'), ((10503, 10529), 'numpy.cumsum', 'np.cumsum', (['false_positives'], {}), '(false_positives)\n', (10512, 10529), True, 'import numpy as np\n'), ((10563, 10588), 'numpy.cumsum', 'np.cumsum', (['true_positives'], {}), '(true_positives)\n', (10572, 10588), True, 'import numpy as np\n'), ((10892, 10931), 'utils.non_model.compute_ap', 'non_model.compute_ap', (['recall', 'precision'], {}), '(recall, precision)\n', (10912, 10931), False, 'from utils import non_model\n'), ((4493, 4523), 'numpy.zeros', 'np.zeros', (['(opt.label_length - 1)'], {}), '(opt.label_length - 1)\n', (4501, 4523), True, 'import numpy as np\n'), ((8983, 9006), 'numpy.append', 'np.append', (['scores', 'd[4]'], {}), '(scores, d[4])\n', (8992, 9006), True, 'import numpy as np\n'), ((9265, 9296), 'torch.tensor', 'torch.tensor', (['d[:4][np.newaxis]'], {}), '(d[:4][np.newaxis])\n', (9277, 9296), False, 'import torch\n'), ((9328, 9353), 'torch.tensor', 'torch.tensor', (['annotations'], {}), '(annotations)\n', (9340, 9353), False, 'import torch\n'), ((9463, 9490), 'numpy.argmax', 'np.argmax', (['overlaps'], {'axis': '(1)'}), '(overlaps, axis=1)\n', (9472, 9490), True, 'import numpy as np\n'), ((7198, 7225), 'numpy.where', 'np.where', (['(scores > opt.s_th)'], {}), '(scores > opt.s_th)\n', (7206, 7225), True, 'import numpy as np\n'), ((8140, 8156), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (8148, 8156), True, 'import numpy as np\n'), ((9100, 9129), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (9109, 9129), True, 'import numpy as np\n'), ((9171, 9199), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (9180, 9199), True, 'import numpy as np\n'), ((9703, 9732), 'numpy.append', 'np.append', (['false_positives', '(0)'], {}), '(false_positives, 0)\n', 
(9712, 9732), True, 'import numpy as np\n'), ((9774, 9802), 'numpy.append', 'np.append', (['true_positives', '(1)'], {}), '(true_positives, 1)\n', (9783, 9802), True, 'import numpy as np\n'), ((9944, 9973), 'numpy.append', 'np.append', (['false_positives', '(1)'], {}), '(false_positives, 1)\n', (9953, 9973), True, 'import numpy as np\n'), ((10015, 10043), 'numpy.append', 'np.append', (['true_positives', '(0)'], {}), '(true_positives, 0)\n', (10024, 10043), True, 'import numpy as np\n'), ((12079, 12112), 'json.dump', 'json.dump', (['info_dict', 'f'], {'indent': '(2)'}), '(info_dict, f, indent=2)\n', (12088, 12112), False, 'import json\n'), ((6151, 6169), 'numpy.mean', 'np.mean', (['loss_hist'], {}), '(loss_hist)\n', (6158, 6169), True, 'import numpy as np\n'), ((7530, 7549), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (7540, 7549), True, 'import numpy as np\n'), ((9385, 9412), 'torchvision.ops.box_iou', 'box_iou', (['d_tensor', 'a_tensor'], {}), '(d_tensor, a_tensor)\n', (9392, 9412), False, 'from torchvision.ops import box_iou\n'), ((10785, 10805), 'numpy.finfo', 'np.finfo', (['np.float64'], {}), '(np.float64)\n', (10793, 10805), True, 'import numpy as np\n'), ((7887, 7923), 'numpy.expand_dims', 'np.expand_dims', (['image_scores'], {'axis': '(1)'}), '(image_scores, axis=1)\n', (7901, 7923), True, 'import numpy as np\n'), ((7925, 7961), 'numpy.expand_dims', 'np.expand_dims', (['image_labels'], {'axis': '(1)'}), '(image_labels, axis=1)\n', (7939, 7961), True, 'import numpy as np\n')] |
"""The Bayesian Neural Network that learns the transformation of the target
distribution to the predictor distribution, which results in the conditional
distribution of the predictor conditional on the target.
"""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from psych_metric.distrib.mcmc import get_mcmc_kernel
def mcmc_sample_log_prob(
    params,
    data,
    targets,
    origin_adjust,
    rotation_mat,
    scale_identity_multiplier=0.01,
):
    """MCMC target log prob of the simplex-transform BNN.

    Rotates the probability vectors into the transformed space, pushes them
    through a one-hidden-layer sigmoid MLP built from ``params``, maps the
    result back, normalizes with softmax, and scores the residual against a
    zero-mean isotropic multivariate Gaussian.

    Notes
    -----
    The BNN is rebuilt inline because TFP's MCMC target log prob does not
    play well with a network constructed outside of this function and passed
    in as constant variables.
    """
    x = tf.convert_to_tensor(data.astype(np.float32), dtype=tf.float32)
    y = tf.convert_to_tensor(targets.astype(np.float32), dtype=tf.float32)
    rot = tf.convert_to_tensor(rotation_mat.astype(np.float32), dtype=tf.float32)
    shift = tf.convert_to_tensor(
        origin_adjust.astype(np.float32),
        dtype=tf.float32,
    )
    w_hidden, b_hidden, w_out, b_out = params
    # Forward pass: shift/rotate -> sigmoid hidden -> linear -> unrotate/softmax.
    rotated = (x - shift) @ rot
    hidden_act = tf.nn.sigmoid(rotated @ w_hidden + b_hidden)
    raw_out = hidden_act @ w_out + b_out
    pred = tf.nn.softmax(raw_out @ tf.transpose(rot) + shift)
    residual_dist = tfp.distributions.MultivariateNormalDiag(
        loc=tf.zeros([data.shape[1]]),
        scale_identity_multiplier=scale_identity_multiplier,
    )
    return tf.reduce_sum(residual_dist.log_prob(pred - y))
def l2_dist(
    params,
    data,
    targets,
    origin_adjust,
    rotation_mat,
):
    """Negative summed L2 distance between BNN outputs and target vectors.

    Uses the same shift/rotate -> sigmoid MLP -> unrotate/softmax forward
    pass as the Gaussian target function, but scores with Euclidean distance
    instead of a log prob.  The maximum value is 0; otherwise negative.

    Notes
    -----
    The BNN is rebuilt inline because TFP's MCMC target log prob does not
    play well with a network constructed outside of this function and passed
    in as constant variables.
    """
    x = tf.convert_to_tensor(data.astype(np.float32), dtype=tf.float32)
    y = tf.convert_to_tensor(targets.astype(np.float32), dtype=tf.float32)
    rot = tf.convert_to_tensor(rotation_mat.astype(np.float32), dtype=tf.float32)
    shift = tf.convert_to_tensor(
        origin_adjust.astype(np.float32),
        dtype=tf.float32,
    )
    w_hidden, b_hidden, w_out, b_out = params
    rotated = (x - shift) @ rot
    hidden_act = tf.nn.sigmoid(rotated @ w_hidden + b_hidden)
    raw_out = hidden_act @ w_out + b_out
    pred = tf.nn.softmax(raw_out @ tf.transpose(rot) + shift)
    # Negate so larger is better (max 0), matching an MCMC target log prob.
    return -tf.reduce_sum(tf.norm(pred - y, axis=1))
def bnn_end2end_target_func(
    params,
    data,
    targets,
    scale_identity_multiplier=0.01,
):
    """MCMC target log prob for an end-to-end BNN with no extra transforms.

    Builds a one-hidden-layer sigmoid MLP directly from ``params`` and
    scores the prediction residual against a zero-mean isotropic Gaussian.

    Notes
    -----
    The BNN is rebuilt inline because TFP's MCMC target log prob does not
    play well with a network constructed outside of this function and passed
    in as constant variables.
    """
    x = tf.convert_to_tensor(data.astype(np.float32), dtype=tf.float32)
    y = tf.convert_to_tensor(targets.astype(np.float32), dtype=tf.float32)
    w_hidden, b_hidden, w_out, b_out = params
    hidden_act = tf.nn.sigmoid(x @ w_hidden + b_hidden)
    pred = hidden_act @ w_out + b_out
    residual_dist = tfp.distributions.MultivariateNormalDiag(
        loc=tf.zeros([data.shape[1]]),
        scale_identity_multiplier=scale_identity_multiplier,
    )
    return tf.reduce_sum(residual_dist.log_prob(pred - y))
def bnn_softmax(input_labels, simplex_transform, *args, **kwargs):
    """Softmax-normalized BNN transform of target labels to predictions.

    Maps the labels into the transformed simplex space, runs the MLP there,
    maps the result back, and applies softmax so the output is always a
    probability distribution of the same dimension as the input.
    """
    transformed = simplex_transform.to(input_labels)
    bnn_out, tf_vars = bnn_mlp(transformed, *args, **kwargs)
    prob_output = tf.nn.softmax(simplex_transform.back(bnn_out))
    return prob_output, tf_vars
def bnn_softmax_placeholders(input_labels, simplex_transform, *args, **kwargs):
    """Placeholder-weight variant of the softmax BNN transform.

    Identical flow to the variable-backed version: map labels into the
    transformed simplex space, run the placeholder MLP there, map back, and
    normalize with softmax so the result stays a probability distribution.
    """
    transformed = simplex_transform.to(input_labels)
    bnn_out, tf_placeholders = bnn_mlp_placeholders(
        transformed,
        *args,
        **kwargs,
    )
    prob_output = tf.nn.softmax(simplex_transform.back(bnn_out))
    return prob_output, tf_placeholders
def bnn_mlp(
    input_labels,
    num_layers=1,
    num_hidden=10,
    hidden_activation=tf.math.sigmoid,
    hidden_use_bias=True,
    output_activation=None,
    output_use_bias=True,
    dtype=tf.float32,
    tf_device=None,
):
    """Simple MLP whose weights become the BNN's random variables.

    Input is a batch of probability-simplex labels; output is an unnormalized
    prediction of the same dimensionality.  Returns the output tensor and the
    list of trainable weight variables (hidden layers first, output last).
    """
    with tf.device(tf_device), tf.name_scope('bnn_mlp_transformer'):
        collected_vars = []
        activations = input_labels
        # Stack the hidden layers, recording each layer's weights in order.
        for _ in range(num_layers):
            hidden_layer = tf.keras.layers.Dense(
                num_hidden,
                activation=hidden_activation,
                dtype=dtype,
                use_bias=hidden_use_bias,
            )
            activations = hidden_layer(activations)
            collected_vars.extend(hidden_layer.weights)
        # Output layer projects back to the input dimensionality.
        output_layer = tf.keras.layers.Dense(
            input_labels.shape[1],
            activation=output_activation,
            use_bias=output_use_bias,
            dtype=dtype,
            name='bnn_output_pred',
        )
        bnn_out = output_layer(activations)
        collected_vars.extend(output_layer.weights)
    return bnn_out, collected_vars
def bnn_mlp_placeholders(
    input_labels,
    num_layers=1,
    num_hidden=10,
    hidden_activation=tf.math.sigmoid,
    hidden_use_bias=True,
    output_activation=tf.math.sigmoid,
    output_use_bias=False,
    dtype=tf.float32,
    tf_device=None,
):
    """Simple MLP whose weights are tf placeholders instead of variables.

    Input is a batch of probability-simplex labels; output is the network's
    prediction in the same space.  Returns the output tensor and the ordered
    list of weight placeholders (hidden weights/biases first, output last),
    so sampled weight sets can be fed in without variable assignment.
    """
    with tf.device(tf_device), tf.name_scope('bnn_mlp_transformer'):
        tf_placeholders = []
        x = input_labels
        for i in range(num_layers):
            # Weights
            weights = tf.placeholder(
                dtype,
                [x.shape[1], num_hidden],
                f'hidden_weights_{i}',
            )
            tf_placeholders.append(weights)
            # Bias
            bias_name = f'hidden_bias_{i}'
            if hidden_use_bias:
                bias = tf.placeholder(dtype, [num_hidden], bias_name)
                tf_placeholders.append(bias)
            else:
                bias = tf.zeros([num_hidden], dtype, bias_name)
            # Hidden layer calculation
            x = (x @ weights) + bias
            if hidden_activation:
                x = hidden_activation(x)
        # output = activation(dot(input, kernel) + bias)
        weights = tf.placeholder(
            dtype,
            [x.shape[1], input_labels.shape[1]],
            'output_weights',
        )
        tf_placeholders.append(weights)
        if output_use_bias:
            bias = tf.placeholder(dtype, [input_labels.shape[1]], 'output_bias')
            tf_placeholders.append(bias)
        else:
            # BUG FIX: previously reused the loop's `bias_name` (the last
            # hidden layer's name) for the output bias, and raised NameError
            # when num_layers == 0.  Name the output bias correctly.
            bias = tf.zeros([input_labels.shape[1]], dtype, 'output_bias')
        bnn_out = (x @ weights) + bias
        if output_activation:
            bnn_out = output_activation(bnn_out)
    return bnn_out, tf_placeholders
def bnn_mlp_loss(*weights, **kwargs):
    """MCMC target log prob that assigns proposed weights into existing vars.

    Expects in ``kwargs``: 'tf_vars' (the network's variables, same order as
    *weights), 'bnn_out' (network output tensor), 'tf_labels' (target
    placeholder), 'scale_identity_multiplier' (Gaussian scale), and 'ops' (a
    dict this function mutates with the 'diff' and 'loss' tensors for later
    inspection).  Returns the summed Gaussian log prob of output - labels.
    """
    with tf.control_dependencies(weights):
        # Given the tf.variables, assign the new values to them
        assign_op = []
        for i, w in enumerate(weights):
            assign_op.append(tf.assign(kwargs['tf_vars'][i], w))
        # Zero-mean isotropic Gaussian over the residual.
        diff_mvn = tfp.distributions.MultivariateNormalDiag(
            tf.zeros(kwargs['bnn_out'].shape[1]),
            scale_identity_multiplier=kwargs['scale_identity_multiplier'],
        )
        # The control dependency forces the assigns to run before the
        # residual is computed, so bnn_out reflects the proposed weights.
        with tf.control_dependencies(assign_op):
            diff = kwargs['bnn_out'] - kwargs['tf_labels']
            kwargs['ops']['diff'] = diff
            loss = tf.reduce_sum(
                diff_mvn.log_prob(diff),
                name='log_prob_dist_sum')
            kwargs['ops']['loss'] = loss
    return loss
def bnn_all_loss(*weights, **kwargs):
    """MCMC target log prob that builds the whole BNN graph per call.

    Expects in ``kwargs``: 'bnn_args' (kwargs for bnn_mlp), 'tf_labels'
    (target placeholder), 'scale_identity_multiplier' (Gaussian scale), and
    'diff_scale' (multiplier on the log prob; pass -1.0 to negate it).
    Returns diff_scale * sum of the Gaussian log prob of output - labels.
    """
    # build ANN
    bnn_out, tf_vars = bnn_mlp(**kwargs['bnn_args'])
    # assign weights
    assign_op = []
    for i, w in enumerate(weights):
        assign_op.append(tf.assign(tf_vars[i], w))
    # Control dependency: assigns must complete before the residual uses
    # bnn_out, so the output reflects the proposed weight state.
    with tf.control_dependencies(assign_op):
        # get diff and log prob.
        diff_mvn = tfp.distributions.MultivariateNormalDiag(
            tf.zeros(bnn_out.shape[1]),
            scale_identity_multiplier=kwargs['scale_identity_multiplier'],
        )
        diff = bnn_out - kwargs['tf_labels']
        # NOTE trying to see if it needs to negative log prob!
        return kwargs['diff_scale'] * tf.reduce_sum(
            diff_mvn.log_prob(diff),
            name='log_prob_dist_sum',
        )
def bnn_adam(
    bnn_out,
    tf_vars,
    tf_labels,
    feed_dict,
    tf_config=None,
    optimizer_id='adam',
    optimizer_args=None,
    epochs=1,
    init_vars=None,
):
    """Trains the given ANN with ADAM to be used as the initial weights for the
    MCMC fitting of the BNN version.

    Parameters
    ----------
    bnn_out : tf.Tensor
        Output tensor of the network to train.
    tf_vars : list
        The network's trainable variables; their final values are returned.
    tf_labels : tf.Tensor
        Placeholder for the target labels.
    feed_dict : dict
        Feed dict supplying the input and label placeholders.
    tf_config : optional
        Session configuration (tf.ConfigProto).
    optimizer_id : str
        'adam' or 'nadam'; selects the optimizer class.
    optimizer_args : dict, optional
        Keyword arguments forwarded to the optimizer constructor.
    epochs : int
        Number of session runs over the full feed_dict.
    init_vars: dict
        A dictionary like feed_dict that will be temporarily added to the
        feed_dict for the first epoch to serve as the initial values of given
        tensorflow variables in tf_vars.

    Returns
    -------
    tuple
        (weights, iter_results): trained variable values and the result dict
        of the final training step.
    """
    if optimizer_args is None:
        # Ensure that opt args is a dict for use with **
        optimizer_args = {}
    # create loss: per-sample L2 norm of the prediction residual.
    loss = tf.norm(bnn_out - tf_labels, axis=1)
    # Create optimizer
    if optimizer_id == 'adam':
        optimizer = tf.train.AdamOptimizer(**optimizer_args)
    elif optimizer_id == 'nadam':
        optimizer = tf.contrib.opt.NadamOptimizer(**optimizer_args)
    else:
        raise ValueError(f'Unexpected optimizer_id value: {optimizer_id}')
    global_step = tf.Variable(0, name='global_step', trainable=False)
    grad = optimizer.compute_gradients(loss)
    train_op = optimizer.apply_gradients(grad, global_step)
    results_dict = {
        'train_op': train_op,
        'loss': loss,
        #'grad': grad,
    }
    if init_vars:
        # Copy so the caller's feed_dict is not mutated by the temporary
        # initialization entries.
        feed_dict = feed_dict.copy()
        feed_dict.update(init_vars)
    with tf.Session(config=tf_config) as sess:
        sess.run((
            tf.global_variables_initializer(),
            tf.local_variables_initializer(),
        ))
        for i in range(epochs):
            if i == 1 and init_vars:
                # remove initialization vars from the feed dict on 2nd epoch
                for v in init_vars:
                    del feed_dict[v]
            iter_results = sess.run(results_dict, feed_dict=feed_dict)
        weights = sess.run(tf_vars)
    return weights, iter_results
def get_bnn_transform(
    input_labels,
    output_labels,
    bnn_args=None,
    num_samples=int(1e4),
    burnin=int(1e4),
    lag=int(1e3),
    parallel_iter=16,
    hyperbolic=False,
    kernel_id='RandomWalkMetropolis',
    kernel_args=None,
    scale_identity_multiplier=1.0,
    random_seed=None,
    dtype=tf.float32,
    tf_vars_init=None,
    tf_input=None,
    diff_scale=1.0,
    step_adjust_id='Simple',
    num_adaptation_steps=None,
):
    """Build the TFP MCMC sample-chain graph that fits the BNN transform.

    Constructs input/label placeholders, the MLP (unless initial variables
    are supplied), the bnn_all_loss target log prob, and the MCMC kernel,
    then wires them into tfp.mcmc.sample_chain.  Nothing is executed here;
    run the returned ops with bnn_mlp_run_sess.

    NOTE(review): ``hyperbolic`` and ``random_seed`` are accepted but not
    referenced anywhere in this body — presumably reserved/legacy; confirm.
    NOTE(review): ``bnn_args`` is mutated (gains 'input_labels'), which is
    visible to the caller when a dict is passed in.

    Returns
    -------
    tuple(dict, dict)
        results_dict with 'samples' and 'trace' tensors, and feed_dict
        mapping the placeholders to the given label arrays.
    """
    if input_labels.shape != output_labels.shape:
        raise ValueError(
            'input_labels and output_labels must have the same shape.',
        )
    if bnn_args is None:
        bnn_args = {}
    # Data placeholders
    if tf_input is None:
        tf_input = tf.placeholder(
            dtype=dtype,
            shape=[None, input_labels.shape[1]],
            name='input_label',
        )
    tf_labels = tf.placeholder(
        dtype=dtype,
        shape=[None, output_labels.shape[1]],
        name='output_labels',
    )
    # Create the BNN model
    if tf_vars_init is None:
        _, tf_vars_init = bnn_mlp(tf_input, **bnn_args)
    bnn_args['input_labels'] = tf_input
    # Get loss function
    loss_fn = lambda *w: bnn_all_loss(
        *w,
        bnn_args=bnn_args,
        tf_labels=tf_labels,
        scale_identity_multiplier=scale_identity_multiplier,
        diff_scale=diff_scale, # for ease of negating the log prob, use -1.0
    )
    # Get the MCMC Kernel
    if num_adaptation_steps is not None:
        kernel = get_mcmc_kernel(
            loss_fn,
            kernel_id,
            kernel_args,
            step_adjust_id,
            num_adaptation_steps,
        )
    else:
        kernel = get_mcmc_kernel(loss_fn, kernel_id, kernel_args)
    # Fit the BNN with the MCMC kernel
    samples, trace = tfp.mcmc.sample_chain(
        num_results=num_samples,
        current_state=tf_vars_init,
        kernel=kernel,
        num_burnin_steps=burnin,
        num_steps_between_results=lag,
        parallel_iterations=parallel_iter,
    )
    results_dict = {
        'samples': samples,
        'trace': trace,
    }
    feed_dict = {
        tf_input: input_labels,
        tf_labels: output_labels,
    }
    return results_dict, feed_dict
def bnn_mlp_run_sess(results_dict, feed_dict, sess_config=None):
    """Execute the given result tensors once in a fresh TF session.

    Initializes global and local variables, runs ``results_dict`` with
    ``feed_dict``, and returns whatever the session run produced.
    """
    with tf.Session(config=sess_config) as sess:
        init_ops = (
            tf.global_variables_initializer(),
            tf.local_variables_initializer(),
        )
        sess.run(init_ops)
        fetched = sess.run(results_dict, feed_dict=feed_dict)
    return fetched
def reformat_chained_weights(weight_data, multiple_chains=True):
    """Regroup (possibly parallel-chain) MCMC weight samples by weight index.

    For each weight tensor index, collects that tensor across every sample
    (and, when ``multiple_chains`` is True, per chain) as lists of
    np.ndarrays, preserving chain order within each weight index.
    """
    regrouped = []
    if multiple_chains:
        # weight_data: chains -> samples -> per-weight arrays.
        num_weights = len(weight_data[0][0])
        for widx in range(num_weights):
            for chain in weight_data:
                regrouped.append([np.array(sample[widx]) for sample in chain])
    else:
        # weight_data: samples -> per-weight arrays.
        num_weights = len(weight_data[0])
        for widx in range(num_weights):
            regrouped.append(
                [np.array(sample[widx]) for sample in weight_data]
            )
    return regrouped
def assign_weights_bnn(
    weights_sets,
    tf_placeholders,
    bnn_out,
    input_labels,
    tf_input,
    #output_labels=None,
    dtype=tf.float32,
    sess_config=None,
    data_dim_first=True,
):
    """Given BNN weights and tensors with data, forward pass through network.

    For every sampled weight set, feeds the weights into the placeholder
    network and records its output on ``input_labels``.

    NOTE(review): ``weights_sets`` is accepted in two layouts — a list of
    per-weight np.ndarrays whose first axis is samples, or a list of
    per-sample lists of weight arrays; anything else leaves
    ``num_weights_sets`` unbound and raises NameError below — confirm
    callers only pass these two shapes.
    NOTE(review): in the ndarray branch ``num_weights_sets`` is set to
    ``len(weights_sets[0])``, i.e. the first weight's sample count, which
    the shape check below compares against results.shape[0] — verify this
    matches the intended "number of weight sets" semantics.
    """
    feed_dict = {tf_input: input_labels}
    results_list = [bnn_out]

    #if output_labels:
    #    # TODO this doesn't make sense. bnn isn't used for simplex differences
    #    tf_output = tf.placeholder(
    #        dtype=dtype,
    #        shape=[None, output_labels.shape[1]],
    #        name='output_labels',
    #    )
    #    results_list.append(bnn_out - tf_output)
    #
    #    feed_dict[tf_output] = output_labels

    with tf.Session(config=sess_config) as sess:
        sess.run((
            tf.global_variables_initializer(),
            tf.local_variables_initializer(),
        ))
        # Loop through each set of weights and get BNN outputs
        iter_results = []
        if isinstance(weights_sets[0], np.ndarray):
            # list of the weight's np.ndarrays whose first idx is the samples
            num_weights_sets = len(weights_sets[0])
            for sample_idx in range(weights_sets[0].shape[0]):
                # Loop through the different placeholders and assign the values
                for i, var_ph in enumerate(tf_placeholders):
                    feed_dict[var_ph] = weights_sets[i][sample_idx]
                iter_results.append(sess.run(
                    results_list,
                    feed_dict=feed_dict,
                ))
        elif isinstance(weights_sets[0], list):
            # a sample list of weights lists that contain the np.ndarrays
            # TODO this needs confirmed.
            num_weights_sets = len(weights_sets[0][0])
            for sample_idx in range(len(weights_sets)):
                # Loop through the different placeholders and assign the values
                for i, var_ph in enumerate(tf_placeholders):
                    feed_dict[var_ph] = weights_sets[sample_idx][i]
                iter_results.append(sess.run(
                    results_list,
                    feed_dict=feed_dict,
                ))
    #if output_labels:
    #    return iter_results
    if data_dim_first:
        # reshape the output such that the shape corresponds to
        # [data samples, number of bnn weights sets, classes]
        results = np.stack(iter_results)
        if results.shape[0] == num_weights_sets and results.shape[2] == input_labels.shape[0]:
            return np.swapaxes(results, 0, 2).squeeze()
        return np.swapaxes(results, 0, 1).squeeze()
    # Otherwise: [number of bnn weights sets, data samples, classes]
    return np.stack(iter_results).squeeze()
| [
"tensorflow.keras.layers.Dense",
"tensorflow.local_variables_initializer",
"tensorflow.assign",
"tensorflow.Variable",
"tensorflow_probability.mcmc.sample_chain",
"psych_metric.distrib.mcmc.get_mcmc_kernel",
"tensorflow.placeholder",
"numpy.swapaxes",
"tensorflow.contrib.opt.NadamOptimizer",
"tens... | [((1525, 1583), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(bnn_data_rot @ hidden_weights + hidden_bias)'], {}), '(bnn_data_rot @ hidden_weights + hidden_bias)\n', (1538, 1583), True, 'import tensorflow as tf\n'), ((3166, 3224), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(bnn_data_rot @ hidden_weights + hidden_bias)'], {}), '(bnn_data_rot @ hidden_weights + hidden_bias)\n', (3179, 3224), True, 'import tensorflow as tf\n'), ((4208, 4262), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['(bnn_data @ hidden_weights + hidden_bias)'], {}), '(bnn_data @ hidden_weights + hidden_bias)\n', (4221, 4262), True, 'import tensorflow as tf\n'), ((11315, 11351), 'tensorflow.norm', 'tf.norm', (['(bnn_out - tf_labels)'], {'axis': '(1)'}), '(bnn_out - tf_labels, axis=1)\n', (11322, 11351), True, 'import tensorflow as tf\n'), ((11674, 11725), 'tensorflow.Variable', 'tf.Variable', (['(0)'], {'name': '"""global_step"""', 'trainable': '(False)'}), "(0, name='global_step', trainable=False)\n", (11685, 11725), True, 'import tensorflow as tf\n'), ((13435, 13527), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'dtype', 'shape': '[None, output_labels.shape[1]]', 'name': '"""output_labels"""'}), "(dtype=dtype, shape=[None, output_labels.shape[1]], name=\n 'output_labels')\n", (13449, 13527), True, 'import tensorflow as tf\n'), ((14363, 14551), 'tensorflow_probability.mcmc.sample_chain', 'tfp.mcmc.sample_chain', ([], {'num_results': 'num_samples', 'current_state': 'tf_vars_init', 'kernel': 'kernel', 'num_burnin_steps': 'burnin', 'num_steps_between_results': 'lag', 'parallel_iterations': 'parallel_iter'}), '(num_results=num_samples, current_state=tf_vars_init,\n kernel=kernel, num_burnin_steps=burnin, num_steps_between_results=lag,\n parallel_iterations=parallel_iter)\n', (14384, 14551), True, 'import tensorflow_probability as tfp\n'), ((6300, 6320), 'tensorflow.device', 'tf.device', (['tf_device'], {}), '(tf_device)\n', (6309, 6320), True, 'import tensorflow as tf\n'), 
((6322, 6358), 'tensorflow.name_scope', 'tf.name_scope', (['"""bnn_mlp_transformer"""'], {}), "('bnn_mlp_transformer')\n", (6335, 6358), True, 'import tensorflow as tf\n'), ((6838, 6979), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['input_labels.shape[1]'], {'activation': 'output_activation', 'use_bias': 'output_use_bias', 'dtype': 'dtype', 'name': '"""bnn_output_pred"""'}), "(input_labels.shape[1], activation=output_activation,\n use_bias=output_use_bias, dtype=dtype, name='bnn_output_pred')\n", (6859, 6979), True, 'import tensorflow as tf\n'), ((7619, 7639), 'tensorflow.device', 'tf.device', (['tf_device'], {}), '(tf_device)\n', (7628, 7639), True, 'import tensorflow as tf\n'), ((7641, 7677), 'tensorflow.name_scope', 'tf.name_scope', (['"""bnn_mlp_transformer"""'], {}), "('bnn_mlp_transformer')\n", (7654, 7677), True, 'import tensorflow as tf\n'), ((8512, 8588), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[x.shape[1], input_labels.shape[1]]', '"""output_weights"""'], {}), "(dtype, [x.shape[1], input_labels.shape[1]], 'output_weights')\n", (8526, 8588), True, 'import tensorflow as tf\n'), ((9117, 9149), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['weights'], {}), '(weights)\n', (9140, 9149), True, 'import tensorflow as tf\n'), ((10118, 10152), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['assign_op'], {}), '(assign_op)\n', (10141, 10152), True, 'import tensorflow as tf\n'), ((11427, 11467), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {}), '(**optimizer_args)\n', (11449, 11467), True, 'import tensorflow as tf\n'), ((12036, 12064), 'tensorflow.Session', 'tf.Session', ([], {'config': 'tf_config'}), '(config=tf_config)\n', (12046, 12064), True, 'import tensorflow as tf\n'), ((13287, 13376), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'dtype', 'shape': '[None, input_labels.shape[1]]', 'name': '"""input_label"""'}), "(dtype=dtype, shape=[None, input_labels.shape[1]], 
name=\n 'input_label')\n", (13301, 13376), True, 'import tensorflow as tf\n'), ((14068, 14158), 'psych_metric.distrib.mcmc.get_mcmc_kernel', 'get_mcmc_kernel', (['loss_fn', 'kernel_id', 'kernel_args', 'step_adjust_id', 'num_adaptation_steps'], {}), '(loss_fn, kernel_id, kernel_args, step_adjust_id,\n num_adaptation_steps)\n', (14083, 14158), False, 'from psych_metric.distrib.mcmc import get_mcmc_kernel\n'), ((14253, 14301), 'psych_metric.distrib.mcmc.get_mcmc_kernel', 'get_mcmc_kernel', (['loss_fn', 'kernel_id', 'kernel_args'], {}), '(loss_fn, kernel_id, kernel_args)\n', (14268, 14301), False, 'from psych_metric.distrib.mcmc import get_mcmc_kernel\n'), ((14910, 14940), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), '(config=sess_config)\n', (14920, 14940), True, 'import tensorflow as tf\n'), ((16541, 16571), 'tensorflow.Session', 'tf.Session', ([], {'config': 'sess_config'}), '(config=sess_config)\n', (16551, 16571), True, 'import tensorflow as tf\n'), ((18236, 18258), 'numpy.stack', 'np.stack', (['iter_results'], {}), '(iter_results)\n', (18244, 18258), True, 'import numpy as np\n'), ((3454, 3490), 'tensorflow.norm', 'tf.norm', (['(output - bnn_target)'], {'axis': '(1)'}), '(output - bnn_target, axis=1)\n', (3461, 3490), True, 'import tensorflow as tf\n'), ((6469, 6575), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_hidden'], {'activation': 'hidden_activation', 'dtype': 'dtype', 'use_bias': 'hidden_use_bias'}), '(num_hidden, activation=hidden_activation, dtype=dtype,\n use_bias=hidden_use_bias)\n', (6490, 6575), True, 'import tensorflow as tf\n'), ((7814, 7884), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[x.shape[1], num_hidden]', 'f"""hidden_weights_{i}"""'], {}), "(dtype, [x.shape[1], num_hidden], f'hidden_weights_{i}')\n", (7828, 7884), True, 'import tensorflow as tf\n'), ((8724, 8785), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[input_labels.shape[1]]', '"""output_bias"""'], {}), "(dtype, 
[input_labels.shape[1]], 'output_bias')\n", (8738, 8785), True, 'import tensorflow as tf\n'), ((8860, 8911), 'tensorflow.zeros', 'tf.zeros', (['[input_labels.shape[1]]', 'dtype', 'bias_name'], {}), '([input_labels.shape[1]], dtype, bias_name)\n', (8868, 8911), True, 'import tensorflow as tf\n'), ((9417, 9453), 'tensorflow.zeros', 'tf.zeros', (["kwargs['bnn_out'].shape[1]"], {}), "(kwargs['bnn_out'].shape[1])\n", (9425, 9453), True, 'import tensorflow as tf\n'), ((9554, 9588), 'tensorflow.control_dependencies', 'tf.control_dependencies', (['assign_op'], {}), '(assign_op)\n', (9577, 9588), True, 'import tensorflow as tf\n'), ((10082, 10106), 'tensorflow.assign', 'tf.assign', (['tf_vars[i]', 'w'], {}), '(tf_vars[i], w)\n', (10091, 10106), True, 'import tensorflow as tf\n'), ((10260, 10286), 'tensorflow.zeros', 'tf.zeros', (['bnn_out.shape[1]'], {}), '(bnn_out.shape[1])\n', (10268, 10286), True, 'import tensorflow as tf\n'), ((11522, 11569), 'tensorflow.contrib.opt.NadamOptimizer', 'tf.contrib.opt.NadamOptimizer', ([], {}), '(**optimizer_args)\n', (11551, 11569), True, 'import tensorflow as tf\n'), ((18544, 18566), 'numpy.stack', 'np.stack', (['iter_results'], {}), '(iter_results)\n', (18552, 18566), True, 'import numpy as np\n'), ((1691, 1721), 'tensorflow.transpose', 'tf.transpose', (['bnn_rotation_mat'], {}), '(bnn_rotation_mat)\n', (1703, 1721), True, 'import tensorflow as tf\n'), ((3332, 3362), 'tensorflow.transpose', 'tf.transpose', (['bnn_rotation_mat'], {}), '(bnn_rotation_mat)\n', (3344, 3362), True, 'import tensorflow as tf\n'), ((8110, 8156), 'tensorflow.placeholder', 'tf.placeholder', (['dtype', '[num_hidden]', 'bias_name'], {}), '(dtype, [num_hidden], bias_name)\n', (8124, 8156), True, 'import tensorflow as tf\n'), ((8243, 8283), 'tensorflow.zeros', 'tf.zeros', (['[num_hidden]', 'dtype', 'bias_name'], {}), '([num_hidden], dtype, bias_name)\n', (8251, 8283), True, 'import tensorflow as tf\n'), ((9307, 9341), 'tensorflow.assign', 'tf.assign', 
(["kwargs['tf_vars'][i]", 'w'], {}), "(kwargs['tf_vars'][i], w)\n", (9316, 9341), True, 'import tensorflow as tf\n'), ((12105, 12138), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (12136, 12138), True, 'import tensorflow as tf\n'), ((12152, 12184), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (12182, 12184), True, 'import tensorflow as tf\n'), ((14981, 15014), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (15012, 15014), True, 'import tensorflow as tf\n'), ((15028, 15060), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (15058, 15060), True, 'import tensorflow as tf\n'), ((16612, 16645), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (16643, 16645), True, 'import tensorflow as tf\n'), ((16659, 16691), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (16689, 16691), True, 'import tensorflow as tf\n'), ((18426, 18452), 'numpy.swapaxes', 'np.swapaxes', (['results', '(0)', '(1)'], {}), '(results, 0, 1)\n', (18437, 18452), True, 'import numpy as np\n'), ((15744, 15758), 'numpy.array', 'np.array', (['w[i]'], {}), '(w[i])\n', (15752, 15758), True, 'import numpy as np\n'), ((18374, 18400), 'numpy.swapaxes', 'np.swapaxes', (['results', '(0)', '(2)'], {}), '(results, 0, 2)\n', (18385, 18400), True, 'import numpy as np\n'), ((1902, 1927), 'tensorflow.zeros', 'tf.zeros', (['[data.shape[1]]'], {}), '([data.shape[1]])\n', (1910, 1927), True, 'import tensorflow as tf\n'), ((4472, 4497), 'tensorflow.zeros', 'tf.zeros', (['[data.shape[1]]'], {}), '([data.shape[1]])\n', (4480, 4497), True, 'import tensorflow as tf\n'), ((15629, 15643), 'numpy.array', 'np.array', (['w[i]'], {}), '(w[i])\n', (15637, 15643), True, 'import numpy as np\n')] |
# coding: utf-8
# # Text Summarization in Python
#
# ## Approach:
# Extractive text summarization is all about finding the more important sentences from a document as a summary of that document.
# Our approach is using the PageRank algorithm to find these 'important' sentences.
# ## Implementation
# ### 1. Importing important libraries
# numpy library helps in working with arrays: array creation and manipulation
# this implementation uses array for storing the matrices generated as 2-D arrays
# PyPDF2 is a library used for reading the PDF files
# sys library has been used for printing the size of data structures used in the program
import numpy as np
import PyPDF2
import fpdf # to print PDF summary
import fitz # to highlight sentence in PDF
import sys
# matplotlib is a library that is used to visualize the data by drawing graphs of matrix inputs
# we will use it for drawing the matrices generated later in the program
# %matplotlib inline is a command used to show the graphs in the jupyter notebook
import matplotlib.pyplot as plt
#get_ipython().magic('matplotlib inline')
# networkx library helps in working with graphs
# and later performing the PageRank algorithm
# which is the crux of this implementation to find
# the importance of each sentence using their 'rank' as a metric
# rank, the output of the method pagerank, is a measure of importance of sentences
# this library has been used in the cell no. ()
import networkx as nx
# the PunktSentenceTokenizer library is being imported from the file punkt.py contained in package nltk.tokenize
# this is used to tokenize the document into sentences
# Tokenization: Tokenization is the process of demarcating and possibly classifying..
# sections of a string of input characters.
# The resulting tokens are then passed on to some other form of processing.
from nltk.tokenize.punkt import PunktSentenceTokenizer
# TfidfTransformer and CountVectorizer libraries are being imported
# CountVectorizer: In this implementation, a CountVectorizer object is being created that ..
# will be used for creating the document-term matrix
# tFidTransformer: In this implementation,TfidfTransformer is used for executing the method fit_transform()...
# which provides the output as a document-term matrix normalized (value 0-1) according to the TF-IDF
# TF(Term Frequency): the no. of times a term(a word here) appears in the current document(single sentence here)
# IDF(Inverse Document Frequency): the no. of times a term(a word here) appears in the entire corpus
# Corpus: set of all sentences
from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer
# ### 2. Function to read the document from user
# Supported formats: .txt, .pdf
#
# Input: Takes the name of the file as input.
#
# Output: Returns a string output containing the contents of the file.
# we are going to show an example of how the method is working
# first let's take the document as an input
def readDoc():
    """Read the document named by the first command-line argument.

    Supports .txt and .pdf files; stores the file name in the module-level
    global `name` and returns the document contents as a string (empty on
    an unsupported extension).
    """
    global name
    # name = input('Please input a file name: ')
    name = str(sys.argv[1])
    print('You have asked for the document {}'.format(name))
    # Map the file extension onto a numeric choice code.
    lowered = name.lower()
    if lowered.endswith('.txt'):
        choice = 1
    elif lowered.endswith('.pdf'):
        choice = 2
    else:
        choice = 3
    # print(name)
    print(choice)
    document = ''
    if choice == 1:
        # Plain-text file: read the whole file in one go.
        with open(name, 'r') as f:
            document = f.read()
    elif choice == 2:
        # PDF file: concatenate the extracted text of every page.
        with open(name, 'rb') as pdfFileObj:
            pdfReader = PyPDF2.PdfFileReader(pdfFileObj)
            for page_no in range(pdfReader.getNumPages()):
                document += pdfReader.getPage(page_no).extractText()
    else:
        print('Failed to load a valid file')
        print('Returning an empty string')
        document = ''
    print(type(document))
    return document
# ### 3. Function to tokenize the document
# Input: String of text document
#
# Output: A list containing sentences as its elements
# the function used for tokenizing the sentences
# tokenization of a sentence: see the explanation of tokenization in the comments near the imports above
def tokenize(document):
    """Split `document` into a list of sentence strings.

    Uses NLTK's PunktSentenceTokenizer to demarcate sentence boundaries.
    """
    # Each call builds a fresh tokenizer instance with the default
    # (pre-trained) Punkt parameters.
    sentence_splitter = PunktSentenceTokenizer()
    return sentence_splitter.tokenize(document)
# ### 4. Read the document
# Read the input file (path taken from sys.argv[1] inside readDoc)
# and report its length in characters.
document = readDoc()
print('The length of the file is:', end=' ')
print(len(document))
# ### 5. Generate a list of sentences in the document
# we want to tokenize the document for further processing
# tokenizing the sentence means that we are creating a list of all the sentences of the document.
# Need of tokenizing the document: Initially the document is in just a string format.
# if we want to process the document, we need to store it in a data structure.
# Tokenization of document into words is also possible, but we will go with tokenizing into sentences,
# since we want to choose the most relevant sentences, we need tokens at sentence granularity only.
sentences_list = tokenize(document)
# print the size of memory used by the list of sentences
print('The size of the list in Bytes is: {}'.format(sys.getsizeof(sentences_list)))
# the size of one of the elements of the list
print('The size of the item 0 in Bytes is: {}'.format(sys.getsizeof(sentences_list[0])))
# let us see the data type of sentences_list
# It will be list
print(type(sentences_list))
# number of sentences found by the tokenizer
print('The size of the list "sentences" is: {}'.format(len(sentences_list)))
# print every sentence; for a long document this prints a lot of output
for i in sentences_list:
    print(i)
# ### 6. Generate term-document matrix (TD matrix) of the data
# Convert a collection of text documents to a matrix of token counts
# fit_transform method of CountVectorizer() class
# Learn the vocabulary dictionary and return term-document matrix.
# I/p: An iterable which yields either str, unicode or file objects.
# O/p: The term-document matrix named cv_matrix
cv = CountVectorizer()
cv_matrix = cv.fit_transform(sentences_list)
# **So what does CountVectorizer.fit_transform() do?**
'''
# a demo of what CountVectorizer().fit_transform(text) does
cv_demo = CountVectorizer() # a demo object of class CountVectorizer
# I have repeated the words to make a non-ambiguous array of the document text matrix
text_demo = ["Ashish is good, you are bad", "I am not bad"]
res_demo = cv_demo.fit_transform(text_demo)
print('Result demo array is {}'.format(res_demo.toarray()))
# Result is 2-d matrix containing document text matrix
# Notice that in the second row, there is 2.
# also, bad is repeated twice in that sentence.
# so we can infer that 2 is corresponding to the word 'bad'
print('Feature list: {}'.format(cv_demo.get_feature_names()))
'''
# print the cv_matrix type and how it is stored in memory:
# scipy stores it in compressed sparse row (CSR) format
print('The data type of bow matrix {}'.format(type(cv_matrix)))
# NOTE(review): get_shape is not called here, so this prints the bound
# method object rather than the shape tuple
print('Shape of the matrix {}'.format(cv_matrix.get_shape))
print('Size of the matrix is: {}'.format(sys.getsizeof(cv_matrix)))
print(cv.get_feature_names())
print(cv_matrix.toarray())
# normal_matrix: document-term matrix normalized (value 0-1) according to the TF-IDF
# TF(Term Frequency): the no. of times a term(a word here) appears in the current document(single sentence here)
# IDF(Inverse Document Frequency): the no. of times a term(a word here) appears in the entire corpus
# Corpus: set of all sentences
normal_matrix = TfidfTransformer().fit_transform(cv_matrix)
print(normal_matrix.toarray())
# NOTE(review): toarray is not called here, so this prints the method object
print(normal_matrix.T.toarray)
# sentence-similarity matrix: dot products of the TF-IDF sentence vectors
res_graph = normal_matrix * normal_matrix.T
# plt.spy(res_graph)
# ### 7. Generate a graph for the document to apply PageRank algorithm
# build a graph to run the TextRank algorithm on:
# nx_graph is a graph developed using the networkx library
# each node represents a sentence
# an edge represents that they have words in common
# the edge weight comes from the TF-IDF similarity of the two sentences (nodes)
# nx.draw_circular() lays out and draws the graph
nx_graph = nx.from_scipy_sparse_matrix(res_graph)
nx.draw_circular(nx_graph)
print('Number of edges {}'.format(nx_graph.number_of_edges()))
print('Number of vertices {}'.format(nx_graph.number_of_nodes()))
# plt.show()
print('The memory used by the graph in Bytes is: {}'.format(sys.getsizeof(nx_graph)))
# ### 8. Getting the rank of every sentence using pagerank
# ranks is a dictionary with key=node(sentence index) and value=textrank (the rank of each of the sentences)
ranks = nx.pagerank(nx_graph)
# analyse the data type of ranks
print(type(ranks))
print('The size used by the dictionary in Bytes is: {}'.format(sys.getsizeof(ranks)))
# print the dictionary
for i in ranks:
    print(i, ranks[i])
# ### 9. Finding important sentences and generating summary
# enumerate method: returns an enumerate object
# Use of list Comprehensions
# O/p: sentence_array is the sorted(descending order w.r.t. score value) 2-d array of ranks[sentence] and sentence
# For example, if there are two sentences: S1 (with a score of S1 = s1) and S2 with score s2, with s2>s1
# then sentence_array is [[s2, S2], [s1, S1]]
sentence_array = sorted(((ranks[i], s) for i, s in enumerate(sentences_list)), reverse=True)
sentence_array = np.asarray(sentence_array)
# as sentence_array is in descending order wrt score value
# rank_max is the largest score value (the score of the first element)
# rank_min is the smallest score value (the score of the last element)
rank_max = float(sentence_array[0][0])
rank_min = float(sentence_array[len(sentence_array) - 1][0])
# print the largest and smallest value of scores of the sentence
print(rank_max)
print(rank_min)
# Normalization of the scores
# so that it comes out in the range 0-1
# rank_max becomes 1
# rank_min becomes 0
# store the normalized values in the list temp_array
temp_array = []
# if all sentences have equal ranks, means they are all the same
# taking any sentence will give the summary, say the first sentence
flag = 0
if rank_max - rank_min == 0:
    temp_array.append(0)
    flag = 1
# If the sentences have different ranks, normalize each score
if flag != 1:
    for i in range(0, len(sentence_array)):
        temp_array.append((float(sentence_array[i][0]) - rank_min) / (rank_max - rank_min))
print(len(temp_array))
# Calculation of threshold:
# We take the mean value of normalized scores;
# any sentence with a normalized score at least 0.2 above the mean is
# considered to be part of the summary
threshold = (sum(temp_array) / len(temp_array)) + 0.2
# Separate out the sentences that satisfy the criterion of having a score above the threshold
sentence_list = []
if len(temp_array) > 1:
    for i in range(0, len(temp_array)):
        if temp_array[i] > threshold:
            sentence_list.append(sentence_array[i][1])
else:
    sentence_list.append(sentence_array[0][1])
model = sentence_list
# ### 10. Writing the summary to a new file
# print(sentence_list)
'''
summary = " ".join(str(x) for x in sentence_list)
summary = str.encode(summary).replace(b'\n', b'')
summary = summary.decode()
print(summary)
'''
# with open('sum.txt', 'w') as file:
#     file.write(summary)
# Write the selected sentences into a new PDF next to the input file.
output_pdf = fpdf.FPDF(format='letter')
output_pdf.add_page()
output_pdf.set_font("Arial", size = 12)
final = []
for lines in sentence_list:
    line = str(lines)
    # strip newlines, then force ASCII-safe text via utf-8 encode/ignore
    line = str.encode(line).replace(b'\n', b'')
    line = line.decode()
    line = str(line.encode(encoding = 'utf-8', errors = 'ignore'))
    # print(line[1:])
    # line is now the repr of a bytes object ("b'...'");
    # [2:-1] strips the leading b' and trailing '
    output_pdf.write(5, line[2:-1])
    final.append(line[2:-1])
    output_pdf.ln(10)
output_pdf.output(name.replace(".pdf", "_") + "summary.pdf")
# Re-open the original PDF and highlight each summary sentence in place.
highlight = fitz.open(name)
for page in highlight:
    for line in final:
        sentence = page.searchFor(line)
        for sen in sentence:
            page.addHighlightAnnot(sen)
highlight.save(name.replace(".pdf", "_") + "highlight.pdf", garbage = 4, deflate = True, clean = True)
# End of the notebook | [
"networkx.from_scipy_sparse_matrix",
"sklearn.feature_extraction.text.CountVectorizer",
"networkx.draw_circular",
"networkx.pagerank",
"fpdf.FPDF",
"numpy.asarray",
"sys.getsizeof",
"PyPDF2.PdfFileReader",
"fitz.open",
"nltk.tokenize.punkt.PunktSentenceTokenizer",
"sklearn.feature_extraction.tex... | [((6710, 6727), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (6725, 6727), False, 'from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\n'), ((8833, 8871), 'networkx.from_scipy_sparse_matrix', 'nx.from_scipy_sparse_matrix', (['res_graph'], {}), '(res_graph)\n', (8860, 8871), True, 'import networkx as nx\n'), ((8872, 8898), 'networkx.draw_circular', 'nx.draw_circular', (['nx_graph'], {}), '(nx_graph)\n', (8888, 8898), True, 'import networkx as nx\n'), ((9302, 9323), 'networkx.pagerank', 'nx.pagerank', (['nx_graph'], {}), '(nx_graph)\n', (9313, 9323), True, 'import networkx as nx\n'), ((10044, 10070), 'numpy.asarray', 'np.asarray', (['sentence_array'], {}), '(sentence_array)\n', (10054, 10070), True, 'import numpy as np\n'), ((11929, 11955), 'fpdf.FPDF', 'fpdf.FPDF', ([], {'format': '"""letter"""'}), "(format='letter')\n", (11938, 11955), False, 'import fpdf\n'), ((12406, 12421), 'fitz.open', 'fitz.open', (['name'], {}), '(name)\n', (12415, 12421), False, 'import fitz\n'), ((4456, 4480), 'nltk.tokenize.punkt.PunktSentenceTokenizer', 'PunktSentenceTokenizer', ([], {}), '()\n', (4478, 4480), False, 'from nltk.tokenize.punkt import PunktSentenceTokenizer\n'), ((5672, 5701), 'sys.getsizeof', 'sys.getsizeof', (['sentences_list'], {}), '(sentences_list)\n', (5685, 5701), False, 'import sys\n'), ((5804, 5836), 'sys.getsizeof', 'sys.getsizeof', (['sentences_list[0]'], {}), '(sentences_list[0])\n', (5817, 5836), False, 'import sys\n'), ((7798, 7822), 'sys.getsizeof', 'sys.getsizeof', (['cv_matrix'], {}), '(cv_matrix)\n', (7811, 7822), False, 'import sys\n'), ((8229, 8247), 'sklearn.feature_extraction.text.TfidfTransformer', 'TfidfTransformer', ([], {}), '()\n', (8245, 8247), False, 'from sklearn.feature_extraction.text import TfidfTransformer, CountVectorizer\n'), ((9101, 9124), 'sys.getsizeof', 'sys.getsizeof', (['nx_graph'], {}), '(nx_graph)\n', (9114, 9124), False, 
'import sys\n'), ((9440, 9460), 'sys.getsizeof', 'sys.getsizeof', (['ranks'], {}), '(ranks)\n', (9453, 9460), False, 'import sys\n'), ((3643, 3675), 'PyPDF2.PdfFileReader', 'PyPDF2.PdfFileReader', (['pdfFileObj'], {}), '(pdfFileObj)\n', (3663, 3675), False, 'import PyPDF2\n')] |
import os
import numpy as np
from tqdm import tqdm
import mxnet as mx
from mxnet import gluon, autograd
from gluoncv.utils import LRScheduler
from gluoncv.utils.metrics.voc_segmentation import batch_pix_accuracy, batch_intersection_union
from gluoncv.model_zoo.segbase import SoftmaxCrossEntropyLoss
from mylib.deeplabv3p import DeepLabv3p
from mylib.dataset import VOCAugSegmentation
class Trainer(object):
    """Trains and evaluates a DeepLabv3+ segmentation model on Pascal VOC.

    Wraps dataset loading, network construction/resume, the SGD optimizer
    with a 'poly' learning-rate schedule, the per-epoch training loop, and
    pixel-accuracy / mIoU validation. Runs on a single GPU (mx.gpu()).
    """
    def __init__(self, flag, batch_size,
                 use_global_stats=True,
                 checkpoint_interval=5,
                 epochs=50,
                 learning_rate=1.e-4,
                 momentum=0.9,
                 weight_decay=1.e-4,
                 train_OS=16,
                 train_split='train_aug',
                 val_split='val',
                 resume=None,
                 test_batch_size=None,
                 data_root=os.path.expanduser('~/.mxnet/datasets/voc'),
                 num_workers=4):
        # Default the evaluation batch size to the training batch size.
        if test_batch_size is None:
            test_batch_size = batch_size
        self.running_flag = flag
        self.checkpoint_interval = checkpoint_interval
        # dataset and dataloader
        train_dataset = VOCAugSegmentation(root=data_root, split=train_split)
        val_datset = VOCAugSegmentation(root=data_root, split=val_split)
        self.train_data = gluon.data.DataLoader(train_dataset, batch_size, shuffle=True, last_batch='rollover',
                                               num_workers=num_workers)
        self.eval_data = gluon.data.DataLoader(val_datset, test_batch_size,
                                              last_batch='keep', num_workers=num_workers)
        # create network (21 classes = 20 VOC classes + background)
        model = DeepLabv3p(OS=train_OS, classes=21, use_global_stats=use_global_stats)
        self.net = model
        print(model)
        # resume checkpoint if needed; otherwise initialize fresh weights
        if resume is not None:
            if os.path.isfile(resume):
                model.load_params(resume, ctx=mx.gpu())
            else:
                raise RuntimeError("=> no checkpoint found at '{}'".format(resume))
        else:
            model.initialize(ctx=mx.gpu())
        # create criterion
        self.criterion = SoftmaxCrossEntropyLoss()
        # optimizer and lr scheduling ('poly' decay over all iterations)
        self.lr_scheduler = LRScheduler(mode='poly', baselr=learning_rate, niters=len(self.train_data),
                                      nepochs=epochs)
        self.optimizer = gluon.Trainer(self.net.collect_params(), 'sgd',
                                       {'lr_scheduler': self.lr_scheduler,
                                        'wd': weight_decay,
                                        'momentum': momentum,
                                        'multi_precision': True})
    def training(self, epoch):
        """Run one training epoch, reporting the running mean loss."""
        tbar = tqdm(self.train_data)
        train_loss = 0.
        for i, (data, target) in enumerate(tbar):
            data = data.copyto(mx.gpu())
            target = target.copyto(mx.gpu())
            # advance the per-iteration 'poly' learning-rate schedule
            self.lr_scheduler.update(i, epoch)
            with autograd.record(True):
                outputs = self.net(data)
                losses = self.criterion(outputs, target)
                loss = losses.mean()
                mx.nd.waitall()
            loss.backward()
            # batch_size=1 because the loss is already averaged above
            self.optimizer.step(batch_size=1) # dummy expression
            train_loss += loss.asscalar()
            tbar.set_description('Epoch %d, training loss %.3f' % (epoch, train_loss / (i + 1)))
            mx.nd.waitall()
            # break
    def validation(self, epoch, train=False):
        """Evaluate pixel accuracy and mean IoU over a whole data split.

        Returns (pix_acc, mIoU) computed from the running totals, so the
        values returned reflect all batches seen.
        NOTE(review): if the loader is empty, pix_acc/mIoU are never bound
        and the final return would raise.
        """
        if train:
            loader = self.train_data
            flag = "train"
        else:
            loader = self.eval_data
            flag = 'val'
        tbar = tqdm(loader)
        total_inter, total_union, total_correct, total_label = (0,) * 4
        for i, (x, y) in enumerate(tbar):
            x = x.copyto(mx.gpu())
            y = y.copyto(mx.gpu())
            pred = self.net(x)
            correct, labeled = batch_pix_accuracy(output=pred, target=y)
            inter, union = batch_intersection_union(output=pred, target=y, nclass=21)
            total_correct += correct.astype('int64')
            total_label += labeled.astype('int64')
            total_inter += inter.astype('int64')
            total_union += union.astype('int64')
            # np.spacing guards against division by zero in empty classes
            pix_acc = np.float64(1.0) * total_correct / (np.spacing(1, dtype=np.float64) + total_label)
            IoU = np.float64(1.0) * total_inter / (np.spacing(1, dtype=np.float64) + total_union)
            mIoU = IoU.mean()
            tbar.set_description('%s - Epoch %s, pix_acc: %.4f, mIoU: %.4f' % (flag, epoch, pix_acc, mIoU))
            mx.nd.waitall()
            # break
        return pix_acc, mIoU
    def save_checkpoint(self, epoch, is_best=False):
        # delegates to the module-level save_checkpoint helper
        save_checkpoint(self.running_flag, self.net, epoch, self.checkpoint_interval, is_best)
def save_checkpoint(flag, net, epoch, checkpoint_interval, is_best=False):
    """Save Checkpoint"""
    # All checkpoints for this run live under runs/<flag>/.
    ckpt_dir = "runs/%s" % flag
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    # Always refresh the rolling "lastest" snapshot.
    net.save_params(os.path.join(ckpt_dir, "lastest.params"))
    # Additionally keep a numbered snapshot every checkpoint_interval epochs.
    if (epoch + 1) % checkpoint_interval == 0:
        net.save_params(os.path.join(ckpt_dir, 'checkpoint_%s.params' % (epoch + 1)))
        print("Checkpoint saved.")
    # And a separate copy whenever this epoch is the best so far.
    if is_best:
        net.save_params(os.path.join(ckpt_dir, 'best.params'))
        print("Best model saved.")
if __name__ == "__main__":
    # Fine-tuning configuration for this run.
    FLAG = 'finetune_train_aug_best'
    EPOCHS = 50
    BATCH = 4
    TEST_BATCH = 16
    TRAIN_SPLIT = 'train_aug'
    TRAIN_OS = 16
    USE_GLOBAL_STATS = True
    DATA_ROOT = os.path.expanduser('~/myDataset/voc')
    # WEIGHTS = '../weights/pascal_train_aug.params'
    WEIGHTS = '../weights/checkpoint_10.params'
    LR = 1.e-4
    CHECKPOINT_INTERVAL = 3
    trainer = Trainer(flag=FLAG,
                      batch_size=BATCH,
                      epochs=EPOCHS,
                      resume=WEIGHTS,
                      learning_rate=LR,
                      train_OS=TRAIN_OS,
                      train_split=TRAIN_SPLIT,
                      test_batch_size=TEST_BATCH,
                      use_global_stats=USE_GLOBAL_STATS,
                      data_root=DATA_ROOT,
                      checkpoint_interval=CHECKPOINT_INTERVAL)
    # Establish the baseline mIoU of the resumed weights before training.
    _, best_mIoU = trainer.validation("INIT")
    for epoch in range(EPOCHS):
        trainer.training(epoch)
        _, mIoU = trainer.validation(epoch)
        # Track whether this epoch improved on the best validation mIoU.
        if mIoU > best_mIoU:
            best_mIoU = mIoU
            is_best = True
            print("A new best! mIoU = %.4f" % mIoU)
        else:
            is_best = False
        trainer.save_checkpoint(epoch, is_best=is_best)
| [
"mxnet.nd.waitall",
"tqdm.tqdm",
"mylib.deeplabv3p.DeepLabv3p",
"mxnet.autograd.record",
"os.makedirs",
"os.path.join",
"gluoncv.utils.metrics.voc_segmentation.batch_intersection_union",
"gluoncv.utils.metrics.voc_segmentation.batch_pix_accuracy",
"os.path.exists",
"numpy.spacing",
"mxnet.gluon.... | [((5654, 5691), 'os.path.expanduser', 'os.path.expanduser', (['"""~/myDataset/voc"""'], {}), "('~/myDataset/voc')\n", (5672, 5691), False, 'import os\n'), ((870, 913), 'os.path.expanduser', 'os.path.expanduser', (['"""~/.mxnet/datasets/voc"""'], {}), "('~/.mxnet/datasets/voc')\n", (888, 913), False, 'import os\n'), ((1173, 1226), 'mylib.dataset.VOCAugSegmentation', 'VOCAugSegmentation', ([], {'root': 'data_root', 'split': 'train_split'}), '(root=data_root, split=train_split)\n', (1191, 1226), False, 'from mylib.dataset import VOCAugSegmentation\n'), ((1248, 1299), 'mylib.dataset.VOCAugSegmentation', 'VOCAugSegmentation', ([], {'root': 'data_root', 'split': 'val_split'}), '(root=data_root, split=val_split)\n', (1266, 1299), False, 'from mylib.dataset import VOCAugSegmentation\n'), ((1326, 1441), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['train_dataset', 'batch_size'], {'shuffle': '(True)', 'last_batch': '"""rollover"""', 'num_workers': 'num_workers'}), "(train_dataset, batch_size, shuffle=True, last_batch=\n 'rollover', num_workers=num_workers)\n", (1347, 1441), False, 'from mxnet import gluon, autograd\n'), ((1510, 1608), 'mxnet.gluon.data.DataLoader', 'gluon.data.DataLoader', (['val_datset', 'test_batch_size'], {'last_batch': '"""keep"""', 'num_workers': 'num_workers'}), "(val_datset, test_batch_size, last_batch='keep',\n num_workers=num_workers)\n", (1531, 1608), False, 'from mxnet import gluon, autograd\n'), ((1694, 1764), 'mylib.deeplabv3p.DeepLabv3p', 'DeepLabv3p', ([], {'OS': 'train_OS', 'classes': '(21)', 'use_global_stats': 'use_global_stats'}), '(OS=train_OS, classes=21, use_global_stats=use_global_stats)\n', (1704, 1764), False, 'from mylib.deeplabv3p import DeepLabv3p\n'), ((2188, 2213), 'gluoncv.model_zoo.segbase.SoftmaxCrossEntropyLoss', 'SoftmaxCrossEntropyLoss', ([], {}), '()\n', (2211, 2213), False, 'from gluoncv.model_zoo.segbase import SoftmaxCrossEntropyLoss\n'), ((2796, 2817), 'tqdm.tqdm', 'tqdm', 
(['self.train_data'], {}), '(self.train_data)\n', (2800, 2817), False, 'from tqdm import tqdm\n'), ((3737, 3749), 'tqdm.tqdm', 'tqdm', (['loader'], {}), '(loader)\n', (3741, 3749), False, 'from tqdm import tqdm\n'), ((5040, 5065), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (5054, 5065), False, 'import os\n'), ((5075, 5097), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (5086, 5097), False, 'import os\n'), ((5118, 5159), 'os.path.join', 'os.path.join', (['directory', '"""lastest.params"""'], {}), "(directory, 'lastest.params')\n", (5130, 5159), False, 'import os\n'), ((1896, 1918), 'os.path.isfile', 'os.path.isfile', (['resume'], {}), '(resume)\n', (1910, 1918), False, 'import os\n'), ((3481, 3496), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (3494, 3496), True, 'import mxnet as mx\n'), ((3996, 4037), 'gluoncv.utils.metrics.voc_segmentation.batch_pix_accuracy', 'batch_pix_accuracy', ([], {'output': 'pred', 'target': 'y'}), '(output=pred, target=y)\n', (4014, 4037), False, 'from gluoncv.utils.metrics.voc_segmentation import batch_pix_accuracy, batch_intersection_union\n'), ((4065, 4123), 'gluoncv.utils.metrics.voc_segmentation.batch_intersection_union', 'batch_intersection_union', ([], {'output': 'pred', 'target': 'y', 'nclass': '(21)'}), '(output=pred, target=y, nclass=21)\n', (4089, 4123), False, 'from gluoncv.utils.metrics.voc_segmentation import batch_pix_accuracy, batch_intersection_union\n'), ((4678, 4693), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (4691, 4693), True, 'import mxnet as mx\n'), ((5232, 5293), 'os.path.join', 'os.path.join', (['directory', "('checkpoint_%s.params' % (epoch + 1))"], {}), "(directory, 'checkpoint_%s.params' % (epoch + 1))\n", (5244, 5293), False, 'import os\n'), ((5370, 5408), 'os.path.join', 'os.path.join', (['directory', '"""best.params"""'], {}), "(directory, 'best.params')\n", (5382, 5408), False, 'import os\n'), ((2923, 2931), 'mxnet.gpu', 'mx.gpu', 
([], {}), '()\n', (2929, 2931), True, 'import mxnet as mx\n'), ((2968, 2976), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (2974, 2976), True, 'import mxnet as mx\n'), ((3042, 3063), 'mxnet.autograd.record', 'autograd.record', (['(True)'], {}), '(True)\n', (3057, 3063), False, 'from mxnet import gluon, autograd\n'), ((3216, 3231), 'mxnet.nd.waitall', 'mx.nd.waitall', ([], {}), '()\n', (3229, 3231), True, 'import mxnet as mx\n'), ((3889, 3897), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (3895, 3897), True, 'import mxnet as mx\n'), ((3924, 3932), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (3930, 3932), True, 'import mxnet as mx\n'), ((2125, 2133), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (2131, 2133), True, 'import mxnet as mx\n'), ((4348, 4363), 'numpy.float64', 'np.float64', (['(1.0)'], {}), '(1.0)\n', (4358, 4363), True, 'import numpy as np\n'), ((4383, 4414), 'numpy.spacing', 'np.spacing', (['(1)'], {'dtype': 'np.float64'}), '(1, dtype=np.float64)\n', (4393, 4414), True, 'import numpy as np\n'), ((4448, 4463), 'numpy.float64', 'np.float64', (['(1.0)'], {}), '(1.0)\n', (4458, 4463), True, 'import numpy as np\n'), ((4481, 4512), 'numpy.spacing', 'np.spacing', (['(1)'], {'dtype': 'np.float64'}), '(1, dtype=np.float64)\n', (4491, 4512), True, 'import numpy as np\n'), ((1966, 1974), 'mxnet.gpu', 'mx.gpu', ([], {}), '()\n', (1972, 1974), True, 'import mxnet as mx\n')] |
import numpy as np

# Rectangular nested list -> 2-D array, shape inferred as (2, 4).
data2 = [[1, 2, 3, 4], [5, 6, 7, 8]]
arr2 = np.array(data2)
print(arr2)
print(arr2.shape)

# Ragged nested list (rows of length 4 and 5): NumPy cannot build a
# rectangular array from it.  Since NumPy 1.24 np.array() raises a
# ValueError for such input unless dtype=object is given explicitly,
# which yields a 1-D array of list objects instead.
data3 = [[1, 2, 3, 4], [5, 6, 7, 8, 9]]
print(data3)
arr3 = np.array(data3, dtype=object)
print(arr3)
print(arr3.shape)

data4 = [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]
arr4 = np.array(data4)

# Ragged again -> explicit object dtype for the same reason as data3.
arr5 = np.array([[1, 2, 3], [4, 5, 6, 7]], dtype=object)

# Fully nested rectangular list -> 3-D array of shape (2, 2, 3).
arr3d = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])
| [
"numpy.array"
] | [((59, 74), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (67, 74), True, 'import numpy as np\n'), ((161, 176), 'numpy.array', 'np.array', (['data3'], {}), '(data3)\n', (169, 176), True, 'import numpy as np\n'), ((261, 276), 'numpy.array', 'np.array', (['data4'], {}), '(data4)\n', (269, 276), True, 'import numpy as np\n'), ((285, 320), 'numpy.array', 'np.array', (['[[1, 2, 3], [4, 5, 6, 7]]'], {}), '([[1, 2, 3], [4, 5, 6, 7]])\n', (293, 320), True, 'import numpy as np\n'), ((326, 387), 'numpy.array', 'np.array', (['[[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]'], {}), '([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]])\n', (334, 387), True, 'import numpy as np\n')] |
import os
import numpy as np
import cv2
import threading
import time
import logging
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import itertools
class Analysis:
    def __init__(self, datasets_dir, ground_truth_filename, number_top,
                 log_analysis, debug=True, record_nones=False,
                 heed_singles=False, heed_multiples=False, all_tp=False,
                 preds_dir=None, dataset_name=None,
                 counter_or_objectness='counter'):
        """Collect configuration for one analysis run.

        Args:
            datasets_dir: Root directory with one sub-directory per video.
            ground_truth_filename: Name of the GT file inside each video dir.
            number_top: How many class predictions each result line carries
                (the code handles 1 or 5; see pred_reader).
            log_analysis: Output directory for logs and generated figures.
            debug: When True, configure file-based DEBUG logging.
            record_nones: Count predictions whose IoU could not be computed.
            heed_singles: Include frames with exactly one prediction in stats.
            heed_multiples: Include frames with several predictions in stats.
            all_tp: Also plot the combined single+multi true-positive curve.
            preds_dir: List of prediction directories, one per mode
                (e.g. RPN / Particle); see pred_reader for the expected layout.
            dataset_name: Dataset identifier; controls GT parsing format.
            counter_or_objectness: 'counter' counts hits, 'objectness'
                accumulates objectness scores instead (used in test3).
        """
        self.datasets_dir = datasets_dir
        self.ground_truth_filename = ground_truth_filename
        self.number_top = number_top
        self.log_analysis = log_analysis
        self.debug = debug
        self.record_nones = record_nones
        self.heed_singles = heed_singles
        self.heed_multiples = heed_multiples
        self.preds_dir = preds_dir
        self.dataset_name = dataset_name
        self.all_tp = all_tp
        self.counter_or_objectness = counter_or_objectness
        # Ensure the output directory exists before logging is configured,
        # since the log file is created inside it.
        if not os.path.isdir(self.log_analysis):
            os.mkdir(self.log_analysis)
        if self.debug:
            # NOTE(review): log_analysis is concatenated directly with the
            # file name, so it is expected to end with a path separator.
            logging.basicConfig(filename=log_analysis + 'analysis_{}.log'.format(
                self.dataset_name), level=logging.DEBUG,
                format='%(levelname)s:%(asctime)s %(message)s',
                datefmt='%m/%d/%Y %I:%M:%S%p', filemode='w')
            logging.debug("Analysis Started")
        # Only the two supported accumulation modes are accepted.
        assert (self.counter_or_objectness == 'counter' or
                self.counter_or_objectness == 'objectness')
    def video_player(self, show_pred_boxes=True, show_gt_boxes=True, save_frames=False,
                    particle=True):
        """
        Shows the image frames of different videos in a specific directory.
        Uses the ground truth and predicted bounding boxes and shows them in
        the figure.

        Args:
            show_pred_boxes: Draw the 'RPN' predictions (red).
            show_gt_boxes: Draw the ground-truth boxes (green).
            save_frames: Additionally write each annotated frame under
                log_analysis/<video_name>/<frame>.jpg.
            particle: Draw the 'Particle' predictions (blue).
        """
        ic_list = self.image_reader()
        if show_gt_boxes:
            gt_list = self.gt_reader()
        if show_pred_boxes:
            pred_list = self.pred_reader()
        video_names = os.listdir(self.datasets_dir)
        video_names = sorted(video_names)
        for n, ic in enumerate(ic_list):
            for d, frame in enumerate(ic):
                # Timestamp used below to report the effective FPS.
                time_begin = time.time()
                print("Currently read frame number: {}".format(d))
                image = cv2.imread(frame)
                if show_gt_boxes:
                    # Green --> GT
                    for k, gt in enumerate(gt_list[n]):
                        if d == gt[0]:
                            try:
                                gt_pt1, gt_pt2 = tuple(gt[1:3]), tuple(gt[3:5])
                                cv2.rectangle(image, gt_pt1, gt_pt2, color=(
                                    0, 255, 0), thickness=2)
                            except:
                                # NOTE(review): bare except silently skips GT
                                # rows that cannot be drawn (e.g. nan coords).
                                pass
                if show_pred_boxes:
                    # Red --> RPN
                    for k, pred in enumerate(pred_list['RPN'][n]):
                        if d == pred[0][0]:
                            pred_pt1 = tuple((pred[0][1], pred[0][2]))
                            pred_pt2 = tuple((pred[0][3], pred[0][4]))
                            cv2.rectangle(image, pred_pt1, pred_pt2,
                                        color=(0, 0, 255), thickness=2)
                if particle:
                    # Blue --> Particle
                    for k, pred in enumerate(pred_list['Particle'][n]):
                        if d == pred[0][0]:
                            pred_pt1 = tuple((pred[0][1], pred[0][2]))
                            pred_pt2 = tuple((pred[0][3], pred[0][4]))
                            cv2.rectangle(image, pred_pt1, pred_pt2,
                                        color=(255, 0, 0), thickness=2)
                # Overlay the video name and frame index on the image.
                font = cv2.FONT_HERSHEY_SIMPLEX
                cv2.putText(image, video_names[n], (0, 20), font, 0.8,
                            (0, 0, 255), 2, cv2.LINE_AA)
                cv2.putText(image, "Fr: {}".format(d), (0, 35), font, 0.4,
                            (0, 0, 255), 2, cv2.LINE_AA)
                cv2.imshow('display', image)
                if save_frames:
                    # Create the per-video output directory lazily.
                    if not os.path.isdir(os.path.join(self.log_analysis, video_names[n])):
                        os.mkdir(os.path.join(self.log_analysis, video_names[n]))
                    cv2.imwrite(os.path.join(self.log_analysis, video_names[n], str(d)+".jpg"), image)
                # 1 ms wait lets the HighGUI window refresh.
                cv2.waitKey(1)
                delay = float(time.time() - time_begin)
                print("FPS: {}".format(1 / delay))
                #if(delay < 0.15):
                #    time.sleep(0.15 - delay)
def retreiver(self, duration, preds_all, gt_all, video_id, dict_ref):
for frame_id in range(duration):
print(preds_all['Particle'])
time.sleep(5555)
for mode in preds_all.keys():
if (frame_id % 100 == 0) or (frame_id + 1 == duration):
print("{}/{} is complete for {}.".format(frame_id + 1, duration, mode))
framewise_preds = self.ret_preds(preds_all, video_id, frame_id + 1)
gt = gt_all[video_id][frame_id]
iou_frame_all_pred = []
for pred in framewise_preds[mode]:
try:
iou_for_pred = (self.iou(pred[1:5], gt[1:5]), *pred[5:])
except:
iou_for_pred = (None, *pred[5:])
iou_frame_all_pred.append(iou_for_pred)
# iou_frame_all_pred = sorted(iou_frame_all_pred, reverse=True)
dict_ref[mode].append(iou_frame_all_pred)
def ret_preds(self, preds, video_id, frame_id):
output = {mode: [] for mode in preds.keys()}
for mode in preds.keys():
for pred_frame in preds[mode][video_id]:
if pred_frame[0][0] == frame_id:
output[mode].append(list(pred_frame[0]))
return output
def test1(self):
"""
Testing for determining how many of the GT entries are described as nan.
E.g: When the object is not present in the scene.
Simply, the first coordinate x will be checked as nan or not.
"""
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
frame_lengths = [len(length) for length in self.image_reader()]
gt_all = self.gt_reader()
#############################################################################
### TEST 1
### How many GT entries were annotated as nan?
### E.g: When the object is not present in the scene.
### Simply, the first coordinate x will be checked as nan or not.
#############################################################################
logging.info("#############################################################################")
logging.info("TEST 1: How many GT entries were annotated as nan?")
self.isnan_list = []
for video_id, video_name in enumerate(video_names):
nan_counter = 0
frame_ids = []
for frame_id in range(frame_lengths[video_id]):
if np.isnan(gt_all[video_id][frame_id][1]):
frame_ids.append(frame_id)
nan_counter+=1
self.isnan_list.append(frame_ids)
logging.info("{}/{} frames were annotated as 'nan' in video: {}.".format(nan_counter,\
frame_lengths[video_id], video_name))
logging.info("TEST 1 complete.")
logging.info("#############################################################################")
def test2(self):
"""
Finds how many of the predictions does not have any corresponding GT entries.
This definition will write to file on a class basis using isnan_list from self.test1().
In this case, self.test1() has to be called to create the self.isnan_list attrib.
"""
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
preds_all = self.pred_reader()
logging.info("#############################################################################")
logging.info("TEST 2: For the frames where there is no GT defined, how many of "+\
"them have at least 1 prediction?")
#############################################################################
### TEST 2
### Among the frames in videos where the corresponding GT entries are not
### defined, are there any predictions? If so, what is the count of them as
### frames?
### If there are, does the prediction bbox exceed the IoU threshold?
### If it is over the threshold, does the label of objects inside
### bounding boxes have the same
### E.g: When the object is not present in the scene.
#############################################################################
pred_histogram_video = []
for video_id, video_name in enumerate(video_names):
preds_when_nan_valid = [0]*81
for pred in preds_all[video_id]:
for nan_frame_id in self.isnan_list[video_id]:
if pred[0][0] == nan_frame_id:
preds_when_nan_valid[pred[0][5]]+=1
pred_histogram_video.append(preds_when_nan_valid)
logging.info("Video Name: {} {}".format(video_name, preds_when_nan_valid))
logging.info("TEST 2 complete.")
logging.info("#############################################################################")
def test3(self, analyse=[0.5], top_what_pred=1):
"""
"""
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
frame_lengths = [len(length) for length in self.image_reader()]
preds_all = self.pred_reader()
gt_all = self.gt_reader()
self.dict_iou = {video_name: {mode: [] for mode in preds_all.keys()} for video_name in video_names}
self.analyse = analyse
if "VOT2018_LT" in self.dataset_name:
self.target_classes = {'bicycle': 1, 'car9': 3}
elif "VOT2016" in self.dataset_name:
self.target_classes = {'ball1': 33, 'ball2': 33, 'basketball': 1, 'birds1': 15,
'birds2': 15, 'blanket': 1, 'bmx': 1, 'bolt1': 1, 'bolt2': 1, 'book': 74,
'car1': 3, 'car2': 3, 'fernando': 16, 'girl': 1, 'graduate': 1, 'gymnastics1': 1,
'gymnastics2': 1, 'gymnastics3': 1, 'gymnastics4': 1, 'handball1': 1, 'handball2': 1,
'iceskater1': 1, 'iceskater2': 1, 'motocross1': 4, 'motocross2': 4, 'nature': 15,
'pedestrian1': 1, 'pedestrian2': 1, 'racing': 3, 'road': 4, 'sheep': 19, 'singer1': 1,
'singer2': 1, 'soccer2': 1, 'traffic': 1, 'tunnel': 3, 'wiper': 3, 'matrix': 81,
'shaking': 81, 'singer3': 81, 'soccer1': 81, 'soldier': 81}
threads = []
for video_id, video_name in enumerate(video_names):
dict_ref = self.dict_iou[video_name]
t = threading.Thread(target=self.retreiver, args=(frame_lengths[video_id],
preds_all, gt_all, video_id, dict_ref))
threads.append(t)
logging.debug("IoU calculation process for '{}' has begun.".format(video_name))
for d, thread in enumerate(threads):
thread.start()
for thread in threads:
thread.join()
logging.debug("All threads have been successfully suspended.")
#############################################################################
### TEST 3
### By using the IoU thresholds and IoU rates, this part will separate if our
### bounding boxes are positive or negative.
#############################################################################
logging.info("#############################################################################")
logging.info("TEST 3: Assigning bboxes (+) or (-) based on if the prediction is "+\
"True or False depending on the IoU being higher than a specified threshold.")
self.different_iou_video_based_conf = {mode: {video_name: np.zeros((len(analyse), 12)) for video_name in video_names} for mode in preds_all.keys()}
self.all_videos_temporal_stats = {mode: {video_name: np.zeros((frame_lengths[video_id], 12)) for video_id, video_name in enumerate(video_names)} for mode in preds_all.keys()}
for video_id, video_name in enumerate(video_names):
for thr_id, iou_thr in enumerate(analyse):
counter_single_pred = 0
counter_multi_pred = 0
miss_detection = 0
fp_single_type_none = 0
tp_single = 0
fn_single = 0
fp_single = 0
tn_single = 0
fp_multi_type_none = 0
tp_multi = 0
fn_multi = 0
tn_multi = 0
fp_multi_1 = 0
fp_multi_2 = 0
for mode in preds_all.keys():
for d, framewise_preds in enumerate(self.dict_iou[video_name][mode]):
miss_detection_temp = 0
tp_single_temp = 0
fn_single_temp = 0
fp_single_temp = 0
tn_single_temp = 0
tp_multi_temp = 0
fn_multi_temp = 0
tn_multi_temp = 0
fp_multi_1_temp = 0
fp_multi_2_temp = 0
# When there is no prediction
if len(framewise_preds) is 0:
miss_detection_temp += 1
miss_detection += 1
# When there is single prediction
elif len(framewise_preds)==1 and self.heed_singles:
counter_single_pred += 1
if (framewise_preds[0][0] == None and self.record_nones):
fp_single_type_none += 1
elif framewise_preds[0][0]>=iou_thr:
if framewise_preds[0][2*top_what_pred-1]==self.target_classes[video_name]:
if self.counter_or_objectness is 'counter':
tp_single += 1
tp_single_temp += 1
elif self.counter_or_objectness is 'objectness':
tp_single += framewise_preds[0][2*top_what_pred]
tp_single_temp += framewise_preds[0][2*top_what_pred]
else:
fn_single += 1
fn_single_temp += 1
elif framewise_preds[0][0]<iou_thr:
if framewise_preds[0][2*top_what_pred-1]==self.target_classes[video_name]:
fp_single += 1
fp_single_temp += 1
else:
tn_single += 1
tn_single_temp += 1
# When there are multiple predictions
elif len(framewise_preds)>1 and self.heed_multiples:
idx = []
for n, pred in enumerate(framewise_preds):
counter_multi_pred+=1
if (pred[0] == None and self.record_nones):
fp_multi_type_none += 1
elif pred[0]<iou_thr:
if pred[2*top_what_pred-1]==self.target_classes[video_name]:
fp_multi_1 += 1
fp_multi_1_temp += 1
else:
tn_multi += 1
tn_multi_temp += 1
# Obtain the indices which might be true positive bboxes
elif pred[0]>=iou_thr:
if pred[2*top_what_pred-1]==self.target_classes[video_name]:
idx.append((n, pred[0]))
else:
fn_multi += 1
fn_multi_temp += 1
idx = sorted(idx, key=lambda x: x[1], reverse=True)
counter = 0
for n, pred in enumerate(framewise_preds):
if any(k==n for k, iou in idx):
if counter==0:
if self.counter_or_objectness is 'counter':
tp_multi += 1
tp_multi_temp += 1
elif self.counter_or_objectness is 'objectness':
tp_multi += framewise_preds[0][2*top_what_pred]
tp_multi_temp += framewise_preds[0][2*top_what_pred]
counter+=1
else:
fp_multi_2 += 1
fp_multi_2_temp += 1
self.all_videos_temporal_stats[mode][video_name][d] = d, tp_multi_temp, fp_multi_1_temp,\
fp_multi_2_temp, fn_multi_temp, tn_multi_temp, miss_detection_temp,\
tp_single_temp, fn_single_temp, fp_single_temp, tn_single_temp,\
tp_multi_temp+tp_single_temp
if self.record_nones:
logging.info("VName: {}\tIOU: {}\tMode: {}\tSingle Predictions FP from none GT: {}".format(video_name, iou_thr, mode, fp_single_type_none))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions FP from none GT: {}".format(video_name, iou_thr, mode, fp_multi_type_none))
if self.heed_singles:
logging.info("VName: {}\tIOU: {}\tMode: {}\tSingle Predictions TP: {}".format(video_name, iou_thr, mode, tp_single))
logging.info("VName: {}\tIOU: {}\tMode: {}\tSingle Predictions FP: {}".format(video_name, iou_thr, mode, fp_single))
logging.info("VName: {}\tIOU: {}\tMode: {}\tSingle Predictions FN: {}".format(video_name, iou_thr, mode, fn_single))
logging.info("VName: {}\tIOU: {}\tMode: {}\tSingle Predictions TN: {}".format(video_name, iou_thr, mode, tn_single))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions TP: {}".format(video_name, iou_thr, mode, tp_multi))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions FP Type I: {}".format(video_name, iou_thr, mode, fp_multi_1))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions FP Type II: {}".format(video_name, iou_thr, mode, fp_multi_2))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions FN: {}".format(video_name, iou_thr, mode, fn_multi))
logging.info("VName: {}\tIOU: {}\tMode: {}\tMulti Predictions TN: {}".format(video_name, iou_thr, mode, tn_multi))
self.different_iou_video_based_conf[mode][video_name][thr_id] = iou_thr, fp_single_type_none, fp_multi_type_none,\
tp_single, fp_single, fn_single, tn_single, tp_multi, fp_multi_1, fp_multi_2, fn_multi, tn_multi
logging.info("\t\t\t\tMode: {}\tNumber of multiple predictions for {}: {}".format(mode, video_name, counter_multi_pred))
logging.info("\t\t\t\tMode: {}\tNumber of single predictions for {}: {}".format(mode, video_name, counter_single_pred))
logging.info("\t\t\t\tMode: {}\tTotal number of frames for {} is: {}".format(mode, video_name, frame_lengths[video_id]))
logging.info("TEST 3 complete.")
logging.info("#############################################################################")
    def iouth_count_graph(self):
        """Plot confusion counts versus IoU threshold for every video.

        Reads ``self.different_iou_video_based_conf`` (built by test3), whose
        rows are ``[iou_thr, fp_single_none, fp_multi_none, tp_single,
        fp_single, fn_single, tn_single, tp_multi, fp_multi_1, fp_multi_2,
        fn_multi, tn_multi]``, and saves one figure per video under
        ``self.log_analysis``.
        """
        for mode in self.different_iou_video_based_conf.keys():
            for video_name in self.different_iou_video_based_conf[mode].keys():
                # Column 0 holds the thresholds; the rest are the counts.
                iou_thr = self.different_iou_video_based_conf[mode][video_name][:, 0]
                features = self.different_iou_video_based_conf[mode][video_name][:, 1:]
                # NOTE(review): `fig` is never used afterwards and figures are
                # not closed, so memory grows with the number of videos.
                fig = plt.figure()
                if self.record_nones:
                    plt.plot(iou_thr, features[:, 0], label="FP Single w/ GT None")
                    plt.plot(iou_thr, features[:, 1], label="FP Multiple w/ GT None")
                if self.heed_singles:
                    plt.plot(iou_thr, features[:, 2], label="TP Single")
                    plt.plot(iou_thr, features[:, 3], label="FP Single")
                    plt.plot(iou_thr, features[:, 4], label="FN Single")
                    plt.plot(iou_thr, features[:, 5], label="TN Single")
                if self.heed_multiples:
                    plt.plot(iou_thr, features[:, 6], label="TP Multiple")
                    plt.plot(iou_thr, features[:, 7], label="FP Multiple Type I")
                    plt.plot(iou_thr, features[:, 8], label="FP Multiple Type II")
                    plt.plot(iou_thr, features[:, 9], label="FN Multiple")
                    plt.plot(iou_thr, features[:, 10], label="TN Multiple")
                if self.all_tp:
                    plt.plot(iou_thr, features[:, 2]+features[:, 6], label="TP Single+Multi")
                # Only draw a legend when at least one curve was plotted.
                if self.record_nones or self.heed_singles or self.heed_multiples or self.all_tp:
                    plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)
                plt.suptitle("Test Statistics for {}".format(video_name))
                plt.ylabel('Number of predictions satisfying the condition')
                plt.xlabel('IoU Threshold')
                plt.savefig(self.log_analysis+video_name+".jpg")
def frame_count_stats_pdf_graph(self):
#assert len(self.analyse)==1
np.set_printoptions(threshold=np.nan)
print(self.all_videos_temporal_stats)
time.sleep(55)
for mode in self.all_videos_temporal_stats.keys():
for video_name in self.all_videos_temporal_stats[mode].keys():
frame_ids = self.all_videos_temporal_stats[mode][video_name][:, 0]
tp_multi = self.all_videos_temporal_stats[mode][video_name][:, 1]
fp_multi_1 = self.all_videos_temporal_stats[mode][video_name][:, 2]
fp_multi_2 = self.all_videos_temporal_stats[mode][video_name][:, 3]
fn_multi = self.all_videos_temporal_stats[mode][video_name][:, 4]
tn_multi = self.all_videos_temporal_stats[mode][video_name][:, 5]
miss_det = self.all_videos_temporal_stats[mode][video_name][:, 6]
tp_single = self.all_videos_temporal_stats[mode][video_name][:, 7]
fn_single = self.all_videos_temporal_stats[mode][video_name][:, 8]
fp_single = self.all_videos_temporal_stats[mode][video_name][:, 9]
tn_single = self.all_videos_temporal_stats[mode][video_name][:, 10]
tp_all = self.all_videos_temporal_stats[mode][video_name][:, 11]
if self.heed_singles:
label = "TP Single"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_tpsingle_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, tp_single, label, title, save_dir)
label = "FN Single"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_fnsingle_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, fn_single, label, title, save_dir)
label = "FP Single"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_fpsingle_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, fp_single, label, title, save_dir)
label = "TN Single"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_tnsingle_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, tn_single, label, title, save_dir)
if self.heed_multiples:
label = "TP Multiple"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_tpmulti_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, tp_multi, label, title, save_dir)
label = "FP Type I Multiple"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_fpt1multi_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, fp_multi_1, label, title, save_dir)
label = "FP Type II Multiple"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_fpt2multi_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, fp_multi_2, label, title, save_dir)
label = "FN Multiple"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_fnmulti_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, fn_multi, label, title, save_dir)
label = "TN Multiple"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_tnmulti_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, tn_multi, label, title, save_dir)
label = "Miss Detection"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_md_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, miss_det, label, title, save_dir)
if self.all_tp:
label = "TP All"
title = label + " for {}".format(video_name)
save_dir = self.log_analysis+video_name+\
"_temporal_pdf_tpall_iou{}".format(self.analyse[0])+".jpg"
self.figure_function(video_name, frame_ids, tp_all, label, title, save_dir)
def figure_function(self, video_name, frame_ids, data_points, label, title, save_dir):
plt.figure()
plt.plot(frame_ids, data_points, label=label)
plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)
plt.suptitle(title)
if self.counter_or_objectness is 'counter':
plt.ylabel('Number of predictions satisfying the condition (counter)')
elif self.counter_or_objectness is 'objectness':
plt.ylabel('Number of predictions satisfying the condition (obj. score)')
plt.xlabel('Frame #')
plt.savefig(save_dir)
plt.close()
def conf_matrix(self, top_what_pred=1, one_gt_plot=True):
## TO DO ##
## 1) Write the images onto a file.
## 2) Save the numbers into a CSV file.
"""
Constructs the confusion matrix given the
"""
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
for video_id, video_name in enumerate(video_names):
cm = np.zeros((81, 81), dtype=np.int32)
gt_index = self.target_classes[video_name]
for preds in self.dict_iou[video_name]:
for pred in preds:
pred_index = pred[2*top_what_pred-1]
cm[gt_index][pred_index] += 1
if one_gt_plot:
cm = cm[gt_index, :][np.newaxis]
# self.plot_confusion_matrix(cm, one_gt_plot, true_lbl=gt_index)
self.plot_confusion_matrix(cm, one_gt_plot)
    def plot_confusion_matrix(self, cm, one_gt_plot,
                              true_lbl=None,
                              classes=None,
                              normalize=False,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues,
                              numbers=False):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.

        Args:
            cm: 2-D array of counts (possibly a single row).
            one_gt_plot: NOTE(review): accepted but never used here.
            true_lbl: If given, shown in the y-axis label.
            classes: Optional class names for the axis ticks.
            normalize: Row-normalize the matrix before plotting.
            title: Figure title.
            cmap: Matplotlib colormap.
            numbers: Overlay each cell's value as text.
        """
        if normalize:
            # Row-wise normalization: each row sums to 1.
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
            # print("Normalized confusion matrix")
        else:
            # print('Confusion matrix, without normalization')
            pass
        plt.figure()
        plt.yticks([])
        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        if classes is not None:
            tick_marks = np.arange(len(classes))
            plt.xticks(tick_marks, classes, rotation=45)
            plt.yticks(tick_marks, classes)
        if numbers:
            # Write each count into its cell, switching text color at half
            # the maximum for readability on the dark end of the colormap.
            fmt = '.2f' if normalize else 'd'
            thresh = cm.max() / 2.
            for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
                plt.text(j, i, format(cm[i, j], fmt),
                         horizontalalignment="center",
                         color="white" if cm[i, j] > thresh else "black")
        plt.tight_layout()
        if true_lbl is not None:
            plt.ylabel('True label {}'.format(true_lbl))
        else:
            plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.show()
def iou(self, bbox1, bbox2):
"""
Calculates the IoU of 2 bounding boxes.
Parameters: bbox1, bbox2: list or numpy array of bounding box coordinates.
The input should contain the top-left corner's x and y coordinates and
width and height of the bounding boxes.
Assertations: width and height informations of bbox1 and bbox2 should be
larger than 0.
Returns: iou: A floating point decimal representing the IoU ratio, which
is the division of bounding box areas of intersection to their union.
"""
x1, y1, x1_t, y1_t = bbox1
w1 = x1_t - x1
h1 = y1_t - y1
x2, y2, x2_t, y2_t = bbox2
w2 = x2_t - x2
h2 = y2_t - y2
assert w1 and w2 > 0
assert w1 and h2 > 0
iou = 0
if (((x1>x2 and x1<x2+w2) or (x1+w1>x2 and x1+w1<x2+w2) or
(x2>x1 and x2<x1+w1) or (x2+w2>x1 and x2+w2<x1+w1)) and
((y1>y2 and y1<y2+h2) or (y1+h1>y2 and y1+h1<y2+h2) or
(y2>y1 and y2<y1+h1) or (y2+h2>y1 and y2+h2<y1+h1))):
iou_xmin = float(max(x1, x2))
iou_xmax = float(min(x1+w1, x2+w2))
iou_ymin = float(max(y1, y2))
iou_ymax = float(min(y1+h1, y2+h2))
intersection_area = (iou_ymax - iou_ymin)*(iou_xmax - iou_xmin)
total_area = float(w1)*float(h1) + float(w2)*float(h2) - intersection_area
iou = intersection_area/total_area
return iou
def image_reader(self):
ic_list = []
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
if self.dataset_name != "MSPR_Dataset":
for video in video_names:
frame_names = os.listdir(os.path.join(
self.datasets_dir, video))
frame_names = filter(lambda x: x[-4:] == ".jpg",
frame_names)
frame_names = sorted(frame_names, key=
lambda x: int(x[:-4]))
frame_names = [os.path.join(self.datasets_dir, video,
frame_name) for frame_name in frame_names]
ic_list.append(frame_names)
else:
for video in video_names:
frame_names = os.listdir(os.path.join(
self.datasets_dir, video, "Images"))
frame_names = filter(lambda x: x[-4:] == ".jpg",
frame_names)
frame_names = sorted(frame_names, key=
lambda x: int(x[:-4]))
frame_names = [os.path.join(self.datasets_dir, video, "Images",
frame_name) for frame_name in frame_names]
ic_list.append(frame_names)
return ic_list
def gt_reader(self):
ground_truths = []
video_names = os.listdir(self.datasets_dir)
video_names = sorted(video_names)
for video in video_names:
video_gts = []
if "VOT2018" and "LT" in self.dataset_name:
gt_path = os.path.join(self.datasets_dir, video, self.ground_truth_filename)
with open(gt_path, 'r') as f:
temp_input_lines = f.read().split("\n")[:-1]
logging.debug("Parsing ground truths of {} as VOT2018 format.".format(video))
for d, line in enumerate(temp_input_lines):
try:
x, y, w, h = map(float, line.split(","))
except:
logging.error("GT for {} in {} dataset couldn't have been parsed.".format(
video, self.dataset_name))
video_gts.append(np.array([d+1, x, y, x+w, y+h], dtype=np.float32))
elif "VOT2016" in self.dataset_name:
gt_path = os.path.join(self.datasets_dir, video, self.ground_truth_filename)
with open(gt_path, 'r') as f:
temp_input_lines = f.read().split("\n")[:-1]
logging.debug("Parsing ground truths of {} as VOT2016 format.".format(video))
for d, line in enumerate(temp_input_lines):
try:
x1, y1, x2, y2, x3, y3, x4, y4 = map(float, line.split(","))
x, y, w, h = self.vot16_to_18(x1, y1, x2, y2, x3, y3, x4, y4)
except:
logging.error("GT for {} in {} dataset couldn't have been parsed.".format(video, self.dataset_name))
video_gts.append(np.array([d+1, x, y, x+w, y+h], dtype=np.float32))
elif self.dataset_name == "MSPR_Dataset":
gt_path = os.path.join(self.datasets_dir, video, "Images", self.ground_truth_filename)
with open(gt_path, 'r') as f:
temp_input_lines = f.read().split("\n")[:-1]
logging.debug("Parsing ground truths of {} as MSPR Dataset format.".format(video))
for line in temp_input_lines:
try:
d, x1, y1, x2, y2, x3, y3, x4, y4 = map(float, line.split(","))
x, y, w, h = self.vot16_to_18(x1, y1, x2, y2, x3, y3, x4, y4)
except:
logging.error("GT for {} in {} dataset couldn't have been parsed.".format(video, self.dataset_name))
video_gts.append(np.array([d, x, y, x+w, y+h], dtype=np.float32))
else:
logging.warning("{} not found.".format(self.dataset_name))
ground_truths.append(video_gts)
return ground_truths
    def pred_reader(self):
        """Read tracker prediction txt files for every video and every mode.

        One directory per prediction mode is expected in ``self.preds_dir``
        (e.g. Particle/ and RPN/), each holding one txt file per video.

        Returns:
            dict: mode name -> list (one entry per video) of lists of
            per-line numpy records.
        """
        # Key each mode by the second-to-last path component of its directory.
        # NOTE(review): keys come from sorted(self.preds_dir) while files are
        # read from self.preds_dir[mode_id]; if preds_dir is not already
        # sorted, keys and directories would mismatch — verify callers pass a
        # sorted list.
        preds_all = {name.split("/")[-2]: [] for name in sorted(self.preds_dir)}
        # Assumes every mode directory contains the same video file names.
        video_names = os.listdir(self.preds_dir[0])
        video_names = sorted(video_names)
        for video in video_names:
            for mode_id, name in enumerate(preds_all.keys()):
                logging.debug("Parsing predictions of {}".format(video))
                pred_sample_path = os.path.join(self.preds_dir[mode_id], video)
                video_preds = []
                with open(pred_sample_path, 'r') as f:
                    # Drop the trailing empty string left by the final newline.
                    temp_input_lines = f.read().split("\n")[:-1]
                # Handling the first line seen in result txt files with top-5 probs.
                if self.number_top == 5:
                    temp_input_lines = temp_input_lines[1:]
                for id, line in enumerate(temp_input_lines):  # `id` shadows the builtin
                    if self.number_top == 1:
                        # Tab-separated: frame id, x, y, w, h, <unused>, label.
                        fr_id, x, y, w, h, _, label = map(float, line.split("\t"))
                        video_preds.append(np.array([fr_id, x, y, x+w, y+h, label], dtype=np.int16))
                    elif self.number_top == 5:
                        try:
                            # Tab-separated: frame id, box, two unused fields,
                            # then five (label, probability) pairs.
                            fr_id, x, y, w, h, _, _, label1, prob1, label2, prob2, label3, prob3, label4,\
                            prob4, label5, prob5 = map(float, line.split("\t"))
                        except:
                            # +2: one for the skipped header line, one for 1-based numbering.
                            raise(AssertionError("video name: {} line number: {}".format(video, id+2)))
                        # Structured record: i4 for ids/boxes/labels, f4 for probabilities.
                        video_preds.append(np.array([(fr_id, x, y, x+w, y+h, label1, prob1, label2,
                                                    prob2, label3, prob3, label4, prob4, label5, prob5)],
                                                   dtype=[('', 'i4'),('', 'i4'),('', 'i4'),('', 'i4'),('', 'i4'),
                                                          ('', 'i4'),('', 'f4'),('', 'i4'),('', 'f4'),('', 'i4'),
                                                          ('', 'f4'),('', 'i4'),('', 'f4'),('', 'i4'),('', 'f4')]))
                preds_all[name].append(video_preds)
        return preds_all
def vot16_to_18(self, x1, y1, x2, y2, x3, y3, x4, y4):
xmin = min(x1, x2, x3, x4)
ymin = min(y1, y2, y3, y4)
xmax = max(x1, x2, x3, x4)
ymax = max(y1, y2, y3, y4)
w = xmax - xmin
h = ymax - ymin
return xmin, ymin, w, h
if __name__ == "__main__":
    # Ad-hoc experiment driver. Each "test-case" section below configures one
    # analysis run; assignments simply overwrite earlier ones, so only the
    # last uncommented section ("test-case particles") actually takes effect.
    # test-case -1
    # NOTE(review): everything in this section is dead — it is overridden by
    # the "test-case particles" section further down.
    images_dir = "VOT2016/"
    dataset_name = "VOT2016_face_Subset"
    datasets_dir = "Datasets/"+dataset_name+"/"+images_dir+"/"
    ground_truth_file_name = "groundtruth.txt"
    preds_dir = "logs/Evaluations/MASK_VOT2018_Subsets/stage1-10of30/"
    # preds_dir = "logs/Evaluations/MASK_VOT2016FACE_184imgtrain/"
    number_top = 5
    log_analysis = "analysistop2/"
    debug = True
    # test-case 0
    # dataset_name = "VOT2016_Subset_Subset"
    # datasets_dir = "Datasets/"+dataset_name+"/"
    # ground_truth_file_name = "groundtruth.txt"
    # preds_dir = "logs/Evaluations/MASK_VOT2016_Subset_final_th0.01_top5prob/"
    # number_top = 5
    # log_analysis = "analysis_temp/"
    # debug = True
    # test-case 1
    # dataset_name = "VOT2016_Subset"
    # datasets_dir = "Datasets/"+dataset_name+"/"
    # ground_truth_file_name = "groundtruth.txt"
    # preds_dir = "logs/Evaluations/MASK_VOT2016_final_th0/"
    # number_top = 1
    # test-case 2
    # dataset_name = "VOT2018_LT_Subset"
    # datasets_dir = "Datasets/"+dataset_name+"/"
    # ground_truth_file_name = "groundtruth.txt"
    # preds_dir = "logs/Evaluations/MASK_VOT2018_Subsets/"+\
    #            "MASK_VOT2018_final_th0.01_detnms_th0.4/"
    # number_top = 1
    # test-case 3
    # dataset_name = "VOT2018_LT"
    # datasets_dir = "Datasets/"+dataset_name+"/"
    # ground_truth_file_name = "groundtruth.txt"
    # preds_dir = "logs/Evaluations/MASK_VOT2018_final_th0.001/"
    # number_top = 1
    # test-case particles
    images_dir = "VOT2016/"
    dataset_name = "VOT2016_hard_Subset"
    datasets_dir = "Datasets/"+dataset_name+"/"+images_dir+"/"
    ground_truth_file_name = "groundtruth.txt"
    # Two prediction modes compared side by side: particle filter vs. RPN.
    preds_dir = []
    preds_dir.append("logs/Evaluations/VOT2018_hard_Subset/Particle/")
    preds_dir.append("logs/Evaluations/VOT2018_hard_Subset/RPN/")
    # preds_dir = "logs/Evaluations/MASK_VOT2016FACE_184imgtrain/"
    number_top = 5
    log_analysis = "analysisparticle/"
    debug = True
    analyse = Analysis(datasets_dir, ground_truth_file_name, number_top, log_analysis,
                       debug=debug, preds_dir=preds_dir, dataset_name=dataset_name,
                       heed_singles=True, heed_multiples=True, all_tp=True,
                       counter_or_objectness='counter')
    # analyse.video_player(save_frames=True, particle=True)
    # analyse.test1()
    # analyse.test2()
    # Sweep IoU thresholds from 0.001 to 1.0 in steps of 0.001.
    thr = list(np.arange(0.001, 1.001, 0.001))
    analyse.test3(analyse=thr)
    analyse.frame_count_stats_pdf_graph()
    # for top_x in range(1, 6):
    #     log_analysis = "logs/analysis_face/analysistop{}/".format(top_x)
    #     analyse = Analysis(datasets_dir, ground_truth_file_name, number_top, log_analysis,
    #                        debug=debug, preds_dir=preds_dir, dataset_name=dataset_name,
    #                        heed_singles=True, heed_multiples=True, all_tp=True,
    #                        counter_or_objectness='objectness')
    #
    #     analyse.test3(analyse=[0.5], top_what_pred=top_x)
    #     analyse.frame_count_stats_pdf_graph()
    # analyse.test3(analyse=[0.9])
    # analyse.conf_matrix()
| [
"matplotlib.pyplot.title",
"os.mkdir",
"matplotlib.pyplot.suptitle",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.arange",
"cv2.rectangle",
"cv2.imshow",
"matplotlib.pyplot.tight_layout",
"os.path.join",
"numpy.set_printoptions",
"matplotlib.pyplot.close",
"matplotlib.pyplot.imshow",
... | [((102, 123), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (116, 123), False, 'import matplotlib\n'), ((2128, 2157), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (2138, 2157), False, 'import os\n'), ((6385, 6414), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (6395, 6414), False, 'import os\n'), ((6964, 7067), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (6976, 7067), False, 'import logging\n'), ((7066, 7132), 'logging.info', 'logging.info', (['"""TEST 1: How many GT entries were annotated as nan?"""'], {}), "('TEST 1: How many GT entries were annotated as nan?')\n", (7078, 7132), False, 'import logging\n'), ((7690, 7722), 'logging.info', 'logging.info', (['"""TEST 1 complete."""'], {}), "('TEST 1 complete.')\n", (7702, 7722), False, 'import logging\n'), ((7731, 7834), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (7743, 7834), False, 'import logging\n'), ((8166, 8195), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (8176, 8195), False, 'import os\n'), ((8295, 8398), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (8307, 8398), False, 'import logging\n'), ((8397, 8524), 'logging.info', 'logging.info', (["('TEST 2: For the frames where there is no GT defined, how many of ' +\n 'them have at least 1 prediction?')"], {}), "(\n 'TEST 2: For the frames where there is no GT defined, how many of ' +\n 'them have at least 1 
prediction?')\n", (8409, 8524), False, 'import logging\n'), ((9706, 9738), 'logging.info', 'logging.info', (['"""TEST 2 complete."""'], {}), "('TEST 2 complete.')\n", (9718, 9738), False, 'import logging\n'), ((9747, 9850), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (9759, 9850), False, 'import logging\n'), ((9951, 9980), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (9961, 9980), False, 'import os\n'), ((11796, 11858), 'logging.debug', 'logging.debug', (['"""All threads have been successfully suspended."""'], {}), "('All threads have been successfully suspended.')\n", (11809, 11858), False, 'import logging\n'), ((12201, 12304), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (12213, 12304), False, 'import logging\n'), ((12303, 12479), 'logging.info', 'logging.info', (["('TEST 3: Assigning bboxes (+) or (-) based on if the prediction is ' +\n 'True or False depending on the IoU being higher than a specified threshold.'\n )"], {}), "(\n 'TEST 3: Assigning bboxes (+) or (-) based on if the prediction is ' +\n 'True or False depending on the IoU being higher than a specified threshold.'\n )\n", (12315, 12479), False, 'import logging\n'), ((20877, 20909), 'logging.info', 'logging.info', (['"""TEST 3 complete."""'], {}), "('TEST 3 complete.')\n", (20889, 20909), False, 'import logging\n'), ((20918, 21021), 'logging.info', 'logging.info', (['"""#############################################################################"""'], {}), "(\n '#############################################################################'\n )\n", (20930, 21021), False, 'import logging\n'), ((23049, 23086), 
'numpy.set_printoptions', 'np.set_printoptions', ([], {'threshold': 'np.nan'}), '(threshold=np.nan)\n', (23068, 23086), True, 'import numpy as np\n'), ((23141, 23155), 'time.sleep', 'time.sleep', (['(55)'], {}), '(55)\n', (23151, 23155), False, 'import time\n'), ((28417, 28429), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (28427, 28429), True, 'import matplotlib.pyplot as plt\n'), ((28438, 28483), 'matplotlib.pyplot.plot', 'plt.plot', (['frame_ids', 'data_points'], {'label': 'label'}), '(frame_ids, data_points, label=label)\n', (28446, 28483), True, 'import matplotlib.pyplot as plt\n'), ((28492, 28554), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(1)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.0)\n', (28502, 28554), True, 'import matplotlib.pyplot as plt\n'), ((28562, 28581), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (['title'], {}), '(title)\n', (28574, 28581), True, 'import matplotlib.pyplot as plt\n'), ((28868, 28889), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Frame #"""'], {}), "('Frame #')\n", (28878, 28889), True, 'import matplotlib.pyplot as plt\n'), ((28898, 28919), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_dir'], {}), '(save_dir)\n', (28909, 28919), True, 'import matplotlib.pyplot as plt\n'), ((28928, 28939), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (28937, 28939), True, 'import matplotlib.pyplot as plt\n'), ((29212, 29241), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (29222, 29241), False, 'import os\n'), ((30581, 30593), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (30591, 30593), True, 'import matplotlib.pyplot as plt\n'), ((30602, 30616), 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), '([])\n', (30612, 30616), True, 'import matplotlib.pyplot as plt\n'), ((30625, 30675), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cm'], {'interpolation': '"""nearest"""', 
'cmap': 'cmap'}), "(cm, interpolation='nearest', cmap=cmap)\n", (30635, 30675), True, 'import matplotlib.pyplot as plt\n'), ((30684, 30700), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (30693, 30700), True, 'import matplotlib.pyplot as plt\n'), ((30709, 30723), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (30721, 30723), True, 'import matplotlib.pyplot as plt\n'), ((31282, 31300), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (31298, 31300), True, 'import matplotlib.pyplot as plt\n'), ((31450, 31479), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Predicted label"""'], {}), "('Predicted label')\n", (31460, 31479), True, 'import matplotlib.pyplot as plt\n'), ((31488, 31498), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31496, 31498), True, 'import matplotlib.pyplot as plt\n'), ((33104, 33133), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (33114, 33133), False, 'import os\n'), ((34409, 34438), 'os.listdir', 'os.listdir', (['self.datasets_dir'], {}), '(self.datasets_dir)\n', (34419, 34438), False, 'import os\n'), ((37366, 37395), 'os.listdir', 'os.listdir', (['self.preds_dir[0]'], {}), '(self.preds_dir[0])\n', (37376, 37395), False, 'import os\n'), ((42123, 42153), 'numpy.arange', 'np.arange', (['(0.001)', '(1.001)', '(0.001)'], {}), '(0.001, 1.001, 0.001)\n', (42132, 42153), True, 'import numpy as np\n'), ((1015, 1047), 'os.path.isdir', 'os.path.isdir', (['self.log_analysis'], {}), '(self.log_analysis)\n', (1028, 1047), False, 'import os\n'), ((1061, 1088), 'os.mkdir', 'os.mkdir', (['self.log_analysis'], {}), '(self.log_analysis)\n', (1069, 1088), False, 'import os\n'), ((1437, 1470), 'logging.debug', 'logging.debug', (['"""Analysis Started"""'], {}), "('Analysis Started')\n", (1450, 1470), False, 'import logging\n'), ((4942, 4958), 'time.sleep', 'time.sleep', (['(5555)'], {}), '(5555)\n', (4952, 4958), False, 'import time\n'), ((11395, 11509), 
'threading.Thread', 'threading.Thread', ([], {'target': 'self.retreiver', 'args': '(frame_lengths[video_id], preds_all, gt_all, video_id, dict_ref)'}), '(target=self.retreiver, args=(frame_lengths[video_id],\n preds_all, gt_all, video_id, dict_ref))\n', (11411, 11509), False, 'import threading\n'), ((28646, 28716), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of predictions satisfying the condition (counter)"""'], {}), "('Number of predictions satisfying the condition (counter)')\n", (28656, 28716), True, 'import matplotlib.pyplot as plt\n'), ((29362, 29396), 'numpy.zeros', 'np.zeros', (['(81, 81)'], {'dtype': 'np.int32'}), '((81, 81), dtype=np.int32)\n', (29370, 29396), True, 'import numpy as np\n'), ((30818, 30862), 'matplotlib.pyplot.xticks', 'plt.xticks', (['tick_marks', 'classes'], {'rotation': '(45)'}), '(tick_marks, classes, rotation=45)\n', (30828, 30862), True, 'import matplotlib.pyplot as plt\n'), ((30875, 30906), 'matplotlib.pyplot.yticks', 'plt.yticks', (['tick_marks', 'classes'], {}), '(tick_marks, classes)\n', (30885, 30906), True, 'import matplotlib.pyplot as plt\n'), ((31417, 31441), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True label"""'], {}), "('True label')\n", (31427, 31441), True, 'import matplotlib.pyplot as plt\n'), ((2314, 2325), 'time.time', 'time.time', ([], {}), '()\n', (2323, 2325), False, 'import time\n'), ((2417, 2434), 'cv2.imread', 'cv2.imread', (['frame'], {}), '(frame)\n', (2427, 2434), False, 'import cv2\n'), ((3955, 4043), 'cv2.putText', 'cv2.putText', (['image', 'video_names[n]', '(0, 20)', 'font', '(0.8)', '(0, 0, 255)', '(2)', 'cv2.LINE_AA'], {}), '(image, video_names[n], (0, 20), font, 0.8, (0, 0, 255), 2, cv2.\n LINE_AA)\n', (3966, 4043), False, 'import cv2\n'), ((4216, 4244), 'cv2.imshow', 'cv2.imshow', (['"""display"""', 'image'], {}), "('display', image)\n", (4226, 4244), False, 'import cv2\n'), ((4569, 4583), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4580, 4583), False, 'import cv2\n'), 
((7356, 7395), 'numpy.isnan', 'np.isnan', (['gt_all[video_id][frame_id][1]'], {}), '(gt_all[video_id][frame_id][1])\n', (7364, 7395), True, 'import numpy as np\n'), ((12705, 12744), 'numpy.zeros', 'np.zeros', (['(frame_lengths[video_id], 12)'], {}), '((frame_lengths[video_id], 12))\n', (12713, 12744), True, 'import numpy as np\n'), ((21391, 21403), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (21401, 21403), True, 'import matplotlib.pyplot as plt\n'), ((28786, 28859), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of predictions satisfying the condition (obj. score)"""'], {}), "('Number of predictions satisfying the condition (obj. score)')\n", (28796, 28859), True, 'import matplotlib.pyplot as plt\n'), ((34625, 34691), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', 'self.ground_truth_filename'], {}), '(self.datasets_dir, video, self.ground_truth_filename)\n', (34637, 34691), False, 'import os\n'), ((37643, 37687), 'os.path.join', 'os.path.join', (['self.preds_dir[mode_id]', 'video'], {}), '(self.preds_dir[mode_id], video)\n', (37655, 37687), False, 'import os\n'), ((21462, 21525), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 0]'], {'label': '"""FP Single w/ GT None"""'}), "(iou_thr, features[:, 0], label='FP Single w/ GT None')\n", (21470, 21525), True, 'import matplotlib.pyplot as plt\n'), ((21546, 21611), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 1]'], {'label': '"""FP Multiple w/ GT None"""'}), "(iou_thr, features[:, 1], label='FP Multiple w/ GT None')\n", (21554, 21611), True, 'import matplotlib.pyplot as plt\n'), ((21671, 21723), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 2]'], {'label': '"""TP Single"""'}), "(iou_thr, features[:, 2], label='TP Single')\n", (21679, 21723), True, 'import matplotlib.pyplot as plt\n'), ((21744, 21796), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 3]'], {'label': '"""FP Single"""'}), "(iou_thr, features[:, 3], 
label='FP Single')\n", (21752, 21796), True, 'import matplotlib.pyplot as plt\n'), ((21817, 21869), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 4]'], {'label': '"""FN Single"""'}), "(iou_thr, features[:, 4], label='FN Single')\n", (21825, 21869), True, 'import matplotlib.pyplot as plt\n'), ((21890, 21942), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 5]'], {'label': '"""TN Single"""'}), "(iou_thr, features[:, 5], label='TN Single')\n", (21898, 21942), True, 'import matplotlib.pyplot as plt\n'), ((22004, 22058), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 6]'], {'label': '"""TP Multiple"""'}), "(iou_thr, features[:, 6], label='TP Multiple')\n", (22012, 22058), True, 'import matplotlib.pyplot as plt\n'), ((22079, 22140), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 7]'], {'label': '"""FP Multiple Type I"""'}), "(iou_thr, features[:, 7], label='FP Multiple Type I')\n", (22087, 22140), True, 'import matplotlib.pyplot as plt\n'), ((22161, 22223), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 8]'], {'label': '"""FP Multiple Type II"""'}), "(iou_thr, features[:, 8], label='FP Multiple Type II')\n", (22169, 22223), True, 'import matplotlib.pyplot as plt\n'), ((22244, 22298), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 9]'], {'label': '"""FN Multiple"""'}), "(iou_thr, features[:, 9], label='FN Multiple')\n", (22252, 22298), True, 'import matplotlib.pyplot as plt\n'), ((22319, 22374), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', 'features[:, 10]'], {'label': '"""TN Multiple"""'}), "(iou_thr, features[:, 10], label='TN Multiple')\n", (22327, 22374), True, 'import matplotlib.pyplot as plt\n'), ((22428, 22503), 'matplotlib.pyplot.plot', 'plt.plot', (['iou_thr', '(features[:, 2] + features[:, 6])'], {'label': '"""TP Single+Multi"""'}), "(iou_thr, features[:, 2] + features[:, 6], label='TP Single+Multi')\n", (22436, 22503), True, 'import matplotlib.pyplot as 
plt\n'), ((22620, 22682), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'bbox_to_anchor': '(1.05, 1)', 'loc': '(1)', 'borderaxespad': '(0.0)'}), '(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.0)\n', (22630, 22682), True, 'import matplotlib.pyplot as plt\n'), ((22780, 22840), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Number of predictions satisfying the condition"""'], {}), "('Number of predictions satisfying the condition')\n", (22790, 22840), True, 'import matplotlib.pyplot as plt\n'), ((22861, 22888), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""IoU Threshold"""'], {}), "('IoU Threshold')\n", (22871, 22888), True, 'import matplotlib.pyplot as plt\n'), ((22909, 22961), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(self.log_analysis + video_name + '.jpg')"], {}), "(self.log_analysis + video_name + '.jpg')\n", (22920, 22961), True, 'import matplotlib.pyplot as plt\n'), ((33304, 33342), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video'], {}), '(self.datasets_dir, video)\n', (33316, 33342), False, 'import os\n'), ((33592, 33642), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', 'frame_name'], {}), '(self.datasets_dir, video, frame_name)\n', (33604, 33642), False, 'import os\n'), ((33844, 33892), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', '"""Images"""'], {}), "(self.datasets_dir, video, 'Images')\n", (33856, 33892), False, 'import os\n'), ((34142, 34202), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', '"""Images"""', 'frame_name'], {}), "(self.datasets_dir, video, 'Images', frame_name)\n", (34154, 34202), False, 'import os\n'), ((35406, 35472), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', 'self.ground_truth_filename'], {}), '(self.datasets_dir, video, self.ground_truth_filename)\n', (35418, 35472), False, 'import os\n'), ((4614, 4625), 'time.time', 'time.time', ([], {}), '()\n', (4623, 4625), False, 'import time\n'), ((35279, 35334), 'numpy.array', 'np.array', (['[d + 
1, x, y, x + w, y + h]'], {'dtype': 'np.float32'}), '([d + 1, x, y, x + w, y + h], dtype=np.float32)\n', (35287, 35334), True, 'import numpy as np\n'), ((36257, 36333), 'os.path.join', 'os.path.join', (['self.datasets_dir', 'video', '"""Images"""', 'self.ground_truth_filename'], {}), "(self.datasets_dir, video, 'Images', self.ground_truth_filename)\n", (36269, 36333), False, 'import os\n'), ((3276, 3348), 'cv2.rectangle', 'cv2.rectangle', (['image', 'pred_pt1', 'pred_pt2'], {'color': '(0, 0, 255)', 'thickness': '(2)'}), '(image, pred_pt1, pred_pt2, color=(0, 0, 255), thickness=2)\n', (3289, 3348), False, 'import cv2\n'), ((4318, 4365), 'os.path.join', 'os.path.join', (['self.log_analysis', 'video_names[n]'], {}), '(self.log_analysis, video_names[n])\n', (4330, 4365), False, 'import os\n'), ((4401, 4448), 'os.path.join', 'os.path.join', (['self.log_analysis', 'video_names[n]'], {}), '(self.log_analysis, video_names[n])\n', (4413, 4448), False, 'import os\n'), ((36125, 36180), 'numpy.array', 'np.array', (['[d + 1, x, y, x + w, y + h]'], {'dtype': 'np.float32'}), '([d + 1, x, y, x + w, y + h], dtype=np.float32)\n', (36133, 36180), True, 'import numpy as np\n'), ((38278, 38338), 'numpy.array', 'np.array', (['[fr_id, x, y, x + w, y + h, label]'], {'dtype': 'np.int16'}), '([fr_id, x, y, x + w, y + h, label], dtype=np.int16)\n', (38286, 38338), True, 'import numpy as np\n'), ((2745, 2813), 'cv2.rectangle', 'cv2.rectangle', (['image', 'gt_pt1', 'gt_pt2'], {'color': '(0, 255, 0)', 'thickness': '(2)'}), '(image, gt_pt1, gt_pt2, color=(0, 255, 0), thickness=2)\n', (2758, 2813), False, 'import cv2\n'), ((3771, 3843), 'cv2.rectangle', 'cv2.rectangle', (['image', 'pred_pt1', 'pred_pt2'], {'color': '(255, 0, 0)', 'thickness': '(2)'}), '(image, pred_pt1, pred_pt2, color=(255, 0, 0), thickness=2)\n', (3784, 3843), False, 'import cv2\n'), ((36980, 37031), 'numpy.array', 'np.array', (['[d, x, y, x + w, y + h]'], {'dtype': 'np.float32'}), '([d, x, y, x + w, y + h], 
dtype=np.float32)\n', (36988, 37031), True, 'import numpy as np\n'), ((38827, 39146), 'numpy.array', 'np.array', (['[(fr_id, x, y, x + w, y + h, label1, prob1, label2, prob2, label3, prob3,\n label4, prob4, label5, prob5)]'], {'dtype': "[('', 'i4'), ('', 'i4'), ('', 'i4'), ('', 'i4'), ('', 'i4'), ('', 'i4'), (\n '', 'f4'), ('', 'i4'), ('', 'f4'), ('', 'i4'), ('', 'f4'), ('', 'i4'),\n ('', 'f4'), ('', 'i4'), ('', 'f4')]"}), "([(fr_id, x, y, x + w, y + h, label1, prob1, label2, prob2, label3,\n prob3, label4, prob4, label5, prob5)], dtype=[('', 'i4'), ('', 'i4'), (\n '', 'i4'), ('', 'i4'), ('', 'i4'), ('', 'i4'), ('', 'f4'), ('', 'i4'),\n ('', 'f4'), ('', 'i4'), ('', 'f4'), ('', 'i4'), ('', 'f4'), ('', 'i4'),\n ('', 'f4')])\n", (38835, 39146), True, 'import numpy as np\n')] |
from environment import *
import numpy as np
def policy_evaluation(env, policy, gamma, theta, max_iterations):
    """Iteratively evaluate the state-value function of a fixed policy.

    In-place (Gauss-Seidel) sweeps are used, so values updated earlier in a
    sweep are already visible later in the same sweep.

    Args:
        env: environment exposing n_states, n_actions, p(ns, s, a) and
            r(ns, s, a). The last state is treated as terminal (value 0),
            matching the other solvers in this module.
        policy: array-like of shape (n_states, n_actions) with action
            probabilities per state.
        gamma: discount factor.
        theta: convergence tolerance on the largest per-state value change.
        max_iterations: cap on the number of sweeps.

    Returns:
        np.ndarray of shape (n_states,) with the evaluated state values.
    """
    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement.
    value = np.zeros(env.n_states, dtype=float)
    for _ in range(max_iterations):
        delta = 0.0
        # Skip the last state: it is the terminal/absorbing state (value 0).
        for state in range(env.n_states - 1):
            old_value = value[state]
            new_value = 0.0
            for action in range(env.n_actions):
                # Expected one-step return of taking `action` in `state`.
                expected = 0.0
                for next_state in range(env.n_states):
                    expected += env.p(next_state, state, action) * (
                        env.r(next_state, state, action) + gamma * value[next_state]
                    )
                new_value += policy[state][action] * expected
            value[state] = new_value
            delta = max(delta, abs(old_value - value[state]))
        # Stop once no state's value changed by more than theta.
        if delta < theta:
            break
    return value
def policy_improvement(env, value, gamma):
    """Derive the greedy (one-hot) policy with respect to a value function.

    Args:
        env: environment exposing n_states, n_actions, p(ns, s, a) and
            r(ns, s, a). The last state is treated as terminal; its policy
            row is left all-zero.
        value: array-like of state values, length n_states.
        gamma: discount factor.

    Returns:
        np.ndarray of shape (n_states, n_actions); each non-terminal row has
        a single 1 at the greedy action (first action on ties).
    """
    policy = np.zeros((env.n_states, env.n_actions), dtype=int)
    for state in range(env.n_states - 1):
        # Bug fix: seeding with -1 broke environments where every action value
        # is <= -1 (max_a stayed -1 and the *last* action was marked greedy).
        best_value = float("-inf")
        best_action = 0
        for action in range(env.n_actions):
            # Expected one-step return of taking `action` in `state`.
            expected = 0.0
            for next_state in range(env.n_states):
                expected += env.p(next_state, state, action) * (
                    env.r(next_state, state, action) + gamma * value[next_state]
                )
            if expected > best_value:
                best_value = expected
                best_action = action
        policy[state][best_action] = 1
    return policy
def policy_iteration(env, gamma, theta, max_iterations, policy=None):
    """Alternate policy evaluation and greedy improvement until convergence.

    Args:
        env: environment exposing n_states, n_actions, p and r.
        gamma: discount factor.
        theta: convergence tolerance on the value function.
        max_iterations: cap on the number of outer iterations (also passed
            through to each inner policy evaluation).
        policy: optional initial policy; defaults to all-zero.

    Returns:
        (policy, value) pair after convergence or hitting the iteration cap.
    """
    value = np.zeros(env.n_states, dtype=float)
    if policy is None:
        policy = np.zeros((env.n_states, env.n_actions), dtype=int)
    else:
        policy = np.array(policy, dtype=int)
    for _ in range(max_iterations):
        evaluated = policy_evaluation(env, policy, gamma, theta, max_iterations)
        policy = policy_improvement(env, evaluated, gamma)
        # Largest change between the previous and the freshly evaluated values.
        largest_change = max(
            abs(evaluated[state] - value[state]) for state in range(env.n_states)
        )
        if largest_change < theta:
            break
        value = evaluated
    return policy, value
def value_iteration(env, gamma, theta, max_iterations, value=None):
    """Find an optimal state-value function and greedy policy via value iteration.

    Args:
        env: environment exposing n_states, n_actions, p(ns, s, a) and
            r(ns, s, a). The last state is treated as terminal (value 0).
        gamma: discount factor.
        theta: convergence tolerance on the largest per-state value change.
        max_iterations: cap on the number of sweeps.
        value: optional initial value function; defaults to zeros.

    Returns:
        (policy, value): one-hot greedy policy of shape (n_states, n_actions)
        and the converged state values.
    """
    policy = np.zeros((env.n_states, env.n_actions))
    if value is None:
        value = np.zeros(env.n_states, dtype=float)
    else:
        value = np.array(value, dtype=float)

    def _action_value(state, action):
        # One-step lookahead: expected reward plus discounted successor value.
        # Bug fix: sum over *all* next states — the original value sweep used
        # range(n_states - 1) and silently dropped transitions into the
        # terminal state, inconsistent with policy_evaluation above.
        total = 0.0
        for next_state in range(env.n_states):
            total += env.p(next_state, state, action) * (
                env.r(next_state, state, action) + gamma * value[next_state]
            )
        return total

    for _ in range(max_iterations):
        delta = 0.0
        for state in range(env.n_states - 1):
            previous = value[state]
            # Bug fix: the running max was seeded with -1, which clamped any
            # environment whose optimal values are below -1.
            value[state] = max(
                _action_value(state, action) for action in range(env.n_actions)
            )
            delta = max(delta, abs(previous - value[state]))
        if delta < theta:
            break

    # Greedy policy extraction. Bug fix: the original inner loop iterated `ns`
    # but kept reading the stale `next_state`/`value[next_state]` left over
    # from the value sweep, so every action scored identically.
    for state in range(env.n_states - 1):
        action_values = [_action_value(state, action) for action in range(env.n_actions)]
        policy[state][int(np.argmax(action_values))] = 1
    return policy, value
| [
"numpy.array",
"numpy.abs",
"numpy.zeros"
] | [((125, 163), 'numpy.zeros', 'np.zeros', (['env.n_states'], {'dtype': 'np.float'}), '(env.n_states, dtype=np.float)\n', (133, 163), True, 'import numpy as np\n'), ((1242, 1292), 'numpy.zeros', 'np.zeros', (['(env.n_states, env.n_actions)'], {'dtype': 'int'}), '((env.n_states, env.n_actions), dtype=int)\n', (1250, 1292), True, 'import numpy as np\n'), ((1965, 2000), 'numpy.zeros', 'np.zeros', (['env.n_states'], {'dtype': 'float'}), '(env.n_states, dtype=float)\n', (1973, 2000), True, 'import numpy as np\n'), ((2674, 2713), 'numpy.zeros', 'np.zeros', (['(env.n_states, env.n_actions)'], {}), '((env.n_states, env.n_actions))\n', (2682, 2713), True, 'import numpy as np\n'), ((2041, 2091), 'numpy.zeros', 'np.zeros', (['(env.n_states, env.n_actions)'], {'dtype': 'int'}), '((env.n_states, env.n_actions), dtype=int)\n', (2049, 2091), True, 'import numpy as np\n'), ((2119, 2146), 'numpy.array', 'np.array', (['policy'], {'dtype': 'int'}), '(policy, dtype=int)\n', (2127, 2146), True, 'import numpy as np\n'), ((2752, 2787), 'numpy.zeros', 'np.zeros', (['env.n_states'], {'dtype': 'float'}), '(env.n_states, dtype=float)\n', (2760, 2787), True, 'import numpy as np\n'), ((2814, 2842), 'numpy.array', 'np.array', (['value'], {'dtype': 'float'}), '(value, dtype=float)\n', (2822, 2842), True, 'import numpy as np\n'), ((1055, 1079), 'numpy.abs', 'np.abs', (['(v - value[state])'], {}), '(v - value[state])\n', (1061, 1079), True, 'import numpy as np\n'), ((2463, 2497), 'numpy.abs', 'np.abs', (['(v_pi[state] - value[state])'], {}), '(v_pi[state] - value[state])\n', (2469, 2497), True, 'import numpy as np\n'), ((3511, 3535), 'numpy.abs', 'np.abs', (['(v - value[state])'], {}), '(v - value[state])\n', (3517, 3535), True, 'import numpy as np\n')] |
"""
Copyright 2020 Division of Medical Image Computing, German Cancer Research Center (DKFZ), Heidelberg, Germany
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from logging import warn
import numpy as np
import os, sys
import torch
import json, pickle
import argparse
sys.path.append("../../")
from bpreg.preprocessing.nifti2npy import Nifti2Npy
from bpreg.network_architecture.bpr_model import BodyPartRegression
from bpreg.score_processing import Scores, BodyPartExaminedDict
from bpreg.settings.settings import *
from bpreg.settings.model_settings import ModelSettings
from bpreg.score_processing.bodypartexamined_tag import *
from bpreg.utils.json_parser import *
from bpreg.scripts.initialize_pretrained_model import initialize_pretrained_model
from dataclasses import dataclass
from tqdm import tqdm
class InferenceModel:
"""
Body Part Regression Model for inference purposes.
Args:
base_dir (str]): Path which includes model related file.
Structure of base_dir:
base_dir/
model.pt - includes model
settings.json - includes mean slope and mean slope std
lookuptable.json - includes lookuptable as reference
device (str, optional): [description]. "cuda" or "cpu"
"""
def __init__(
self,
base_dir: str = DEFAULT_MODEL,
gpu: bool = 1,
warning_to_error: bool = False,
):
self.base_dir = base_dir
self.device = "cpu"
if gpu:
self.device = "cuda"
self.model = load_model(base_dir, device=self.device)
self.load_inference_settings()
self.n2n = Nifti2Npy(
target_pixel_spacing=3.5, min_hu=-1000, max_hu=1500, size=128
)
self.warning_to_error = warning_to_error
def load_inference_settings(self):
path = self.base_dir + "inference-settings.json"
if not os.path.exists(path):
print("WARNING: For this model, no inference settings can be load!")
with open(path, "rb") as f:
settings = json.load(f)
# use for inference the lookuptable from all predictions
# of the annotated landmarks in the train- and validation-dataset
self.lookuptable_original = settings["lookuptable_train_val"]["original"]
self.lookuptable = settings["lookuptable_train_val"]["transformed"]
self.start_landmark = settings["settings"]["start-landmark"]
self.end_landmark = settings["settings"]["end-landmark"]
self.transform_min = self.lookuptable_original[self.start_landmark]["mean"]
self.transform_max = self.lookuptable_original[self.end_landmark]["mean"]
self.slope_mean = settings["slope_mean"]
self.tangential_slope_min = settings["lower_quantile_tangential_slope"]
self.tangential_slope_max = settings["upper_quantile_tangential_slope"]
def predict_tensor(self, tensor, n_splits=200):
scores = []
n = tensor.shape[0]
slice_splits = list(np.arange(0, n, n_splits))
slice_splits.append(n)
with torch.no_grad():
self.model.eval()
self.model.to(self.device)
for i in range(len(slice_splits) - 1):
min_index = slice_splits[i]
max_index = slice_splits[i + 1]
score = self.model(tensor[min_index:max_index, :, :, :].to(self.device))
scores += [s.item() for s in score]
scores = np.array(scores)
return scores
def predict_npy_array(self, x, n_splits=200):
x_tensor = torch.tensor(x[:, np.newaxis, :, :]).to(self.device)
scores = self.predict_tensor(x_tensor, n_splits=n_splits)
return scores
def predict_nifti(self, nifti_path: str):
# get nifti file as tensor
try:
x, pixel_spacings = self.n2n.preprocess_nifti(nifti_path)
except:
x, pixel_spacings = np.nan, np.nan
if isinstance(x, float) and np.isnan(x):
x, pixel_spacings = self.n2n.load_volume(nifti_path)
if not isinstance(x, np.ndarray):
if self.warning_to_error:
raise ValueError(f"File {nifti_path} can not be loaded.")
return np.nan
warning_msg = (
f"File {nifti_path.split('/')[-1]} with shape {x.shape} and pixel spacings {pixel_spacings} can not be converted to a 3-dimensional volume "
+ f"of the size {self.n2n.size}x{self.n2n.size}xz;"
)
print("WARNING: ", warning_msg)
if self.warning_to_error:
raise ValueError(warning_msg)
return np.nan
x = np.transpose(x, (2, 0, 1))[:, np.newaxis, :, :]
x_tensor = torch.tensor(x)
x_tensor.to(self.device)
# predict slice-scores
scores = self.predict_tensor(x_tensor)
return self.parse_scores(scores, pixel_spacings[2])
def parse_scores(self, scores_array, pixel_spacing):
scores = Scores(
scores_array,
pixel_spacing,
transform_min=self.lookuptable_original[self.start_landmark]["mean"],
transform_max=self.lookuptable_original[self.end_landmark]["mean"],
slope_mean=self.slope_mean,
tangential_slope_min=self.tangential_slope_min,
tangential_slope_max=self.tangential_slope_max,
)
return scores
def npy2json(
self,
X_: np.array,
output_path: str,
pixel_spacings: tuple,
axis_ordering=(0, 1, 2),
ignore_invalid_z: bool = False,
):
"""
Method to predict slice scores from numpy arrays (in Hounsfiel dunits).
Converts plain numpy array to numpy arrays which can be used by the DEFAULT_MODEL to predict the slice scores.n
Args:
X (np.array): matrix of CT volume in Hounsfield units.
output_path (str): output path to save json file
pixel_spacing (tuple): pixel spacing in x, y and z direction.
axis_ordering (tuple): Axis ordering of CT volume. (0,1,2) is equivalent to the axis ordering xyz.
ignore_invalid_z (bool): If true, than invalid z-spacing will be ignored for predicting the body part examined and not NONE will be given back.
"""
X = self.n2n.preprocess_npy(X_, pixel_spacings, axis_ordering=axis_ordering)
# convert axis ordering to zxy
X = X.transpose(2, 0, 1)
slice_scores = self.predict_npy_array(X)
slice_scores = self.parse_scores(slice_scores, pixel_spacings[2])
data_storage = VolumeStorage(
slice_scores, self.lookuptable, ignore_invalid_z=ignore_invalid_z
)
if len(output_path) > 0:
data_storage.save_json(output_path)
return data_storage.json
def nifti2json(
self,
nifti_path: str,
output_path: str = "",
stringify_json: bool = False,
ignore_invalid_z: bool = False,
):
"""
Main method to convert NIFTI CT volumes int JSON meta data files.
Args:
nifti_path (str): path of input NIFTI file
output_path (str): output path to save JSON file
stringify_json (bool): Set it to true for Kaapana JSON format
axis_ordering (tuple): Axis ordering of CT volume. (0,1,2) is equivalent to the axis ordering xyz.
ignore_invalid_z (bool): If true, than invalid z-spacing will be ignored for predicting the body part examined and not NONE will be given back.
"""
slice_scores = self.predict_nifti(nifti_path)
if isinstance(slice_scores, float) and np.isnan(slice_scores):
return np.nan
data_storage = VolumeStorage(
slice_scores, self.lookuptable, ignore_invalid_z=ignore_invalid_z
)
if len(output_path) > 0:
data_storage.save_json(output_path, stringify_json=stringify_json)
return data_storage.json
@dataclass
class VolumeStorage:
"""Body part metadata for one volume
Args:
scores (Scores): predicted slice scores
lookuptable (dict): reference table which contains expected scores for anatomies
body_parts ([type], optional): dictionary to define the body parts for the tag: "body part examined". Defaults to BODY_PARTS.
body_parts_included ([type], optional): dictionary to calculate the "body part examined tag". Defaults to BODY_PARTS_INCLUDED.
distinct_body_parts ([type], optional): dictionary to calculate the "body part examined tag". Defaults to DISTINCT_BODY_PARTS.
min_present_landmarks ([type], optional): dictionary to calculate the "body part examined rtag". Defaults to MIN_PRESENT_LANDMARKS.
"""
def __init__(
self,
scores: Scores,
lookuptable: dict,
body_parts=BODY_PARTS,
body_parts_included=BODY_PARTS_INCLUDED,
distinct_body_parts=DISTINCT_BODY_PARTS,
min_present_landmarks=MIN_PRESENT_LANDMARKS,
ignore_invalid_z: bool = False,
):
self.ignore_invalid_z = ignore_invalid_z
self.body_parts = body_parts
self.body_parts_included = body_parts_included
self.distinct_body_parts = distinct_body_parts
self.min_present_landmarks = min_present_landmarks
self.cleaned_slice_scores = list(scores.values.astype(np.float64))
self.z = list(scores.z.astype(np.float64))
self.unprocessed_slice_scores = list(
scores.original_transformed_values.astype(np.float64)
)
self.lookuptable = lookuptable
self.zspacing = float(scores.zspacing) # .astype(np.float64)
self.reverse_zordering = float(scores.reverse_zordering)
self.valid_zspacing = float(scores.valid_zspacing)
self.expected_slope = float(scores.slope_mean)
self.observed_slope = float(scores.a)
self.expected_zspacing = float(scores.expected_zspacing)
self.r_slope = float(scores.r_slope)
self.bpe = BodyPartExaminedDict(lookuptable, body_parts=self.body_parts)
self.bpet = BodyPartExaminedTag(
lookuptable,
body_parts_included=self.body_parts_included,
distinct_body_parts=self.distinct_body_parts,
min_present_landmarks=self.min_present_landmarks,
ignore_invalid_z=self.ignore_invalid_z,
)
self.settings = {
"slice score processing": scores.settings,
"body part examined dict": self.body_parts,
"body part examined tag": {
"body parts included": self.body_parts_included,
"distinct body parts": self.distinct_body_parts,
"min present landmarks": self.min_present_landmarks,
},
}
self.json = {
"cleaned slice scores": self.cleaned_slice_scores,
"z": self.z,
"unprocessed slice scores": self.unprocessed_slice_scores,
"body part examined": self.bpe.get_examined_body_part(
self.cleaned_slice_scores
),
"body part examined tag": self.bpet.estimate_tag(scores),
"look-up table": self.lookuptable,
"reverse z-ordering": self.reverse_zordering,
"valid z-spacing": self.valid_zspacing,
"expected slope": self.expected_slope,
"observed slope": self.observed_slope,
"slope ratio": self.r_slope,
"expected z-spacing": self.expected_zspacing,
"z-spacing": self.zspacing,
"settings": self.settings,
}
def save_json(self, output_path: str, stringify_json=False):
"""Store data in json file
Args:
output_path (str): save path for json file
stringify_json (bool, optional): if True, stringify output of parameters and
convert json file to a Kaapana friendly format
"""
data = self.json
if stringify_json:
data = parse_json4kaapana(data)
with open(output_path, "w") as f:
json.dump(data, f, indent=4)
def load_model(
base_dir, model_file="model.pt", config_file="config.json", device="cuda"
):
# load public model, if it does not exist locally
if (base_dir == DEFAULT_MODEL) & ~os.path.exists(base_dir):
initialize_pretrained_model()
config_filepath = base_dir + config_file
model_filepath = base_dir + model_file
config = ModelSettings()
config.load(path=config_filepath)
model = BodyPartRegression(alpha=config.alpha, lr=config.lr)
model.load_state_dict(
torch.load(model_filepath, map_location=torch.device(device)), strict=False
)
model.eval()
model.to(device)
return model
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--i", default="")
parser.add_argument("--o", default="")
parser.add_argument("--g", default=1)
value = parser.parse_args()
ipath = value.i
opath = value.o
gpu = value.g
base_dir = "../../src/models/private_bpr_model/"
model = InferenceModel(base_dir, gpu=gpu)
data_path = "../../data/test_cases/"
nifti_paths = [
data_path + f for f in os.listdir(data_path) if f.endswith(".nii.gz")
]
for nifti_path in tqdm(nifti_paths):
output_path = nifti_path.replace("test_cases", "test_results").replace(
".nii.gz", ".json"
)
model.nifti2json(nifti_path, output_path)
| [
"argparse.ArgumentParser",
"numpy.isnan",
"bpreg.network_architecture.bpr_model.BodyPartRegression",
"numpy.arange",
"torch.device",
"torch.no_grad",
"sys.path.append",
"bpreg.settings.model_settings.ModelSettings",
"os.path.exists",
"numpy.transpose",
"bpreg.score_processing.BodyPartExaminedDic... | [((754, 779), 'sys.path.append', 'sys.path.append', (['"""../../"""'], {}), "('../../')\n", (769, 779), False, 'import os, sys\n'), ((13034, 13049), 'bpreg.settings.model_settings.ModelSettings', 'ModelSettings', ([], {}), '()\n', (13047, 13049), False, 'from bpreg.settings.model_settings import ModelSettings\n'), ((13101, 13153), 'bpreg.network_architecture.bpr_model.BodyPartRegression', 'BodyPartRegression', ([], {'alpha': 'config.alpha', 'lr': 'config.lr'}), '(alpha=config.alpha, lr=config.lr)\n', (13119, 13153), False, 'from bpreg.network_architecture.bpr_model import BodyPartRegression\n'), ((13369, 13394), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (13392, 13394), False, 'import argparse\n'), ((13882, 13899), 'tqdm.tqdm', 'tqdm', (['nifti_paths'], {}), '(nifti_paths)\n', (13886, 13899), False, 'from tqdm import tqdm\n'), ((2123, 2195), 'bpreg.preprocessing.nifti2npy.Nifti2Npy', 'Nifti2Npy', ([], {'target_pixel_spacing': '(3.5)', 'min_hu': '(-1000)', 'max_hu': '(1500)', 'size': '(128)'}), '(target_pixel_spacing=3.5, min_hu=-1000, max_hu=1500, size=128)\n', (2132, 2195), False, 'from bpreg.preprocessing.nifti2npy import Nifti2Npy\n'), ((3955, 3971), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3963, 3971), True, 'import numpy as np\n'), ((5246, 5261), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (5258, 5261), False, 'import torch\n'), ((5510, 5826), 'bpreg.score_processing.Scores', 'Scores', (['scores_array', 'pixel_spacing'], {'transform_min': "self.lookuptable_original[self.start_landmark]['mean']", 'transform_max': "self.lookuptable_original[self.end_landmark]['mean']", 'slope_mean': 'self.slope_mean', 'tangential_slope_min': 'self.tangential_slope_min', 'tangential_slope_max': 'self.tangential_slope_max'}), "(scores_array, pixel_spacing, transform_min=self.lookuptable_original\n [self.start_landmark]['mean'], transform_max=self.lookuptable_original[\n 
self.end_landmark]['mean'], slope_mean=self.slope_mean,\n tangential_slope_min=self.tangential_slope_min, tangential_slope_max=\n self.tangential_slope_max)\n", (5516, 5826), False, 'from bpreg.score_processing import Scores, BodyPartExaminedDict\n'), ((10572, 10633), 'bpreg.score_processing.BodyPartExaminedDict', 'BodyPartExaminedDict', (['lookuptable'], {'body_parts': 'self.body_parts'}), '(lookuptable, body_parts=self.body_parts)\n', (10592, 10633), False, 'from bpreg.score_processing import Scores, BodyPartExaminedDict\n'), ((12901, 12930), 'bpreg.scripts.initialize_pretrained_model.initialize_pretrained_model', 'initialize_pretrained_model', ([], {}), '()\n', (12928, 12930), False, 'from bpreg.scripts.initialize_pretrained_model import initialize_pretrained_model\n'), ((2380, 2400), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2394, 2400), False, 'import os, sys\n'), ((2543, 2555), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2552, 2555), False, 'import json, pickle\n'), ((3495, 3520), 'numpy.arange', 'np.arange', (['(0)', 'n', 'n_splits'], {}), '(0, n, n_splits)\n', (3504, 3520), True, 'import numpy as np\n'), ((3567, 3582), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (3580, 3582), False, 'import torch\n'), ((4470, 4481), 'numpy.isnan', 'np.isnan', (['x'], {}), '(x)\n', (4478, 4481), True, 'import numpy as np\n'), ((5179, 5205), 'numpy.transpose', 'np.transpose', (['x', '(2, 0, 1)'], {}), '(x, (2, 0, 1))\n', (5191, 5205), True, 'import numpy as np\n'), ((8193, 8215), 'numpy.isnan', 'np.isnan', (['slice_scores'], {}), '(slice_scores)\n', (8201, 8215), True, 'import numpy as np\n'), ((12647, 12675), 'json.dump', 'json.dump', (['data', 'f'], {'indent': '(4)'}), '(data, f, indent=4)\n', (12656, 12675), False, 'import json, pickle\n'), ((12867, 12891), 'os.path.exists', 'os.path.exists', (['base_dir'], {}), '(base_dir)\n', (12881, 12891), False, 'import os, sys\n'), ((13807, 13828), 'os.listdir', 'os.listdir', (['data_path'], {}), 
'(data_path)\n', (13817, 13828), False, 'import os, sys\n'), ((4064, 4100), 'torch.tensor', 'torch.tensor', (['x[:, np.newaxis, :, :]'], {}), '(x[:, np.newaxis, :, :])\n', (4076, 4100), False, 'import torch\n'), ((13229, 13249), 'torch.device', 'torch.device', (['device'], {}), '(device)\n', (13241, 13249), False, 'import torch\n')] |
#!/usr/bin/env python
# coding: utf-8
# ./session.py
from __future__ import division
import os
from glob import glob
import shutil
import numpy as np
import pandas as pd
from prody import *
from .sblu import pwrmsd, rmsd
from .inout import extract
from .transform import read_ftresults, read_rotations, apply_ftresults_atom_group
class RawSession:
def __init__(self, raw_path, crys_lig=None):
# Raw Files:
self.raw_path = raw_path
self.ft0_raw = os.path.join(self.raw_path, 'ft.000.00.gz')
self.ft2_raw = os.path.join(self.raw_path, 'ft.002.00.gz')
self.ft4_raw = os.path.join(self.raw_path, 'ft.004.00.gz')
self.ft6_raw = os.path.join(self.raw_path, 'ft.006.00.gz')
self.rot_raw = os.path.join(self.raw_path, 'prms/rot70k.0.0.4.prm')
self.lig_raw = os.path.join(self.raw_path, 'lig.pdb.gz')
self.rec_raw = os.path.join(self.raw_path, 'rec.pdb.gz')
if crys_lig is None:
self.crys_lig = self.lig_raw
def convert(self, session_path, contents=None):
"""
Converts a raw session comprising a raw ClusPro output into a "working session"
:param session_path: path of destination for working session directory
:param contents: list of contents to convert, default = None
"""
DEFAULT_CONTENTS = [self.ft0_raw, self.ft2_raw, self.ft4_raw,
self.ft6_raw, self.lig_raw, self.rec_raw, self.rot_raw]
EXTRACTABLE_CONTENTS = [self.ft0_raw, self.ft2_raw, self.ft4_raw,
self.ft6_raw, self.lig_raw, self.rec_raw]
if contents is None:
contents = DEFAULT_CONTENTS
for item in contents:
if item in EXTRACTABLE_CONTENTS:
extract(item, session_path)
elif item == self.rot_raw:
shutil.copy(self.rot_raw, session_path)
class Session:
def __init__(self, session_path, crys_lig=None):
# Standard:
self.session_path = session_path
self.ft0 = os.path.join(self.session_path, 'ft.000.00')
self.ft2 = os.path.join(self.session_path, 'ft.002.00')
self.ft4 = os.path.join(self.session_path, 'ft.004.00')
self.ft6 = os.path.join(self.session_path, 'ft.006.00')
self.rot = os.path.join(self.session_path, 'rot70k.0.0.4.prm')
self.lig = os.path.join(self.session_path, 'lig.pdb')
self.rec = os.path.join(self.session_path, 'rec.pdb')
self.session_name = os.path.basename(self.session_path)
# Optional:
self.mobile_lig = os.path.join(self.session_path, 'mobile-lig.pdb')
self.rec_lig = os.path.join(self.session_path, 'rec-lig.pdb')
self.align_map = glob(os.path.join(self.session_path, '*.pse'))
self.irmsd = glob(os.path.join(self.session_path, '*.irmsd'))
self.ipwrmsd = glob(os.path.join(self.session_path, '*.ipwrmsd'))
self.pwrmsd = glob(os.path.join(self.session_path, '*.pwrmsd'))
self.rmsd = glob(os.path.join(self.session_path, '*.rmsd'))
if crys_lig is None:
self.crys_lig = self.lig
def interface_rmsd(self, ft_type, ft_special=None, output=None):
"""
Generates the interface rmsd for a given ligand/receptor complex and ft_type
:param ft_type: string list for corresponding ft type (e.g., ["0"] or ["0", "6"]
:param output: default is in the same session folder
"""
option = "--only-interface --rec {}".format(self.rec)
for ft in ft_type:
DEFAULT_OUTPUT = os.path.join(
self.session_path, "{}.{}.irmsd".format(self.session_name, ft))
if output is None:
output = DEFAULT_OUTPUT
if ft == "0":
rmsd(self.lig, self.crys_lig, self.ft0, self.rot, output, option)
elif ft == "2":
rmsd(self.lig, self.crys_lig, self.ft2, self.rot, output, option)
elif ft == "4":
rmsd(self.lig, self.crys_lig, self.ft4, self.rot, output, option)
elif ft == "6":
rmsd(self.lig, self.crys_lig, self.ft6, self.rot, output, option)
elif ft == "special":
rmsd(self.lig, self.crys_lig, ft_special, self.rot, output, option)
else:
print("ft_type should be in a list (e.g., ['0'] or ['2', '6'])")
def interface_pwrmsd(self, ft_type, output=None, num_entries=None, ft_special=None):
"""
Generates the interface pwrmsd for a given ligand/receptor complex and ft_type
:param ft_type: string list for corresponding ft type (e.g., ["0"] or ["0", "special"]
:param output: default is in the same session folder
:param num_entries: default is 1000 ft entries used for interface_pwrmsd calculation
:param ft_special: file path for ft_type="special" flag
"""
DEFAULT_NUM_ENTRIES = 1000
if num_entries is None:
num_entries = DEFAULT_NUM_ENTRIES
option = "--only-interface --rec {} --nftresults {}".format(self.rec, num_entries)
for ft in ft_type:
DEFAULT_OUTPUT = os.path.join(
self.session_path, "{}.{}.ipwrmsd".format(self.session_name, ft))
if output is None:
output = DEFAULT_OUTPUT
if ft == "0":
pwrmsd(self.lig, self.ft0, self.rot, output, option)
elif ft == "2":
pwrmsd(self.lig, self.ft2, self.rot, output, option)
elif ft == "4":
pwrmsd(self.lig, self.ft4, self.rot, output, option)
elif ft == "6":
pwrmsd(self.lig, self.ft6, self.rot, output, option)
elif ft == "special":
pwrmsd(self.lig, ft_special, self.rot, output, option)
else:
print("ft_type should be in a list (e.g., ['0'] or ['2', 'special'])")
def near_native(self, threshold, rmsd=10.0, rmsd_file=None):
"""
Generates information for near native poses
:param threshold: number of ft entries to consider
:param rmsd: number representing maximum rmsd to consider
:param rmsd_file: default is the interface_rmsd, can be file path to other rmsd file
:return: session name, threshold, number of near native poses, near native pose entries
"""
if rmsd_file is None:
rmsd_file = self.irmsd
for irmsd_file in rmsd_file:
df = pd.read_csv(irmsd_file, names=["RMSD"])
df = df[:int(threshold)]
bad_poses = df[df["RMSD"] > rmsd].index
df.drop(bad_poses, inplace=True)
shape = df.shape
num_hits = shape[0]
hits = df.index.values
base_name = os.path.basename(irmsd_file)
base_name = base_name.split(".")[0]
return base_name, threshold, num_hits, hits
def get_ft(self, ft_type):
"""
Returns the ft file based on an entered ft_type
:param ft_type: string representing ft-type (e.g., "000")
:return: requested ft file path
"""
if ft_type == "000":
ft = self.ft0
if ft_type == "002":
ft = self.ft2
if ft_type == "004":
ft = self.ft4
if ft_type == "006":
ft = self.ft6
return ft
def from_component(self, component_path):
return Session(os.path.split(component_path)[0])
class Component(Session):
def __init__(self, component_path):
self.component_path = component_path
self.session_path = os.path.split(component_path)[0]
self.atom_group = parsePDB(self.component_path)
# def atom_group(self):
# """
# Returns an atom group for a specified molecule
# :return: the ProDy atom group
# """
# ag = parsePDB(self.component_path)
# return ag
def pose_rmsd(self, num_entry, rmsd_file=None):
"""
Returns the rmsd for a specified pose
:param num_entry: int of ft entry
:param rmsd_file: default is the interface_rmsd, can be file path to other rmsd file
:return: rmsd for the pose @ the specified num_entry
"""
if rmsd_file is None:
session = super().from_component(self.component_path)
rmsd_file = session.irmsd
df = pd.read_csv(rmsd_file, names=["RMSD"])
return df.iloc[num_entry]
def xform(self, ft_type, center=None):
"""
Transform a molecule based on translation and rotation matricies
:param ft_type: string representing ft-type (e.g., "000")
:return: transformed coordinate of component
"""
session = Session(os.path.split(self.component_path)[0])
ft_path = session.get_ft(ft_type)
rot_path = session.rot
protein = self.atom_group
if center is not None:
center_coords = center.getCoords()
center = np.mean(center_coords, axis=0)
ft_results = read_ftresults(ft_path)
rot_results = read_rotations(rot_path)
transformed = apply_ftresults_atom_group(protein, ft_results, rot_results, center)
return transformed
| [
"os.path.basename",
"pandas.read_csv",
"numpy.mean",
"os.path.split",
"os.path.join",
"shutil.copy"
] | [((483, 526), 'os.path.join', 'os.path.join', (['self.raw_path', '"""ft.000.00.gz"""'], {}), "(self.raw_path, 'ft.000.00.gz')\n", (495, 526), False, 'import os\n'), ((550, 593), 'os.path.join', 'os.path.join', (['self.raw_path', '"""ft.002.00.gz"""'], {}), "(self.raw_path, 'ft.002.00.gz')\n", (562, 593), False, 'import os\n'), ((617, 660), 'os.path.join', 'os.path.join', (['self.raw_path', '"""ft.004.00.gz"""'], {}), "(self.raw_path, 'ft.004.00.gz')\n", (629, 660), False, 'import os\n'), ((684, 727), 'os.path.join', 'os.path.join', (['self.raw_path', '"""ft.006.00.gz"""'], {}), "(self.raw_path, 'ft.006.00.gz')\n", (696, 727), False, 'import os\n'), ((751, 803), 'os.path.join', 'os.path.join', (['self.raw_path', '"""prms/rot70k.0.0.4.prm"""'], {}), "(self.raw_path, 'prms/rot70k.0.0.4.prm')\n", (763, 803), False, 'import os\n'), ((827, 868), 'os.path.join', 'os.path.join', (['self.raw_path', '"""lig.pdb.gz"""'], {}), "(self.raw_path, 'lig.pdb.gz')\n", (839, 868), False, 'import os\n'), ((892, 933), 'os.path.join', 'os.path.join', (['self.raw_path', '"""rec.pdb.gz"""'], {}), "(self.raw_path, 'rec.pdb.gz')\n", (904, 933), False, 'import os\n'), ((2058, 2102), 'os.path.join', 'os.path.join', (['self.session_path', '"""ft.000.00"""'], {}), "(self.session_path, 'ft.000.00')\n", (2070, 2102), False, 'import os\n'), ((2122, 2166), 'os.path.join', 'os.path.join', (['self.session_path', '"""ft.002.00"""'], {}), "(self.session_path, 'ft.002.00')\n", (2134, 2166), False, 'import os\n'), ((2186, 2230), 'os.path.join', 'os.path.join', (['self.session_path', '"""ft.004.00"""'], {}), "(self.session_path, 'ft.004.00')\n", (2198, 2230), False, 'import os\n'), ((2250, 2294), 'os.path.join', 'os.path.join', (['self.session_path', '"""ft.006.00"""'], {}), "(self.session_path, 'ft.006.00')\n", (2262, 2294), False, 'import os\n'), ((2314, 2365), 'os.path.join', 'os.path.join', (['self.session_path', '"""rot70k.0.0.4.prm"""'], {}), "(self.session_path, 'rot70k.0.0.4.prm')\n", (2326, 
2365), False, 'import os\n'), ((2385, 2427), 'os.path.join', 'os.path.join', (['self.session_path', '"""lig.pdb"""'], {}), "(self.session_path, 'lig.pdb')\n", (2397, 2427), False, 'import os\n'), ((2447, 2489), 'os.path.join', 'os.path.join', (['self.session_path', '"""rec.pdb"""'], {}), "(self.session_path, 'rec.pdb')\n", (2459, 2489), False, 'import os\n'), ((2518, 2553), 'os.path.basename', 'os.path.basename', (['self.session_path'], {}), '(self.session_path)\n', (2534, 2553), False, 'import os\n'), ((2601, 2650), 'os.path.join', 'os.path.join', (['self.session_path', '"""mobile-lig.pdb"""'], {}), "(self.session_path, 'mobile-lig.pdb')\n", (2613, 2650), False, 'import os\n'), ((2674, 2720), 'os.path.join', 'os.path.join', (['self.session_path', '"""rec-lig.pdb"""'], {}), "(self.session_path, 'rec-lig.pdb')\n", (2686, 2720), False, 'import os\n'), ((8441, 8479), 'pandas.read_csv', 'pd.read_csv', (['rmsd_file'], {'names': "['RMSD']"}), "(rmsd_file, names=['RMSD'])\n", (8452, 8479), True, 'import pandas as pd\n'), ((2751, 2791), 'os.path.join', 'os.path.join', (['self.session_path', '"""*.pse"""'], {}), "(self.session_path, '*.pse')\n", (2763, 2791), False, 'import os\n'), ((2819, 2861), 'os.path.join', 'os.path.join', (['self.session_path', '"""*.irmsd"""'], {}), "(self.session_path, '*.irmsd')\n", (2831, 2861), False, 'import os\n'), ((2891, 2935), 'os.path.join', 'os.path.join', (['self.session_path', '"""*.ipwrmsd"""'], {}), "(self.session_path, '*.ipwrmsd')\n", (2903, 2935), False, 'import os\n'), ((2964, 3007), 'os.path.join', 'os.path.join', (['self.session_path', '"""*.pwrmsd"""'], {}), "(self.session_path, '*.pwrmsd')\n", (2976, 3007), False, 'import os\n'), ((3034, 3075), 'os.path.join', 'os.path.join', (['self.session_path', '"""*.rmsd"""'], {}), "(self.session_path, '*.rmsd')\n", (3046, 3075), False, 'import os\n'), ((6534, 6573), 'pandas.read_csv', 'pd.read_csv', (['irmsd_file'], {'names': "['RMSD']"}), "(irmsd_file, names=['RMSD'])\n", (6545, 6573), 
True, 'import pandas as pd\n'), ((6828, 6856), 'os.path.basename', 'os.path.basename', (['irmsd_file'], {}), '(irmsd_file)\n', (6844, 6856), False, 'import os\n'), ((7666, 7695), 'os.path.split', 'os.path.split', (['component_path'], {}), '(component_path)\n', (7679, 7695), False, 'import os\n'), ((9046, 9076), 'numpy.mean', 'np.mean', (['center_coords'], {'axis': '(0)'}), '(center_coords, axis=0)\n', (9053, 9076), True, 'import numpy as np\n'), ((7490, 7519), 'os.path.split', 'os.path.split', (['component_path'], {}), '(component_path)\n', (7503, 7519), False, 'import os\n'), ((8800, 8834), 'os.path.split', 'os.path.split', (['self.component_path'], {}), '(self.component_path)\n', (8813, 8834), False, 'import os\n'), ((1866, 1905), 'shutil.copy', 'shutil.copy', (['self.rot_raw', 'session_path'], {}), '(self.rot_raw, session_path)\n', (1877, 1905), False, 'import shutil\n')] |
#!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2015 <NAME>
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
# Filename: cell_patches.py
#
# Decription:
# Codebook from cell patches (with KMeans)
#
# Authors:
# <NAME>
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2015-12-20 wm Initial version
#
################################################################################
"""
from __future__ import print_function
DEBUG = False
__all__ = []
__version__ = 0.1
__date__ = '2015-12-20'
__updated__ = '2015-12-20'
from sys import path as sys_path
sys_path.insert(0, './Pipe')
import pipe as P
def work(in_csv_file, out_csv_file, max_n_pois, npatches, patch_size):
from pypipes import as_csv_rows,loopcount,itime
icodebook = (
in_csv_file
| as_csv_rows
| loopcount
| P.select(lambda l: [float(x) for x in l])
)
codebook = [r for r in icodebook]
nclust = len(codebook)
print(nclust)
from math import sqrt
patch_size = int(sqrt(len(codebook[0])))
print(patch_size)
p, q = 32, (nclust + 31) // 32
print(p, q, p * q)
import numpy as np
tiled = np.zeros((p * patch_size, q * patch_size), dtype=float)
for j in range(q):
for i in range(p):
if (j * 32 + i) < nclust:
from skimage.exposure import rescale_intensity
foo = rescale_intensity(np.array(codebook[j * 32 + i]).reshape((patch_size, patch_size)))
tiled[i * patch_size:(i + 1) * patch_size, j * patch_size:(j + 1) * patch_size] = foo
pass
pass
pass
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.set_title("codebook")
ax.imshow(tiled, interpolation='nearest')
ax.axis('off')
plt.show()
pass
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
from sys import argv as Argv
if argv is None:
argv = Argv
pass
else:
Argv.extend(argv)
pass
from os.path import basename
program_name = basename(Argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by <NAME> on %s.
Copyright 2015 <NAME>. All rights reserved.
Licensed under the MIT License
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from argparse import FileType
from sys import stdout,stdin
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument("-D", "--data-dir",
# type=str, action='store', dest="data_dir", required=True,
# help="directory with input CSV files, BMP 'train' and 'test' subfolders, and where H5 will be stored")
parser.add_argument("-i", "--in-csv",
action='store', dest="in_csv_file", default=stdin,
type=FileType('r'),
help="input CSV file name")
parser.add_argument("-o", "--out-csv",
action='store', dest="out_csv_file", default=stdout,
type=FileType('w'),
help="output CSV file name")
parser.add_argument("-p", "--patch-size",
type=int, default=16, action='store', dest="patch_size",
help="size of square patch to build the codebook upon, in pixels")
parser.add_argument("-C", "--num-patches",
type=int, default=80, action='store', dest="npatches",
help="number of patches per image")
parser.add_argument("-N", "--max-pois",
type=int, default=5000, action='store', dest="max_n_pois",
help="max number of PoIs to collect (num_peaks of peak_local_max)")
# Process arguments
args = parser.parse_args()
for k, v in args.__dict__.items():
print(str(k) + ' => ' + str(v))
pass
work(args.in_csv_file,
args.out_csv_file,
args.max_n_pois,
args.npatches,
args.patch_size)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
if DEBUG:
raise(e)
pass
indent = len(program_name) * " "
from sys import stderr
stderr.write(program_name + ": " + repr(e) + "\n")
stderr.write(indent + " for help use --help")
return 2
pass
if __name__ == "__main__":
if DEBUG:
from sys import argv
argv.append("--in-csv=cell_patches.csv")
argv.append("--num-patches=256")
pass
from sys import exit as Exit
Exit(main())
pass
| [
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"os.path.basename",
"sys.argv.append",
"numpy.zeros",
"sys.path.insert",
"sys.argv.extend",
"numpy.array",
"sys.stderr.write",
"matplotlib.pyplot.subplots",
"argparse.FileType"
] | [((990, 1018), 'sys.path.insert', 'sys_path.insert', (['(0)', '"""./Pipe"""'], {}), "(0, './Pipe')\n", (1005, 1018), True, 'from sys import path as sys_path\n'), ((1578, 1633), 'numpy.zeros', 'np.zeros', (['(p * patch_size, q * patch_size)'], {'dtype': 'float'}), '((p * patch_size, q * patch_size), dtype=float)\n', (1586, 1633), True, 'import numpy as np\n'), ((2100, 2118), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2112, 2118), True, 'from matplotlib import pyplot as plt\n'), ((2217, 2227), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2225, 2227), True, 'from matplotlib import pyplot as plt\n'), ((2498, 2515), 'os.path.basename', 'basename', (['Argv[0]'], {}), '(Argv[0])\n', (2506, 2515), False, 'from os.path import basename\n'), ((2414, 2431), 'sys.argv.extend', 'Argv.extend', (['argv'], {}), '(argv)\n', (2425, 2431), True, 'from sys import argv as Argv\n'), ((3290, 3383), 'argparse.ArgumentParser', 'ArgumentParser', ([], {'description': 'program_license', 'formatter_class': 'RawDescriptionHelpFormatter'}), '(description=program_license, formatter_class=\n RawDescriptionHelpFormatter)\n', (3304, 3383), False, 'from argparse import ArgumentParser\n'), ((5350, 5390), 'sys.argv.append', 'argv.append', (['"""--in-csv=cell_patches.csv"""'], {}), "('--in-csv=cell_patches.csv')\n", (5361, 5390), False, 'from sys import argv\n'), ((5399, 5431), 'sys.argv.append', 'argv.append', (['"""--num-patches=256"""'], {}), "('--num-patches=256')\n", (5410, 5431), False, 'from sys import argv\n'), ((5196, 5242), 'sys.stderr.write', 'stderr.write', (["(indent + ' for help use --help')"], {}), "(indent + ' for help use --help')\n", (5208, 5242), False, 'from sys import stderr\n'), ((3741, 3754), 'argparse.FileType', 'FileType', (['"""r"""'], {}), "('r')\n", (3749, 3754), False, 'from argparse import FileType\n'), ((3925, 3938), 'argparse.FileType', 'FileType', (['"""w"""'], {}), "('w')\n", (3933, 3938), False, 'from argparse import 
FileType\n'), ((1825, 1855), 'numpy.array', 'np.array', (['codebook[j * 32 + i]'], {}), '(codebook[j * 32 + i])\n', (1833, 1855), True, 'import numpy as np\n')] |
import tomopy
import numpy as np
import dxchange
from util import *
import time
import os
PI = 3.1415927  # low-precision pi; used only for the angular range below
# ============================================
# Reconstruction parameters consumed by the __main__ entry point.
theta_st = 0                # first projection angle (radians)
theta_end = PI              # last projection angle (radians)
n_epochs = 200              # number of SIRT iterations
sino_range = (600, 601, 1)  # (start, stop, step) passed as `sino` to dxchange.read_aps_32id
center = 958                # rotation center passed to tomopy.recon
downsample = (3, 0, 0)      # downsampling level per axis (passed to tomopy.downsample)
# ============================================
def reconstruct_sirt(fname, sino_range, theta_st=0, theta_end=PI, n_epochs=200,
                     output_folder=None, downsample=None, center=None):
    """Reconstruct a sinogram range from an APS 32-ID HDF5 file with SIRT (ASTRA/CUDA).

    Parameters
    ----------
    fname : str
        Path to the APS 32-ID HDF5 data file.
    sino_range : tuple
        (start, stop, step) passed to dxchange.read_aps_32id as ``sino``.
    theta_st, theta_end : float
        First and last projection angles in radians.
    n_epochs : int
        Number of SIRT iterations.
    output_folder : str or None
        Destination folder for the TIFF stack; auto-named when None.
    downsample : tuple of int or None
        Per-axis downsampling levels (axes 0, 1, 2); skipped when None.
    center : float or None
        Rotation center passed to tomopy.recon.
    """
    if output_folder is None:
        # Bug fix: the original unpacked `*downsample` unconditionally here,
        # raising TypeError whenever downsample is None.
        ds = downsample if downsample is not None else (0, 0, 0)
        output_folder = 'sirt_niter_{}_ds_{}_{}_{}'.format(n_epochs, *ds)
    t0 = time.time()
    print('Reading data...')
    prj, flt, drk, _ = dxchange.read_aps_32id(fname, sino=sino_range)
    print('Data reading: {} s'.format(time.time() - t0))
    print('Data shape: {}'.format(prj.shape))
    prj = tomopy.normalize(prj, flt, drk)
    prj = preprocess(prj)
    # Scale up to prevent floating-point precision issues downstream.
    prj *= 1.e2
    if downsample is not None:
        prj = tomopy.downsample(prj, level=downsample[0], axis=0)
        prj = tomopy.downsample(prj, level=downsample[1], axis=1)
        prj = tomopy.downsample(prj, level=downsample[2], axis=2)
        print('Downsampled shape: {}'.format(prj.shape))
    n_theta = prj.shape[0]
    theta = np.linspace(theta_st, theta_end, n_theta)
    print('Starting reconstruction...')
    t0 = time.time()
    extra_options = {'MinConstraint': 0}  # enforce non-negative voxels
    options = {'proj_type': 'cuda', 'method': 'SIRT_CUDA', 'num_iter': n_epochs, 'extra_options': extra_options}
    res = tomopy.recon(prj, theta, center=center, algorithm=tomopy.astra, options=options)
    dxchange.write_tiff_stack(res, fname=os.path.join(output_folder, 'recon'), dtype='float32',
                              overwrite=True)
    print('Reconstruction time: {} s'.format(time.time() - t0))
if __name__ == '__main__':
    # Run a single reconstruction with the module-level parameters above.
    run_kwargs = dict(fname='data.h5',
                      sino_range=sino_range,
                      n_epochs=n_epochs,
                      downsample=downsample,
                      center=center)
    reconstruct_sirt(**run_kwargs)
| [
"tomopy.recon",
"tomopy.downsample",
"time.time",
"dxchange.read_aps_32id",
"numpy.linspace",
"os.path.join",
"tomopy.normalize"
] | [((585, 596), 'time.time', 'time.time', ([], {}), '()\n', (594, 596), False, 'import time\n'), ((649, 695), 'dxchange.read_aps_32id', 'dxchange.read_aps_32id', (['fname'], {'sino': 'sino_range'}), '(fname, sino=sino_range)\n', (671, 695), False, 'import dxchange\n'), ((809, 840), 'tomopy.normalize', 'tomopy.normalize', (['prj', 'flt', 'drk'], {}), '(prj, flt, drk)\n', (825, 840), False, 'import tomopy\n'), ((1252, 1293), 'numpy.linspace', 'np.linspace', (['theta_st', 'theta_end', 'n_theta'], {}), '(theta_st, theta_end, n_theta)\n', (1263, 1293), True, 'import numpy as np\n'), ((1344, 1355), 'time.time', 'time.time', ([], {}), '()\n', (1353, 1355), False, 'import time\n'), ((1520, 1605), 'tomopy.recon', 'tomopy.recon', (['prj', 'theta'], {'center': 'center', 'algorithm': 'tomopy.astra', 'options': 'options'}), '(prj, theta, center=center, algorithm=tomopy.astra, options=options\n )\n', (1532, 1605), False, 'import tomopy\n'), ((971, 1022), 'tomopy.downsample', 'tomopy.downsample', (['prj'], {'level': 'downsample[0]', 'axis': '(0)'}), '(prj, level=downsample[0], axis=0)\n', (988, 1022), False, 'import tomopy\n'), ((1037, 1088), 'tomopy.downsample', 'tomopy.downsample', (['prj'], {'level': 'downsample[1]', 'axis': '(1)'}), '(prj, level=downsample[1], axis=1)\n', (1054, 1088), False, 'import tomopy\n'), ((1103, 1154), 'tomopy.downsample', 'tomopy.downsample', (['prj'], {'level': 'downsample[2]', 'axis': '(2)'}), '(prj, level=downsample[2], axis=2)\n', (1120, 1154), False, 'import tomopy\n'), ((1643, 1679), 'os.path.join', 'os.path.join', (['output_folder', '"""recon"""'], {}), "(output_folder, 'recon')\n", (1655, 1679), False, 'import os\n'), ((734, 745), 'time.time', 'time.time', ([], {}), '()\n', (743, 745), False, 'import time\n'), ((1789, 1800), 'time.time', 'time.time', ([], {}), '()\n', (1798, 1800), False, 'import time\n')] |
from asyncio import Queue
from enum import Enum
import numpy as np
import torch
import math
import csv
from datetime import datetime
import numpy as np
# Single-slot handoff queues between the command producer and the sensor consumer.
producer_queue = Queue(maxsize=1)
consumer_queue = Queue(maxsize=1)
# Run torch on the GPU when available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class ActionSpace(Enum):
    """Discrete agent actions; values 0-8 mirror the Commands enum below."""
    STOP = 0
    STRONGFORWARD = 1
    WEAKFORWARD = 2
    STRONGBACK = 3
    WEAKBACK = 4
    STRONGLEFT = 5
    WEAKLEFT = 6
    STRONGRIGHT = 7
    WEAKRIGHT = 8
# Per-step increment/decrement applied to a motor power level in changeMotorSpeed.
MOTOR_SPEED_CHANGE = 1
class Commands(Enum):
    """Command identifiers; extends the action set with RESET and WINDRESET.

    Index-aligned with COMMAND_MAP below (entry i is the payload for value i).
    """
    STOP = 0
    STRONGFORWARD = 1
    WEAKFORWARD = 2
    STRONGBACK = 3
    WEAKBACK = 4
    STRONGLEFT = 5
    WEAKLEFT = 6
    STRONGRIGHT = 7
    WEAKRIGHT = 8
    RESET = 9
    WINDRESET = 10
class VelStates(Enum):
    """Discretized velocity-state labels (not referenced elsewhere in this module)."""
    NO_ACCEL = 0
    STRONGFORWARD = 1
    WEAKFORWARD = 2
    STRONGBACK = 3
    WEAKBACK = 4
    STRONGLEFT = 5
    WEAKLEFT = 6
    STRONGRIGHT = 7
    WEAKRIGHT = 8
class AngVelStates(Enum):
    """Discretized angular-velocity labels (not referenced elsewhere in this module)."""
    NONE = 0
    STRONGLEFT = 1
    STRONGRIGHT = 2
    WEAKLEFT = 3
    WEAKRIGHT = 4
# abbrevations for motors, U = up, N = neutral, D = down
class Actions(Enum):
    """Two-letter motor actions: first letter = left motor, second = right motor."""
    UU = 0
    UN = 1
    UD = 2
    NU = 3
    NN = 4
    ND = 5
    DU = 6
    DN = 7
    DD = 8
def changeMotorSpeed(left_motor_speed, right_motor_speed, action):
    """Apply a two-motor Action and return the clamped (left, right) speeds.

    Each letter of the action name maps to a step of +MOTOR_SPEED_CHANGE (U),
    0 (N) or -MOTOR_SPEED_CHANGE (D) for that motor; results are clamped to
    the [-127, 127] power-level range.
    """
    step_table = {
        Actions.UU: (1, 1),
        Actions.UN: (1, 0),
        Actions.UD: (1, -1),
        Actions.NU: (0, 1),
        Actions.ND: (0, -1),
        Actions.DU: (-1, 1),
        Actions.DN: (-1, 0),
        Actions.DD: (-1, -1),
    }
    # Actions.NN (or anything unmapped) leaves both speeds unchanged.
    left_step, right_step = step_table.get(action, (0, 0))
    left_motor_speed += left_step * MOTOR_SPEED_CHANGE
    right_motor_speed += right_step * MOTOR_SPEED_CHANGE
    left_motor_speed = max(-127, min(127, left_motor_speed))
    right_motor_speed = max(-127, min(127, right_motor_speed))
    return (left_motor_speed, right_motor_speed)
# Index-aligned with the Commands enum: entry i is the message payload for
# Commands(i). Entries 0-8 carry signed motor power levels (strings in
# [-127, 127]); entries 9 and 10 are the RESET and WINDRESET control messages.
COMMAND_MAP = [
    {
        "leftPowerLevel": '0',
        "rightPowerLevel": '0'
    },
    {
        "leftPowerLevel": '127',
        "rightPowerLevel": '127'
    },
    {
        "leftPowerLevel": '64',
        "rightPowerLevel": '64'
    },
    {
        "leftPowerLevel": '-127',
        "rightPowerLevel": '-127'
    },
    {
        "leftPowerLevel": '-64',
        "rightPowerLevel": '-64'
    },
    {
        "leftPowerLevel": '-127',
        "rightPowerLevel": '127'
    },
    {
        "leftPowerLevel": '-64',
        "rightPowerLevel": '64'
    },
    {
        "leftPowerLevel": '127',
        "rightPowerLevel": '-127'
    },
    {
        "leftPowerLevel": '64',
        "rightPowerLevel": '-64'
    },
    {
        "reset": True
    },
    {
        "windReset": True
    }
]
def calculateStateAndReward(acceleration, angularVelocity, linearVelocity):
    """Build the observation vector and reward from raw sensor readings.

    Returns a tuple (state, reward, done): state is a 6-element numpy array
    [ax, ay, vx, vy, angular_velocity, speed]; reward penalises the squared
    speed, acceleration magnitude and angular velocity; done is True once the
    robot is nearly stationary (speed < 0.1 and |angular velocity| < 0.0005).
    """
    speed = math.sqrt(linearVelocity['x'] ** 2 + linearVelocity['y'] ** 2)
    accmag = math.sqrt(acceleration['x'] ** 2 + acceleration['y'] ** 2)
    state = np.array([
        acceleration['x'], acceleration['y'],
        linearVelocity['x'], linearVelocity['y'],
        angularVelocity, speed,
    ])
    reward = 3.0 - (4 * speed) ** 2 - (10000 * accmag) ** 2 - (100 * angularVelocity) ** 2
    done = speed < 0.1 and abs(angularVelocity) < 0.0005
    return (state, reward, done)
async def sendCommand(command):
    # Hand the command dict to the producer side; awaits while the
    # single-slot producer_queue is full.
    await producer_queue.put(command)
async def getNextState(lms, rms):
    """Send left/right motor power levels and await (state, reward, done, info).

    The sensor payload arriving on consumer_queue is unpacked into
    calculateStateAndReward; the trailing dict mimics a gym-style info slot.
    """
    await producer_queue.put({
        "leftPowerLevel": str(lms),
        "rightPowerLevel": str(rms),
    })
    data_package = await consumer_queue.get()
    state, reward, done = calculateStateAndReward(**data_package)
    return (state, reward, done, {})
async def getState():
    """Await the next sensor payload and return only the observation vector."""
    data_package = await consumer_queue.get()
    state, _, _ = calculateStateAndReward(**data_package)
    return state
def now_str(str_format='%Y%m%d%H%M'):
    """Return the current local time formatted with *str_format*."""
    current_time = datetime.now()
    return current_time.strftime(str_format)
def idx2mask(idx, max_size):
    """Return a float vector of length *max_size* that is 1.0 at *idx*, 0.0 elsewhere."""
    one_hot = np.zeros(max_size)
    one_hot[idx] = 1.0
    return one_hot
class RecordHistory:
    """Append-only CSV logger for training history.

    Parameters
    ----------
    csv_path : str
        Destination CSV file path.
    header : list of str
        Column names; also the key order used by :meth:`add_histry`.
    """

    def __init__(self, csv_path, header):
        self.csv_path = csv_path
        self.header = header

    def generate_csv(self):
        """(Re)create the CSV file containing only the header row."""
        # Bug fix: newline='' is required by the csv module; without it the
        # writer emits extra blank lines on Windows.
        with open(self.csv_path, 'w', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(self.header)

    def add_histry(self, history):
        """Append one row, pulling values from *history* in header order."""
        history_list = [history[key] for key in self.header]
        with open(self.csv_path, 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(history_list)

    def add_list(self, array):
        """Append *array* verbatim as one CSV row."""
        with open(self.csv_path, 'a', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(array)
| [
"csv.writer",
"math.pow",
"numpy.zeros",
"datetime.datetime.now",
"numpy.array",
"torch.cuda.is_available",
"asyncio.Queue"
] | [((171, 187), 'asyncio.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (176, 187), False, 'from asyncio import Queue\n'), ((205, 221), 'asyncio.Queue', 'Queue', ([], {'maxsize': '(1)'}), '(maxsize=1)\n', (210, 221), False, 'from asyncio import Queue\n'), ((3223, 3341), 'numpy.array', 'np.array', (["[acceleration['x'], acceleration['y'], linearVelocity['x'], linearVelocity[\n 'y'], angularVelocity, speed]"], {}), "([acceleration['x'], acceleration['y'], linearVelocity['x'],\n linearVelocity['y'], angularVelocity, speed])\n", (3231, 3341), True, 'import numpy as np\n'), ((4224, 4242), 'numpy.zeros', 'np.zeros', (['max_size'], {}), '(max_size)\n', (4232, 4242), True, 'import numpy as np\n'), ((255, 280), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (278, 280), False, 'import torch\n'), ((3013, 3045), 'math.pow', 'math.pow', (["linearVelocity['x']", '(2)'], {}), "(linearVelocity['x'], 2)\n", (3021, 3045), False, 'import math\n'), ((3048, 3080), 'math.pow', 'math.pow', (["linearVelocity['y']", '(2)'], {}), "(linearVelocity['y'], 2)\n", (3056, 3080), False, 'import math\n'), ((3105, 3135), 'math.pow', 'math.pow', (["acceleration['x']", '(2)'], {}), "(acceleration['x'], 2)\n", (3113, 3135), False, 'import math\n'), ((3138, 3168), 'math.pow', 'math.pow', (["acceleration['y']", '(2)'], {}), "(acceleration['y'], 2)\n", (3146, 3168), False, 'import math\n'), ((4146, 4160), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (4158, 4160), False, 'from datetime import datetime\n'), ((4500, 4513), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4510, 4513), False, 'import csv\n'), ((4717, 4730), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4727, 4730), False, 'import csv\n'), ((4870, 4883), 'csv.writer', 'csv.writer', (['f'], {}), '(f)\n', (4880, 4883), False, 'import csv\n')] |
from copy import copy as copy
from numpy import dot as dot
from numpy import histogram as histogram
from numpy import zeros as zeros
from scipy.special import gammaincc as gammaincc
class ComplexityTest:

    @staticmethod
    def linear_complexity_test(binary_data: str, verbose=False, block_size=500):
        """
        NIST SP 800-22 linear complexity test (Section 2.10) [1].

        The focus of this test is the length of a linear feedback shift register
        (LFSR). Random sequences are characterized by longer LFSRs; an LFSR that
        is too short implies non-randomness.

        [1] http://csrc.nist.gov/publications/nistpubs/800-22-rev1a/SP800-22rev1a.pdf

        :param binary_data: a binary string
        :param verbose: True to display the debug message, False to turn it off
        :param block_size: size of each block in bits
        :return: (p_value, bool) tuple with the p-value and the pass/fail result;
                 (-1.0, False) when the input is too short for two blocks.
        """
        length_of_binary_data = len(binary_data)
        # The number of degrees of freedom; K = 6 has been hard coded into the test.
        degree_of_freedom = 6
        # pi_0 .. pi_6: class probabilities from SP 800-22 Section 3.10.
        # Bug fix: pi[0] was mistyped as 0.01047; the NIST value is 0.010417
        # (as the original comment itself stated).
        pi = [0.010417, 0.03125, 0.125, 0.5, 0.25, 0.0625, 0.020833]
        t2 = (block_size / 3.0 + 2.0 / 9) / 2 ** block_size
        mean = 0.5 * block_size + (1.0 / 36) * (9 + (-1) ** (block_size + 1)) - t2
        number_of_block = int(length_of_binary_data / block_size)
        if number_of_block > 1:
            # Split the input into non-overlapping blocks of block_size bits.
            blocks = [binary_data[i * block_size:(i + 1) * block_size]
                      for i in range(number_of_block)]
            complexities = [ComplexityTest.berlekamp_massey_algorithm(block)
                            for block in blocks]
            # T_i statistics for each block's linear complexity.
            t = [-1.0 * (((-1) ** block_size) * (chunk - mean) + 2.0 / 9) for chunk in complexities]
            # Bin the T_i values into the seven NIST classes.
            vg = histogram(t, bins=[-9999999999, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 9999999999])[0][::-1]
            im = [((vg[ii] - number_of_block * pi[ii]) ** 2) / (number_of_block * pi[ii]) for ii in range(7)]
            # Chi-square statistic over the seven classes.
            xObs = sum(im)
            # P-Value = igamc(K/2, xObs/2)
            p_value = gammaincc(degree_of_freedom / 2.0, xObs / 2.0)
            if verbose:
                print('Linear Complexity Test DEBUG BEGIN:')
                print("\tLength of input:\t", length_of_binary_data)
                # Bug fix: the original print had a dangling comma and never
                # actually printed the block size.
                print('\tLength in bits of a block:\t', block_size)
                print("\tDegree of Freedom:\t\t", degree_of_freedom)
                print('\tNumber of Blocks:\t', number_of_block)
                print('\tValue of Vs:\t\t', vg)
                print('\txObs:\t\t\t\t', xObs)
                print('\tP-Value:\t\t\t', p_value)
                print('DEBUG END.')
            return (p_value, (p_value >= 0.01))
        else:
            # Not enough data for at least two blocks.
            return (-1.0, False)

    @staticmethod
    def berlekamp_massey_algorithm(block_data):
        """
        Berlekamp-Massey algorithm [1]: find the length of the shortest linear
        feedback shift register (LFSR) that generates the given binary sequence.

        [1] - https://en.wikipedia.org/wiki/Berlekamp-Massey_algorithm

        :param block_data: string of '0'/'1' characters.
        :return: the linear complexity L of the sequence.
        """
        n = len(block_data)
        c = zeros(n)
        b = zeros(n)
        c[0], b[0] = 1, 1
        l, m, i = 0, -1, 0
        int_data = [int(el) for el in block_data]
        while i < n:
            # Discrepancy d between the next bit and the current LFSR prediction.
            v = int_data[(i - l):i]
            v = v[::-1]
            cc = c[1:l + 1]
            d = (int_data[i] + dot(v, cc)) % 2
            if d == 1:
                # Correct the connection polynomial; lengthen the LFSR if required.
                temp = copy(c)
                p = zeros(n)
                for j in range(0, l):
                    if b[j] == 1:
                        p[j + i - m] = 1
                c = (c + p) % 2
                if l <= 0.5 * i:
                    l = i + 1 - l
                    m = i
                    b = temp
            i += 1
        return l
"numpy.zeros",
"copy.copy",
"numpy.histogram",
"numpy.dot",
"scipy.special.gammaincc"
] | [((4127, 4135), 'numpy.zeros', 'zeros', (['n'], {}), '(n)\n', (4132, 4135), True, 'from numpy import zeros as zeros\n'), ((4148, 4156), 'numpy.zeros', 'zeros', (['n'], {}), '(n)\n', (4153, 4156), True, 'from numpy import zeros as zeros\n'), ((2705, 2751), 'scipy.special.gammaincc', 'gammaincc', (['(degree_of_freedom / 2.0)', '(xObs / 2.0)'], {}), '(degree_of_freedom / 2.0, xObs / 2.0)\n', (2714, 2751), True, 'from scipy.special import gammaincc as gammaincc\n'), ((4462, 4469), 'copy.copy', 'copy', (['c'], {}), '(c)\n', (4466, 4469), True, 'from copy import copy as copy\n'), ((4490, 4498), 'numpy.zeros', 'zeros', (['n'], {}), '(n)\n', (4495, 4498), True, 'from numpy import zeros as zeros\n'), ((2349, 2426), 'numpy.histogram', 'histogram', (['t'], {'bins': '[-9999999999, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 9999999999]'}), '(t, bins=[-9999999999, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 9999999999])\n', (2358, 2426), True, 'from numpy import histogram as histogram\n'), ((4400, 4410), 'numpy.dot', 'dot', (['v', 'cc'], {}), '(v, cc)\n', (4403, 4410), True, 'from numpy import dot as dot\n')] |
from math import isnan
import numpy as np
def nan_leastsquare(measured_vals, updated_model, weights, x, y=None):
    """
    NaN-tolerant least square statistic with optional weights.

    Variant of astropy's leastsquare objective in which the squared residuals
    are averaged with ``np.nanmean`` so that NaN data points do not poison
    the statistic.

    Parameters
    ----------
    measured_vals : `~numpy.ndarray`
        Measured data values.
    updated_model : `~astropy.modeling.Model`
        Model with parameters set by the current iteration of the optimizer.
    weights : `~numpy.ndarray` or None
        Array of weights to apply to each residual, or None for no weighting.
    x : `~numpy.ndarray`
        Independent variable "x" to evaluate the model on.
    y : `~numpy.ndarray`, optional
        Independent variable "y" to evaluate the model on, for 2D models.

    Returns
    -------
    res : float
        The NaN-ignoring mean of the (weighted) squared residuals.
    """
    model_vals = updated_model(x) if y is None else updated_model(x, y)
    residuals = model_vals - measured_vals
    if weights is not None:
        residuals = weights * residuals
    return np.nanmean(residuals ** 2)
| [
"numpy.nanmean"
] | [((986, 1031), 'numpy.nanmean', 'np.nanmean', (['((model_vals - measured_vals) ** 2)'], {}), '((model_vals - measured_vals) ** 2)\n', (996, 1031), True, 'import numpy as np\n'), ((1057, 1114), 'numpy.nanmean', 'np.nanmean', (['((weights * (model_vals - measured_vals)) ** 2)'], {}), '((weights * (model_vals - measured_vals)) ** 2)\n', (1067, 1114), True, 'import numpy as np\n')] |
# ##################################################################################################
# Copyright (c) 2020 - Fundação CERTI
# All rights reserved.
# ##################################################################################################
import numpy
# Suppress numpy divide-by-zero / invalid-value warnings raised by the
# band-ratio divisions below when a reflectance band contains zeros.
numpy.seterr(divide="ignore", invalid="ignore")
def gordon_morel_1983(reflectance_550nm_wavelength, reflectance_440nm_wavelength):
    """Return the 440 nm / 550 nm reflectance band ratio."""
    blue_band = reflectance_440nm_wavelength.astype(float)
    return blue_band / reflectance_550nm_wavelength
def gons_1999(reflectance_704nm_wavelength, reflectance_672nm_wavelength):
    """Return the 704 nm / 672 nm reflectance band ratio."""
    red_edge_band = reflectance_704nm_wavelength.astype(float)
    return red_edge_band / reflectance_672nm_wavelength
def dallolmo_gitelson_rundquist_2003(
    reflectance_745nm_wavelength,
    reflectance_725nm_wavelength,
    reflectance_665nm_wavelength,
):
    """Return R745/R665 - R745/R725 (three-band reflectance index)."""
    nir_band = reflectance_745nm_wavelength.astype(float)
    return nir_band / reflectance_665nm_wavelength - nir_band / reflectance_725nm_wavelength
def gitelson_et_al_2007(
    reflectance_730nm_wavelength,
    reflectance_695nm_wavelength,
    reflectance_675nm_wavelength,
):
    """Return R730/R675 - R730/R695 (three-band reflectance index)."""
    red_edge_band = reflectance_730nm_wavelength.astype(float)
    return (
        red_edge_band / reflectance_675nm_wavelength
        - red_edge_band / reflectance_695nm_wavelength
    )
def le_et_al_2009(
    reflectance_740nm_wavelength,
    reflectance_705nm_wavelength,
    reflectance_693nm_wavelength,
    reflectance_662nm_wavelength,
):
    """Return (1/R662 - 1/R693) / (1/R740 - 1/R705) (four-band reciprocal index)."""
    numerator = (
        reflectance_662nm_wavelength.astype(float) ** -1
        - reflectance_693nm_wavelength.astype(float) ** -1
    )
    denominator = (
        reflectance_740nm_wavelength.astype(float) ** -1
        - reflectance_705nm_wavelength.astype(float) ** -1
    )
    return numerator / denominator
def mishra_mishra_2012(reflectance_708nm_wavelength, reflectance_665nm_wavelength):
    """Return the normalized difference (R708 - R665) / (R708 + R665)."""
    difference = (
        reflectance_708nm_wavelength.astype(float)
        - reflectance_665nm_wavelength.astype(float)
    )
    total = reflectance_708nm_wavelength + reflectance_665nm_wavelength
    return difference / total
def rodrigues_et_al_2016(reflectance_893nm_wavelength, reflectance_838nm_wavelength):
    """Return the 838 nm / 893 nm reflectance band ratio."""
    nir_band = reflectance_838nm_wavelength.astype(float)
    return nir_band / reflectance_893nm_wavelength
def chavula_et_al_2009(reflectance_551nm_wavelength, reflectance_443nm_wavelength):
    """Return the 443 nm / 551 nm reflectance band ratio."""
    blue_band = reflectance_443nm_wavelength.astype(float)
    return blue_band / reflectance_551nm_wavelength
def allan_hicks_brabyn_2006(reflectance_665nm_wavelength, reflectance_485nm_wavelength):
    """Return the 485 nm / 665 nm reflectance band ratio."""
    blue_band = reflectance_485nm_wavelength.astype(float)
    return blue_band / reflectance_665nm_wavelength
def gower_et_al_2005(
    reflectance_753nm_wavelength,
    reflectance_709nm_wavelength,
    reflectance_681nm_wavelength,
    r753_adapted_wavelength,
    r709_adapted_wavelength,
    r681_adapted_wavelength,
):
    """Return R709 - R681 - w * (R753 - R681), the height of the 709 nm band
    above the line between the 681 nm and 753 nm bands.

    The interpolation weight w = (l709 - l681) / (l753 - l681) is computed
    from the adapted wavelengths of the three bands.
    """
    weight = (r709_adapted_wavelength - r681_adapted_wavelength) / (
        r753_adapted_wavelength - r681_adapted_wavelength
    )
    peak = (
        reflectance_709nm_wavelength.astype(float)
        - reflectance_681nm_wavelength.astype(float)
    )
    baseline = weight * (
        reflectance_753nm_wavelength.astype(float)
        - reflectance_681nm_wavelength.astype(float)
    )
    return peak - baseline
| [
"numpy.seterr"
] | [((278, 325), 'numpy.seterr', 'numpy.seterr', ([], {'divide': '"""ignore"""', 'invalid': '"""ignore"""'}), "(divide='ignore', invalid='ignore')\n", (290, 325), False, 'import numpy\n')] |
#
# Copyright (C) 2020 IBM. All Rights Reserved.
#
# See LICENSE.txt file in the root directory
# of this source tree for licensing information.
#
from pathlib import Path
from typing import Dict, Iterable, List, Optional, Tuple, Union
import cv2
import holoviews as hv
import numpy as np
from IPython.display import display
import vsrl.symmap.symbolic_mapper
import vsrl.verifier.expr as vexpr
from vsrl.spaces.space import Space
from vsrl.symmap.utils import draw_rects, drop_zeros, to_grayscale
class TemplateMatching(vsrl.symmap.symbolic_mapper.SymbolicMapper):
    """
    TemplateMatching uses cv2's built-in template matching function.

    Images are always converted into grayscale, including both the input image
    and the template images.
    """

    def __init__(
        self,
        templates: Iterable[np.ndarray],
        space: Space,
        threshold: float = -1,
        corr: str = "TM_CCORR_NORMED",
    ):
        """
        :param templates: templates; converted to grayscale if not already grayscale.
        :param space: symbolic space the extracted coordinate vector must belong to.
        :param threshold: threshold at or above which a match is detected.
            If threshold < 0, only the match with highest confidence will be returned.
        :param corr: comparison method. See the OpenCV cv2.matchTemplate()
            documentation for options and details.
        """
        # Bug fix: the original wrote `templates[i] = to_grayscale(t)` in place,
        # which fails for tuples/generators (the parameter is annotated Iterable)
        # and mutates the caller's list. Build a fresh list instead.
        self.templates = [to_grayscale(t) for t in templates]
        self.threshold = threshold
        self.corr = corr
        self._space = space

    @property
    def space(self) -> Space:
        return self._space

    def _raw_map(self, raw_img: np.ndarray):
        # Per-template lists of (y, x) match locations in the grayscale image.
        return self._match_templates(
            to_grayscale(raw_img), self.templates, self.threshold, self.corr
        )

    def __call__(self, raw_img: np.ndarray) -> np.ndarray:
        values = self._raw_map(raw_img)
        # Flatten [[(y, x), ...], ...] into one coordinate vector.
        s = [item for sublist in values for coords in sublist for item in coords]
        assert np.array(s) in self.space
        return np.array(s)

    def draw_bounding_boxes(self, raw_img: np.ndarray, bb_color: int = 0):
        """Return a grayscale version of raw_img with a box drawn on every match."""
        positions = self._raw_map(raw_img)
        raw_img = to_grayscale(raw_img)
        for i, template in enumerate(self.templates):
            h, w = template.shape
            for y, x in positions[i]:
                raw_img = draw_rects(raw_img, x, y, w, h, bb_color, 1, False)
        return raw_img

    @staticmethod
    def _match_templates(
        raw_img: np.ndarray,
        templates: Iterable[np.ndarray],
        threshold: float,
        corr: str = "TM_CCORR_NORMED",
    ) -> List[Tuple[float, float]]:
        """
        :param raw_img: RGB or grayscale image; converted to grayscale if RGB
        :param templates: grayscale templates
        :param threshold: threshold at or above which a match is detected.
            If threshold < 0, only the match with highest confidence will be returned.
        :param corr: comparison method. See the OpenCV cv2.matchTemplate()
            documentation for options and details.
        """
        img = (
            raw_img if raw_img.ndim == 2 else cv2.cvtColor(raw_img, cv2.COLOR_RGB2GRAY)
        )
        all_match_locs = []
        for template in templates:
            template_match = cv2.matchTemplate(img, template, getattr(cv2, corr))
            # TM_SQDIFF_NORMED scores are distances (lower is better), so its
            # comparisons are inverted relative to the correlation methods.
            if corr == "TM_SQDIFF_NORMED":
                if threshold < 0:
                    match_locs = [
                        np.unravel_index(
                            np.argmin(template_match), template_match.shape
                        )
                    ]
                else:
                    match_locs = np.where(template_match <= 1 - threshold)
            else:
                if threshold < 0:
                    match_locs = [
                        np.unravel_index(
                            np.argmax(template_match), template_match.shape
                        )
                    ]
                else:
                    match_locs = np.where(template_match >= threshold)
            if threshold < 0:
                all_match_locs.append(match_locs)
            else:
                all_match_locs.append(list(zip(*match_locs)))
        return all_match_locs
def make_template_from_img(
    img: np.ndarray, x: int, y: int, width: int, height: int,
) -> np.ndarray:
    """Crop a (height, width) template whose top-left corner is at (x, y)."""
    return img[y : y + height, x : x + width]
| [
"numpy.argmax",
"cv2.cvtColor",
"numpy.argmin",
"vsrl.symmap.utils.draw_rects",
"vsrl.symmap.utils.to_grayscale",
"numpy.array",
"numpy.where"
] | [((2041, 2052), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2049, 2052), True, 'import numpy as np\n'), ((2190, 2211), 'vsrl.symmap.utils.to_grayscale', 'to_grayscale', (['raw_img'], {}), '(raw_img)\n', (2202, 2211), False, 'from vsrl.symmap.utils import draw_rects, drop_zeros, to_grayscale\n'), ((1421, 1436), 'vsrl.symmap.utils.to_grayscale', 'to_grayscale', (['t'], {}), '(t)\n', (1433, 1436), False, 'from vsrl.symmap.utils import draw_rects, drop_zeros, to_grayscale\n'), ((1728, 1749), 'vsrl.symmap.utils.to_grayscale', 'to_grayscale', (['raw_img'], {}), '(raw_img)\n', (1740, 1749), False, 'from vsrl.symmap.utils import draw_rects, drop_zeros, to_grayscale\n'), ((2000, 2011), 'numpy.array', 'np.array', (['s'], {}), '(s)\n', (2008, 2011), True, 'import numpy as np\n'), ((3137, 3178), 'cv2.cvtColor', 'cv2.cvtColor', (['raw_img', 'cv2.COLOR_RGB2GRAY'], {}), '(raw_img, cv2.COLOR_RGB2GRAY)\n', (3149, 3178), False, 'import cv2\n'), ((2364, 2415), 'vsrl.symmap.utils.draw_rects', 'draw_rects', (['raw_img', 'x', 'y', 'w', 'h', 'bb_color', '(1)', '(False)'], {}), '(raw_img, x, y, w, h, bb_color, 1, False)\n', (2374, 2415), False, 'from vsrl.symmap.utils import draw_rects, drop_zeros, to_grayscale\n'), ((3667, 3708), 'numpy.where', 'np.where', (['(template_match <= 1 - threshold)'], {}), '(template_match <= 1 - threshold)\n', (3675, 3708), True, 'import numpy as np\n'), ((4017, 4054), 'numpy.where', 'np.where', (['(template_match >= threshold)'], {}), '(template_match >= threshold)\n', (4025, 4054), True, 'import numpy as np\n'), ((3516, 3541), 'numpy.argmin', 'np.argmin', (['template_match'], {}), '(template_match)\n', (3525, 3541), True, 'import numpy as np\n'), ((3866, 3891), 'numpy.argmax', 'np.argmax', (['template_match'], {}), '(template_match)\n', (3875, 3891), True, 'import numpy as np\n')] |
"""
A collection of functions for managing multi-fidelity functions.
-- <EMAIL>
"""
# pylint: disable=import-error
# pylint: disable=no-member
# pylint: disable=invalid-name
# pylint: disable=relative-import
# pylint: disable=super-on-old-class
import numpy as np
# Local imports
from utils.general_utils import map_to_cube, map_to_bounds
class MFFunction(object):
  """ Wrapper around a multi-fidelity function: stores the fidelity/domain
      bounds and dispatches single/multiple-point queries, handling both
      vectorised and non-vectorised underlying functions. """

  def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
               vectorised=True):
    """ Constructor.
        mf_func: takes two arguments mf_func(z, x) where z is the fidelity and x is
          the point in the domain.
        fidel_cost_func: fidel_cost_func(z) gives the cost of evaluating at z.
        fidel_bounds, domain_bounds: are the bounds of the fidelity spaces, domains
          resp.
        vectorised: If True it means mf_func and fidel_cost_func can take
          multiple inputs and produce multiple outputs. If False, the functions
          can take only single inputs in 'column' form.
    """
    self.mf_func = mf_func
    self.fidel_cost_func = fidel_cost_func
    self.fidel_bounds = np.array(fidel_bounds)
    self.domain_bounds = np.array(domain_bounds)
    self.fidel_dim = len(fidel_bounds)
    self.domain_dim = len(domain_bounds)
    self.vectorised = vectorised

  # Wrappers for evaluating the function -------------------------------------------------
  def eval_at_fidel_single_point(self, Z, X):
    """ Evaluates X at the given Z at a single point. """
    if not self.vectorised:
      return float(self.mf_func(Z, X))
    else:
      # Reshape to 1-row matrices so a vectorised function can consume them.
      Z = np.array(Z).reshape((1, self.fidel_dim))
      X = np.array(X).reshape((1, self.domain_dim))
      return float(self.mf_func(Z, X))

  def eval_at_fidel_multiple_points(self, Z, X):
    """ Evaluates X at the given Z at multiple points. """
    if self.vectorised:
      return self.mf_func(Z, X).ravel()
    else:
      # Fall back to row-by-row single-point evaluation.
      ret = []
      for i in range(len(Z)):
        ret.append(self.eval_at_fidel_single_point(Z[i, :], X[i, :]))
      return np.array(ret)

  # Wrappers for evaluating the cost function --------------------------------------------
  def eval_fidel_cost_single_point(self, Z):
    """ Evaluates the cost function at a single point. """
    if not self.vectorised:
      return float(self.fidel_cost_func(Z))
    else:
      Z = np.array(Z).reshape((1, self.fidel_dim))
      return float(self.fidel_cost_func(Z))

  def eval_fidel_cost_multiple_points(self, Z):
    """ Evaluates the cost function at multiple points. """
    if self.vectorised:
      return self.fidel_cost_func(Z).ravel()
    else:
      ret = []
      for i in range(len(Z)):
        ret.append(self.eval_fidel_cost_single_point(Z[i, :]))
      return np.array(ret)

  # Wrappers for evaluating at normalised points -----------------------------------------
  def eval_at_fidel_single_point_normalised(self, Z, X):
    """ Evaluates X at the given Z at a single point using normalised coordinates. """
    Z, X = self.get_unnormalised_coords(Z, X)
    return self.eval_at_fidel_single_point(Z, X)

  def eval_at_fidel_multiple_points_normalised(self, Z, X):
    """ Evaluates X at the given Z at multiple points using normalised coordinates. """
    Z, X = self.get_unnormalised_coords(Z, X)
    return self.eval_at_fidel_multiple_points(Z, X)

  def eval_fidel_cost_single_point_normalised(self, Z):
    """ Evaluates the cost function at a single point using normalised coordinates. """
    Z, _ = self.get_unnormalised_coords(Z, None)
    return self.eval_fidel_cost_single_point(Z)

  def eval_fidel_cost_multiple_points_normalised(self, Z):
    """ Evaluates the cost function at multiple points using normalised coordinates. """
    Z, _ = self.get_unnormalised_coords(Z, None)
    return self.eval_fidel_cost_multiple_points(Z)

  # Maps to normalised coordinates and vice versa ----------------------------------------
  def get_normalised_coords(self, Z, X):
    """ Maps points in the original space to the unit cube; either argument may be None. """
    ret_Z = None if Z is None else map_to_cube(Z, self.fidel_bounds)
    ret_X = None if X is None else map_to_cube(X, self.domain_bounds)
    return ret_Z, ret_X

  def get_unnormalised_coords(self, Z, X):
    """ Maps points in the unit cube to the original space; either argument may be None. """
    ret_Z = None if Z is None else map_to_bounds(Z, self.fidel_bounds)
    ret_X = None if X is None else map_to_bounds(X, self.domain_bounds)
    return ret_Z, ret_X
# MFFunction ends here ===================================================================
class MFOptFunction(MFFunction):
""" A class which we will use for MF Optimisation. """
def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
opt_fidel_unnormalised, vectorised=True, opt_pt=None, opt_val=None):
""" Constructor.
mf_func: takes two arguments mf_func(z, x) where z is the fidelity and x is
the point in the domain.
fidel_cost_func: fidel_cost_func(z) gives the cost of evaluating at z.
fidel_bounds, domain_bounds: are the bounds of the fidelity spaces, domains
resp.
opt_fidel: The point in the fidelity space at which we want to optimise.
vectorised: If True it means mf_func and fidel_cost_func can take
multiple inputs and produce multiple outputs. If False, the functions
can take only single inputs in 'column' form.
opt_pt, opt_val: The optimum point and value in the domain.
"""
super(MFOptFunction, self).__init__(mf_func, fidel_cost_func, fidel_bounds,
domain_bounds, vectorised)
self.opt_fidel_unnormalised = np.array(opt_fidel_unnormalised).ravel()
self.opt_fidel, _ = self.get_normalised_coords(opt_fidel_unnormalised, None)
if len(self.opt_fidel) != self.fidel_dim:
raise ValueError('opt_fidel should be a %d-vector.'%(self.fidel_dim))
self.opt_fidel_cost = self.cost_single(self.opt_fidel)
# Set the optimisation point.
self.opt_pt = opt_pt
self.opt_val = opt_val
self.mfgp = None # we will need this later on.
self.finite_fidels = None
self.is_finite = False
# Evaluation ---------------------------------------------------------------------------
def eval_single(self, Z, X):
""" Evaluate at a single point. """
return self.eval_at_fidel_single_point_normalised(Z, X)
def eval_multiple(self, Z, X):
""" Evaluate at multiple points. """
return self.eval_at_fidel_multiple_points_normalised(Z, X)
def eval(self, Z, X):
""" Executes either eval_single or eval_multiple. """
if len(Z.shape) == 1:
return self.eval_single(Z, X)
elif len(Z.shape) == 2:
return self.eval_multiple(Z, X)
else:
raise ValueError('Z should be either a vector or matrix.')
# Cost ---------------------------------------------------------------------------------
def cost_single(self, Z):
""" Evaluates cost at a single point. """
return self.eval_fidel_cost_single_point_normalised(Z)
def cost_multiple(self, Z):
""" Evaluates cost at multiple points. """
return self.eval_fidel_cost_multiple_points_normalised(Z)
def cost(self, Z):
""" Executes either cost_single or cost_multiple. """
if len(Z.shape) == 1:
return self.cost_single(Z)
elif len(Z.shape) == 2:
return self.cost_multiple(Z)
else:
raise ValueError('Z should be either a vector or matrix.')
# Other --------------------------------------------------------------------------------
def get_cost_ratio(self, Z1, Z2=None):
""" Obtains the ration between the costs. """
if Z2 is None:
cost_Z2 = self.opt_fidel_cost
else:
cost_Z2 = self.cost(Z2)
return self.cost(Z1)/cost_Z2
  def get_candidate_fidelities(self, filter_by_cost=True):
    """ Gets candidate fidelities. If filter_by_cost is True then it doesn't return those
        whose cost is larger than opt_cost_fidel. """
    # Determine the candidates randomly
    if self.is_finite:
      return self.get_candidate_fidelities_finite()
    if self.fidel_dim == 1:
      # 1-D: a uniform grid of 200 points on [0, 1].
      candidates = np.linspace(0, 1, 200).reshape((-1, 1))
    elif self.fidel_dim == 2:
      # 2-D: a cell-centred 25x25 lattice on the unit square.
      num_per_dim = 25
      candidates = (np.indices((num_per_dim, num_per_dim)).reshape(2, -1).T + 0.5) / \
                   float(num_per_dim)
    elif self.fidel_dim == 3:
      # 3-D: a cell-centred 10x10x10 lattice plus 1000 uniform random points.
      num_per_dim = 10
      cand_1 = (np.indices((num_per_dim, num_per_dim, num_per_dim)).reshape(3, -1).T
                + 0.5) / float(num_per_dim)
      cand_2 = np.random.random((1000, self.fidel_dim))
      candidates = np.vstack((cand_1, cand_2))
    else:
      # Higher dimensions: purely random sampling of 4000 points.
      candidates = np.random.random((4000, self.fidel_dim))
    # To filter by cost?
    if filter_by_cost:
      # Strict '<' also removes fidelities whose cost exactly equals opt_fidel_cost.
      fidel_costs = self.cost_multiple(candidates)
      filtered_idxs = fidel_costs < self.opt_fidel_cost
      candidates = candidates[filtered_idxs, :]
    # Finally add the highest fidelity.
    candidates = np.vstack((self.opt_fidel.reshape((1, self.fidel_dim)), candidates))
    return candidates
def set_finite_fidels(self, finite_fidels_raw, is_normalised):
""" Sets the finite fidels. """
self.is_finite = True
if is_normalised:
self.finite_fidels = finite_fidels_raw
else:
self.finite_fidels_unnormalised = finite_fidels_raw
self.finite_fidels, _ = self.get_normalised_coords(finite_fidels_raw, None)
  def get_candidate_fidelities_finite(self):
    """ Gets the finite candidate fidelities. """
    # Repeat the finite set 100x, shuffle, then keep a bounded sample.
    candidates = np.repeat(self.finite_fidels, 100, axis=0)
    np.random.shuffle(candidates)
    # NOTE(review): this slice starts at 1, silently dropping one shuffled row;
    # [:500] was probably intended -- confirm before changing.
    candidates = candidates[1:500, :]
    candidates = np.vstack((self.opt_fidel.reshape((1, self.fidel_dim)), candidates))
    return candidates
# MFOptFunction ends here ================================================================
class NoisyMFOptFunction(MFOptFunction):
  """ An MFOptFunction whose evaluations are corrupted by additive noise. """
  def __init__(self, mf_func, fidel_cost_func, fidel_bounds, domain_bounds,
               opt_fidel_unnormalised, noise_var, noise_type='gauss',
               *args, **kwargs):
    """ Constructor. See MFOptFunction and MFFunction for args. """
    super(NoisyMFOptFunction, self).__init__(mf_func, fidel_cost_func, fidel_bounds,
        domain_bounds, opt_fidel_unnormalised, *args, **kwargs)
    self.noise_var = noise_var
    self.noise_type = noise_type
  # Noise generation -----------------------------------------------------------------------
  def noise_single(self):
    """ Draws a single noise value. """
    return float(self.noise_multiple(1))
  def noise_multiple(self, num_samples):
    """ Draws num_samples iid noise values. """
    if self.noise_type != 'gauss':
      raise NotImplementedError('Only implemented gauss noise so far. ')
    return np.random.normal(scale=np.sqrt(self.noise_var), size=(num_samples))
  # Noiseless access to the parent's evaluations -------------------------------------------
  def eval_single_noiseless(self, Z, X):
    """ Noise-free evaluation at a single point. """
    return super(NoisyMFOptFunction, self).eval_single(Z, X)
  def eval_multiple_noiseless(self, Z, X):
    """ Noise-free evaluation at multiple points. """
    return super(NoisyMFOptFunction, self).eval_multiple(Z, X)
  # Noisy evaluations (override the parent) ------------------------------------------------
  def eval_single(self, Z, X):
    """ Evaluates at a single point and adds one noise draw. """
    return self.eval_single_noiseless(Z, X) + self.noise_single()
  def eval_multiple(self, Z, X):
    """ Evaluates at multiple points and adds per-point noise. """
    return self.eval_multiple_noiseless(Z, X) + self.noise_multiple(len(Z))
def get_noisy_mfof_from_mfof(mfof, noise_var, noise_type='gauss', additional_attrs=None):
  """ Returns a noisy mfof object from an mfof object. """
  noisy = NoisyMFOptFunction(mfof.mf_func, mfof.fidel_cost_func, mfof.fidel_bounds,
                             mfof.domain_bounds, mfof.opt_fidel_unnormalised, noise_var,
                             noise_type=noise_type,
                             vectorised=mfof.vectorised,
                             opt_pt=mfof.opt_pt,
                             opt_val=mfof.opt_val,
                             )
  # Copy over any extra attributes (defaults to the fitted GP fields).
  attrs_to_copy = ['init_mfgp', 'mfgp'] if additional_attrs is None else additional_attrs
  for attr_name in attrs_to_copy:
    if hasattr(mfof, attr_name):
      setattr(noisy, attr_name, getattr(mfof, attr_name))
  return noisy
# NOisyMFOptFunction ends here ===========================================================
| [
"numpy.random.shuffle",
"numpy.indices",
"numpy.random.random",
"utils.general_utils.map_to_bounds",
"numpy.array",
"numpy.linspace",
"numpy.vstack",
"numpy.sqrt",
"utils.general_utils.map_to_cube",
"numpy.repeat"
] | [((1255, 1277), 'numpy.array', 'np.array', (['fidel_bounds'], {}), '(fidel_bounds)\n', (1263, 1277), True, 'import numpy as np\n'), ((1303, 1326), 'numpy.array', 'np.array', (['domain_bounds'], {}), '(domain_bounds)\n', (1311, 1326), True, 'import numpy as np\n'), ((9648, 9690), 'numpy.repeat', 'np.repeat', (['self.finite_fidels', '(100)'], {'axis': '(0)'}), '(self.finite_fidels, 100, axis=0)\n', (9657, 9690), True, 'import numpy as np\n'), ((9695, 9724), 'numpy.random.shuffle', 'np.random.shuffle', (['candidates'], {}), '(candidates)\n', (9712, 9724), True, 'import numpy as np\n'), ((2166, 2179), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (2174, 2179), True, 'import numpy as np\n'), ((2862, 2875), 'numpy.array', 'np.array', (['ret'], {}), '(ret)\n', (2870, 2875), True, 'import numpy as np\n'), ((4172, 4205), 'utils.general_utils.map_to_cube', 'map_to_cube', (['Z', 'self.fidel_bounds'], {}), '(Z, self.fidel_bounds)\n', (4183, 4205), False, 'from utils.general_utils import map_to_cube, map_to_bounds\n'), ((4241, 4275), 'utils.general_utils.map_to_cube', 'map_to_cube', (['X', 'self.domain_bounds'], {}), '(X, self.domain_bounds)\n', (4252, 4275), False, 'from utils.general_utils import map_to_cube, map_to_bounds\n'), ((4438, 4473), 'utils.general_utils.map_to_bounds', 'map_to_bounds', (['Z', 'self.fidel_bounds'], {}), '(Z, self.fidel_bounds)\n', (4451, 4473), False, 'from utils.general_utils import map_to_cube, map_to_bounds\n'), ((4509, 4545), 'utils.general_utils.map_to_bounds', 'map_to_bounds', (['X', 'self.domain_bounds'], {}), '(X, self.domain_bounds)\n', (4522, 4545), False, 'from utils.general_utils import map_to_cube, map_to_bounds\n'), ((5801, 5833), 'numpy.array', 'np.array', (['opt_fidel_unnormalised'], {}), '(opt_fidel_unnormalised)\n', (5809, 5833), True, 'import numpy as np\n'), ((1723, 1734), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (1731, 1734), True, 'import numpy as np\n'), ((1774, 1785), 'numpy.array', 'np.array', (['X'], {}), 
'(X)\n', (1782, 1785), True, 'import numpy as np\n'), ((2468, 2479), 'numpy.array', 'np.array', (['Z'], {}), '(Z)\n', (2476, 2479), True, 'import numpy as np\n'), ((8264, 8286), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(200)'], {}), '(0, 1, 200)\n', (8275, 8286), True, 'import numpy as np\n'), ((8681, 8721), 'numpy.random.random', 'np.random.random', (['(1000, self.fidel_dim)'], {}), '((1000, self.fidel_dim))\n', (8697, 8721), True, 'import numpy as np\n'), ((8741, 8768), 'numpy.vstack', 'np.vstack', (['(cand_1, cand_2)'], {}), '((cand_1, cand_2))\n', (8750, 8768), True, 'import numpy as np\n'), ((8798, 8838), 'numpy.random.random', 'np.random.random', (['(4000, self.fidel_dim)'], {}), '((4000, self.fidel_dim))\n', (8814, 8838), True, 'import numpy as np\n'), ((10777, 10800), 'numpy.sqrt', 'np.sqrt', (['self.noise_var'], {}), '(self.noise_var)\n', (10784, 10800), True, 'import numpy as np\n'), ((8377, 8415), 'numpy.indices', 'np.indices', (['(num_per_dim, num_per_dim)'], {}), '((num_per_dim, num_per_dim))\n', (8387, 8415), True, 'import numpy as np\n'), ((8553, 8604), 'numpy.indices', 'np.indices', (['(num_per_dim, num_per_dim, num_per_dim)'], {}), '((num_per_dim, num_per_dim, num_per_dim))\n', (8563, 8604), True, 'import numpy as np\n')] |
# <NAME>, ddk960, 11096287
# a6q1
# CMPT 141, Assignment 6
# Arrarys
# Continue your codes based on this starter file
import numpy as np
import csv
import math as m
def perchange(nval, row):
    """Absolute percent changes between consecutive values in one row.

    For row `row` of the 2-D sequence `nval`, returns the four values
    |(v[i] - v[i-1]) / v[i-1]| * 100 for columns i = 1..4 (the data has
    five age-group columns per row).
    """
    # The original bound lis = [] and immediately rebound it; the dead
    # assignment is dropped.
    return [abs((nval[row][i] - nval[row][i - 1]) / nval[row][i - 1] * 100)
            for i in range(1, 5)]
# put the csv file in the same folder as your program
f = open('age_statistics.csv', 'r')
csvreader = csv.reader(f, delimiter=',')
data = []
for row in csvreader:
    row1 = [item.replace(',', '') for item in row] # This is used to remove the thousand separator , in each row
    data.append(row1)
print(data)
# data1: rows from index 9 onward (skip the file's header/preamble rows).
data1=[]
# data2: the numeric columns of each kept row (first column is the label).
data2=[]
for i in range(9,len(data)):
    data1.append(data[i])
for i in range(len(data1)):
    data2.append(data1[i][1:])
data2 =[list(map(int,i) ) for i in data2]
# age_dict maps a row index to that row's label (first CSV column).
age_dict={}
for i in range (len(data1)):
    age_dict[i]=data1[i][0]
# write your assignment based on the varaible data
data_array=np.array(data2)
print(data_array.shape)
print(data_array.size)
print(data_array.dtype)
# NOTE(review): summ is computed but never used afterwards.
summ=np.sum(data_array, axis=0)
# Total absolute percent change per row, for the first 21 rows.
largest=[]
for i in range(0,21):
    largest.append(sum(perchange(data_array,i)))
# Linear scan for the maximum total change and its row index.
maxi=-1
idxx=0
for i in range (len(largest)):
    if maxi<largest[i]:
        maxi=largest[i]
        idxx=i
print(maxi)
# NOTE(review): the next line prints age_dict[i-1], where i is simply the last
# loop index -- age_dict[idxx] (the argmax row) was almost certainly intended.
print(age_dict[i-1]) | [
"numpy.array",
"csv.reader",
"numpy.sum"
] | [((408, 436), 'csv.reader', 'csv.reader', (['f'], {'delimiter': '""","""'}), "(f, delimiter=',')\n", (418, 436), False, 'import csv\n'), ((934, 949), 'numpy.array', 'np.array', (['data2'], {}), '(data2)\n', (942, 949), True, 'import numpy as np\n'), ((1027, 1053), 'numpy.sum', 'np.sum', (['data_array'], {'axis': '(0)'}), '(data_array, axis=0)\n', (1033, 1053), True, 'import numpy as np\n')] |
import math
import numpy as np
from mindspore.common.tensor import Tensor
def _average_units(shape):
if not shape:
return 1
if len(shape) == 1:
return float(shape[0])
if len(shape) == 2:
return float(shape[0] + shape[1]) / 2.
raise RuntimeError("not support shape.")
def weight_variable(shape):
    """Uniform initialiser: float32 samples from [-limit, limit], with
    limit = sqrt(3 / max(1, avg_units))."""
    avg_units = _average_units(shape)
    scale = 1.0 / max(1., avg_units)
    limit = math.sqrt(3.0 * scale)
    # Draw uniformly within [-limit, limit) over the requested shape.
    values = np.random.uniform(-limit, limit, shape).astype(np.float32)
    return Tensor(values)
def one_weight(shape):
    """All-ones float32 tensor of the given shape."""
    ones = np.ones(shape, dtype=np.float32)
    return Tensor(ones)
def zero_weight(shape):
    """All-zeros float32 tensor of the given shape."""
    zeros = np.zeros(shape, dtype=np.float32)
    return Tensor(zeros)
def normal_weight(shape, num_units):
    """Gaussian initialiser: float32 samples with mean 0 and std num_units**-0.5."""
    norm = np.random.normal(0.0, num_units ** -0.5, shape).astype(np.float32)  # normal distribution; 0.0 is the mean, num_units ** -0.5 the standard deviation
    return Tensor(norm) | [
"numpy.random.uniform",
"math.sqrt",
"numpy.zeros",
"numpy.ones",
"mindspore.common.tensor.Tensor",
"numpy.random.normal"
] | [((471, 493), 'math.sqrt', 'math.sqrt', (['(3.0 * scale)'], {}), '(3.0 * scale)\n', (480, 493), False, 'import math\n'), ((612, 626), 'mindspore.common.tensor.Tensor', 'Tensor', (['values'], {}), '(values)\n', (618, 626), False, 'from mindspore.common.tensor import Tensor\n'), ((711, 723), 'mindspore.common.tensor.Tensor', 'Tensor', (['ones'], {}), '(ones)\n', (717, 723), False, 'from mindspore.common.tensor import Tensor\n'), ((811, 824), 'mindspore.common.tensor.Tensor', 'Tensor', (['zeros'], {}), '(zeros)\n', (817, 824), False, 'from mindspore.common.tensor import Tensor\n'), ((995, 1007), 'mindspore.common.tensor.Tensor', 'Tensor', (['norm'], {}), '(norm)\n', (1001, 1007), False, 'from mindspore.common.tensor import Tensor\n'), ((508, 547), 'numpy.random.uniform', 'np.random.uniform', (['(-limit)', 'limit', 'shape'], {}), '(-limit, limit, shape)\n', (525, 547), True, 'import numpy as np\n'), ((665, 679), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (672, 679), True, 'import numpy as np\n'), ((764, 779), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (772, 779), True, 'import numpy as np\n'), ((877, 924), 'numpy.random.normal', 'np.random.normal', (['(0.0)', '(num_units ** -0.5)', 'shape'], {}), '(0.0, num_units ** -0.5, shape)\n', (893, 924), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 7 14:31:56 2017
@author: <NAME>
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import animation
from scipy.spatial import ConvexHull
from scipy.misc import imread
import time
import datetime
from pylab import rcParams
from sklearn import preprocessing
# Time periods (in ms) to calculate convex hull areas
periods = [3000]
participantNums = range(1, 21)
dwgs = range(1, 11)
viewingThresh = 5000 # DWG Viewing Time Threshold (ms)
viewingPointMin = 20 # Minimum # of points required to make a viewing
imagePath = 'results/images/'
filePrefix = 'BeGaze Data/Raw Data/Participant '
fileSuffix = '.txt'
# Convex hull areas smaller than detailThresh are considered "detailed viewing"
# while convex hull areas larger than detailThresh are considered
# "distributed viewing." This will be used to calculate a propDetail, which is
# the proportion of time spent in "detailed viewing"
detailThresh = 0.10 # Set threshold to 10%
# NOTE(review): detailThresh is not referenced anywhere in this file -- the
# propDetail calculation it describes appears to be unimplemented.
#########################
# Don't edit below here #
#########################
# Initialize results
# One row per (period, participant, dwg, viewing).
# NOTE(review): only the first six columns are filled by doCalculations;
# the dwg-/participant-level aggregate columns stay NaN.
results = pd.DataFrame(columns = ['period', 'participant', 'dwg', 'viewing',
                                  'viewingAvgHullArea', 'viewingTime',
                                  'dwgAvgHullArea', 'dwgTime',
                                  'participantAvgHullArea', 'participantTime'])
# Remove all categories except for "Visual Intake"
def getVisualIntakes(data):
    """Keep only the rows whose category is 'Visual Intake'."""
    intakes_only = data.loc[data['category'] == 'Visual Intake']
    return intakes_only
# Remove all AOI's that include spools (no '-' or 'white space')
def getSpools(data):
    """Keep only rows whose AOI name mentions a spool."""
    spool_mask = data['aoi'].str.contains("Spool")
    return data[spool_mask]
def getFloatCoords(data):
    """Cast the x and y gaze-coordinate columns to float64."""
    return data.astype(dtype={'x': np.float64, 'y': np.float64})
# Remove combine rows that have the same binocular index
def getFirstIndices(data):
    """Keep only the first row for each binocular 'index' value."""
    deduplicated = data.drop_duplicates(subset='index', keep='first')
    return deduplicated
# Read in the raw BeGaze data files
def getData(filePrefix, fileSuffix, participantNumTxt):
    """Load one participant's raw BeGaze export and name its columns."""
    path = filePrefix + participantNumTxt + fileSuffix
    data = pd.read_table(path, delimiter = ',')
    # Column names for the raw comma-delimited BeGaze export.
    data.columns = ['recordtime', 'timestamp', 'category',
                    'index', 'x', 'y', 'aoi']
    return data
def getCleanData(data):
    """Standard cleaning pipeline: visual intakes only, spool AOIs, float coords."""
    return getFloatCoords(getSpools(getVisualIntakes(data)))
def getDwgData(data, dwgNum):
    """Rows belonging to AOI 'Spool <dwgNum>', in chronological order."""
    spool_rows = data.loc[data['aoi'] == 'Spool ' + str(dwgNum)]
    return spool_rows.sort_values(by = ['timestamp'])
def getScaledCoordinates(data):
    """Min-max normalise the x and y columns to [0, 1], each independently."""
    scaler = preprocessing.MinMaxScaler()
    # fit_transform refits per column, so one scaler instance suffices.
    for col in ('x', 'y'):
        data[col] = scaler.fit_transform(data[col].values.reshape(-1, 1))
    return data
def addDurationsCol(data):
    """Add a 'duration' column: each fixation's time since the previous one.

    The first row's duration is its own recordtime (no predecessor).
    Assumes a 0..n-1 integer index (callers reset_index beforehand).
    """
    # Vectorised diff replaces the old per-row loop, which also relied on
    # DataFrame.set_value (removed in pandas 1.0).
    if len(data) > 0:
        data['duration'] = data['recordtime'].diff()
        data.iloc[0, data.columns.get_loc('duration')] = data.iloc[0]['recordtime']
    return data
# Split the data into viewings based upon the duration of each fixation
# If a duration exceeds viewingThresh (global variable defined by user) then split
def getDwgViewings(data, viewingThresh, viewingPointMin):
    """Split a drawing's fixations into separate viewings.

    A fixation whose duration exceeds viewingThresh (ms) is treated as a
    gap: it is labelled viewing 0 (and dropped) and a new viewing starts
    after it.  Viewings with fewer than viewingPointMin fixations are
    discarded.  Returns a list of DataFrames, one per retained viewing,
    each re-indexed from 0 and without the helper 'viewing' column.
    """
    # Label every fixation with its viewing number (0 = gap fixation).
    # (The original used DataFrame.set_value, removed in pandas 1.0.)
    viewingNum = 1
    labels = []
    for duration in data['duration']:
        if duration > viewingThresh:
            labels.append(0)
            viewingNum += 1
        else:
            labels.append(viewingNum)
    data['viewing'] = labels
    totalViewings = viewingNum
    data = data[data['viewing'] > 0]
    # Collect each sufficiently large viewing as its own DataFrame.
    viewings = []
    for i in range(1, totalViewings + 1):
        thisViewing = data[data['viewing'] == i].copy()
        thisViewing.reset_index(drop=True, inplace=True)
        del thisViewing['viewing']
        if len(thisViewing) >= viewingPointMin:
            viewings.append(thisViewing)
    return viewings
def getViewingTime(data):
    """Time of each fixation relative to the viewing's first fixation (ms)."""
    # Vectorised replacement for the old per-row DataFrame.apply, which made
    # an O(n) sequence of Python-level calls for a simple column subtraction.
    return data['recordtime'] - data['recordtime'].min()
def getStartRow(data, finishRow, period):
    """First row whose viewingTime is within `period` ms of finishRow's,
    or False when even row 0 is within the period."""
    end_time = data.iloc[finishRow]['viewingTime']
    for candidate in range(finishRow, -1, -1):
        if end_time - data.iloc[candidate]['viewingTime'] > period:
            return candidate + 1
    return False
def getRowCountStartPeriod(data, period):
    """For each row, record its time window's start row, size and span.

    Walking backwards, each row's window is the run of preceding fixations
    within `period` ms (per getStartRow).  Rows whose window would extend
    before the first fixation get NaNs.  Assumes a 0..n-1 integer index.
    """
    # DataFrame.set_value was removed in pandas 1.0; .loc assignment replaces it.
    for row in range(len(data) - 1, -1, -1):
        startRow = getStartRow(data, row, period)
        if startRow:
            data.loc[row, 'startRow'] = startRow
            data.loc[row, 'rowCount'] = row - startRow + 1
            data.loc[row, 'period'] = (data.iloc[row]['viewingTime']
                                       - data.iloc[startRow]['viewingTime'])
        else:
            data.loc[row, 'startRow'] = np.nan
            data.loc[row, 'rowCount'] = np.nan
            data.loc[row, 'period'] = np.nan
    return data
# Calculate the convex hulls and hullAreas
def getConvexHulls(data):
    """Compute the convex hull (and area, in %) of each row's window.

    For every row with a valid startRow, the hull of the (x, y) fixations
    in [startRow, row] is stored in 'hull' and its area times 100 in
    'hullArea' (scipy's .volume is the area in 2-D).  Windows with fewer
    than three distinct points get hullArea 0.
    """
    # Pre-create the object column so hull objects can be stored row by row.
    # (DataFrame.set_value was removed in pandas 1.0.)
    if 'hull' not in data.columns:
        data['hull'] = None
    for i, row in data.iterrows():
        if np.isnan(row['startRow']):
            continue
        # Just the points of this row's window, for the hull computation.
        points = data.iloc[int(row['startRow']):i + 1][['x', 'y']]
        if (int(row['rowCount']) > 2) and (len(points.drop_duplicates()) > 2):
            hull = ConvexHull(points)
            data.at[i, 'hull'] = hull
            data.loc[i, 'hullArea'] = hull.volume * 100
        else:
            data.loc[i, 'hullArea'] = 0
    return data
def getPlotPoints(data, frame):
    """The (x, y) fixations inside `frame`'s window, or [] if it has none."""
    startRow = data.iloc[frame]['startRow']
    if np.isnan(startRow):
        return []
    plotPoints = data.iloc[int(startRow):frame + 1][['x', 'y']]
    plotPoints.reset_index(inplace=True)
    del plotPoints['index']
    return plotPoints
def updatePlot(frame, data, startFrame, period, participantNumTxt,
               dwgNumTxt, viewingNumTxt):
    """Animation callback: redraw one frame of the viewing animation.

    Left subplot: the window's fixations and their convex hull over the
    reference drawing image.  Right subplot: the hull-area time series and
    its running average.  Updates the globals `finalTime` and `average`,
    which doCalculations() reads after the animation finishes.
    """
    global finalTime
    global average
    print('period:' + str(period) + ' participant:' + participantNumTxt
          + ' dwg:' + dwgNumTxt + ' viewing:' + viewingNumTxt
          + ' frame: ' + str(frame))
    row = data.iloc[frame]
    if(row['rowCount'] > 2):
        plotPoints = getPlotPoints(data, frame)
        if(len(plotPoints) > 2):
            if(len(plotPoints.drop_duplicates()) > 2):
                # DataFrame.as_matrix() was removed in pandas 1.0;
                # to_numpy() is its direct replacement.
                plotPoints = plotPoints.to_numpy()
                # Plot the points! Draw the points in the left subplot
                ax1.cla()
                ax1.set_xlim([0, 1])
                ax1.set_ylim([0, 1])
                ax1.set_xlabel('X Coordinate (normalized)')
                ax1.set_ylabel('Y Coordinate (normalized)')
                # Set the Left plot background to the reference image
                img = imread('referenceImages/DWG' + dwgNumTxt + '.png')
                ax1.imshow(img, zorder=0, extent=[0,1,0,1], aspect='auto')
                ax1.plot(plotPoints[:,0], plotPoints[:,1], 'o')
                # Draw each edge of this window's convex hull.
                for simplex in row['hull'].simplices:
                    ax1.plot(plotPoints[simplex, 0],
                             plotPoints[simplex, 1], 'k-')
                # Set the subplot title to frameRange
                ax1.title.set_text('Fixation #\'s: '
                                   + str(int(row['startRow'])) + ' - '
                                   + str(frame) + '\nPeriod: ' +
                                   str(row['period'])[0:6] + ' milliseconds')
    # Update Right plot
    if(frame > startFrame):
        # Update the x axis limit to include the new data
        ax2.set_xlim(left = data.loc[startFrame, 'viewingTime'],
                     right = data.loc[frame, 'viewingTime'])
        # Draw AREA line graph in the right subplot
        areaLine.set_data(data.loc[startFrame:frame, 'viewingTime'],
                          data.loc[startFrame:frame, 'hullArea'])
        # Update and draw the flat average line
        average = np.mean(data.loc[startFrame:frame,'hullArea'])
        avgLine.set_data([0, data.loc[frame, 'viewingTime']],
                         [average, average])
        # Update and move the average line label
        avgLabel.set_text(str(average)[0:5] + '%')
        avgLabel.set_position((data.loc[frame, 'viewingTime'],
                               average))
    # Update the time in the bottom right corner of the plot
    timeLabel.set_position((data.loc[frame, 'viewingTime'], 0))
    timeLabel.set_text(str(row['viewingTime']/1000)[0:6]
                       + ' seconds')
    finalTime = row['viewingTime']/1000
def getStartFrame(data):
    """Index of the first row with a valid (non-NaN) startRow."""
    for idx, row in data.iterrows():
        if not np.isnan(row['startRow']):
            return idx
def plotAnimationAndSave(data, period, participantNumTxt, dwgNumTxt, viewingNumTxt):
    """Render and save one viewing's animation as an .mp4.

    Builds a two-panel figure (fixations + hull on the left, hull-area time
    series on the right), animates it frame-by-frame via updatePlot, and
    writes the video under results/animations/.  Figure/axes/line handles
    are shared with updatePlot through module-level globals.
    """
    global fig1
    global ax1
    global ax2
    global areaLine
    global avgLine
    global avgLabel
    global timeLabel
    global finalTime
    global average
    # Set the default size of the plot figure to 10" width x 5" height
    rcParams['figure.figsize'] = 10, 5
    # Setup the figure and subplots
    plt.close("all")
    fig1, (ax1, ax2) = plt.subplots(1, 2, sharey=False)
    fig1.tight_layout(rect=[0.03, 0.05, 0.96, 0.85])
    # Increase whitespace between the subplots
    fig1.subplots_adjust(wspace=0.35)
    # Set the figure title
    fig1.suptitle('Participant ' + participantNumTxt + ' - DWG ' + dwgNumTxt
                  + ' - Viewing ' + viewingNumTxt, y=0.96, fontweight='bold')
    # Set Axes labels of the right subplot (the line graph)
    ax2.set_xlabel('Time (milliseconds)')
    ax2.set_ylabel('Convex Hull Area (%)')
    # Set the axis limits for the right subplot
    ax2.set_xlim([0, 1])
    ax2.set_ylim([0, 100])
    # Set the title of the right subplot
    ax2.title.set_text('Convex Hull Area Over Time')
    # Initialize the lines for the right subplot
    areaLine, = ax2.plot([], [], label = 'Convex Hull Area')
    avgLine, = ax2.plot([], [], linestyle='--', label = 'Average Area')
    # Initialize the average line label
    avgLabel = ax2.text(0, 0, '%', horizontalalignment='left',
                        verticalalignment='center')
    # Initialize the time label in the bottom right of the right subplot
    timeLabel = ax2.text(0, 0, '', horizontalalignment='right',
                         verticalalignment='bottom')
    # Create the legend in the top right corner of the right subplot
    ax2.legend(loc=1)
    # Make a timestamp for unique movie filenames
    # NOTE(review): ts/dt are computed here but never used -- the saved
    # filename below does not include the timestamp.
    ts = time.time()
    dt = datetime.datetime.fromtimestamp(ts).strftime('%y%m%d.%H%M%S')
    startFrame = getStartFrame(data)
    finalTime = 0
    average = 0
    # Animate the plot
    anim = animation.FuncAnimation(fig1,
                                   func=updatePlot,
                                   frames=range(startFrame, len(data)),
                                   fargs=(data, startFrame, period,
                                          participantNumTxt, dwgNumTxt,
                                          viewingNumTxt),
                                   interval=200,
                                   repeat=False)
    anim.save('results/animations/' + str(period) + '_participant'
              + participantNumTxt + '_dwg' + dwgNumTxt + '_viewing' + viewingNumTxt
              + '.mp4', fps=5, bitrate=500,
              extra_args=['-vcodec', 'libx264'])
def plotHistogramAndSave(data, title, filename, period, participantNumTxt, dwgNumTxt, viewingNumTxt):
    """Plot a probability histogram of a viewing's convex-hull areas.

    Saves the figure to <imagePath>/Histograms/<filename> and closes all
    open figures afterwards.
    """
    global imagePath
    global fig2
    print('HISTOGRAM period:' + str(period) + ' participant:'
          + participantNumTxt + ' dwg:' + dwgNumTxt + ' viewing:' + viewingNumTxt)
    # Keep only rows with a finite hullArea (frames without a full window are NaN).
    # Series.as_matrix() was removed in pandas 1.0; to_numpy() replaces it.
    x = data[np.isfinite(data['hullArea'])]['hullArea'].to_numpy()
    fig2 = plt.figure()
    # 'normed' was removed from matplotlib; 'density' gives identical behaviour.
    n, bins, patches = plt.hist(x, 'auto', density=True, alpha=0.75)
    plt.xlabel('Convex Hull Area (%)')
    plt.ylabel('Probability')
    plt.title(title, y=1)
    plt.xlim(0)
    plt.grid(True)
    # Save to plot
    plt.savefig((imagePath + '/Histograms/' + filename))
    plt.close("all")
def doCalculations(periods, participantNums, dwgs, viewingThresh, viewingPointMin,
                   filePrefix, fileSuffix):
    """Run the full analysis over every period/participant/drawing/viewing.

    For each viewing: compute per-frame convex hulls, save the animation
    and the histogram, and append a summary row to the global `results`
    DataFrame.  Reads the globals `average` and `finalTime`, which
    updatePlot() sets while plotAnimationAndSave() renders frames.
    """
    global results
    # Do everything for each period
    for period in periods:
        # Do everything for each participant
        for participantNum in participantNums:
            participantNumTxt = str(participantNum).zfill(2)
            # Get the participant data
            participantData = getData(filePrefix, fileSuffix,
                                      participantNumTxt)
            # Clean the participant data
            participantData = getCleanData(participantData)
            # Do everything for each drawing
            for dwgNum in dwgs:
                dwgNumTxt = str(dwgNum).zfill(2)
                dwgData = getDwgData(participantData, dwgNum)
                dwgData = getFirstIndices(dwgData)
                dwgData.reset_index(inplace = True)
                dwgData = getScaledCoordinates(dwgData)
                dwgData = addDurationsCol(dwgData)
                dwgViewings = getDwgViewings(dwgData, viewingThresh, viewingPointMin)
                # Do everything for each drawing viewing
                for i in range(0, len(dwgViewings)):
                    viewingData = dwgViewings[i]
                    viewingNum = i+1
                    viewingNumTxt = str(viewingNum).zfill(2)
                    if(not 'viewingTime' in viewingData):
                        viewingData['viewingTime'] = getViewingTime(viewingData)
                    if(not 'startRow' in viewingData):
                        viewingData = getRowCountStartPeriod(viewingData, period)
                    # Don't calculate convex hulls unless there's enough data
                    if(viewingData['startRow'].nunique() > 0):
                        if(not 'hull' in viewingData):
                            viewingData = getConvexHulls(viewingData)
                        plotAnimationAndSave(viewingData, period,
                                             participantNumTxt, dwgNumTxt,
                                             viewingNumTxt)
                        title = ('Convex Hull Area Distribution\n Participant '
                                 + participantNumTxt + ' - DWG ' + dwgNumTxt
                                 + ' - Viewing ' + viewingNumTxt)
                        # BUG FIX: the filename previously omitted dwgNumTxt
                        # ('_dwg' + '_viewing'), so histograms from different
                        # drawings overwrote each other.
                        filename = ('Histogram_' + str(period) + '_participant'
                                    + participantNumTxt + '_dwg' + dwgNumTxt
                                    + '_viewing' + viewingNumTxt + '.png')
                        plotHistogramAndSave(viewingData, title, filename, period, participantNumTxt, dwgNumTxt, viewingNumTxt)
                        # Append this result to results.
                        # DataFrame.append was removed in pandas 2.0; use concat.
                        result = {'period': period,
                                  'participant': participantNum,
                                  'dwg': dwgNum,
                                  'viewing': viewingNum,
                                  'viewingAvgHullArea': average,
                                  'viewingTime': finalTime}
                        results = pd.concat([results, pd.DataFrame([result])],
                                            ignore_index=True)
# BUG FIX: run the analysis FIRST so `results` is populated before being
# written out.  Previously the workbooks were written at module load time,
# before doCalculations() was called, so they always contained an empty
# DataFrame.
doCalculations(periods, participantNums, dwgs, viewingThresh, viewingPointMin,
               filePrefix, fileSuffix)
# Write results to excel file
writer = pd.ExcelWriter('results/results.xlsx',
                       engine='xlsxwriter')
results.to_excel(writer, sheet_name='Sheet1')
writer.save()
# Change dtype to integers (np.int was removed in NumPy 1.24; plain int is equivalent)
results = results.astype(dtype = {'period': int, 'participant': int,
                                  'dwg': int, 'viewing': int})
# Make a timestamp for unique movie filenames
ts = time.time()
dt = datetime.datetime.fromtimestamp(ts).strftime('%y%m%d.%H%M%S')
# Write results to excel file but save name with current date and time
writerBackup = pd.ExcelWriter('results/results' + str(dt) + '.xlsx',
                             engine='xlsxwriter')
results.to_excel(writerBackup, sheet_name='Sheet1')
writerBackup.save()
#########
######### for testing only
#########
#period=3000
#participantNum=19
#dwgNum=4
#i=0
#data=viewingData
#frame=12
#startFrame=7
#########
######### for testing only
######### | [
"matplotlib.pyplot.title",
"sklearn.preprocessing.MinMaxScaler",
"numpy.isnan",
"matplotlib.pyplot.figure",
"numpy.mean",
"pandas.read_table",
"pandas.DataFrame",
"matplotlib.pyplot.close",
"numpy.isfinite",
"matplotlib.pyplot.subplots",
"pandas.ExcelWriter",
"datetime.datetime.fromtimestamp",... | [((1164, 1348), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': "['period', 'participant', 'dwg', 'viewing', 'viewingAvgHullArea',\n 'viewingTime', 'dwgAvgHullArea', 'dwgTime', 'participantAvgHullArea',\n 'participantTime']"}), "(columns=['period', 'participant', 'dwg', 'viewing',\n 'viewingAvgHullArea', 'viewingTime', 'dwgAvgHullArea', 'dwgTime',\n 'participantAvgHullArea', 'participantTime'])\n", (1176, 1348), True, 'import pandas as pd\n'), ((2215, 2288), 'pandas.read_table', 'pd.read_table', (['(filePrefix + participantNumTxt + fileSuffix)'], {'delimiter': '""","""'}), "(filePrefix + participantNumTxt + fileSuffix, delimiter=',')\n", (2228, 2288), True, 'import pandas as pd\n'), ((2815, 2843), 'sklearn.preprocessing.MinMaxScaler', 'preprocessing.MinMaxScaler', ([], {}), '()\n', (2841, 2843), False, 'from sklearn import preprocessing\n'), ((10299, 10315), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10308, 10315), True, 'import matplotlib.pyplot as plt\n'), ((10339, 10371), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'sharey': '(False)'}), '(1, 2, sharey=False)\n', (10351, 10371), True, 'import matplotlib.pyplot as plt\n'), ((11764, 11775), 'time.time', 'time.time', ([], {}), '()\n', (11773, 11775), False, 'import time\n'), ((13050, 13062), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (13060, 13062), True, 'import matplotlib.pyplot as plt\n'), ((13091, 13132), 'matplotlib.pyplot.hist', 'plt.hist', (['x', '"""auto"""'], {'normed': '(1)', 'alpha': '(0.75)'}), "(x, 'auto', normed=1, alpha=0.75)\n", (13099, 13132), True, 'import matplotlib.pyplot as plt\n'), ((13142, 13176), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Convex Hull Area (%)"""'], {}), "('Convex Hull Area (%)')\n", (13152, 13176), True, 'import matplotlib.pyplot as plt\n'), ((13181, 13206), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Probability"""'], {}), "('Probability')\n", (13191, 
13206), True, 'import matplotlib.pyplot as plt\n'), ((13211, 13232), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'y': '(1)'}), '(title, y=1)\n', (13220, 13232), True, 'import matplotlib.pyplot as plt\n'), ((13237, 13248), 'matplotlib.pyplot.xlim', 'plt.xlim', (['(0)'], {}), '(0)\n', (13245, 13248), True, 'import matplotlib.pyplot as plt\n'), ((13253, 13267), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (13261, 13267), True, 'import matplotlib.pyplot as plt\n'), ((13292, 13342), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(imagePath + '/Histograms/' + filename)"], {}), "(imagePath + '/Histograms/' + filename)\n", (13303, 13342), True, 'import matplotlib.pyplot as plt\n'), ((13354, 13370), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (13363, 13370), True, 'import matplotlib.pyplot as plt\n'), ((17623, 17634), 'time.time', 'time.time', ([], {}), '()\n', (17632, 17634), False, 'import time\n'), ((6343, 6361), 'numpy.isnan', 'np.isnan', (['startRow'], {}), '(startRow)\n', (6351, 6361), True, 'import numpy as np\n'), ((5650, 5675), 'numpy.isnan', 'np.isnan', (["row['startRow']"], {}), "(row['startRow'])\n", (5658, 5675), True, 'import numpy as np\n'), ((9825, 9850), 'numpy.isnan', 'np.isnan', (["row['startRow']"], {}), "(row['startRow'])\n", (9833, 9850), True, 'import numpy as np\n'), ((11785, 11820), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (11816, 11820), False, 'import datetime\n'), ((17644, 17679), 'datetime.datetime.fromtimestamp', 'datetime.datetime.fromtimestamp', (['ts'], {}), '(ts)\n', (17675, 17679), False, 'import datetime\n'), ((6000, 6018), 'scipy.spatial.ConvexHull', 'ConvexHull', (['points'], {}), '(points)\n', (6010, 6018), False, 'from scipy.spatial import ConvexHull\n'), ((7527, 7577), 'scipy.misc.imread', 'imread', (["('referenceImages/DWG' + dwgNumTxt + '.png')"], {}), "('referenceImages/DWG' + dwgNumTxt + '.png')\n", (7533, 7577), 
False, 'from scipy.misc import imread\n'), ((8908, 8955), 'numpy.mean', 'np.mean', (["data.loc[startFrame:frame, 'hullArea']"], {}), "(data.loc[startFrame:frame, 'hullArea'])\n", (8915, 8955), True, 'import numpy as np\n'), ((12979, 13008), 'numpy.isfinite', 'np.isfinite', (["data['hullArea']"], {}), "(data['hullArea'])\n", (12990, 13008), True, 'import numpy as np\n'), ((17102, 17161), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['"""results/results.xlsx"""'], {'engine': '"""xlsxwriter"""'}), "('results/results.xlsx', engine='xlsxwriter')\n", (17116, 17161), True, 'import pandas as pd\n')] |
import numpy as np
import matplotlib.pyplot as plt
# def verguero():
# lista = list(range(10))*2
# encontro = True
# cuenta = 0
# while encontro:
# rand1 = int(np.random.uniform(0,20))
# media1 = lista.pop(rand1)
# rand2 = int(np.random.uniform(0,19))
# media2 = lista.pop(rand2)
# cuenta += 1
# if media1 == media2:
# encontro = False
# else:
# lista.append(media1)
# lista.append(media2)
# return cuenta
#
# cue = []
# for i in range(10000):
# cue.append(verguero())
# cuentas = np.array(cue)
#
# promedio = np.mean(cuentas)
#
# promedio
# _ = plt.hist(cuentas, bins=50)
# #Ejercicio 7.1
# n_puntos=1000
# x = np.linspace(0, np.pi)
# def f(x):
# y = 0.5 * np.sin(x)
# if(np.isscalar(x)):# esto va a funcionar si entra un numero
# if (x>np.pi) | (x<0):
# y = 0
# else: #esto va a funcionar si entra un array
# ii = (x>np.pi) | (x<0)
# y[ii] = 0.0
# return y
#
# #%%
# N = 100000
# resultados = []
# sigma_delta = [1.0, 0.001, 1000.0]
#
# for delta in sigma_delta:
# lista = [np.random.random()*np.pi]
# for i in range(1,N):
# propuesta = lista[i-1] + np.random.normal(loc=0.0, scale=delta)
# r = min(1,f(propuesta)/f(lista[i-1]))
# alpha = np.random.random()
# if(alpha<r):
# lista.append(propuesta)
# else:
# lista.append(lista[i-1])
# resultados.append(lista)
#
# len(resultados)
#
#
# #%%
# fig= plt.figure(figsize=(10,3))
# ax= fig.add_subplot(131)
# ax.hist(resultados[0], density=True, bins=x)
# ax.plot(x, f(x))
# ax2= fig.add_subplot(132)
# ax2.hist(resultados[1], density=True, bins=x)
# ax2.plot(x, f(x))
# ax3= fig.add_subplot(133)
# ax3.hist(resultados[2], density=True, bins=x)
# ax3.plot(x, f(x))
# plt.show()
#
# #%%
# #Ejercicio 7.2
N=100000  # number of Metropolis-Hastings samples to draw
def dens(x, y):
    """Unnormalised bivariate Gaussian density: exp(-(x**2/4 + y**2 + x*y/1.5)/2)."""
    quad_form = x**2/4 + y**2 + x*y/1.5
    return np.exp(-0.5*quad_form)
# Metropolis sampler for `dens`, with a symmetric uniform proposal of
# half-width sigma_delta/2 in each coordinate.
x_lista = [np.random.random()]
y_lista = [np.random.random()]
sigma_delta = 1.0
for i in range (1,N):
    # Propose a uniform step around the current point.
    propuestax= x_lista[i-1] + sigma_delta*(np.random.random() - 0.5)
    propuestay= y_lista[i-1] + sigma_delta*(np.random.random() - 0.5)
    # Metropolis acceptance ratio (the symmetric proposal cancels out).
    r= min (1.0, dens(propuestax, propuestay)/ dens(x_lista[i-1],y_lista[i-1]))
    alpha= np.random.random()
    if (alpha< r):
        x_lista.append(propuestax)
        y_lista.append(propuestay)
    else:
        # Rejected: repeat the current sample.
        x_lista.append(x_lista[i-1])
        y_lista.append(y_lista[i-1])
#%%
# Visualise the samples as a 2-D histogram (empirical density).
_ = plt.hist2d(x_lista, y_lista, bins=50)
# plt.xlim(-5,5)
# plt.ylim(-5,5)
plt.show()
#%%
# Evaluate the target density on a grid, for comparison with the samples.
x_line = np.linspace(-5,5,100)
y_line = np.linspace(-5,5,100)
x_grid, y_grid = np.meshgrid(x_line, y_line)
z_grid = dens(x_grid, y_grid)
fig, (ax0, ax1) = plt.subplots(1,2)
# plot the grid points (analytic density heat map); ax1 is left unused here
im = ax0.pcolormesh(x_grid, y_grid, z_grid)
| [
"numpy.meshgrid",
"matplotlib.pyplot.show",
"numpy.random.random",
"numpy.exp",
"numpy.linspace",
"matplotlib.pyplot.hist2d",
"matplotlib.pyplot.subplots"
] | [((2506, 2543), 'matplotlib.pyplot.hist2d', 'plt.hist2d', (['x_lista', 'y_lista'], {'bins': '(50)'}), '(x_lista, y_lista, bins=50)\n', (2516, 2543), True, 'import matplotlib.pyplot as plt\n'), ((2578, 2588), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2586, 2588), True, 'import matplotlib.pyplot as plt\n'), ((2605, 2628), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2616, 2628), True, 'import numpy as np\n'), ((2636, 2659), 'numpy.linspace', 'np.linspace', (['(-5)', '(5)', '(100)'], {}), '(-5, 5, 100)\n', (2647, 2659), True, 'import numpy as np\n'), ((2675, 2702), 'numpy.meshgrid', 'np.meshgrid', (['x_line', 'y_line'], {}), '(x_line, y_line)\n', (2686, 2702), True, 'import numpy as np\n'), ((2752, 2770), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {}), '(1, 2)\n', (2764, 2770), True, 'import matplotlib.pyplot as plt\n'), ((1933, 1983), 'numpy.exp', 'np.exp', (['(-0.5 * (x ** 2 / 4 + y ** 2 + x * y / 1.5))'], {}), '(-0.5 * (x ** 2 / 4 + y ** 2 + x * y / 1.5))\n', (1939, 1983), True, 'import numpy as np\n'), ((1982, 2000), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (1998, 2000), True, 'import numpy as np\n'), ((2013, 2031), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2029, 2031), True, 'import numpy as np\n'), ((2305, 2323), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2321, 2323), True, 'import numpy as np\n'), ((2118, 2136), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2134, 2136), True, 'import numpy as np\n'), ((2188, 2206), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (2204, 2206), True, 'import numpy as np\n')] |
"""
Transforms the map.png files from EWAP scenarios using their H.txt files.
"""
import argparse
from pathlib import Path
import numpy as np
from matplotlib.image import imread
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument(
'map_file',
help='path to map.png image file containig obstacles of scenario',
type=Path
)
parser.add_argument(
'hmat_file',
help='Camera homography matrix "h.txt" of scenario.',
type=Path
)
parser.add_argument(
'--scale',
help='Scale of produced image. values between 10-50 are good picks.',
type=int
)
args = parser.parse_args()
if __name__ == '__main__':
orig_map = imread(str(args.map_file.resolve()))
hmat = np.loadtxt(str(args.hmat_file.resolve()))
# all pixel positions
pixel_pos = np.indices(orig_map.shape).T.reshape(-1, 2).T
h_pixel_pos = np.concatenate((pixel_pos, np.ones((1, pixel_pos.shape[1]))))
transformed_map = np.matmul(hmat, h_pixel_pos)
transformed_map /= transformed_map[-1:]
transformed_map = transformed_map[:2, :]
# try to coerce transformed map into discrete image
# arbitrary 1m/10 resolution below
shifted_map = transformed_map.copy() * args.scale
# store shift amount for saving later
shift2save = np.array([
shifted_map[0, 0],
shifted_map[1, 0]
])
shifted_map[0:] -= shifted_map[0, 0]
shifted_map[1:] -= shifted_map[1, 0]
shifted_map = np.round(shifted_map).astype(np.int64)
# construct new image
max_x = np.max(shifted_map[0]).item() + 1
max_y = np.max(shifted_map[1]).item() + 1
new_image = np.zeros((max_x, max_y))
# populate new image
for i in range(shifted_map.shape[1]):
old_coords = pixel_pos[:, i]
rw_coords = shifted_map[:, i]
new_image[tuple(rw_coords)] = orig_map[tuple(old_coords)]
# save image
im2save = Image.fromarray(new_image.astype('uint8') * 255)
im2save = im2save.convert('1')
im2save.save(args.map_file.parents[0] / 'hmap.png')
# save shift amount, all positions in dataset must be shifted by this.
np.savetxt(args.map_file.parents[0] / 'shift.txt', shift2save)
# save scale amount, all positions and velocities must be scaled by this.
np.savetxt(args.map_file.parents[0] / 'scale.txt', np.array([args.scale]))
| [
"argparse.ArgumentParser",
"numpy.savetxt",
"numpy.zeros",
"numpy.ones",
"numpy.indices",
"numpy.max",
"numpy.array",
"numpy.matmul",
"numpy.round"
] | [((212, 237), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (235, 237), False, 'import argparse\n'), ((953, 981), 'numpy.matmul', 'np.matmul', (['hmat', 'h_pixel_pos'], {}), '(hmat, h_pixel_pos)\n', (962, 981), True, 'import numpy as np\n'), ((1281, 1329), 'numpy.array', 'np.array', (['[shifted_map[0, 0], shifted_map[1, 0]]'], {}), '([shifted_map[0, 0], shifted_map[1, 0]])\n', (1289, 1329), True, 'import numpy as np\n'), ((1629, 1653), 'numpy.zeros', 'np.zeros', (['(max_x, max_y)'], {}), '((max_x, max_y))\n', (1637, 1653), True, 'import numpy as np\n'), ((2115, 2177), 'numpy.savetxt', 'np.savetxt', (["(args.map_file.parents[0] / 'shift.txt')", 'shift2save'], {}), "(args.map_file.parents[0] / 'shift.txt', shift2save)\n", (2125, 2177), True, 'import numpy as np\n'), ((2312, 2334), 'numpy.array', 'np.array', (['[args.scale]'], {}), '([args.scale])\n', (2320, 2334), True, 'import numpy as np\n'), ((895, 927), 'numpy.ones', 'np.ones', (['(1, pixel_pos.shape[1])'], {}), '((1, pixel_pos.shape[1]))\n', (902, 927), True, 'import numpy as np\n'), ((1454, 1475), 'numpy.round', 'np.round', (['shifted_map'], {}), '(shifted_map)\n', (1462, 1475), True, 'import numpy as np\n'), ((1532, 1554), 'numpy.max', 'np.max', (['shifted_map[0]'], {}), '(shifted_map[0])\n', (1538, 1554), True, 'import numpy as np\n'), ((1578, 1600), 'numpy.max', 'np.max', (['shifted_map[1]'], {}), '(shifted_map[1])\n', (1584, 1600), True, 'import numpy as np\n'), ((804, 830), 'numpy.indices', 'np.indices', (['orig_map.shape'], {}), '(orig_map.shape)\n', (814, 830), True, 'import numpy as np\n')] |
import pytest
import numpy as np
import skgstat as skg
import scipy
# produce a random dataset
np.random.seed(42)
rcoords = np.random.gamma(40, 10, size=(500, 2))
np.random.seed(42)
rvals = np.random.normal(10, 4, 500)
def test_invalid_dist_func():
# instantiate metrix space
ms = skg.MetricSpace(rcoords, dist_metric='euclidean')
with pytest.raises(AttributeError) as e:
skg.Variogram(ms, rvals, dist_func='cityblock')
assert 'Distance metric' in e.value
def test_sparse_matrix_no_warning():
# make a really sparse matrix
sparse = skg.MetricSpace(rcoords, max_dist=5)
# call triangular_distance_matrix without warning
V = skg.Variogram(sparse, rvals)
V.triangular_distance_matrix
def test_dense_matrix_warning():
dense = skg.MetricSpace(rcoords)
# check the warning
with pytest.raises(RuntimeWarning) as w:
V = skg.Variogram(dense, rvals)
V.triangular_distance_matrix
assert 'Only available' in w.value
def test_unknown_metric():
with pytest.raises(ValueError) as e:
skg.MetricSpace(rcoords, dist_metric='foobar')
assert 'Unknown Distance Metric:' in e.value
def test_tree_non_euklidean():
with pytest.raises(ValueError) as e:
ms = skg.MetricSpace(rcoords, 'cityblock')
ms.tree
assert 'can only be constructed' in e.value
def test_metric_pair_metrix():
c1 = np.random.gamma(100, 4, (300, 2))
c2 = np.random.gamma(50, 5, (100, 2))
ms1 = skg.MetricSpace(c1, dist_metric='cityblock')
ms2 = skg.MetricSpace(c2, dist_metric='euclidean')
with pytest.raises(ValueError) as e:
skg.MetricSpacePair(ms1, ms2)
assert 'same distance metric' in e.value
def test_metric_pair_max_dist():
c1 = np.random.gamma(100, 4, (300, 2))
c2 = np.random.gamma(50, 5, (100, 2))
ms1 = skg.MetricSpace(c1, max_dist=50)
ms2 = skg.MetricSpace(c2, max_dist=400)
with pytest.raises(ValueError) as e:
skg.MetricSpacePair(ms1, ms2)
assert 'same max_dist' in e.value
def test_raster_metric():
# Generate a gridded dataset
shape = (100, 100)
np.random.seed(42)
vals = np.random.normal(0, 1, size=shape)
# Coordinates
x = np.arange(0, shape[0])
y = np.arange(0, shape[1])
xx, yy = np.meshgrid(x, y)
# Flatten everything because we don't care about the 2D at this point
coords = np.dstack((xx.flatten(), yy.flatten())).squeeze()
vals = vals.flatten()
# Run the computation
rems = skg.RasterEquidistantMetricSpace(coords, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=10, runs=10,
rnd=42, verbose=True)
# Minimal check of the output
assert rems.max_dist == pytest.approx(140,rel=0.01)
assert rems.res == pytest.approx(1, rel=0.0001)
assert isinstance(rems.dists, scipy.sparse.csr.csr_matrix)
assert rems.dists.shape == (10000, 10000)
# Check the random state provides the same final center
assert all(rems._centers[-1] == np.array([62, 52]))
# Check the interface with a Variogram object works
V = skg.Variogram(rems, vals)
assert V.bin_count is not None
# Check the variogram is always the same with the random state given
assert V.experimental[0] == pytest.approx(0.89,0.01)
# Check that the routines are robust to very few data points in the grid (e.g., from nodata values)
coords_sub = coords[0::1000]
vals_sub = vals[0::1000]
rems_sub = skg.RasterEquidistantMetricSpace(coords_sub, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=100, runs=10,
rnd=42)
V = skg.Variogram(rems_sub, vals_sub)
# Check with a single isolated point possibly being used as center
coords_sub = np.concatenate(([coords[0]], coords[-10:]))
vals_sub = np.concatenate(([vals[0]], vals[-10:]))
rems_sub = skg.RasterEquidistantMetricSpace(coords_sub, shape=shape, extent=(x[0],x[-1],y[0],y[-1]), samples=100, runs=11,
rnd=42)
V = skg.Variogram(rems_sub, vals_sub)
| [
"numpy.meshgrid",
"numpy.random.seed",
"skgstat.MetricSpace",
"skgstat.MetricSpacePair",
"numpy.random.gamma",
"pytest.raises",
"numpy.arange",
"skgstat.RasterEquidistantMetricSpace",
"numpy.random.normal",
"numpy.array",
"pytest.approx",
"numpy.concatenate",
"skgstat.Variogram"
] | [((96, 114), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (110, 114), True, 'import numpy as np\n'), ((125, 163), 'numpy.random.gamma', 'np.random.gamma', (['(40)', '(10)'], {'size': '(500, 2)'}), '(40, 10, size=(500, 2))\n', (140, 163), True, 'import numpy as np\n'), ((164, 182), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (178, 182), True, 'import numpy as np\n'), ((191, 219), 'numpy.random.normal', 'np.random.normal', (['(10)', '(4)', '(500)'], {}), '(10, 4, 500)\n', (207, 219), True, 'import numpy as np\n'), ((291, 340), 'skgstat.MetricSpace', 'skg.MetricSpace', (['rcoords'], {'dist_metric': '"""euclidean"""'}), "(rcoords, dist_metric='euclidean')\n", (306, 340), True, 'import skgstat as skg\n'), ((574, 610), 'skgstat.MetricSpace', 'skg.MetricSpace', (['rcoords'], {'max_dist': '(5)'}), '(rcoords, max_dist=5)\n', (589, 610), True, 'import skgstat as skg\n'), ((674, 702), 'skgstat.Variogram', 'skg.Variogram', (['sparse', 'rvals'], {}), '(sparse, rvals)\n', (687, 702), True, 'import skgstat as skg\n'), ((783, 807), 'skgstat.MetricSpace', 'skg.MetricSpace', (['rcoords'], {}), '(rcoords)\n', (798, 807), True, 'import skgstat as skg\n'), ((1414, 1447), 'numpy.random.gamma', 'np.random.gamma', (['(100)', '(4)', '(300, 2)'], {}), '(100, 4, (300, 2))\n', (1429, 1447), True, 'import numpy as np\n'), ((1457, 1489), 'numpy.random.gamma', 'np.random.gamma', (['(50)', '(5)', '(100, 2)'], {}), '(50, 5, (100, 2))\n', (1472, 1489), True, 'import numpy as np\n'), ((1500, 1544), 'skgstat.MetricSpace', 'skg.MetricSpace', (['c1'], {'dist_metric': '"""cityblock"""'}), "(c1, dist_metric='cityblock')\n", (1515, 1544), True, 'import skgstat as skg\n'), ((1555, 1599), 'skgstat.MetricSpace', 'skg.MetricSpace', (['c2'], {'dist_metric': '"""euclidean"""'}), "(c2, dist_metric='euclidean')\n", (1570, 1599), True, 'import skgstat as skg\n'), ((1774, 1807), 'numpy.random.gamma', 'np.random.gamma', (['(100)', '(4)', '(300, 2)'], {}), '(100, 4, (300, 
2))\n', (1789, 1807), True, 'import numpy as np\n'), ((1817, 1849), 'numpy.random.gamma', 'np.random.gamma', (['(50)', '(5)', '(100, 2)'], {}), '(50, 5, (100, 2))\n', (1832, 1849), True, 'import numpy as np\n'), ((1860, 1892), 'skgstat.MetricSpace', 'skg.MetricSpace', (['c1'], {'max_dist': '(50)'}), '(c1, max_dist=50)\n', (1875, 1892), True, 'import skgstat as skg\n'), ((1903, 1936), 'skgstat.MetricSpace', 'skg.MetricSpace', (['c2'], {'max_dist': '(400)'}), '(c2, max_dist=400)\n', (1918, 1936), True, 'import skgstat as skg\n'), ((2148, 2166), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2162, 2166), True, 'import numpy as np\n'), ((2178, 2212), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': 'shape'}), '(0, 1, size=shape)\n', (2194, 2212), True, 'import numpy as np\n'), ((2240, 2262), 'numpy.arange', 'np.arange', (['(0)', 'shape[0]'], {}), '(0, shape[0])\n', (2249, 2262), True, 'import numpy as np\n'), ((2271, 2293), 'numpy.arange', 'np.arange', (['(0)', 'shape[1]'], {}), '(0, shape[1])\n', (2280, 2293), True, 'import numpy as np\n'), ((2307, 2324), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (2318, 2324), True, 'import numpy as np\n'), ((2527, 2662), 'skgstat.RasterEquidistantMetricSpace', 'skg.RasterEquidistantMetricSpace', (['coords'], {'shape': 'shape', 'extent': '(x[0], x[-1], y[0], y[-1])', 'samples': '(10)', 'runs': '(10)', 'rnd': '(42)', 'verbose': '(True)'}), '(coords, shape=shape, extent=(x[0], x[-1],\n y[0], y[-1]), samples=10, runs=10, rnd=42, verbose=True)\n', (2559, 2662), True, 'import skgstat as skg\n'), ((3134, 3159), 'skgstat.Variogram', 'skg.Variogram', (['rems', 'vals'], {}), '(rems, vals)\n', (3147, 3159), True, 'import skgstat as skg\n'), ((3508, 3635), 'skgstat.RasterEquidistantMetricSpace', 'skg.RasterEquidistantMetricSpace', (['coords_sub'], {'shape': 'shape', 'extent': '(x[0], x[-1], y[0], y[-1])', 'samples': '(100)', 'runs': '(10)', 'rnd': '(42)'}), '(coords_sub, shape=shape, 
extent=(x[0], x[-\n 1], y[0], y[-1]), samples=100, runs=10, rnd=42)\n', (3540, 3635), True, 'import skgstat as skg\n'), ((3680, 3713), 'skgstat.Variogram', 'skg.Variogram', (['rems_sub', 'vals_sub'], {}), '(rems_sub, vals_sub)\n', (3693, 3713), True, 'import skgstat as skg\n'), ((3803, 3846), 'numpy.concatenate', 'np.concatenate', (['([coords[0]], coords[-10:])'], {}), '(([coords[0]], coords[-10:]))\n', (3817, 3846), True, 'import numpy as np\n'), ((3862, 3901), 'numpy.concatenate', 'np.concatenate', (['([vals[0]], vals[-10:])'], {}), '(([vals[0]], vals[-10:]))\n', (3876, 3901), True, 'import numpy as np\n'), ((3917, 4044), 'skgstat.RasterEquidistantMetricSpace', 'skg.RasterEquidistantMetricSpace', (['coords_sub'], {'shape': 'shape', 'extent': '(x[0], x[-1], y[0], y[-1])', 'samples': '(100)', 'runs': '(11)', 'rnd': '(42)'}), '(coords_sub, shape=shape, extent=(x[0], x[-\n 1], y[0], y[-1]), samples=100, runs=11, rnd=42)\n', (3949, 4044), True, 'import skgstat as skg\n'), ((4089, 4122), 'skgstat.Variogram', 'skg.Variogram', (['rems_sub', 'vals_sub'], {}), '(rems_sub, vals_sub)\n', (4102, 4122), True, 'import skgstat as skg\n'), ((351, 380), 'pytest.raises', 'pytest.raises', (['AttributeError'], {}), '(AttributeError)\n', (364, 380), False, 'import pytest\n'), ((395, 442), 'skgstat.Variogram', 'skg.Variogram', (['ms', 'rvals'], {'dist_func': '"""cityblock"""'}), "(ms, rvals, dist_func='cityblock')\n", (408, 442), True, 'import skgstat as skg\n'), ((842, 871), 'pytest.raises', 'pytest.raises', (['RuntimeWarning'], {}), '(RuntimeWarning)\n', (855, 871), False, 'import pytest\n'), ((890, 917), 'skgstat.Variogram', 'skg.Variogram', (['dense', 'rvals'], {}), '(dense, rvals)\n', (903, 917), True, 'import skgstat as skg\n'), ((1037, 1062), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1050, 1062), False, 'import pytest\n'), ((1077, 1123), 'skgstat.MetricSpace', 'skg.MetricSpace', (['rcoords'], {'dist_metric': '"""foobar"""'}), "(rcoords, 
dist_metric='foobar')\n", (1092, 1123), True, 'import skgstat as skg\n'), ((1220, 1245), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1233, 1245), False, 'import pytest\n'), ((1265, 1302), 'skgstat.MetricSpace', 'skg.MetricSpace', (['rcoords', '"""cityblock"""'], {}), "(rcoords, 'cityblock')\n", (1280, 1302), True, 'import skgstat as skg\n'), ((1610, 1635), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1623, 1635), False, 'import pytest\n'), ((1650, 1679), 'skgstat.MetricSpacePair', 'skg.MetricSpacePair', (['ms1', 'ms2'], {}), '(ms1, ms2)\n', (1669, 1679), True, 'import skgstat as skg\n'), ((1947, 1972), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (1960, 1972), False, 'import pytest\n'), ((1987, 2016), 'skgstat.MetricSpacePair', 'skg.MetricSpacePair', (['ms1', 'ms2'], {}), '(ms1, ms2)\n', (2006, 2016), True, 'import skgstat as skg\n'), ((2763, 2791), 'pytest.approx', 'pytest.approx', (['(140)'], {'rel': '(0.01)'}), '(140, rel=0.01)\n', (2776, 2791), False, 'import pytest\n'), ((2814, 2842), 'pytest.approx', 'pytest.approx', (['(1)'], {'rel': '(0.0001)'}), '(1, rel=0.0001)\n', (2827, 2842), False, 'import pytest\n'), ((3301, 3326), 'pytest.approx', 'pytest.approx', (['(0.89)', '(0.01)'], {}), '(0.89, 0.01)\n', (3314, 3326), False, 'import pytest\n'), ((3049, 3067), 'numpy.array', 'np.array', (['[62, 52]'], {}), '([62, 52])\n', (3057, 3067), True, 'import numpy as np\n')] |
from ..base import RNGDataFlow
from ...utils import logger,fs
import os
import numpy as np
def load_data_from_npzs(fnames):
if not isinstance(fnames, list):
fnames = [fnames]
Xs = []
Ys = []
for fname in fnames:
d = np.load(fname)
logger.info('Loading from {}'.format(fname))
X, Y = (d['X'], d['Y'])
Xs.append(X)
Ys.append(Y)
return np.stack(X), np.stack(Y)
class Camvid(RNGDataFlow):
name = 'camvid'
non_void_nclasses = 11
_void_labels = [11]
# optional arguments
data_shape = (360, 480, 3)
mean = [0.39068785, 0.40521392, 0.41434407]
std = [0.29652068, 0.30514979, 0.30080369]
_cmap = {
0: (128, 128, 128), # sky
1: (128, 0, 0), # building
2: (192, 192, 128), # column_pole
3: (128, 64, 128), # road
4: (0, 0, 192), # sidewalk
5: (128, 128, 0), # Tree
6: (192, 128, 128), # SignSymbol
7: (64, 64, 128), # Fence
8: (64, 0, 128), # Car
9: (64, 64, 0), # Pedestrian
10: (0, 128, 192), # Bicyclist
11: (0, 0, 0)} # Void
_mask_labels = {0: 'sky', 1: 'building', 2: 'column_pole', 3: 'road',
4: 'sidewalk', 5: 'tree', 6: 'sign', 7: 'fence', 8: 'car',
9: 'pedestrian', 10: 'byciclist', 11: 'void'}
# frequency and weight of each class (including void)
class_freq = np.array([ 0.16845114, 0.23258652, 0.00982927, 0.31658215, 0.0448627,
0.09724055, 0.01172954, 0.01126809, 0.05865686, 0.00639231, 0.00291665, 0.03948423])
class_weight = sorted(class_freq)[len(class_freq)//2] / class_freq
#class_weight = np.array([ 0.49470329, 0.35828961, 8.47807568, 0.26322815,
# 1.8575192 , 0.85698135, 7.10457224, 7.39551774,
# 1.42069214, 13.03649617, 28.57158304, 2.11054735])
def __init__(self, which_set, shuffle=True, pixel_z_normalize=True, data_dir=None,
is_label_one_hot=False,
slide_all=False, slide_window_size=224, void_overlap=False):
"""
which_set : one of train, val, test, trainval
shuffle:
data_dir: <data_dir> should contain train.npz, val.npz, test.npz
"""
self.shuffle = shuffle
self.pixel_z_normalize = pixel_z_normalize
self.is_label_one_hot = is_label_one_hot
self.void_overlap = void_overlap
if data_dir is None:
data_dir = fs.get_dataset_path('camvid')
assert os.path.exists(data_dir)
for set_name in ['train', 'val', 'test']:
assert os.path.exists(os.path.join(data_dir, '{}.npz'.format(set_name)))
assert which_set in ['train', 'val', 'test', 'trainval'],which_set
if which_set == 'train':
load_fns = ['train']
elif which_set == 'val':
load_fns = ['val']
elif which_set == 'test':
load_fns = ['test']
else: #if which_set == 'trainval':
load_fns = ['train', 'val']
# These npz are assumed to have NHWC format for image, and NHW for label
load_fns = map(lambda fn : os.path.join(data_dir, '{}.npz'.format(fn)), load_fns)
self.X, self.Y = load_data_from_npzs(load_fns)
assert self.X.dtype == 'uint8'
self.slide_window_size = slide_window_size
self.slide_all = slide_all
self.slide_all_size =None
def get_data(self):
idxs = np.arange(len(self.X))
if self.shuffle:
self.rng.shuffle(idxs)
for k in idxs:
X = np.asarray(self.X[k], dtype=np.float32) / 255.0
Y = self.Y[k]
H,W = (X.shape[0], X.shape[1])
void = Camvid._void_labels[0]
if self.is_label_one_hot:
K = Camvid.non_void_nclasses
Y_tmp = np.zeros((H,W,K),dtype=np.float32)
mask = (Y.reshape([-1]) < K)
Y_tmp.reshape([-1,K])[np.arange(H*W)[mask], Y.reshape([-1])[mask]] = 1.0
Y = Y_tmp
void = np.zeros(K)
if self.pixel_z_normalize:
X = (X - Camvid.mean) / Camvid.std
if not self.slide_all:
# do not slide all windows
yield [X, Y]
else:
# slide all windows
side = self.slide_window_size
n_h = H // side + int(H % side != 0)
n_w = W // side + int(W % side != 0)
for hi in range(n_h):
h_overlap = 0
row = hi*side
row_end = row+side
if row_end > H:
if self.void_overlap:
h_overlap = row - (H-side)
row = H - side
row_end = H
for wi in range(n_w):
w_overlap = 0
col = wi*side
col_end = col+side
if col_end > W:
if self.void_overlap:
w_overlap = col - (W-side)
col = W - side
col_end = W
Xrc = X[row:row_end, col:col_end]
Yrc = Y[row:row_end, col:col_end].copy()
if h_overlap > 0:
Yrc[:h_overlap, :] = void
if w_overlap > 0:
Yrc[:, :w_overlap] = void
yield [Xrc, Yrc]
def size(self):
if not self.slide_all:
return len(self.X)
if self.slide_all_size is None:
H, W = self.X.shape[1], self.X.shape[2]
side = self.slide_window_size
n_h = H // side + int(H % side !=0)
n_w = W // side + int(W % side !=0)
self.slide_all_size = n_h * n_w * len(self.X)
return self.slide_all_size
def stitch_sliding_images(self, l_imgs):
"""
The l_imgs should be probability distribution of labels.
"""
side = self.slide_window_size
H,W = (Camvid.data_shape[0], Camvid.data_shape[1])
n_h = H // side + int(H % side != 0)
n_w = W // side + int(W % side != 0)
assert n_h * n_w == len(l_imgs), len(l_imgs)
n_ch = len(l_imgs[0].reshape([-1])) / side **2
assert n_ch > 1, n_ch
image = np.zeros((H, W, n_ch))
i = -1
for hi in range(n_h):
row = hi * side
row_end = row+side
if row_end > H:
row_end = H
row = H - side
for wi in range(n_w):
col = wi*side
col_end = col+side
if col_end > W:
col_end = W
col = W - side
i+=1
r_ = row_end - row
c_ = col_end - col
window = l_imgs[i].reshape([side, side, n_ch])
image[row:row_end, col:col_end] += window
return image
| [
"numpy.stack",
"numpy.load",
"numpy.asarray",
"numpy.zeros",
"os.path.exists",
"numpy.array",
"numpy.arange"
] | [((1450, 1612), 'numpy.array', 'np.array', (['[0.16845114, 0.23258652, 0.00982927, 0.31658215, 0.0448627, 0.09724055, \n 0.01172954, 0.01126809, 0.05865686, 0.00639231, 0.00291665, 0.03948423]'], {}), '([0.16845114, 0.23258652, 0.00982927, 0.31658215, 0.0448627, \n 0.09724055, 0.01172954, 0.01126809, 0.05865686, 0.00639231, 0.00291665,\n 0.03948423])\n', (1458, 1612), True, 'import numpy as np\n'), ((249, 263), 'numpy.load', 'np.load', (['fname'], {}), '(fname)\n', (256, 263), True, 'import numpy as np\n'), ((402, 413), 'numpy.stack', 'np.stack', (['X'], {}), '(X)\n', (410, 413), True, 'import numpy as np\n'), ((415, 426), 'numpy.stack', 'np.stack', (['Y'], {}), '(Y)\n', (423, 426), True, 'import numpy as np\n'), ((2533, 2557), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (2547, 2557), False, 'import os\n'), ((6548, 6570), 'numpy.zeros', 'np.zeros', (['(H, W, n_ch)'], {}), '((H, W, n_ch))\n', (6556, 6570), True, 'import numpy as np\n'), ((3599, 3638), 'numpy.asarray', 'np.asarray', (['self.X[k]'], {'dtype': 'np.float32'}), '(self.X[k], dtype=np.float32)\n', (3609, 3638), True, 'import numpy as np\n'), ((3865, 3902), 'numpy.zeros', 'np.zeros', (['(H, W, K)'], {'dtype': 'np.float32'}), '((H, W, K), dtype=np.float32)\n', (3873, 3902), True, 'import numpy as np\n'), ((4084, 4095), 'numpy.zeros', 'np.zeros', (['K'], {}), '(K)\n', (4092, 4095), True, 'import numpy as np\n'), ((3984, 4000), 'numpy.arange', 'np.arange', (['(H * W)'], {}), '(H * W)\n', (3993, 4000), True, 'import numpy as np\n')] |
from enum import Enum
import numpy as np
from math import sqrt
from table import Table
class Data:
def __init__(self, params=[], labelValues=None): # , trainCount, testCount
"""
params = [dict]
"""
self.params = params
self.labelValues = labelValues
self.x = []
self.y = []
self.labels = []
def func(x):
return 0
for param in self.params:
pType = param["type"] if "type" in param else "single"
count = param["count"] if "count" in param else 250
if pType == "single":
correlation = param["correlation"] if "correlation" in param else 0
func1 = param["func"] if "func" in param else func
self.store(*self.getPtsDouble(p1=param["x"], p2=param["y"], count=count, correlation=correlation), count=count, func=func1)
else:
correlationX = param["correlationX"] if "correlationX" in param else 0
correlationY = param["correlationY"] if "correlationY" in param else 0
func1 = param["func1"] if "func1" in param else func
func2 = param["func2"] if "func2" in param else func
x1, x2 = self.getPtsDouble(p1=param["x1"], p2=param["x2"], count=count, correlation=correlationX)
y1, y2 = self.getPtsDouble(p1=param["y1"], p2=param["y2"], count=count, correlation=correlationY)
self.store(x=x1, y=y1, count=count, func=func1)
self.store(x=x2, y=y2, count=count, func=func2)
def readDict(self, p):
dist = p["dist"]
if dist == "uniform":
return (np.random.uniform, (p["min"], p["max"]))
elif dist == "normal":
return (np.random.normal, (p["mean"], p["std"]))
# df = degrees of freedom
return (np.random.standard_t, (p["df"]))
def getPtsSingle(self, p, count):
run, args = self.readDict(p)
return run(*args, size=count)
def getPtsCorr(self, p1, p2, count, correlation):
gen1, (mean1, std1) = self.readDict(p1)
gen2, (mean2, std2) = self.readDict(p2)
pts1 = gen1(size=count)
pts2 = gen2(size=count)
return (mean1 + std1 * pts1, mean2 + std2 * (pts1 * correlation + pts2 * sqrt(1 - correlation * correlation))) # x1,x3
def getPtsDouble(self, p1, p2, count, correlation):
if correlation != 0:
return self.getPtsCorr(p1=p1, p2=p2, count=count, correlation=correlation)
else:
return self.getPtsSingle(p=p1, count=count), self.getPtsSingle(p=p2, count=count)
def store(self, x, y, count, func):
y += func(x)
self.x.append(x)
self.y.append(y)
if self.labelValues != None:
index = len(self.labels)
while index >= len(self.labelValues):
self.labelValues.append(self.labelValues[-1] + 1)
self.labels.append(np.full(count, self.labelValues[index], dtype=np.int64))
def getTable(self):
x = np.concatenate(self.x)
y = np.concatenate(self.y)
if self.labelValues == None:
p = {"target": "y", "columns": ["x"]}
arr = [x, y]
else:
p = {"target": "label", "columns": ["x", "y"]}
arr = [np.concatenate(self.labels), x, y]
return Table(numpy=np.array(arr).transpose(), param=p)
def saveTable(table, fileName):
if(len(fileName) == 0):
print("data is NOT saved.")
return
path = "examples/saved_data/" + fileName + ".csv"
table.data.to_csv(path_or_buf=path, index=False)
print("Saved successfully")
if __name__ == '__main__':
print("RUNNING COMP")
def base(x):
return x * x
def delta(x):
return base(x) + 5
# dataOptions1 = [{
# "x": {
# "dist": "normal",
# "mean": 0,
# "std": 0.1
# },
# "y": {
# "dist": "normal",
# "mean": 0,
# "std": 0.1
# }
# }]
# dataOptions2 = [{
# "x": {
# "dist": "uniform",
# "min": 0,
# "max": 1
# },
# "y": {
# "dist": "normal",
# "mean": 0,
# "std": 1
# }
# }]
# dataOptions3 = [{
# "x": {
# "dist": "normal",
# "mean": 100,
# "std": 10
# },
# "y": {
# "dist": "normal",
# "mean": 0,
# "std": 400
# },
# "func": x2
# }]
# dataOptions4 = [{
# "type": "single",
# "x": {
# "dist": "normal",
# "mean": 0,
# "std": 2
# },
# "y": {
# "dist": "normal",
# "mean": 0,
# "std": 0.05
# },
# "func": sigmoid
# }]
dataOptions5 = [{
"type": "double",
"x1": {
"dist": "uniform",
"min": -2,
"max": 4
},
"y1": {
"dist": "normal",
"mean": 0,
"std": 1
},
"x2": {
"dist": "normal",
"mean": 0,
"std": 1
},
"y2": {
"dist": "normal",
"mean": 0,
"std": 0.6
},
"func1": base,
"func2": delta
}]
training = Data(params=dataOptions5, labelValues=[0, 1]).getTable()
# training, testing = table.partition()
print("TRAINING")
print(training.data)
# print("TESTING")
# print(testing.data)
import matplotlib.pyplot as plt
plt.scatter(training['x'], training['y'], c=training['label'], alpha=0.5)
plt.show()
saveTable(training, input("Saving Data(Press Enter to skip)\nEnter filename:"))
| [
"numpy.full",
"matplotlib.pyplot.show",
"math.sqrt",
"matplotlib.pyplot.scatter",
"numpy.array",
"numpy.concatenate"
] | [((5688, 5761), 'matplotlib.pyplot.scatter', 'plt.scatter', (["training['x']", "training['y']"], {'c': "training['label']", 'alpha': '(0.5)'}), "(training['x'], training['y'], c=training['label'], alpha=0.5)\n", (5699, 5761), True, 'import matplotlib.pyplot as plt\n'), ((5766, 5776), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5774, 5776), True, 'import matplotlib.pyplot as plt\n'), ((3071, 3093), 'numpy.concatenate', 'np.concatenate', (['self.x'], {}), '(self.x)\n', (3085, 3093), True, 'import numpy as np\n'), ((3106, 3128), 'numpy.concatenate', 'np.concatenate', (['self.y'], {}), '(self.y)\n', (3120, 3128), True, 'import numpy as np\n'), ((2977, 3032), 'numpy.full', 'np.full', (['count', 'self.labelValues[index]'], {'dtype': 'np.int64'}), '(count, self.labelValues[index], dtype=np.int64)\n', (2984, 3032), True, 'import numpy as np\n'), ((3334, 3361), 'numpy.concatenate', 'np.concatenate', (['self.labels'], {}), '(self.labels)\n', (3348, 3361), True, 'import numpy as np\n'), ((3397, 3410), 'numpy.array', 'np.array', (['arr'], {}), '(arr)\n', (3405, 3410), True, 'import numpy as np\n'), ((2315, 2350), 'math.sqrt', 'sqrt', (['(1 - correlation * correlation)'], {}), '(1 - correlation * correlation)\n', (2319, 2350), False, 'from math import sqrt\n')] |
import numpy
from mpmath import mp
from sympy import Rational as frac
from sympy import sqrt
from ..helpers import article, fsd, pm, pm_array, pm_array0, untangle
from ._helpers import SphereScheme, cartesian_to_spherical_sympy
citation = article(
authors=["<NAME>"],
title="Optimal Numerical Integration on a Sphere",
journal="Mathematics of Computation",
volume="17",
number="84",
month="oct",
year="1963",
pages="361-383",
url="https://doi.org/10.1090/S0025-5718-1963-0159418-2",
)
def mclaren_01():
degree = 3
data = [(frac(1, 12), fsd(3, (sqrt(frac(1, 2)), 2)))]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_02():
degree = 5
# Stroud doesn't mention u=1, but it's implied. (After all, this is integration on a
# sphere.)
u = 1
r = frac(1, 2)
s, t = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
data = [
(frac(1, 30), fsd(3, (u, 1))),
(frac(1, 30), pm_array([r, s, t])),
(frac(1, 30), pm_array([t, r, s])),
(frac(1, 30), pm_array([s, t, r])),
]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_03():
degree = 7
# the positive roots of
# z^6 - z^4 + 0.2*z^2 - 1/105 = 0,
# i.e., the square roots of the roots of
# z^3 - z^2 + 0.2*z^1 - 1/105 = 0,
r2, s2, t2 = mp.polyroots([1, -1, frac(1, 5), -frac(1, 105)])
r = sqrt(r2)
s = sqrt(s2)
t = sqrt(t2)
u = numpy.array([+r, -r, +s, -s, +t, -t])
v = numpy.array([+s, +t, +t, +r, +r, +s])
w = numpy.array([+t, +s, +r, +t, +s, +r])
data = [
(frac(1, 24), numpy.column_stack([+u, +v, +w])),
(frac(1, 24), numpy.column_stack([+u, -v, -w])),
(frac(1, 24), numpy.column_stack([+u, +w, -v])),
(frac(1, 24), numpy.column_stack([+u, -w, +v])),
]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_04():
degree = 8
# the positive roots of
# z^6 - z^4 + 5/21 * z^2 - 5/441 = 0,
# i.e., the square roots of the roots of
# z^3 - z^2 + 5/21 * z^1 - 5/441 = 0,
r2, s2, t2 = mp.polyroots([1, -1, frac(5, 21), -frac(5, 441)])
r = sqrt(r2)
s = sqrt(s2)
t = sqrt(t2)
u = numpy.array([+r, -r, +s, -s, +t, -t])
v = numpy.array([+s, +t, +t, +r, +r, +s])
w = numpy.array([+t, +s, +r, +t, +s, +r])
data = [
(frac(16, 600), fsd(3, (1, 1))),
(frac(21, 600), numpy.column_stack([+u, +v, +w])),
(frac(21, 600), numpy.column_stack([+u, -v, -w])),
(frac(21, 600), numpy.column_stack([+u, +w, -v])),
(frac(21, 600), numpy.column_stack([+u, -w, +v])),
]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_05():
degree = 9
r, s = [sqrt((5 + pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
u, v = [sqrt((3 - pm_ * sqrt(5)) / 6) for pm_ in [+1, -1]]
t = sqrt(frac(1, 3))
B1 = frac(25, 840)
B2 = frac(27, 840)
data = [
(B1, pm_array0(3, [r, s], [0, 1])),
(B1, pm_array0(3, [r, s], [1, 2])),
(B1, pm_array0(3, [r, s], [2, 0])),
#
(B2, pm_array0(3, [u, v], [0, 1])),
(B2, pm_array0(3, [u, v], [1, 2])),
(B2, pm_array0(3, [u, v], [2, 0])),
#
(B2, pm(3, t)),
]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_06():
degree = 9
r, s = [sqrt((5 + pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
t = 1
u = frac(1, 2)
v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
B = frac(25, 1260)
C = frac(32, 1260)
data = [
# ERR Stroud is missing +- at the first r.
(B, pm_array0(3, [r, s], [0, 1])),
(B, pm_array0(3, [r, s], [1, 2])),
(B, pm_array0(3, [r, s], [2, 0])),
#
(C, fsd(3, (t, 1))),
#
(C, pm_array([u, v, w])),
(C, pm_array([w, u, v])),
(C, pm_array([v, w, u])),
]
points, weights = untangle(data)
azimuthal_polar = cartesian_to_spherical_sympy(points)
return SphereScheme("McLaren 1", weights, points, azimuthal_polar, degree, citation)
def mclaren_07():
    """Construct McLaren's scheme no. 7 (degree 9) on the unit sphere."""
    degree = 9
    r, s = [sqrt((3 - pm_ * sqrt(5)) / 6) for pm_ in [+1, -1]]
    t = sqrt(frac(1, 3))
    # ERR Stroud incorrectly gives sqrt(0.5)
    u = frac(1, 2)
    v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
    B = -frac(9, 140)
    C = frac(16, 210)
    data = [
        (B, pm_array0(3, [r, s], [0, 1])),
        (B, pm_array0(3, [r, s], [1, 2])),
        (B, pm_array0(3, [r, s], [2, 0])),
        #
        (B, pm(3, t)),
        #
        (C, fsd(3, (1, 1))),
        #
        (C, pm_array([u, v, w])),
        (C, pm_array([w, u, v])),
        (C, pm_array([v, w, u])),
    ]
    points, weights = untangle(data)
    azimuthal_polar = cartesian_to_spherical_sympy(points)
    # Fix: scheme was mislabeled "McLaren 1" (copy-paste).
    return SphereScheme("McLaren 7", weights, points, azimuthal_polar, degree, citation)
def mclaren_08():
    """Construct McLaren's scheme no. 8 (degree 11) on the unit sphere."""
    degree = 11
    r = 1
    s = sqrt(frac(1, 2))
    t = sqrt(frac(1, 3))
    u = sqrt(frac(1, 11))
    v = sqrt(frac(9, 11))
    # Weights share the common denominator 725760.
    B1 = frac(9216, 725760)
    B2 = frac(16384, 725760)
    B3 = frac(15309, 725760)
    B4 = frac(14641, 725760)
    data = [
        (B1, fsd(3, (r, 1))),
        (B2, fsd(3, (s, 2))),
        (B3, pm(3, t)),
        (B4, fsd(3, (u, 2), (v, 1))),
    ]
    points, weights = untangle(data)
    azimuthal_polar = cartesian_to_spherical_sympy(points)
    # Fix: scheme was mislabeled "McLaren 1" (copy-paste).
    return SphereScheme("McLaren 8", weights, points, azimuthal_polar, degree, citation)
def mclaren_09():
    """Construct McLaren's scheme no. 9 (degree 11) on the unit sphere."""
    degree = 11
    sqrt5 = sqrt(5)
    p, q = [sqrt((5 + pm_ * sqrt5) / 10) for pm_ in [+1, -1]]
    r, s = [sqrt((3 - pm_ * sqrt5) / 6) for pm_ in [+1, -1]]
    t = sqrt(frac(1, 3))
    u = frac(1, 2)
    v, w = [(sqrt(5) + pm_) / 4 for pm_ in [+1, -1]]
    B = frac(625, 27720)
    C = frac(243, 27720)
    D = frac(512, 27720)
    data = [
        (B, pm_array0(3, [p, q], [0, 1])),
        (B, pm_array0(3, [p, q], [1, 2])),
        (B, pm_array0(3, [p, q], [2, 0])),
        #
        (C, pm_array0(3, [r, s], [0, 1])),
        (C, pm_array0(3, [r, s], [1, 2])),
        (C, pm_array0(3, [r, s], [2, 0])),
        #
        (C, pm(3, t)),
        #
        (D, fsd(3, (1, 1))),
        #
        (D, pm_array([u, v, w])),
        (D, pm_array([w, u, v])),
        (D, pm_array([v, w, u])),
    ]
    points, weights = untangle(data)
    azimuthal_polar = cartesian_to_spherical_sympy(points)
    # Fix: scheme was mislabeled "McLaren 1" (copy-paste).
    return SphereScheme("McLaren 9", weights, points, azimuthal_polar, degree, citation)
def mclaren_10():
    """Construct McLaren's scheme no. 10 (degree 14) on the unit sphere."""
    degree = 14
    r, s = [sqrt((5 - pm_ * sqrt(5)) / 10) for pm_ in [+1, -1]]
    B = frac(125, 10080)
    C = frac(143, 10080)
    # The roots of
    #
    # 2556125 y^6 - 5112250 y^5 + 3578575 y^4 - 1043900 y^3
    #     + 115115 y^2 - 3562 y + 9 = 0
    #
    # in decreasing order.
    y = [
        0.8318603575087328951583062165711519728388,
        0.5607526046766541293084396308069013490725,
        0.4118893592345073860321480490176804941547,
        0.1479981814629634692260834719469411619893,
        0.04473134613410273910111648293922113227845,
        0.002768150983039381173906148718103889666260,
    ]
    z = numpy.sqrt(y)
    u = (
        numpy.array([z[3] - z[2], z[1] - z[4], z[5] - z[1], z[2] - z[5], z[4] - z[3]])
        / 2
        / s
    )
    v = (
        numpy.array([z[4] + z[5], z[5] + z[3], z[2] + z[4], z[3] + z[1], z[1] + z[2]])
        / 2
        / s
    )
    w = (
        numpy.array([z[0] + z[1], z[0] + z[2], z[0] + z[3], z[0] + z[4], z[0] + z[5]])
        / 2
        / s
    )
    data = [
        (B, pm_array0(3, [r, s], [0, 1])),
        (B, pm_array0(3, [r, s], [1, 2])),
        (B, pm_array0(3, [r, s], [2, 0])),
        #
        (C, numpy.column_stack([+u, +v, +w])),
        (C, numpy.column_stack([+u, -v, -w])),
        (C, numpy.column_stack([-u, -v, +w])),
        (C, numpy.column_stack([-u, +v, -w])),
        #
        (C, numpy.column_stack([+v, +w, +u])),
        (C, numpy.column_stack([+v, -w, -u])),
        (C, numpy.column_stack([-v, -w, +u])),
        (C, numpy.column_stack([-v, +w, -u])),
        #
        (C, numpy.column_stack([+w, +u, +v])),
        (C, numpy.column_stack([+w, -u, -v])),
        (C, numpy.column_stack([-w, -u, +v])),
        (C, numpy.column_stack([-w, +u, -v])),
    ]
    points, weights = untangle(data)
    azimuthal_polar = cartesian_to_spherical_sympy(points)
    # Fix: scheme was mislabeled "McLaren 1" (copy-paste).
    return SphereScheme("McLaren 10", weights, points, azimuthal_polar, degree, citation)
| [
"sympy.Rational",
"sympy.sqrt",
"numpy.array",
"numpy.column_stack",
"numpy.sqrt"
] | [((960, 970), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (964, 970), True, 'from sympy import Rational as frac\n'), ((1664, 1672), 'sympy.sqrt', 'sqrt', (['r2'], {}), '(r2)\n', (1668, 1672), False, 'from sympy import sqrt\n'), ((1681, 1689), 'sympy.sqrt', 'sqrt', (['s2'], {}), '(s2)\n', (1685, 1689), False, 'from sympy import sqrt\n'), ((1698, 1706), 'sympy.sqrt', 'sqrt', (['t2'], {}), '(t2)\n', (1702, 1706), False, 'from sympy import sqrt\n'), ((1716, 1753), 'numpy.array', 'numpy.array', (['[+r, -r, +s, -s, +t, -t]'], {}), '([+r, -r, +s, -s, +t, -t])\n', (1727, 1753), False, 'import numpy\n'), ((1762, 1799), 'numpy.array', 'numpy.array', (['[+s, +t, +t, +r, +r, +s]'], {}), '([+s, +t, +t, +r, +r, +s])\n', (1773, 1799), False, 'import numpy\n'), ((1808, 1845), 'numpy.array', 'numpy.array', (['[+t, +s, +r, +t, +s, +r]'], {}), '([+t, +s, +r, +t, +s, +r])\n', (1819, 1845), False, 'import numpy\n'), ((2550, 2558), 'sympy.sqrt', 'sqrt', (['r2'], {}), '(r2)\n', (2554, 2558), False, 'from sympy import sqrt\n'), ((2567, 2575), 'sympy.sqrt', 'sqrt', (['s2'], {}), '(s2)\n', (2571, 2575), False, 'from sympy import sqrt\n'), ((2584, 2592), 'sympy.sqrt', 'sqrt', (['t2'], {}), '(t2)\n', (2588, 2592), False, 'from sympy import sqrt\n'), ((2602, 2639), 'numpy.array', 'numpy.array', (['[+r, -r, +s, -s, +t, -t]'], {}), '([+r, -r, +s, -s, +t, -t])\n', (2613, 2639), False, 'import numpy\n'), ((2648, 2685), 'numpy.array', 'numpy.array', (['[+s, +t, +t, +r, +r, +s]'], {}), '([+s, +t, +t, +r, +r, +s])\n', (2659, 2685), False, 'import numpy\n'), ((2694, 2731), 'numpy.array', 'numpy.array', (['[+t, +s, +r, +t, +s, +r]'], {}), '([+t, +s, +r, +t, +s, +r])\n', (2705, 2731), False, 'import numpy\n'), ((3412, 3425), 'sympy.Rational', 'frac', (['(25)', '(840)'], {}), '(25, 840)\n', (3416, 3425), True, 'from sympy import Rational as frac\n'), ((3435, 3448), 'sympy.Rational', 'frac', (['(27)', '(840)'], {}), '(27, 840)\n', (3439, 3448), True, 'from sympy import Rational as 
frac\n'), ((4080, 4090), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (4084, 4090), True, 'from sympy import Rational as frac\n'), ((4153, 4167), 'sympy.Rational', 'frac', (['(25)', '(1260)'], {}), '(25, 1260)\n', (4157, 4167), True, 'from sympy import Rational as frac\n'), ((4176, 4190), 'sympy.Rational', 'frac', (['(32)', '(1260)'], {}), '(32, 1260)\n', (4180, 4190), True, 'from sympy import Rational as frac\n'), ((4904, 4914), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (4908, 4914), True, 'from sympy import Rational as frac\n'), ((4999, 5012), 'sympy.Rational', 'frac', (['(16)', '(210)'], {}), '(16, 210)\n', (5003, 5012), True, 'from sympy import Rational as frac\n'), ((5691, 5709), 'sympy.Rational', 'frac', (['(9216)', '(725760)'], {}), '(9216, 725760)\n', (5695, 5709), True, 'from sympy import Rational as frac\n'), ((5719, 5738), 'sympy.Rational', 'frac', (['(16384)', '(725760)'], {}), '(16384, 725760)\n', (5723, 5738), True, 'from sympy import Rational as frac\n'), ((5748, 5767), 'sympy.Rational', 'frac', (['(15309)', '(725760)'], {}), '(15309, 725760)\n', (5752, 5767), True, 'from sympy import Rational as frac\n'), ((5777, 5796), 'sympy.Rational', 'frac', (['(14641)', '(725760)'], {}), '(14641, 725760)\n', (5781, 5796), True, 'from sympy import Rational as frac\n'), ((6173, 6180), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (6177, 6180), False, 'from sympy import sqrt\n'), ((6339, 6349), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (6343, 6349), True, 'from sympy import Rational as frac\n'), ((6412, 6428), 'sympy.Rational', 'frac', (['(625)', '(27720)'], {}), '(625, 27720)\n', (6416, 6428), True, 'from sympy import Rational as frac\n'), ((6437, 6453), 'sympy.Rational', 'frac', (['(243)', '(27720)'], {}), '(243, 27720)\n', (6441, 6453), True, 'from sympy import Rational as frac\n'), ((6462, 6478), 'sympy.Rational', 'frac', (['(512)', '(27720)'], {}), '(512, 27720)\n', (6466, 6478), True, 'from sympy import 
Rational as frac\n'), ((7245, 7261), 'sympy.Rational', 'frac', (['(125)', '(10080)'], {}), '(125, 10080)\n', (7249, 7261), True, 'from sympy import Rational as frac\n'), ((7270, 7286), 'sympy.Rational', 'frac', (['(143)', '(10080)'], {}), '(143, 10080)\n', (7274, 7286), True, 'from sympy import Rational as frac\n'), ((7784, 7797), 'numpy.sqrt', 'numpy.sqrt', (['y'], {}), '(y)\n', (7794, 7797), False, 'import numpy\n'), ((3390, 3400), 'sympy.Rational', 'frac', (['(1)', '(3)'], {}), '(1, 3)\n', (3394, 3400), True, 'from sympy import Rational as frac\n'), ((4839, 4849), 'sympy.Rational', 'frac', (['(1)', '(3)'], {}), '(1, 3)\n', (4843, 4849), True, 'from sympy import Rational as frac\n'), ((4978, 4990), 'sympy.Rational', 'frac', (['(9)', '(140)'], {}), '(9, 140)\n', (4982, 4990), True, 'from sympy import Rational as frac\n'), ((5591, 5601), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (5595, 5601), True, 'from sympy import Rational as frac\n'), ((5616, 5626), 'sympy.Rational', 'frac', (['(1)', '(3)'], {}), '(1, 3)\n', (5620, 5626), True, 'from sympy import Rational as frac\n'), ((5642, 5653), 'sympy.Rational', 'frac', (['(1)', '(11)'], {}), '(1, 11)\n', (5646, 5653), True, 'from sympy import Rational as frac\n'), ((5668, 5679), 'sympy.Rational', 'frac', (['(9)', '(11)'], {}), '(9, 11)\n', (5672, 5679), True, 'from sympy import Rational as frac\n'), ((6194, 6222), 'sympy.sqrt', 'sqrt', (['((5 + pm_ * sqrt5) / 10)'], {}), '((5 + pm_ * sqrt5) / 10)\n', (6198, 6222), False, 'from sympy import sqrt\n'), ((6256, 6283), 'sympy.sqrt', 'sqrt', (['((3 - pm_ * sqrt5) / 6)'], {}), '((3 - pm_ * sqrt5) / 6)\n', (6260, 6283), False, 'from sympy import sqrt\n'), ((6318, 6328), 'sympy.Rational', 'frac', (['(1)', '(3)'], {}), '(1, 3)\n', (6322, 6328), True, 'from sympy import Rational as frac\n'), ((571, 582), 'sympy.Rational', 'frac', (['(1)', '(12)'], {}), '(1, 12)\n', (575, 582), True, 'from sympy import Rational as frac\n'), ((1047, 1058), 'sympy.Rational', 'frac', 
(['(1)', '(30)'], {}), '(1, 30)\n', (1051, 1058), True, 'from sympy import Rational as frac\n'), ((1086, 1097), 'sympy.Rational', 'frac', (['(1)', '(30)'], {}), '(1, 30)\n', (1090, 1097), True, 'from sympy import Rational as frac\n'), ((1130, 1141), 'sympy.Rational', 'frac', (['(1)', '(30)'], {}), '(1, 30)\n', (1134, 1141), True, 'from sympy import Rational as frac\n'), ((1174, 1185), 'sympy.Rational', 'frac', (['(1)', '(30)'], {}), '(1, 30)\n', (1178, 1185), True, 'from sympy import Rational as frac\n'), ((1628, 1638), 'sympy.Rational', 'frac', (['(1)', '(5)'], {}), '(1, 5)\n', (1632, 1638), True, 'from sympy import Rational as frac\n'), ((1869, 1880), 'sympy.Rational', 'frac', (['(1)', '(24)'], {}), '(1, 24)\n', (1873, 1880), True, 'from sympy import Rational as frac\n'), ((1882, 1914), 'numpy.column_stack', 'numpy.column_stack', (['[+u, +v, +w]'], {}), '([+u, +v, +w])\n', (1900, 1914), False, 'import numpy\n'), ((1926, 1937), 'sympy.Rational', 'frac', (['(1)', '(24)'], {}), '(1, 24)\n', (1930, 1937), True, 'from sympy import Rational as frac\n'), ((1939, 1971), 'numpy.column_stack', 'numpy.column_stack', (['[+u, -v, -w]'], {}), '([+u, -v, -w])\n', (1957, 1971), False, 'import numpy\n'), ((1983, 1994), 'sympy.Rational', 'frac', (['(1)', '(24)'], {}), '(1, 24)\n', (1987, 1994), True, 'from sympy import Rational as frac\n'), ((1996, 2028), 'numpy.column_stack', 'numpy.column_stack', (['[+u, +w, -v]'], {}), '([+u, +w, -v])\n', (2014, 2028), False, 'import numpy\n'), ((2040, 2051), 'sympy.Rational', 'frac', (['(1)', '(24)'], {}), '(1, 24)\n', (2044, 2051), True, 'from sympy import Rational as frac\n'), ((2053, 2085), 'numpy.column_stack', 'numpy.column_stack', (['[+u, -w, +v]'], {}), '([+u, -w, +v])\n', (2071, 2085), False, 'import numpy\n'), ((2513, 2524), 'sympy.Rational', 'frac', (['(5)', '(21)'], {}), '(5, 21)\n', (2517, 2524), True, 'from sympy import Rational as frac\n'), ((2755, 2768), 'sympy.Rational', 'frac', (['(16)', '(600)'], {}), '(16, 600)\n', (2759, 
2768), True, 'from sympy import Rational as frac\n'), ((2796, 2809), 'sympy.Rational', 'frac', (['(21)', '(600)'], {}), '(21, 600)\n', (2800, 2809), True, 'from sympy import Rational as frac\n'), ((2811, 2843), 'numpy.column_stack', 'numpy.column_stack', (['[+u, +v, +w]'], {}), '([+u, +v, +w])\n', (2829, 2843), False, 'import numpy\n'), ((2855, 2868), 'sympy.Rational', 'frac', (['(21)', '(600)'], {}), '(21, 600)\n', (2859, 2868), True, 'from sympy import Rational as frac\n'), ((2870, 2902), 'numpy.column_stack', 'numpy.column_stack', (['[+u, -v, -w]'], {}), '([+u, -v, -w])\n', (2888, 2902), False, 'import numpy\n'), ((2914, 2927), 'sympy.Rational', 'frac', (['(21)', '(600)'], {}), '(21, 600)\n', (2918, 2927), True, 'from sympy import Rational as frac\n'), ((2929, 2961), 'numpy.column_stack', 'numpy.column_stack', (['[+u, +w, -v]'], {}), '([+u, +w, -v])\n', (2947, 2961), False, 'import numpy\n'), ((2973, 2986), 'sympy.Rational', 'frac', (['(21)', '(600)'], {}), '(21, 600)\n', (2977, 2986), True, 'from sympy import Rational as frac\n'), ((2988, 3020), 'numpy.column_stack', 'numpy.column_stack', (['[+u, -w, +v]'], {}), '([+u, -w, +v])\n', (3006, 3020), False, 'import numpy\n'), ((7817, 7895), 'numpy.array', 'numpy.array', (['[z[3] - z[2], z[1] - z[4], z[5] - z[1], z[2] - z[5], z[4] - z[3]]'], {}), '([z[3] - z[2], z[1] - z[4], z[5] - z[1], z[2] - z[5], z[4] - z[3]])\n', (7828, 7895), False, 'import numpy\n'), ((7944, 8022), 'numpy.array', 'numpy.array', (['[z[4] + z[5], z[5] + z[3], z[2] + z[4], z[3] + z[1], z[1] + z[2]]'], {}), '([z[4] + z[5], z[5] + z[3], z[2] + z[4], z[3] + z[1], z[1] + z[2]])\n', (7955, 8022), False, 'import numpy\n'), ((8071, 8149), 'numpy.array', 'numpy.array', (['[z[0] + z[1], z[0] + z[2], z[0] + z[3], z[0] + z[4], z[0] + z[5]]'], {}), '([z[0] + z[1], z[0] + z[2], z[0] + z[3], z[0] + z[4], z[0] + z[5]])\n', (8082, 8149), False, 'import numpy\n'), ((8345, 8377), 'numpy.column_stack', 'numpy.column_stack', (['[+u, +v, +w]'], {}), '([+u, +v, 
+w])\n', (8363, 8377), False, 'import numpy\n'), ((8392, 8424), 'numpy.column_stack', 'numpy.column_stack', (['[+u, -v, -w]'], {}), '([+u, -v, -w])\n', (8410, 8424), False, 'import numpy\n'), ((8439, 8471), 'numpy.column_stack', 'numpy.column_stack', (['[-u, -v, +w]'], {}), '([-u, -v, +w])\n', (8457, 8471), False, 'import numpy\n'), ((8486, 8518), 'numpy.column_stack', 'numpy.column_stack', (['[-u, +v, -w]'], {}), '([-u, +v, -w])\n', (8504, 8518), False, 'import numpy\n'), ((8543, 8575), 'numpy.column_stack', 'numpy.column_stack', (['[+v, +w, +u]'], {}), '([+v, +w, +u])\n', (8561, 8575), False, 'import numpy\n'), ((8590, 8622), 'numpy.column_stack', 'numpy.column_stack', (['[+v, -w, -u]'], {}), '([+v, -w, -u])\n', (8608, 8622), False, 'import numpy\n'), ((8637, 8669), 'numpy.column_stack', 'numpy.column_stack', (['[-v, -w, +u]'], {}), '([-v, -w, +u])\n', (8655, 8669), False, 'import numpy\n'), ((8684, 8716), 'numpy.column_stack', 'numpy.column_stack', (['[-v, +w, -u]'], {}), '([-v, +w, -u])\n', (8702, 8716), False, 'import numpy\n'), ((8741, 8773), 'numpy.column_stack', 'numpy.column_stack', (['[+w, +u, +v]'], {}), '([+w, +u, +v])\n', (8759, 8773), False, 'import numpy\n'), ((8788, 8820), 'numpy.column_stack', 'numpy.column_stack', (['[+w, -u, -v]'], {}), '([+w, -u, -v])\n', (8806, 8820), False, 'import numpy\n'), ((8835, 8867), 'numpy.column_stack', 'numpy.column_stack', (['[-w, -u, +v]'], {}), '([-w, -u, +v])\n', (8853, 8867), False, 'import numpy\n'), ((8882, 8914), 'numpy.column_stack', 'numpy.column_stack', (['[-w, +u, -v]'], {}), '([-w, +u, -v])\n', (8900, 8914), False, 'import numpy\n'), ((984, 991), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (988, 991), False, 'from sympy import sqrt\n'), ((1641, 1653), 'sympy.Rational', 'frac', (['(1)', '(105)'], {}), '(1, 105)\n', (1645, 1653), True, 'from sympy import Rational as frac\n'), ((2527, 2539), 'sympy.Rational', 'frac', (['(5)', '(441)'], {}), '(5, 441)\n', (2531, 2539), True, 'from sympy import Rational as 
frac\n'), ((4104, 4111), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (4108, 4111), False, 'from sympy import sqrt\n'), ((4928, 4935), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (4932, 4935), False, 'from sympy import sqrt\n'), ((6363, 6370), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (6367, 6370), False, 'from sympy import sqrt\n'), ((597, 607), 'sympy.Rational', 'frac', (['(1)', '(2)'], {}), '(1, 2)\n', (601, 607), True, 'from sympy import Rational as frac\n'), ((3278, 3285), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (3282, 3285), False, 'from sympy import sqrt\n'), ((3342, 3349), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (3346, 3349), False, 'from sympy import sqrt\n'), ((4026, 4033), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (4030, 4033), False, 'from sympy import sqrt\n'), ((4791, 4798), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (4795, 4798), False, 'from sympy import sqrt\n'), ((7201, 7208), 'sympy.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (7205, 7208), False, 'from sympy import sqrt\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 17:25:11 2020
@author: nmei
"""
import os
from tqdm import tqdm
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import LeavePOut,cross_validate
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
from sklearn.base import clone
from matplotlib import pyplot as plt
import seaborn as sns
# Plot styling for the heatmap produced at the end of the script.
sns.set_style('white')
#sns.set_context('talk')

# Binary decoding targets: animal vs object words.
label_map = {'animal':0,'object':1}

# Input/output locations.
working_dir = '../results'
working_data = os.path.join(working_dir,'sampled_words.csv')  # NOTE(review): not used below
source_data = os.path.join('../data','Affective norms for 380 Spanish words belonging to three different semantic categories.xls')
language_model = os.path.join('../results/','es_all_words_from_affective_norms.csv')

print('loading model, and it is going to take some time...')
# Word-embedding table; indexed per word below (model_word2vec[word]).
model_word2vec = pd.read_csv(language_model,encoding = 'latin-1')

# Affective norms (frequency, familiarity, concreteness, ...) for the words.
df = pd.read_excel(source_data,encoding = 'latin-1')
df = df.drop_duplicates(['English'])
df = df[df['Frequency'] != 0]  # drop zero-frequency entries
df = df[np.logical_or(df['Category'] == 'animal',df['Category'] == 'object')]
df.loc[:,'log_frequency'] = np.log(df['Frequency'].values)

df_animal = df[df['Category'] == 'animal']
df_object = df[df['Category'] == 'object']
# Keep words with mid-range familiarity (3-5) and high concreteness (6-8).
# NOTE(review): assigning a new column on these slices can raise pandas'
# SettingWithCopyWarning; behaviour is unchanged here.
df_animal['picked'] = np.logical_and(df_animal['Mean\nFamiliarity'].apply(lambda x: 3<=x<=5),
                                     df_animal['Mean\nConcreteness'].apply(lambda x: 6<=x<=8))
df_object['picked'] = np.logical_and(df_object['Mean\nFamiliarity'].apply(lambda x: 3<=x<=5),
                                     df_object['Mean\nConcreteness'].apply(lambda x: 6<=x<=8))
# Sample the same number of words from both categories.
lower_bound = np.min([np.sum(df_animal['picked']),np.sum(df_object['picked'])])
print('sample {lower_bound} words'.format(lower_bound=lower_bound))
if lower_bound > 100:
    lower_bound = lower_bound - (lower_bound % 100)  # round down to a multiple of 100
df_animal = df_animal[df_animal['picked']]
df_object = df_object[df_object['picked']]
# Within the picked pool, keep the most familiar words.
df_animal = df_animal.nlargest(lower_bound,'Mean\nFamiliarity')
df_object = df_object.nlargest(lower_bound,'Mean\nFamiliarity')
# Combine the sampled animal and object words, ordered by category then word.
df_final = pd.concat([df_animal,df_object])
df_final = df_final.sort_values(['Category','Word'])
# NOTE(review): the original script had a stray undefined name (`ewrq`) here,
# which raised NameError and aborted the run right after sampling. If an
# intentional early stop was wanted, an explicit `raise SystemExit` would
# say so; the stray token has been removed so the analysis below can run.
# Classifier: standardize the embedding features, then logistic regression.
base_clf = make_pipeline(StandardScaler(),
                         LogisticRegression(C=1, solver='liblinear',
                                            multi_class='auto'))
# One embedding vector per sampled word, with matching binary labels.
word_vecs = np.array([model_word2vec[word] for word in df_final['Word']])
labels = np.array([label_map[item] for item in df_final['Category']])
# Leave-2-out cross-validation: every pair of words is held out once.
cv = LeavePOut(p = 2)
groups = df_final['Word'].values
results = dict(
    fold = [],
    score = [],
    test_word1 = [],
    test_word2 = [],
)
for fold, (idx_train,idx_test) in tqdm(enumerate(cv.split(word_vecs,labels,groups = groups))):
    X_train,y_train = word_vecs[idx_train],labels[idx_train]
    X_test,y_test = word_vecs[idx_test],labels[idx_test]
    X_train,y_train = shuffle(X_train,y_train)
    test_pairs = groups[idx_test]
    clf = clone(base_clf)  # fresh, unfitted copy for each fold
    clf.fit(X_train,y_train)
    # Decoding "distance" between the two held-out words: absolute difference
    # of their predicted probabilities for the last class.
    preds = clf.predict_proba(X_test)[:,-1]
    score = np.abs(preds[0] - preds[1])
    results['fold'].append(fold + 1)
    results['score'].append(score)
    results['test_word1'].append(test_pairs[0])
    results['test_word2'].append(test_pairs[1])
results_to_save = pd.DataFrame(results)
# Build a symmetric word-by-word decoding-distance matrix from the pair scores.
idx_map = {word:idx for idx,word in enumerate(groups)}
decode_distance = np.zeros((len(groups),len(groups)))
for ii,row in results_to_save.iterrows():
    decode_distance[idx_map[row['test_word1']],
                    idx_map[row['test_word2']]] = row['score']
    decode_distance[idx_map[row['test_word2']],
                    idx_map[row['test_word1']]] = row['score']
np.fill_diagonal(decode_distance,np.nan)  # self-distance is undefined
# Label rows/columns with the English translations for readability.
axis_labels = df_final['English'].values
decode_distance = pd.DataFrame(decode_distance,index = axis_labels,columns=axis_labels)
fig,ax = plt.subplots(figsize = (20,20))
ax = sns.heatmap(decode_distance,
                 xticklabels = True,
                 yticklabels = True,
                 square = True,
                 ax = ax,
                 cmap = plt.cm.coolwarm,
                 )
_ = ax.set(title = 'Red = dissimilar, Blue = similar')
# Dashed lines mark the animal/object category boundary (df_final is sorted
# by category, so each half of the axes is one category).
ax.axhline(round(len(groups) / 2),linestyle = '--', color = 'black', alpha = 1.)
ax.axvline(round(len(groups) / 2),linestyle = '--', color = 'black', alpha = 1.)
fig.savefig('../figures/decode sampled words (decode in spanish, translated).jpeg',
            dpi = 500,
            bbox_inches = 'tight')
| [
"seaborn.heatmap",
"sklearn.preprocessing.StandardScaler",
"numpy.abs",
"numpy.sum",
"pandas.read_csv",
"os.path.join",
"sklearn.base.clone",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"pandas.concat",
"seaborn.set_style",
"numpy.fill_diagonal",
"pandas.read_excel",
"sklearn.linear_... | [((531, 553), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (544, 553), True, 'import seaborn as sns\n'), ((665, 711), 'os.path.join', 'os.path.join', (['working_dir', '"""sampled_words.csv"""'], {}), "(working_dir, 'sampled_words.csv')\n", (677, 711), False, 'import os\n'), ((726, 852), 'os.path.join', 'os.path.join', (['"""../data"""', '"""Affective norms for 380 Spanish words belonging to three different semantic categories.xls"""'], {}), "('../data',\n 'Affective norms for 380 Spanish words belonging to three different semantic categories.xls'\n )\n", (738, 852), False, 'import os\n'), ((861, 929), 'os.path.join', 'os.path.join', (['"""../results/"""', '"""es_all_words_from_affective_norms.csv"""'], {}), "('../results/', 'es_all_words_from_affective_norms.csv')\n", (873, 929), False, 'import os\n'), ((1011, 1058), 'pandas.read_csv', 'pd.read_csv', (['language_model'], {'encoding': '"""latin-1"""'}), "(language_model, encoding='latin-1')\n", (1022, 1058), True, 'import pandas as pd\n'), ((1068, 1114), 'pandas.read_excel', 'pd.read_excel', (['source_data'], {'encoding': '"""latin-1"""'}), "(source_data, encoding='latin-1')\n", (1081, 1114), True, 'import pandas as pd\n'), ((1293, 1323), 'numpy.log', 'np.log', (["df['Frequency'].values"], {}), "(df['Frequency'].values)\n", (1299, 1323), True, 'import numpy as np\n'), ((2252, 2285), 'pandas.concat', 'pd.concat', (['[df_animal, df_object]'], {}), '([df_animal, df_object])\n', (2261, 2285), True, 'import pandas as pd\n'), ((2530, 2591), 'numpy.array', 'np.array', (["[model_word2vec[word] for word in df_final['Word']]"], {}), "([model_word2vec[word] for word in df_final['Word']])\n", (2538, 2591), True, 'import numpy as np\n'), ((2602, 2662), 'numpy.array', 'np.array', (["[label_map[item] for item in df_final['Category']]"], {}), "([label_map[item] for item in df_final['Category']])\n", (2610, 2662), True, 'import numpy as np\n'), ((2669, 2683), 
'sklearn.model_selection.LeavePOut', 'LeavePOut', ([], {'p': '(2)'}), '(p=2)\n', (2678, 2683), False, 'from sklearn.model_selection import LeavePOut, cross_validate\n'), ((3519, 3540), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (3531, 3540), True, 'import pandas as pd\n'), ((3928, 3969), 'numpy.fill_diagonal', 'np.fill_diagonal', (['decode_distance', 'np.nan'], {}), '(decode_distance, np.nan)\n', (3944, 3969), True, 'import numpy as np\n'), ((4034, 4103), 'pandas.DataFrame', 'pd.DataFrame', (['decode_distance'], {'index': 'axis_labels', 'columns': 'axis_labels'}), '(decode_distance, index=axis_labels, columns=axis_labels)\n', (4046, 4103), True, 'import pandas as pd\n'), ((4116, 4146), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (4128, 4146), True, 'from matplotlib import pyplot as plt\n'), ((4154, 4265), 'seaborn.heatmap', 'sns.heatmap', (['decode_distance'], {'xticklabels': '(True)', 'yticklabels': '(True)', 'square': '(True)', 'ax': 'ax', 'cmap': 'plt.cm.coolwarm'}), '(decode_distance, xticklabels=True, yticklabels=True, square=\n True, ax=ax, cmap=plt.cm.coolwarm)\n', (4165, 4265), True, 'import seaborn as sns\n'), ((1194, 1263), 'numpy.logical_or', 'np.logical_or', (["(df['Category'] == 'animal')", "(df['Category'] == 'object')"], {}), "(df['Category'] == 'animal', df['Category'] == 'object')\n", (1207, 1263), True, 'import numpy as np\n'), ((2373, 2389), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2387, 2389), False, 'from sklearn.preprocessing import StandardScaler\n'), ((2412, 2475), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'solver': '"""liblinear"""', 'multi_class': '"""auto"""'}), "(C=1, solver='liblinear', multi_class='auto')\n", (2430, 2475), False, 'from sklearn.linear_model import LogisticRegression\n'), ((3125, 3150), 'sklearn.utils.shuffle', 'shuffle', (['X_train', 'y_train'], {}), '(X_train, 
y_train)\n', (3132, 3150), False, 'from sklearn.utils import shuffle\n'), ((3196, 3211), 'sklearn.base.clone', 'clone', (['base_clf'], {}), '(base_clf)\n', (3201, 3211), False, 'from sklearn.base import clone\n'), ((3300, 3327), 'numpy.abs', 'np.abs', (['(preds[0] - preds[1])'], {}), '(preds[0] - preds[1])\n', (3306, 3327), True, 'import numpy as np\n'), ((1817, 1844), 'numpy.sum', 'np.sum', (["df_animal['picked']"], {}), "(df_animal['picked'])\n", (1823, 1844), True, 'import numpy as np\n'), ((1845, 1872), 'numpy.sum', 'np.sum', (["df_object['picked']"], {}), "(df_object['picked'])\n", (1851, 1872), True, 'import numpy as np\n')] |
"""Takeoff-hover-land for one CF. Useful to validate hardware config."""
from pycrazyswarm import Crazyswarm
from scipy.integrate import solve_ivp
import numpy as np
import math
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# --- Flight timing ---
SETUP_DURATION = 2.0    # s, time allotted to reach the initial pose
TAKEOFF_DURATION = 2.0  # s
HEIGHT = 1.0            # takeoff hover height passed to cf.takeoff

# --- Initial state [X, Y, Z, Alpha (heading), Beta (pitch angle)] ---
X_INITIAL = 0.0
Y_INITIAL = 0.0
Z_INITIAL = 1.0
Alpha_INITIAL = 0.0
Beta_INITIAL = 0.0

# --- Goal position ---
# NOTE(review): goals and speed are commented in cm while Crazyswarm
# positions are typically in meters — confirm the intended units.
X_GOAL = 30.0 # cm
Y_GOAL = 60.0 # cm
Z_GOAL = 50.0

# --- Guidance-law parameters (see odes below) ---
v = 10.0 # cm/s
kAlpha = 2 # k > (v/Radius) = 2
kBeta = 3 # k > (v/Radius) = 2

# --- Simulation timing ---
simTime = 40#40 # sec
sampleTime = 1 # sec
iterPerSample = 10                   # ODE sub-steps commanded per sample
iterTime = sampleTime/iterPerSample  # duration of each commanded sub-step
def odes(t, x, AlphaRelBearing, BetaRelBearing):
    """Right-hand side of the constant-speed 3-D guidance ODE.

    Parameters
    ----------
    t : float
        Time; unused, but required by ``solve_ivp``'s callback signature.
    x : sequence of 5 floats
        State ``[X, Y, Z, Alpha, Beta]``; only the two angles are read here
        (the position derivatives depend on the angles alone).
    AlphaRelBearing, BetaRelBearing : float
        Relative bearings to the goal, precomputed by the caller; only their
        signs are used to steer.

    Returns
    -------
    list of float
        ``[dXdt, dYdt, dZdt, dAlphadt, dBetadt]``.

    Uses the module-level constants ``v`` (speed), ``kAlpha`` and ``kBeta``
    (steering gains).
    """
    # Improvement: dropped the unused locals X, Y, Z (x[0:3] was never read).
    Alpha = x[3]
    Beta = x[4]
    # Wrap the angles: Alpha into (-pi, pi], Beta into [-pi/2, pi/2].
    Alpha = math.atan2(np.sin(Alpha),np.cos(Alpha))
    #Beta = math.atan2(np.sin(Beta),np.cos(Beta))
    Beta = math.asin(np.sin(Beta)) ###############################
    # Kinematics: constant speed v along the (Alpha, Beta) direction.
    dXdt = v*np.cos(Alpha)*np.cos(Beta)
    dYdt = v*np.sin(Alpha)*np.cos(Beta)
    dZdt = v*np.sin(Beta)
    # Bang-bang steering toward the goal bearing.
    dAlphadt = -kAlpha*np.sign(AlphaRelBearing)
    dBetadt = -kBeta*np.sign(BetaRelBearing)
    return [dXdt, dYdt, dZdt, dAlphadt, dBetadt]
def main():
    """Fly one Crazyflie along the bang-bang guidance law, then plot results."""
    swarm = Crazyswarm()
    timeHelper = swarm.timeHelper
    cf = swarm.allcfs.crazyflies[0]

    cf.takeoff(targetHeight=HEIGHT, duration=TAKEOFF_DURATION)
    timeHelper.sleep(TAKEOFF_DURATION)

    # Ensure initial conditions:
    initPosnSet = [X_INITIAL,Y_INITIAL,Z_INITIAL]
    cf.goTo(goal=initPosnSet, yaw=Alpha_INITIAL, duration=SETUP_DURATION)
    timeHelper.sleep(SETUP_DURATION)

    # initial conditions
    initStateVec = np.array([[X_INITIAL, Y_INITIAL, Z_INITIAL, Alpha_INITIAL, Beta_INITIAL]])
    xSol = initStateVec  # integrated state history, rows of [X, Y, Z, Alpha, Beta]
    tSol = [0]           # matching time stamps
    # Measured poses [x, y, z, yaw] sampled from the vehicle.
    xActual = np.array([[cf.position()[0], cf.position()[1], cf.position()[2], cf.yaw()]])

    for i in range (0, simTime, sampleTime):
        delTime = [i, i+sampleTime]  # NOTE(review): unused
        x0 = xSol[-1,:] # last row of xSol matrix ----------->>>>>>>>> Should we take the freshly sensed values or the previous values by odeSolver? We can't get Beta. Alpha is yaw so fine
        #x0 = np.array([cf.position()[0], cf.position()[1], cf.yaw()])
        X = x0[0]
        Y = x0[1]
        Z = x0[2]
        Alpha = x0[3]
        Beta = x0[4]
        # Wrap heading, then compute the horizontal relative bearing to the goal.
        Alpha = math.atan2(np.sin(Alpha),np.cos(Alpha))
        Alphabearing = math.atan2((Y_GOAL-Y),(X_GOAL-X))
        AlphaRelBearing = Alpha-Alphabearing
        AlphaRelBearing = math.atan2(np.sin(AlphaRelBearing), np.cos(AlphaRelBearing))
        # Vertical bearing; computed against the fixed initial-to-goal distance.
        #Betabearing = math.atan2((Z_GOAL-Z),(math.sqrt((X_GOAL-X_INITIAL)**2+(Y_GOAL-Y_INITIAL)**2)))
        Betabearing = math.asin((Z_GOAL-Z)/(math.sqrt( (X_GOAL-X_INITIAL)**2 + (Y_GOAL-Y_INITIAL)**2 + (Z_GOAL-Z_INITIAL)**2 )))
        #Beta = math.asin(np.sin(Beta)) #########
        BetaRelBearing = Beta-Betabearing
        #BetaRelBearing = math.atan2(np.sin(BetaRelBearing), np.cos(BetaRelBearing))
        # Integrate one sample interval, reporting iterPerSample sub-steps.
        tEval=np.linspace(i, i+sampleTime, iterPerSample+1)
        #print(tEval)
        sol = solve_ivp(odes, (i, i+sampleTime), x0, t_eval=tEval, args=(AlphaRelBearing, BetaRelBearing)) #args pass the arguments to odes function
        xInterval=sol.y # xInterval will include x(@i) and x(@i+sampleTime)
        xInterval=np.transpose(xInterval) # each row should have a new set of values of x,y,alpha
        tInterval=sol.t # i <= t <= i+sampleTime
        # While appending solutions to master arrays, removing 1st elements as they'll be appended as the 'last element of previous iteration'
        xSol = np.vstack((xSol, xInterval[1:,:])) #stacking new solutions (except the first row) over previous solutions
        tSol = np.hstack((tSol, tInterval[1:])) #stacking new array without its first element to the previous array
        # Command the vehicle through each integrated sub-step of this interval.
        for j in range (iterPerSample):
            x_nxt = xInterval[j+1, 0]
            y_nxt = xInterval[j+1, 1]
            z_nxt = xInterval[j+1, 2]
            yaw_nxt = xInterval[j+1, 3]
            pos_nxt = [x_nxt, y_nxt, z_nxt]
            cf.goTo(goal=pos_nxt, yaw=yaw_nxt, duration=iterTime)
            timeHelper.sleep(iterTime)
            # Record the measured pose after each commanded sub-step.
            xActualNext = np.array([[cf.position()[0], cf.position()[1], cf.position()[2], cf.yaw()]])
            xActual = np.vstack((xActual, xActualNext))

    cf.land(targetHeight=0.04, duration=2.5)
    timeHelper.sleep(TAKEOFF_DURATION)

    # Unpack the integrated trajectory for plotting.
    X = xSol[:,0]
    Y = xSol[:,1]
    Z = xSol[:,2]
    Alpha = xSol[:,3]
    Beta = xSol[:,4]

    # plot the results
    figure, axis = plt.subplots(2, 3)
    # X with time
    axis[0, 0].plot(tSol, X)
    axis[0, 0].set_title("X vs t")
    # Y with time
    axis[0, 1].plot(tSol, Y)
    axis[0, 1].set_title("Y vs t")
    # Z with time
    axis[0, 2].plot(tSol, Z)
    axis[0, 2].set_title("Z vs t")
    # Alpha with time
    axis[1, 0].plot(tSol, Alpha)
    axis[1, 0].set_title("Alpha vs t")
    # Beta with time
    axis[1, 1].plot(tSol, Beta)
    axis[1, 1].set_title("Beta vs t")

    # 3-D plot of the measured (not integrated) trajectory.
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    ax.plot3D(xActual[:,0], xActual[:,1], xActual[:,2], 'gray')
    #ax.scatter3D(xActual[:,0], xActual[:,1], xActual[:,2], cmap='Greens');
    # Combine all the operations and display
    plt.show()

if __name__ == "__main__":
    main()
| [
"matplotlib.pyplot.show",
"pycrazyswarm.Crazyswarm",
"math.atan2",
"matplotlib.pyplot.axes",
"math.sqrt",
"scipy.integrate.solve_ivp",
"numpy.transpose",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.sin",
"numpy.array",
"numpy.cos",
"numpy.sign",
"numpy.linspace",
"matplotlib.pyplo... | [((1361, 1373), 'pycrazyswarm.Crazyswarm', 'Crazyswarm', ([], {}), '()\n', (1371, 1373), False, 'from pycrazyswarm import Crazyswarm\n'), ((1800, 1874), 'numpy.array', 'np.array', (['[[X_INITIAL, Y_INITIAL, Z_INITIAL, Alpha_INITIAL, Beta_INITIAL]]'], {}), '([[X_INITIAL, Y_INITIAL, Z_INITIAL, Alpha_INITIAL, Beta_INITIAL]])\n', (1808, 1874), True, 'import numpy as np\n'), ((4872, 4890), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(3)'], {}), '(2, 3)\n', (4884, 4890), True, 'import matplotlib.pyplot as plt\n'), ((5346, 5358), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5356, 5358), True, 'import matplotlib.pyplot as plt\n'), ((5368, 5393), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (5376, 5393), True, 'import matplotlib.pyplot as plt\n'), ((5588, 5598), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5596, 5598), True, 'import matplotlib.pyplot as plt\n'), ((859, 872), 'numpy.sin', 'np.sin', (['Alpha'], {}), '(Alpha)\n', (865, 872), True, 'import numpy as np\n'), ((873, 886), 'numpy.cos', 'np.cos', (['Alpha'], {}), '(Alpha)\n', (879, 886), True, 'import numpy as np\n'), ((959, 971), 'numpy.sin', 'np.sin', (['Beta'], {}), '(Beta)\n', (965, 971), True, 'import numpy as np\n'), ((1108, 1120), 'numpy.cos', 'np.cos', (['Beta'], {}), '(Beta)\n', (1114, 1120), True, 'import numpy as np\n'), ((1149, 1161), 'numpy.cos', 'np.cos', (['Beta'], {}), '(Beta)\n', (1155, 1161), True, 'import numpy as np\n'), ((1175, 1187), 'numpy.sin', 'np.sin', (['Beta'], {}), '(Beta)\n', (1181, 1187), True, 'import numpy as np\n'), ((1211, 1235), 'numpy.sign', 'np.sign', (['AlphaRelBearing'], {}), '(AlphaRelBearing)\n', (1218, 1235), True, 'import numpy as np\n'), ((1257, 1280), 'numpy.sign', 'np.sign', (['BetaRelBearing'], {}), '(BetaRelBearing)\n', (1264, 1280), True, 'import numpy as np\n'), ((2577, 2611), 'math.atan2', 'math.atan2', (['(Y_GOAL - Y)', '(X_GOAL - X)'], {}), '(Y_GOAL - 
Y, X_GOAL - X)\n', (2587, 2611), False, 'import math\n'), ((3219, 3268), 'numpy.linspace', 'np.linspace', (['i', '(i + sampleTime)', '(iterPerSample + 1)'], {}), '(i, i + sampleTime, iterPerSample + 1)\n', (3230, 3268), True, 'import numpy as np\n'), ((3319, 3418), 'scipy.integrate.solve_ivp', 'solve_ivp', (['odes', '(i, i + sampleTime)', 'x0'], {'t_eval': 'tEval', 'args': '(AlphaRelBearing, BetaRelBearing)'}), '(odes, (i, i + sampleTime), x0, t_eval=tEval, args=(\n AlphaRelBearing, BetaRelBearing))\n', (3328, 3418), False, 'from scipy.integrate import solve_ivp\n'), ((3573, 3596), 'numpy.transpose', 'np.transpose', (['xInterval'], {}), '(xInterval)\n', (3585, 3596), True, 'import numpy as np\n'), ((3907, 3942), 'numpy.vstack', 'np.vstack', (['(xSol, xInterval[1:, :])'], {}), '((xSol, xInterval[1:, :]))\n', (3916, 3942), True, 'import numpy as np\n'), ((4031, 4063), 'numpy.hstack', 'np.hstack', (['(tSol, tInterval[1:])'], {}), '((tSol, tInterval[1:]))\n', (4040, 4063), True, 'import numpy as np\n'), ((1094, 1107), 'numpy.cos', 'np.cos', (['Alpha'], {}), '(Alpha)\n', (1100, 1107), True, 'import numpy as np\n'), ((1135, 1148), 'numpy.sin', 'np.sin', (['Alpha'], {}), '(Alpha)\n', (1141, 1148), True, 'import numpy as np\n'), ((2525, 2538), 'numpy.sin', 'np.sin', (['Alpha'], {}), '(Alpha)\n', (2531, 2538), True, 'import numpy as np\n'), ((2539, 2552), 'numpy.cos', 'np.cos', (['Alpha'], {}), '(Alpha)\n', (2545, 2552), True, 'import numpy as np\n'), ((2703, 2726), 'numpy.sin', 'np.sin', (['AlphaRelBearing'], {}), '(AlphaRelBearing)\n', (2709, 2726), True, 'import numpy as np\n'), ((2728, 2751), 'numpy.cos', 'np.cos', (['AlphaRelBearing'], {}), '(AlphaRelBearing)\n', (2734, 2751), True, 'import numpy as np\n'), ((4595, 4628), 'numpy.vstack', 'np.vstack', (['(xActual, xActualNext)'], {}), '((xActual, xActualNext))\n', (4604, 4628), True, 'import numpy as np\n'), ((2915, 3011), 'math.sqrt', 'math.sqrt', (['((X_GOAL - X_INITIAL) ** 2 + (Y_GOAL - Y_INITIAL) ** 2 + (Z_GOAL -\n 
Z_INITIAL) ** 2)'], {}), '((X_GOAL - X_INITIAL) ** 2 + (Y_GOAL - Y_INITIAL) ** 2 + (Z_GOAL -\n Z_INITIAL) ** 2)\n', (2924, 3011), False, 'import math\n')] |
import json
import numpy as np
class LocationMapBounds:
def __init__(self, t=0, y=0, x=0):
self._t = t
self._y = y
self._x = x
@property
def t(self):
return self._t
@property
def y(self):
return self._y
@property
def x(self):
return self._x
class LocationMapCell:
def __init__(self, in_bikes=0, out_bikes=0):
self.in_bikes = in_bikes
self.out_bikes = out_bikes
def serialize_cell(obj):
if isinstance(obj, LocationMapCell):
serial = obj.__dict__
return serial
else:
raise TypeError ("Type not serializable")
class LocationMap:
def __init__(self, bounds, time_delta=1):
self.bounds = bounds
self.map_tensor = []
self.time_indices = []
self.time_delta = time_delta
self.current_bikes = np.zeros([bounds.y, bounds.x])
def add_time(self, time):
"""
Creates a map entry for the new given time
"""
# if time is not already present and can insert another time index
if time not in self.time_indices and len(self.time_indices) < self.bounds.t:
time_map = {}
self.map_tensor.append(time_map)
self.time_indices.append(time)
def get_bounds(self):
return self.bounds
def get(self, t, y, x):
# We use a sparse index encoding to save space
new_index = "{}-{}".format(y, x)
if new_index not in self.map_tensor[t]:
self.map_tensor[t][new_index] = LocationMapCell()
return self.map_tensor[t][new_index]
@property
def total_bikes(self):
return int(np.sum(self.current_bikes))
def get_total_bikes_at(self, y, x):
return self.current_bikes[y][x]
def set_total_bikes_at(self, y, x, bikes):
"""
NOTE: This method should be used only in the initialization phase.
"""
self.current_bikes[y][x] = bikes
def decrement_total_bikes_at(self, y, x):
self.current_bikes[y][x]-= 1
def increment_total_bikes_at(self, y, x):
self.current_bikes[y][x]+= 1
def to_json(self):
return json.dumps({"meta_data":{"time_delta":self.time_delta, "bounds": {"t": self.bounds.t, "y": self.bounds.y, "x": self.bounds.x}}, "map": self.map_tensor}, default=serialize_cell) | [
"numpy.zeros",
"numpy.sum",
"json.dumps"
] | [((861, 891), 'numpy.zeros', 'np.zeros', (['[bounds.y, bounds.x]'], {}), '([bounds.y, bounds.x])\n', (869, 891), True, 'import numpy as np\n'), ((2175, 2362), 'json.dumps', 'json.dumps', (["{'meta_data': {'time_delta': self.time_delta, 'bounds': {'t': self.bounds.t,\n 'y': self.bounds.y, 'x': self.bounds.x}}, 'map': self.map_tensor}"], {'default': 'serialize_cell'}), "({'meta_data': {'time_delta': self.time_delta, 'bounds': {'t':\n self.bounds.t, 'y': self.bounds.y, 'x': self.bounds.x}}, 'map': self.\n map_tensor}, default=serialize_cell)\n", (2185, 2362), False, 'import json\n'), ((1667, 1693), 'numpy.sum', 'np.sum', (['self.current_bikes'], {}), '(self.current_bikes)\n', (1673, 1693), True, 'import numpy as np\n')] |
import operator
import pytest
import numpy as np
from ...core import ProxyTypeError
from ...containers import Tuple, List
from ...identifier import parameter
from ..bool_ import Bool
from ..string import Str
from ..number import Float, Int, Number, _binop_result
from ...core.tests.utils import operator_test
class TestPromote(object):
def test_number_unpromotable(self):
with pytest.raises(ProxyTypeError):
Number._promote(2.2)
with pytest.raises(ProxyTypeError):
Number._promote(0)
def test_primitives(self):
assert isinstance(Int._promote(0), Int)
assert isinstance(Float._promote(2), Float)
assert isinstance(Float._promote(2.2), Float)
def test_proxytypes(self):
assert isinstance(Int._promote(Int(0)), Int)
assert isinstance(Float._promote(Float(2.2)), Float)
def test_wrong_primitives(self):
with pytest.raises(ProxyTypeError):
Int._promote(2.2)
def test_wrong_proxytypes(self):
with pytest.raises(
ProxyTypeError, match=r"You need to convert it explicitly, like `Int\(x\)`"
):
Int._promote(Float(2.2))
with pytest.raises(
ProxyTypeError,
match=r"You need to convert it explicitly, like `Float\(x\)`",
):
Float._promote(Int(0))
class TestConstruct(object):
def test_explicit_cast_passthrough(self):
i = Int(Int(1))
assert i.graft[i.graft["returns"]] == 1
assert i.params == ()
x = parameter("x", Int)
i = Int(x)
assert i.params == (x,)
def test_explicit_cast_to_int(self):
i = Int(Float(1.0))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Float)
i = Int(x)
assert i.params == (x,)
i = Int(Bool(True))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Bool)
i = Int(x)
assert i.params == (x,)
i = Int(Str("1"))
assert isinstance(i, Int)
assert i.graft[i.graft["returns"]][0] == "wf.Int.cast"
assert i.params == ()
x = parameter("x", Str)
i = Int(x)
assert i.params == (x,)
def test_explicit_cast_to_float(self):
f = Float(Int(1))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Int)
f = Float(x)
assert f.params == (x,)
f = Float(Bool(True))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Bool)
f = Float(x)
assert f.params == (x,)
f = Float(Str("1"))
assert isinstance(f, Float)
assert f.graft[f.graft["returns"]][0] == "wf.Float.cast"
assert f.params == ()
x = parameter("x", Str)
f = Float(x)
assert f.params == (x,)
class TestNumPyScalars(object):
@pytest.mark.parametrize(
"val",
[
np.uint8(1),
np.uint16(1),
np.uint32(1),
np.uint64(1),
np.int8(1),
np.int16(1),
np.int32(1),
np.int64(1),
],
)
def test_int(self, val):
i = Int(val)
assert isinstance(i.graft[i.graft["returns"]], int)
assert i.params == ()
@pytest.mark.parametrize("val", [np.float16(1), np.float32(1), np.float64(1)])
def test_float(self, val):
i = Float(val)
assert isinstance(i.graft[i.graft["returns"]], float)
assert i.params == ()
def test_failure(self):
with pytest.raises(TypeError):
Float(np.int32(1))
with pytest.raises(TypeError):
Int(np.float64(1))
with pytest.raises(TypeError):
Int(np.datetime64("2020-01-01"))
@pytest.mark.parametrize(
"a, b, expected",
[
(Int(0), Int(0), Int),
(Float(0.0), Float(0.0), Float),
(Int(0), Float(0.0), Float),
(Float(0.0), Int(0), Float),
],
)
def test_binop_result(a, b, expected):
assert _binop_result(a, b) == expected
class TestAllOperators(object):
int_obj = Int(0)
float_obj = Float(0.0)
all_values_to_try = [Int(1), Float(2.2), Bool(True), List[Int]([1, 2])]
# ^ we use pre-promoted Proxytypes, not py types, since the `operator_test`
# helper checks if `type(value) is in accepted_types`
@pytest.mark.parametrize(
"operator, accepted_types, return_type",
[
["__abs__", (), Int],
["__add__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__div__", (Int, Float, Bool), (Int, Float)],
[
"__divmod__",
(Int, Float, Bool),
{
Float: Tuple[Float, Float],
Int: Tuple[Int, Int],
Bool: Tuple[Int, Int],
},
],
["__eq__", (Int, Float, Bool), Bool],
["__floordiv__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__ge__", (Int, Float, Bool), Bool],
["__gt__", (Int, Float, Bool), Bool],
["__invert__", (), Int],
["__le__", (Int, Float, Bool), Bool],
["__lt__", (Int, Float, Bool), Bool],
["__mod__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__mul__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__ne__", (Int, Float, Bool), Bool],
["__neg__", (), Int],
["__pos__", (), Int],
["__pow__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__radd__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rdiv__", (Int, Float, Bool), (Int, Float)],
[
"__rdivmod__",
(Int, Float, Bool),
{
Float: Tuple[Float, Float],
Int: Tuple[Int, Int],
Bool: Tuple[Int, Int],
},
],
["__rfloordiv__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rmod__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rmul__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rpow__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rsub__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__rtruediv__", (Int, Float, Bool), (Int, Float)],
["__sub__", (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}],
["__truediv__", (Int, Float, Bool), (Int, Float)],
# Int-specific methods
["__and__", [Int, Bool], Int],
["__lshift__", [Int, Bool], Int],
["__or__", [Int, Bool], Int],
["__rand__", [Int, Bool], Int],
["__rlshift__", [Int, Bool], Int],
["__ror__", [Int, Bool], Int],
["__rrshift__", [Int, Bool], Int],
["__rshift__", [Int, Bool], Int],
["__rxor__", [Int, Bool], Int],
["__xor__", [Int, Bool], Int],
],
)
def test_all_operators_int(self, operator, accepted_types, return_type):
operator_test(
self.int_obj, self.all_values_to_try, operator, accepted_types, return_type
)
@pytest.mark.parametrize(
"operator, accepted_types, return_type",
[
["__abs__", (), Float],
["__add__", (Int, Float, Bool), Float],
["__div__", (Int, Float, Bool), Float],
["__divmod__", (Int, Float, Bool), Tuple[Float, Float]],
["__eq__", (Int, Float, Bool), Bool],
["__floordiv__", (Int, Float, Bool), Float],
["__ge__", (Int, Float, Bool), Bool],
["__gt__", (Int, Float, Bool), Bool],
["__invert__", (), Float],
["__le__", (Int, Float, Bool), Bool],
["__lt__", (Int, Float, Bool), Bool],
["__mod__", (Int, Float, Bool), Float],
["__mul__", (Int, Float, Bool), Float],
["__ne__", (Int, Float, Bool), Bool],
["__neg__", (), Float],
["__pos__", (), Float],
["__pow__", (Int, Float, Bool), Float],
["__radd__", (Int, Float, Bool), Float],
["__rdiv__", (Int, Float, Bool), Float],
["__rdivmod__", (Int, Float, Bool), Tuple[Float, Float]],
["__rfloordiv__", (Int, Float, Bool), Float],
["__rmod__", (Int, Float, Bool), Float],
["__rmul__", (Int, Float, Bool), Float],
["__rpow__", (Int, Float, Bool), Float],
["__rsub__", (Int, Float, Bool), Float],
["__rtruediv__", (Int, Float, Bool), Float],
["__sub__", (Int, Float, Bool), Float],
["__truediv__", (Int, Float, Bool), Float],
],
)
def test_all_operators_float(self, operator, accepted_types, return_type):
operator_test(
self.float_obj,
self.all_values_to_try,
operator,
accepted_types,
return_type,
)
@pytest.mark.parametrize("obj", [Int(0), Float(2.2)])
@pytest.mark.parametrize(
"op, exception",
[(operator.truth, TypeError), (operator.index, TypeError), (hex, TypeError)],
)
def test_unsupported_unary_methods(self, obj, op, exception):
with pytest.raises(exception):
op(obj)
| [
"numpy.uint32",
"numpy.float16",
"numpy.uint64",
"numpy.uint8",
"numpy.datetime64",
"numpy.float32",
"pytest.raises",
"numpy.float64",
"numpy.int32",
"numpy.int64",
"numpy.uint16",
"pytest.mark.parametrize",
"numpy.int16",
"numpy.int8"
] | [((4683, 6791), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator, accepted_types, return_type"""', "[['__abs__', (), Int], ['__add__', (Int, Float, Bool), {Float: Float, Int:\n Int, Bool: Int}], ['__div__', (Int, Float, Bool), (Int, Float)], [\n '__divmod__', (Int, Float, Bool), {Float: Tuple[Float, Float], Int:\n Tuple[Int, Int], Bool: Tuple[Int, Int]}], ['__eq__', (Int, Float, Bool),\n Bool], ['__floordiv__', (Int, Float, Bool), {Float: Float, Int: Int,\n Bool: Int}], ['__ge__', (Int, Float, Bool), Bool], ['__gt__', (Int,\n Float, Bool), Bool], ['__invert__', (), Int], ['__le__', (Int, Float,\n Bool), Bool], ['__lt__', (Int, Float, Bool), Bool], ['__mod__', (Int,\n Float, Bool), {Float: Float, Int: Int, Bool: Int}], ['__mul__', (Int,\n Float, Bool), {Float: Float, Int: Int, Bool: Int}], ['__ne__', (Int,\n Float, Bool), Bool], ['__neg__', (), Int], ['__pos__', (), Int], [\n '__pow__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}], [\n '__radd__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}], [\n '__rdiv__', (Int, Float, Bool), (Int, Float)], ['__rdivmod__', (Int,\n Float, Bool), {Float: Tuple[Float, Float], Int: Tuple[Int, Int], Bool:\n Tuple[Int, Int]}], ['__rfloordiv__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rmod__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rmul__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rpow__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rsub__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rtruediv__', (Int, Float, Bool), (Int, Float\n )], ['__sub__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}\n ], ['__truediv__', (Int, Float, Bool), (Int, Float)], ['__and__', [Int,\n Bool], Int], ['__lshift__', [Int, Bool], Int], ['__or__', [Int, Bool],\n Int], ['__rand__', [Int, Bool], Int], ['__rlshift__', [Int, Bool], Int],\n ['__ror__', [Int, Bool], Int], ['__rrshift__', [Int, Bool], 
Int], [\n '__rshift__', [Int, Bool], Int], ['__rxor__', [Int, Bool], Int], [\n '__xor__', [Int, Bool], Int]]"], {}), "('operator, accepted_types, return_type', [[\n '__abs__', (), Int], ['__add__', (Int, Float, Bool), {Float: Float, Int:\n Int, Bool: Int}], ['__div__', (Int, Float, Bool), (Int, Float)], [\n '__divmod__', (Int, Float, Bool), {Float: Tuple[Float, Float], Int:\n Tuple[Int, Int], Bool: Tuple[Int, Int]}], ['__eq__', (Int, Float, Bool),\n Bool], ['__floordiv__', (Int, Float, Bool), {Float: Float, Int: Int,\n Bool: Int}], ['__ge__', (Int, Float, Bool), Bool], ['__gt__', (Int,\n Float, Bool), Bool], ['__invert__', (), Int], ['__le__', (Int, Float,\n Bool), Bool], ['__lt__', (Int, Float, Bool), Bool], ['__mod__', (Int,\n Float, Bool), {Float: Float, Int: Int, Bool: Int}], ['__mul__', (Int,\n Float, Bool), {Float: Float, Int: Int, Bool: Int}], ['__ne__', (Int,\n Float, Bool), Bool], ['__neg__', (), Int], ['__pos__', (), Int], [\n '__pow__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}], [\n '__radd__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}], [\n '__rdiv__', (Int, Float, Bool), (Int, Float)], ['__rdivmod__', (Int,\n Float, Bool), {Float: Tuple[Float, Float], Int: Tuple[Int, Int], Bool:\n Tuple[Int, Int]}], ['__rfloordiv__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rmod__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rmul__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rpow__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rsub__', (Int, Float, Bool), {Float: Float,\n Int: Int, Bool: Int}], ['__rtruediv__', (Int, Float, Bool), (Int, Float\n )], ['__sub__', (Int, Float, Bool), {Float: Float, Int: Int, Bool: Int}\n ], ['__truediv__', (Int, Float, Bool), (Int, Float)], ['__and__', [Int,\n Bool], Int], ['__lshift__', [Int, Bool], Int], ['__or__', [Int, Bool],\n Int], ['__rand__', [Int, Bool], Int], ['__rlshift__', [Int, Bool], Int],\n ['__ror__', [Int, 
Bool], Int], ['__rrshift__', [Int, Bool], Int], [\n '__rshift__', [Int, Bool], Int], ['__rxor__', [Int, Bool], Int], [\n '__xor__', [Int, Bool], Int]])\n", (4706, 6791), False, 'import pytest\n'), ((7684, 8928), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""operator, accepted_types, return_type"""', "[['__abs__', (), Float], ['__add__', (Int, Float, Bool), Float], ['__div__',\n (Int, Float, Bool), Float], ['__divmod__', (Int, Float, Bool), Tuple[\n Float, Float]], ['__eq__', (Int, Float, Bool), Bool], ['__floordiv__',\n (Int, Float, Bool), Float], ['__ge__', (Int, Float, Bool), Bool], [\n '__gt__', (Int, Float, Bool), Bool], ['__invert__', (), Float], [\n '__le__', (Int, Float, Bool), Bool], ['__lt__', (Int, Float, Bool),\n Bool], ['__mod__', (Int, Float, Bool), Float], ['__mul__', (Int, Float,\n Bool), Float], ['__ne__', (Int, Float, Bool), Bool], ['__neg__', (),\n Float], ['__pos__', (), Float], ['__pow__', (Int, Float, Bool), Float],\n ['__radd__', (Int, Float, Bool), Float], ['__rdiv__', (Int, Float, Bool\n ), Float], ['__rdivmod__', (Int, Float, Bool), Tuple[Float, Float]], [\n '__rfloordiv__', (Int, Float, Bool), Float], ['__rmod__', (Int, Float,\n Bool), Float], ['__rmul__', (Int, Float, Bool), Float], ['__rpow__', (\n Int, Float, Bool), Float], ['__rsub__', (Int, Float, Bool), Float], [\n '__rtruediv__', (Int, Float, Bool), Float], ['__sub__', (Int, Float,\n Bool), Float], ['__truediv__', (Int, Float, Bool), Float]]"], {}), "('operator, accepted_types, return_type', [[\n '__abs__', (), Float], ['__add__', (Int, Float, Bool), Float], [\n '__div__', (Int, Float, Bool), Float], ['__divmod__', (Int, Float, Bool\n ), Tuple[Float, Float]], ['__eq__', (Int, Float, Bool), Bool], [\n '__floordiv__', (Int, Float, Bool), Float], ['__ge__', (Int, Float,\n Bool), Bool], ['__gt__', (Int, Float, Bool), Bool], ['__invert__', (),\n Float], ['__le__', (Int, Float, Bool), Bool], ['__lt__', (Int, Float,\n Bool), Bool], ['__mod__', (Int, Float, Bool), Float], 
['__mul__', (Int,\n Float, Bool), Float], ['__ne__', (Int, Float, Bool), Bool], ['__neg__',\n (), Float], ['__pos__', (), Float], ['__pow__', (Int, Float, Bool),\n Float], ['__radd__', (Int, Float, Bool), Float], ['__rdiv__', (Int,\n Float, Bool), Float], ['__rdivmod__', (Int, Float, Bool), Tuple[Float,\n Float]], ['__rfloordiv__', (Int, Float, Bool), Float], ['__rmod__', (\n Int, Float, Bool), Float], ['__rmul__', (Int, Float, Bool), Float], [\n '__rpow__', (Int, Float, Bool), Float], ['__rsub__', (Int, Float, Bool),\n Float], ['__rtruediv__', (Int, Float, Bool), Float], ['__sub__', (Int,\n Float, Bool), Float], ['__truediv__', (Int, Float, Bool), Float]])\n", (7707, 8928), False, 'import pytest\n'), ((9544, 9667), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""op, exception"""', '[(operator.truth, TypeError), (operator.index, TypeError), (hex, TypeError)]'], {}), "('op, exception', [(operator.truth, TypeError), (\n operator.index, TypeError), (hex, TypeError)])\n", (9567, 9667), False, 'import pytest\n'), ((394, 423), 'pytest.raises', 'pytest.raises', (['ProxyTypeError'], {}), '(ProxyTypeError)\n', (407, 423), False, 'import pytest\n'), ((471, 500), 'pytest.raises', 'pytest.raises', (['ProxyTypeError'], {}), '(ProxyTypeError)\n', (484, 500), False, 'import pytest\n'), ((916, 945), 'pytest.raises', 'pytest.raises', (['ProxyTypeError'], {}), '(ProxyTypeError)\n', (929, 945), False, 'import pytest\n'), ((1028, 1124), 'pytest.raises', 'pytest.raises', (['ProxyTypeError'], {'match': '"""You need to convert it explicitly, like `Int\\\\(x\\\\)`"""'}), "(ProxyTypeError, match=\n 'You need to convert it explicitly, like `Int\\\\(x\\\\)`')\n", (1041, 1124), False, 'import pytest\n'), ((1192, 1290), 'pytest.raises', 'pytest.raises', (['ProxyTypeError'], {'match': '"""You need to convert it explicitly, like `Float\\\\(x\\\\)`"""'}), "(ProxyTypeError, match=\n 'You need to convert it explicitly, like `Float\\\\(x\\\\)`')\n", (1205, 1290), False, 'import 
pytest\n'), ((3258, 3269), 'numpy.uint8', 'np.uint8', (['(1)'], {}), '(1)\n', (3266, 3269), True, 'import numpy as np\n'), ((3283, 3295), 'numpy.uint16', 'np.uint16', (['(1)'], {}), '(1)\n', (3292, 3295), True, 'import numpy as np\n'), ((3309, 3321), 'numpy.uint32', 'np.uint32', (['(1)'], {}), '(1)\n', (3318, 3321), True, 'import numpy as np\n'), ((3335, 3347), 'numpy.uint64', 'np.uint64', (['(1)'], {}), '(1)\n', (3344, 3347), True, 'import numpy as np\n'), ((3361, 3371), 'numpy.int8', 'np.int8', (['(1)'], {}), '(1)\n', (3368, 3371), True, 'import numpy as np\n'), ((3385, 3396), 'numpy.int16', 'np.int16', (['(1)'], {}), '(1)\n', (3393, 3396), True, 'import numpy as np\n'), ((3410, 3421), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (3418, 3421), True, 'import numpy as np\n'), ((3435, 3446), 'numpy.int64', 'np.int64', (['(1)'], {}), '(1)\n', (3443, 3446), True, 'import numpy as np\n'), ((3643, 3656), 'numpy.float16', 'np.float16', (['(1)'], {}), '(1)\n', (3653, 3656), True, 'import numpy as np\n'), ((3658, 3671), 'numpy.float32', 'np.float32', (['(1)'], {}), '(1)\n', (3668, 3671), True, 'import numpy as np\n'), ((3673, 3686), 'numpy.float64', 'np.float64', (['(1)'], {}), '(1)\n', (3683, 3686), True, 'import numpy as np\n'), ((3877, 3901), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3890, 3901), False, 'import pytest\n'), ((3947, 3971), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (3960, 3971), False, 'import pytest\n'), ((4017, 4041), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (4030, 4041), False, 'import pytest\n'), ((9765, 9789), 'pytest.raises', 'pytest.raises', (['exception'], {}), '(exception)\n', (9778, 9789), False, 'import pytest\n'), ((3921, 3932), 'numpy.int32', 'np.int32', (['(1)'], {}), '(1)\n', (3929, 3932), True, 'import numpy as np\n'), ((3989, 4002), 'numpy.float64', 'np.float64', (['(1)'], {}), '(1)\n', (3999, 4002), True, 'import numpy as np\n'), ((4059, 
4086), 'numpy.datetime64', 'np.datetime64', (['"""2020-01-01"""'], {}), "('2020-01-01')\n", (4072, 4086), True, 'import numpy as np\n')] |
#!/usr/bin/env python
"""
Calculate statistical results for FITS images
"""
import os
import sys
import re
import argparse
import logging
import textwrap
import os.path
from astropy.io import fits
from astropy import stats
import numpy as np
# put parent directory into sys.path
bp = os.path.dirname(os.path.realpath(__file__)).split(os.sep)
modpath = os.sep.join(bp[:-1] + ["lib"])
sys.path.insert(0, modpath)
# local imports
try:
import imutils as iu
import mutils as mu
except ImportError as e:
logging.error("Import failed: %s", e)
sys.exit(1)
def parse_args():
"""handle command line"""
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent(
"""\
Calculate statistical quantities for image
"""
),
epilog=textwrap.dedent(
"""\
"""
),
)
parser.add_argument(
"fitsfile", nargs="+", metavar="file", help="input fits file(s)"
)
parser.add_argument(
"--quicklook",
action="store_true",
help="estimate signal, noise, counts/sec in adus",
)
sgroup = parser.add_argument_group(
"stats", "select statistics and regions" " (exclusive of quicklook)"
)
sgroup.add_argument(
"--region", nargs="+", metavar="reg", help='2d-slicespec: "rows,cols"'
)
sgroup.add_argument(
"--datasec", action="store_true", help="perform stats on DATASEC region"
)
sgroup.add_argument(
"--overscan",
action="store_true",
help="perform stats on serial overscan region",
)
sgroup.add_argument(
"--poverscan",
action="store_true",
help="perform stats on parllel overscan region",
)
sgroup.add_argument(
"--stats",
nargs="+",
metavar="stat",
help="select from: {mean median stddev min max}",
)
# ---------------------------------------------------------------
hgroup = parser.add_mutually_exclusive_group()
hgroup.add_argument(
"--hduname", nargs="+", metavar="idn", help="process HDU list by names"
)
hgroup.add_argument(
"--hduindex", nargs="+", type=int, metavar="idx", help="process HDU list by ids"
)
# ---------------------------------------------------------------
parser.add_argument(
"--bias",
action="store_true",
help="auto bias estimate removal by CCD type (itl, e2v)",
)
parser.add_argument(
"--sbias",
nargs="?",
const="byrow",
choices=[
"mean",
"median",
"byrow",
"byrowe2v",
"byrowsmooth",
"byrowsmoothe2v",
],
help="perform bias estimate removal using serial overscan",
)
parser.add_argument(
"--pbias",
nargs="?",
const="bycol",
choices=["mean", "median", "bycol", "bycolfilter", "bycolsmooth"],
help="perform bias estimate removal using par overscan",
)
parser.add_argument(
"--rstats",
action="store_true",
help="use sigma_clipped_stats() for avg,med,std",
)
parser.add_argument(
"--tearing",
nargs="?",
metavar="nrows",
const="datasec",
help="add tearing metric:" " nrows|divisdero|datasec(default)",
)
parser.add_argument(
"--dipoles", action="store_true", help="add dipole metric to quicklook output"
)
parser.add_argument(
"--threshold",
nargs=1,
metavar="thresh",
type=float,
help="count number of pixels above threshold",
)
parser.add_argument(
"--info", action="store_true", help="print the info() table summarizing file"
)
parser.add_argument(
"--debug", action="store_true", help="print additional debugging messages"
)
parser.add_argument(
"--noheadings",
action="store_true",
default=False,
help="Don't print column heads for stats",
)
return parser.parse_args()
def imstat():
"""main logic:"""
optlist = parse_args()
mu.init_logging(optlist.debug)
mu.init_warnings()
ncalls.counter = 0
# begin processing -- loop over files
for ffile in optlist.fitsfile:
try:
hdulist = fits.open(ffile)
except IOError as ioerr:
logging.error("IOError: %s", ioerr)
sys.exit(1)
if optlist.info: # just print the image info per file
hdulist.info()
continue
if optlist.bias: # auto set [sp]bias, overriding existing
try:
optlist.sbias, optlist.pbias = iu.auto_biastype(hdulist)
except KeyError as kerr:
logging.error(kerr)
sys.exit(1)
except ValueError as verr:
logging.error(verr)
sys.exit(1)
if not optlist.noheadings: # print filename
print("#")
print("# {}".format(os.path.basename(ffile)))
# Construct a list of the HDU's to work on
hduids = iu.get_requested_image_hduids(
hdulist, optlist.hduname, optlist.hduindex
)
if optlist.quicklook:
quicklook(optlist, hduids, hdulist)
else:
stats_proc(optlist, hduids, hdulist)
ncalls.counter = 0 # reset per file, triggers headers
def stats_proc(optlist, hduids, hdulist):
"""print statistics for region according to options"""
# Process each HDU in the list "hduids"
for hduid in hduids:
hdu = hdulist[hduid]
name = hdu.name
if not optlist.sbias and not optlist.pbias:
pass
else:
iu.subtract_bias(optlist.sbias, optlist.pbias, hdu)
slices = []
(datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
if optlist.datasec:
slices.append(datasec)
if optlist.overscan:
slices.append(soscan)
if optlist.poverscan:
slices.append(poscan)
if optlist.region:
for reg in optlist.region: # if there are regions
logging.debug("processing %s", reg)
slice_spec = iu.parse_region(reg)
if slice_spec:
slices.append(slice_spec)
else:
logging.error("skipping region %s", reg)
if len(slices) == 0:
stats_print(optlist, hduid, name, hdu.data, None)
for slice_spec in slices:
y1, y2 = slice_spec[0].start or "", slice_spec[0].stop or ""
x1, x2 = slice_spec[1].start or "", slice_spec[1].stop or ""
reg = "{}:{},{}:{}".format(y1, y2, x1, x2)
stats_print(optlist, hduid, name, hdu.data[slice_spec], reg)
def stats_print(optlist, sid, name, buf, reg):
"""perform and print the given statistics quantities"""
if not optlist.stats:
optlist.stats = ["mean", "median", "stddev", "min", "max"]
if optlist.rstats:
mean_str, median_str, stddev_str = "rmean", "rmedian", "rstddev"
else:
mean_str, median_str, stddev_str = "mean", "median", "stddev"
if not optlist.noheadings and ncalls.counter == 0:
print("#{:>3s} {:>9s}".format("id", "HDUname"), end="")
if [stat for stat in optlist.stats if re.match(r"^mea", stat)]:
print(" {:>9s}".format(mean_str), end="")
if [stat for stat in optlist.stats if re.match(r"^med", stat)]:
print(" {:>9s}".format(median_str), end="")
if [stat for stat in optlist.stats if re.match(r"^std", stat)]:
print(" {:>8s}".format(stddev_str), end="")
if [stat for stat in optlist.stats if re.match(r"^min", stat)]:
print(" {:>9s}".format("min"), end="")
if [stat for stat in optlist.stats if re.match(r"^max", stat)]:
print(" {:>9s}".format("max"), end="")
if reg:
print(" {:20s}".format("region"), end="")
print("") # newline)
if not optlist.noheadings:
print(" {:3d} {:>9s}".format(sid, name), end="")
if optlist.rstats:
avg, med, std = stats.sigma_clipped_stats(buf, sigma=2.7)
else:
avg, med, std = np.mean(buf), np.median(buf), np.std(buf)
if [stat for stat in optlist.stats if re.match(r"^mea", stat)]:
print(" {:>9.6g}".format(avg), end="")
if [stat for stat in optlist.stats if re.match(r"^med", stat)]:
print(" {:>9.6g}".format(med), end="")
if [stat for stat in optlist.stats if re.match(r"^std", stat)]:
print(" {:>8.4g}".format(std), end="")
if [stat for stat in optlist.stats if re.match(r"^min", stat)]:
print(" {:>9.6g}".format(np.min(buf)), end="")
if [stat for stat in optlist.stats if re.match(r"^max", stat)]:
print(" {:>9.6g}".format(np.max(buf)), end="")
if reg:
reg = re.sub(r"^\[*([^\]]*)\]*$", r"\1", reg)
print(" {:20s}".format(reg), end="")
print("") # newline)
ncalls() # track call count, acts like static variable)
def quicklook(optlist, hduids, hdulist):
"""print quicklook for hdu's according to options"""
try:
expt = float(hdulist[0].header["EXPTIME"])
except KeyError as ke:
try:
expt = float(hdulist[0].header["DARKTIME"])
except KeyError as ke:
logging.warning(
"EXPTIME|DARKTIME non in header, adu/sec won't be available"
)
expt = 0
# perform and print the given statistics quantities
# fields are: mean, bias, signal, noise, adu/s
quick_fields = [
"mean",
"bias",
"signal",
"noise",
"adu/sec",
"eper:s-cte",
"eper:p-cte",
]
if optlist.tearing:
quick_fields.append("tearing")
if optlist.dipoles:
quick_fields.append("dipoles")
if optlist.threshold:
quick_fields.append("threshold")
for hduid in hduids:
#
hdu = hdulist[hduid]
name = hdu.name
if not optlist.sbias and not optlist.pbias:
pass
else:
iu.subtract_bias(optlist.sbias, optlist.pbias, hdu)
# get datasec, serial overscan, parallel overscan as slices
(datasec, soscan, poscan) = iu.get_data_oscan_slices(hdu)
if not datasec or not soscan or not poscan:
logging.error("Could not get DATASEC or overscan specs for %s", name)
sys.exit(1)
if optlist.rstats:
median_str, bias_str, noise_str = "rmedian", "rbias", "rnoise"
else:
median_str, bias_str, noise_str = "median", "bias", "noise"
if not optlist.noheadings and ncalls.counter == 0:
print("#{:>3s} {:>9s}".format("id", "HDUname"), end="")
if "mean" in quick_fields:
print(" {:>9s}".format(median_str), end="")
if "bias" in quick_fields:
print(" {:>9s}".format(bias_str), end="")
if "signal" in quick_fields:
print(" {:>9s}".format("signal"), end="")
if "noise" in quick_fields:
print(" {:>8s}".format(noise_str), end="")
if "adu/sec" in quick_fields and expt > 0:
print("{:>9s}".format("adu/sec"), end="")
if "eper:s-cte" in quick_fields:
print("{:>9s}".format("s-cte"), end="")
if "eper:p-cte" in quick_fields:
print("{:>9s}".format("p-cte"), end="")
if "tearing" in quick_fields:
if re.match(r"^data", optlist.tearing):
trows = int(datasec[0].stop - 1)
elif re.match(r"^div", optlist.tearing):
trows = 100
else:
trows = int(optlist.tearing)
print(" {:s}({:>4d}r){:s}".format("tml", trows, "tmr"), end="")
if "dipoles" in quick_fields:
print("{:>9s}".format("%dipoles"), end="")
if "threshold" in quick_fields:
print("{:>9s}".format("N>thresh"), end="")
print("") # newline)
if not optlist.noheadings:
print(" {:3d} {:>9s}".format(hduid, name), end="")
# noise evaluated in smaller region to avoid any gradient effects
y0 = int(0.6 * datasec[0].start) + int(0.4 * datasec[0].stop)
y1 = int(0.4 * datasec[0].start) + int(0.6 * datasec[0].stop)
sx0 = int(0.95 * soscan[1].start) + int(0.05 * soscan[1].stop)
if optlist.rstats:
avg, med, std = stats.sigma_clipped_stats(hdu.data[datasec])
sig_mean = med
avg, med, std = stats.sigma_clipped_stats(hdu.data[soscan])
bias_mean = med
avg, med, std = stats.sigma_clipped_stats(hdu.data[y0:y1, sx0:])
noise = std
else:
sig_mean = np.median(hdu.data[datasec])
bias_mean = np.median(hdu.data[soscan])
noise = np.std(hdu.data[y0:y1, sx0:])
if "mean" in quick_fields:
print(" {:>9.6g}".format(sig_mean), end="")
if "bias" in quick_fields:
print(" {:>9.5g}".format(bias_mean), end="")
if "signal" in quick_fields:
signal = sig_mean - bias_mean
print(" {:>9.6g}".format(signal), end="")
if "noise" in quick_fields:
print(" {:>8.3f}".format(noise), end="")
if "adu/sec" in quick_fields and expt > 0:
print(" {:>8.3f}".format(float(signal) / expt), end="")
if "eper:s-cte" in quick_fields:
logging.debug("s-cte------------------")
if signal < 5.0 * noise:
print(" {:>8s}".format("None"), end="")
else:
scte = iu.eper_serial(hdu)
if scte:
print(" {:>8.6f}".format(scte), end="")
else:
print(" {:>8s}".format("None"), end="")
# ---------
if "eper:p-cte" in quick_fields:
logging.debug("p-cte------------------")
if signal < 5.0 * noise:
print(" {:>8s}".format("None"), end="")
else:
pcte = iu.eper_parallel(hdu)
if pcte:
print(" {:>8.6f}".format(pcte), end="")
else:
print(" {:>8s}".format("None"), end="")
# ---------
if "tearing" in quick_fields:
logging.debug("tearing check----------")
tml, tmr = tearing_metric(hdu.data[datasec], trows)
print(" {:>5.2f} {:>5.2f}".format(tml, tmr), end="")
# ---------
if "dipoles" in quick_fields:
logging.debug("dipoles check----------")
ndipole = count_dipoles(hdu.data[datasec])
print(
"{:>9.2f}".format(
100.0 * float(2 * ndipole) / (np.size(hdu.data[datasec]))
),
end="",
)
# ---------
if "threshold" in quick_fields:
logging.debug("threshold check----------")
print(
"{:>9d}".format(
np.count_nonzero(hdu.data[datasec] > optlist.threshold)
),
end="",
)
# ---------
print("") # newline)
ncalls() # track call count, acts like static variable)
def tearing_metric(buf, trows):
    """
    Estimate a tearing metric for one segment (w/out pre/over-scan) of an
    lsst ccd.

    For each edge column, the deviation of the edge pixel from the mean of
    the ~47 nearby pixels in the same row is expressed in units of that
    row's stddev.  The returned (tml, tmr) are the fractions of rows whose
    left/right edge pixel deviates by at least 1.5 stddev.
    If (tml, tmr) are > O(0.5) then tearing may be present.
    If they are well below 0.5 it is very unlikely.

    NOTE(review): the original computed the fraction with np.searchsorted()
    on an *unsorted* deviation array, whose result is undefined; counting
    directly implements the formula's evident intent.
    """

    def edge_fraction(edge_col, block):
        # per-row stats of the nearby pixels
        row_mean = np.mean(block, axis=1)
        row_std = np.std(block, axis=1)
        # edge-pixel deviation in units of the row stddev
        dev = np.abs((1.0 * edge_col - row_mean) / row_std)
        # fraction of rows deviating by >= 1.5 stddev
        return 1.0 * np.count_nonzero(dev >= 1.5) / np.size(dev)

    tml = edge_fraction(buf[10:trows, 0], buf[10:trows, 3:50])    # left side
    tmr = edge_fraction(buf[10:trows, -1], buf[10:trows, -50:-3])  # right side
    return (tml, tmr)
def count_dipoles(buf):
    """
    Count "dipoles" in one segment (w/out pre/over-scan) of an lsst ccd.

    Method:
    -- use upper ~10% of array rows
    -- flatten in column major order
    -- scale array in units of (sigma-clipped) stdev from the mean
    -- count adjacent pairs that flip sign AND differ by more than 5 stdev

    Returns the dipole count as an int.
    """
    nrows = np.shape(buf)[0]
    logging.debug("count_dipoles():using subarray [%s:%s,:]", -int(nrows / 10), -1)
    arr = buf[-int(nrows / 10) : -1, :].flatten("F")
    avg, med, std = stats.sigma_clipped_stats(arr)
    logging.debug("clipped stats: avg:%.3g med:%s stdev:%.3g", avg, med, std)
    arr = (arr - avg) / std
    # vectorized pair test replaces the original per-element python loop:
    # adjacent values of opposite sign whose jump exceeds 5 stdev
    sign_flip = np.sign(arr[1:] * arr[:-1]) == -1
    big_jump = np.abs(np.diff(arr)) > 5
    return int(np.count_nonzero(sign_flip & big_jump))
def ncalls():
    """Maintain a call counter in the ``ncalls.counter`` attribute.

    The function attribute acts like a static variable.  It is created on
    first use, so callers no longer need to pre-initialize it (the original
    raised AttributeError if ``ncalls.counter`` was never set).
    """
    # getattr() with a default guards the very first call
    ncalls.counter = getattr(ncalls, "counter", 0) + 1
# Entry point: run the image-statistics driver (imstat() is defined
# earlier in this file) when the module is executed as a script.
if __name__ == "__main__":
    imstat()
| [
"numpy.abs",
"astropy.stats.sigma_clipped_stats",
"mutils.init_logging",
"numpy.shape",
"numpy.mean",
"imutils.subtract_bias",
"imutils.get_requested_image_hduids",
"logging.error",
"numpy.std",
"logging.warning",
"mutils.init_warnings",
"numpy.max",
"imutils.auto_biastype",
"os.sep.join",... | [((354, 384), 'os.sep.join', 'os.sep.join', (["(bp[:-1] + ['lib'])"], {}), "(bp[:-1] + ['lib'])\n", (365, 384), False, 'import os\n'), ((385, 412), 'sys.path.insert', 'sys.path.insert', (['(0)', 'modpath'], {}), '(0, modpath)\n', (400, 412), False, 'import sys\n'), ((4240, 4270), 'mutils.init_logging', 'mu.init_logging', (['optlist.debug'], {}), '(optlist.debug)\n', (4255, 4270), True, 'import mutils as mu\n'), ((4275, 4293), 'mutils.init_warnings', 'mu.init_warnings', ([], {}), '()\n', (4291, 4293), True, 'import mutils as mu\n'), ((15993, 16029), 'numpy.mean', 'np.mean', (['buf[10:trows, 3:50]'], {'axis': '(1)'}), '(buf[10:trows, 3:50], axis=1)\n', (16000, 16029), True, 'import numpy as np\n'), ((16041, 16076), 'numpy.std', 'np.std', (['buf[10:trows, 3:50]'], {'axis': '(1)'}), '(buf[10:trows, 3:50], axis=1)\n', (16047, 16076), True, 'import numpy as np\n'), ((16087, 16132), 'numpy.abs', 'np.abs', (['((1.0 * buf[10:trows, 0] - arr) / astd)'], {}), '((1.0 * buf[10:trows, 0] - arr) / astd)\n', (16093, 16132), True, 'import numpy as np\n'), ((16258, 16296), 'numpy.mean', 'np.mean', (['buf[10:trows, -50:-3]'], {'axis': '(1)'}), '(buf[10:trows, -50:-3], axis=1)\n', (16265, 16296), True, 'import numpy as np\n'), ((16308, 16345), 'numpy.std', 'np.std', (['buf[10:trows, -50:-3]'], {'axis': '(1)'}), '(buf[10:trows, -50:-3], axis=1)\n', (16314, 16345), True, 'import numpy as np\n'), ((16356, 16402), 'numpy.abs', 'np.abs', (['((1.0 * buf[10:trows, -1] - arr) / astd)'], {}), '((1.0 * buf[10:trows, -1] - arr) / astd)\n', (16362, 16402), True, 'import numpy as np\n'), ((16849, 16862), 'numpy.shape', 'np.shape', (['buf'], {}), '(buf)\n', (16857, 16862), True, 'import numpy as np\n'), ((17020, 17050), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['arr'], {}), '(arr)\n', (17045, 17050), False, 'from astropy import stats\n'), ((17055, 17128), 'logging.debug', 'logging.debug', (['"""clipped stats: avg:%.3g med:%s stdev:%.3g"""', 'avg', 
'med', 'std'], {}), "('clipped stats: avg:%.3g med:%s stdev:%.3g', avg, med, std)\n", (17068, 17128), False, 'import logging\n'), ((514, 551), 'logging.error', 'logging.error', (['"""Import failed: %s"""', 'e'], {}), "('Import failed: %s', e)\n", (527, 551), False, 'import logging\n'), ((556, 567), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (564, 567), False, 'import sys\n'), ((5225, 5298), 'imutils.get_requested_image_hduids', 'iu.get_requested_image_hduids', (['hdulist', 'optlist.hduname', 'optlist.hduindex'], {}), '(hdulist, optlist.hduname, optlist.hduindex)\n', (5254, 5298), True, 'import imutils as iu\n'), ((5953, 5982), 'imutils.get_data_oscan_slices', 'iu.get_data_oscan_slices', (['hdu'], {}), '(hdu)\n', (5977, 5982), True, 'import imutils as iu\n'), ((8290, 8331), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['buf'], {'sigma': '(2.7)'}), '(buf, sigma=2.7)\n', (8315, 8331), False, 'from astropy import stats\n'), ((9027, 9068), 're.sub', 're.sub', (['"""^\\\\[*([^\\\\]]*)\\\\]*$"""', '"""\\\\1"""', 'reg'], {}), "('^\\\\[*([^\\\\]]*)\\\\]*$', '\\\\1', reg)\n", (9033, 9068), False, 'import re\n'), ((10428, 10457), 'imutils.get_data_oscan_slices', 'iu.get_data_oscan_slices', (['hdu'], {}), '(hdu)\n', (10452, 10457), True, 'import imutils as iu\n'), ((16218, 16230), 'numpy.size', 'np.size', (['arr'], {}), '(arr)\n', (16225, 16230), True, 'import numpy as np\n'), ((16480, 16492), 'numpy.size', 'np.size', (['arr'], {}), '(arr)\n', (16487, 16492), True, 'import numpy as np\n'), ((302, 328), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (318, 328), False, 'import os\n'), ((738, 862), 'textwrap.dedent', 'textwrap.dedent', (['""" Calculate statistical quantities for image\n """'], {}), '(\n """ Calculate statistical quantities for image\n """\n )\n', (753, 862), False, 'import textwrap\n'), ((893, 943), 'textwrap.dedent', 'textwrap.dedent', (['""" """'], {}), "(' ')\n", (908, 943), False, 'import textwrap\n'), 
((4429, 4445), 'astropy.io.fits.open', 'fits.open', (['ffile'], {}), '(ffile)\n', (4438, 4445), False, 'from astropy.io import fits\n'), ((5845, 5896), 'imutils.subtract_bias', 'iu.subtract_bias', (['optlist.sbias', 'optlist.pbias', 'hdu'], {}), '(optlist.sbias, optlist.pbias, hdu)\n', (5861, 5896), True, 'import imutils as iu\n'), ((8366, 8378), 'numpy.mean', 'np.mean', (['buf'], {}), '(buf)\n', (8373, 8378), True, 'import numpy as np\n'), ((8380, 8394), 'numpy.median', 'np.median', (['buf'], {}), '(buf)\n', (8389, 8394), True, 'import numpy as np\n'), ((8396, 8407), 'numpy.std', 'np.std', (['buf'], {}), '(buf)\n', (8402, 8407), True, 'import numpy as np\n'), ((8451, 8473), 're.match', 're.match', (['"""^mea"""', 'stat'], {}), "('^mea', stat)\n", (8459, 8473), False, 'import re\n'), ((8566, 8588), 're.match', 're.match', (['"""^med"""', 'stat'], {}), "('^med', stat)\n", (8574, 8588), False, 'import re\n'), ((8681, 8703), 're.match', 're.match', (['"""^std"""', 'stat'], {}), "('^std', stat)\n", (8689, 8703), False, 'import re\n'), ((8796, 8818), 're.match', 're.match', (['"""^min"""', 'stat'], {}), "('^min', stat)\n", (8804, 8818), False, 'import re\n'), ((8919, 8941), 're.match', 're.match', (['"""^max"""', 'stat'], {}), "('^max', stat)\n", (8927, 8941), False, 'import re\n'), ((10271, 10322), 'imutils.subtract_bias', 'iu.subtract_bias', (['optlist.sbias', 'optlist.pbias', 'hdu'], {}), '(optlist.sbias, optlist.pbias, hdu)\n', (10287, 10322), True, 'import imutils as iu\n'), ((10522, 10591), 'logging.error', 'logging.error', (['"""Could not get DATASEC or overscan specs for %s"""', 'name'], {}), "('Could not get DATASEC or overscan specs for %s', name)\n", (10535, 10591), False, 'import logging\n'), ((10604, 10615), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (10612, 10615), False, 'import sys\n'), ((12712, 12756), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['hdu.data[datasec]'], {}), '(hdu.data[datasec])\n', (12737, 12756), False, 'from 
astropy import stats\n'), ((12812, 12855), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['hdu.data[soscan]'], {}), '(hdu.data[soscan])\n', (12837, 12855), False, 'from astropy import stats\n'), ((12912, 12960), 'astropy.stats.sigma_clipped_stats', 'stats.sigma_clipped_stats', (['hdu.data[y0:y1, sx0:]'], {}), '(hdu.data[y0:y1, sx0:])\n', (12937, 12960), False, 'from astropy import stats\n'), ((13022, 13050), 'numpy.median', 'np.median', (['hdu.data[datasec]'], {}), '(hdu.data[datasec])\n', (13031, 13050), True, 'import numpy as np\n'), ((13075, 13102), 'numpy.median', 'np.median', (['hdu.data[soscan]'], {}), '(hdu.data[soscan])\n', (13084, 13102), True, 'import numpy as np\n'), ((13123, 13152), 'numpy.std', 'np.std', (['hdu.data[y0:y1, sx0:]'], {}), '(hdu.data[y0:y1, sx0:])\n', (13129, 13152), True, 'import numpy as np\n'), ((13731, 13771), 'logging.debug', 'logging.debug', (['"""s-cte------------------"""'], {}), "('s-cte------------------')\n", (13744, 13771), False, 'import logging\n'), ((14166, 14206), 'logging.debug', 'logging.debug', (['"""p-cte------------------"""'], {}), "('p-cte------------------')\n", (14179, 14206), False, 'import logging\n'), ((14600, 14640), 'logging.debug', 'logging.debug', (['"""tearing check----------"""'], {}), "('tearing check----------')\n", (14613, 14640), False, 'import logging\n'), ((14843, 14883), 'logging.debug', 'logging.debug', (['"""dipoles check----------"""'], {}), "('dipoles check----------')\n", (14856, 14883), False, 'import logging\n'), ((15200, 15242), 'logging.debug', 'logging.debug', (['"""threshold check----------"""'], {}), "('threshold check----------')\n", (15213, 15242), False, 'import logging\n'), ((16189, 16214), 'numpy.searchsorted', 'np.searchsorted', (['arr', '(1.5)'], {}), '(arr, 1.5)\n', (16204, 16214), True, 'import numpy as np\n'), ((16451, 16476), 'numpy.searchsorted', 'np.searchsorted', (['arr', '(1.5)'], {}), '(arr, 1.5)\n', (16466, 16476), True, 'import numpy as np\n'), 
((17195, 17207), 'numpy.size', 'np.size', (['arr'], {}), '(arr)\n', (17202, 17207), True, 'import numpy as np\n'), ((4491, 4526), 'logging.error', 'logging.error', (['"""IOError: %s"""', 'ioerr'], {}), "('IOError: %s', ioerr)\n", (4504, 4526), False, 'import logging\n'), ((4539, 4550), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4547, 4550), False, 'import sys\n'), ((4793, 4818), 'imutils.auto_biastype', 'iu.auto_biastype', (['hdulist'], {}), '(hdulist)\n', (4809, 4818), True, 'import imutils as iu\n'), ((6279, 6314), 'logging.debug', 'logging.debug', (['"""processing %s"""', 'reg'], {}), "('processing %s', reg)\n", (6292, 6314), False, 'import logging\n'), ((6344, 6364), 'imutils.parse_region', 'iu.parse_region', (['reg'], {}), '(reg)\n', (6359, 6364), True, 'import imutils as iu\n'), ((7470, 7492), 're.match', 're.match', (['"""^mea"""', 'stat'], {}), "('^mea', stat)\n", (7478, 7492), False, 'import re\n'), ((7596, 7618), 're.match', 're.match', (['"""^med"""', 'stat'], {}), "('^med', stat)\n", (7604, 7618), False, 'import re\n'), ((7724, 7746), 're.match', 're.match', (['"""^std"""', 'stat'], {}), "('^std', stat)\n", (7732, 7746), False, 'import re\n'), ((7852, 7874), 're.match', 're.match', (['"""^min"""', 'stat'], {}), "('^min', stat)\n", (7860, 7874), False, 'import re\n'), ((7975, 7997), 're.match', 're.match', (['"""^max"""', 'stat'], {}), "('^max', stat)\n", (7983, 7997), False, 'import re\n'), ((8855, 8866), 'numpy.min', 'np.min', (['buf'], {}), '(buf)\n', (8861, 8866), True, 'import numpy as np\n'), ((8978, 8989), 'numpy.max', 'np.max', (['buf'], {}), '(buf)\n', (8984, 8989), True, 'import numpy as np\n'), ((11703, 11737), 're.match', 're.match', (['"""^data"""', 'optlist.tearing'], {}), "('^data', optlist.tearing)\n", (11711, 11737), False, 'import re\n'), ((13906, 13925), 'imutils.eper_serial', 'iu.eper_serial', (['hdu'], {}), '(hdu)\n', (13920, 13925), True, 'import imutils as iu\n'), ((14341, 14362), 'imutils.eper_parallel', 'iu.eper_parallel', 
(['hdu'], {}), '(hdu)\n', (14357, 14362), True, 'import imutils as iu\n'), ((16174, 16186), 'numpy.size', 'np.size', (['arr'], {}), '(arr)\n', (16181, 16186), True, 'import numpy as np\n'), ((16436, 16448), 'numpy.size', 'np.size', (['arr'], {}), '(arr)\n', (16443, 16448), True, 'import numpy as np\n'), ((17226, 17254), 'numpy.sign', 'np.sign', (['(arr[i + 1] * arr[i])'], {}), '(arr[i + 1] * arr[i])\n', (17233, 17254), True, 'import numpy as np\n'), ((4872, 4891), 'logging.error', 'logging.error', (['kerr'], {}), '(kerr)\n', (4885, 4891), False, 'import logging\n'), ((4908, 4919), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (4916, 4919), False, 'import sys\n'), ((4975, 4994), 'logging.error', 'logging.error', (['verr'], {}), '(verr)\n', (4988, 4994), False, 'import logging\n'), ((5011, 5022), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5019, 5022), False, 'import sys\n'), ((5131, 5154), 'os.path.basename', 'os.path.basename', (['ffile'], {}), '(ffile)\n', (5147, 5154), False, 'import os\n'), ((6484, 6524), 'logging.error', 'logging.error', (['"""skipping region %s"""', 'reg'], {}), "('skipping region %s', reg)\n", (6497, 6524), False, 'import logging\n'), ((9499, 9576), 'logging.warning', 'logging.warning', (['"""EXPTIME|DARKTIME non in header, adu/sec won\'t be available"""'], {}), '("EXPTIME|DARKTIME non in header, adu/sec won\'t be available")\n', (9514, 9576), False, 'import logging\n'), ((11814, 11847), 're.match', 're.match', (['"""^div"""', 'optlist.tearing'], {}), "('^div', optlist.tearing)\n", (11822, 11847), False, 'import re\n'), ((15315, 15370), 'numpy.count_nonzero', 'np.count_nonzero', (['(hdu.data[datasec] > optlist.threshold)'], {}), '(hdu.data[datasec] > optlist.threshold)\n', (15331, 15370), True, 'import numpy as np\n'), ((15043, 15069), 'numpy.size', 'np.size', (['hdu.data[datasec]'], {}), '(hdu.data[datasec])\n', (15050, 15069), True, 'import numpy as np\n')] |
import datetime
import os
import logging
import torch as th
import numpy as np
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import ttools
from dps_3d import datasets
from dps_3d.interfaces import VectorizerInterface
from dps_3d.models import PrimsModel
# Module-level logger for training progress/info messages.
LOG = logging.getLogger(__name__)
# Seed torch and numpy (and force deterministic cuDNN kernels) so that
# training runs are reproducible.
th.manual_seed(123)
th.backends.cudnn.deterministic = True
np.random.seed(123)
def _worker_init_fn(worker_id):
np.random.seed(worker_id)
def main(args):
    """Assemble the data pipeline, model, and ttools trainer, then train."""

    def _summary_writer(fmt):
        # one timestamped event directory per phase under
        # <checkpoint_dir>/summaries so successive runs do not collide
        stamp = datetime.datetime.now().strftime(fmt)
        path = os.path.join(args.checkpoint_dir, 'summaries', stamp)
        return SummaryWriter(path, flush_secs=1)

    # training / validation data
    train_set = datasets.ShapenetDataset(args.data, args.canvas_size)
    train_loader = DataLoader(
        train_set,
        batch_size=args.bs,
        num_workers=args.num_worker_threads,
        worker_init_fn=_worker_init_fn,
        shuffle=True,
        drop_last=True,
    )
    LOG.info(train_set)
    val_set = datasets.ShapenetDataset(args.data, args.canvas_size, val=True)
    val_loader = DataLoader(val_set)

    # model + checkpoint restore (resume from the latest saved epoch if any)
    out_per_prim = 11 if args.rounded else 10
    model = PrimsModel(output_dim=out_per_prim * args.n_primitives)
    checkpointer = ttools.Checkpointer(args.checkpoint_dir, model)
    extras, meta = checkpointer.load_latest()
    starting_epoch = extras['epoch'] if extras is not None else None

    interface = VectorizerInterface(
        model, args.lr, args.n_primitives, args.canvas_size,
        args.w_surface, args.w_alignment, args.csg, args.rounded,
        cuda=args.cuda,
    )

    keys = ['loss', 'surfaceloss', 'alignmentloss']
    writer = _summary_writer('train-%m%d%y-%H%M%S')
    val_writer = _summary_writer('val-%m%d%y-%H%M%S')

    trainer = ttools.Trainer(interface)
    trainer.add_callback(ttools.callbacks.TensorBoardLoggingCallback(
        keys=keys, writer=writer, val_writer=val_writer, frequency=5))
    trainer.add_callback(ttools.callbacks.ProgressBarCallback(keys=keys))
    trainer.add_callback(ttools.callbacks.CheckpointingCallback(
        checkpointer, interval=None, max_epochs=2))
    trainer.train(train_loader, num_epochs=args.num_epochs,
                  val_dataloader=val_loader, starting_epoch=starting_epoch)
if __name__ == '__main__':
    # CLI: extend ttools' standard training arguments (data/checkpoint
    # paths, bs, lr, num_epochs, debug, ...) with model-specific options.
    parser = ttools.BasicArgumentParser()
    parser.add_argument("--w_surface", type=float, default=1)
    parser.add_argument("--w_alignment", type=float, default=0.001)
    parser.add_argument("--canvas_size", type=int, default=64)
    parser.add_argument("--n_primitives", type=int, default=16)
    parser.add_argument("--csg", default=False, dest='csg', action='store_true')
    parser.add_argument("--rounded", default=False, dest='rounded', action='store_true')
    # defaults for the standard ttools options
    parser.set_defaults(num_worker_threads=16, bs=16, lr=1e-4)
    args = parser.parse_args()
    ttools.set_logger(args.debug)
    main(args)
| [
"ttools.callbacks.CheckpointingCallback",
"ttools.Trainer",
"numpy.random.seed",
"torch.utils.data.DataLoader",
"ttools.callbacks.TensorBoardLoggingCallback",
"torch.manual_seed",
"dps_3d.datasets.ShapenetDataset",
"dps_3d.models.PrimsModel",
"datetime.datetime.now",
"dps_3d.interfaces.VectorizerI... | [((309, 336), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (326, 336), False, 'import logging\n'), ((338, 357), 'torch.manual_seed', 'th.manual_seed', (['(123)'], {}), '(123)\n', (352, 357), True, 'import torch as th\n'), ((397, 416), 'numpy.random.seed', 'np.random.seed', (['(123)'], {}), '(123)\n', (411, 416), True, 'import numpy as np\n'), ((455, 480), 'numpy.random.seed', 'np.random.seed', (['worker_id'], {}), '(worker_id)\n', (469, 480), True, 'import numpy as np\n'), ((510, 563), 'dps_3d.datasets.ShapenetDataset', 'datasets.ShapenetDataset', (['args.data', 'args.canvas_size'], {}), '(args.data, args.canvas_size)\n', (534, 563), False, 'from dps_3d import datasets\n'), ((581, 720), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'args.bs', 'num_workers': 'args.num_worker_threads', 'worker_init_fn': '_worker_init_fn', 'shuffle': '(True)', 'drop_last': '(True)'}), '(data, batch_size=args.bs, num_workers=args.num_worker_threads,\n worker_init_fn=_worker_init_fn, shuffle=True, drop_last=True)\n', (591, 720), False, 'from torch.utils.data import DataLoader\n'), ((780, 843), 'dps_3d.datasets.ShapenetDataset', 'datasets.ShapenetDataset', (['args.data', 'args.canvas_size'], {'val': '(True)'}), '(args.data, args.canvas_size, val=True)\n', (804, 843), False, 'from dps_3d import datasets\n'), ((865, 885), 'torch.utils.data.DataLoader', 'DataLoader', (['val_data'], {}), '(val_data)\n', (875, 885), False, 'from torch.utils.data import DataLoader\n'), ((899, 970), 'dps_3d.models.PrimsModel', 'PrimsModel', ([], {'output_dim': '((11 if args.rounded else 10) * args.n_primitives)'}), '(output_dim=(11 if args.rounded else 10) * args.n_primitives)\n', (909, 970), False, 'from dps_3d.models import PrimsModel\n'), ((989, 1036), 'ttools.Checkpointer', 'ttools.Checkpointer', (['args.checkpoint_dir', 'model'], {}), '(args.checkpoint_dir, model)\n', (1008, 1036), False, 'import ttools\n'), 
((1169, 1319), 'dps_3d.interfaces.VectorizerInterface', 'VectorizerInterface', (['model', 'args.lr', 'args.n_primitives', 'args.canvas_size', 'args.w_surface', 'args.w_alignment', 'args.csg', 'args.rounded'], {'cuda': 'args.cuda'}), '(model, args.lr, args.n_primitives, args.canvas_size,\n args.w_surface, args.w_alignment, args.csg, args.rounded, cuda=args.cuda)\n', (1188, 1319), False, 'from dps_3d.interfaces import VectorizerInterface\n'), ((1799, 1824), 'ttools.Trainer', 'ttools.Trainer', (['interface'], {}), '(interface)\n', (1813, 1824), False, 'import ttools\n'), ((2370, 2398), 'ttools.BasicArgumentParser', 'ttools.BasicArgumentParser', ([], {}), '()\n', (2396, 2398), False, 'import ttools\n'), ((2924, 2953), 'ttools.set_logger', 'ttools.set_logger', (['args.debug'], {}), '(args.debug)\n', (2941, 2953), False, 'import ttools\n'), ((1850, 1959), 'ttools.callbacks.TensorBoardLoggingCallback', 'ttools.callbacks.TensorBoardLoggingCallback', ([], {'keys': 'keys', 'writer': 'writer', 'val_writer': 'val_writer', 'frequency': '(5)'}), '(keys=keys, writer=writer,\n val_writer=val_writer, frequency=5)\n', (1893, 1959), False, 'import ttools\n'), ((2051, 2098), 'ttools.callbacks.ProgressBarCallback', 'ttools.callbacks.ProgressBarCallback', ([], {'keys': 'keys'}), '(keys=keys)\n', (2087, 2098), False, 'import ttools\n'), ((2125, 2210), 'ttools.callbacks.CheckpointingCallback', 'ttools.callbacks.CheckpointingCallback', (['checkpointer'], {'interval': 'None', 'max_epochs': '(2)'}), '(checkpointer, interval=None,\n max_epochs=2)\n', (2163, 2210), False, 'import ttools\n'), ((1520, 1543), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1541, 1543), False, 'import datetime\n'), ((1714, 1737), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1735, 1737), False, 'import datetime\n')] |
import sys
import os
import time
import dill as pickle
import pprint
import numpy as np
import pandas as pd
import tensorflow as tf
sys.path.append('keras-tcn')
from tcn import tcn
import h5py
import matplotlib.pyplot as plt
from sklearn.model_selection import KFold
from sklearn.model_selection import train_test_split
from keras import backend as K
from keras import regularizers, constraints, initializers, activations
from keras.callbacks import EarlyStopping ,ModelCheckpoint, TensorBoard, ReduceLROnPlateau
from keras.engine import InputSpec
from keras.engine.topology import Layer
from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU
from keras.layers import Dropout, Flatten, Activation, RepeatVector, Permute
from keras.layers import Dropout
from keras.layers import merge
from keras.layers.core import Reshape
from keras.layers.merge import concatenate
from keras.layers.recurrent import Recurrent
from keras.metrics import categorical_accuracy
from keras.optimizers import Adam
from keras.preprocessing import text, sequence
from keras.preprocessing.text import Tokenizer
from keras.utils import to_categorical
from keras.models import Model, Input, Sequential
from utils import *
from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval
from hyperopt.mongoexp import MongoTrials
def data():
    """Load the netsurfp train/val/test splits from disk.

    Returns:
        (X_train_aug, y_train, X_val_aug, y_val, X_test_aug, y_test) where
        each X_*_aug is a two-element list [sequence_input, standardized
        HMM profiles] suitable for the model's two input branches.
    """
    data_root = '/nosave/lange/cu-ssp/data/netsurfp/'
    file_train = 'train'
    file_test = ['cb513', 'ts115', 'casp12']

    def _load(prefix):
        # Load inputs + HMM profiles and standardize the profiles with
        # their own mean/std.
        # NOTE(review): the test profiles are normalized with the *test*
        # statistics rather than the training statistics -- confirm this
        # is intended before changing it (original behaved the same way).
        X = np.load(data_root + prefix + '_input.npy')
        profiles = np.load(data_root + prefix + '_hmm.npy')
        standardized = (profiles - np.mean(profiles)) / np.std(profiles)
        y = np.load(data_root + prefix + '_q8.npy')
        return [X, standardized], y

    X_test_aug, y_test = _load(file_test[0])  # cb513 only
    X_train_aug, y_train = _load(file_train)
    # carve a validation split off the training data (utils helper)
    X_train_aug, y_train, X_val_aug, y_val = train_val_split(True, X_train_aug, y_train)
    return X_train_aug, y_train, X_val_aug, y_val, X_test_aug, y_test
# Discrete choice grids for the hyperopt search.
DROPOUT_CHOICES = np.arange(0.0, 0.9, 0.1)
UNIT_CHOICES = [100, 200, 500, 800, 1000, 1200]
GRU_CHOICES = [100, 200, 300, 400, 500, 600]
BATCH_CHOICES = [16, 32]
LR_CHOICES = [0.0001, 0.0005, 0.001, 0.0025, 0.005, 0.01]

# Hyperopt search space: hp.choice samples one entry from each grid above.
space = {
    'dense1': hp.choice('dense1', UNIT_CHOICES),
    'dropout1': hp.choice('dropout1', DROPOUT_CHOICES),
    'gru1': hp.choice('gru1', GRU_CHOICES),
    # nesting the layers ensures they're only un-rolled sequentially
    'gru2': hp.choice('gru2', [False, {
        'gru2_units': hp.choice('gru2_units', GRU_CHOICES),
        # only make the 3rd layer available if the 2nd one is
        'gru3': hp.choice('gru3', [False, {
            'gru3_units': hp.choice('gru3_units', GRU_CHOICES)
        }]),
    }]),
    'dense2': hp.choice('dense2', UNIT_CHOICES),
    'dropout2': hp.choice('dropout2', DROPOUT_CHOICES),
    'lr': hp.choice('lr', LR_CHOICES),
    'decay': hp.choice('decay', LR_CHOICES),
    'batch_size': hp.choice('batch_size', BATCH_CHOICES)
}
#load_file = "./model/mod_3-CB513-"+datetime.now().strftime("%Y_%m_%d-%H_%M")+".h5"
# Checkpoint path for the best model weights (hard-coded cluster path).
load_file = "/nosave/lange/cu-ssp/model_aufgeräumt/model/mod_3-CB513-test.h5"
# Load the datasets once at import time; build_model_ho_3() (the hyperopt
# objective) closes over these module-level arrays.
X_train_aug, y_train, X_val_aug, y_val, X_test_aug, y_test = data()
def build_model_ho_3(params):
    """Hyperopt objective: build, train, and evaluate one model.

    Args:
        params: dict sampled from the module-level `space` (layer sizes,
            dropout rates, optimizer settings, batch size).

    Returns:
        Hyperopt result dict; 'loss' is the negated test metric so that
        hyperopt's minimization maximizes it.
    """
    print(params)
    # Two inputs: sequence features and standardized HMM profiles
    # (shapes taken from the module-level training arrays).
    # The local was renamed from `input`, which shadowed the builtin.
    inp = Input(shape=(X_train_aug[0].shape[1], X_train_aug[0].shape[2],))
    profiles_input = Input(shape=(X_train_aug[1].shape[1], X_train_aug[1].shape[2],))
    x1 = concatenate([inp, profiles_input])
    x2 = concatenate([inp, profiles_input])

    # branch 1: dense + dropout
    x1 = Dense(params['dense1'], activation="relu")(x1)
    x1 = Dropout(params['dropout1'])(x1)

    # branch 2: stack of 1-3 bidirectional GRUs (depth chosen by hyperopt)
    x2 = Bidirectional(CuDNNGRU(units=params['gru1'], return_sequences=True))(x2)
    if params['gru2']:
        x2 = Bidirectional(CuDNNGRU(units=params['gru2']['gru2_units'], return_sequences=True))(x2)
    if params['gru2'] and params['gru2']['gru3']:
        x2 = Bidirectional(CuDNNGRU(units=params['gru2']['gru3']['gru3_units'], return_sequences=True))(x2)

    # merge branches, then dense + TCN + per-timestep softmax (8 classes)
    COMBO_MOVE = concatenate([x1, x2])
    w = Dense(params['dense2'], activation="relu")(COMBO_MOVE)
    w = Dropout(params['dropout2'])(w)
    w = tcn.TCN(return_sequences=True)(w)
    y = TimeDistributed(Dense(8, activation="softmax"))(w)

    model = Model([inp, profiles_input], y)
    adamOptimizer = Adam(lr=params['lr'], beta_1=0.8, beta_2=0.8, epsilon=None, decay=params['decay'], amsgrad=False)
    # `accuracy` is presumably the custom metric from `utils` (imported
    # via star-import) -- confirm against utils before renaming it.
    model.compile(optimizer=adamOptimizer, loss="categorical_crossentropy", metrics=["accuracy", accuracy])

    # train with early stopping; checkpoint only the best val_accuracy
    earlyStopping = EarlyStopping(monitor='val_accuracy', patience=3, verbose=1, mode='max')
    checkpointer = ModelCheckpoint(filepath=load_file, monitor='val_accuracy', verbose=1, save_best_only=True,
                                   mode='max')
    model.fit(X_train_aug, y_train, validation_data=(X_val_aug, y_val),
              epochs=20, batch_size=params['batch_size'], callbacks=[checkpointer, earlyStopping],
              verbose=1, shuffle=True)

    # evaluate the best checkpoint on the held-out test set
    model.load_weights(load_file)
    score = model.evaluate(X_test_aug, y_test)
    K.clear_session()  # free GPU memory between hyperopt trials
    result = {'loss': -score[2], 'status': STATUS_OK}
    return result
| [
"sys.path.append",
"numpy.load",
"numpy.std",
"keras.callbacks.ModelCheckpoint",
"hyperopt.hp.choice",
"keras.layers.Dropout",
"keras.layers.merge.concatenate",
"keras.optimizers.Adam",
"keras.models.Input",
"keras.models.Model",
"numpy.mean",
"numpy.arange",
"keras.callbacks.EarlyStopping",... | [((133, 161), 'sys.path.append', 'sys.path.append', (['"""keras-tcn"""'], {}), "('keras-tcn')\n", (148, 161), False, 'import sys\n'), ((2303, 2327), 'numpy.arange', 'np.arange', (['(0.0)', '(0.9)', '(0.1)'], {}), '(0.0, 0.9, 0.1)\n', (2312, 2327), True, 'import numpy as np\n'), ((1499, 1547), 'numpy.load', 'np.load', (["(data_root + file_test[0] + '_input.npy')"], {}), "(data_root + file_test[0] + '_input.npy')\n", (1506, 1547), True, 'import numpy as np\n'), ((1563, 1609), 'numpy.load', 'np.load', (["(data_root + file_test[0] + '_hmm.npy')"], {}), "(data_root + file_test[0] + '_hmm.npy')\n", (1570, 1609), True, 'import numpy as np\n'), ((1621, 1638), 'numpy.mean', 'np.mean', (['profiles'], {}), '(profiles)\n', (1628, 1638), True, 'import numpy as np\n'), ((1649, 1665), 'numpy.std', 'np.std', (['profiles'], {}), '(profiles)\n', (1655, 1665), True, 'import numpy as np\n'), ((1758, 1803), 'numpy.load', 'np.load', (["(data_root + file_test[0] + '_q8.npy')"], {}), "(data_root + file_test[0] + '_q8.npy')\n", (1765, 1803), True, 'import numpy as np\n'), ((1819, 1865), 'numpy.load', 'np.load', (["(data_root + file_train + '_input.npy')"], {}), "(data_root + file_train + '_input.npy')\n", (1826, 1865), True, 'import numpy as np\n'), ((1881, 1925), 'numpy.load', 'np.load', (["(data_root + file_train + '_hmm.npy')"], {}), "(data_root + file_train + '_hmm.npy')\n", (1888, 1925), True, 'import numpy as np\n'), ((1937, 1954), 'numpy.mean', 'np.mean', (['profiles'], {}), '(profiles)\n', (1944, 1954), True, 'import numpy as np\n'), ((1965, 1981), 'numpy.std', 'np.std', (['profiles'], {}), '(profiles)\n', (1971, 1981), True, 'import numpy as np\n'), ((2079, 2122), 'numpy.load', 'np.load', (["(data_root + file_train + '_q8.npy')"], {}), "(data_root + file_train + '_q8.npy')\n", (2086, 2122), True, 'import numpy as np\n'), ((2528, 2561), 'hyperopt.hp.choice', 'hp.choice', (['"""dense1"""', 'UNIT_CHOICES'], {}), "('dense1', UNIT_CHOICES)\n", 
(2537, 2561), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((2579, 2617), 'hyperopt.hp.choice', 'hp.choice', (['"""dropout1"""', 'DROPOUT_CHOICES'], {}), "('dropout1', DROPOUT_CHOICES)\n", (2588, 2617), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((2631, 2661), 'hyperopt.hp.choice', 'hp.choice', (['"""gru1"""', 'GRU_CHOICES'], {}), "('gru1', GRU_CHOICES)\n", (2640, 2661), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3038, 3071), 'hyperopt.hp.choice', 'hp.choice', (['"""dense2"""', 'UNIT_CHOICES'], {}), "('dense2', UNIT_CHOICES)\n", (3047, 3071), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3089, 3127), 'hyperopt.hp.choice', 'hp.choice', (['"""dropout2"""', 'DROPOUT_CHOICES'], {}), "('dropout2', DROPOUT_CHOICES)\n", (3098, 3127), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3139, 3166), 'hyperopt.hp.choice', 'hp.choice', (['"""lr"""', 'LR_CHOICES'], {}), "('lr', LR_CHOICES)\n", (3148, 3166), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3181, 3211), 'hyperopt.hp.choice', 'hp.choice', (['"""decay"""', 'LR_CHOICES'], {}), "('decay', LR_CHOICES)\n", (3190, 3211), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3231, 3269), 'hyperopt.hp.choice', 'hp.choice', (['"""batch_size"""', 'BATCH_CHOICES'], {}), "('batch_size', BATCH_CHOICES)\n", (3240, 3269), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((3564, 3627), 'keras.models.Input', 'Input', ([], {'shape': '(X_train_aug[0].shape[1], X_train_aug[0].shape[2])'}), '(shape=(X_train_aug[0].shape[1], X_train_aug[0].shape[2]))\n', (3569, 3627), False, 'from keras.models import Model, Input, Sequential\n'), ((3650, 3713), 'keras.models.Input', 'Input', ([], {'shape': '(X_train_aug[1].shape[1], X_train_aug[1].shape[2])'}), 
'(shape=(X_train_aug[1].shape[1], X_train_aug[1].shape[2]))\n', (3655, 3713), False, 'from keras.models import Model, Input, Sequential\n'), ((3724, 3760), 'keras.layers.merge.concatenate', 'concatenate', (['[input, profiles_input]'], {}), '([input, profiles_input])\n', (3735, 3760), False, 'from keras.layers.merge import concatenate\n'), ((3770, 3806), 'keras.layers.merge.concatenate', 'concatenate', (['[input, profiles_input]'], {}), '([input, profiles_input])\n', (3781, 3806), False, 'from keras.layers.merge import concatenate\n'), ((4284, 4305), 'keras.layers.merge.concatenate', 'concatenate', (['[x1, x2]'], {}), '([x1, x2])\n', (4295, 4305), False, 'from keras.layers.merge import concatenate\n'), ((4521, 4554), 'keras.models.Model', 'Model', (['[input, profiles_input]', 'y'], {}), '([input, profiles_input], y)\n', (4526, 4554), False, 'from keras.models import Model, Input, Sequential\n'), ((4576, 4678), 'keras.optimizers.Adam', 'Adam', ([], {'lr': "params['lr']", 'beta_1': '(0.8)', 'beta_2': '(0.8)', 'epsilon': 'None', 'decay': "params['decay']", 'amsgrad': '(False)'}), "(lr=params['lr'], beta_1=0.8, beta_2=0.8, epsilon=None, decay=params[\n 'decay'], amsgrad=False)\n", (4580, 4678), False, 'from keras.optimizers import Adam\n'), ((4803, 4875), 'keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_accuracy"""', 'patience': '(3)', 'verbose': '(1)', 'mode': '"""max"""'}), "(monitor='val_accuracy', patience=3, verbose=1, mode='max')\n", (4816, 4875), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, ReduceLROnPlateau\n'), ((4895, 5002), 'keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'load_file', 'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), "(filepath=load_file, monitor='val_accuracy', verbose=1,\n save_best_only=True, mode='max')\n", (4910, 5002), False, 'from keras.callbacks import EarlyStopping, ModelCheckpoint, TensorBoard, 
ReduceLROnPlateau\n'), ((5331, 5348), 'keras.backend.clear_session', 'K.clear_session', ([], {}), '()\n', (5346, 5348), True, 'from keras import backend as K\n'), ((3816, 3858), 'keras.layers.Dense', 'Dense', (["params['dense1']"], {'activation': '"""relu"""'}), "(params['dense1'], activation='relu')\n", (3821, 3858), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((3872, 3899), 'keras.layers.Dropout', 'Dropout', (["params['dropout1']"], {}), "(params['dropout1'])\n", (3879, 3899), False, 'from keras.layers import Dropout\n'), ((4314, 4356), 'keras.layers.Dense', 'Dense', (["params['dense2']"], {'activation': '"""relu"""'}), "(params['dense2'], activation='relu')\n", (4319, 4356), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((4377, 4404), 'keras.layers.Dropout', 'Dropout', (["params['dropout2']"], {}), "(params['dropout2'])\n", (4384, 4404), False, 'from keras.layers import Dropout\n'), ((4416, 4446), 'tcn.tcn.TCN', 'tcn.TCN', ([], {'return_sequences': '(True)'}), '(return_sequences=True)\n', (4423, 4446), False, 'from tcn import tcn\n'), ((3927, 3980), 'keras.layers.CuDNNGRU', 'CuDNNGRU', ([], {'units': "params['gru1']", 'return_sequences': '(True)'}), "(units=params['gru1'], return_sequences=True)\n", (3935, 3980), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((4474, 4504), 'keras.layers.Dense', 'Dense', (['(8)'], {'activation': '"""softmax"""'}), "(8, activation='softmax')\n", (4479, 4504), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((2794, 2830), 'hyperopt.hp.choice', 'hp.choice', (['"""gru2_units"""', 'GRU_CHOICES'], {}), "('gru2_units', GRU_CHOICES)\n", (2803, 2830), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n'), ((4036, 4103), 'keras.layers.CuDNNGRU', 'CuDNNGRU', ([], {'units': 
"params['gru2']['gru2_units']", 'return_sequences': '(True)'}), "(units=params['gru2']['gru2_units'], return_sequences=True)\n", (4044, 4103), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((4186, 4261), 'keras.layers.CuDNNGRU', 'CuDNNGRU', ([], {'units': "params['gru2']['gru3']['gru3_units']", 'return_sequences': '(True)'}), "(units=params['gru2']['gru3']['gru3_units'], return_sequences=True)\n", (4194, 4261), False, 'from keras.layers import LSTM, Embedding, Dense, TimeDistributed, Bidirectional, CuDNNGRU\n'), ((2965, 3001), 'hyperopt.hp.choice', 'hp.choice', (['"""gru3_units"""', 'GRU_CHOICES'], {}), "('gru3_units', GRU_CHOICES)\n", (2974, 3001), False, 'from hyperopt import hp, fmin, tpe, hp, STATUS_OK, Trials, space_eval\n')] |
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Example of batch normalization when the input data has shape [N, C, H, W].
'''
import numpy as np
import paddle
from paddle.nn import BatchNorm2D
# Fix the random seed so that every run produces identical results.
np.random.seed(100)
# Create the input data.
data = np.random.rand(2, 3, 3, 3).astype('float32')
# Compute the normalized output with BatchNorm2D.
# The input has dimensions [N, C, H, W]; num_features must equal C.
bn = BatchNorm2D(num_features=3)
x = paddle.to_tensor(data)
y = bn(x)
print('input of BatchNorm2D Layer: \n {}'.format(x.numpy()))
print('output of BatchNorm2D Layer: \n {}'.format(y.numpy()))
# Take channel 0 of `data` and reproduce the normalization with numpy:
# compute the mean, standard deviation, and normalized output by hand.
a = data[:, 0, :, :]
a_mean = a.mean()
a_std = a.std()
b = (a - a_mean) / a_std
print('channel 0 of input data: \n {}'.format(a))
print('std {}, mean {}, \n output: \n {}'.format(a_mean, a_std, b))
| [
"paddle.to_tensor",
"numpy.random.rand",
"numpy.random.seed",
"paddle.nn.BatchNorm2D"
] | [((747, 766), 'numpy.random.seed', 'np.random.seed', (['(100)'], {}), '(100)\n', (761, 766), True, 'import numpy as np\n'), ((892, 919), 'paddle.nn.BatchNorm2D', 'BatchNorm2D', ([], {'num_features': '(3)'}), '(num_features=3)\n', (903, 919), False, 'from paddle.nn import BatchNorm2D\n'), ((924, 946), 'paddle.to_tensor', 'paddle.to_tensor', (['data'], {}), '(data)\n', (940, 946), False, 'import paddle\n'), ((781, 807), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(3)', '(3)'], {}), '(2, 3, 3, 3)\n', (795, 807), True, 'import numpy as np\n')] |
import time
import os
import re
import sys
import numpy as np
class node:
    """Per-source packet statistics gathered while parsing a log file.

    Attributes:
        id: numeric node id (parameter name kept for backward compatibility,
            even though it shadows the builtin ``id``).
        packets: seqnos received by the server, in arrival order.
        sent: send timestamp (ms), indexed by seqno.
        received: receive timestamp (ms), indexed by seqno.
        overflows: number of MAC-queue overflow events.
        n_sent: total packets the node attempted to send.
    """
    def __init__(self, id, max_seqno=100000):
        # max_seqno generalizes the previously hard-coded buffer size;
        # the default preserves the original behavior.
        self.id = id
        self.packets = []
        self.sent = np.zeros(max_seqno)
        self.received = np.zeros(max_seqno)
        self.overflows = 0
        self.n_sent = 0
def search_node(nodes, n):
    """Return the index of the node whose ``id`` equals ``n``, or -1 if absent.

    The original ``len(nodes) != 0`` guard was redundant (iterating an empty
    list is a no-op) and the index loop is replaced by ``enumerate``.
    """
    for i, candidate in enumerate(nodes):
        if candidate.id == n:
            return i
    return -1
# --- Log-line patterns -------------------------------------------------------
# Timestamps are "MM:SS.mmm"; node ID:1 is the sink/server.
# NOTE(review): node ids are matched as a single digit ([0-9]) -- confirm the
# deployment never uses ids > 9.
REGEXP_SERVER = re.compile('^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\tID:1\t\[INFO:\sApp\s\s\s\s\s\s\s]\sReceived\s(?P<seqno>([0-9]+))\sfrom\s0(?P<source>([0-9]))')
REGEXP_CLIENT = re.compile('^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\tID:(?P<source>([0-9]))\t\[INFO:\sApp\s\s\s\s\s\s\s]\sSending\s(?P<seqno>([0-9]+))')
# Two flavours of MAC-layer queue-overflow errors (CSMA and TSCH logs).
REGEXP_OVERFLOW = re.compile('^.*?ID:(?P<source>([0-9]))\t\[ERR\s:\sCSMA\s\s\s\s\s\s]\sNeighbor queue full')
REGEXP_OVERFLOW2 = re.compile('^.*?ID:(?P<source>([0-9]))\t\[ERR\s:\sTSCH\sQueue]\s!\sadd packet failed')
# Read the whole log file given as the first CLI argument.
logfile = open(str(sys.argv[1]))
print("Parsing " + str(sys.argv[1]))
lines = logfile.readlines()
nodes = []
for line in lines:
    chomped_line = line.rstrip()
    # Server reception: record arrival time (ms) indexed by seqno.
    server_match = re.match(REGEXP_SERVER, chomped_line)
    if(server_match):
        seqno = int(server_match.group("seqno"))
        source = int(server_match.group("source"))
        mins = int(server_match.group("mins"))
        secs = int(server_match.group("secs"))
        mils = int(server_match.group("mils"))
        received = mils + secs*1000 + mins*1000*60
        if(source != 0):
            # Lazily create the per-node record on first sighting.
            node_place = search_node(nodes,source)
            if(node_place == -1):
                nodes.append(node(source))
                node_place = len(nodes)-1
            nodes[node_place].packets.append(seqno)
            nodes[node_place].received[seqno] = received
    # Client transmission: record send time (ms) and bump the attempt counter.
    client_match = re.match(REGEXP_CLIENT, chomped_line)
    if(client_match):
        seqno = int(client_match.group("seqno"))
        source = int(client_match.group("source"))
        mins = int(client_match.group("mins"))
        secs = int(client_match.group("secs"))
        mils = int(client_match.group("mils"))
        sent = mils + secs*1000 + mins*1000*60
        if(source != 0):
            node_place = search_node(nodes,source)
            if(node_place == -1):
                nodes.append(node(source))
                node_place = len(nodes)-1
            nodes[node_place].sent[seqno] = sent
            nodes[node_place].n_sent += 1
    # Queue overflow (CSMA variant): the packet never reached the air.
    overflow_match = re.match(REGEXP_OVERFLOW, chomped_line)
    if(overflow_match):
        source = int(overflow_match.group("source"))
        if(source != 0):
            node_place = search_node(nodes,source)
            if(node_place == -1):
                nodes.append(node(source))
                node_place = len(nodes)-1
            nodes[node_place].overflows += 1
    # Queue overflow (TSCH variant): counted the same way.
    overflow_match2 = re.match(REGEXP_OVERFLOW2, chomped_line)
    if(overflow_match2):
        source = int(overflow_match2.group("source"))
        if(source != 0):
            node_place = search_node(nodes,source)
            if(node_place == -1):
                nodes.append(node(source))
                node_place = len(nodes)-1
            nodes[node_place].overflows += 1
#Global values
global_passed_to_mac = []
global_accepted_by_mac = []
global_received = []
global_average_delay = []
global_goodput = []
global_reliability = []
# Per-node report; values are also accumulated for the global summary below.
for node in nodes:  # NOTE(review): rebinds `node`, shadowing the class of the same name
    if(len(node.packets) != 0):
        # Highest seqno seen at the server +1 approximates packets handed to the MAC.
        passed_to_mac = node.packets[-1]+1
        accepted_by_mac = node.n_sent - node.overflows
        received = len(node.packets)
        delays = []
        last = 0
        for i in range(0,len(node.received)):
            # Only seqnos with both a send and a receive timestamp contribute.
            if((node.received[i] != 0) and (node.sent[i] != 0)):
                delays.append(node.received[i]-node.sent[i])
                last = node.received[i]
        average_delay = np.mean(delays)
        # NOTE(review): node.received[0] is the slot for seqno 0, not the first
        # arrival -- it stays 0 unless seqno 0 was received. Confirm intended.
        total_time = last-node.received[0]
        goodput = received/(total_time/1000.0)
        reliability = received/accepted_by_mac
        print("*** Node " + str(node.id) + " ***")
        print("Packets passed to MAC:\t\t" + str(passed_to_mac))
        print("Packets accepted by MAC:\t" + str(accepted_by_mac))
        print("Packets received by server:\t" + str(received))
        print("Average delay:\t\t\t" + str(average_delay) + " ms")
        print("----------")
        print("Goodput:\t\t\t" + str(goodput) + " packets/s")
        print("Reliability:\t\t\t" + str(reliability))
        print()
        global_passed_to_mac.append(passed_to_mac)
        global_accepted_by_mac.append(accepted_by_mac)
        global_received.append(received)
        global_average_delay.append(average_delay)
        global_goodput.append(goodput)
        global_reliability.append(reliability)
# Aggregate summary over all nodes.
print("*** Global ***")
print("Packets passed to MAC:\t\t" + str(np.sum(global_passed_to_mac)))
print("Packets accepted by MAC:\t" + str(np.sum(global_accepted_by_mac)))
print("Packets received by server:\t" + str(np.sum(global_received)))
print("Average delay:\t\t\t" + str(np.mean(global_average_delay)) + " ms")
print("----------")
print("Goodput:\t\t\t" + str(np.sum(global_goodput)) + " packets/s")
print("Reliability:\t\t\t" + str(np.sum(global_received)/np.sum(global_accepted_by_mac)))
print() | [
"numpy.sum",
"numpy.zeros",
"re.match",
"numpy.mean",
"re.compile"
] | [((459, 650), 're.compile', 're.compile', (['"""^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\tID:1\t\\\\[INFO:\\\\sApp\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sReceived\\\\s(?P<seqno>([0-9]+))\\\\sfrom\\\\s0(?P<source>([0-9]))"""'], {}), "(\n '^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\\tID:1\\t\\\\[INFO:\\\\sApp\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sReceived\\\\s(?P<seqno>([0-9]+))\\\\sfrom\\\\s0(?P<source>([0-9]))'\n )\n", (469, 650), False, 'import re\n'), ((644, 822), 're.compile', 're.compile', (['"""^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\tID:(?P<source>([0-9]))\t\\\\[INFO:\\\\sApp\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sSending\\\\s(?P<seqno>([0-9]+))"""'], {}), "(\n '^.*?(?P<mins>([0-9]+)):(?P<secs>([0-9]+)).(?P<mils>([0-9]+))\\tID:(?P<source>([0-9]))\\t\\\\[INFO:\\\\sApp\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sSending\\\\s(?P<seqno>([0-9]+))'\n )\n", (654, 822), False, 'import re\n'), ((820, 930), 're.compile', 're.compile', (['"""^.*?ID:(?P<source>([0-9]))\t\\\\[ERR\\\\s:\\\\sCSMA\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sNeighbor queue full"""'], {}), "(\n '^.*?ID:(?P<source>([0-9]))\\t\\\\[ERR\\\\s:\\\\sCSMA\\\\s\\\\s\\\\s\\\\s\\\\s\\\\s]\\\\sNeighbor queue full'\n )\n", (830, 930), False, 'import re\n'), ((930, 1032), 're.compile', 're.compile', (['"""^.*?ID:(?P<source>([0-9]))\t\\\\[ERR\\\\s:\\\\sTSCH\\\\sQueue]\\\\s!\\\\sadd packet failed"""'], {}), "(\n '^.*?ID:(?P<source>([0-9]))\\t\\\\[ERR\\\\s:\\\\sTSCH\\\\sQueue]\\\\s!\\\\sadd packet failed'\n )\n", (940, 1032), False, 'import re\n'), ((1200, 1237), 're.match', 're.match', (['REGEXP_SERVER', 'chomped_line'], {}), '(REGEXP_SERVER, chomped_line)\n', (1208, 1237), False, 'import re\n'), ((1875, 1912), 're.match', 're.match', (['REGEXP_CLIENT', 'chomped_line'], {}), '(REGEXP_CLIENT, chomped_line)\n', (1883, 1912), False, 'import re\n'), ((2530, 2569), 're.match', 're.match', (['REGEXP_OVERFLOW', 'chomped_line'], {}), '(REGEXP_OVERFLOW, 
chomped_line)\n', (2538, 2569), False, 'import re\n'), ((2910, 2950), 're.match', 're.match', (['REGEXP_OVERFLOW2', 'chomped_line'], {}), '(REGEXP_OVERFLOW2, chomped_line)\n', (2918, 2950), False, 'import re\n'), ((170, 186), 'numpy.zeros', 'np.zeros', (['(100000)'], {}), '(100000)\n', (178, 186), True, 'import numpy as np\n'), ((211, 227), 'numpy.zeros', 'np.zeros', (['(100000)'], {}), '(100000)\n', (219, 227), True, 'import numpy as np\n'), ((3899, 3914), 'numpy.mean', 'np.mean', (['delays'], {}), '(delays)\n', (3906, 3914), True, 'import numpy as np\n'), ((4878, 4906), 'numpy.sum', 'np.sum', (['global_passed_to_mac'], {}), '(global_passed_to_mac)\n', (4884, 4906), True, 'import numpy as np\n'), ((4950, 4980), 'numpy.sum', 'np.sum', (['global_accepted_by_mac'], {}), '(global_accepted_by_mac)\n', (4956, 4980), True, 'import numpy as np\n'), ((5027, 5050), 'numpy.sum', 'np.sum', (['global_received'], {}), '(global_received)\n', (5033, 5050), True, 'import numpy as np\n'), ((5088, 5117), 'numpy.mean', 'np.mean', (['global_average_delay'], {}), '(global_average_delay)\n', (5095, 5117), True, 'import numpy as np\n'), ((5177, 5199), 'numpy.sum', 'np.sum', (['global_goodput'], {}), '(global_goodput)\n', (5183, 5199), True, 'import numpy as np\n'), ((5250, 5273), 'numpy.sum', 'np.sum', (['global_received'], {}), '(global_received)\n', (5256, 5273), True, 'import numpy as np\n'), ((5274, 5304), 'numpy.sum', 'np.sum', (['global_accepted_by_mac'], {}), '(global_accepted_by_mac)\n', (5280, 5304), True, 'import numpy as np\n')] |
import json
import os
import pickle
from itertools import groupby
from pathlib import Path
from typing import Dict, List, Tuple
import checksum
import numpy as np
import pandas as pd
from IPython import get_ipython
from mmcv import Config
from mmdet.core import eval_map
from tqdm import tqdm
from vinbigdata import BoxCoordsFloat, BoxesMeta, BoxWithScore, ImageMeta, classname2mmdetid, mmdetid2classname
from vinbigdata.utils import abs2rel, rel2abs, is_interactive
def generate_gt_boxes(img_data: pd.DataFrame) -> BoxesMeta:
    """Build ground-truth (boxes, scores, labels) from an annotation dataframe.

    Box corners are converted to relative coordinates via ``abs2rel`` using the
    per-row original image size; every ground-truth box gets a score of 1.0.
    """
    rows = [row for _, row in img_data.iterrows()]
    boxes = [
        abs2rel((row['x_min'], row['y_min'], row['x_max'], row['y_max']),
                (row['original_width'], row['original_height']))
        for row in rows
    ]
    scores = [1.0] * len(rows)
    labels = [row['class_name'] for row in rows]
    return (boxes, scores, labels)
def batch_inference(models: List[Dict[str, str]],
                    ids_file: str,
                    nocache: bool = True) -> List[Tuple[str, str, str, str]]:
    """Run (cached) batch evaluation of detection models via shell commands.

    For each model entry, builds either an mmdetection ``dist_test.sh`` command
    or a scaled-YOLO test command and executes it — through IPython's ``%sx``
    magic when interactive, otherwise ``os.system``.

    Returns a list of (model path, model type, ids file, result file) tuples.
    Note: with ``nocache=True`` (the default) the command is re-run even when
    the result file already exists on disk.
    """
    results = []
    ids_file_final = ids_file
    for model_data in tqdm(models, total=len(models)):
        # Result files are keyed by (model checksum, ids-file checksum).
        model_hash = checksum.get_for_file(model_data['model'])
        if ids_file is None and model_data['type'] == 'scaled_yolo':
            # Fall back to the validation set declared in the model config.
            ids_file_final = Config.fromfile(model_data['config'])['val']
        ids_hash = checksum.get_for_file(ids_file_final)
        if model_data['type'] == 'mmdet':
            tool = '/mmdetection/tools/dist_test.sh'
            command = 'bash {} {} {} {} --eval=mAP --cfg-options data.test.ann_file={} \
                --eval-options="iou_thr=0.4" --out={}'
            file_name = 'results/data/result-{}-{}.pkl'.format(model_hash, ids_hash)
            if not Path(file_name).exists() or nocache:
                command = command.format(tool, model_data['config'], model_data['model'], model_data['num_gpu'],
                                         ids_file, file_name)
                if is_interactive():
                    res = get_ipython().run_line_magic('sx', command)
                    print(res[-100:])
                else:
                    os.system(command)
            results.append((model_data['model'], model_data['type'], ids_file_final, file_name))
        elif model_data['type'] == 'scaled_yolo':
            command = 'PYTHONPATH=. python scripts/test_yolo.py --img {} --conf 0.0001 --batch 8 --device 0 --data {} \
                --weights {} --verbose --save-json --task={} --result-file={}'
            file_name = 'results/data/result-{}-{}.json'.format(model_hash, ids_hash)
            # The yolo script distinguishes val/test by name of the ids file.
            task = 'val' if 'val' in ids_file_final else 'test'
            if not Path(file_name).exists() or nocache:
                command = command.format(model_data['img_shape'], model_data['config'], model_data['model'], task,
                                         file_name)
                if is_interactive():
                    res = get_ipython().run_line_magic('sx', command)
                    print(res[-100:])
                else:
                    os.system(command)
            results.append((model_data['model'], model_data['type'], ids_file_final, file_name))
        else:
            raise ValueError('Invalid model type')
    return results
def convert_mmdet2arrays(bboxes: List[List[BoxWithScore]], img_shape: Tuple[int, int]) -> BoxesMeta:
    """Flatten mmdetection's per-class box lists into (boxes, scores, labels).

    ``bboxes[class_id]`` holds [x1, y1, x2, y2, score] detections; coordinates
    are converted to relative units and class ids mapped to class names.
    """
    boxes, scores, labels = [], [], []
    for class_id, class_detections in enumerate(bboxes):
        for detection in class_detections:
            boxes.append(abs2rel(detection, img_shape))
            scores.append(detection[4])
            labels.append(mmdetid2classname[class_id])
    return (boxes, scores, labels)
def convert_array2mmdet(boxes: List[BoxCoordsFloat],
                        scores: List[float],
                        labels: List[str],
                        num_classes=15) -> List[List[BoxWithScore]]:
    """Regroup flat (boxes, scores, labels) into mmdetection's per-class format.

    Returns one array per class id with rows [x1, y1, x2, y2, score]; classes
    with no detections yield an empty (0, 5) array.
    """
    per_class: List[List[BoxWithScore]] = [[] for _ in range(num_classes)]
    for box, score, label in zip(boxes, scores, labels):
        class_id = classname2mmdetid[label]
        per_class[class_id].append(np.array([*box, score]))
    return [np.array(rows) if len(rows) > 0 else np.zeros((0, 5)) for rows in per_class]
def predict_boxes(img_ids: List[str], meta: pd.DataFrame, file_path: str, model_type: str) -> List[ImageMeta]:
    """Load serialized model predictions and normalise them per image.

    Reads a pickle (mmdet) or json (scaled_yolo) result file and returns one
    (img_id, original (w, h), (boxes, scores, labels)) tuple per image; images
    with no predictions get empty lists so every id in ``img_ids`` is covered.
    """
    res = []
    # Inference-time and original image sizes, looked up from the meta frame.
    img_shapes: Dict[str,
                     Tuple[int,
                           int]] = {img_id: meta[['width', 'height']].loc[[img_id]].values[0]
                                    for img_id in img_ids}
    original_img_shapes: Dict[str, Tuple[int, int]] = {
        img_id: meta[['original_width', 'original_height']].loc[[img_id]].values[0]
        for img_id in img_ids
    }
    if model_type == 'mmdet':
        # Pickle rows are aligned positionally with img_ids.
        predict_bboxes = pickle.load(open(file_path, 'rb'))
        for img_id, predict_boxes_img in zip(img_ids, predict_bboxes):
            res.append((img_id, original_img_shapes[img_id], convert_mmdet2arrays(predict_boxes_img,
                                                                                  img_shapes[img_id])))
    elif model_type == 'scaled_yolo':
        # COCO-style json: one record per detection, grouped here by image_id.
        # (groupby requires the pre-sort by the same key.)
        predict_bboxes = json.load(open(file_path, 'r'))
        for img_id, group in groupby(sorted(predict_bboxes, key=lambda x: x['image_id']), key=lambda x: x['image_id']):
            g = list(group)
            # COCO bbox is (x, y, w, h); convert to corner form then to relative.
            boxes = [
                abs2rel(
                    (img['bbox'][0], img['bbox'][1], img['bbox'][0] + img['bbox'][2], img['bbox'][1] + img['bbox'][3]),
                    img_shapes[img_id]) for img in g
            ]
            scores = [img['score'] for img in g]
            labels = [mmdetid2classname[img['category_id']] for img in g]
            res.append((img_id, original_img_shapes[img_id], (boxes, scores, labels)))
    # Pad images that produced no detections with empty predictions.
    img_id_set = {r[0] for r in res}
    for img_id in img_shapes.keys():
        if img_id not in img_id_set:
            res.append((img_id, original_img_shapes[img_id], ([], [], [])))
    return res
def calc_measure(result: List[ImageMeta], gt_result: List[ImageMeta]) -> Tuple[float, Dict[str, float]]:
    """Score predictions against ground truth with mmdetection's ``eval_map``.

    Both inputs are (img_id, (w, h), (boxes, scores, labels)) tuples with
    relative box coordinates; boxes are converted back to absolute pixels via
    ``rel2abs`` before evaluation at IoU threshold 0.4.
    """
    # Predictions in mmdet per-class format (absolute coordinates).
    x = [convert_array2mmdet([rel2abs(box, res[1]) for box in res[2][0]], res[2][1], res[2][2]) for res in result]
    # Ground truth as {'bboxes': (k, 4), 'labels': (k,)} dicts; empty images
    # get a (0, 4) array. The inner `x` is comprehension-local and does not
    # clash with the predictions list above.
    y = [{
        'bboxes': np.array([rel2abs(box, res[1]) for box in res[2][0]]) if len(res[2][0]) > 0 else np.zeros((0, 4)),
        'labels': np.array([classname2mmdetid[x] for x in res[2][2]])
    } for res in gt_result]
    res = eval_map(x, y, dataset=list(mmdetid2classname.values()), iou_thr=0.4)
    return res
| [
"vinbigdata.mmdetid2classname.values",
"numpy.zeros",
"os.system",
"vinbigdata.utils.rel2abs",
"pathlib.Path",
"mmcv.Config.fromfile",
"numpy.array",
"checksum.get_for_file",
"IPython.get_ipython",
"vinbigdata.utils.abs2rel",
"vinbigdata.utils.is_interactive"
] | [((1169, 1211), 'checksum.get_for_file', 'checksum.get_for_file', (["model_data['model']"], {}), "(model_data['model'])\n", (1190, 1211), False, 'import checksum\n'), ((1375, 1412), 'checksum.get_for_file', 'checksum.get_for_file', (['ids_file_final'], {}), '(ids_file_final)\n', (1396, 1412), False, 'import checksum\n'), ((642, 761), 'vinbigdata.utils.abs2rel', 'abs2rel', (["(row['x_min'], row['y_min'], row['x_max'], row['y_max'])", "(row['original_width'], row['original_height'])"], {}), "((row['x_min'], row['y_min'], row['x_max'], row['y_max']), (row[\n 'original_width'], row['original_height']))\n", (649, 761), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((4079, 4102), 'numpy.array', 'np.array', (['[*box, score]'], {}), '([*box, score])\n', (4087, 4102), True, 'import numpy as np\n'), ((4118, 4131), 'numpy.array', 'np.array', (['res'], {}), '(res)\n', (4126, 4131), True, 'import numpy as np\n'), ((4153, 4169), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (4161, 4169), True, 'import numpy as np\n'), ((6345, 6396), 'numpy.array', 'np.array', (['[classname2mmdetid[x] for x in res[2][2]]'], {}), '([classname2mmdetid[x] for x in res[2][2]])\n', (6353, 6396), True, 'import numpy as np\n'), ((1310, 1347), 'mmcv.Config.fromfile', 'Config.fromfile', (["model_data['config']"], {}), "(model_data['config'])\n", (1325, 1347), False, 'from mmcv import Config\n'), ((1991, 2007), 'vinbigdata.utils.is_interactive', 'is_interactive', ([], {}), '()\n', (2005, 2007), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((3539, 3563), 'vinbigdata.utils.abs2rel', 'abs2rel', (['bbox', 'img_shape'], {}), '(bbox, img_shape)\n', (3546, 3563), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((6114, 6134), 'vinbigdata.utils.rel2abs', 'rel2abs', (['box', 'res[1]'], {}), '(box, res[1])\n', (6121, 6134), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((6309, 6325), 
'numpy.zeros', 'np.zeros', (['(0, 4)'], {}), '((0, 4))\n', (6317, 6325), True, 'import numpy as np\n'), ((6463, 6489), 'vinbigdata.mmdetid2classname.values', 'mmdetid2classname.values', ([], {}), '()\n', (6487, 6489), False, 'from vinbigdata import BoxCoordsFloat, BoxesMeta, BoxWithScore, ImageMeta, classname2mmdetid, mmdetid2classname\n'), ((2159, 2177), 'os.system', 'os.system', (['command'], {}), '(command)\n', (2168, 2177), False, 'import os\n'), ((2917, 2933), 'vinbigdata.utils.is_interactive', 'is_interactive', ([], {}), '()\n', (2931, 2933), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((5369, 5501), 'vinbigdata.utils.abs2rel', 'abs2rel', (["(img['bbox'][0], img['bbox'][1], img['bbox'][0] + img['bbox'][2], img[\n 'bbox'][1] + img['bbox'][3])", 'img_shapes[img_id]'], {}), "((img['bbox'][0], img['bbox'][1], img['bbox'][0] + img['bbox'][2], \n img['bbox'][1] + img['bbox'][3]), img_shapes[img_id])\n", (5376, 5501), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((3085, 3103), 'os.system', 'os.system', (['command'], {}), '(command)\n', (3094, 3103), False, 'import os\n'), ((6238, 6258), 'vinbigdata.utils.rel2abs', 'rel2abs', (['box', 'res[1]'], {}), '(box, res[1])\n', (6245, 6258), False, 'from vinbigdata.utils import abs2rel, rel2abs, is_interactive\n'), ((1760, 1775), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (1764, 1775), False, 'from pathlib import Path\n'), ((2035, 2048), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2046, 2048), False, 'from IPython import get_ipython\n'), ((2694, 2709), 'pathlib.Path', 'Path', (['file_name'], {}), '(file_name)\n', (2698, 2709), False, 'from pathlib import Path\n'), ((2961, 2974), 'IPython.get_ipython', 'get_ipython', ([], {}), '()\n', (2972, 2974), False, 'from IPython import get_ipython\n')] |
#import argparse
import sys
import numpy as np
# CLI: argv[1]=sample list file, argv[2]=gene list, argv[3]=sample of interest,
# argv[4]=output file, argv[5]=number of IQR outlier levels,
# argv[6]=input files have a header (True/False), argv[7]=gene column index,
# argv[8]=expression column index, argv[9]=log10-transform flag (True/False),
# argv[10]=CNV column index.
input_files=open(sys.argv[1],'r')
gene_list_file=open(sys.argv[2],'r')
gene_list=[]
for gene in gene_list_file:
    gene_list.append(gene.strip())
gene_list_file.close()
sample_of_interest=sys.argv[3]
output_file=open(sys.argv[4],'w')
outlier_levels=int(sys.argv[5])
if sys.argv[6] == "True":
    input_file_header = True
elif sys.argv[6] == "False":
    input_file_header = False
else:
    sys.exit("Input file header argument must be True or False.")
gene_column=int(sys.argv[7])
expr_column=int(sys.argv[8])
cnv_column=int(sys.argv[10])
if sys.argv[9] == "True":
    log10_transform = True
elif sys.argv[9] == "False":
    log10_transform = False
else:
    sys.exit("log10 tranformation argument must be True or False.")
# gene -> [expression values per sample, over-thresholds, under-thresholds,
#          CNV values per sample]
gene_dict = {}
for gene in gene_list:
    if gene in gene_dict:
        # NOTE(review): bare `next` is a no-op expression here; `pass` or
        # `continue` was presumably intended (behavior is the same either way).
        next
    else:
        gene_dict[gene] = [[],None,None,[]]
sample_list = []
# Each line of the sample list file: "<sample_name> <path_to_expression_file>".
for line in input_files:
    line = line.strip().split()
    sample = line[0]
    sample_file = open(line[1], 'r')
    if input_file_header:
        sample_file.readline()
    if sample in sample_list:
        sys.exit("Sample name "+sample+" appears at least twice in sample file list input file.")
    else:
        sample_list.append(sample)
    sample_dict = {}
    for sline in sample_file:
        sline = sline.strip().split()
        gene = sline[gene_column]
        cnv = sline[cnv_column]
        if log10_transform:
            # log10(x + 1) guards against zero expression values.
            expr = float(np.log10(float(sline[expr_column])+1))
        else:
            expr = float(sline[expr_column])
        if gene in sample_dict:
            sys.exit("Gene "+gene+" appears at least twice in sample "+sample+" input file.")
        else:
            sample_dict[gene] = [float(expr), cnv]
    # Genes absent from this sample are padded with the string "NA".
    for gene in gene_list:
        if gene in sample_dict:
            gene_dict[gene][0].append(sample_dict[gene][0])
            gene_dict[gene][3].append(sample_dict[gene][1])
        else:
            gene_dict[gene][0].append("NA")
            gene_dict[gene][3].append("NA")
    sample_file.close()
input_files.close()
#check that sample of interest appears in input files
if not sample_of_interest in sample_list:
    sys.exit("Sample of interest not found in list of input files")
# Compute Tukey-style IQR outlier thresholds per gene:
# level i => Q3 + 1.5*IQR*i (over) and Q1 - 1.5*IQR*i (under).
for gene in gene_list:
    if gene in gene_dict:
        expr_values = gene_dict[gene][0]
        if all([ x == "NA" for x in expr_values]):
            gene_dict[gene][1] = "NA"
            gene_dict[gene][2] = "NA"
        else:
            # NOTE(review): if only SOME samples are "NA", the string values
            # reach np.percentile / the `<=` comparison below -- confirm
            # inputs guarantee all-or-none missingness per gene.
            pct75 = np.percentile(expr_values, 75)
            pct25 = np.percentile(expr_values, 25)
            iqr = pct75-pct25
            overexpression_outlier_level_list = []
            underexpression_outlier_level_list = []
            for i in range(outlier_levels):
                overexpression_outlier_level_list.append(pct75+1.5*iqr*(i+1))
                underexpression_outlier_level_list.append(pct25-1.5*iqr*(i+1))
            gene_dict[gene][1] = overexpression_outlier_level_list
            gene_dict[gene][2] = underexpression_outlier_level_list
# Tab-separated output: one row per gene for the sample of interest.
output_file.write( '\t'.join(['gene','sample','cnv','expression_level','percentile'])+'\t'+'\t'.join(['overexpression'+str(x) for x in range(1,outlier_levels+1)])+'\t'+'\t'.join(['underexpression'+str(x) for x in range(1,outlier_levels+1)])+'\n')
sample_number=sample_list.index(sample_of_interest)
for gene in gene_list:
    gene_na = all([x == "NA" for x in gene_dict[gene][0]])
    expr = gene_dict[gene][0][sample_number]
    cnv = gene_dict[gene][3][sample_number]
    if gene_na:
        pct = "NA"
    else:
        # Empirical percentile: fraction of samples with expression <= this one.
        pct = float( [ x <= expr for x in gene_dict[gene][0] ].count(True) )/len(sample_list)
    output_list = [gene, sample_list[sample_number], str(cnv), str(expr), str(pct)]
    if gene_na:
        output_list.extend(["NA"]*outlier_levels*2)
    else:
        # 1/0 flags per outlier level: strictly above/below each threshold.
        for i in range(outlier_levels):
            output_list.append(str(int(expr > gene_dict[gene][1][i])))
        for i in range(outlier_levels):
            output_list.append(str(int(expr < gene_dict[gene][2][i])))
    output_file.write('\t'.join(output_list)+'\n')
output_file.close()
| [
"numpy.percentile",
"sys.exit"
] | [((2089, 2152), 'sys.exit', 'sys.exit', (['"""Sample of interest not found in list of input files"""'], {}), "('Sample of interest not found in list of input files')\n", (2097, 2152), False, 'import sys\n'), ((433, 494), 'sys.exit', 'sys.exit', (['"""Input file header argument must be True or False."""'], {}), "('Input file header argument must be True or False.')\n", (441, 494), False, 'import sys\n'), ((696, 759), 'sys.exit', 'sys.exit', (['"""log10 tranformation argument must be True or False."""'], {}), "('log10 tranformation argument must be True or False.')\n", (704, 759), False, 'import sys\n'), ((1091, 1188), 'sys.exit', 'sys.exit', (["('Sample name ' + sample +\n ' appears at least twice in sample file list input file.')"], {}), "('Sample name ' + sample +\n ' appears at least twice in sample file list input file.')\n", (1099, 1188), False, 'import sys\n'), ((2373, 2403), 'numpy.percentile', 'np.percentile', (['expr_values', '(75)'], {}), '(expr_values, 75)\n', (2386, 2403), True, 'import numpy as np\n'), ((2418, 2448), 'numpy.percentile', 'np.percentile', (['expr_values', '(25)'], {}), '(expr_values, 25)\n', (2431, 2448), True, 'import numpy as np\n'), ((1546, 1639), 'sys.exit', 'sys.exit', (["('Gene ' + gene + ' appears at least twice in sample ' + sample +\n ' input file.')"], {}), "('Gene ' + gene + ' appears at least twice in sample ' + sample +\n ' input file.')\n", (1554, 1639), False, 'import sys\n')] |
#-----------------------------------------------------------------------------#
# Colorado Cannabis Cultivation and Dispensaries
# February 2017
# Sources:
# https://www.colorado.gov/pacific/enforcement/med-licensed-facilities
# https://demography.dola.colorado.gov/gis/gis-data/#gis-data
# https://www.colorado.gov/pacific/revenue/colorado-marijuana-tax-data
#-----------------------------------------------------------------------------#
import numpy as np
from mpl_toolkits.basemap import Basemap
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import rgb2hex
from matplotlib.patches import Polygon
import matplotlib.patches as mpatches
import pandas as pd
from colors import *
plt.rc('text', usetex=True)
#plt.rc('font',family='Computer Modern Roman')
plt.rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
#-----------------------------------------------------------------------------#
# DATA
#-----------------------------------------------------------------------------#
# Load licensed-facility counts per county from the local MED workbook.
# NOTE(review): `sheetname`/`parse_cols` are legacy pandas read_excel keywords
# (renamed sheet_name/usecols in later pandas) -- confirm the pinned version.
workbook = r'C:\Users\Lea-\Documents\Analytics\Research\Colorado-Market\Data\Colorado-cannabis-data.xlsx'
df = pd.read_excel(workbook, sheetname='Facilities-by-County',parse_cols =11,col=0)
Total_Facilities_by_County = df['Total-Facilities']
#------------------------------ COLORADO BASEMAP -----------------------------#
# width and height of state in meters
# lat_0 and lon_0 are the center of the state
# lat_0=39.113014,lon_0=-105.358887,
# width=610000*1.05,height=450000*1.05,
# Lambert conformal basemap clipped to Colorado's bounding box, with a small
# padding margin on each side.
# NOTE: `map` shadows the builtin of the same name for the rest of the script.
map = Basemap(
    llcrnrlon = -109.060176 - .1,
    llcrnrlat = 36.992424 - .1,
    urcrnrlon = -102.041522 + .25,
    urcrnrlat = 41.003443999999995 + .005,
    lat_0=39.113014,lon_0=-105.358887,
    projection='lcc',
    resolution='c')
# optional: rsphere=6370000.00
# County polygons from the ACS shapefile; accessible as map.states afterwards.
shp_info = map.readshapefile('ACS1014_county','states',drawbounds=True)
# Info: print(shp_info) #print(map.states_info[0].keys())
colors={}
countynames=[]
#---------------------------------- COUNTY DATA -----------------------------#
# Total licensed facilities per county (Feb 2017). Keys must match the NAME
# attribute of the ACS1014_county shapefile records.
# NOTE(review): several county names were replaced by the '<NAME>' placeholder
# (apparently by an anonymization pass); duplicate '<NAME>' keys silently
# overwrite each other in this dict literal, so the affected counties
# (e.g. El Paso at 376.0) lose their values -- restore the real names.
map_data = {
    'Adams' : 74.0 ,
    'Alamosa' : 2.0 ,
    'Arapahoe' : 41.0 ,
    'Archuleta' : 13.0 ,
    'Baca' : 0.0 ,
    'Bent' : 0.0 ,
    'Boulder' : 187.0 ,
    'Broomfield': 0.0 ,
    'Chaffee' : 11.0 ,
    'Cheyenne' : 0.0 ,
    'Clear Creek': 36.0 ,
    'Conejos' : 5.0 ,
    'Costilla' : 16.0 ,
    'Crowley' : 0.0 ,
    'Custer' : 0.0 ,
    'Delta' : 0.0 ,
    'Denver' : 1227.0,
    'Dolores' : 0.0 ,
    'Douglas' : 0.0 ,
    'Eagle' : 33.0 ,
    'Elbert' : 0.0 ,
    '<NAME>' : 376.0 ,
    'Fremont' : 30.0 ,
    'Garfield' : 60.0 ,
    'Gilpin' : 10.0 ,
    'Grand' : 13.0 ,
    'Gunnison' : 25.0 ,
    'Hinsdale' : 0.0 ,
    'Huerfano' : 12.0 ,
    'Jackson' : 0.0 ,
    'Jefferson' : 70.0 ,
    'Kiowa' : 0.0 ,
    '<NAME>': 0.0 ,
    'Lake' : 33.0 ,
    'La Plata' : 10.0 ,
    'Larimer' : 60.0 ,
    '<NAME>as': 60.0 ,
    'Lincoln' : 0.0 ,
    'Logan' : 0.0 ,
    'Mesa' : 7.0 ,
    'Mineral' : 0.0 ,
    'Moffat' : 1.0 ,
    'Montezuma' : 18.0 ,
    'Montrose' : 9.0 ,
    'Morgan' : 17.0 ,
    'Otero' : 3.0 ,
    'Ouray' : 10.0 ,
    'Park' : 32.0 ,
    'Phillips' : 0.0 ,
    'Pitkin' : 17.0 ,
    'Prowers' : 0.0 ,
    'Pueblo' : 237.0 ,
    '<NAME>': 0.0 ,
    '<NAME>': 3.0 ,
    'Routt' : 42.0 ,
    'Saguache' : 20.0 ,
    'San Juan' : 4.0 ,
    '<NAME>': 26.0 ,
    'Sedgwick' : 4.0 ,
    'Summit' : 23.0 ,
    'Teller' : 5.0 ,
    'Washington': 0.0 ,
    'Weld' : 20.0 ,
    'Yuma' : 0.0
    }
# Filled while coloring the county polygons below.
area_names = []
area_colors={}
#------------------------------------PLOTTING---------------------------------#
"""HEATMAP"""
cmap = plt.cm.BuPu_r # use 'hot' colormap http://matplotlib.org/examples/color/colormaps_reference.html
vmin = 0.0; vmax = 1250.0 # set range.
for shapedict in map.states_info:
area_name = shapedict['NAME']
weight = map_data[area_name]
# calling colormap with value between 0 and 1 returns
# rgba value. Invert color range (hot colors have high weight),
# and then takes the sqrt root to spread out colors more.
area_colors[area_name] = cmap(1.-np.sqrt(np.sqrt((weight-vmin)/(vmax-vmin))))[:3]
area_names.append(area_name)
# cycle through state names, color each one.
ax = plt.gca() # get current axes instance
for nshape,seg in enumerate(map.states):
area_color = rgb2hex(area_colors[area_names[nshape]])
poly = Polygon(seg,facecolor=area_color,edgecolor=area_color)
ax.add_patch(poly)
""" ANNOTATIONS """
#Adams
x, y = map(-104.35, 39.87)
plt.annotate('1', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#
#Alamosa
x, y = map(-105.75, 37.55)
plt.annotate('2', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Arapahoe
x, y = map(-104.33264, 39.65)
plt.annotate('3', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Archuleta
x, y = map(-107.00670, 37.175)
plt.annotate('4', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Baca
x, y = map(-102.52980, 37.3)
plt.annotate('5', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Bent
x, y = map(-103.08179, 37.95)
plt.annotate('6', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Boulder
x, y = map(-105.35, 40.1)
plt.annotate('7', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Broomfield
x, y = map(-105.04405, 39.94202)
plt.annotate('8', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Chaffee
x, y = map(-106.15, 38.70)
plt.annotate('9', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Cheyenne
x, y = map(-102.62162, 38.85)
plt.annotate('10', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Clear Creek
x, y = map(-105.64125, 39.69045)
plt.annotate('11', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Conejos
x, y = map(-106.20, 37.175)
plt.annotate('12', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Costilla
x, y = map(-105.45, 37.30)
plt.annotate('13', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Crowley
x, y = map(-103.775, 38.325)
plt.annotate('14', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Custer
x, y = map(-105.35, 38.075)
plt.annotate('15', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Delta
x, y = map(-107.80, 38.85314)
plt.annotate('16', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Dolores
x, y = map(-108.5, 37.75303)
plt.annotate('18', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
#Douglas
x, y = map(-104.93889, 39.35)
plt.annotate('19', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
# County-number labels 20-64 as (label, longitude, latitude).
# Replaces the long run of repeated "x, y = map(lon, lat); plt.annotate(...)"
# pairs; iteration order matches the original call order exactly.
# NOTE(review): `map` here appears to be the Basemap projection instance
# created earlier in the script (it shadows the builtin `map`) -- confirm.
county_labels = [
    ('20', -106.7, 39.6),          # Eagle
    ('21', -104.50, 38.80),        # <NAME>
    ('22', -104.15, 39.3),         # Elbert
    ('23', -105.45, 38.475),       # Fremont
    ('24', -107.65, 39.6),         # Garfield
    ('25', -105.50, 39.8625),      # Gilpin
    ('26', -106.10, 40.10),        # Grand
    ('27', -107.00670, 38.70),     # Gunnison
    ('28', -107.275, 37.80),       # Hinsdale
    ('29', -104.93889, 37.65),     # Huerfano
    ('30', -106.3, 40.69),         # Jackson
    ('31', -105.225, 39.58003),    # Jefferson
    ('32', -102.62, 38.425),       # Kiowa
    ('33', -102.6, 39.3),          # <NAME>
    ('34', -107.80, 37.325),       # La Plata
    ('35', -106.35, 39.19412),     # Lake
    ('36', -105.45, 40.69),        # Larimer
    ('37', -104.10013, 37.30),     # Las Animas
    ('38', -103.40, 39.04575),     # Lincoln
    ('39', -103.1, 40.70562),      # Logan
    ('40', -108.61756, 38.95854),  # Mesa
    ('41', -106.90, 37.65837),     # Mineral
    ('42', -108.23775, 40.61384),  # Moffat
    ('43', -108.61756, 37.325),    # Montezuma
    ('44', -108.14287, 38.46830),  # Montrose
    ('45', -103.8, 40.25),         # Morgan
    ('46', -103.7, 37.9),          # Otero
    ('47', -107.76362, 38.15),     # Ouray
    ('48', -105.7, 39.10),         # Park
    ('49', -102.34639, 40.6),      # Phillips
    ('50', -106.9, 39.2),          # Pitkin
    ('51', -102.35, 37.95),        # Prowers
    ('52', -104.50, 38.15),        # Pueblo
    ('53', -108.23775, 39.98143),  # <NAME>
    ('54', -106.35, 37.57495),     # <NAME>
    ('55', -107.0, 40.47716),      # Routt
    ('56', -106.3, 38.1),          # Saguache
    ('57', -107.65, 37.75737),     # <NAME>
    ('58', -108.5, 38.025),        # <NAME>
    ('59', -102.34639, 40.85),     # Sedgwick
    ('60', -106.025, 39.575),      # Summit
    ('61', -105.17268, 38.86116),  # Teller
    ('62', -103.20, 40.0),         # Washington
    ('63', -104.45, 40.55),        # Weld
    ('64', -102.4, 40.0),          # Yuma
]
for label, lon, lat in county_labels:
    x, y = map(lon, lat)
    plt.annotate(label, xy=(x, y), xycoords='data', fontsize=8,
                 ha='center', va='center')
#Denver
x, y = map(-104.875, 39.9)
#plt.annotate('17', xy=(x,y), xycoords='data',fontsize=8,ha='center',va='center')
# Label '17' is drawn outside the county with plain leader lines pointing in.
plt.annotate('17',
        xy=map(-105.1, 40.0),
        xytext=map(-104.95, 39.875),
        arrowprops=dict(arrowstyle="-"),
        )
plt.annotate('',
        xy=map(-105.05, 39.7),
        xytext=map(-104.875, 39.875),
        arrowprops=dict(arrowstyle="-"),
        )
#import Image
#im = Image.open('/Users/Lea-/Documents/Cannabis Analytics/Journal/Issue-1/Pictures/Cannabis_leaf_medium_black.png')
#height = im.size[1]
#im = np.array(im).astype(np.float) / 255
#plt.figimage(im, 210., 225. , zorder=99)
"""HEAT MAP LEGEND"""
legend_range = np.zeros((1,20))
for i in range(20):
legend_range[0,i] = (i*5)/100.0
img = ax.imshow(legend_range, interpolation='nearest', vmin=vmin, vmax=vmax,
cmap = plt.cm.BuPu)
color_bar = plt.colorbar(img,ticks=[0,250,500,750,1000,1250],
orientation='horizontal',
shrink=0.8,
pad= 0.05)
color_bar.ax.set_xticklabels(['0','250','500', '750', '1,000','1,250'])
#-----------------------------------------------------------------------------#
# Hide the axes frame, size the figure, and display it.
plt.box(on=None)
fig = plt.gcf()
fig.set_size_inches(6.5, 6.5)
# Uncomment to export the figure instead of (or before) showing it:
#plt.savefig('/Users/Lea-/Documents/Cannabis Analytics/Journal/Issue-1/Pictures/business_map.pdf',
#            bbox_inches='tight',
#            pad_inches = 0.05,
#            format='pdf',
#            dpi=300)
plt.show()
#-----------------------------------------------------------------------------#
# SCRAP
#-----------------------------------------------------------------------------#
#for shapedict in map.states_info: #Assigns colors
# countyname = shapedict['NAME']
# law = counties[countyname]
# if law==1:
# colors[countyname] = Tgrey
# else:
# colors[countyname] = Tgrey
# countynames.append(countyname)
#ax = plt.gca() #Gets current axes and cycle through to color each one.
#
#for nshape,seg in enumerate(map.states):
# if countynames[nshape] in ord_list:
# color = rgb2hex(colors[countynames[nshape]])
# poly = Polygon(seg,facecolor=color,edgecolor=almost_black,alpha=0.8,hatch='...')
# ax.add_patch(poly)
# else:
# color = rgb2hex(colors[countynames[nshape]])
# poly = Polygon(seg,facecolor=color,edgecolor=color,alpha=.2)
# ax.add_patch(poly)
#-------------------------------------LEGEND----------------------------------#
#laws = mpatches.Patch(facecolor=Tgrey, alpha=0.8,edgecolor=almost_black,
# hatch='...',label='Counties with MMJ Ordinances')
#no_laws = mpatches.Patch(facecolor=Tgrey, alpha=0.2,
# label='Counties without MMJ Ordinances')
#city_laws = mpatches.Circle((0.5, 0.5), 0.1, facecolor=almost_black,
# label='Cities with MMJ Ordinances')
##counties = mpatches.Patch(facecolor='white',edgecolor='white',
## label='A: Example County \
## \nB: Example County')
##cities = mpatches.Patch(facecolor='white',edgecolor='white',
## label='1: Example City \
## \n2: Example City')
##For bullet in legend
#from matplotlib.legend_handler import HandlerPatch
#class HandlerBullet(HandlerPatch):
# def create_artists(self, legend, orig_handle,
# xdescent, ydescent, width, height, fontsize, trans):
# center = 0.5 * width - 0.5 * xdescent, 0.5 * height - 0.5 * ydescent
# p = mpatches.Circle(xy=center,radius=4.) #Adjust radius for size
# self.update_prop(p, orig_handle, legend)
# p.set_transform(trans)
# return [p]
##Can add counties,cities
#plt.legend(handles=[no_laws,laws,city_laws],
# loc='upper right',
# bbox_to_anchor=(1.05,.95),frameon=True, shadow=False,
# ncol=1,fontsize=11,
# handler_map={mpatches.Circle: HandlerBullet()}) | [
"matplotlib.pyplot.show",
"matplotlib.pyplot.annotate",
"numpy.zeros",
"matplotlib.pyplot.box",
"matplotlib.pyplot.colorbar",
"pandas.read_excel",
"matplotlib.patches.Polygon",
"matplotlib.colors.rgb2hex",
"matplotlib.pyplot.rc",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"mpl_toolkits.... | [((798, 825), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (804, 825), True, 'import matplotlib.pyplot as plt\n'), ((873, 944), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {}), "('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})\n", (879, 944), True, 'import matplotlib.pyplot as plt\n'), ((1253, 1332), 'pandas.read_excel', 'pd.read_excel', (['workbook'], {'sheetname': '"""Facilities-by-County"""', 'parse_cols': '(11)', 'col': '(0)'}), "(workbook, sheetname='Facilities-by-County', parse_cols=11, col=0)\n", (1266, 1332), True, 'import pandas as pd\n'), ((1671, 1882), 'mpl_toolkits.basemap.Basemap', 'Basemap', ([], {'llcrnrlon': '(-109.060176 - 0.1)', 'llcrnrlat': '(36.992424 - 0.1)', 'urcrnrlon': '(-102.041522 + 0.25)', 'urcrnrlat': '(41.003443999999995 + 0.005)', 'lat_0': '(39.113014)', 'lon_0': '(-105.358887)', 'projection': '"""lcc"""', 'resolution': '"""c"""'}), "(llcrnrlon=-109.060176 - 0.1, llcrnrlat=36.992424 - 0.1, urcrnrlon=-\n 102.041522 + 0.25, urcrnrlat=41.003443999999995 + 0.005, lat_0=\n 39.113014, lon_0=-105.358887, projection='lcc', resolution='c')\n", (1678, 1882), False, 'from mpl_toolkits.basemap import Basemap\n'), ((4162, 4171), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4169, 4171), True, 'import matplotlib.pyplot as plt\n'), ((4444, 4532), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""1"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('1', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (4456, 4532), True, 'import matplotlib.pyplot as plt\n'), ((4563, 4651), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""2"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('2', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (4575, 4651), True, 'import 
matplotlib.pyplot as plt\n'), ((4684, 4772), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""3"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('3', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (4696, 4772), True, 'import matplotlib.pyplot as plt\n'), ((4807, 4895), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""4"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('4', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (4819, 4895), True, 'import matplotlib.pyplot as plt\n'), ((4923, 5011), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""5"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('5', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (4935, 5011), True, 'import matplotlib.pyplot as plt\n'), ((5040, 5128), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""6"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('6', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5052, 5128), True, 'import matplotlib.pyplot as plt\n'), ((5156, 5244), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""7"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('7', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5168, 5244), True, 'import matplotlib.pyplot as plt\n'), ((5282, 5370), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""8"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('8', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5294, 5370), True, 'import matplotlib.pyplot as plt\n'), ((5399, 5487), 
'matplotlib.pyplot.annotate', 'plt.annotate', (['"""9"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('9', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5411, 5487), True, 'import matplotlib.pyplot as plt\n'), ((5520, 5609), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""10"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('10', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5532, 5609), True, 'import matplotlib.pyplot as plt\n'), ((5648, 5737), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""11"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('11', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5660, 5737), True, 'import matplotlib.pyplot as plt\n'), ((5767, 5856), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""12"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('12', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5779, 5856), True, 'import matplotlib.pyplot as plt\n'), ((5886, 5975), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""13"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('13', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (5898, 5975), True, 'import matplotlib.pyplot as plt\n'), ((6006, 6095), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""14"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('14', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6018, 6095), True, 'import matplotlib.pyplot as plt\n'), ((6124, 6213), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""15"""'], 
{'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('15', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6136, 6213), True, 'import matplotlib.pyplot as plt\n'), ((6243, 6332), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""16"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('16', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6255, 6332), True, 'import matplotlib.pyplot as plt\n'), ((6363, 6452), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""18"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('18', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6375, 6452), True, 'import matplotlib.pyplot as plt\n'), ((6484, 6573), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""19"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('19', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6496, 6573), True, 'import matplotlib.pyplot as plt\n'), ((6598, 6687), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""20"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('20', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6610, 6687), True, 'import matplotlib.pyplot as plt\n'), ((6715, 6804), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""21"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('21', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6727, 6804), True, 'import matplotlib.pyplot as plt\n'), ((6831, 6920), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""22"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 
'ha': '"""center"""', 'va': '"""center"""'}), "('22', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6843, 6920), True, 'import matplotlib.pyplot as plt\n'), ((6950, 7039), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""23"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('23', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (6962, 7039), True, 'import matplotlib.pyplot as plt\n'), ((7068, 7157), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""24"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('24', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7080, 7157), True, 'import matplotlib.pyplot as plt\n'), ((7187, 7276), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""25"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('25', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7199, 7276), True, 'import matplotlib.pyplot as plt\n'), ((7303, 7392), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""26"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('26', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7315, 7392), True, 'import matplotlib.pyplot as plt\n'), ((7425, 7514), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""27"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('27', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7437, 7514), True, 'import matplotlib.pyplot as plt\n'), ((7545, 7634), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""28"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('28', xy=(x, 
y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7557, 7634), True, 'import matplotlib.pyplot as plt\n'), ((7667, 7756), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""29"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('29', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7679, 7756), True, 'import matplotlib.pyplot as plt\n'), ((7784, 7873), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""30"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('30', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7796, 7873), True, 'import matplotlib.pyplot as plt\n'), ((7908, 7997), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""31"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('31', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (7920, 7997), True, 'import matplotlib.pyplot as plt\n'), ((8025, 8114), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""32"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('32', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8037, 8114), True, 'import matplotlib.pyplot as plt\n'), ((8140, 8229), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""33"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('33', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8152, 8229), True, 'import matplotlib.pyplot as plt\n'), ((8260, 8349), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""34"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('34', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 
'center')\n", (8272, 8349), True, 'import matplotlib.pyplot as plt\n'), ((8378, 8467), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""35"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('35', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8390, 8467), True, 'import matplotlib.pyplot as plt\n'), ((8496, 8585), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""36"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('36', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8508, 8585), True, 'import matplotlib.pyplot as plt\n'), ((8620, 8709), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""37"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('37', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8632, 8709), True, 'import matplotlib.pyplot as plt\n'), ((8741, 8830), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""38"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('38', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8753, 8830), True, 'import matplotlib.pyplot as plt\n'), ((8859, 8948), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""39"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('39', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8871, 8948), True, 'import matplotlib.pyplot as plt\n'), ((8980, 9069), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""40"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('40', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (8992, 9069), True, 'import matplotlib.pyplot 
as plt\n'), ((9101, 9190), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""41"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('41', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9113, 9190), True, 'import matplotlib.pyplot as plt\n'), ((9224, 9313), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""42"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('42', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9236, 9313), True, 'import matplotlib.pyplot as plt\n'), ((9348, 9437), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""43"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('43', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9360, 9437), True, 'import matplotlib.pyplot as plt\n'), ((9473, 9562), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""44"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('44', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9485, 9562), True, 'import matplotlib.pyplot as plt\n'), ((9589, 9678), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""45"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('45', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9601, 9678), True, 'import matplotlib.pyplot as plt\n'), ((9703, 9792), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""46"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('46', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9715, 9792), True, 'import matplotlib.pyplot as plt\n'), ((9822, 9911), 'matplotlib.pyplot.annotate', 
'plt.annotate', (['"""47"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('47', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9834, 9911), True, 'import matplotlib.pyplot as plt\n'), ((9936, 10025), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""48"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('48', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (9948, 10025), True, 'import matplotlib.pyplot as plt\n'), ((10057, 10146), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""49"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('49', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10069, 10146), True, 'import matplotlib.pyplot as plt\n'), ((10172, 10261), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""50"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('50', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10184, 10261), True, 'import matplotlib.pyplot as plt\n'), ((10290, 10379), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""51"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('51', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10302, 10379), True, 'import matplotlib.pyplot as plt\n'), ((10407, 10496), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""52"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('52', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10419, 10496), True, 'import matplotlib.pyplot as plt\n'), ((10530, 10619), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""53"""'], {'xy': 
'(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('53', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10542, 10619), True, 'import matplotlib.pyplot as plt\n'), ((10650, 10739), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""54"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('54', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10662, 10739), True, 'import matplotlib.pyplot as plt\n'), ((10768, 10857), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""55"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('55', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10780, 10857), True, 'import matplotlib.pyplot as plt\n'), ((10885, 10974), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""56"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('56', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (10897, 10974), True, 'import matplotlib.pyplot as plt\n'), ((11005, 11094), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""57"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('57', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11017, 11094), True, 'import matplotlib.pyplot as plt\n'), ((11122, 11211), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""58"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('58', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11134, 11211), True, 'import matplotlib.pyplot as plt\n'), ((11244, 11333), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""59"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 
'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('59', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11256, 11333), True, 'import matplotlib.pyplot as plt\n'), ((11363, 11452), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""60"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('60', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11375, 11452), True, 'import matplotlib.pyplot as plt\n'), ((11486, 11575), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""61"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('61', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11498, 11575), True, 'import matplotlib.pyplot as plt\n'), ((11606, 11695), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""62"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('62', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11618, 11695), True, 'import matplotlib.pyplot as plt\n'), ((11721, 11810), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""63"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('63', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11733, 11810), True, 'import matplotlib.pyplot as plt\n'), ((11834, 11923), 'matplotlib.pyplot.annotate', 'plt.annotate', (['"""64"""'], {'xy': '(x, y)', 'xycoords': '"""data"""', 'fontsize': '(8)', 'ha': '"""center"""', 'va': '"""center"""'}), "('64', xy=(x, y), xycoords='data', fontsize=8, ha='center', va=\n 'center')\n", (11846, 11923), True, 'import matplotlib.pyplot as plt\n'), ((12615, 12632), 'numpy.zeros', 'np.zeros', (['(1, 20)'], {}), '((1, 20))\n', (12623, 12632), True, 'import numpy as np\n'), ((12813, 12921), 
'matplotlib.pyplot.colorbar', 'plt.colorbar', (['img'], {'ticks': '[0, 250, 500, 750, 1000, 1250]', 'orientation': '"""horizontal"""', 'shrink': '(0.8)', 'pad': '(0.05)'}), "(img, ticks=[0, 250, 500, 750, 1000, 1250], orientation=\n 'horizontal', shrink=0.8, pad=0.05)\n", (12825, 12921), True, 'import matplotlib.pyplot as plt\n'), ((13159, 13175), 'matplotlib.pyplot.box', 'plt.box', ([], {'on': 'None'}), '(on=None)\n', (13166, 13175), True, 'import matplotlib.pyplot as plt\n'), ((13425, 13435), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13433, 13435), True, 'import matplotlib.pyplot as plt\n'), ((4258, 4298), 'matplotlib.colors.rgb2hex', 'rgb2hex', (['area_colors[area_names[nshape]]'], {}), '(area_colors[area_names[nshape]])\n', (4265, 4298), False, 'from matplotlib.colors import rgb2hex\n'), ((4311, 4367), 'matplotlib.patches.Polygon', 'Polygon', (['seg'], {'facecolor': 'area_color', 'edgecolor': 'area_color'}), '(seg, facecolor=area_color, edgecolor=area_color)\n', (4318, 4367), False, 'from matplotlib.patches import Polygon\n'), ((13176, 13185), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13183, 13185), True, 'import matplotlib.pyplot as plt\n'), ((4038, 4078), 'numpy.sqrt', 'np.sqrt', (['((weight - vmin) / (vmax - vmin))'], {}), '((weight - vmin) / (vmax - vmin))\n', (4045, 4078), True, 'import numpy as np\n')] |
""" Final assignment for this week! In this assignment you will learn to implement and use gradient checking.
You are part of a team working to make mobile payments available globally, and are asked to build
a deep learning model to detect fraud--whenever someone makes a payment, you want to see
if the payment might be fraudulent, such as if the user's account has been taken over by a hacker.
But backpropagation is quite challenging to implement, and sometimes has bugs.
Because this is a mission-critical application, your company's CEO wants to be really certain that
your implementation of backpropagation is correct. Your CEO says, "Give me a proof that
your backpropagation is actually working!" To give this reassurance, you are going to use "gradient checking".
Let's do it!"""
import numpy as np
from testCases import *
from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector
def forward_propagation(x, theta):
    """
    Linear forward propagation: evaluate the cost J(theta) = theta * x.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well

    Returns:
    J -- the value of function J, computed using the formula J(theta) = theta * x
    """
    return theta * x
# Quick sanity check of the forward pass: J(4) with x = 2 should print J = 8.
x, theta = 2, 4
J = forward_propagation(x, theta)
print(f"J = {J}")
# Implement the backward propagation step (derivative computation) -- the derivative of J(theta) = theta*x
# with respect to theta.You should get dtheta = partial J / partial theta = x
def backward_propagation(x, theta):
    """Backward pass for J(theta) = theta * x.

    Since J is linear in theta, dJ/dtheta is simply x.

    Arguments:
    x -- a real-valued input
    theta -- the (scalar) parameter

    Returns:
    dtheta -- the gradient of the cost with respect to theta
    """
    return x
# Quick sanity check of the backward pass: dJ/dtheta = x = 2.
x, theta = 2, 4
dtheta = backward_propagation(x, theta)
print("dtheta = " + str(dtheta))
""" Gradient check:
First compute "gradapprox"
Then compute the gradient using backward propagation, and store the result in a variable "grad"
Finally, compute the relative difference between "gradapprox" and the "grad"
You will need 3 Steps to compute this formula:
- compute the numerator using np.linalg.norm(...)
- compute the denominator. You will need to call np.linalg.norm(...) twice.
- divide them.
If this difference is small (say less than 10^{-7}), you can be quite confident that you have computed
your gradient correctly. Otherwise, there may be a mistake in the gradient computation."""
def gradient_check(x, theta, epsilon = 1e-7):
    """Compare the analytic gradient of J(theta) = theta * x against a
    centered finite-difference approximation.

    Arguments:
    x -- a real-valued input
    theta -- our parameter, a real number as well
    epsilon -- tiny shift used for the two-sided difference quotient

    Returns:
    difference -- relative gap between the approximated gradient and the
                  backward-propagation gradient
    """
    # Two-sided difference quotient: (J(theta+eps) - J(theta-eps)) / (2*eps).
    # epsilon is small enough that we need not worry about the limit.
    cost_plus = (theta + epsilon) * x
    cost_minus = (theta - epsilon) * x
    gradapprox = (cost_plus - cost_minus) / (2 * epsilon)

    # Analytic gradient of J(theta) = theta * x is just x.
    grad = x

    # Relative difference: ||grad - gradapprox|| / (||grad|| + ||gradapprox||)
    numerator = np.linalg.norm(grad - gradapprox)
    denominator = np.linalg.norm(grad) + np.linalg.norm(gradapprox)
    difference = numerator / denominator

    if difference < 1e-7:
        print("The gradient is correct!")
    else:
        print("The gradient is wrong!")
    return difference
# Run the 1-D gradient check on the toy example.
x, theta = 2, 4
difference = gradient_check(x, theta)
print("difference = " + str(difference))
# difference = 2.919335883291695e-10 --> since the difference is smaller than the 10^{-7} threshold, gradient is correct
""" In the more general case, your cost function J has more than a single 1D input.
When you are training a neural network, theta actually consists of multiple matrices W[l] and biases b[l]!
It is important to know how to do a gradient check with higher-dimensional inputs. Let's do it!"""
""" N-dimensional gradient checking
Let's look at your implementations for forward propagation and backward propagation."""
def forward_propagation_n(X, Y, parameters):
    """Forward pass LINEAR -> RELU -> LINEAR -> RELU -> LINEAR -> SIGMOID,
    followed by the logistic (cross-entropy) cost.

    Arguments:
    X -- training set for m examples
    Y -- true "labels" for m examples
    parameters -- python dictionary with "W1", "b1", "W2", "b2", "W3", "b3":
                  W1 (5, 4), b1 (5, 1), W2 (3, 5), b2 (3, 1),
                  W3 (1, 3), b3 (1, 1)

    Returns:
    cost -- the logistic cost averaged over the m examples
    cache -- all intermediate quantities, needed later by backprop
    """
    m = X.shape[1]

    # Unpack the parameter dictionary.
    W1, b1 = parameters["W1"], parameters["b1"]
    W2, b2 = parameters["W2"], parameters["b2"]
    W3, b3 = parameters["W3"], parameters["b3"]

    # Three-layer forward pass.
    Z1 = np.dot(W1, X) + b1
    A1 = relu(Z1)
    Z2 = np.dot(W2, A1) + b2
    A2 = relu(Z2)
    Z3 = np.dot(W3, A2) + b3
    A3 = sigmoid(Z3)

    # Cross-entropy cost over the m examples.
    logprobs = np.multiply(-np.log(A3), Y) + np.multiply(-np.log(1 - A3), 1 - Y)
    cost = 1/m * np.sum(logprobs)

    cache = (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3)
    return cost, cache
def backward_propagation_n(X, Y, cache):
    """Backward pass for the three-layer network of forward_propagation_n.

    Args:
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    cache -- cache output from forward_propagation_n()

    Returns:
    gradients -- dictionary mapping each parameter, activation and
                 pre-activation name to the gradient of the cost w.r.t. it.
                 (The deliberately-buggy dW2/db1 variants from the exercise
                 have been corrected here.)
    """
    m = X.shape[1]
    (Z1, A1, W1, b1, Z2, A2, W2, b2, Z3, A3, W3, b3) = cache
    inv_m = 1. / m

    # Output layer: sigmoid + cross-entropy collapses to A3 - Y.
    dZ3 = A3 - Y
    dW3 = inv_m * np.dot(dZ3, A2.T)
    db3 = inv_m * np.sum(dZ3, axis=1, keepdims=True)

    # Second hidden layer; the ReLU gate passes gradient only where A2 > 0.
    dA2 = np.dot(W3.T, dZ3)
    dZ2 = np.multiply(dA2, np.int64(A2 > 0))
    dW2 = inv_m * np.dot(dZ2, A1.T)
    db2 = inv_m * np.sum(dZ2, axis=1, keepdims=True)

    # First hidden layer.
    dA1 = np.dot(W2.T, dZ2)
    dZ1 = np.multiply(dA1, np.int64(A1 > 0))
    dW1 = inv_m * np.dot(dZ1, X.T)
    db1 = inv_m * np.sum(dZ1, axis=1, keepdims=True)

    return {"dZ3": dZ3, "dW3": dW3, "db3": db3,
            "dA2": dA2, "dZ2": dZ2, "dW2": dW2, "db2": db2,
            "dA1": dA1, "dZ1": dZ1, "dW1": dW1, "db1": db1}
# You obtained some results on the fraud detection test set but you are not 100% sure of your model.
# Nobody's perfect! Let's implement gradient checking to verify if your gradients are correct.
# Want to compare "gradapprox" to the gradient computed by backpropagation
""" However, theta is not a scalar anymore. It is a dictionary called "parameters". We implemented
a function "dictionary_to_vector()" for you. It converts the "parameters" dictionary into a vector called "values",
obtained by reshaping all parameters (W1, b1, W2, b2, W3, b3) into vectors and concatenating them.
The inverse function is "vector_to_dictionary" which outputs back the "parameters" dictionary.
We have also converted the "gradients" dictionary into a vector "grad" using gradients_to_vector().
You don't need to worry about that.
To compute J_plus[i]:
Set theta^{+} to np.copy(parameters_values)
Set theta[i]^{+} to theta[i]^{+} + epsilon
Calculate J[i]^{+} using to forward_propagation_n(x, y, vector_to_dictionary(theta^{+})).
To compute J_minus[i]: do the same thing with theta^{-}
Compute gradapprox[i] = J[i]^{+} - J[i]^{-} / (2 * epsilon)
Thus, you get a vector gradapprox, where gradapprox[i] is an approximation of the gradient
with respect to parameter_values[i]. You can now compare this gradapprox vector to the gradients vector
from backpropagation. Just like for the 1D case (Steps 1', 2', 3'),
compute: difference = |grad - gradapprox|_2} / | grad |_2 + | gradapprox |_2 """
def gradient_check_n(parameters, gradients, X, Y, epsilon = 1e-7):
    """Check that backward_propagation_n matches a numerical gradient of the
    cost produced by forward_propagation_n.

    Arguments:
    parameters -- python dictionary containing your parameters
    gradients -- output of backward_propagation_n: gradients of the cost
                 with respect to the parameters
    X -- input datapoint, of shape (input size, 1)
    Y -- true "label"
    epsilon -- tiny shift used for the two-sided difference quotient

    Returns:
    difference -- relative gap between the approximated gradient and the
                  backward propagation gradient
    """
    # Flatten the parameter dict and the gradient dict into plain vectors.
    theta, _ = dictionary_to_vector(parameters)
    grad = gradients_to_vector(gradients)
    num_parameters = theta.shape[0]
    J_plus = np.zeros((num_parameters, 1))
    J_minus = np.zeros((num_parameters, 1))
    gradapprox = np.zeros((num_parameters, 1))

    # Perturb one coordinate at a time and re-evaluate the cost on each side.
    for i in range(num_parameters):
        bumped_up = np.copy(theta)
        bumped_up[i][0] += epsilon
        # forward_propagation_n also returns a cache, which we discard.
        J_plus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_up))

        bumped_down = np.copy(theta)
        bumped_down[i][0] -= epsilon
        J_minus[i], _ = forward_propagation_n(X, Y, vector_to_dictionary(bumped_down))

        gradapprox[i] = (J_plus[i] - J_minus[i]) / (2 * epsilon)

    # Relative difference between the two gradient estimates.
    numerator = np.linalg.norm(gradapprox - grad)
    denominator = np.linalg.norm(gradapprox) + np.linalg.norm(grad)
    difference = numerator / denominator

    if difference > 1.2e-7:
        print("\033[93m" + "There is a mistake in the backward propagation! difference = " + str(difference) +
              "\033[0m")
    else:
        print("\033[92m" + "Your backward propagation works perfectly fine! difference = " + str(difference) +
              "\033[0m")
    return difference
# End-to-end check on the provided test case: forward pass, backprop,
# then the N-dimensional gradient check of the resulting gradients.
X, Y, parameters = gradient_check_n_test_case()
cost, cache = forward_propagation_n(X, Y, parameters)
gradients = backward_propagation_n(X, Y, cache)
difference = gradient_check_n(parameters, gradients, X, Y)
""" It seems that there were errors in the backward_propagation_n code we gave you!
Good that you've implemented the gradient check. Go back to backward_propagation and try to find/correct the errors
(Hint: check dW2 and db1).
Rerun the gradient check when you think you've fixed it. Remember you'll need to re-execute the cell
defining backward_propagation_n() if you modify the code.
Can you get gradient check to declare your derivative computation correct?
Even though this part of the assignment isn't graded, we strongly urge you to try to find the bug and
re-run gradient check until you're convinced backprop is now correctly implemented."""
""" Note
Gradient Checking is slow! Approximating the gradient with
partial J / partial theta approx= J(theta + epsilon) - J(theta - epsilon) / {2 * epsilon} is computationally costly.
For this reason, we don't run gradient checking at every iteration during training.
Just a few times to check if the gradient is correct.
Gradient Checking, at least as we've presented it, doesn't work with dropout.
You would usually run the gradient check algorithm without dropout to make sure your backprop is correct,
then add dropout.
Congrats, you can be confident that your deep learning model for fraud detection is working correctly!
You can even use this to convince your CEO. :)
Gradient checking verifies closeness between the gradients from backpropagation and
the numerical approximation of the gradient (computed using forward propagation).
Gradient checking is slow, so we don't run it in every iteration of training.
You would usually run it only to make sure your code is correct, then turn it off and use backprop
for the actual learning process."""
| [
"numpy.sum",
"numpy.log",
"numpy.copy",
"gc_utils.vector_to_dictionary",
"numpy.zeros",
"gc_utils.sigmoid",
"gc_utils.dictionary_to_vector",
"gc_utils.relu",
"numpy.linalg.norm",
"gc_utils.gradients_to_vector",
"numpy.int64",
"numpy.dot"
] | [((3353, 3386), 'numpy.linalg.norm', 'np.linalg.norm', (['(grad - gradapprox)'], {}), '(grad - gradapprox)\n', (3367, 3386), True, 'import numpy as np\n'), ((5357, 5365), 'gc_utils.relu', 'relu', (['Z1'], {}), '(Z1)\n', (5361, 5365), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((5404, 5412), 'gc_utils.relu', 'relu', (['Z2'], {}), '(Z2)\n', (5408, 5412), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((5451, 5462), 'gc_utils.sigmoid', 'sigmoid', (['Z3'], {}), '(Z3)\n', (5458, 5462), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((6295, 6312), 'numpy.dot', 'np.dot', (['W3.T', 'dZ3'], {}), '(W3.T, dZ3)\n', (6301, 6312), True, 'import numpy as np\n'), ((6495, 6512), 'numpy.dot', 'np.dot', (['W2.T', 'dZ2'], {}), '(W2.T, dZ2)\n', (6501, 6512), True, 'import numpy as np\n'), ((9204, 9236), 'gc_utils.dictionary_to_vector', 'dictionary_to_vector', (['parameters'], {}), '(parameters)\n', (9224, 9236), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((9248, 9278), 'gc_utils.gradients_to_vector', 'gradients_to_vector', (['gradients'], {}), '(gradients)\n', (9267, 9278), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((9340, 9369), 'numpy.zeros', 'np.zeros', (['(num_parameters, 1)'], {}), '((num_parameters, 1))\n', (9348, 9369), True, 'import numpy as np\n'), ((9384, 9413), 'numpy.zeros', 'np.zeros', (['(num_parameters, 1)'], {}), '((num_parameters, 1))\n', (9392, 9413), True, 'import numpy as np\n'), ((9431, 9460), 'numpy.zeros', 'np.zeros', (['(num_parameters, 1)'], {}), '((num_parameters, 1))\n', (9439, 9460), True, 'import numpy as np\n'), ((10356, 10389), 'numpy.linalg.norm', 'np.linalg.norm', (['(gradapprox - grad)'], {}), '(gradapprox - 
grad)\n', (10370, 10389), True, 'import numpy as np\n'), ((3405, 3425), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (3419, 3425), True, 'import numpy as np\n'), ((3428, 3454), 'numpy.linalg.norm', 'np.linalg.norm', (['gradapprox'], {}), '(gradapprox)\n', (3442, 3454), True, 'import numpy as np\n'), ((5329, 5342), 'numpy.dot', 'np.dot', (['W1', 'X'], {}), '(W1, X)\n', (5335, 5342), True, 'import numpy as np\n'), ((5375, 5389), 'numpy.dot', 'np.dot', (['W2', 'A1'], {}), '(W2, A1)\n', (5381, 5389), True, 'import numpy as np\n'), ((5422, 5436), 'numpy.dot', 'np.dot', (['W3', 'A2'], {}), '(W3, A2)\n', (5428, 5436), True, 'import numpy as np\n'), ((5573, 5589), 'numpy.sum', 'np.sum', (['logprobs'], {}), '(logprobs)\n', (5579, 5589), True, 'import numpy as np\n'), ((6214, 6231), 'numpy.dot', 'np.dot', (['dZ3', 'A2.T'], {}), '(dZ3, A2.T)\n', (6220, 6231), True, 'import numpy as np\n'), ((6249, 6283), 'numpy.sum', 'np.sum', (['dZ3'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ3, axis=1, keepdims=True)\n', (6255, 6283), True, 'import numpy as np\n'), ((6340, 6356), 'numpy.int64', 'np.int64', (['(A2 > 0)'], {}), '(A2 > 0)\n', (6348, 6356), True, 'import numpy as np\n'), ((6414, 6431), 'numpy.dot', 'np.dot', (['dZ2', 'A1.T'], {}), '(dZ2, A1.T)\n', (6420, 6431), True, 'import numpy as np\n'), ((6449, 6483), 'numpy.sum', 'np.sum', (['dZ2'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ2, axis=1, keepdims=True)\n', (6455, 6483), True, 'import numpy as np\n'), ((6540, 6556), 'numpy.int64', 'np.int64', (['(A1 > 0)'], {}), '(A1 > 0)\n', (6548, 6556), True, 'import numpy as np\n'), ((6575, 6591), 'numpy.dot', 'np.dot', (['dZ1', 'X.T'], {}), '(dZ1, X.T)\n', (6581, 6591), True, 'import numpy as np\n'), ((6661, 6695), 'numpy.sum', 'np.sum', (['dZ1'], {'axis': '(1)', 'keepdims': '(True)'}), '(dZ1, axis=1, keepdims=True)\n', (6667, 6695), True, 'import numpy as np\n'), ((9746, 9772), 'numpy.copy', 'np.copy', (['parameters_values'], {}), '(parameters_values)\n', 
(9753, 9772), True, 'import numpy as np\n'), ((10008, 10034), 'numpy.copy', 'np.copy', (['parameters_values'], {}), '(parameters_values)\n', (10015, 10034), True, 'import numpy as np\n'), ((10408, 10434), 'numpy.linalg.norm', 'np.linalg.norm', (['gradapprox'], {}), '(gradapprox)\n', (10422, 10434), True, 'import numpy as np\n'), ((10437, 10457), 'numpy.linalg.norm', 'np.linalg.norm', (['grad'], {}), '(grad)\n', (10451, 10457), True, 'import numpy as np\n'), ((9859, 9890), 'gc_utils.vector_to_dictionary', 'vector_to_dictionary', (['thetaplus'], {}), '(thetaplus)\n', (9879, 9890), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((10123, 10155), 'gc_utils.vector_to_dictionary', 'vector_to_dictionary', (['thetaminus'], {}), '(thetaminus)\n', (10143, 10155), False, 'from gc_utils import sigmoid, relu, dictionary_to_vector, vector_to_dictionary, gradients_to_vector\n'), ((5503, 5513), 'numpy.log', 'np.log', (['A3'], {}), '(A3)\n', (5509, 5513), True, 'import numpy as np\n'), ((5533, 5547), 'numpy.log', 'np.log', (['(1 - A3)'], {}), '(1 - A3)\n', (5539, 5547), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import scipy.optimize
def Att(f, ft, a):
    """First-order low-pass gain model: a / sqrt(1 + (f/ft)^2)."""
    ratio = f / ft
    return a / np.sqrt(1 + ratio ** 2)
def derivata(f, ft, a):
    """Derivative of Att w.r.t. f: -a * (f/ft) * (1 + (f/ft)^2)^(-3/2)."""
    ratio = f / ft
    return -a * ratio * (1 + ratio ** 2) ** -1.5
def Bode(f, ft):
    """Asymptotic Bode magnitude (dB) above cutoff: 20 * log10(ft / f)."""
    return 20.0 * np.log10(ft / f)
def derivataBode(f, ft):
    """|d Bode/d f| = 20 / (ln(10) * f); note ft does not appear."""
    ln10 = np.log(10)
    return np.abs(-20 / (ln10 * f))
# Load measured output amplitudes and frequencies (with uncertainties)
# from the data file: Vout, dVout in volts; f, df in Hz.
Vout, dVout, f, df = np.genfromtxt('data.txt', skip_header = 1, unpack = True)
# Input amplitude and its uncertainty.
Vi = 6.08
dVi = 0.19
# Gain A = Vout/Vi, with the error propagated from both amplitudes.
A = Vout/Vi
dA = (1/Vi)*dVout + (Vout/(Vi**2))*dVi
# Standard (linear-amplitude) fit of the low-pass transfer function.
# The fit is iterated so the frequency error df can be folded into an
# effective uncertainty on A through the slope of the model.
dA_eff = dA
popt = (174,1) # initial guess for (ft, a)
for i in range(5):
    popt, pcov = scipy.optimize.curve_fit(Att, f, A, popt, dA_eff, absolute_sigma = False)
    chi_2 = np.sum(((A - Att(f,*popt))/dA_eff)**2)
    print(chi_2)
    # Effective error: quadrature sum of dA and the x-error projected
    # through the model derivative at the current best-fit parameters.
    dA_eff = np.sqrt(((derivata(f,*popt))*df)**2 + dA**2)
ndof = len(Vout)-2
# NOTE: printed labels are intentionally in Italian (original lab report).
print('\nil chi quadro è', chi_2)
print('il chi quadro ridotto è', chi_2/ndof)
print('ft (frequenza di taglio) =',popt[0], '+-', pcov[0][0]**0.5)
print('a =',popt[1], '+-', pcov[1][1]**0.5)
print("Cov normalizzata =", pcov[1][0]/(pcov[0][0]*pcov[1][1])**0.5, "\n")
# Bode fit: above the cutoff (f > 400 Hz) the response in dB is roughly
# linear in log10(f), so fit only that asymptotic region.
mask = f>400
ABode = 20*np.log10(A[mask])
fBode = f[mask]
dABode = 20*(dA[mask]/A[mask])/np.log(10)
dA_effB = dABode
dfBode = df[mask]
for i in range(5):
    poptBode, pcovBode = scipy.optimize.curve_fit(Bode, fBode, ABode, (1), dA_effB, absolute_sigma = False)
    chi_2 = np.sum(((ABode - Bode(fBode,*poptBode))/dA_effB)**2)
    print(chi_2)
    dA_effB = np.sqrt(((derivataBode(fBode, *poptBode))*dfBode)**2 + dABode**2)
print("ft =",poptBode, "+-", np.diag(pcovBode)**0.5, "\n")
# Plots
# Fitted curve over the data (log-log axes).
x = np.logspace(1, 5, 4000)
plt.figure()
plt.subplot(211)
plt.xscale('log')
plt.yscale('log')
plt.errorbar(f, A, yerr=dA, xerr=df, fmt='.', label="Misure")
plt.plot(x, Att(x, *popt), label="Fit")
plt.xlabel("f [Hz]")
plt.ylabel("A")
plt.legend()
# Normalized residuals of the amplitude fit.
plt.subplot(212)
plt.xscale('log')
plt.errorbar(f, (A -Att(f, *popt))/dA_eff, fmt='.', label="Res. norm.")
plt.plot(x, x*0)
plt.xlabel("f [Hz]")
plt.legend()
plt.show()
# Bode plot
xB = np.linspace(0, 8, 2000)
plt.figure()
plt.subplot(211)
plt.xscale('log')
plt.errorbar(f, 20*np.log10(A), yerr=20*(dA/A)/np.log(10), xerr=df, fmt='.', label="Misure")
plt.plot(x, Bode(x, *poptBode), label="Fit")
plt.xlabel("f [Hz]")
plt.ylabel("A")
plt.legend()
# Normalized residuals of the Bode fit.
plt.subplot(212)
plt.xscale('log')
plt.errorbar(fBode, (ABode - Bode(fBode, *poptBode))/dA_effB, fmt='.', label="Res. norm.")
plt.plot(x, x*0)
plt.xlabel("f [Hz]")
plt.legend()
plt.show()
| [
"matplotlib.pyplot.xscale",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.yscale",
"numpy.diag",
"matplotlib.pyplot.show",
"numpy.log",
"matplotlib.pyplot.plot",
"numpy.logspace",
"matplotlib.pyplot.legend",
"numpy.genfromtxt",
"matplotlib.pyplot.figure",
"numpy.linspace",
"matplotlib.pypl... | [((336, 389), 'numpy.genfromtxt', 'np.genfromtxt', (['"""data.txt"""'], {'skip_header': '(1)', 'unpack': '(True)'}), "('data.txt', skip_header=1, unpack=True)\n", (349, 389), True, 'import numpy as np\n'), ((1579, 1602), 'numpy.logspace', 'np.logspace', (['(1)', '(5)', '(4000)'], {}), '(1, 5, 4000)\n', (1590, 1602), True, 'import numpy as np\n'), ((1603, 1615), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1613, 1615), True, 'import matplotlib.pyplot as plt\n'), ((1616, 1632), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (1627, 1632), True, 'import matplotlib.pyplot as plt\n'), ((1633, 1650), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1643, 1650), True, 'import matplotlib.pyplot as plt\n'), ((1651, 1668), 'matplotlib.pyplot.yscale', 'plt.yscale', (['"""log"""'], {}), "('log')\n", (1661, 1668), True, 'import matplotlib.pyplot as plt\n'), ((1669, 1730), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['f', 'A'], {'yerr': 'dA', 'xerr': 'df', 'fmt': '"""."""', 'label': '"""Misure"""'}), "(f, A, yerr=dA, xerr=df, fmt='.', label='Misure')\n", (1681, 1730), True, 'import matplotlib.pyplot as plt\n'), ((1771, 1791), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (1781, 1791), True, 'import matplotlib.pyplot as plt\n'), ((1792, 1807), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""A"""'], {}), "('A')\n", (1802, 1807), True, 'import matplotlib.pyplot as plt\n'), ((1808, 1820), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (1818, 1820), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1866), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (1861, 1866), True, 'import matplotlib.pyplot as plt\n'), ((1867, 1884), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (1877, 1884), True, 'import matplotlib.pyplot as plt\n'), ((1957, 1975), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x 
* 0)'], {}), '(x, x * 0)\n', (1965, 1975), True, 'import matplotlib.pyplot as plt\n'), ((1974, 1994), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (1984, 1994), True, 'import matplotlib.pyplot as plt\n'), ((1995, 2007), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2005, 2007), True, 'import matplotlib.pyplot as plt\n'), ((2008, 2018), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2016, 2018), True, 'import matplotlib.pyplot as plt\n'), ((2037, 2060), 'numpy.linspace', 'np.linspace', (['(0)', '(8)', '(2000)'], {}), '(0, 8, 2000)\n', (2048, 2060), True, 'import numpy as np\n'), ((2061, 2073), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (2071, 2073), True, 'import matplotlib.pyplot as plt\n'), ((2074, 2090), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(211)'], {}), '(211)\n', (2085, 2090), True, 'import matplotlib.pyplot as plt\n'), ((2091, 2108), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2101, 2108), True, 'import matplotlib.pyplot as plt\n'), ((2247, 2267), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (2257, 2267), True, 'import matplotlib.pyplot as plt\n'), ((2268, 2283), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""A"""'], {}), "('A')\n", (2278, 2283), True, 'import matplotlib.pyplot as plt\n'), ((2284, 2296), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2294, 2296), True, 'import matplotlib.pyplot as plt\n'), ((2326, 2342), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(212)'], {}), '(212)\n', (2337, 2342), True, 'import matplotlib.pyplot as plt\n'), ((2343, 2360), 'matplotlib.pyplot.xscale', 'plt.xscale', (['"""log"""'], {}), "('log')\n", (2353, 2360), True, 'import matplotlib.pyplot as plt\n'), ((2452, 2470), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(x * 0)'], {}), '(x, x * 0)\n', (2460, 2470), True, 'import matplotlib.pyplot as plt\n'), ((2469, 2489), 'matplotlib.pyplot.xlabel', 
'plt.xlabel', (['"""f [Hz]"""'], {}), "('f [Hz]')\n", (2479, 2489), True, 'import matplotlib.pyplot as plt\n'), ((2490, 2502), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (2500, 2502), True, 'import matplotlib.pyplot as plt\n'), ((2503, 2513), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2511, 2513), True, 'import matplotlib.pyplot as plt\n'), ((1087, 1104), 'numpy.log10', 'np.log10', (['A[mask]'], {}), '(A[mask])\n', (1095, 1104), True, 'import numpy as np\n'), ((1152, 1162), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (1158, 1162), True, 'import numpy as np\n'), ((230, 246), 'numpy.log10', 'np.log10', (['(ft / f)'], {}), '(ft / f)\n', (238, 246), True, 'import numpy as np\n'), ((1517, 1534), 'numpy.diag', 'np.diag', (['pcovBode'], {}), '(pcovBode)\n', (1524, 1534), True, 'import numpy as np\n'), ((2128, 2139), 'numpy.log10', 'np.log10', (['A'], {}), '(A)\n', (2136, 2139), True, 'import numpy as np\n'), ((2156, 2166), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (2162, 2166), True, 'import numpy as np\n'), ((299, 309), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (305, 309), True, 'import numpy as np\n')] |
"""
gen_data_utils
--------------
Miscellanious functions for generating fake data to test MDN performance.
"""
import numpy as np
import keras
import matplotlib.pyplot as plt
from IPython.display import clear_output
def u_shape(n=1000, x=None):
    """
    Create upside-down u (unimodal) data: y ~ N(-10*(x-0.5)^2, 1).

    parameters
    ----------
    n : int
        number of samples to draw when ``x`` is not supplied
    x : numpy array, optional
        explicit inputs; if given, ``n`` is ignored

    returns
    -------
    x : numpy array of shape (n, 1)
    y : numpy array, same shape as x
    """
    # Bug fix: the sample size was hard-coded to 1000, silently ignoring n.
    if x is None:
        x = np.float32(np.random.uniform(0, 1, (1, n))).T
    y = np.float32(np.random.normal(loc=-10 * (x - 0.5) * (x - 0.5)))
    return x, y
def final_size(n=1000, x=None):
    """
    Simulates a final size like distribution as R0 varies. Includes threshold
    behaviour.

    parameters
    ----------
    n : int
        number of samples to draw when ``x`` is not supplied
    x : numpy array, optional
        explicit inputs in [0, 1]; if given, ``n`` is ignored

    returns
    -------
    x : numpy array of shape (len(x), 1)
    y : numpy array of shape (len(x),)
    """
    if x is None:
        x = np.random.uniform(0, 1, n)
    # Below the threshold (x < 0.5) the outbreak dies out: mixing weight 0.
    ps = x.copy()
    ps[ps < 0.5] = 0.
    ps = np.power(ps, 0.8)
    # Bernoulli mixture: with probability ps, sample the "large outbreak" mode.
    mixture = np.random.rand(ps.size) < ps
    m0 = np.random.normal(loc=0. * ps, scale=1.)
    m1 = np.random.normal(loc=10. * ps, scale=2. * ps)
    y = (1 - mixture) * m0 + mixture * m1
    x = x.reshape(x.size, 1)  # fix: was the redundant "x = x = x.reshape(...)"
    return x, y
| [
"numpy.random.uniform",
"numpy.power",
"numpy.random.rand",
"numpy.random.normal"
] | [((945, 962), 'numpy.power', 'np.power', (['ps', '(0.8)'], {}), '(ps, 0.8)\n', (953, 962), True, 'import numpy as np\n'), ((1015, 1056), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(0.0 * ps)', 'scale': '(1.0)'}), '(loc=0.0 * ps, scale=1.0)\n', (1031, 1056), True, 'import numpy as np\n'), ((1061, 1108), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(10.0 * ps)', 'scale': '(2.0 * ps)'}), '(loc=10.0 * ps, scale=2.0 * ps)\n', (1077, 1108), True, 'import numpy as np\n'), ((522, 571), 'numpy.random.normal', 'np.random.normal', ([], {'loc': '(-10 * (x - 0.5) * (x - 0.5))'}), '(loc=-10 * (x - 0.5) * (x - 0.5))\n', (538, 571), True, 'import numpy as np\n'), ((872, 898), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'n'], {}), '(0, 1, n)\n', (889, 898), True, 'import numpy as np\n'), ((976, 999), 'numpy.random.rand', 'np.random.rand', (['ps.size'], {}), '(ps.size)\n', (990, 999), True, 'import numpy as np\n'), ((467, 501), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', '(1, 1000)'], {}), '(0, 1, (1, 1000))\n', (484, 501), True, 'import numpy as np\n')] |
# coding: utf-8
'''
# def a function to calcualte sugar margin
__author__ = "<NAME>"
__copyright__ = "Zhejian Peng"
__license__ = MIT LICENSE
__version__ = "1.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Developing"
__update__ =
'''
import numpy as np
import pandas as pd
from scipy.special import gamma
from scipy.stats import t
from scipy import optimize
class future:
    '''Margin estimator for an exchange-traded future contract.

    Every future contract traded on an exchange needs a margin.  This class
    takes the daily prices of a future contract; margin() suggests a margin
    by fitting a location-scale Student-t to the EWMA-devolatilised daily
    log-returns and scaling its 0.5% tail quantile.
    '''
    def __init__(self, prices, settlement, multiplier=100):
        '''
        Return a future object with prices and last settlement amount.

        prices: a list of prices. eg. [11.2, 11.3, ...]
        settlement: a float, the last settlement amount
        multiplier: contract multiplier; a typical contract has 100
        '''
        self.prices = prices
        self.settlement = settlement
        self.multiplier = multiplier
    def get_prices(self):
        # Accessor for the raw daily price series.
        return self.prices
    def get_last_settlement(self):
        # Accessor for the last settlement amount (provided by the exchange).
        return self.settlement
    def daily_return(self):
        # Daily log-returns: log(P_t / P_{t-1}) for t = 1 .. len(prices)-1.
        prices = self.prices
        log_return = []
        for i in range(1, len(prices)):
            temp = np.log((prices[i] / prices[i-1]))
            log_return.append(temp)
        return log_return
    def ewma(self, ret, numbda=0.9697):
        '''
        Exponentially weighted moving average of a series.

        inputs:
            numbda: EWMA decay constant (weight kept from the previous value)
            ret: the daily return series
        output:
            a numpy array with the EWMA of ``ret``
        '''
        n = len(ret)
        result = np.zeros(n)
        # Recursive form: s_t = (1 - lambda) * r_t + lambda * s_{t-1}
        result[0] = (1 - numbda)*ret[0]
        for i in range(1, n):
            result[i] = (1-numbda)*ret[i] + numbda*result[i-1]
        return result
    def margin(self):
        # Suggested margin = 0.5% Student-t tail of the devolatilised
        # returns, rescaled by the current EWMA volatility, the last
        # settlement price and the contract multiplier.
        daily_ret = self.daily_return()
        # EWMA of returns and of squared returns give a running mean and
        # a running second moment, from which the volatility follows.
        ret = self.ewma(daily_ret)
        ret_sqr = self.ewma([x**2 for x in daily_ret])
        # Devolatilise: divide each return by its EWMA std-dev estimate.
        devol_return = [
            x/np.sqrt(y-z) for x, y, z in zip(daily_ret, ret_sqr, [x**2 for x in ret])]
        def loc_scale_t(x, para):
            # Density of a location-scale Student-t with (mu, sigma, v).
            mu, sigma, v = para
            temp1 = gamma((v+1) / 2) / (sigma*np.sqrt(v*np.pi)*gamma(v/2))
            temp2 = ((v + ((x-mu)/sigma)**2) / v)**(-(v+1)/2)
            ret = temp1 * temp2
            return ret
        def t_logL(para):
            # Negative log-likelihood of the devolatilised returns.
            ret = 0
            for x in devol_return:
                ret = ret + np.log(loc_scale_t(x, para))
            return -ret
        # Maximum-likelihood fit via Nelder-Mead (fmin) from an ad-hoc start.
        optimized_para = optimize.fmin(t_logL, np.array([0.02, 0.2, 10]))
        print('The optimized mu, sigma, and v is:', optimized_para)
        mu, sigma, v = optimized_para
        # 0.5% quantiles on both sides of the fitted distribution.
        upper_tail = t.ppf(0.005, v, loc=mu, scale=sigma)
        lower_tail = t.ppf(0.995, v, loc=mu, scale=sigma)
        print('Upper_tail:', upper_tail, 'Lower_tail', lower_tail)
        risky_tail = max(upper_tail, lower_tail)
        print('The larger tail is', risky_tail)
        # Rescale the tail by the latest EWMA volatility estimate.
        temp = risky_tail * np.sqrt(ret_sqr[-1] - ret[-1]**2)
        # The settlement amount is provided by the exchange.
        last_settlement = self.get_last_settlement()
        multiplier = self.multiplier
        settlement_margin = temp*last_settlement*multiplier
        print('settlement_margin is:', settlement_margin)
        return settlement_margin
# Russell Future Margin
# russell_data = pd.read_excel('EWMA-tf example (5).xlsx', sheetname='raw data')
# russell_data = np.array(
#     russell_data['Russell 3000 future contract daily return'])
# # print(russell_data)
# russell = future(russell_data, 664)
# # print(russell.daily_return())
# russell.margin()

# Sugar Future Margin:
#%%
sugar_data = pd.read_excel('Sugar 11 Historical Prices.xls')
sugar_data = sugar_data[['DATE', 'CLOSE']]
sugar_data = np.array(sugar_data['CLOSE'])
# The true last settlement is unknown here, so the final observed
# closing price stands in for the settlement amount.
# %%
sugar = future(sugar_data, settlement=sugar_data[-1], multiplier=1120)
# Fix: margin() runs a full maximum-likelihood fit each time it is
# called; compute it once instead of twice (the first call's result
# was previously discarded before printing a second run).
sugar_margin = sugar.margin()
print(sugar_margin)
| [
"numpy.log",
"numpy.zeros",
"pandas.read_excel",
"numpy.array",
"scipy.stats.t.ppf",
"scipy.special.gamma",
"numpy.sqrt"
] | [((3980, 4027), 'pandas.read_excel', 'pd.read_excel', (['"""Sugar 11 Historical Prices.xls"""'], {}), "('Sugar 11 Historical Prices.xls')\n", (3993, 4027), True, 'import pandas as pd\n'), ((4199, 4228), 'numpy.array', 'np.array', (["sugar_data['CLOSE']"], {}), "(sugar_data['CLOSE'])\n", (4207, 4228), True, 'import numpy as np\n'), ((1709, 1720), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (1717, 1720), True, 'import numpy as np\n'), ((2995, 3031), 'scipy.stats.t.ppf', 't.ppf', (['(0.005)', 'v'], {'loc': 'mu', 'scale': 'sigma'}), '(0.005, v, loc=mu, scale=sigma)\n', (3000, 3031), False, 'from scipy.stats import t\n'), ((3053, 3089), 'scipy.stats.t.ppf', 't.ppf', (['(0.995)', 'v'], {'loc': 'mu', 'scale': 'sigma'}), '(0.995, v, loc=mu, scale=sigma)\n', (3058, 3089), False, 'from scipy.stats import t\n'), ((1304, 1337), 'numpy.log', 'np.log', (['(prices[i] / prices[i - 1])'], {}), '(prices[i] / prices[i - 1])\n', (1310, 1337), True, 'import numpy as np\n'), ((2840, 2865), 'numpy.array', 'np.array', (['[0.02, 0.2, 10]'], {}), '([0.02, 0.2, 10])\n', (2848, 2865), True, 'import numpy as np\n'), ((3308, 3343), 'numpy.sqrt', 'np.sqrt', (['(ret_sqr[-1] - ret[-1] ** 2)'], {}), '(ret_sqr[-1] - ret[-1] ** 2)\n', (3315, 3343), True, 'import numpy as np\n'), ((2189, 2203), 'numpy.sqrt', 'np.sqrt', (['(y - z)'], {}), '(y - z)\n', (2196, 2203), True, 'import numpy as np\n'), ((2350, 2368), 'scipy.special.gamma', 'gamma', (['((v + 1) / 2)'], {}), '((v + 1) / 2)\n', (2355, 2368), False, 'from scipy.special import gamma\n'), ((2393, 2405), 'scipy.special.gamma', 'gamma', (['(v / 2)'], {}), '(v / 2)\n', (2398, 2405), False, 'from scipy.special import gamma\n'), ((2376, 2394), 'numpy.sqrt', 'np.sqrt', (['(v * np.pi)'], {}), '(v * np.pi)\n', (2383, 2394), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Set up directories and wavelength parameters for combining reduced IFU data cubes.

Created on Tue Oct 29 16:53:20 2019
@author: jlee
"""
import numpy as np
import glob, os
from astropy.io import fits
# ----- Directories ----- #
# Basic directories (cpath = current working dir, dir_root = its parent)
cpath = os.path.abspath(".")+"/"
dir_root = os.path.abspath("../")+"/"    # Same as 'dir_iraf'
dir_redux = dir_root+"redux5/"
# One reduction directory per central-wavelength setting, named like "w<wave>0"
dir_wav = sorted(glob.glob(dir_redux+"w*"))
# Figure directory (created if it does not exist yet)
dir_fig = cpath+"diagram/"
if (glob.glob(dir_fig) == []):
    os.system("mkdir "+dir_fig)
# Combine directory (created if it does not exist yet)
dir_cmb = cpath+"combine/"
if (glob.glob(dir_cmb) == []):
    os.system("mkdir "+dir_cmb)
# ----- Basic configurations ----- #
# Central wavelengths: strip the leading 'w' and trailing '0' from each dir name
centwave = [l.split("/")[-1][1:-1] for l in dir_wav]
cube_list, cube_name = [], []
for i in centwave:
    cube_list += sorted(glob.glob(dir_redux+"w"+i+"0/*/*_3D.fits"))
# Cube names: basename with the 'cstxeqxbrg' prefix and '_3D.fits' suffix removed
for i in np.arange(len(cube_list)):
    cube_name.append(cube_list[i].split('/')[-1].split('cstxeqxbrg')[-1].split('_3D.fits')[0])
cube_spa_off = ['N20190611S0265', 'N20190612S0125', 'N20190612S0128',
                'N20190612S0129', 'N20190613S0229', 'N20190613S0230',
                'N20190613S0233', 'N20190613S0234', 'N20190613S0237']    # Cubes with spatial offset
cube_ref = 'N20190611S0257'    # Reference cube
overwrite = True    # Overwriting the pixel values of spatially non-aligned cubes
pixel_scale = 0.1    # arcsec/pixel
# ----- Wavelength setting ----- #
redshift = 0.3527    # Redshift of galaxy
wav_range_res = np.array([6550.0, 6580.0])    # H alpha wavelength range (rest-frame)
check_x = [33, 65]    # [xmin, xmax] for check (including object and some bad regions)
check_y = [2, 46]    # [ymin, ymax] for check (including object and some bad regions)
# Reading wavelength range
wav_0, wav_1, dwav = [], [], []
for i in np.arange(len(cube_list)):
    h_sci = fits.getheader(cube_list[i], ext=1)
    # Wavelength axis reconstructed from the WCS keywords of extension 1
    wav = np.linspace(start=h_sci['CRVAL3']+(1-h_sci['CRPIX3'])*h_sci['CD3_3'],
                      stop=h_sci['CRVAL3']+(h_sci['NAXIS3']-h_sci['CRPIX3'])*h_sci['CD3_3'],
                      num=h_sci['NAXIS3'], endpoint=True)
    # Trim 10 samples from each end of the wavelength axis before comparing ranges
    wav_0.append(wav[10])
    wav_1.append(wav[-11])
    dwav.append(h_sci['CD3_3'])
wav_0, wav_1, dwav = np.array(wav_0), np.array(wav_1), np.array(dwav)
# Total wavelength range common to all cubes, rounded inward to multiples of 10
wav_range = [10 * (1 + wav_0.max() // 10),
             10 * (wav_1.min() // 10)]
nw_cut = int(round((wav_range[1]-wav_range[0])/np.mean(dwav))) + 1    # number of wavelength samples kept
wav_intv = 1.0    # the resulting wavelength interval
combine_mode = 'median'    # 'median' / 'clippedmean'
| [
"os.path.abspath",
"os.system",
"astropy.io.fits.getheader",
"numpy.mean",
"numpy.array",
"numpy.linspace",
"glob.glob"
] | [((1443, 1469), 'numpy.array', 'np.array', (['[6550.0, 6580.0]'], {}), '([6550.0, 6580.0])\n', (1451, 1469), True, 'import numpy as np\n'), ((229, 249), 'os.path.abspath', 'os.path.abspath', (['"""."""'], {}), "('.')\n", (244, 249), False, 'import glob, os\n'), ((265, 287), 'os.path.abspath', 'os.path.abspath', (['"""../"""'], {}), "('../')\n", (280, 287), False, 'import glob, os\n'), ((364, 391), 'glob.glob', 'glob.glob', (["(dir_redux + 'w*')"], {}), "(dir_redux + 'w*')\n", (373, 391), False, 'import glob, os\n'), ((442, 460), 'glob.glob', 'glob.glob', (['dir_fig'], {}), '(dir_fig)\n', (451, 460), False, 'import glob, os\n'), ((470, 499), 'os.system', 'os.system', (["('mkdir ' + dir_fig)"], {}), "('mkdir ' + dir_fig)\n", (479, 499), False, 'import glob, os\n'), ((550, 568), 'glob.glob', 'glob.glob', (['dir_cmb'], {}), '(dir_cmb)\n', (559, 568), False, 'import glob, os\n'), ((578, 607), 'os.system', 'os.system', (["('mkdir ' + dir_cmb)"], {}), "('mkdir ' + dir_cmb)\n", (587, 607), False, 'import glob, os\n'), ((1791, 1826), 'astropy.io.fits.getheader', 'fits.getheader', (['cube_list[i]'], {'ext': '(1)'}), '(cube_list[i], ext=1)\n', (1805, 1826), False, 'from astropy.io import fits\n'), ((1834, 2031), 'numpy.linspace', 'np.linspace', ([], {'start': "(h_sci['CRVAL3'] + (1 - h_sci['CRPIX3']) * h_sci['CD3_3'])", 'stop': "(h_sci['CRVAL3'] + (h_sci['NAXIS3'] - h_sci['CRPIX3']) * h_sci['CD3_3'])", 'num': "h_sci['NAXIS3']", 'endpoint': '(True)'}), "(start=h_sci['CRVAL3'] + (1 - h_sci['CRPIX3']) * h_sci['CD3_3'],\n stop=h_sci['CRVAL3'] + (h_sci['NAXIS3'] - h_sci['CRPIX3']) * h_sci[\n 'CD3_3'], num=h_sci['NAXIS3'], endpoint=True)\n", (1845, 2031), True, 'import numpy as np\n'), ((2152, 2167), 'numpy.array', 'np.array', (['wav_0'], {}), '(wav_0)\n', (2160, 2167), True, 'import numpy as np\n'), ((2169, 2184), 'numpy.array', 'np.array', (['wav_1'], {}), '(wav_1)\n', (2177, 2184), True, 'import numpy as np\n'), ((2186, 2200), 'numpy.array', 'np.array', (['dwav'], {}), 
'(dwav)\n', (2194, 2200), True, 'import numpy as np\n'), ((768, 816), 'glob.glob', 'glob.glob', (["(dir_redux + 'w' + i + '0/*/*_3D.fits')"], {}), "(dir_redux + 'w' + i + '0/*/*_3D.fits')\n", (777, 816), False, 'import glob, os\n'), ((2356, 2369), 'numpy.mean', 'np.mean', (['dwav'], {}), '(dwav)\n', (2363, 2369), True, 'import numpy as np\n')] |
import tensorflow as tf
import numpy as np
from configuration import Config
from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152
from core.models.dla import dla_34, dla_60, dla_102, dla_169
from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7
from data.dataloader import GT
from core.loss import CombinedLoss, RegL1Loss
# Registry mapping Config.backbone_name strings to backbone network instances.
# NOTE(review): every backbone is instantiated eagerly at import time even though
# only the one named by Config.backbone_name is ever used; consider mapping to
# constructors instead if memory/startup time becomes a concern.
backbone_zoo = {"resnet_18": resnet_18(),
                "resnet_34": resnet_34(),
                "resnet_50": resnet_50(),
                "resnet_101": resnet_101(),
                "resnet_152": resnet_152(),
                "dla_34": dla_34(),
                "dla_60": dla_60(),
                "dla_102": dla_102(),
                "dla_169": dla_169(),
                "D0": d0(), "D1": d1(), "D2": d2(), "D3": d3(), "D4": d4(), "D5": d5(), "D6": d6(), "D7": d7()}
class CenterNet(tf.keras.Model):
    """Detection model: a configurable backbone whose outputs are concatenated
    along the channel axis."""

    def __init__(self):
        super(CenterNet, self).__init__()
        # Pick the backbone instance selected in the configuration.
        self.backbone = backbone_zoo[Config.backbone_name]

    def call(self, inputs, training=None, mask=None):
        """Run the backbone and concatenate its feature maps channel-wise."""
        features = self.backbone(inputs, training=training)
        return tf.concat(values=features, axis=-1)
class PostProcessing:
    """Loss computation for training and box decoding for inference."""

    @staticmethod
    def training_procedure(batch_labels, pred):
        """Build ground-truth tensors from the batch labels and return the combined loss."""
        targets = GT(batch_labels)
        heatmap, reg, wh, reg_mask, indices = targets.get_gt_values()
        criterion = CombinedLoss()
        return criterion(y_pred=pred, heatmap_true=heatmap, reg_true=reg,
                         wh_true=wh, reg_mask=reg_mask, indices=indices)

    @staticmethod
    def testing_procedure(pred, original_image_size):
        """Decode raw predictions into (bboxes, scores, classes) arrays."""
        detections = Decoder(original_image_size)(pred)
        return detections[:, 0:4], detections[:, 4], detections[:, 5]
class Decoder:
    """Decode raw network output (class heatmap + offset + size heads) into
    scored bounding boxes in the original image's pixel coordinates."""

    def __init__(self, original_image_size):
        # K: maximum number of detections kept per image.
        self.K = Config.max_boxes_per_image
        self.original_image_size = np.array(original_image_size, dtype=np.float32)
        self.input_image_size = np.array(Config.get_image_size(), dtype=np.float32)
        self.downsampling_ratio = Config.downsampling_ratio
        self.score_threshold = Config.score_threshold

    def __call__(self, pred, *args, **kwargs):
        """Return an (N, 6) array of [x1, y1, x2, y2, score, class] detections."""
        # Channel layout of pred: num_classes heatmaps, 2 offset channels, 2 size channels.
        heatmap, reg, wh = tf.split(value=pred, num_or_size_splits=[Config.num_classes, 2, 2], axis=-1)
        heatmap = tf.math.sigmoid(heatmap)
        batch_size = heatmap.shape[0]
        # Max-pool NMS: keep only local maxima of the heatmap.
        heatmap = Decoder.__nms(heatmap)
        scores, inds, clses, ys, xs = Decoder.__topK(scores=heatmap, K=self.K)
        if reg is not None:
            # Refine integer peak positions with the predicted sub-pixel offsets.
            reg = RegL1Loss.gather_feat(feat=reg, idx=inds)
            xs = tf.reshape(xs, shape=(batch_size, self.K, 1)) + reg[:, :, 0:1]
            ys = tf.reshape(ys, shape=(batch_size, self.K, 1)) + reg[:, :, 1:2]
        else:
            # No offset head: assume the peak sits at the cell centre.
            xs = tf.reshape(xs, shape=(batch_size, self.K, 1)) + 0.5
            ys = tf.reshape(ys, shape=(batch_size, self.K, 1)) + 0.5
        wh = RegL1Loss.gather_feat(feat=wh, idx=inds)
        clses = tf.cast(tf.reshape(clses, (batch_size, self.K, 1)), dtype=tf.float32)
        scores = tf.reshape(scores, (batch_size, self.K, 1))
        # Convert centre/size representation to corner coordinates
        # (still in feature-map units at this point).
        bboxes = tf.concat(values=[xs - wh[..., 0:1] / 2,
                                  ys - wh[..., 1:2] / 2,
                                  xs + wh[..., 0:1] / 2,
                                  ys + wh[..., 1:2] / 2], axis=2)
        detections = tf.concat(values=[bboxes, scores, clses], axis=2)
        return self.__map_to_original(detections)

    def __map_to_original(self, detections):
        """Scale feature-map boxes back to original-image pixels, clip them to
        the image bounds and apply the score threshold.
        Only the first batch element is kept."""
        bboxes, scores, clses = tf.split(value=detections, num_or_size_splits=[4, 1, 1], axis=2)
        bboxes, scores, clses = bboxes.numpy()[0], scores.numpy()[0], clses.numpy()[0]
        resize_ratio = self.original_image_size / self.input_image_size
        # Undo the network's downsampling, then the resize from original to input size.
        bboxes[:, 0::2] = bboxes[:, 0::2] * self.downsampling_ratio * resize_ratio[1]
        bboxes[:, 1::2] = bboxes[:, 1::2] * self.downsampling_ratio * resize_ratio[0]
        bboxes[:, 0::2] = np.clip(a=bboxes[:, 0::2], a_min=0, a_max=self.original_image_size[1])
        bboxes[:, 1::2] = np.clip(a=bboxes[:, 1::2], a_min=0, a_max=self.original_image_size[0])
        score_mask = scores >= self.score_threshold
        bboxes, scores, clses = Decoder.__numpy_mask(bboxes, np.tile(score_mask, (1, 4))), Decoder.__numpy_mask(scores, score_mask), Decoder.__numpy_mask(clses, score_mask)
        detections = np.concatenate([bboxes, scores, clses], axis=-1)
        return detections

    @staticmethod
    def __numpy_mask(a, mask):
        # Boolean-mask `a` and restore its trailing dimension.
        return a[mask].reshape(-1, a.shape[-1])

    @staticmethod
    def __nms(heatmap, pool_size=3):
        # A heatmap cell survives only if it equals the max of its pool_size
        # neighbourhood, i.e. it is a local maximum.
        hmax = tf.keras.layers.MaxPool2D(pool_size=pool_size, strides=1, padding="same")(heatmap)
        keep = tf.cast(tf.equal(heatmap, hmax), tf.float32)
        return hmax * keep

    @staticmethod
    def __topK(scores, K):
        """Return the top-K scores over all spatial positions and classes, plus
        their flat spatial indices, class ids and (y, x) feature-map coordinates."""
        B, H, W, C = scores.shape
        scores = tf.reshape(scores, shape=(B, -1))
        topk_scores, topk_inds = tf.math.top_k(input=scores, k=K, sorted=True)
        # Flattened index layout is (H, W, C), so class is the fastest-varying axis.
        topk_clses = topk_inds % C
        topk_xs = tf.cast(topk_inds // C % W, tf.float32)
        topk_ys = tf.cast(topk_inds // C // W, tf.float32)
        topk_inds = tf.cast(topk_ys * tf.cast(W, tf.float32) + topk_xs, tf.int32)
        return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
| [
"core.models.efficientdet.d5",
"core.loss.CombinedLoss",
"core.models.efficientdet.d4",
"tensorflow.reshape",
"numpy.clip",
"tensorflow.keras.layers.MaxPool2D",
"numpy.tile",
"core.models.efficientdet.d0",
"tensorflow.split",
"core.models.efficientdet.d2",
"core.models.dla.dla_169",
"core.mode... | [((400, 411), 'core.models.resnet.resnet_18', 'resnet_18', ([], {}), '()\n', (409, 411), False, 'from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152\n'), ((442, 453), 'core.models.resnet.resnet_34', 'resnet_34', ([], {}), '()\n', (451, 453), False, 'from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152\n'), ((484, 495), 'core.models.resnet.resnet_50', 'resnet_50', ([], {}), '()\n', (493, 495), False, 'from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152\n'), ((527, 539), 'core.models.resnet.resnet_101', 'resnet_101', ([], {}), '()\n', (537, 539), False, 'from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152\n'), ((571, 583), 'core.models.resnet.resnet_152', 'resnet_152', ([], {}), '()\n', (581, 583), False, 'from core.models.resnet import resnet_18, resnet_34, resnet_50, resnet_101, resnet_152\n'), ((611, 619), 'core.models.dla.dla_34', 'dla_34', ([], {}), '()\n', (617, 619), False, 'from core.models.dla import dla_34, dla_60, dla_102, dla_169\n'), ((647, 655), 'core.models.dla.dla_60', 'dla_60', ([], {}), '()\n', (653, 655), False, 'from core.models.dla import dla_34, dla_60, dla_102, dla_169\n'), ((684, 693), 'core.models.dla.dla_102', 'dla_102', ([], {}), '()\n', (691, 693), False, 'from core.models.dla import dla_34, dla_60, dla_102, dla_169\n'), ((722, 731), 'core.models.dla.dla_169', 'dla_169', ([], {}), '()\n', (729, 731), False, 'from core.models.dla import dla_34, dla_60, dla_102, dla_169\n'), ((755, 759), 'core.models.efficientdet.d0', 'd0', ([], {}), '()\n', (757, 759), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((767, 771), 'core.models.efficientdet.d1', 'd1', ([], {}), '()\n', (769, 771), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((779, 783), 'core.models.efficientdet.d2', 'd2', ([], {}), '()\n', (781, 783), False, 'from 
core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((791, 795), 'core.models.efficientdet.d3', 'd3', ([], {}), '()\n', (793, 795), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((803, 807), 'core.models.efficientdet.d4', 'd4', ([], {}), '()\n', (805, 807), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((815, 819), 'core.models.efficientdet.d5', 'd5', ([], {}), '()\n', (817, 819), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((827, 831), 'core.models.efficientdet.d6', 'd6', ([], {}), '()\n', (829, 831), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((839, 843), 'core.models.efficientdet.d7', 'd7', ([], {}), '()\n', (841, 843), False, 'from core.models.efficientdet import d0, d1, d2, d3, d4, d5, d6, d7\n'), ((1125, 1153), 'tensorflow.concat', 'tf.concat', ([], {'values': 'x', 'axis': '(-1)'}), '(values=x, axis=-1)\n', (1134, 1153), True, 'import tensorflow as tf\n'), ((1274, 1290), 'data.dataloader.GT', 'GT', (['batch_labels'], {}), '(batch_labels)\n', (1276, 1290), False, 'from data.dataloader import GT\n'), ((1393, 1407), 'core.loss.CombinedLoss', 'CombinedLoss', ([], {}), '()\n', (1405, 1407), False, 'from core.loss import CombinedLoss, RegL1Loss\n'), ((2003, 2050), 'numpy.array', 'np.array', (['original_image_size'], {'dtype': 'np.float32'}), '(original_image_size, dtype=np.float32)\n', (2011, 2050), True, 'import numpy as np\n'), ((2324, 2400), 'tensorflow.split', 'tf.split', ([], {'value': 'pred', 'num_or_size_splits': '[Config.num_classes, 2, 2]', 'axis': '(-1)'}), '(value=pred, num_or_size_splits=[Config.num_classes, 2, 2], axis=-1)\n', (2332, 2400), True, 'import tensorflow as tf\n'), ((2419, 2443), 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['heatmap'], {}), '(heatmap)\n', (2434, 2443), True, 'import tensorflow as tf\n'), ((3015, 3055), 'core.loss.RegL1Loss.gather_feat', 'RegL1Loss.gather_feat', 
([], {'feat': 'wh', 'idx': 'inds'}), '(feat=wh, idx=inds)\n', (3036, 3055), False, 'from core.loss import CombinedLoss, RegL1Loss\n'), ((3159, 3202), 'tensorflow.reshape', 'tf.reshape', (['scores', '(batch_size, self.K, 1)'], {}), '(scores, (batch_size, self.K, 1))\n', (3169, 3202), True, 'import tensorflow as tf\n'), ((3220, 3342), 'tensorflow.concat', 'tf.concat', ([], {'values': '[xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[..., 0:1] / 2, ys + \n wh[..., 1:2] / 2]', 'axis': '(2)'}), '(values=[xs - wh[..., 0:1] / 2, ys - wh[..., 1:2] / 2, xs + wh[...,\n 0:1] / 2, ys + wh[..., 1:2] / 2], axis=2)\n', (3229, 3342), True, 'import tensorflow as tf\n'), ((3465, 3514), 'tensorflow.concat', 'tf.concat', ([], {'values': '[bboxes, scores, clses]', 'axis': '(2)'}), '(values=[bboxes, scores, clses], axis=2)\n', (3474, 3514), True, 'import tensorflow as tf\n'), ((3643, 3707), 'tensorflow.split', 'tf.split', ([], {'value': 'detections', 'num_or_size_splits': '[4, 1, 1]', 'axis': '(2)'}), '(value=detections, num_or_size_splits=[4, 1, 1], axis=2)\n', (3651, 3707), True, 'import tensorflow as tf\n'), ((4065, 4135), 'numpy.clip', 'np.clip', ([], {'a': 'bboxes[:, 0::2]', 'a_min': '(0)', 'a_max': 'self.original_image_size[1]'}), '(a=bboxes[:, 0::2], a_min=0, a_max=self.original_image_size[1])\n', (4072, 4135), True, 'import numpy as np\n'), ((4162, 4232), 'numpy.clip', 'np.clip', ([], {'a': 'bboxes[:, 1::2]', 'a_min': '(0)', 'a_max': 'self.original_image_size[0]'}), '(a=bboxes[:, 1::2], a_min=0, a_max=self.original_image_size[0])\n', (4169, 4232), True, 'import numpy as np\n'), ((4479, 4527), 'numpy.concatenate', 'np.concatenate', (['[bboxes, scores, clses]'], {'axis': '(-1)'}), '([bboxes, scores, clses], axis=-1)\n', (4493, 4527), True, 'import numpy as np\n'), ((4990, 5023), 'tensorflow.reshape', 'tf.reshape', (['scores'], {'shape': '(B, -1)'}), '(scores, shape=(B, -1))\n', (5000, 5023), True, 'import tensorflow as tf\n'), ((5057, 5102), 'tensorflow.math.top_k', 
'tf.math.top_k', ([], {'input': 'scores', 'k': 'K', 'sorted': '(True)'}), '(input=scores, k=K, sorted=True)\n', (5070, 5102), True, 'import tensorflow as tf\n'), ((5156, 5195), 'tensorflow.cast', 'tf.cast', (['(topk_inds // C % W)', 'tf.float32'], {}), '(topk_inds // C % W, tf.float32)\n', (5163, 5195), True, 'import tensorflow as tf\n'), ((5214, 5254), 'tensorflow.cast', 'tf.cast', (['(topk_inds // C // W)', 'tf.float32'], {}), '(topk_inds // C // W, tf.float32)\n', (5221, 5254), True, 'import tensorflow as tf\n'), ((2092, 2115), 'configuration.Config.get_image_size', 'Config.get_image_size', ([], {}), '()\n', (2113, 2115), False, 'from configuration import Config\n'), ((2648, 2689), 'core.loss.RegL1Loss.gather_feat', 'RegL1Loss.gather_feat', ([], {'feat': 'reg', 'idx': 'inds'}), '(feat=reg, idx=inds)\n', (2669, 2689), False, 'from core.loss import CombinedLoss, RegL1Loss\n'), ((3080, 3122), 'tensorflow.reshape', 'tf.reshape', (['clses', '(batch_size, self.K, 1)'], {}), '(clses, (batch_size, self.K, 1))\n', (3090, 3122), True, 'import tensorflow as tf\n'), ((4723, 4796), 'tensorflow.keras.layers.MaxPool2D', 'tf.keras.layers.MaxPool2D', ([], {'pool_size': 'pool_size', 'strides': '(1)', 'padding': '"""same"""'}), "(pool_size=pool_size, strides=1, padding='same')\n", (4748, 4796), True, 'import tensorflow as tf\n'), ((4829, 4852), 'tensorflow.equal', 'tf.equal', (['heatmap', 'hmax'], {}), '(heatmap, hmax)\n', (4837, 4852), True, 'import tensorflow as tf\n'), ((2707, 2752), 'tensorflow.reshape', 'tf.reshape', (['xs'], {'shape': '(batch_size, self.K, 1)'}), '(xs, shape=(batch_size, self.K, 1))\n', (2717, 2752), True, 'import tensorflow as tf\n'), ((2787, 2832), 'tensorflow.reshape', 'tf.reshape', (['ys'], {'shape': '(batch_size, self.K, 1)'}), '(ys, shape=(batch_size, self.K, 1))\n', (2797, 2832), True, 'import tensorflow as tf\n'), ((2881, 2926), 'tensorflow.reshape', 'tf.reshape', (['xs'], {'shape': '(batch_size, self.K, 1)'}), '(xs, shape=(batch_size, self.K, 
1))\n', (2891, 2926), True, 'import tensorflow as tf\n'), ((2950, 2995), 'tensorflow.reshape', 'tf.reshape', (['ys'], {'shape': '(batch_size, self.K, 1)'}), '(ys, shape=(batch_size, self.K, 1))\n', (2960, 2995), True, 'import tensorflow as tf\n'), ((4346, 4373), 'numpy.tile', 'np.tile', (['score_mask', '(1, 4)'], {}), '(score_mask, (1, 4))\n', (4353, 4373), True, 'import numpy as np\n'), ((5293, 5315), 'tensorflow.cast', 'tf.cast', (['W', 'tf.float32'], {}), '(W, tf.float32)\n', (5300, 5315), True, 'import tensorflow as tf\n')] |
import tensorflow as tf
import os
import contractions
import tensorflow as tf
import pandas as pd
import numpy as np
import time
import rich
from rich.progress import track
import spacy
from model.encoder import Encoder
from model.decoder import Decoder
from config import params
from preprocess import *
from dataset import enc_seq, teach_force_seq, y
def loss(y, ypred, sce):
    """Masked sequence loss: positions where the target token is 0 (padding)
    do not contribute to the mean."""
    per_token = sce(y, ypred)
    pad_mask = tf.cast(tf.not_equal(y, 0), tf.float32)
    return tf.reduce_mean(pad_mask * per_token)
@tf.function
def train_step(params, x, ger_inp, ger_out, encoder, decoder, sce):
    """One teacher-forced training step.

    Encodes x, decodes one timestep at a time feeding the ground-truth German
    input token (teacher forcing), averages the per-timestep masked loss,
    applies the gradients, and returns (grads, average timestep loss).
    """
    with tf.GradientTape() as tape:
        tot_loss = 0
        enc_seq, hidden1, hidden2 = encoder(x)
        for i in range(params.dec_max_len):
            # Teacher forcing: decoder input at step i is the ground-truth token.
            dec_inp = tf.expand_dims(ger_inp[:, i], axis=1)
            ypred, hidden1, hidden2, attention_weights = decoder(enc_seq, dec_inp, hidden1, hidden2)
            timestep_loss = loss(tf.expand_dims(ger_out[:, i], 1), ypred, sce)
            tot_loss += timestep_loss
        avg_timestep_loss = tot_loss/params.dec_max_len
    total_vars = encoder.trainable_variables + decoder.trainable_variables
    grads = tape.gradient(avg_timestep_loss, total_vars)
    params.optimizer.apply_gradients(zip(grads, total_vars))
    return grads, avg_timestep_loss
def save_checkpoints(params, encoder, decoder):
    """Write a checkpoint (optimizer + both models) under /content/model_checkpoints."""
    ckpt_dir = '/content/model_checkpoints'
    checkpoint = tf.train.Checkpoint(optimizer=params.optimizer, encoder=encoder, decoder=decoder)
    checkpoint.save(file_prefix=os.path.join(ckpt_dir, "ckpt"))
def restore_checkpoint(params, encoder, decoder):
    """Restore optimizer and model weights from the newest checkpoint on disk."""
    ckpt_dir = '/content/model_checkpoints'
    checkpoint = tf.train.Checkpoint(optimizer=params.optimizer, encoder=encoder, decoder=decoder)
    latest = tf.train.latest_checkpoint(ckpt_dir)
    checkpoint.restore(latest)
# Module-level model instances shared by train() and the checkpoint helpers.
encoder = Encoder(params)
decoder = Decoder(params)
def train():
    """Train the encoder/decoder for params.epochs epochs over the module-level
    dataset (enc_seq, teach_force_seq, y), checkpointing after every epoch.

    Returns:
        grads: gradients from the final training step.
        avg_loss: list with the mean batch loss of each epoch.
    """
    sce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True, reduction='none')
    start = time.time()
    avg_loss = []
    for e in track(range(0, params.epochs)):
        losses = []
        st = time.time()
        for enc_seq_batch, teach_force_seq_batch, y_batch in zip(enc_seq, teach_force_seq, y):
            # Renamed from `loss` so the module-level loss() function is not shadowed.
            grads, batch_loss = train_step(params, enc_seq_batch, teach_force_seq_batch, y_batch, encoder, decoder, sce)
            losses.append(batch_loss.numpy())
        epoch_loss = np.mean(losses)  # compute once; reused for history and logging
        avg_loss.append(epoch_loss)
        print(f'EPOCH - {e+1} ---- LOSS - {epoch_loss} ---- TIME - {time.time()- st}')
        save_checkpoints(params, encoder, decoder)
    print(f'total time taken: {time.time()-start}')
    return grads, avg_loss
# Run training immediately when this script is executed/imported.
grads, avg_loss = train()
| [
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.not_equal",
"tensorflow.train.Checkpoint",
"tensorflow.reduce_mean",
"time.time",
"model.encoder.Encoder",
"tensorflow.train.latest_checkpoint",
"numpy.mean",
"model.decoder.Decoder",
"os.path.join",
"tensorflow.GradientTape",
... | [((2018, 2033), 'model.encoder.Encoder', 'Encoder', (['params'], {}), '(params)\n', (2025, 2033), False, 'from model.encoder import Encoder\n'), ((2044, 2059), 'model.decoder.Decoder', 'Decoder', (['params'], {}), '(params)\n', (2051, 2059), False, 'from model.decoder import Decoder\n'), ((498, 519), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss_'], {}), '(loss_)\n', (512, 519), True, 'import tensorflow as tf\n'), ((1451, 1487), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""ckpt"""'], {}), "(checkpoint_dir, 'ckpt')\n", (1463, 1487), False, 'import os\n'), ((1499, 1585), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'params.optimizer', 'encoder': 'encoder', 'decoder': 'decoder'}), '(optimizer=params.optimizer, encoder=encoder, decoder=\n decoder)\n', (1518, 1585), True, 'import tensorflow as tf\n'), ((1799, 1885), 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'optimizer': 'params.optimizer', 'encoder': 'encoder', 'decoder': 'decoder'}), '(optimizer=params.optimizer, encoder=encoder, decoder=\n decoder)\n', (1818, 1885), True, 'import tensorflow as tf\n'), ((2085, 2171), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': '"""none"""'}), "(from_logits=True, reduction=\n 'none')\n", (2130, 2171), True, 'import tensorflow as tf\n'), ((2184, 2195), 'time.time', 'time.time', ([], {}), '()\n', (2193, 2195), False, 'import time\n'), ((428, 446), 'tensorflow.not_equal', 'tf.not_equal', (['y', '(0)'], {}), '(y, 0)\n', (440, 446), True, 'import tensorflow as tf\n'), ((613, 630), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (628, 630), True, 'import tensorflow as tf\n'), ((1958, 2000), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (1984, 2000), True, 'import tensorflow as tf\n'), ((2295, 2306), 'time.time', 'time.time', ([], {}), 
'()\n', (2304, 2306), False, 'import time\n'), ((778, 815), 'tensorflow.expand_dims', 'tf.expand_dims', (['ger_inp[:, i]'], {'axis': '(1)'}), '(ger_inp[:, i], axis=1)\n', (792, 815), True, 'import tensorflow as tf\n'), ((2585, 2600), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2592, 2600), True, 'import numpy as np\n'), ((952, 984), 'tensorflow.expand_dims', 'tf.expand_dims', (['ger_out[:, i]', '(1)'], {}), '(ger_out[:, i], 1)\n', (966, 984), True, 'import tensorflow as tf\n'), ((2646, 2661), 'numpy.mean', 'np.mean', (['losses'], {}), '(losses)\n', (2653, 2661), True, 'import numpy as np\n'), ((2774, 2785), 'time.time', 'time.time', ([], {}), '()\n', (2783, 2785), False, 'import time\n'), ((2676, 2687), 'time.time', 'time.time', ([], {}), '()\n', (2685, 2687), False, 'import time\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Seismic plotter.
:copyright: 2016-22 Agile Scientific
:license: Apache 2.0
"""
import argparse
import os
import time
import glob
import re
import datetime
import sys
import yaml
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mtick
from PIL import Image
from seismic import Seismic
from notice import Notice
import utils
import plotter
from _version import __version__
def main(target, cfg):
"""
Puts everything together.
"""
t0 = time.time()
#####################################################################
#
# READ SEGY
#
#####################################################################
if cfg['segy_library'].lower() == 'obspy':
s = Seismic.from_segy_with_obspy(target, params={'ndim': cfg['ndim']})
else:
s = Seismic.from_segy(target, params={'ndim': cfg['ndim']})
# Set the line and/or xline number.
try:
n, xl = cfg['number']
except:
n, xl = cfg['number'], 0.5
# Set the direction.
if (s.ndim) == 2:
direction = ['inline']
elif cfg['direction'].lower()[0] == 'i':
direction = ['inline']
elif cfg['direction'].lower()[0] in ['x', 'c']: # Allow 'crossline' too.
direction = ['xline']
elif cfg['direction'].lower()[0] == 't':
direction = ['tslice']
else:
direction = ['xline', 'inline']
# Get the data.
try:
ss = [Seismic.from_seismic(s, n=n, direction=d) for n, d in zip((n, xl), direction)]
except IndexError:
# Perhaps misinterpreted 2D as 3D
s = Seismic.from_segy(target, params={'ndim': 2})
direction = ['inline']
ss = [Seismic.from_seismic(s, n=n, direction=d) for n, d in zip((n, xl), direction)]
clip_val = np.percentile(s.data, cfg['percentile'])
if clip_val < 10:
fstr = '{:.3f}'
elif clip_val < 100:
fstr = '{:.2f}'
elif clip_val < 1000:
fstr = '{:.1f}'
else:
fstr = '{:.0f}'
# Notify user of parameters.
Notice.info("n_traces {}".format(s.ntraces))
Notice.info("n_samples {}".format(s.nsamples))
Notice.info("dt {}".format(s.dt))
Notice.info("t_start {}".format(s.tstart))
Notice.info("t_end {}".format(s.tend))
Notice.info("max_val " + fstr.format(np.amax(s.data)))
Notice.info("min_val " + fstr.format(np.amin(s.data)))
Notice.info("clip_val " + fstr.format(clip_val))
t1 = time.time()
Notice.ok("Read data in {:.1f} s".format(t1-t0))
#####################################################################
#
# MAKE PLOT
#
#####################################################################
Notice.hr_header("Plotting")
# Plot size parameters.
wsl = 6 # Width of sidelabel, inches
mih = 11 # Minimum plot height, inches
fhh = 5 # File header box height, inches
m = 0.75 # basic unit of margins, inches
# Margins, CSS like: top, right, bottom, left.
mt, mr, mb, ml = m, 1.5*m, m, 1.5*m
mm = 2*m # padded margin between seismic and label
# Determine plot dimensions. Kind of laborious and repetitive (see below).
if cfg['plot_width']:
seismic_width = cfg['plot_width'] - wsl - mm - ml - mr
tpi = max([s.ntraces for s in ss]) / seismic_width
else:
tpi = cfg['tpi']
if cfg['plot_height']:
seismic_height = max(mih, cfg['plot_height']) - mb - 0.75*(len(ss)-1) - mt
seismic_height_raw = seismic_height / len(ss)
ips = seismic_height_raw / (s.tbasis[-1] - s.tbasis[0])
else:
ips = cfg['ips']
# Width is determined by seismic width, plus sidelabel, plus margins.
# Height is given by ips, but with a minimum of mih inches.
if 'tslice' in direction:
seismic_width = [s.ntraces / tpi for s in ss]
seismic_height_raw = max([s.nxlines for s in ss]) / tpi
else:
seismic_width = [s.ntraces / tpi for s in ss]
seismic_height_raw = ips * (s.tbasis[-1] - s.tbasis[0])
w = ml + max(seismic_width) + mm + wsl + mr # inches
seismic_height = len(ss) * seismic_height_raw
h_reqd = mb + seismic_height + 0.75*(len(ss)-1) + mt # inches
h = max(mih, h_reqd)
# Calculate where to start sidelabel and seismic data.
# Depends on whether sidelabel is on the left or right.
if cfg['sidelabel'] == 'right':
ssl = (ml + max(seismic_width) + mm) / w # Start of side label (ratio)
seismic_left = ml / w
else:
ssl = ml / w
seismic_left = (ml + wsl + mm) / w
adj = max(0, h - h_reqd) / 2
seismic_bottom = (mb / h) + adj / h
seismic_width_fraction = [sw / w for sw in seismic_width]
seismic_height_fraction = seismic_height_raw / h
# Publish some notices so user knows plot size.
Notice.info("plot width {:.2f} in".format(w))
Notice.info("plot height {:.2f} in".format(h))
# Make the figure.
fig = plt.figure(figsize=(w, h), facecolor='w')
# Set the tickformat.
tickfmt = mtick.FormatStrFormatter('%.0f')
# Could definitely do better for default fontsize than 10.
# Ideally would be adaptive to plot size.
cfg['fontsize'] = cfg['fontsize'] or 10
# Plot title.
if cfg['title']:
# Deal with Windows paths: \1 gets interpreted as a group by regex.
newt = re.sub(r'\\', '@@@@@', target)
temp = re.sub(r'_filename', newt, cfg['title'])
title = re.sub(r'@@@', r'\\', temp)
title_ax = fig.add_axes([ssl, 1-mt/h, wsl/w, mt/h])
title_ax = plotter.plot_title(title_ax,
title,
fs=1.4*cfg['fontsize'],
cfg=cfg)
# Plot title.
if cfg['subtitle']:
date = str(datetime.date.today())
subtitle = re.sub(r'_date', date, cfg['subtitle'])
subtitle_ax = fig.add_axes([ssl, 1-mt/h, wsl/w, mt/h], label='subtitle')
title_ax = plotter.plot_subtitle(subtitle_ax,
subtitle,
fs=0.75*cfg['fontsize'],
cfg=cfg)
# Plot text header.
start = (h - 1.5*mt - fhh) / h
head_ax = fig.add_axes([ssl, start, wsl/w, fhh/h])
head_ax = plotter.plot_header(head_ax,
s.header,
fs=9,
cfg=cfg,
version=__version__)
# Plot histogram.
# Params for histogram plot.
pady = 0.75 / h # 0.75 inch
padx = 0.75 / w # 0.75 inch
cstrip = 0.3/h # color_strip height = 0.3 in
charth = 1.25/h # height of charts = 1.25 in
chartw = wsl/w - mr/w - padx # or ml/w for left-hand sidelabel; same thing
chartx = (ssl + padx)
histy = 1.5 * mb/h + charth + pady
# Plot colourbar under histogram.
clrbar_ax = fig.add_axes([chartx, histy - cstrip, chartw, cstrip])
clrbar_ax = plotter.plot_colourbar(clrbar_ax, cmap=cfg['cmap'])
# Plot histogram itself.
hist_ax = fig.add_axes([chartx, histy, chartw, charth])
hist_ax = plotter.plot_histogram(hist_ax,
s.data,
tickfmt,
cfg)
# Plot spectrum.
specy = 1.5 * mb/h
spec_ax = fig.add_axes([chartx, specy, chartw, charth])
try:
colour = utils.rgb_to_hex(cfg['highlight_colour'])
spec_ax = s.plot_spectrum(ax=spec_ax,
tickfmt=tickfmt,
ntraces=20,
fontsize=cfg['fontsize'],
colour=colour,
)
except:
pass # No spectrum, oh well.
for i, line in enumerate(ss):
# Add the seismic axis.
ax = fig.add_axes([seismic_left,
seismic_bottom + i*seismic_height_fraction + i*pady,
seismic_width_fraction[i],
seismic_height_fraction
])
# Plot seismic data.
if cfg['display'].lower() in ['vd', 'varden', 'variable', 'both']:
im = ax.imshow(line.data.T,
cmap=cfg['cmap'],
clim=[-clip_val, clip_val],
extent=[line.olineidx[0],
line.olineidx[-1],
1000*line.tbasis[-1],
line.tbasis[0]],
aspect='auto',
interpolation=cfg['interpolation']
)
if np.argmin(seismic_width) == i:
cax = utils.add_subplot_axes(ax, [1.01, 0.02, 0.01, 0.2])
_ = plt.colorbar(im, cax=cax)
if cfg['display'].lower() in ['wiggle', 'both']:
ax = line.wiggle_plot(cfg['number'], direction,
ax=ax,
skip=cfg['skip'],
gain=cfg['gain'],
rgb=cfg['colour'],
alpha=cfg['opacity'],
lw=cfg['lineweight'],
)
valid = ['vd', 'varden', 'variable', 'wiggle', 'both']
if cfg['display'].lower() not in valid:
Notice.fail("You must specify the display: wiggle, vd, both.")
return
# Seismic axis annotations.
ax.set_ylabel(utils.LABELS[line.ylabel],
fontsize=cfg['fontsize'])
ax.set_xlabel(utils.LABELS[line.xlabel],
fontsize=cfg['fontsize'],
ha='center')
ax.tick_params(axis='both', labelsize=cfg['fontsize'] - 2)
ax.xaxis.set_major_formatter(tickfmt)
ax.yaxis.set_major_formatter(tickfmt)
if ('tslice' not in direction):
ax.set_ylim(1000*cfg['trange'][1] or 1000*line.tbasis[-1],
1000*cfg['trange'][0])
# Crossing point. Will only work for non-arb lines.
try:
ax.axvline(ss[i-1].slineidx[0],
c=utils.rgb_to_hex(cfg['highlight_colour']),
alpha=0.5
)
except IndexError:
pass # Nevermind.
# Grid, optional.
if cfg['grid_time'] or cfg['grid_traces']:
ax.grid()
for l in ax.get_xgridlines():
l.set_color(utils.rgb_to_hex(cfg['grid_colour']))
l.set_linestyle('-')
if cfg['grid_traces']:
l.set_linewidth(1)
else:
l.set_linewidth(0)
l.set_alpha(min(1, cfg['grid_alpha']))
for l in ax.get_ygridlines():
l.set_color(utils.rgb_to_hex(cfg['grid_colour']))
l.set_linestyle('-')
if cfg['grid_time']:
if 'tslice' in direction:
l.set_linewidth(1)
else:
l.set_linewidth(1.4)
else:
l.set_linewidth(0)
l.set_alpha(min(1, 2*cfg['grid_alpha']))
# Watermark.
if cfg['watermark_text']:
ax = plotter.watermark_seismic(ax, cfg)
# Make parasitic (top) axis for labeling CDP number.
if (s.data.ndim > 2) and ('tslice' not in direction):
ylim = ax.get_ylim()
par1 = ax.twiny()
par1.spines["top"].set_position(("axes", 1.0))
par1.plot(line.slineidx, np.zeros_like(line.slineidx), alpha=0)
par1.set_xlabel(utils.LABELS[line.slabel], fontsize=cfg['fontsize'])
par1.set_ylim(ylim)
# Adjust ticks
tx = par1.get_xticks()
newtx = [line.slineidx[len(line.slineidx)*(i//len(tx))] for i, _ in enumerate(tx)]
par1.set_xticklabels(newtx, fontsize=cfg['fontsize']-2)
t2 = time.time()
Notice.ok("Built plot in {:.1f} s".format(t2-t1))
#####################################################################
#
# SAVE FILE
#
#####################################################################
Notice.hr_header("Saving")
dname, fname, ext = utils.path_bits(target)
outfile = cfg['outfile'] or ''
if not os.path.splitext(outfile)[1]:
outfile = os.path.join(cfg['outfile'] or dname, fname + '.png')
fig.savefig(outfile)
t3 = time.time()
Notice.info("output file {}".format(outfile))
Notice.ok("Saved output in {:.1f} s".format(t3-t2))
if cfg['stain_paper'] or cfg['coffee_rings'] or cfg['distort'] or cfg['scribble']:
fname = os.path.splitext(outfile)[0] + ".stupid.png"
fig.savefig(fname)
else:
return
#####################################################################
#
# SAVE STUPID FILE
#
#####################################################################
Notice.hr_header("Applying the stupidity")
stupid_image = Image.open(fname)
if cfg['stain_paper']:
utils.stain_paper(stupid_image)
utils.add_rings(stupid_image, cfg['coffee_rings'])
if cfg['scribble']:
utils.add_scribble(stupid_image)
# Trick to remove remaining semi-transparent pixels.
result = Image.new("RGB", stupid_image.size, (255, 255, 255))
result.paste(stupid_image)
result.save(fname)
t4 = time.time()
Notice.info("output file {}".format(fname))
Notice.ok("Saved stupidity in {:.1f} s".format(t4-t3))
return
if __name__ == "__main__":
    # Command-line entry point: parse args, load/merge the YAML config, then
    # run main() on every SEGY file matched by the (possibly globbed) target.
    parser = argparse.ArgumentParser(description='Plot a SEGY file.')
    parser.add_argument("-c", "--config",
                        metavar="config file",
                        type=argparse.FileType('r'),
                        default="config.yml",
                        nargs="?",
                        help="The name of a YAML config file. Default: config.yml.")
    parser.add_argument('filename',
                        metavar='SEGY file',
                        type=str,
                        nargs='?',
                        default='./*.[s,S]*[g,G][y,Y]',
                        help='The path to one or more SEGY files. Uses Unix-style pathname expansion. Omit to find all SEGY files in current directory.')
    parser.add_argument('-o', '--out',
                        metavar='output file',
                        type=str,
                        nargs='?',
                        default='',
                        help='The path to an output file. Default: same as input file, but with png file extension.')
    parser.add_argument('-n', '--ndim',
                        metavar='dimensions',
                        type=int,
                        nargs='?',
                        default=0,
                        help='The number of dimensions of the input seismic, usually 2 or 3. Overrides config file.')
    parser.add_argument('-d', '--demo',
                        action='store_true',
                        help='Run with the demo file, data/31_81_PR.png.')
    parser.add_argument('-v', '--version',
                        action='store_true',
                        help='Get the version number.')
    args = parser.parse_args()

    # --version short-circuits everything else.
    if args.version:
        Notice.info(__version__)
        sys.exit()

    Notice.title()
    target = args.filename
    with args.config as f:
        cfg = yaml.safe_load(f)
    Notice.hr_header("Initializing")
    Notice.info("config     {}".format(args.config.name))

    # Fill in 'missing' fields in cfg with package defaults, then apply
    # command-line overrides.
    cfg = {k: cfg.get(k, v) for k, v in utils.DEFAULTS.items()}
    cfg['outfile'] = args.out
    cfg['ndim'] = args.ndim or cfg['ndim']

    if args.demo:
        target = './data/31_81_PR.sgy'

    # Go do it!
    try:
        globule = glob.iglob(target, recursive=True)  # Python 3.5+
    except TypeError:
        # glob.iglob() only grew the `recursive` keyword in Python 3.5; older
        # interpreters raise TypeError for the unknown argument. A bare
        # `except:` here would also have swallowed KeyboardInterrupt etc.
        globule = glob.iglob(target)  # Python < 3.5
    for t in globule:
        Notice.hr_header("Processing file")
        Notice.info("filename {}".format(t))
        main(t, cfg)
    Notice.hr_header("Done")
| [
"PIL.Image.new",
"argparse.ArgumentParser",
"numpy.amin",
"numpy.argmin",
"matplotlib.pyplot.figure",
"yaml.safe_load",
"glob.iglob",
"os.path.join",
"utils.add_scribble",
"seismic.Seismic.from_segy",
"plotter.plot_histogram",
"utils.add_rings",
"notice.Notice.fail",
"numpy.zeros_like",
... | [((533, 544), 'time.time', 'time.time', ([], {}), '()\n', (542, 544), False, 'import time\n'), ((1828, 1868), 'numpy.percentile', 'np.percentile', (['s.data', "cfg['percentile']"], {}), "(s.data, cfg['percentile'])\n", (1841, 1868), True, 'import numpy as np\n'), ((2519, 2530), 'time.time', 'time.time', ([], {}), '()\n', (2528, 2530), False, 'import time\n'), ((2765, 2793), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Plotting"""'], {}), "('Plotting')\n", (2781, 2793), False, 'from notice import Notice\n'), ((5015, 5056), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(w, h)', 'facecolor': '"""w"""'}), "(figsize=(w, h), facecolor='w')\n", (5025, 5056), True, 'import matplotlib.pyplot as plt\n'), ((5098, 5130), 'matplotlib.ticker.FormatStrFormatter', 'mtick.FormatStrFormatter', (['"""%.0f"""'], {}), "('%.0f')\n", (5122, 5130), True, 'import matplotlib.ticker as mtick\n'), ((6384, 6458), 'plotter.plot_header', 'plotter.plot_header', (['head_ax', 's.header'], {'fs': '(9)', 'cfg': 'cfg', 'version': '__version__'}), '(head_ax, s.header, fs=9, cfg=cfg, version=__version__)\n', (6403, 6458), False, 'import plotter\n'), ((7090, 7141), 'plotter.plot_colourbar', 'plotter.plot_colourbar', (['clrbar_ax'], {'cmap': "cfg['cmap']"}), "(clrbar_ax, cmap=cfg['cmap'])\n", (7112, 7141), False, 'import plotter\n'), ((7245, 7298), 'plotter.plot_histogram', 'plotter.plot_histogram', (['hist_ax', 's.data', 'tickfmt', 'cfg'], {}), '(hist_ax, s.data, tickfmt, cfg)\n', (7267, 7298), False, 'import plotter\n'), ((12240, 12251), 'time.time', 'time.time', ([], {}), '()\n', (12249, 12251), False, 'import time\n'), ((12487, 12513), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Saving"""'], {}), "('Saving')\n", (12503, 12513), False, 'from notice import Notice\n'), ((12539, 12562), 'utils.path_bits', 'utils.path_bits', (['target'], {}), '(target)\n', (12554, 12562), False, 'import utils\n'), ((12747, 12758), 'time.time', 'time.time', ([], {}), '()\n', (12756, 
12758), False, 'import time\n'), ((13254, 13296), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Applying the stupidity"""'], {}), "('Applying the stupidity')\n", (13270, 13296), False, 'from notice import Notice\n'), ((13317, 13334), 'PIL.Image.open', 'Image.open', (['fname'], {}), '(fname)\n', (13327, 13334), False, 'from PIL import Image\n'), ((13406, 13456), 'utils.add_rings', 'utils.add_rings', (['stupid_image', "cfg['coffee_rings']"], {}), "(stupid_image, cfg['coffee_rings'])\n", (13421, 13456), False, 'import utils\n'), ((13593, 13645), 'PIL.Image.new', 'Image.new', (['"""RGB"""', 'stupid_image.size', '(255, 255, 255)'], {}), "('RGB', stupid_image.size, (255, 255, 255))\n", (13602, 13645), False, 'from PIL import Image\n'), ((13711, 13722), 'time.time', 'time.time', ([], {}), '()\n', (13720, 13722), False, 'import time\n'), ((13885, 13941), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Plot a SEGY file."""'}), "(description='Plot a SEGY file.')\n", (13908, 13941), False, 'import argparse\n'), ((15639, 15653), 'notice.Notice.title', 'Notice.title', ([], {}), '()\n', (15651, 15653), False, 'from notice import Notice\n'), ((15744, 15776), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Initializing"""'], {}), "('Initializing')\n", (15760, 15776), False, 'from notice import Notice\n'), ((781, 847), 'seismic.Seismic.from_segy_with_obspy', 'Seismic.from_segy_with_obspy', (['target'], {'params': "{'ndim': cfg['ndim']}"}), "(target, params={'ndim': cfg['ndim']})\n", (809, 847), False, 'from seismic import Seismic\n'), ((870, 925), 'seismic.Seismic.from_segy', 'Seismic.from_segy', (['target'], {'params': "{'ndim': cfg['ndim']}"}), "(target, params={'ndim': cfg['ndim']})\n", (887, 925), False, 'from seismic import Seismic\n'), ((5416, 5447), 're.sub', 're.sub', (['"""\\\\\\\\"""', '"""@@@@@"""', 'target'], {}), "('\\\\\\\\', '@@@@@', target)\n", (5422, 5447), False, 'import re\n'), ((5462, 5501), 're.sub', 're.sub', 
(['"""_filename"""', 'newt', "cfg['title']"], {}), "('_filename', newt, cfg['title'])\n", (5468, 5501), False, 'import re\n'), ((5519, 5546), 're.sub', 're.sub', (['"""@@@"""', '"""\\\\\\\\"""', 'temp'], {}), "('@@@', '\\\\\\\\', temp)\n", (5525, 5546), False, 'import re\n'), ((5626, 5696), 'plotter.plot_title', 'plotter.plot_title', (['title_ax', 'title'], {'fs': "(1.4 * cfg['fontsize'])", 'cfg': 'cfg'}), "(title_ax, title, fs=1.4 * cfg['fontsize'], cfg=cfg)\n", (5644, 5696), False, 'import plotter\n'), ((5913, 5951), 're.sub', 're.sub', (['"""_date"""', 'date', "cfg['subtitle']"], {}), "('_date', date, cfg['subtitle'])\n", (5919, 5951), False, 'import re\n'), ((6053, 6138), 'plotter.plot_subtitle', 'plotter.plot_subtitle', (['subtitle_ax', 'subtitle'], {'fs': "(0.75 * cfg['fontsize'])", 'cfg': 'cfg'}), "(subtitle_ax, subtitle, fs=0.75 * cfg['fontsize'], cfg=cfg\n )\n", (6074, 6138), False, 'import plotter\n'), ((7542, 7583), 'utils.rgb_to_hex', 'utils.rgb_to_hex', (["cfg['highlight_colour']"], {}), "(cfg['highlight_colour'])\n", (7558, 7583), False, 'import utils\n'), ((12657, 12710), 'os.path.join', 'os.path.join', (["(cfg['outfile'] or dname)", "(fname + '.png')"], {}), "(cfg['outfile'] or dname, fname + '.png')\n", (12669, 12710), False, 'import os\n'), ((13370, 13401), 'utils.stain_paper', 'utils.stain_paper', (['stupid_image'], {}), '(stupid_image)\n', (13387, 13401), False, 'import utils\n'), ((13489, 13521), 'utils.add_scribble', 'utils.add_scribble', (['stupid_image'], {}), '(stupid_image)\n', (13507, 13521), False, 'import utils\n'), ((15591, 15615), 'notice.Notice.info', 'Notice.info', (['__version__'], {}), '(__version__)\n', (15602, 15615), False, 'from notice import Notice\n'), ((15624, 15634), 'sys.exit', 'sys.exit', ([], {}), '()\n', (15632, 15634), False, 'import sys\n'), ((15722, 15739), 'yaml.safe_load', 'yaml.safe_load', (['f'], {}), '(f)\n', (15736, 15739), False, 'import yaml\n'), ((16114, 16148), 'glob.iglob', 'glob.iglob', (['target'], 
{'recursive': '(True)'}), '(target, recursive=True)\n', (16124, 16148), False, 'import glob\n'), ((16259, 16294), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Processing file"""'], {}), "('Processing file')\n", (16275, 16294), False, 'from notice import Notice\n'), ((16371, 16395), 'notice.Notice.hr_header', 'Notice.hr_header', (['"""Done"""'], {}), "('Done')\n", (16387, 16395), False, 'from notice import Notice\n'), ((1486, 1527), 'seismic.Seismic.from_seismic', 'Seismic.from_seismic', (['s'], {'n': 'n', 'direction': 'd'}), '(s, n=n, direction=d)\n', (1506, 1527), False, 'from seismic import Seismic\n'), ((1642, 1687), 'seismic.Seismic.from_segy', 'Seismic.from_segy', (['target'], {'params': "{'ndim': 2}"}), "(target, params={'ndim': 2})\n", (1659, 1687), False, 'from seismic import Seismic\n'), ((5871, 5892), 'datetime.date.today', 'datetime.date.today', ([], {}), '()\n', (5890, 5892), False, 'import datetime\n'), ((9594, 9656), 'notice.Notice.fail', 'Notice.fail', (['"""You must specify the display: wiggle, vd, both."""'], {}), "('You must specify the display: wiggle, vd, both.')\n", (9605, 9656), False, 'from notice import Notice\n'), ((11534, 11568), 'plotter.watermark_seismic', 'plotter.watermark_seismic', (['ax', 'cfg'], {}), '(ax, cfg)\n', (11559, 11568), False, 'import plotter\n'), ((12609, 12634), 'os.path.splitext', 'os.path.splitext', (['outfile'], {}), '(outfile)\n', (12625, 12634), False, 'import os\n'), ((14060, 14082), 'argparse.FileType', 'argparse.FileType', (['"""r"""'], {}), "('r')\n", (14077, 14082), False, 'import argparse\n'), ((15915, 15937), 'utils.DEFAULTS.items', 'utils.DEFAULTS.items', ([], {}), '()\n', (15935, 15937), False, 'import utils\n'), ((16194, 16212), 'glob.iglob', 'glob.iglob', (['target'], {}), '(target)\n', (16204, 16212), False, 'import glob\n'), ((1733, 1774), 'seismic.Seismic.from_seismic', 'Seismic.from_seismic', (['s'], {'n': 'n', 'direction': 'd'}), '(s, n=n, direction=d)\n', (1753, 1774), False, 'from seismic 
import Seismic\n'), ((2374, 2389), 'numpy.amax', 'np.amax', (['s.data'], {}), '(s.data)\n', (2381, 2389), True, 'import numpy as np\n'), ((2436, 2451), 'numpy.amin', 'np.amin', (['s.data'], {}), '(s.data)\n', (2443, 2451), True, 'import numpy as np\n'), ((8855, 8879), 'numpy.argmin', 'np.argmin', (['seismic_width'], {}), '(seismic_width)\n', (8864, 8879), True, 'import numpy as np\n'), ((8908, 8959), 'utils.add_subplot_axes', 'utils.add_subplot_axes', (['ax', '[1.01, 0.02, 0.01, 0.2]'], {}), '(ax, [1.01, 0.02, 0.01, 0.2])\n', (8930, 8959), False, 'import utils\n'), ((8980, 9005), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['im'], {'cax': 'cax'}), '(im, cax=cax)\n', (8992, 9005), True, 'import matplotlib.pyplot as plt\n'), ((11852, 11880), 'numpy.zeros_like', 'np.zeros_like', (['line.slineidx'], {}), '(line.slineidx)\n', (11865, 11880), True, 'import numpy as np\n'), ((12969, 12994), 'os.path.splitext', 'os.path.splitext', (['outfile'], {}), '(outfile)\n', (12985, 12994), False, 'import os\n'), ((10403, 10444), 'utils.rgb_to_hex', 'utils.rgb_to_hex', (["cfg['highlight_colour']"], {}), "(cfg['highlight_colour'])\n", (10419, 10444), False, 'import utils\n'), ((10732, 10768), 'utils.rgb_to_hex', 'utils.rgb_to_hex', (["cfg['grid_colour']"], {}), "(cfg['grid_colour'])\n", (10748, 10768), False, 'import utils\n'), ((11071, 11107), 'utils.rgb_to_hex', 'utils.rgb_to_hex', (["cfg['grid_colour']"], {}), "(cfg['grid_colour'])\n", (11087, 11107), False, 'import utils\n')] |
#!/usr/bin/env python
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped, TwistStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
from scipy.spatial import KDTree
from copy import deepcopy
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
'''
LOOKAHEAD_WPS = 100 # Number of waypoints published ahead of the car each cycle; tune for horizon vs. CPU cost
class WaypointUpdater(object):
    """Publish the next LOOKAHEAD_WPS track waypoints ahead of the car.

    Subscribes to the car's pose and velocity, the full track waypoint list,
    and the next red-light stop-line index, and publishes a trimmed (and,
    when approaching a red light, decelerated) Lane on /final_waypoints.
    """

    def __init__(self):
        rospy.init_node('waypoint_updater')

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
        rospy.Subscriber('/current_velocity', TwistStamped, self.twisted_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        # State filled in asynchronously by the subscriber callbacks above.
        self.pose = None             # latest PoseStamped of the car
        self.twisted = None          # latest TwistStamped (current velocity)
        self.base_waypoints = None   # full track Lane message
        self.stopline_wp_idx = -1    # waypoint index of next red light (-1 = none)
        self.waypoints = None        # deep copy of the base waypoint list
        self.waypoints_2d = None     # [[x, y], ...] used to build the KD-tree
        self.tree = None             # KDTree for nearest-waypoint queries
        self.braking = False         # True while decelerating toward a stop line

        self.loop()  # NOTE: blocks here, publishing until ROS shutdown

    def loop(self):
        """Publish lookahead waypoints at a fixed rate until shutdown."""
        rate = rospy.Rate(10)  # 50 possible; could go as low as 30 Hz per video walkthrough
        while not rospy.is_shutdown():
            # Wait until both a pose and the waypoint KD-tree are available.
            if self.pose and self.tree:
                closest_waypoint_idx = self.get_closest_waypoint_idx()
                self.publish_waypoints(closest_waypoint_idx)
            rate.sleep()

    def get_closest_waypoint_idx(self):
        """Return the index of the closest base waypoint *ahead of* the car."""
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        closest_idx = self.tree.query([x, y], 1)[1]

        # Decide whether the closest waypoint is ahead of or behind the car:
        # project the car's offset from the closest waypoint onto the segment
        # direction (prev -> closest). A positive dot product means the car
        # has already passed the closest waypoint.
        closest_coord = self.waypoints_2d[closest_idx]
        prev_coord = self.waypoints_2d[closest_idx - 1]
        cl_vect = np.array(closest_coord)
        prev_vect = np.array(prev_coord)
        pos_vect = np.array([x, y])  # car's position
        val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
        if val > 0:
            # Closest waypoint is behind us; take the next one (wrap at end).
            closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        return closest_idx

    def publish_waypoints(self, closest_idx):
        """Publish the final lane starting at closest_idx.

        FIX: forward closest_idx to generate_lane() so the KD-tree query is
        not redundantly repeated every cycle (the argument used to be ignored).
        """
        final_lane = self.generate_lane(closest_idx)
        self.final_waypoints_pub.publish(final_lane)

    def generate_lane(self, closest_idx=None):
        """Build the Lane message of upcoming waypoints.

        closest_idx may be supplied by the caller to reuse an already-computed
        nearest-waypoint index; when omitted it is computed here (backward
        compatible with the original no-argument call).
        """
        lane = Lane()
        if closest_idx is None:
            closest_idx = self.get_closest_waypoint_idx()
        farthest_idx = closest_idx + LOOKAHEAD_WPS
        base_waypoints = self.base_waypoints.waypoints[closest_idx:farthest_idx]
        if (self.stopline_wp_idx == -1):  # no red light ahead: drive normally
            lane.waypoints = base_waypoints
        else:
            # Distance to two waypoints short of the stop line, so the nose
            # of the car stops at the line itself.
            d_to_stop = self.arc_distance(self.base_waypoints.waypoints, closest_idx, self.stopline_wp_idx - 2)
            STOP_THRESH = 85 * 0.3048
            # 85' per https://nacto.org/docs/usdg/vehicle_stopping_distance_and_time_upenn.pdf
            if d_to_stop > STOP_THRESH:
                # Still too far away to start braking.
                lane.waypoints = base_waypoints
                self.braking = False
            elif d_to_stop < 5 and not self.braking:
                # Too close to stop safely and not already braking: carry on.
                lane.waypoints = base_waypoints
            else:
                lane.waypoints = self.decelerate_waypoints(base_waypoints, d_to_stop, closest_idx)  # slow down!
                self.braking = True
        return lane

    def get_current_speed(self):
        """Return the car's current longitudinal speed from /current_velocity."""
        return self.twisted.twist.linear.x

    def decelerate_waypoints(self, waypoints, total_dist, closest_idx):
        """Return a copy of `waypoints` with target velocities ramped to zero.

        Velocity falls linearly with the remaining arc distance to the stop
        index, starting from the car's current speed; speeds below 1 are
        clamped to 0, and one extra zero-velocity waypoint is appended.
        """
        temp = []
        cur_speed = self.twisted.twist.linear.x
        # Two waypoints back from the line so the front of the car stops at it.
        stop_idx = self.stopline_wp_idx - 2
        for i, wp in enumerate(waypoints):
            p = Waypoint()
            p.pose = waypoints[i].pose
            dist = self.arc_distance(waypoints, i, stop_idx - closest_idx)
            if dist == 0:
                vel = 0
            else:
                # Linear ramp fast -> slower -> zero; an S-curve would be
                # smoother here.
                vel = dist / total_dist * cur_speed
            if vel < 1.:
                vel = 0.
            if i >= 1:
                p.twist.twist.linear.x = vel
            temp.append(p)
        # Force a final full-stop waypoint at the end of the slice.
        p = Waypoint()
        p.pose = waypoints[len(waypoints) - 1].pose
        p.twist.twist.linear.x = 0.
        temp.append(p)
        return temp

    def pose_cb(self, msg):
        """Cache the latest car pose."""
        self.pose = msg

    def twisted_cb(self, msg):
        """Cache the latest current-velocity message."""
        self.twisted = msg

    def waypoints_cb(self, waypoints):
        """Cache the base waypoints and build the KD-tree once."""
        self.base_waypoints = waypoints
        self.waypoints = deepcopy(self.base_waypoints.waypoints)
        if not self.tree:
            self.waypoints_2d = [[wp.pose.pose.position.x, wp.pose.pose.position.y] for wp in self.waypoints]
            self.tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Cache the waypoint index of the next red-light stop line."""
        self.stopline_wp_idx = msg.data

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored on one waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def arc_distance(self, waypoints, wp1, wp2):
        """Return the piecewise-linear path length from index wp1 to wp2.

        Sums Euclidean segment lengths between consecutive waypoints;
        returns 0 when wp2 <= wp1.
        """
        dist = 0
        dl = lambda a, b: math.sqrt((a.x-b.x)**2 + (a.y-b.y)**2 + (a.z-b.z)**2)
        for i in range(wp1, wp2+1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist

    def overall_distance(self, waypoints):
        """Convenience: path length over the whole waypoint list."""
        return self.arc_distance(waypoints, 0, len(waypoints)-1)
def _run():
    """Start the waypoint updater node; log a failure on ROS interruption."""
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')


if __name__ == '__main__':
    _run()
| [
"copy.deepcopy",
"rospy.logerr",
"rospy.Subscriber",
"math.sqrt",
"styx_msgs.msg.Lane",
"rospy.Publisher",
"rospy.Rate",
"rospy.is_shutdown",
"numpy.array",
"rospy.init_node",
"scipy.spatial.KDTree",
"numpy.dot",
"styx_msgs.msg.Waypoint"
] | [((969, 1004), 'rospy.init_node', 'rospy.init_node', (['"""waypoint_updater"""'], {}), "('waypoint_updater')\n", (984, 1004), False, 'import rospy\n'), ((1014, 1074), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1030, 1074), False, 'import rospy\n'), ((1083, 1143), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1099, 1143), False, 'import rospy\n'), ((1152, 1220), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_velocity"""', 'TwistStamped', 'self.twisted_cb'], {}), "('/current_velocity', TwistStamped, self.twisted_cb)\n", (1168, 1220), False, 'import rospy\n'), ((1315, 1376), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/traffic_waypoint"""', 'Int32', 'self.traffic_cb'], {}), "('/traffic_waypoint', Int32, self.traffic_cb)\n", (1331, 1376), False, 'import rospy\n'), ((1413, 1467), 'rospy.Publisher', 'rospy.Publisher', (['"""final_waypoints"""', 'Lane'], {'queue_size': '(1)'}), "('final_waypoints', Lane, queue_size=1)\n", (1428, 1467), False, 'import rospy\n'), ((1800, 1814), 'rospy.Rate', 'rospy.Rate', (['(10)'], {}), '(10)\n', (1810, 1814), False, 'import rospy\n'), ((2486, 2509), 'numpy.array', 'np.array', (['closest_coord'], {}), '(closest_coord)\n', (2494, 2509), True, 'import numpy as np\n'), ((2583, 2603), 'numpy.array', 'np.array', (['prev_coord'], {}), '(prev_coord)\n', (2591, 2603), True, 'import numpy as np\n'), ((2623, 2639), 'numpy.array', 'np.array', (['[x, y]'], {}), '([x, y])\n', (2631, 2639), True, 'import numpy as np\n'), ((2682, 2729), 'numpy.dot', 'np.dot', (['(cl_vect - prev_vect)', '(pos_vect - cl_vect)'], {}), '(cl_vect - prev_vect, pos_vect - cl_vect)\n', (2688, 2729), True, 'import numpy as np\n'), ((3033, 3039), 'styx_msgs.msg.Lane', 'Lane', ([], {}), '()\n', (3037, 3039), False, 'from styx_msgs.msg import Lane, 
Waypoint\n'), ((5201, 5211), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (5209, 5211), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((5791, 5830), 'copy.deepcopy', 'deepcopy', (['self.base_waypoints.waypoints'], {}), '(self.base_waypoints.waypoints)\n', (5799, 5830), False, 'from copy import deepcopy\n'), ((1897, 1916), 'rospy.is_shutdown', 'rospy.is_shutdown', ([], {}), '()\n', (1914, 1916), False, 'import rospy\n'), ((4725, 4735), 'styx_msgs.msg.Waypoint', 'Waypoint', ([], {}), '()\n', (4733, 4735), False, 'from styx_msgs.msg import Lane, Waypoint\n'), ((5991, 6016), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (5997, 6016), False, 'from scipy.spatial import KDTree\n'), ((6534, 6599), 'math.sqrt', 'math.sqrt', (['((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)'], {}), '((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)\n', (6543, 6599), False, 'import math\n'), ((7016, 7070), 'rospy.logerr', 'rospy.logerr', (['"""Could not start waypoint updater node."""'], {}), "('Could not start waypoint updater node.')\n", (7028, 7070), False, 'import rospy\n')] |
# __author__ : slade
# __time__ : 17/12/21
import pandas as pd
import numpy as np
from xgboost.sklearn import XGBClassifier
from sklearn.preprocessing import OneHotEncoder
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.grid_search import GridSearchCV
# load data
def _read_features(path):
    """Read a feature CSV and drop the leading index column."""
    return pd.read_csv(path).iloc[:, 1:]


def _read_labels(path):
    """Read a header-less label CSV and drop the leading index column."""
    return pd.read_csv(path, header=None).iloc[:, 1:]


X_train = _read_features('ensemble_X_train.csv')
Y_train = _read_labels('ensemble_Y_train.csv')
X_test = _read_features('ensemble_X_test.csv')
Y_test = _read_labels('ensemble_Y_test.csv')

# Flatten the single-column label frames into 1-D arrays.
Y_train = np.array(Y_train).ravel()
Y_test = np.array(Y_test).ravel()
# Metrics helper: the first return value is the positive-class hit rate
# (recall), the second is the overall correction rate (accuracy) at `cutoff`.
def metrics_spec(actual_data, predict_data, cutoff=0.5):
    """Return (positive-case rate, correction rate) for scores vs. labels."""
    labels = np.array(actual_data)
    scores = np.array(predict_data)
    pos_scores = scores[labels == 1]
    neg_scores = scores[labels == 0]
    pos_hits = (pos_scores >= cutoff).sum()
    neg_hits = (neg_scores < cutoff).sum()
    res1 = 1.0 * pos_hits / pos_scores.shape[0]
    res2 = 1.0 * (pos_hits + neg_hits) / labels.shape[0]
    return res1, res2
# Tuning strategy follows the article 'Kaggle-TianChi分类问题相关纯算法理论剖析'
# (a theory-focused walkthrough of classification tuning): fix sensible
# defaults, then tune hyperparameters one group at a time.
# scale_pos_weight can be rebalanced as
#   (len(Y_train) - Y_train.sum()) / Y_train.sum()
# to offset the positive/negative class imbalance.
# Step 1: search n_estimators and learning_rate first.
# Increasing cv raises confidence in the CV estimate at the cost of runtime.
# NOTE(review): GridSearchCV comes from sklearn.grid_search and uses the
# `iid` argument — both exist only in old scikit-learn releases.
param_test = {
    'learning_rate': [0.1, 0.3, 0.9],
    'n_estimators': [50, 100, 300, 500]
}
gsearch = GridSearchCV(
    estimator=XGBClassifier(
        learning_rate=0.1,
        objective='binary:logistic',
        scale_pos_weight=1.002252816020025,  # presumably the neg/pos ratio above — confirm
        seed=27),
    param_grid=param_test,
    scoring='roc_auc',
    n_jobs=4,
    iid=False,
    cv=2)
gsearch.fit(X_train, Y_train)
print(gsearch.best_params_)
# Observed best: {'learning_rate': 0.1, 'n_estimators': 100}
# Training speed also matters; one can trade a little accuracy for speed here,
# and these two parameters can be re-tuned again at the very end if needed.
# Step 2: tune subsample, with the step-1 winners fixed
# (learning_rate=0.1, n_estimators=100).
param_test1 = {
    'subsample': [0.6, 0.7, 0.8, 0.9]
}
gsearch1 = GridSearchCV(
    estimator=XGBClassifier(
        learning_rate=0.1,
        n_estimators=100,
        objective='binary:logistic',
        scale_pos_weight=1.002252816020025,
        seed=27),
    param_grid=param_test1,
    scoring='roc_auc',
    n_jobs=4,
    iid=False,
    cv=2)
gsearch1.fit(X_train, Y_train)
print(gsearch1.best_params_)
# Observed best: {'subsample': 0.7}
# For a more reliable model, re-check test-set accuracy after each tuning
# step; if it degrades compared with the previous step, follow the
# adjustment guidance from the article referenced above.
# max_leaf_nodes and min_weight_fraction_leaf were tried separately and did
# not help, so they are skipped. Step 3: tune max_depth and min_child_weight.
param_test2 = {
    'max_depth': [3, 5, 7],
    'min_child_weight': [0.8, 1, 1.2]
}
gsearch2 = GridSearchCV(
    estimator=XGBClassifier(
        learning_rate=0.1,
        n_estimators=100,
        subsample=0.7,  # best value from step 2
        objective='binary:logistic',
        scale_pos_weight=1.002252816020025,
        seed=27),
    param_grid=param_test2,
    scoring='roc_auc',
    n_jobs=4,
    iid=False,
    cv=2)
gsearch2.fit(X_train, Y_train)
print(gsearch2.best_params_)
# Observed best: {'max_depth': 3, 'min_child_weight': 0.8}
# Step 4: tune colsample_bytree.
# BUG FIX: the previously tuned values are subsample=0.7 (gsearch1) and
# min_child_weight=0.8 (gsearch2) — and gsearch4 plus the final clf use
# exactly those — but this search originally had them transposed
# (subsample=0.8, min_child_weight=0.7). Restore the tuned values so the
# search is run against the same fixed configuration as the other steps.
param_test3 = {
    'colsample_bytree': [0.6, 0.7, 0.8, 0.9]
}
gsearch3 = GridSearchCV(
    estimator=XGBClassifier(
        learning_rate=0.1,
        n_estimators=100,
        max_depth=3,
        subsample=0.7,         # best value from the subsample search
        min_child_weight=0.8,  # best value from the depth/child-weight search
        objective='binary:logistic',
        scale_pos_weight=1.002252816020025,
        seed=27),
    param_grid=param_test3,
    scoring='roc_auc',
    n_jobs=4,
    iid=False,
    cv=2)
gsearch3.fit(X_train, Y_train)
print(gsearch3.best_params_)
# Observed best: {'colsample_bytree': 0.7}
# Step 5: tune the L2 (reg_lambda) and L1 (reg_alpha) regularisation last,
# with all previously tuned values fixed.
param_test4 = {
    'reg_lambda': [0.1, 0.3, 0.9, 3],
    'reg_alpha': [0.1, 0.3, 0.9, 3]
}
gsearch4 = GridSearchCV(
    estimator=XGBClassifier(
        learning_rate=0.1,
        n_estimators=100,
        max_depth=3,
        subsample=0.7,
        min_child_weight=0.8,
        colsample_bytree=0.7,
        objective='binary:logistic',
        scale_pos_weight=1.002252816020025,
        seed=27),
    param_grid=param_test4,
    scoring='roc_auc',
    n_jobs=4,
    iid=False,
    cv=2)
gsearch4.fit(X_train, Y_train)
print(gsearch4.best_params_)
# Observed best: {'reg_alpha': 0.3, 'reg_lambda': 0.1}
# for short, we skip the process of training the max_features and the process of training the pairs between learning_rate and n_estimators,but if u want to train a nice model these ways should be added at your process.
# with the same reason,i skip the code '鞍点逃逸' and '极限探索' ,follow the methods mentioned at the article 'Kaggle&TianChi分类问题相关纯算法理论剖析' ,try it by yourself
# define the final param
clf = XGBClassifier(
learning_rate=0.1,
n_estimators=100,
max_depth=3,
subsample=0.7,
min_child_weight=0.8,
colsample_bytree=0.7,
objective='binary:logistic',
scale_pos_weight=1.002252816020025,
reg_alpha=0.3,
reg_lambda=0.1,
seed=27
)
# train the values
model_sklearn = clf.fit(X_train, Y_train)
y_bst = model_sklearn.predict_proba(X_test)[:, 1]
metrics_spec(Y_train, model_sklearn.predict_proba(X_train)[:, 1])
metrics_spec(Y_test, y_bst)
# make new features
# we can get the spare leaf nodes for the input of stacking
train_new_feature = clf.apply(X_train)
test_new_feature = clf.apply(X_test)
enc = OneHotEncoder()
enc.fit(train_new_feature)
train_new_feature2 = np.array(enc.transform(train_new_feature).toarray())
test_new_feature2 = np.array(enc.transform(test_new_feature).toarray())
res_data = pd.DataFrame(np.c_[Y_train, train_new_feature2])
res_data.columns = ['f' + str(x) for x in range(res_data.shape[1])]
res_test = pd.DataFrame(np.c_[Y_test, test_new_feature2])
res_test.columns = ['f' + str(x) for x in range(res_test.shape[1])]
# Stack a second-level model on the leaf features; logistic regression is
# used here, but an FM or a neural network can work even better. See the
# article referenced at the top for stacking caveats.
lr = LogisticRegression(C=1, penalty='l2', max_iter=100, solver='sag', multi_class='ovr')
model_lr = lr.fit(res_data.iloc[:, 1:], res_data['f0'])  # 'f0' is the label column
y_train_lr = model_lr.predict_proba(res_data.iloc[:, 1:])[:, 1]
y_test_lr = model_lr.predict_proba(res_test.iloc[:, 1:])[:, 1]
res = metrics_spec(Y_test, y_test_lr)  # (recall, accuracy) of the stacked model
correct_rank = X_train.columns  # feature order, saved so deployment can reorder inputs
# Save the fitted models so a deployment can load them later.
# NOTE: sklearn.externals.joblib was deprecated in scikit-learn 0.21 and
# removed in 0.23. Prefer the standalone joblib package when it is installed,
# falling back to the vendored copy for old environments (backward compatible).
try:
    import joblib
except ImportError:
    from sklearn.externals import joblib
joblib.dump(model_sklearn, 'model_sklearn.pkl')
joblib.dump(correct_rank, 'correct_rank.pkl')
joblib.dump(enc, 'enc.pkl')
joblib.dump(model_lr, 'model_lr.pkl')
# Model evaluation: KS (Kolmogorov-Smirnov) statistic
# ks_xgb_lr = np.c_[Y_test,y_test_lr]
# ks_xgb_lr = sorted(ks_xgb_lr , key = lambda x : x[1],reverse = True)
# ks_xgb_lr = pd.DataFrame(ks_xgb_lr)
# for i in range(9):
# end = (i+1)*break_cut
# res1 = 1.0*ks_xgb_lr.iloc[:end,:][ks_xgb_lr.iloc[:end,0]==0].shape[0]/ks_xgb_lr[ks_xgb_lr.iloc[:,0]==0].shape[0]
# res2 = 1.0*ks_xgb_lr.iloc[:end,:][ks_xgb_lr.iloc[:end,0]==1].shape[0]/ks_xgb_lr[ks_xgb_lr.iloc[:,0]==1].shape[0]
# res = res2-res1
# print(res1,res2,res)
| [
"pandas.DataFrame",
"sklearn.externals.joblib.dump",
"pandas.read_csv",
"xgboost.sklearn.XGBClassifier",
"sklearn.preprocessing.OneHotEncoder",
"sklearn.linear_model.logistic.LogisticRegression",
"numpy.array"
] | [((5327, 5568), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'learning_rate': '(0.1)', 'n_estimators': '(100)', 'max_depth': '(3)', 'subsample': '(0.7)', 'min_child_weight': '(0.8)', 'colsample_bytree': '(0.7)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'reg_alpha': '(0.3)', 'reg_lambda': '(0.1)', 'seed': '(27)'}), "(learning_rate=0.1, n_estimators=100, max_depth=3, subsample=\n 0.7, min_child_weight=0.8, colsample_bytree=0.7, objective=\n 'binary:logistic', scale_pos_weight=1.002252816020025, reg_alpha=0.3,\n reg_lambda=0.1, seed=27)\n", (5340, 5568), False, 'from xgboost.sklearn import XGBClassifier\n'), ((5970, 5985), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {}), '()\n', (5983, 5985), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((6170, 6218), 'pandas.DataFrame', 'pd.DataFrame', (['np.c_[Y_train, train_new_feature2]'], {}), '(np.c_[Y_train, train_new_feature2])\n', (6182, 6218), True, 'import pandas as pd\n'), ((6298, 6344), 'pandas.DataFrame', 'pd.DataFrame', (['np.c_[Y_test, test_new_feature2]'], {}), '(np.c_[Y_test, test_new_feature2])\n', (6310, 6344), True, 'import pandas as pd\n'), ((6638, 6726), 'sklearn.linear_model.logistic.LogisticRegression', 'LogisticRegression', ([], {'C': '(1)', 'penalty': '"""l2"""', 'max_iter': '(100)', 'solver': '"""sag"""', 'multi_class': '"""ovr"""'}), "(C=1, penalty='l2', max_iter=100, solver='sag',\n multi_class='ovr')\n", (6656, 6726), False, 'from sklearn.linear_model.logistic import LogisticRegression\n'), ((7086, 7133), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model_sklearn', '"""model_sklearn.pkl"""'], {}), "(model_sklearn, 'model_sklearn.pkl')\n", (7097, 7133), False, 'from sklearn.externals import joblib\n'), ((7134, 7179), 'sklearn.externals.joblib.dump', 'joblib.dump', (['correct_rank', '"""correct_rank.pkl"""'], {}), "(correct_rank, 'correct_rank.pkl')\n", (7145, 7179), False, 'from sklearn.externals import 
joblib\n'), ((7180, 7207), 'sklearn.externals.joblib.dump', 'joblib.dump', (['enc', '"""enc.pkl"""'], {}), "(enc, 'enc.pkl')\n", (7191, 7207), False, 'from sklearn.externals import joblib\n'), ((7208, 7245), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model_lr', '"""model_lr.pkl"""'], {}), "(model_lr, 'model_lr.pkl')\n", (7219, 7245), False, 'from sklearn.externals import joblib\n'), ((793, 814), 'numpy.array', 'np.array', (['actual_data'], {}), '(actual_data)\n', (801, 814), True, 'import numpy as np\n'), ((834, 856), 'numpy.array', 'np.array', (['predict_data'], {}), '(predict_data)\n', (842, 856), True, 'import numpy as np\n'), ((302, 337), 'pandas.read_csv', 'pd.read_csv', (['"""ensemble_X_train.csv"""'], {}), "('ensemble_X_train.csv')\n", (313, 337), True, 'import pandas as pd\n'), ((360, 408), 'pandas.read_csv', 'pd.read_csv', (['"""ensemble_Y_train.csv"""'], {'header': 'None'}), "('ensemble_Y_train.csv', header=None)\n", (371, 408), True, 'import pandas as pd\n'), ((430, 464), 'pandas.read_csv', 'pd.read_csv', (['"""ensemble_X_test.csv"""'], {}), "('ensemble_X_test.csv')\n", (441, 464), True, 'import pandas as pd\n'), ((486, 533), 'pandas.read_csv', 'pd.read_csv', (['"""ensemble_Y_test.csv"""'], {'header': 'None'}), "('ensemble_Y_test.csv', header=None)\n", (497, 533), True, 'import pandas as pd\n'), ((556, 573), 'numpy.array', 'np.array', (['Y_train'], {}), '(Y_train)\n', (564, 573), True, 'import numpy as np\n'), ((591, 607), 'numpy.array', 'np.array', (['Y_test'], {}), '(Y_test)\n', (599, 607), True, 'import numpy as np\n'), ((1792, 1902), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'learning_rate': '(0.1)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'seed': '(27)'}), "(learning_rate=0.1, objective='binary:logistic',\n scale_pos_weight=1.002252816020025, seed=27)\n", (1805, 1902), False, 'from xgboost.sklearn import XGBClassifier\n'), ((2437, 2566), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', 
([], {'learning_rate': '(0.1)', 'n_estimators': '(100)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'seed': '(27)'}), "(learning_rate=0.1, n_estimators=100, objective=\n 'binary:logistic', scale_pos_weight=1.002252816020025, seed=27)\n", (2450, 2566), False, 'from xgboost.sklearn import XGBClassifier\n'), ((3358, 3502), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'learning_rate': '(0.1)', 'n_estimators': '(100)', 'subsample': '(0.7)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'seed': '(27)'}), "(learning_rate=0.1, n_estimators=100, subsample=0.7, objective\n ='binary:logistic', scale_pos_weight=1.002252816020025, seed=27)\n", (3371, 3502), False, 'from xgboost.sklearn import XGBClassifier\n'), ((3875, 4058), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'learning_rate': '(0.1)', 'n_estimators': '(100)', 'max_depth': '(3)', 'subsample': '(0.8)', 'min_child_weight': '(0.7)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'seed': '(27)'}), "(learning_rate=0.1, n_estimators=100, max_depth=3, subsample=\n 0.8, min_child_weight=0.7, objective='binary:logistic',\n scale_pos_weight=1.002252816020025, seed=27)\n", (3888, 4058), False, 'from xgboost.sklearn import XGBClassifier\n'), ((4461, 4667), 'xgboost.sklearn.XGBClassifier', 'XGBClassifier', ([], {'learning_rate': '(0.1)', 'n_estimators': '(100)', 'max_depth': '(3)', 'subsample': '(0.7)', 'min_child_weight': '(0.8)', 'colsample_bytree': '(0.7)', 'objective': '"""binary:logistic"""', 'scale_pos_weight': '(1.002252816020025)', 'seed': '(27)'}), "(learning_rate=0.1, n_estimators=100, max_depth=3, subsample=\n 0.7, min_child_weight=0.8, colsample_bytree=0.7, objective=\n 'binary:logistic', scale_pos_weight=1.002252816020025, seed=27)\n", (4474, 4667), False, 'from xgboost.sklearn import XGBClassifier\n')] |
#!/usr/local/bin/python3
# -*- coding: utf-8 -*-
# DESCRIPTION: Given a directory with text files passed as argument
# to the script, a stratified (by directory) sample of file texts
# is created.
#
# Usage example:
# python get_random_sample.py --directory=../../../Fall\ 2017/deidentified/ --n_files=2
# python get_random_sample.py --directory=deidentified_files/Spring\ 2018/ --n_files=2
# python get_random_sample.py --directory=../../../MACAWS/deidentified_files/ --n_files=2
import argparse
import numpy as np
import os
import re
import sys
from shutil import copyfile
# Define the way we retrieve arguments sent to the script.
# --directory : root folder to walk for deidentified text files (required in practice)
# --n_files   : how many files to sample per subdirectory (defaults to 1)
parser = argparse.ArgumentParser(description='Get Random Sample of Textfiles')
parser.add_argument('--directory', action="store", dest='dir', default='')
parser.add_argument('--n_files', action="store", dest='n_files', default=1)
args = parser.parse_args()
def get_sample(filename):
    """Copy *filename* into the 'random_sample' output tree.

    Any leading '../' (or '..\\') components are stripped from the path so
    the copy lands inside the output directory; missing intermediate
    directories are created on demand.
    """
    # strip parent-directory escapes so the mirror path stays under the output root
    relative_name = re.sub(r'\.\.[\\\/]', '', filename)
    destination = os.path.join('random_sample', relative_name)
    destination_dir = os.path.dirname(destination)
    if not os.path.exists(destination_dir):
        os.makedirs(destination_dir)
    copyfile(filename, destination)
def get_sample_recursive(directory, n):
    """Walk *directory* and copy a stratified random sample of text files.

    For every subdirectory, n file names matching both '.txt' and '_DF_'
    are drawn (with replacement -- np.random.choice's default) and copied
    into the 'random_sample' tree via get_sample().

    Parameters
    ----------
    directory : str
        Root directory to walk recursively.
    n : int
        Number of files to sample per subdirectory.
    """
    # Collect matching files per directory. The original looped
    # `for dir in dirpath:` -- iterating the *characters* of the path and
    # re-initializing the dict entry once per character -- and kept a dead
    # `found_text_files` flag; both are fixed here. Only directories that
    # actually contain matches are recorded.
    files_by_dir = {}
    for dirpath, dirnames, files in os.walk(directory):
        matches = [name for name in files if '.txt' in name and '_DF_' in name]
        if matches:
            files_by_dir[dirpath] = matches

    # Draw n names per directory, stratified by directory.
    random_file_list = []
    for dirpath, names in files_by_dir.items():
        for file_chosen in np.random.choice(names, n):
            random_file_list.append(os.path.join(dirpath, file_chosen))

    print('A total of ' + str(len(random_file_list)) + ' files have been chosen randomly, stratified by directory.')
    for file_chosen in random_file_list:
        get_sample(file_chosen)

    # Fix: the original printed this only when os.walk yielded nothing at
    # all (nonexistent/empty root); report whenever no matching files exist.
    if not files_by_dir:
        print('No text files found in the directory.')
# Entry point: sample n_files per subdirectory under the supplied root,
# or tell the user the --directory argument is missing.
if args.dir:
    get_sample_recursive(args.dir, int(args.n_files))
else:
    print('You need to supply a valid directory with textfiles')
| [
"argparse.ArgumentParser",
"os.makedirs",
"os.path.dirname",
"os.walk",
"os.path.exists",
"numpy.random.choice",
"shutil.copyfile",
"os.path.join",
"re.sub"
] | [((654, 723), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Get Random Sample of Textfiles"""'}), "(description='Get Random Sample of Textfiles')\n", (677, 723), False, 'import argparse\n'), ((1001, 1040), 're.sub', 're.sub', (['"""\\\\.\\\\.[\\\\\\\\\\\\/]"""', '""""""', 'filename'], {}), "('\\\\.\\\\.[\\\\\\\\\\\\/]', '', filename)\n", (1007, 1040), False, 'import re\n'), ((1099, 1147), 'os.path.join', 'os.path.join', (['output_directory', 'cleaned_filename'], {}), '(output_directory, cleaned_filename)\n', (1111, 1147), False, 'import os\n'), ((1164, 1196), 'os.path.dirname', 'os.path.dirname', (['output_filename'], {}), '(output_filename)\n', (1179, 1196), False, 'import os\n'), ((1271, 1306), 'shutil.copyfile', 'copyfile', (['filename', 'output_filename'], {}), '(filename, output_filename)\n', (1279, 1306), False, 'from shutil import copyfile\n'), ((1409, 1427), 'os.walk', 'os.walk', (['directory'], {}), '(directory)\n', (1416, 1427), False, 'import os\n'), ((1208, 1233), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (1222, 1233), False, 'import os\n'), ((1243, 1265), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (1254, 1265), False, 'import os\n'), ((1794, 1833), 'numpy.random.choice', 'np.random.choice', (['list_of_files[dir]', 'n'], {}), '(list_of_files[dir], n)\n', (1810, 1833), True, 'import numpy as np\n'), ((1923, 1953), 'os.path.join', 'os.path.join', (['dir', 'file_chosen'], {}), '(dir, file_chosen)\n', (1935, 1953), False, 'import os\n')] |
""" Test script for meslas.sampling
"""
import torch
import numpy as np
from meslas.means import ConstantMean
from meslas.covariance.spatial_covariance_functions import Matern32
from meslas.covariance.cross_covariances import UniformMixing
from meslas.covariance.heterotopic import FactorCovariance
from meslas.geometry.grid import TriangularGrid, SquareGrid
from meslas.random_fields import GRF
from meslas.excursion import coverage_fct_fixed_location
# Dimension of the response (number of output components of the GRF).
n_out = 4

# Spatial covariance: Matern 3/2 kernel with length scale 0.1, sill 1.0.
matern_cov = Matern32(lmbda=0.1, sigma=1.0)

# Cross covariance between the four output components (uniform mixing model,
# one sigma per component).
cross_cov = UniformMixing(gamma0=0.0, sigmas=[np.sqrt(0.25), np.sqrt(0.3),
        np.sqrt(0.4), np.sqrt(0.5)])

# Combine spatial and cross covariance into a factor (separable) model.
covariance = FactorCovariance(matern_cov, cross_cov, n_out=n_out)

# Specify mean function: one constant mean per output component.
mean = ConstantMean([1.0, -2.0, 4.0, 33.0])

# Create the GRF.
myGRF = GRF(mean, covariance)

# Array of 2D sampling locations (S2/L2 are defined but unused below).
S1 = torch.Tensor([[0, 0], [0, 1], [0, 2], [3, 0]]).float()
S2 = torch.Tensor([[0, 0], [3, 0], [5, 4]]).float()

# Corresponding response (component) indices for each location.
L1 = torch.Tensor([0, 0, 0, 1]).long()
L2 = torch.Tensor([0, 3, 0]).long()

# Test the sampling: draw one realization at locations S1 / responses L1.
print(myGRF.sample(S1, L1))
| [
"meslas.covariance.heterotopic.FactorCovariance",
"meslas.random_fields.GRF",
"meslas.means.ConstantMean",
"meslas.covariance.spatial_covariance_functions.Matern32",
"torch.Tensor",
"numpy.sqrt"
] | [((530, 560), 'meslas.covariance.spatial_covariance_functions.Matern32', 'Matern32', ([], {'lmbda': '(0.1)', 'sigma': '(1.0)'}), '(lmbda=0.1, sigma=1.0)\n', (538, 560), False, 'from meslas.covariance.spatial_covariance_functions import Matern32\n'), ((708, 760), 'meslas.covariance.heterotopic.FactorCovariance', 'FactorCovariance', (['matern_cov', 'cross_cov'], {'n_out': 'n_out'}), '(matern_cov, cross_cov, n_out=n_out)\n', (724, 760), False, 'from meslas.covariance.heterotopic import FactorCovariance\n'), ((793, 829), 'meslas.means.ConstantMean', 'ConstantMean', (['[1.0, -2.0, 4.0, 33.0]'], {}), '([1.0, -2.0, 4.0, 33.0])\n', (805, 829), False, 'from meslas.means import ConstantMean\n'), ((857, 878), 'meslas.random_fields.GRF', 'GRF', (['mean', 'covariance'], {}), '(mean, covariance)\n', (860, 878), False, 'from meslas.random_fields import GRF\n'), ((911, 957), 'torch.Tensor', 'torch.Tensor', (['[[0, 0], [0, 1], [0, 2], [3, 0]]'], {}), '([[0, 0], [0, 1], [0, 2], [3, 0]])\n', (923, 957), False, 'import torch\n'), ((971, 1009), 'torch.Tensor', 'torch.Tensor', (['[[0, 0], [3, 0], [5, 4]]'], {}), '([[0, 0], [3, 0], [5, 4]])\n', (983, 1009), False, 'import torch\n'), ((1058, 1084), 'torch.Tensor', 'torch.Tensor', (['[0, 0, 0, 1]'], {}), '([0, 0, 0, 1])\n', (1070, 1084), False, 'import torch\n'), ((1097, 1120), 'torch.Tensor', 'torch.Tensor', (['[0, 3, 0]'], {}), '([0, 3, 0])\n', (1109, 1120), False, 'import torch\n'), ((628, 641), 'numpy.sqrt', 'np.sqrt', (['(0.25)'], {}), '(0.25)\n', (635, 641), True, 'import numpy as np\n'), ((643, 655), 'numpy.sqrt', 'np.sqrt', (['(0.3)'], {}), '(0.3)\n', (650, 655), True, 'import numpy as np\n'), ((665, 677), 'numpy.sqrt', 'np.sqrt', (['(0.4)'], {}), '(0.4)\n', (672, 677), True, 'import numpy as np\n'), ((679, 691), 'numpy.sqrt', 'np.sqrt', (['(0.5)'], {}), '(0.5)\n', (686, 691), True, 'import numpy as np\n')] |
"""Initializers.
Functions to initialize posterior distribution variables.
* :func:`.xavier` - Xavier initializer
* :func:`.scale_xavier` - Xavier initializer scaled for scale parameters
* :func:`.pos_xavier` - positive-only initizlier
----------
"""
import numpy as np
from probflow.utils.settings import get_backend, get_datatype
def xavier(shape):
    """Xavier initializer.

    Draws from a (truncated, on the TensorFlow backend) normal
    distribution with standard deviation sqrt(2 / sum(shape)).
    """
    stddev = np.sqrt(2 / sum(shape))
    if get_backend() == "pytorch":
        import torch

        # TODO: use truncated normal for torch
        return stddev * torch.randn(shape, dtype=get_datatype())
    import tensorflow as tf

    return tf.random.truncated_normal(
        shape, mean=0.0, stddev=stddev, dtype=get_datatype()
    )
def scale_xavier(shape):
    """Xavier initializer for scale variables.

    Shifts the Xavier draw by 2 - 2*log10(number of elements).
    """
    base = xavier(shape)
    if get_backend() == "pytorch":
        import torch

        n_elem = torch.prod(torch.Tensor(shape))
        return base + 2 - 2 * torch.log(n_elem) / np.log(10.0)
    import tensorflow as tf

    n_elem = float(tf.reduce_prod(shape))
    return base + 2 - 2 * tf.math.log(n_elem) / tf.math.log(10.0)
def pos_xavier(shape):
    """Xavier initializer for positive variables.

    Shifts the Xavier draw up by log10(number of elements).
    """
    base = xavier(shape)
    if get_backend() == "pytorch":
        import torch

        n_elem = torch.prod(torch.Tensor(shape))
        return base + torch.log(n_elem) / np.log(10.0)
    import tensorflow as tf

    n_elem = float(tf.reduce_prod(shape))
    return base + tf.math.log(n_elem) / tf.math.log(10.0)
def full_of(val):
    """Return an initializer mapping a shape to a tensor filled with ``val``."""
    import probflow.utils.ops as O

    def initializer(shape):
        # broadcast val over a ones tensor of the requested shape
        return val * O.ones(shape)

    return initializer
| [
"tensorflow.math.log",
"numpy.log",
"probflow.utils.ops.ones",
"tensorflow.reduce_prod",
"torch.Tensor",
"probflow.utils.settings.get_datatype",
"probflow.utils.settings.get_backend",
"torch.log"
] | [((432, 445), 'probflow.utils.settings.get_backend', 'get_backend', ([], {}), '()\n', (443, 445), False, 'from probflow.utils.settings import get_backend, get_datatype\n'), ((861, 874), 'probflow.utils.settings.get_backend', 'get_backend', ([], {}), '()\n', (872, 874), False, 'from probflow.utils.settings import get_backend, get_datatype\n'), ((1287, 1300), 'probflow.utils.settings.get_backend', 'get_backend', ([], {}), '()\n', (1298, 1300), False, 'from probflow.utils.settings import get_backend, get_datatype\n'), ((938, 957), 'torch.Tensor', 'torch.Tensor', (['shape'], {}), '(shape)\n', (950, 957), False, 'import torch\n'), ((1086, 1107), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['shape'], {}), '(shape)\n', (1100, 1107), True, 'import tensorflow as tf\n'), ((1364, 1383), 'torch.Tensor', 'torch.Tensor', (['shape'], {}), '(shape)\n', (1376, 1383), False, 'import torch\n'), ((1504, 1525), 'tensorflow.reduce_prod', 'tf.reduce_prod', (['shape'], {}), '(shape)\n', (1518, 1525), True, 'import tensorflow as tf\n'), ((1755, 1768), 'probflow.utils.ops.ones', 'O.ones', (['shape'], {}), '(shape)\n', (1761, 1768), True, 'import probflow.utils.ops as O\n'), ((728, 742), 'probflow.utils.settings.get_datatype', 'get_datatype', ([], {}), '()\n', (740, 742), False, 'from probflow.utils.settings import get_backend, get_datatype\n'), ((1008, 1020), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (1014, 1020), True, 'import numpy as np\n'), ((1160, 1177), 'tensorflow.math.log', 'tf.math.log', (['(10.0)'], {}), '(10.0)\n', (1171, 1177), True, 'import tensorflow as tf\n'), ((1407, 1423), 'torch.log', 'torch.log', (['numel'], {}), '(numel)\n', (1416, 1423), False, 'import torch\n'), ((1426, 1438), 'numpy.log', 'np.log', (['(10.0)'], {}), '(10.0)\n', (1432, 1438), True, 'import numpy as np\n'), ((1549, 1567), 'tensorflow.math.log', 'tf.math.log', (['numel'], {}), '(numel)\n', (1560, 1567), True, 'import tensorflow as tf\n'), ((1570, 1587), 'tensorflow.math.log', 
'tf.math.log', (['(10.0)'], {}), '(10.0)\n', (1581, 1587), True, 'import tensorflow as tf\n'), ((569, 583), 'probflow.utils.settings.get_datatype', 'get_datatype', ([], {}), '()\n', (581, 583), False, 'from probflow.utils.settings import get_backend, get_datatype\n'), ((989, 1005), 'torch.log', 'torch.log', (['numel'], {}), '(numel)\n', (998, 1005), False, 'import torch\n'), ((1139, 1157), 'tensorflow.math.log', 'tf.math.log', (['numel'], {}), '(numel)\n', (1150, 1157), True, 'import tensorflow as tf\n')] |
import math
import numpy as np
from PuzzleLib.Containers import Sequential
from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear
from PuzzleLib.Datasets import Cifar10Loader
from PuzzleLib.Visual import showImageBasedFilters, showFilters
from PuzzleLib.Handlers import Trainer, Validator
from PuzzleLib.Optimizers import MomentumSGD
from PuzzleLib.Cost import CrossEntropy
def buildNet():
    """Assemble the CIFAR-10 convnet.

    Three conv(5x5, pad 2) -> maxpool(3, stride 2) -> ReLU stages,
    followed by flatten and two linear layers (hidden 64, output 10).
    """
    net = Sequential()
    # (input maps, output maps, weight scale) for each conv stage
    conv_params = [(3, 32, 0.0001), (32, 32, 0.01), (32, 64, 0.01)]
    for inmaps, outmaps, wscale in conv_params:
        net.append(Conv2D(inmaps, outmaps, 5, pad=2, wscale=wscale, initscheme="gaussian"))
        net.append(MaxPool2D(3, 2))
        net.append(Activation(relu))

    net.append(Flatten())
    # infer the flattened feature size from a dummy 32x32 RGB input
    insize = net.dataShapeFrom((1, 3, 32, 32))[1]
    net.append(Linear(insize, 64, wscale=0.1, initscheme="gaussian"))
    net.append(Activation(relu))
    net.append(Linear(64, 10, wscale=0.1, initscheme="gaussian"))
    return net
def main():
    """Train the CIFAR-10 convnet for 25 epochs and dump filter visualizations."""
    cifar10 = Cifar10Loader()
    data, labels = cifar10.load(path="../TestData/")
    data, labels = data[:], labels[:]
    print("Loaded cifar10")

    # fixed seed for reproducible weight init / shuffling
    np.random.seed(1234)

    net = buildNet()

    optimizer = MomentumSGD()
    optimizer.setupOn(net, useGlobalState=True)
    optimizer.learnRate = 0.01
    optimizer.momRate = 0.9

    cost = CrossEntropy(maxlabels=10)

    trainer = Trainer(net, cost, optimizer)
    validator = Validator(net, cost)

    # best-so-far validation error; starts at +inf so epoch 1 never halves the rate
    currerror = math.inf

    for i in range(25):
        # first 50000 samples train, remaining validate
        trainer.trainFromHost(
            data[:50000], labels[:50000], macroBatchSize=50000,
            onMacroBatchFinish=lambda train: print("Train error: %s" % train.cost.getMeanError())
        )
        valerror = validator.validateFromHost(data[50000:], labels[50000:], macroBatchSize=10000)
        print("Accuracy: %s" % (1.0 - valerror))

        # halve the learn rate whenever validation error stops improving
        if valerror >= currerror:
            optimizer.learnRate *= 0.5
            print("Lowered learn rate: %s" % optimizer.learnRate)

        currerror = valerror

    # visualize learned kernels of the three conv layers (indices 0, 3, 6)
    showImageBasedFilters(net[0].W.get(), "../TestData/conv1.png")
    showFilters(net[3].W.get(), "../TestData/conv2.png")
    showFilters(net[6].W.get(), "../TestData/conv3.png")
# Run training only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"PuzzleLib.Containers.Sequential",
"PuzzleLib.Modules.Flatten",
"PuzzleLib.Handlers.Trainer",
"numpy.random.seed",
"PuzzleLib.Modules.Activation",
"PuzzleLib.Optimizers.MomentumSGD",
"PuzzleLib.Datasets.Cifar10Loader",
"PuzzleLib.Modules.Linear",
"PuzzleLib.Modules.MaxPool2D",
"PuzzleLib.Cost.Cros... | [((430, 442), 'PuzzleLib.Containers.Sequential', 'Sequential', ([], {}), '()\n', (440, 442), False, 'from PuzzleLib.Containers import Sequential\n'), ((1098, 1113), 'PuzzleLib.Datasets.Cifar10Loader', 'Cifar10Loader', ([], {}), '()\n', (1111, 1113), False, 'from PuzzleLib.Datasets import Cifar10Loader\n'), ((1226, 1246), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (1240, 1246), True, 'import numpy as np\n'), ((1279, 1292), 'PuzzleLib.Optimizers.MomentumSGD', 'MomentumSGD', ([], {}), '()\n', (1290, 1292), False, 'from PuzzleLib.Optimizers import MomentumSGD\n'), ((1400, 1426), 'PuzzleLib.Cost.CrossEntropy', 'CrossEntropy', ([], {'maxlabels': '(10)'}), '(maxlabels=10)\n', (1412, 1426), False, 'from PuzzleLib.Cost import CrossEntropy\n'), ((1438, 1467), 'PuzzleLib.Handlers.Trainer', 'Trainer', (['net', 'cost', 'optimizer'], {}), '(net, cost, optimizer)\n', (1445, 1467), False, 'from PuzzleLib.Handlers import Trainer, Validator\n'), ((1482, 1502), 'PuzzleLib.Handlers.Validator', 'Validator', (['net', 'cost'], {}), '(net, cost)\n', (1491, 1502), False, 'from PuzzleLib.Handlers import Trainer, Validator\n'), ((456, 517), 'PuzzleLib.Modules.Conv2D', 'Conv2D', (['(3)', '(32)', '(5)'], {'pad': '(2)', 'wscale': '(0.0001)', 'initscheme': '"""gaussian"""'}), "(3, 32, 5, pad=2, wscale=0.0001, initscheme='gaussian')\n", (462, 517), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((531, 546), 'PuzzleLib.Modules.MaxPool2D', 'MaxPool2D', (['(3)', '(2)'], {}), '(3, 2)\n', (540, 546), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((560, 576), 'PuzzleLib.Modules.Activation', 'Activation', (['relu'], {}), '(relu)\n', (570, 576), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((591, 651), 'PuzzleLib.Modules.Conv2D', 'Conv2D', (['(32)', '(32)', '(5)'], {'pad': '(2)', 'wscale': '(0.01)', 
'initscheme': '"""gaussian"""'}), "(32, 32, 5, pad=2, wscale=0.01, initscheme='gaussian')\n", (597, 651), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((665, 680), 'PuzzleLib.Modules.MaxPool2D', 'MaxPool2D', (['(3)', '(2)'], {}), '(3, 2)\n', (674, 680), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((694, 710), 'PuzzleLib.Modules.Activation', 'Activation', (['relu'], {}), '(relu)\n', (704, 710), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((725, 785), 'PuzzleLib.Modules.Conv2D', 'Conv2D', (['(32)', '(64)', '(5)'], {'pad': '(2)', 'wscale': '(0.01)', 'initscheme': '"""gaussian"""'}), "(32, 64, 5, pad=2, wscale=0.01, initscheme='gaussian')\n", (731, 785), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((799, 814), 'PuzzleLib.Modules.MaxPool2D', 'MaxPool2D', (['(3)', '(2)'], {}), '(3, 2)\n', (808, 814), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((828, 844), 'PuzzleLib.Modules.Activation', 'Activation', (['relu'], {}), '(relu)\n', (838, 844), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((859, 868), 'PuzzleLib.Modules.Flatten', 'Flatten', ([], {}), '()\n', (866, 868), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((979, 995), 'PuzzleLib.Modules.Activation', 'Activation', (['relu'], {}), '(relu)\n', (989, 995), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n'), ((1010, 1059), 'PuzzleLib.Modules.Linear', 'Linear', (['(64)', '(10)'], {'wscale': '(0.1)', 'initscheme': '"""gaussian"""'}), "(64, 10, wscale=0.1, initscheme='gaussian')\n", (1016, 1059), False, 'from PuzzleLib.Modules import Conv2D, MaxPool2D, Activation, relu, Flatten, Linear\n')] |
"""
Utility Algorithm Nodes.
These nodes are mainly used for testing.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import numpy as np
import traitlets as tl
# Internal dependencies
from podpac.core.coordinates import Coordinates
from podpac.core.algorithm.algorithm import Algorithm
class Arange(Algorithm):
    """A simple test node that gives each value in the output a number."""

    def algorithm(self, inputs, coordinates):
        """Fill the output with 0..size-1 in row-major order.

        Arguments
        ---------
        inputs : dict
            Unused, should be empty for this algorithm.
        coordinates : podpac.Coordinates
            Requested coordinates.

        Returns
        -------
        UnitsDataArray
            A row-majored numbered array of the requested size.
        """
        flat = np.arange(coordinates.size)
        return self.create_output_array(coordinates, data=flat.reshape(coordinates.shape))
class CoordData(Algorithm):
    """Extracts one coordinate from a request and makes it available as data.

    Attributes
    ----------
    coord_name : str
        Name of coordinate to extract (one of lat, lon, time, alt)
    """

    # NOTE(review): default_value "none" is not among the allowed Enum values
    # while allow_none=False -- reading the trait before it is assigned may
    # raise a validation error; confirm this forced-assignment behavior is
    # intentional.
    coord_name = tl.Enum(["time", "lat", "lon", "alt"], default_value="none", allow_none=False).tag(attr=True)

    def algorithm(self, inputs, coordinates):
        """Extract coordinate from request and makes data available.

        Arguments
        ----------
        inputs : dict
            Unused, should be empty for this algorithm.
        coordinates : podpac.Coordinates
            Requested coordinates.
            Note that the ``inputs`` may contain with different coordinates.

        Returns
        -------
        UnitsDataArray
            The coordinates as data for the requested coordinate.

        Raises
        ------
        ValueError
            If ``coord_name`` is not a dimension of the requested coordinates.
        """
        if self.coord_name not in coordinates.udims:
            raise ValueError("Coordinate name not in evaluated coordinates")
        c = coordinates[self.coord_name]
        # wrap the single extracted dimension in a 1-d Coordinates object
        coords = Coordinates([c], validate_crs=False)
        return self.create_output_array(coords, data=c.coordinates)
class SinCoords(Algorithm):
    """A simple test node that creates a data based on coordinates and trigonometric (sin) functions."""

    def algorithm(self, inputs, coordinates):
        """Compute a product of sinusoids over all of the requested coordinates.

        Arguments
        ----------
        inputs : dict
            Unused, should be empty for this algorithm.
        coordinates : podpac.Coordinates
            Requested coordinates.

        Returns
        -------
        UnitsDataArray
            Sinusoids of a certain period for all of the requested coordinates
        """
        out = self.create_output_array(coordinates, data=1.0)
        axis_names = list(out.coords.keys())
        axes = list(out.coords.values())
        # a time axis cannot feed sin() directly: convert it to float hours
        if "time" in axis_names:
            i_time = axis_names.index("time")
            axes[i_time] = axes[i_time].astype("datetime64[h]").astype(float)
        for grid in np.meshgrid(*axes, indexing="ij"):
            out *= np.sin(np.pi * grid / 90.0)
        return out
| [
"numpy.meshgrid",
"traitlets.Enum",
"podpac.core.coordinates.Coordinates",
"numpy.sin",
"numpy.arange"
] | [((2063, 2099), 'podpac.core.coordinates.Coordinates', 'Coordinates', (['[c]'], {'validate_crs': '(False)'}), '([c], validate_crs=False)\n', (2074, 2099), False, 'from podpac.core.coordinates import Coordinates\n'), ((3056, 3089), 'numpy.meshgrid', 'np.meshgrid', (['*crds'], {'indexing': '"""ij"""'}), "(*crds, indexing='ij')\n", (3067, 3089), True, 'import numpy as np\n'), ((1260, 1338), 'traitlets.Enum', 'tl.Enum', (["['time', 'lat', 'lon', 'alt']"], {'default_value': '"""none"""', 'allow_none': '(False)'}), "(['time', 'lat', 'lon', 'alt'], default_value='none', allow_none=False)\n", (1267, 1338), True, 'import traitlets as tl\n'), ((3134, 3160), 'numpy.sin', 'np.sin', (['(np.pi * crd / 90.0)'], {}), '(np.pi * crd / 90.0)\n', (3140, 3160), True, 'import numpy as np\n'), ((886, 913), 'numpy.arange', 'np.arange', (['coordinates.size'], {}), '(coordinates.size)\n', (895, 913), True, 'import numpy as np\n')] |
import numpy as np
import os
from simplestat import statinf
# Load every result archive written by previous experiment runs.
fns=["results/"+zw for zw in os.listdir("results")]
fs=[np.load(fn) for fn in fns]

# Pull the AUC score and output dimensionality stored in each archive.
aucs=[float(f["auc"]) for f in fs]
outdims=[int(f["outdim"]) for f in fs]

# Group AUC values by output dimension: q maps outdim -> list of AUCs.
q={}
for auc,outdim in zip(aucs,outdims):
    if not outdim in q.keys():q[outdim]=[]
    q[outdim].append(auc)

# NOTE(review): presumably a local helper module re-exporting matplotlib's
# pyplot as `plt` -- confirm.
from plt import *

# Plot mean AUC per dimension with the standard error of the mean as error bars.
x=list(q.keys())
y=[np.mean(q[xx]) for xx in x]
s=[np.std(q[xx])/np.sqrt(len(q[xx])) for xx in x]
plt.xlabel("dimension")
plt.ylabel("auc")
plt.errorbar(x,y,fmt="o",yerr=s)
plt.show()
| [
"numpy.std",
"numpy.load",
"numpy.mean",
"os.listdir"
] | [((119, 130), 'numpy.load', 'np.load', (['fn'], {}), '(fn)\n', (126, 130), True, 'import numpy as np\n'), ((376, 390), 'numpy.mean', 'np.mean', (['q[xx]'], {}), '(q[xx])\n', (383, 390), True, 'import numpy as np\n'), ((90, 111), 'os.listdir', 'os.listdir', (['"""results"""'], {}), "('results')\n", (100, 111), False, 'import os\n'), ((407, 420), 'numpy.std', 'np.std', (['q[xx]'], {}), '(q[xx])\n', (413, 420), True, 'import numpy as np\n')] |
import cv2
# import tensorflow as tf
from tensorflow import keras
import numpy as np
# tf.config.set_visible_devices([], 'GPU')
# tf.device("cpu")
def load_image(image_path):
    """Load an image from disk.

    GIFs are decoded by grabbing their first frame via cv2.VideoCapture;
    every other extension goes through cv2.imread.

    Parameters
    ----------
    image_path : str
        Path to the image file.

    Returns
    -------
    numpy.ndarray or None
        Decoded image data, or None if the file could not be read.
    """
    if image_path.endswith(".gif"):
        cap = cv2.VideoCapture(image_path)
        try:
            ret, data = cap.read()
        finally:
            # fix: release the capture handle (the original leaked it)
            cap.release()
        # NOTE(review): ret is intentionally ignored -- a failed read leaves
        # data as None, matching cv2.imread's failure behavior.
    else:
        data = cv2.imread(image_path)
    return data
def reshape(data, shape):
    """Resize image *data* to *shape* with cv2.resize.

    On any failure the error is printed and the process exits with code 11.
    """
    try:
        return cv2.resize(data, shape)
    except Exception as err:
        print(err)
        exit(11)
def merge_label(base_data, new_data):
    """Bitwise-OR *new_data* into *base_data* and return the result.

    When *base_data* is None, *new_data* is returned unchanged; otherwise
    *base_data* is updated in place (|=) and returned.
    """
    if base_data is None:
        return new_data
    base_data |= new_data
    return base_data
def load_label(path_list):
    """OR together the label masks stored at each path in *path_list*."""
    merged = None
    for label_path in path_list:
        merged = merge_label(merged, np.load(label_path))
    return merged
class BoneDataset(keras.utils.Sequence):
    """Keras sequence yielding (image, merged-label-mask) batches from file paths."""

    def __init__(self, batch_size, img_size, input_img_paths, target_img_paths):
        self.batch_size = batch_size
        self.img_size = img_size
        self.input_img_paths = input_img_paths
        self.target_img_paths = target_img_paths

    def __len__(self):
        # number of complete batches; a trailing partial batch is dropped
        return len(self.target_img_paths) // self.batch_size

    def __getitem__(self, idx):
        """Return the (input, target) batch for index *idx*."""
        start = idx * self.batch_size
        stop = start + self.batch_size
        image_paths = self.input_img_paths[start:stop]
        label_path_lists = self.target_img_paths[start:stop]

        # inputs: resized RGB images as float32
        x = np.zeros((self.batch_size,) + self.img_size + (3,), dtype="float32")
        for k, image_path in enumerate(image_paths):
            x[k] = reshape(data=load_image(image_path=image_path), shape=self.img_size)

        # targets: OR-merged label masks, resized, with a trailing channel axis
        y = np.zeros((self.batch_size,) + self.img_size + (1,), dtype="uint8")
        for k, paths in enumerate(label_path_lists):
            label = reshape(data=load_label(path_list=paths), shape=self.img_size)
            y[k] = np.expand_dims(label, 2)
        return x, y.astype(np.float32)
| [
"numpy.load",
"numpy.zeros",
"numpy.expand_dims",
"cv2.VideoCapture",
"cv2.imread",
"cv2.resize"
] | [((227, 255), 'cv2.VideoCapture', 'cv2.VideoCapture', (['image_path'], {}), '(image_path)\n', (243, 255), False, 'import cv2\n'), ((312, 334), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (322, 334), False, 'import cv2\n'), ((404, 427), 'cv2.resize', 'cv2.resize', (['data', 'shape'], {}), '(data, shape)\n', (414, 427), False, 'import cv2\n'), ((1696, 1764), 'numpy.zeros', 'np.zeros', (['((self.batch_size,) + self.img_size + (3,))'], {'dtype': '"""float32"""'}), "((self.batch_size,) + self.img_size + (3,), dtype='float32')\n", (1704, 1764), True, 'import numpy as np\n'), ((1978, 2044), 'numpy.zeros', 'np.zeros', (['((self.batch_size,) + self.img_size + (1,))'], {'dtype': '"""uint8"""'}), "((self.batch_size,) + self.img_size + (1,), dtype='uint8')\n", (1986, 2044), True, 'import numpy as np\n'), ((815, 833), 'numpy.load', 'np.load', (['file_path'], {}), '(file_path)\n', (822, 833), True, 'import numpy as np\n'), ((2234, 2256), 'numpy.expand_dims', 'np.expand_dims', (['img', '(2)'], {}), '(img, 2)\n', (2248, 2256), True, 'import numpy as np\n')] |
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from scipy.ndimage import map_coordinates
from skimage import data
import pytest
from tadataka.interpolation import interpolation
def test_interpolation():
    """Bilinear interpolation on a 4x3 image: output shape/dtype, interior
    points, edge points, corners, and out-of-bounds rejection."""
    # 4 rows x 3 columns, i.e. valid x in [0, 2] and y in [0, 3]
    image = np.array([
        [0, 1, 5],
        [0, 0, 2],
        [4, 3, 2],
        [5, 6, 1]
    ], dtype=np.float64)

    # width, height = (3, 4)

    # batch input -> one value per coordinate pair
    coordinates = np.array([[0.1, 1.2], [1.1, 2.1], [2.0, 2.3]])
    assert(interpolation(image, coordinates).shape == (3,))

    coordinate = np.array([0.1, 1.2])
    assert(interpolation(image, coordinate).dtype == np.float64)

    # ordinary: hand-computed bilinear weights around (1.3, 2.6)
    expected = (image[2, 1] * (2.0 - 1.3) * (3.0 - 2.6) +
                image[2, 2] * (1.3 - 1.0) * (3.0 - 2.6) +
                image[3, 1] * (2.0 - 1.3) * (2.6 - 2.0) +
                image[3, 2] * (1.3 - 1.0) * (2.6 - 2.0))

    # 2d input
    coordinates = np.array([[1.3, 2.6]])
    assert_almost_equal(interpolation(image, coordinates).squeeze(), expected)

    # 1d input
    coordinate = np.array([1.3, 2.6])
    assert_almost_equal(interpolation(image, coordinate), expected)

    # minimum coordinate: corner value returned exactly
    coordinate = np.array([0.0, 0.0])
    assert_almost_equal(interpolation(image, coordinate), image[0, 0])

    # minimum x: interpolates along y only
    coordinate = np.array([0.0, 0.1])
    expected = (image[0, 0] * (1.0 - 0.0) * (1.0 - 0.1) +
                image[1, 0] * (1.0 - 0.0) * (0.1 - 0.0))
    assert_almost_equal(interpolation(image, coordinate), expected)

    # minimum y: interpolates along x only
    coordinate = np.array([0.1, 0.0])
    expected = (image[0, 0] * (1.0 - 0.1) * (1.0 - 0.0) +
                image[0, 1] * (0.1 - 0.0) * (1.0 - 0.0))
    assert_almost_equal(interpolation(image, coordinate), expected)

    # maximum x: right edge, interpolates along y only
    coordinate = np.array([2.0, 2.9])
    expected = (image[2, 2] * (3.0 - 2.0) * (3.0 - 2.9) +
                image[3, 2] * (3.0 - 2.0) * (2.9 - 2.0))
    assert_almost_equal(interpolation(image, coordinate), expected)

    coordinate = np.array([1.9, 3.0])  # maximum y
    expected = (image[3, 1] * (2.0 - 1.9) * (4.0 - 3.0) +
                image[3, 2] * (1.9 - 1.0) * (4.0 - 3.0))
    assert_almost_equal(interpolation(image, coordinate), expected)

    # maximum coordinate: opposite corner value returned exactly
    coordinate = np.array([2.0, 3.0])
    assert_almost_equal(interpolation(image, coordinate), image[3, 2])

    # coordinates outside [0, 2] x [0, 3] must raise
    with pytest.raises(ValueError):
        interpolation(image, [3.0, 2.01])

    with pytest.raises(ValueError):
        interpolation(image, [3.01, 2.0])

    with pytest.raises(ValueError):
        interpolation(image, [-0.01, 0.0])

    with pytest.raises(ValueError):
        interpolation(image, [0.0, -0.01])
| [
"pytest.raises",
"tadataka.interpolation.interpolation",
"numpy.array"
] | [((264, 336), 'numpy.array', 'np.array', (['[[0, 1, 5], [0, 0, 2], [4, 3, 2], [5, 6, 1]]'], {'dtype': 'np.float64'}), '([[0, 1, 5], [0, 0, 2], [4, 3, 2], [5, 6, 1]], dtype=np.float64)\n', (272, 336), True, 'import numpy as np\n'), ((423, 469), 'numpy.array', 'np.array', (['[[0.1, 1.2], [1.1, 2.1], [2.0, 2.3]]'], {}), '([[0.1, 1.2], [1.1, 2.1], [2.0, 2.3]])\n', (431, 469), True, 'import numpy as np\n'), ((548, 568), 'numpy.array', 'np.array', (['[0.1, 1.2]'], {}), '([0.1, 1.2])\n', (556, 568), True, 'import numpy as np\n'), ((914, 936), 'numpy.array', 'np.array', (['[[1.3, 2.6]]'], {}), '([[1.3, 2.6]])\n', (922, 936), True, 'import numpy as np\n'), ((1048, 1068), 'numpy.array', 'np.array', (['[1.3, 2.6]'], {}), '([1.3, 2.6])\n', (1056, 1068), True, 'import numpy as np\n'), ((1180, 1200), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1188, 1200), True, 'import numpy as np\n'), ((1306, 1326), 'numpy.array', 'np.array', (['[0.0, 0.1]'], {}), '([0.0, 0.1])\n', (1314, 1326), True, 'import numpy as np\n'), ((1544, 1564), 'numpy.array', 'np.array', (['[0.1, 0.0]'], {}), '([0.1, 0.0])\n', (1552, 1564), True, 'import numpy as np\n'), ((1782, 1802), 'numpy.array', 'np.array', (['[2.0, 2.9]'], {}), '([2.0, 2.9])\n', (1790, 1802), True, 'import numpy as np\n'), ((2004, 2024), 'numpy.array', 'np.array', (['[1.9, 3.0]'], {}), '([1.9, 3.0])\n', (2012, 2024), True, 'import numpy as np\n'), ((2264, 2284), 'numpy.array', 'np.array', (['[2.0, 3.0]'], {}), '([2.0, 3.0])\n', (2272, 2284), True, 'import numpy as np\n'), ((1093, 1125), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (1106, 1125), False, 'from tadataka.interpolation import interpolation\n'), ((1225, 1257), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (1238, 1257), False, 'from tadataka.interpolation import interpolation\n'), ((1466, 1498), 
'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (1479, 1498), False, 'from tadataka.interpolation import interpolation\n'), ((1704, 1736), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (1717, 1736), False, 'from tadataka.interpolation import interpolation\n'), ((1942, 1974), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (1955, 1974), False, 'from tadataka.interpolation import interpolation\n'), ((2177, 2209), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (2190, 2209), False, 'from tadataka.interpolation import interpolation\n'), ((2309, 2341), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (2322, 2341), False, 'from tadataka.interpolation import interpolation\n'), ((2366, 2391), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2379, 2391), False, 'import pytest\n'), ((2401, 2434), 'tadataka.interpolation.interpolation', 'interpolation', (['image', '[3.0, 2.01]'], {}), '(image, [3.0, 2.01])\n', (2414, 2434), False, 'from tadataka.interpolation import interpolation\n'), ((2445, 2470), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2458, 2470), False, 'import pytest\n'), ((2480, 2513), 'tadataka.interpolation.interpolation', 'interpolation', (['image', '[3.01, 2.0]'], {}), '(image, [3.01, 2.0])\n', (2493, 2513), False, 'from tadataka.interpolation import interpolation\n'), ((2524, 2549), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2537, 2549), False, 'import pytest\n'), ((2559, 2593), 'tadataka.interpolation.interpolation', 'interpolation', (['image', '[-0.01, 0.0]'], {}), '(image, [-0.01, 0.0])\n', (2572, 2593), False, 'from tadataka.interpolation import 
interpolation\n'), ((2604, 2629), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2617, 2629), False, 'import pytest\n'), ((2639, 2673), 'tadataka.interpolation.interpolation', 'interpolation', (['image', '[0.0, -0.01]'], {}), '(image, [0.0, -0.01])\n', (2652, 2673), False, 'from tadataka.interpolation import interpolation\n'), ((481, 514), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinates'], {}), '(image, coordinates)\n', (494, 514), False, 'from tadataka.interpolation import interpolation\n'), ((580, 612), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinate'], {}), '(image, coordinate)\n', (593, 612), False, 'from tadataka.interpolation import interpolation\n'), ((961, 994), 'tadataka.interpolation.interpolation', 'interpolation', (['image', 'coordinates'], {}), '(image, coordinates)\n', (974, 994), False, 'from tadataka.interpolation import interpolation\n')] |
import argparse
import polygon_primitives.polygon as pp
import polygon_primitives.edge as pe
import utm
from shapely.geometry import Polygon
import geopandas
import numpy as np
import cv2
_projections = {}
def compute_centroid(points):
    """Return the per-coordinate centroid of an (N, d) point array.

    Bug fix: ``np.mean(points)`` with no axis averages over *all*
    elements and returns a scalar, which is not a centroid for 2-D/3-D
    point sets; ``axis=0`` yields one mean per coordinate, which is what
    the alignment code in this file subtracts from the point arrays.
    """
    centroid = np.mean(points, axis=0)
    return centroid
def parse_polygon_file(filename, offset=[0.0, 0.0]):
    """Read a polygon file and return (shapely polygons, edge list).

    File layout: the first line is a type tag; each following line holds
    one vertex as two whitespace-separated numbers, and polygons are
    separated by blank lines.  When the tag is not "UTM" the coordinates
    are read as lat/lon and projected to UTM first.  ``offset`` (x, y)
    is added to every vertex.

    NOTE(review): ``offset`` is a mutable default argument — harmless
    here because it is only read, but a tuple default would be safer.
    NOTE(review): edges are built only for consecutive vertex pairs (the
    closing last->first edge is skipped; see the commented-out modulo
    version below), and no edges at all are generated for the final
    polygon when the file does not end with a blank line — confirm both
    are intentional.
    """
    edges = []
    shapely_polys = []
    with open(filename, "r") as f:
        lines = f.readlines()
    file_type = lines[0].rstrip()
    edge_lines = lines[1:]
    next_polygon = pp.Polygon()  # NOTE(review): never populated or returned
    polygon_points = []
    for line in edge_lines:
        if line == "\n":
            # Blank line closes the current polygon: emit its edges.
            for i in range(len(polygon_points) - 1):
                next_index = i + 1
                edge = pe.Edge(polygon_points[i], polygon_points[next_index])
                if edge.get_start() == edge.get_end():
                    # Degenerate (zero-length) edge — log its indices.
                    print(i, next_index)
                edges.append(edge)
            shap_pol = Polygon(polygon_points)
            shapely_polys.append(shap_pol)
            polygon_points = []
        else:
            points = [float(i) for i in line.split()]
            if file_type != "UTM":
                # Input is lat/lon; project to UTM.  z and l are the
                # UTM zone number/letter, unused here.
                x, y, z, l = utm.from_latlon(points[0], points[1])
            else:
                x = points[0]
                y = points[1]
            x = x + offset[0]
            y = y + offset[1]
            polygon_points.append((x, y))
    if line[-1] != "\n":
        # File did not end with a newline: flush the last polygon
        # (without generating its edges).
        # for i in range(len(polygon_points)):
        #     next_index = (i + 1) % len(polygon_points)
        #     edge = pp.Edge(polygon_points[i], polygon_points[next_index])
        #     next_polygon.add_edge(edge)
        # polygons.append(next_polygon)
        shap_pol = Polygon(polygon_points)
        shapely_polys.append(shap_pol)
    return shapely_polys, edges
def get_least_squares_approx(points, ref_points):
    """Rigidly align ``points`` to ``ref_points`` via SVD (Kabsch-style).

    Centers both point sets on their centroids, estimates a rotation
    from the SVD of their correlation matrix, and returns the
    transformed points.

    NOTE(review): for (N, d) point arrays the usual Kabsch covariance is
    ``points.T @ ref_points`` (d x d); here ``H = points @ ref_points.T``
    is (N, N), and no reflection (det < 0) correction is applied —
    confirm the intended array orientation before relying on this.
    NOTE(review): relies on compute_centroid returning a per-coordinate
    mean; verify its behavior.
    """
    points = np.array(points)
    ref_points = np.array(ref_points)
    point_centroid = compute_centroid(points)
    ref_centroid = compute_centroid(ref_points)
    # Center both point sets before estimating the rotation.
    points = points - point_centroid
    ref_points = ref_points - ref_centroid
    H = np.dot(points, np.transpose(ref_points))
    # np.linalg.svd returns v already transposed (Vh).
    u,s,v = np.linalg.svd(H)
    rot = np.dot(v, np.transpose(u))
    # Translation maps the source centroid onto the reference centroid.
    t = np.dot(rot, -point_centroid) + ref_centroid
    return np.dot(rot, points) + t
def get_fund_mat(points, ref_points):
    """Estimate the fundamental matrix between two matched point sets.

    Thin wrapper around cv2.findFundamentalMat with default options;
    returns OpenCV's (F, inlier_mask) tuple unchanged.
    """
    return cv2.findFundamentalMat(points, ref_points)
def create_shapely_polygon(merged_polygons, points):
    """Build a shapely Polygon from a sequence of (x, y) points.

    The ``merged_polygons`` argument is accepted for interface
    compatibility but is not used.
    """
    return Polygon(points)
def filter_matches(kp1, kp2, matches, ratio = 0.75):
    """Apply Lowe's ratio test to knn match results.

    Keeps a match only when it has exactly two candidates and the best
    candidate's distance beats the second-best by ``ratio``.  Returns an
    iterator (zip) of (query_keypoint, train_keypoint) pairs, both taken
    from the best candidate's indices.
    """
    query_kps = []
    train_kps = []
    for candidates in matches:
        if len(candidates) != 2:
            continue
        best, second = candidates
        if best.distance < second.distance * ratio:
            query_kps.append(kp1[best.queryIdx])
            train_kps.append(kp2[best.trainIdx])
    return zip(query_kps, train_kps)
def main():
    """Parse command-line arguments and load the polygon file."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--polygon_file", type=str)
    arg_parser.add_argument("--comparison_file", type=str)
    parsed = arg_parser.parse_args()
    # comparison file path is read but not yet used below
    comp_file = parsed.comparison_file
    poly_file = parsed.polygon_file
    comp_polygons, edges = parse_polygon_file(poly_file)
# if __name__ == "__main__":
# main() | [
"utm.from_latlon",
"polygon_primitives.edge.Edge",
"argparse.ArgumentParser",
"shapely.geometry.Polygon",
"numpy.transpose",
"cv2.findFundamentalMat",
"numpy.linalg.svd",
"numpy.mean",
"numpy.array",
"polygon_primitives.polygon.Polygon",
"numpy.dot"
] | [((253, 268), 'numpy.mean', 'np.mean', (['points'], {}), '(points)\n', (260, 268), True, 'import numpy as np\n'), ((2016, 2032), 'numpy.array', 'np.array', (['points'], {}), '(points)\n', (2024, 2032), True, 'import numpy as np\n'), ((2050, 2070), 'numpy.array', 'np.array', (['ref_points'], {}), '(ref_points)\n', (2058, 2070), True, 'import numpy as np\n'), ((2306, 2322), 'numpy.linalg.svd', 'np.linalg.svd', (['H'], {}), '(H)\n', (2319, 2322), True, 'import numpy as np\n'), ((2497, 2539), 'cv2.findFundamentalMat', 'cv2.findFundamentalMat', (['points', 'ref_points'], {}), '(points, ref_points)\n', (2519, 2539), False, 'import cv2\n'), ((2605, 2620), 'shapely.geometry.Polygon', 'Polygon', (['points'], {}), '(points)\n', (2612, 2620), False, 'from shapely.geometry import Polygon\n'), ((2987, 3012), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (3010, 3012), False, 'import argparse\n'), ((538, 550), 'polygon_primitives.polygon.Polygon', 'pp.Polygon', ([], {}), '()\n', (548, 550), True, 'import polygon_primitives.polygon as pp\n'), ((2268, 2292), 'numpy.transpose', 'np.transpose', (['ref_points'], {}), '(ref_points)\n', (2280, 2292), True, 'import numpy as np\n'), ((2343, 2358), 'numpy.transpose', 'np.transpose', (['u'], {}), '(u)\n', (2355, 2358), True, 'import numpy as np\n'), ((2368, 2396), 'numpy.dot', 'np.dot', (['rot', '(-point_centroid)'], {}), '(rot, -point_centroid)\n', (2374, 2396), True, 'import numpy as np\n'), ((2423, 2442), 'numpy.dot', 'np.dot', (['rot', 'points'], {}), '(rot, points)\n', (2429, 2442), True, 'import numpy as np\n'), ((1840, 1863), 'shapely.geometry.Polygon', 'Polygon', (['polygon_points'], {}), '(polygon_points)\n', (1847, 1863), False, 'from shapely.geometry import Polygon\n'), ((990, 1013), 'shapely.geometry.Polygon', 'Polygon', (['polygon_points'], {}), '(polygon_points)\n', (997, 1013), False, 'from shapely.geometry import Polygon\n'), ((763, 817), 'polygon_primitives.edge.Edge', 'pe.Edge', 
(['polygon_points[i]', 'polygon_points[next_index]'], {}), '(polygon_points[i], polygon_points[next_index])\n', (770, 817), True, 'import polygon_primitives.edge as pe\n'), ((1246, 1283), 'utm.from_latlon', 'utm.from_latlon', (['points[0]', 'points[1]'], {}), '(points[0], points[1])\n', (1261, 1283), False, 'import utm\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:gis_38]
# language: python
# name: conda-env-gis_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import geopandas as gpd
import pandas as pd
import numpy as np
import xarray as xr
from shapely.geometry import Polygon
import multiprocessing as mp
import psutil
import os
import resource
import math
import time
# %%
workdir = '/Users/pnorton/Projects/National_Hydrology_Model/datasets/SNODAS/unmasked'
src_netcdf = f'{workdir}/SWE_subset.nc'
src_gridspacing = 0.04166666
# %%
def new_calc_dx_dy(longitude,latitude,shape,radius=6370997.):
    ''' This definition calculates the distance
        between grid points that are in
        a latitude/longitude format.
        Using pyproj GEOD; different Earth Shapes
        https://jswhit.github.io/pyproj/pyproj.Geod-class.html
        Common shapes: 'sphere', 'WGS84', 'GRS80'
        Accepts, 1D arrays for latitude and longitude
        Returns: dx, dy; 2D arrays of distances
        between grid points in the x and y direction in meters
    '''
    # from: https://github.com/Unidata/MetPy/issues/288
    from pyproj import Geod
    # The default radius value selects an ellipsoid by name; any other
    # radius builds a sphere of that radius instead.
    if (radius != 6370997.):
        g = Geod(a=radius,b=radius)
    else:
        g = Geod(ellps=shape)
    # NOTE(review): the docstring says 1-D arrays are accepted, but the
    # code below uses latitude.shape[1] and latitude[j, i], which
    # require 2-D (meshgrid-style) inputs — a 1-D array raises
    # IndexError.  Confirm the intended input shape.
    dx = np.empty(latitude.shape)
    dy = np.zeros(longitude.shape)
    for i in range(latitude.shape[1]):
        for j in range(latitude.shape[0]-1):
            _, _, dx[j,i] = g.inv(longitude[j,i],latitude[j,i],longitude[j+1,i],latitude[j+1,i])
        # Pad the last row by copying the previous one (uses the loop's
        # final j value).
        dx[j+1,:] = dx[j,:]
    for i in range(latitude.shape[1]-1):
        for j in range(latitude.shape[0]):
            _, _, dy[j,i] = g.inv(longitude[j,i],latitude[j,i],longitude[j,i+1],latitude[j,i+1])
        # Pad the last column by copying the previous one.
        dy[:,i+1] = dy[:,i]
    return dx, dy
# %%
# Quick smoke test of new_calc_dx_dy on two nearby coordinates.
# NOTE(review): these are 1-D arrays, but new_calc_dx_dy indexes
# latitude.shape[1] / latitude[j, i] — as written this cell raises
# IndexError; confirm whether 2-D meshgrid inputs were intended.
lons = np.array([-124.730225, -124.72317])
lats = np.array([24.878002, 24.884838])
new_calc_dx_dy(lons, lats, 'WGS84')
# %%
def convert_size(size_bytes):
    """Format a byte count as a human-readable string, e.g. '1.5 KB'."""
    # from https://python-forum.io/Thread-Convert-file-sizes-will-this-produce-accurate-results
    if not size_bytes:
        return "0 B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest power of 1024 that fits, then scale down.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return f'{scaled} {units[exponent]}'
# %%
def create_polygons(lats, lons, grid_spacing, data):
    """Build a GeoDataFrame of grid-cell bounding-box polygons.

    lats, lons: coordinate arrays of cell centers (meshgridded below).
    grid_spacing: cell size in degrees; each box extends +/- spacing/2
        around its center.
    data: flattened per-cell values, stored in the 'grid' column.

    Returns a geopandas.GeoDataFrame with one row per cell (row-major
    order) and a polygon geometry in EPSG:4326.  Each phase is timed
    and printed.
    """
    t1 = time.time()
    lon, lat = np.meshgrid(lons, lats)
    print(f'meshgrid: {time.time() - t1}')
    # For weight generation the datahandle variable is not used for anything
    # but creating the geodataframe of the source netcdf file
    t1 = time.time()
    df = pd.DataFrame({'grid': data})
    print(f'DataFrame: {time.time() - t1}')
    # res is half of the 'resolution' (e.g. gridspacing in degrees)
    res = grid_spacing / 2.0
    poly = []
    index = []
    count = 0
    # Create polygon features of the grid-cell bounding boxes
    t1 = time.time()
    for i in range(np.shape(lat)[0]):
        for j in range(np.shape(lon)[1]):
            # Corner order per cell: lats [S, N, N, S], lons [E, E, W, W];
            # zipped as (lon, lat), i.e. shapely's (x, y) convention.
            lat_point_list = [lat[i, j] - res, lat[i, j] + res, lat[i, j] + res, lat[i, j] - res]
            lon_point_list = [lon[i, j] + res, lon[i, j] + res, lon[i, j] - res, lon[i, j] - res]
            poly.append(Polygon(zip(lon_point_list, lat_point_list)))
            index.append(count)
            count += 1
    print(f'poly: {time.time() - t1}')
    t1 = time.time()
    ncfcells = gpd.GeoDataFrame(df, index=index, geometry=poly, crs='epsg:4326')
    print(f'GeoDataFrame: {time.time() - t1}')
    return ncfcells
# %%
# Open the SNODAS SWE subset; mask_and_scale=False keeps the raw stored
# values (no fill-value masking or scale/offset decoding applied).
ds = xr.open_dataset(src_netcdf, mask_and_scale=False)
lathandle = ds['lat']
lonhandle = ds['lon']
datahandle = ds['SWE']
# Print some information on the data
print('\n Data sizes are: \n', datahandle.sizes)
print('\n Data coords are: \n', datahandle.coords)
# %%
# %%
# Build the per-cell polygon GeoDataFrame from the first timestep,
# bracketing the call with peak-RSS measurements.
# NOTE(review): ru_maxrss units are platform-dependent (kilobytes on
# Linux, bytes on macOS), so convert_size may be off by a factor of 1024.
print('Before create ncfcells mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
ncfcells = create_polygons(lathandle, lonhandle, src_gridspacing, datahandle.isel(time=0).values.flatten())
print('After create ncfcells mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
# %%
# --- Scratch cells: prototyping a vectorized corner-array construction ---
lon, lat = np.meshgrid(lonhandle, lathandle)
print(lon)
# %%
print(lon.shape)
# %%
res = src_gridspacing / 2.0
# %%
# i => lat, j => lon; [i, j]
i = 0
j = 0
# Corner coordinates of cell (0, 0), loop-style (reference result).
lat_point_list = [lat[i, j] - res, lat[i, j] + res, lat[i, j] + res, lat[i, j] - res]
lon_point_list = [lon[i, j] + res, lon[i, j] + res, lon[i, j] - res, lon[i, j] - res]
# poly.append(Polygon(zip(lon_point_list, lat_point_list)))
# %%
print(f'{lat[i, j], lon[i, j]}')
print(lat_point_list)
print(lon_point_list)
# %%
ll_2d = list(zip(lon_point_list, lat_point_list))
print(ll_2d)
# %%
# Vectorized corner latitudes: per cell [S, N, N, S], flattened.
cc = lat[:, :] - res
dd = lat[:, :] + res
# np.array([[2],[3],[4]])
yy = np.dstack((cc, dd, dd, cc)).flatten()
# xx = np.dstack([cc, dd])
print(yy.shape)
# %%
# array([ 24.85716797, 24.89883463, 24.89883463, 24.85716797,
# -119.99688528, -119.99688528, -120.03855194, -120.03855194])
#24.85716797
#24.85716796875
#24.89883463
#24.89883462875
#-119.99688528
#-119.99688527731261
# -120.03855194
# -120.03855193731262
# %%
# NOTE(review): np.ndarray is being called with four arrays as
# positional arguments, which does not match its (shape, dtype, ...)
# constructor signature — this cell raises TypeError as written.
bb = np.ndarray(lat[:,:] - res, lat[:, :] + res, lat[:, :] + res, lat[:, :] - res)
# %%
# Vectorized corner longitudes: per cell [E, E, W, W], flattened.
ff = lon[:, :] + res
gg = lon[:, :] - res
xx = np.dstack((ff, ff, gg, gg)).flatten()
print(xx.shape)
# %%
cc[i, j]
# %%
# np.array([[2],[3],[4]])
yy = np.dstack((cc, dd, dd, cc)).flatten()
# xx = np.dstack([cc, dd])
print(yy.shape)
# %%
# %%
# Time the pure-Python corner-list loop for comparison with the
# vectorized construction above.
t1 = time.time()
for i in range(np.shape(lat)[0]):
    for j in range(np.shape(lon)[1]):
        lat_point_list = [lat[i, j] - res, lat[i, j] + res, lat[i, j] + res, lat[i, j] - res]
        lon_point_list = [lon[i, j] + res, lon[i, j] + res, lon[i, j] - res, lon[i, j] - res]
print(f'poly: {time.time() - t1}')
# %%
# Stack the flattened lat/lon corner arrays side by side.
zz = np.dstack((yy, xx))
# %%
zz[0:2,0,:]
# %%
zz.shape
# %%
def create_polygons_v2(lats, lons, grid_spacing, data):
    """Vectorized variant of the grid-cell polygon builder.

    Builds the four corner coordinates of every cell with numpy in one
    pass, then derives each row's Polygon from its positional index.
    Same arguments and return value as create_polygons; each phase is
    timed and printed, with peak-RSS measurements in between.

    Bug fix: the corner pairs are now zipped as (lon, lat) — shapely's
    (x, y) convention, and the order the loop-based builder in this file
    uses via Polygon(zip(lon_point_list, lat_point_list)).  Previously
    the pairs were zipped as (lat, lon), transposing every polygon.
    """
    def get_poly(row, lats, lons):
        # Each cell owns four consecutive entries of the flattened
        # corner arrays; row.name is the 0-based positional label.
        ii = row.name * 4
        return Polygon(zip(lons[ii:ii + 4], lats[ii:ii + 4]))
    t1 = time.time()
    lon, lat = np.meshgrid(lons, lats)
    print(f'meshgrid: {time.time() - t1}')
    # res is half of the 'resolution' (e.g. gridspacing in degrees)
    res = grid_spacing / 2.0
    # For weight generation the datahandle variable is not used for anything
    # but creating the geodataframe of the source netcdf file
    t1 = time.time()
    df = pd.DataFrame({'grid': data})
    print(f'DataFrame: {time.time() - t1}')
    # Corner arrays, 4 entries per cell, flattened row-major:
    # latitudes [S, N, N, S] and longitudes [E, E, W, W].
    t1 = time.time()
    lats1 = lat[:, :] - res
    lats2 = lat[:, :] + res
    yy = np.dstack((lats1, lats2, lats2, lats1)).flatten()
    lons1 = lon[:, :] + res
    lons2 = lon[:, :] - res
    xx = np.dstack((lons1, lons1, lons2, lons2)).flatten()
    print(f'numpy: {time.time() - t1}')
    print(' mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
    t1 = time.time()
    df['geometry'] = df.apply(get_poly, lats=yy, lons=xx, axis=1)
    print(f'df_geometry: {time.time() - t1}')
    print(' mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
    t1 = time.time()
    ncfcells = gpd.GeoDataFrame(df, crs='epsg:4326')
    print(f'GeoDataFrame: {time.time() - t1}')
    del df
    return ncfcells
# %%
# Run the vectorized builder on the first timestep, again bracketed by
# peak-RSS measurements for comparison with create_polygons.
print('Before create ncfcells mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
ncfcells = create_polygons_v2(lathandle, lonhandle, src_gridspacing, datahandle.isel(time=0).values.flatten())
print('After create ncfcells mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
# %%
ncfcells.info()
# %%
ncfcells.head()
# %%
print('After create ncfcells mem usage:', convert_size(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
# %%
# %%
| [
"numpy.dstack",
"pandas.DataFrame",
"numpy.meshgrid",
"math.pow",
"numpy.empty",
"xarray.open_dataset",
"numpy.zeros",
"time.time",
"numpy.shape",
"geopandas.GeoDataFrame",
"numpy.array",
"math.log",
"resource.getrusage",
"numpy.ndarray",
"pyproj.Geod"
] | [((2076, 2111), 'numpy.array', 'np.array', (['[-124.730225, -124.72317]'], {}), '([-124.730225, -124.72317])\n', (2084, 2111), True, 'import numpy as np\n'), ((2119, 2151), 'numpy.array', 'np.array', (['[24.878002, 24.884838]'], {}), '([24.878002, 24.884838])\n', (2127, 2151), True, 'import numpy as np\n'), ((3861, 3910), 'xarray.open_dataset', 'xr.open_dataset', (['src_netcdf'], {'mask_and_scale': '(False)'}), '(src_netcdf, mask_and_scale=False)\n', (3876, 3910), True, 'import xarray as xr\n'), ((4473, 4506), 'numpy.meshgrid', 'np.meshgrid', (['lonhandle', 'lathandle'], {}), '(lonhandle, lathandle)\n', (4484, 4506), True, 'import numpy as np\n'), ((5460, 5538), 'numpy.ndarray', 'np.ndarray', (['(lat[:, :] - res)', '(lat[:, :] + res)', '(lat[:, :] + res)', '(lat[:, :] - res)'], {}), '(lat[:, :] - res, lat[:, :] + res, lat[:, :] + res, lat[:, :] - res)\n', (5470, 5538), True, 'import numpy as np\n'), ((5797, 5808), 'time.time', 'time.time', ([], {}), '()\n', (5806, 5808), False, 'import time\n'), ((6115, 6134), 'numpy.dstack', 'np.dstack', (['(yy, xx)'], {}), '((yy, xx))\n', (6124, 6134), True, 'import numpy as np\n'), ((1558, 1582), 'numpy.empty', 'np.empty', (['latitude.shape'], {}), '(latitude.shape)\n', (1566, 1582), True, 'import numpy as np\n'), ((1592, 1617), 'numpy.zeros', 'np.zeros', (['longitude.shape'], {}), '(longitude.shape)\n', (1600, 1617), True, 'import numpy as np\n'), ((2501, 2518), 'math.pow', 'math.pow', (['(1024)', 'i'], {}), '(1024, i)\n', (2509, 2518), False, 'import math\n'), ((2664, 2675), 'time.time', 'time.time', ([], {}), '()\n', (2673, 2675), False, 'import time\n'), ((2691, 2714), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (2702, 2714), True, 'import numpy as np\n'), ((2911, 2922), 'time.time', 'time.time', ([], {}), '()\n', (2920, 2922), False, 'import time\n'), ((2932, 2960), 'pandas.DataFrame', 'pd.DataFrame', (["{'grid': data}"], {}), "({'grid': data})\n", (2944, 2960), True, 'import pandas as 
pd\n'), ((3222, 3233), 'time.time', 'time.time', ([], {}), '()\n', (3231, 3233), False, 'import time\n'), ((3688, 3699), 'time.time', 'time.time', ([], {}), '()\n', (3697, 3699), False, 'import time\n'), ((3715, 3780), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['df'], {'index': 'index', 'geometry': 'poly', 'crs': '"""epsg:4326"""'}), "(df, index=index, geometry=poly, crs='epsg:4326')\n", (3731, 3780), True, 'import geopandas as gpd\n'), ((6369, 6380), 'time.time', 'time.time', ([], {}), '()\n', (6378, 6380), False, 'import time\n'), ((6396, 6419), 'numpy.meshgrid', 'np.meshgrid', (['lons', 'lats'], {}), '(lons, lats)\n', (6407, 6419), True, 'import numpy as np\n'), ((6714, 6725), 'time.time', 'time.time', ([], {}), '()\n', (6723, 6725), False, 'import time\n'), ((6735, 6763), 'pandas.DataFrame', 'pd.DataFrame', (["{'grid': data}"], {}), "({'grid': data})\n", (6747, 6763), True, 'import pandas as pd\n'), ((6880, 6891), 'time.time', 'time.time', ([], {}), '()\n', (6889, 6891), False, 'import time\n'), ((7285, 7296), 'time.time', 'time.time', ([], {}), '()\n', (7294, 7296), False, 'import time\n'), ((7513, 7524), 'time.time', 'time.time', ([], {}), '()\n', (7522, 7524), False, 'import time\n'), ((7540, 7577), 'geopandas.GeoDataFrame', 'gpd.GeoDataFrame', (['df'], {'crs': '"""epsg:4326"""'}), "(df, crs='epsg:4326')\n", (7556, 7577), True, 'import geopandas as gpd\n'), ((1480, 1504), 'pyproj.Geod', 'Geod', ([], {'a': 'radius', 'b': 'radius'}), '(a=radius, b=radius)\n', (1484, 1504), False, 'from pyproj import Geod\n'), ((1526, 1543), 'pyproj.Geod', 'Geod', ([], {'ellps': 'shape'}), '(ellps=shape)\n', (1530, 1543), False, 'from pyproj import Geod\n'), ((5086, 5113), 'numpy.dstack', 'np.dstack', (['(cc, dd, dd, cc)'], {}), '((cc, dd, dd, cc))\n', (5095, 5113), True, 'import numpy as np\n'), ((5592, 5619), 'numpy.dstack', 'np.dstack', (['(ff, ff, gg, gg)'], {}), '((ff, ff, gg, gg))\n', (5601, 5619), True, 'import numpy as np\n'), ((5698, 5725), 'numpy.dstack', 
'np.dstack', (['(cc, dd, dd, cc)'], {}), '((cc, dd, dd, cc))\n', (5707, 5725), True, 'import numpy as np\n'), ((5824, 5837), 'numpy.shape', 'np.shape', (['lat'], {}), '(lat)\n', (5832, 5837), True, 'import numpy as np\n'), ((2460, 2486), 'math.log', 'math.log', (['size_bytes', '(1024)'], {}), '(size_bytes, 1024)\n', (2468, 2486), False, 'import math\n'), ((3253, 3266), 'numpy.shape', 'np.shape', (['lat'], {}), '(lat)\n', (3261, 3266), True, 'import numpy as np\n'), ((4187, 4227), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (4205, 4227), False, 'import resource\n'), ((4403, 4443), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (4421, 4443), False, 'import resource\n'), ((5862, 5875), 'numpy.shape', 'np.shape', (['lon'], {}), '(lon)\n', (5870, 5875), True, 'import numpy as np\n'), ((6966, 7005), 'numpy.dstack', 'np.dstack', (['(lats1, lats2, lats2, lats1)'], {}), '((lats1, lats2, lats2, lats1))\n', (6975, 7005), True, 'import numpy as np\n'), ((7087, 7126), 'numpy.dstack', 'np.dstack', (['(lons1, lons1, lons2, lons2)'], {}), '((lons1, lons1, lons2, lons2))\n', (7096, 7126), True, 'import numpy as np\n'), ((7719, 7759), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (7737, 7759), False, 'import resource\n'), ((7938, 7978), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (7956, 7978), False, 'import resource\n'), ((8096, 8136), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (8114, 8136), False, 'import resource\n'), ((3295, 3308), 'numpy.shape', 'np.shape', (['lon'], {}), '(lon)\n', (3303, 3308), True, 'import numpy as np\n'), ((6084, 6095), 'time.time', 'time.time', ([], {}), '()\n', (6093, 6095), False, 'import time\n'), ((7218, 7258), 'resource.getrusage', 'resource.getrusage', 
(['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (7236, 7258), False, 'import resource\n'), ((7450, 7490), 'resource.getrusage', 'resource.getrusage', (['resource.RUSAGE_SELF'], {}), '(resource.RUSAGE_SELF)\n', (7468, 7490), False, 'import resource\n'), ((2738, 2749), 'time.time', 'time.time', ([], {}), '()\n', (2747, 2749), False, 'import time\n'), ((2985, 2996), 'time.time', 'time.time', ([], {}), '()\n', (2994, 2996), False, 'import time\n'), ((3654, 3665), 'time.time', 'time.time', ([], {}), '()\n', (3663, 3665), False, 'import time\n'), ((3808, 3819), 'time.time', 'time.time', ([], {}), '()\n', (3817, 3819), False, 'import time\n'), ((6443, 6454), 'time.time', 'time.time', ([], {}), '()\n', (6452, 6454), False, 'import time\n'), ((6788, 6799), 'time.time', 'time.time', ([], {}), '()\n', (6797, 6799), False, 'import time\n'), ((7157, 7168), 'time.time', 'time.time', ([], {}), '()\n', (7166, 7168), False, 'import time\n'), ((7389, 7400), 'time.time', 'time.time', ([], {}), '()\n', (7398, 7400), False, 'import time\n'), ((7605, 7616), 'time.time', 'time.time', ([], {}), '()\n', (7614, 7616), False, 'import time\n')] |
# Implementing Gradient Descent using TensorFlow
# NOTE(review): written against the TF 1.x graph/session API
# (tf.Session, tf.random_uniform, tf.train.*Optimizer); it will not run
# under TF 2.x without tf.compat.v1 shims.
import numpy as np
import tensorflow as tf
from sklearn.datasets import fetch_california_housing
from sklearn.preprocessing import StandardScaler
# Get data
housing = fetch_california_housing()
m, n = housing.data.shape
# Learning parameters
n_epochs = 2500
learning_rate = 0.025
# Transform data into usable tensors, set up theta
# Standardize the features and prepend a bias column of ones.
scaled_X = StandardScaler().fit_transform(housing.data)
housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_X]
X = tf.constant(housing_data_plus_bias, dtype=tf.float32, name='X')
y = tf.constant(housing.target.reshape(-1, 1), dtype=tf.float32, name='y')
# theta starts from uniform random values in [-1, 1).
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0), name='theta')
# Construct graph
y_pred = tf.matmul(X, theta, name='y_pred')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name='mse')
# gradients = (2/m) * tf.matmul(tf.transpose(X), error)
# Use autodiff instead
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
# Alternate optimization
# NOTE(review): optimizer2 is created but never used below.
optimizer2 = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
# Computation
with tf.Session() as sess:
    sess.run(init)
    print("Learning rate: ", learning_rate)
    print(theta.eval())
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # Report training progress every 100 epochs.
            print('Epoch ', epoch, 'MSE = ', mse.eval())
        sess.run(training_op)
    best_theta = theta.eval()
print(best_theta)
print(best_theta) | [
"tensorflow.random_uniform",
"sklearn.preprocessing.StandardScaler",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"numpy.ones",
"tensorflow.constant",
"sklearn.datasets.fetch_california_housing",
"tensorflow.matmul",
"tensorflow.train.MomentumOptimizer",
"tensorflow.square",
... | [((218, 244), 'sklearn.datasets.fetch_california_housing', 'fetch_california_housing', ([], {}), '()\n', (242, 244), False, 'from sklearn.datasets import fetch_california_housing\n'), ((502, 565), 'tensorflow.constant', 'tf.constant', (['housing_data_plus_bias'], {'dtype': 'tf.float32', 'name': '"""X"""'}), "(housing_data_plus_bias, dtype=tf.float32, name='X')\n", (513, 565), True, 'import tensorflow as tf\n'), ((745, 779), 'tensorflow.matmul', 'tf.matmul', (['X', 'theta'], {'name': '"""y_pred"""'}), "(X, theta, name='y_pred')\n", (754, 779), True, 'import tensorflow as tf\n'), ((941, 1003), 'tensorflow.train.GradientDescentOptimizer', 'tf.train.GradientDescentOptimizer', ([], {'learning_rate': 'learning_rate'}), '(learning_rate=learning_rate)\n', (974, 1003), True, 'import tensorflow as tf\n'), ((1042, 1111), 'tensorflow.train.MomentumOptimizer', 'tf.train.MomentumOptimizer', ([], {'learning_rate': 'learning_rate', 'momentum': '(0.9)'}), '(learning_rate=learning_rate, momentum=0.9)\n', (1068, 1111), True, 'import tensorflow as tf\n'), ((1158, 1191), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (1189, 1191), True, 'import tensorflow as tf\n'), ((661, 701), 'tensorflow.random_uniform', 'tf.random_uniform', (['[n + 1, 1]', '(-1.0)', '(1.0)'], {}), '([n + 1, 1], -1.0, 1.0)\n', (678, 701), True, 'import tensorflow as tf\n'), ((820, 836), 'tensorflow.square', 'tf.square', (['error'], {}), '(error)\n', (829, 836), True, 'import tensorflow as tf\n'), ((1212, 1224), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1222, 1224), True, 'import tensorflow as tf\n'), ((395, 411), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (409, 411), False, 'from sklearn.preprocessing import StandardScaler\n'), ((471, 486), 'numpy.ones', 'np.ones', (['(m, 1)'], {}), '((m, 1))\n', (478, 486), True, 'import numpy as np\n')] |
import json
import random
import numpy as np
from Source.Utility.Pathfinding.Graph import Graph
class GameMetrics:
    """Derives normalized observation metrics from a game-state dict.

    The game state is expected to contain "width", "height", "cells"
    (row-major grid indexed as cells[y][x], where 0 means free — see
    get_free_spaces), "players" (dict keyed by the 1-based player id as
    a string, each with "x", "y", "speed", "active") and "you" (own id).
    """

    def __init__(self):
        # Placeholder until set_game_state() is called.
        self.game_state = 0

    # always set game state before getting metrics
    def set_game_state(self, game_state):
        """Store the current game-state dict; must precede any getter."""
        self.game_state = game_state

    def get_distance_to_players(self):
        """Return euclidean distances to all six player slots, normalized
        by the board diagonal; 0 for self, for dead players, and for
        everyone when the own player is dead."""
        own_player = self.game_state["players"][str(self.game_state["you"])]
        distances = [0, 0, 0, 0, 0, 0]
        current_position = (own_player["x"], own_player["y"])
        if self.game_state["players"][str(self.game_state["you"])]["active"]:
            for i in range(6):
                if i + 1 == self.game_state["you"]:
                    distances[i] = 0
                else:
                    if self.game_state["players"][str(i + 1)]["active"]:
                        enemy_position = (
                            self.game_state["players"][str(i + 1)]["x"], self.game_state["players"][str(i + 1)]["y"])
                        distance = np.sqrt(np.power(current_position[0] - enemy_position[0], 2) + np.power(
                            current_position[1] - enemy_position[1], 2))
                        distances[i] = distance
                    else:
                        distances[i] = 0
        # Normalize by the board diagonal so every entry is in [0, 1].
        max_distance = np.sqrt(np.power(self.game_state["width"], 2) + np.power(self.game_state["height"], 2))
        for i in range(len(distances)):
            distances[i] = distances[i] / max_distance
        return distances

    def get_average_distance(self, distances):
        """Average the non-zero entries of ``distances``; 0 if none."""
        total = counter = 0.0
        for value in distances:
            if value != 0:
                total += value
                counter += 1
        if counter == 0:
            return 0
        else:
            return total / counter

    def get_avg_speed(self):
        """Average speed of the living opponents, normalized by the
        maximum speed of 10; 0.0 when there are none (or we are dead)."""
        total = 0.0
        counter = 0.0
        avg = 0.0
        if self.game_state["players"][str(self.game_state["you"])]["active"]:
            for i in range(6):
                if i + 1 == self.game_state["you"]:
                    pass
                else:
                    if self.game_state["players"][str(i + 1)]["active"]:
                        total += self.game_state["players"][str(i + 1)]["speed"]
                        counter += 1
        if counter > 0:
            avg = total / counter
        norm_avg = avg / 10
        return norm_avg

    def get_num_living_players(self):
        """Count the players whose "active" flag is set."""
        num = 0
        for i in range(6):
            if self.game_state["players"][str(i + 1)]["active"]:
                num += 1
        return num

    def get_player_data(self, id):
        """Return (x, y, speed) of the player with 0-based index ``id``."""
        x = self.game_state["players"][str(id + 1)]["x"]
        y = self.game_state["players"][str(id + 1)]["y"]
        speed = self.game_state["players"][str(id + 1)]["speed"]
        return x, y, speed

    def get_distances_to_borders(self, id):
        """Return (top, bottom, right, left) cell distances from the
        player with 0-based index ``id`` to the board borders.

        NOTE(review): the arithmetic assumes 1-indexed player
        coordinates, while get_free_spaces indexes cells directly —
        confirm the server's coordinate convention.
        """
        board_height = self.game_state["height"]
        board_width = self.game_state["width"]
        position = self.game_state["players"][str(id + 1)]["x"], self.game_state["players"][str(id + 1)]["y"]
        top_distance = position[1] - 1
        bottom_distance = (board_height - 1) - (position[1] - 1)
        right_distance = (board_width - 1) - (position[0] - 1)
        left_distance = position[0] - 1
        return top_distance, bottom_distance, right_distance, left_distance

    def get_own_speed(self):
        """Return the own player's current speed (unnormalized)."""
        own_player = self.game_state["players"][str(self.game_state["you"])]
        speed = own_player["speed"]
        return speed

    def get_free_spaces(self, new_position):
        """Count the free cells in the 5x5 window around ``new_position``
        (an (x, y) tuple) and return (count - speed) / 25.

        NOTE(review): near the left/top edges the window uses negative
        indices, which wrap around in Python — confirm that is intended.
        """
        own_player = self.game_state["players"][str(self.game_state["you"])]
        speed = own_player["speed"]
        number_of_free_spaces = 0
        for i in range(-2, 3):
            for j in range(-2, 3):
                try:
                    if self.game_state["cells"][new_position[1] + i][new_position[0] + j] == 0:
                        number_of_free_spaces += 1
                except IndexError:
                    pass
        normalised_num = (number_of_free_spaces - speed) / 25.0
        return normalised_num

    def _path_free(self, dx, dy):
        """Return True when the next ``speed`` cells from the own player's
        position in direction (dx, dy) are inside the board and free.

        Bug fix: the original directional checks indexed the grid with a
        single coordinate (e.g. ``cells[y - i]``), comparing a whole row
        list against 0 — which is always unequal — so they never
        inspected the actual target cells.  This helper walks the path
        cell by cell using the same cells[y][x] indexing as
        get_free_spaces.
        """
        own_player = self.game_state["players"][str(self.game_state["you"])]
        x = own_player["x"]
        y = own_player["y"]
        for step in range(1, own_player["speed"] + 1):
            nx = x + dx * step
            ny = y + dy * step
            # Negative indices would silently wrap around in Python
            # lists, so treat them as out of bounds explicitly.
            if nx < 0 or ny < 0:
                return False
            try:
                if self.game_state["cells"][ny][nx] != 0:
                    return False
            except IndexError:
                return False
        return True

    def get_up_free(self):
        """True when moving up (decreasing y) at current speed is clear."""
        return self._path_free(0, -1)

    def get_down_free(self):
        """True when moving down (increasing y) at current speed is clear."""
        return self._path_free(0, 1)

    def get_left_free(self):
        """True when moving left (decreasing x) at current speed is clear."""
        return self._path_free(-1, 0)

    def get_right_free(self):
        """True when moving right (increasing x) at current speed is clear."""
        return self._path_free(1, 0)

    def get_connected_fields_for_new_position(self, x, y, new_direction):
        """Return the number of connected components of the board graph
        when the own player stands at (x, y) facing ``new_direction``."""
        graph = Graph(self.game_state["cells"], x, y, self.game_state["width"], self.game_state["height"], new_direction, 69)
        return len(graph.get_connected_components())
| [
"numpy.power",
"Source.Utility.Pathfinding.Graph.Graph"
] | [((6184, 6298), 'Source.Utility.Pathfinding.Graph.Graph', 'Graph', (["self.game_state['cells']", 'x', 'y', "self.game_state['width']", "self.game_state['height']", 'new_direction', '(69)'], {}), "(self.game_state['cells'], x, y, self.game_state['width'], self.\n game_state['height'], new_direction, 69)\n", (6189, 6298), False, 'from Source.Utility.Pathfinding.Graph import Graph\n'), ((1298, 1335), 'numpy.power', 'np.power', (["self.game_state['width']", '(2)'], {}), "(self.game_state['width'], 2)\n", (1306, 1335), True, 'import numpy as np\n'), ((1338, 1376), 'numpy.power', 'np.power', (["self.game_state['height']", '(2)'], {}), "(self.game_state['height'], 2)\n", (1346, 1376), True, 'import numpy as np\n'), ((1014, 1066), 'numpy.power', 'np.power', (['(current_position[0] - enemy_position[0])', '(2)'], {}), '(current_position[0] - enemy_position[0], 2)\n', (1022, 1066), True, 'import numpy as np\n'), ((1069, 1121), 'numpy.power', 'np.power', (['(current_position[1] - enemy_position[1])', '(2)'], {}), '(current_position[1] - enemy_position[1], 2)\n', (1077, 1121), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.