code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
# Audio processing tools
#
# <NAME> 2020
#
# Some code modified from original MATLAB rastamat package.
#
import numpy as np
from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt
from scipy.io import wavfile
# import spectools
# from .fbtools import fft2melmx
from matplotlib import pyplot as plt
import parselmouth as pm
# from soundsig import sound
def get_meanF0s_v2(fileName, steps=1/128.0, f0min=50, f0max=300):
    """
    Extract the fundamental-frequency (F0) contour of a wav file.

    Builds a parselmouth (Praat) Sound object from `fileName`, converts it
    to a Pitch object sampled every `steps` seconds with the search range
    [f0min, f0max] Hz, and returns the raw frequency track. Unvoiced
    frames come back as 0 Hz in the returned array.

    Parameters
    ----------
    fileName : str
        Path to the wav file to analyze.
    steps : float
        Time step (s) between successive pitch estimates.
    f0min, f0max : float
        Lower / upper bound (Hz) of the pitch search range.

    Returns
    -------
    ndarray of per-frame F0 values in Hz (0 for unvoiced frames).
    """
    praat_sound = pm.Sound(fileName)
    # create a praat pitch object from the sound
    praat_pitch = praat_sound.to_pitch(steps, f0min, f0max)
    return praat_pitch.selected_array['frequency']
def fft2melmx(nfft, sr=8000, nfilts=0, bwidth=1.0, minfreq=0, maxfreq=4000, htkmel=False, constamp=0):
    '''
    Generate a matrix of weights to combine FFT bins into Mel bins.

    nfft defines the source FFT size at sampling rate sr. Optional nfilts
    specifies the number of output bands required (else one per "mel/width"),
    and bwidth is the constant width of each band relative to standard Mel
    (default 1). While wts has nfft columns, the second half are all zero:
    Mel spectrum is fft2melmx(nfft,sr)*abs(fft(xincols,nfft)).
    minfreq is the frequency (in Hz) of the lowest band edge; default is 0,
    but 133.33 is a common standard (to skip LF). maxfreq is the frequency
    in Hz of the upper edge; default sr/2. You can exactly duplicate the
    mel matrix in Slaney's mfcc.m as fft2melmx(512, 8000, 40, 1, 133.33, 6855.5, 0).
    htkmel=True means use HTK's version of the mel curve, not Slaney's.
    constamp=1 means make integration windows peak at 1, not sum to 1.

    Returns
    -------
    wts : (nfilts, nfft) ndarray of filterbank weights
    binfrqs : (nfilts+2,) ndarray of band edge/center frequencies in Hz

    # 2004-09-05 <EMAIL> based on fft2barkmx
    '''
    if nfilts == 0:
        # np.ceil returns a float; array dimensions must be ints
        nfilts = int(np.ceil(hz2mel(maxfreq, htkmel) / 2))
    nfilts = int(nfilts)
    wts = np.zeros((nfilts, nfft))
    # Center freqs of each FFT bin
    fftfrqs = np.arange(0, nfft / 2.) / nfft * sr
    # 'Center freqs' of mel bands - uniformly spaced between limits
    minmel = hz2mel(minfreq, htkmel)
    maxmel = hz2mel(maxfreq, htkmel)
    binfrqs = mel2hz(minmel + np.arange(0., nfilts + 2) / (nfilts + 2.) * (maxmel - minmel), htkmel)
    half = int(nfft / 2)
    for i in range(nfilts):
        # lower edge / center / upper edge of band i
        fs = binfrqs[i:i + 3]
        # scale by width relative to standard mel
        fs = fs[1] + bwidth * (fs - fs[1])
        # lower and upper slopes for all bins; triangle = min of the two
        loslope = (fftfrqs - fs[0]) / (fs[1] - fs[0])
        hislope = (fs[2] - fftfrqs) / (fs[2] - fs[1])
        w = np.min((loslope, hislope), axis=0)
        w[w < 0] = 0  # zero outside the triangle
        wts[i, 0:half] = w
    if constamp == 0:
        # Slaney-style mel is scaled to be approx constant E per channel
        wts = np.dot(np.diag(2. / (binfrqs[2 + np.arange(nfilts)] - binfrqs[np.arange(nfilts)])), wts)
    # Make sure 2nd half of FFT is zero; seems like a good idea to avoid aliasing
    wts[:, int(nfft / 2 + 2):nfft] = 0
    return wts, binfrqs
def hz2mel(f, htk=False):
    '''
    Convert frequencies f (in Hz) to the mel scale.

    Parameters
    ----------
    f : scalar or array-like
        Frequency value(s) in Hz.
    htk : bool
        If True, use the mel axis defined in the HTKBook; otherwise use
        Slaney's Auditory Toolbox formula (mfcc.m): linear below 1 kHz,
        logarithmic above.

    Returns
    -------
    Mel value(s), scalar for scalar input, ndarray otherwise.

    # 2005-04-19 <EMAIL>
    '''
    f = np.asarray(f, dtype=float)
    if htk:
        # BUGFIX: original called bare `log10`, which is undefined here
        z = 2595. * np.log10(1. + f / 700.)
    else:
        # Mel fn to match Slaney's Auditory Toolbox mfcc.m
        f_0 = 0.          # 133.33333;
        f_sp = 200. / 3   # 66.66667;
        brkfrq = 1000.
        brkpt = (brkfrq - f_0) / f_sp  # starting mel value for log region
        # the magic 1.0711703: the ratio needed to get from 1000 Hz to
        # 6400 Hz in 27 steps (exp(log(6.4)/27) = 1.07117028749447)
        logstep = np.exp(np.log(6.4) / 27.)
        # linear below the break frequency, logarithmic above.
        # BUGFIX: linear branch was `f - f_0/f_sp` (precedence bug);
        # must be (f - f_0)/f_sp to match the array branch and Slaney.
        # np.maximum guards log() against f == 0 in the unselected branch.
        z = np.where(f < brkfrq,
                     (f - f_0) / f_sp,
                     brkpt + np.log(np.maximum(f, 1e-12) / brkfrq) / np.log(logstep))
    if z.ndim == 0:
        return z.item()  # preserve scalar-in / scalar-out behavior
    return z
def mel2hz(z, htk=False):
    '''
    Convert 'mel scale' frequencies into Hz.

    Parameters
    ----------
    z : scalar or array-like
        Mel value(s).
    htk : bool
        If True, use the HTK formula; else the formula from Slaney's
        mfcc.m (linear below the break point, exponential above).

    Returns
    -------
    Frequency value(s) in Hz, scalar for scalar input, ndarray otherwise.

    # 2005-04-19 <EMAIL>
    '''
    z = np.asarray(z, dtype=float)
    if htk:
        f = 700. * (10. ** (z / 2595.) - 1.)
    else:
        f_0 = 0.           # 133.33333;
        f_sp = 200. / 3.   # 66.66667;
        brkfrq = 1000.
        brkpt = (brkfrq - f_0) / f_sp  # starting mel value for log region
        # the magic 1.0711703 (see hz2mel): exp(log(6.4)/27)
        logstep = np.exp(np.log(6.4) / 27.)
        # BUGFIX: original indexed a scalar (`f[linpts]` on `0*z`) and
        # crashed on scalar input; np.where handles both shapes.
        f = np.where(z < brkpt,
                     f_0 + f_sp * z,
                     brkfrq * np.exp(np.log(logstep) * (z - brkpt)))
    if f.ndim == 0:
        return f.item()  # preserve scalar-in / scalar-out behavior
    return f
def get_envelope(audio, audio_fs, new_fs, cof=25, bef_aft=(0, 0), pad_next_pow2=False):
    ''' Get the envelope of a sound file.

    The envelope is the Hilbert-transform magnitude, low-pass filtered at
    `cof` Hz and resampled to `new_fs`.

    Inputs:
        audio [array] : audio signal vector
        audio_fs [int] : sampling rate of audio signal
        new_fs [int] : desired sampling rate of the envelope (same as your EEG, for example)
        cof [float] : cutoff frequency (Hz) of the envelope low-pass filter
        bef_aft [sequence of 2] : seconds of silence to prepend/append.
            A negative bef_aft[0] prepends |bef_aft[0]| s; a positive
            bef_aft[1] appends bef_aft[1] s.
        pad_next_pow2 [bool] : zero-pad to the next power of two before the
            Hilbert transform (faster FFT), then trim afterwards.
    Outputs:
        envelope [array-like] : the envelope of the sound, shape (n_samples, 1)
    '''
    if pad_next_pow2:
        print("Padding the signal to the nearest power of two...this should speed things up")
        orig_len = len(audio)
        # np.int was removed in NumPy 1.24; use the builtin int
        n_pad = 2 ** int(np.ceil(np.log2(len(audio)))) - len(audio)
        audio = np.hstack((audio, np.zeros((n_pad,))))
    print("calculating hilbert transform")
    env_hilb = np.abs(hilbert(audio))
    nyq = audio_fs / 2.  # Nyquist frequency
    b, a = butter(3, cof / nyq, 'low')  # this designs a 3-pole low-pass filter
    print("Low-pass filtering hilbert transform to get audio envelope")
    # filtfilt makes it non-causal (fwd/backward)
    envelope_long = np.atleast_2d(filtfilt(b, a, env_hilb, axis=0))
    envelope = resample(envelope_long.T, int(np.floor(envelope_long.shape[1] / (audio_fs / new_fs))))
    if pad_next_pow2:
        print("Removing padding")
        final_len = int((orig_len / audio_fs) * new_fs)
        envelope = envelope[:final_len, :]
        print(envelope.shape)
    if bef_aft[0] < 0:
        # NOTE: negative bef_aft[0] requests pre-silence; np.abs gives the duration
        print("Adding %.2f seconds of silence before" % bef_aft[0])
        envelope = np.vstack((np.zeros((int(np.abs(bef_aft[0]) * new_fs), 1)), envelope))
    if bef_aft[1] > 0:
        print("Adding %.2f seconds of silence after" % bef_aft[1])
        envelope = np.vstack((envelope, np.zeros((int(bef_aft[1] * new_fs), 1))))
    return envelope
def get_cse_onset(audio, audio_fs, wins=[0.04], nfilts=80, pos_deriv=True, spec_noise_thresh=1.04):
    """
    Get the onset based on cochlear scaled entropy
    Inputs:
        audio [np.array] : your audio
        audio_fs [float] : audio sampling rate
        wins [list] : list of windows to use in the boxcar convolution
        nfilts [int] : number of mel filters in the underlying spectrogram
        pos_deriv [bool] : whether to detect onsets only (True) or onsets and offsets (False)
        spec_noise_thresh [float] : spectrogram values below this are zeroed as noise
    Outputs:
        cse [np.array] : rectified cochlear scaled entropy over window [wins], normalized to max 1
        auddiff [np.array] : instantaneous derivative of spectrogram
    """
    new_fs = 100  # Sampling frequency of spectrogram (10 ms steps)
    # BUGFIX: get_mel_spectrogram returns (spectrogram, freqs); the original
    # kept the whole tuple, so the thresholding below raised a TypeError.
    specgram, _ = get_mel_spectrogram(audio, audio_fs, nfilts=nfilts)
    specgram[specgram < spec_noise_thresh] = 0
    nfilts, ntimes = specgram.shape
    # First-difference the spectrogram; duplicating the first column keeps
    # the output at ntimes columns.
    padded = np.hstack((np.atleast_2d(specgram[:, 0]).T, specgram))
    if pos_deriv is False:
        auddiff = np.sum(np.diff(padded) ** 2, axis=0)
    else:
        all_diff = np.diff(padded)
        all_diff[all_diff < 0] = 0  # keep positive changes (onsets) only
        auddiff = np.sum(all_diff ** 2, axis=0)
    cse = np.zeros((len(wins), ntimes))
    # Get the windows over which we are summing as bins, not times
    win_segments = [int(w * new_fs) for w in wins]
    for wi, w in enumerate(win_segments):
        # BUGFIX: `boxcar` and `convolve` were undefined names in this file;
        # a boxcar window is just ones, and np.convolve is the equivalent.
        # The window is followed by a 150 ms inhibitory (negative) tail.
        box = np.hstack((np.ones((1, w)), -np.ones((1, int(0.15 * new_fs))))).ravel()
        cse[wi, :] = np.convolve(auddiff, box, 'full')[:ntimes]
    cse[cse < 0] = 0
    cse = cse / cse.max()
    return cse, auddiff
def get_peak_rate(envelope):
    """Return the positive first derivative ("peak rate") of an envelope.

    A zero is implicitly prepended before differencing so the output has
    the same length as the input; negative slopes are clamped to zero.
    """
    slope = np.diff(np.concatenate((0, envelope), axis=None))
    return np.maximum(slope, 0)
def get_mel_spectrogram(w, fs, wintime=0.025, steptime=0.010, nfilts=80, minfreq=0, maxfreq=None):
    ''' Make mel-band spectrogram
    Inputs:
        w [float] : audio signal vector
        fs [int] : sampling rate of audio signal
        wintime [float] : window size
        steptime [float] : step size (time resolution)
        nfilts [int] : number of mel-band filters
        minfreq [int] : Minimum frequency to analyze (in Hz)
        maxfreq [int] : Maximum frequency to analyze (in Hz). If None, defaults to fs/2
    Outputs:
        mel_spectrogram [array]: mel-band spectrogram
        freqs [array] : array of floats, bin edges of spectrogram
    '''
    if maxfreq is None:
        # np.int was removed in NumPy 1.24; use the builtin int
        maxfreq = int(fs / 2)
    pspec, e = powspec(w, sr=fs, wintime=wintime, steptime=steptime, dither=1)
    aspectrum, wts, freqs = audspec(pspec, sr=fs, nfilts=nfilts, fbtype='mel', minfreq=minfreq,
                                    maxfreq=maxfreq, sumpower=True, bwidth=1.0)
    # heavy compressive nonlinearity on the power spectrum
    mel_spectrogram = aspectrum ** 0.001
    return mel_spectrogram, freqs
def powspec(x, sr=8000, wintime=0.025, steptime=0.010, dither=1):
    '''
    Compute the power spectrum and frame energy of the input signal —
    basically outputs a power spectrogram.

    Each column represents a power spectrum for a given frame;
    each row represents a frequency.

    Default values:
        sr = 8000Hz
        wintime = 25ms (200 samps)
        steptime = 10ms (80 samps)
    which means use a 256-point fft with a hanning window.

    Returns
    -------
    y : (nfft/2+1, nframes) power spectrogram
    e : float, log of the total power (rasta-style frame energy)

    # $Header: /Users/dpwe/matlab/rastamat/RCS/powspec.m,v 1.3 2012/09/03 14:02:01 dpwe Exp dpwe $
    '''
    winpts = int(np.round(wintime * sr))
    steppts = int(np.round(steptime * sr))
    # next power of two; must be an int for scipy.signal.spectrogram
    NFFT = int(2 ** np.ceil(np.log(winpts) / np.log(2)))
    # np.hanning == scipy.signal.hanning(sym=True), which was removed from
    # scipy.signal in SciPy 1.13. hanning gives much less noisy sidelobes.
    WINDOW = np.hanning(winpts)
    NOVERLAP = winpts - steppts
    SAMPRATE = sr
    # Values coming out of rasta treat samples as integers,
    # not range -1..1, hence scale up here to match (approx)
    f, t, Sxx = spectrogram(x * 32768, nfft=NFFT, fs=SAMPRATE, nperseg=len(WINDOW),
                            window=WINDOW, noverlap=NOVERLAP)
    y = np.abs(Sxx) ** 2
    # imagine we had random dither that had a variance of 1 sample
    # step and a white spectrum. That's like (in expectation, anyway)
    # adding a constant value to every bin (to avoid digital zero)
    if dither:
        y = y + winpts
    # ignoring the hamming window, total power would be = #pts
    # I think this doesn't quite make sense, but it's what rasta/powspec.c does
    # 2012-09-03 Calculate log energy - after windowing, by parseval
    e = np.log(np.sum(y))
    return y, e
def audspec(pspectrum, sr=16000, nfilts=80, fbtype='mel', minfreq=0, maxfreq=8000, sumpower=True, bwidth=1.0):
    '''
    Perform critical band analysis (see PLP).

    Takes a power spectrogram (nfreqs x nframes) as input and integrates
    FFT bins into auditory (mel/bark) bands.

    Returns
    -------
    aspectrum : (nfilts, nframes) band-integrated spectrogram
    wts : (nfilts, nfreqs) filterbank weight matrix actually applied
    freqs : band edge/center frequencies from the filterbank builder
        ([] for fbtype='bark', which does not report them)

    Raises
    ------
    ValueError if fbtype is not recognized.
    '''
    nfreqs, nframes = pspectrum.shape
    nfft = int((nfreqs - 1) * 2)
    freqs = []
    if fbtype == 'mel':
        wts, freqs = fft2melmx(nfft=nfft, sr=sr, nfilts=nfilts, bwidth=bwidth,
                                minfreq=minfreq, maxfreq=maxfreq)
    elif fbtype == 'htkmel':
        # BUGFIX: fft2melmx returns (wts, binfrqs); the original assigned the
        # whole tuple to wts, breaking the slicing below.
        wts, freqs = fft2melmx(nfft, sr, nfilts, bwidth, minfreq, maxfreq, True, True)
    elif fbtype == 'fcmel':
        wts, freqs = fft2melmx(nfft, sr, nfilts, bwidth, minfreq, maxfreq, True, False)
    elif fbtype == 'bark':
        # NOTE(review): fft2barkmx is not defined anywhere in this file --
        # this branch raises NameError until it is ported from rastamat.
        wts = fft2barkmx(nfft, sr, nfilts, bwidth, minfreq, maxfreq)
    else:
        # BUGFIX: the original used MATLAB's error(), undefined in Python
        raise ValueError('fbtype ' + fbtype + ' not recognized')
    wts = wts[:, 0:nfreqs]
    # Integrate FFT bins into Mel bins, in abs or abs^2 domains:
    if sumpower:
        aspectrum = np.dot(wts, pspectrum)
    else:
        aspectrum = np.dot(wts, np.sqrt(pspectrum)) ** 2.
    return aspectrum, wts, freqs
def get_mps(audio, audio_fs, window=0.5):
    '''
    Calculate the modulation power spectrum based on Theunissen lab code from soundsig package
    Inputs:
        audio [array]: sound pressure waveform
        audio_fs [int]: sampling rate of the sound
        window [float] : Time window for the modulation power spectrum
    Outputs:
        mps [array] : modulation power spectrum matrix, dimensions are spectral modulation x temporal modulation
        wt [array] : array of temporal modulation values (Hz) to go with temporal dimension axis of mps
        wf [array] : array of spectral modulation values (cyc/kHz) to go with spectral dimension axis of mps
    Example:
        import librosa
        s, sample_rate =librosa.load('/Users/liberty/Box/stimulidb/distractors/use_these_ITU_R/stim167_gargling__ITU_R_BS1770-3_12ms_200ms_-29LUFS.mp3')
        mps, wt, wf = get_mps(s, sample_rate)
    '''
    # NOTE(review): `sound` comes from `from soundsig import sound`, which is
    # commented out at the top of this file -- as written, calling this
    # function raises NameError until that import is restored.
    soundobj = sound.BioSound(audio, audio_fs)
    soundobj.mpsCalc(window=window)
    # Return the modulation power spectrum, which is in units of spectral modulation x temporal modulation
    # The actual temporal and spectral modulation values are given by wt and wf
    return soundobj.mps, soundobj.wt, soundobj.wf
# Demo entry point: read a wav file and compute its envelope at the EEG rate.
# BUGFIX: the guard compared against 'main' instead of '__main__', so this
# block could never execute.
if __name__ == '__main__':
    stim_wav = '/Users/liberty/Documents/Austin/code/semantic_EOG/stimuli/blah.wav'
    print("Reading in %s"%stim_wav)
    [stim_fs, w] = wavfile.read(stim_wav)
    stim_fs = 44100 # Should be this for everyone, some of them were 48000 but played at 44kHz
    eeg_fs = 128
    print(stim_fs)
    # use only the first channel of the (presumably stereo) wav
    envelope = get_envelope(w[:,0], stim_fs, eeg_fs, pad_next_pow2=True)
| [
"parselmouth.Sound",
"numpy.sqrt",
"scipy.signal.filtfilt",
"numpy.log",
"scipy.signal.hanning",
"numpy.arange",
"numpy.atleast_2d",
"numpy.dot",
"numpy.concatenate",
"numpy.min",
"numpy.round",
"numpy.abs",
"numpy.floor",
"scipy.io.wavfile.read",
"numpy.int",
"scipy.signal.butter",
... | [((969, 987), 'parselmouth.Sound', 'pm.Sound', (['fileName'], {}), '(fileName)\n', (977, 987), True, 'import parselmouth as pm\n'), ((2381, 2405), 'numpy.zeros', 'np.zeros', (['(nfilts, nfft)'], {}), '((nfilts, nfft))\n', (2389, 2405), True, 'import numpy as np\n'), ((2723, 2760), 'numpy.round', 'np.round', (['(binfrqs / sr * (nfft - 1.0))'], {}), '(binfrqs / sr * (nfft - 1.0))\n', (2731, 2760), True, 'import numpy as np\n'), ((2768, 2785), 'numpy.arange', 'np.arange', (['nfilts'], {}), '(nfilts)\n', (2777, 2785), True, 'import numpy as np\n'), ((6634, 6661), 'scipy.signal.butter', 'butter', (['(3)', '(cof / nyq)', '"""low"""'], {}), "(3, cof / nyq, 'low')\n", (6640, 6661), False, 'from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt\n'), ((14438, 14460), 'scipy.io.wavfile.read', 'wavfile.read', (['stim_wav'], {}), '(stim_wav)\n', (14450, 14460), False, 'from scipy.io import wavfile\n'), ((3035, 3069), 'numpy.min', 'np.min', (['(loslope, hislope)'], {'axis': '(0)'}), '((loslope, hislope), axis=0)\n', (3041, 3069), True, 'import numpy as np\n'), ((6565, 6579), 'scipy.signal.hilbert', 'hilbert', (['audio'], {}), '(audio)\n', (6572, 6579), False, 'from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt\n'), ((6811, 6843), 'scipy.signal.filtfilt', 'filtfilt', (['b', 'a', 'env_hilb'], {'axis': '(0)'}), '(b, a, env_hilb, axis=0)\n', (6819, 6843), False, 'from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt\n'), ((7068, 7104), 'numpy.int', 'np.int', (['(orig_len / audio_fs * new_fs)'], {}), '(orig_len / audio_fs * new_fs)\n', (7074, 7104), True, 'import numpy as np\n'), ((8617, 8646), 'numpy.sum', 'np.sum', (['(all_diff ** 2)'], {'axis': '(0)'}), '(all_diff ** 2, axis=0)\n', (8623, 8646), True, 'import numpy as np\n'), ((8773, 8791), 'numpy.int', 'np.int', (['(w * new_fs)'], {}), '(w * new_fs)\n', (8779, 8791), True, 'import numpy as np\n'), ((9126, 9166), 'numpy.concatenate', 
'np.concatenate', (['(0, envelope)'], {'axis': 'None'}), '((0, envelope), axis=None)\n', (9140, 9166), True, 'import numpy as np\n'), ((9934, 9948), 'numpy.int', 'np.int', (['(fs / 2)'], {}), '(fs / 2)\n', (9940, 9948), True, 'import numpy as np\n'), ((10947, 10969), 'numpy.round', 'np.round', (['(wintime * sr)'], {}), '(wintime * sr)\n', (10955, 10969), True, 'import numpy as np\n'), ((10987, 11010), 'numpy.round', 'np.round', (['(steptime * sr)'], {}), '(steptime * sr)\n', (10995, 11010), True, 'import numpy as np\n'), ((11074, 11089), 'scipy.signal.hanning', 'hanning', (['winpts'], {}), '(winpts)\n', (11081, 11089), False, 'from scipy.signal import hanning, spectrogram, resample, hilbert, butter, filtfilt\n'), ((11434, 11445), 'numpy.abs', 'np.abs', (['Sxx'], {}), '(Sxx)\n', (11440, 11445), True, 'import numpy as np\n'), ((11922, 11931), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (11928, 11931), True, 'import numpy as np\n'), ((12923, 12945), 'numpy.dot', 'np.dot', (['wts', 'pspectrum'], {}), '(wts, pspectrum)\n', (12929, 12945), True, 'import numpy as np\n'), ((2454, 2478), 'numpy.arange', 'np.arange', (['(0)', '(nfft / 2.0)'], {}), '(0, nfft / 2.0)\n', (2463, 2478), True, 'import numpy as np\n'), ((6939, 6993), 'numpy.floor', 'np.floor', (['(envelope_long.shape[1] / (audio_fs / new_fs))'], {}), '(envelope_long.shape[1] / (audio_fs / new_fs))\n', (6947, 6993), True, 'import numpy as np\n'), ((3580, 3600), 'numpy.int', 'np.int', (['(nfft / 2 + 2)'], {}), '(nfft / 2 + 2)\n', (3586, 3600), True, 'import numpy as np\n'), ((3597, 3609), 'numpy.int', 'np.int', (['nfft'], {}), '(nfft)\n', (3603, 3609), True, 'import numpy as np\n'), ((4191, 4202), 'numpy.log', 'np.log', (['(6.4)'], {}), '(6.4)\n', (4197, 4202), True, 'import numpy as np\n'), ((5280, 5291), 'numpy.log', 'np.log', (['(6.4)'], {}), '(6.4)\n', (5286, 5291), True, 'import numpy as np\n'), ((11034, 11048), 'numpy.log', 'np.log', (['winpts'], {}), '(winpts)\n', (11040, 11048), True, 'import numpy as 
np\n'), ((11049, 11058), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (11055, 11058), True, 'import numpy as np\n'), ((12988, 13006), 'numpy.sqrt', 'np.sqrt', (['pspectrum'], {}), '(pspectrum)\n', (12995, 13006), True, 'import numpy as np\n'), ((2648, 2674), 'numpy.arange', 'np.arange', (['(0.0)', '(nfilts + 2)'], {}), '(0.0, nfilts + 2)\n', (2657, 2674), True, 'import numpy as np\n'), ((3152, 3168), 'numpy.int', 'np.int', (['(nfft / 2)'], {}), '(nfft / 2)\n', (3158, 3168), True, 'import numpy as np\n'), ((4788, 4823), 'numpy.log', 'np.log', (['(f[linpts == False] / brkfrq)'], {}), '(f[linpts == False] / brkfrq)\n', (4794, 4823), True, 'import numpy as np\n'), ((4821, 4836), 'numpy.log', 'np.log', (['logstep'], {}), '(logstep)\n', (4827, 4836), True, 'import numpy as np\n'), ((5745, 5760), 'numpy.log', 'np.log', (['logstep'], {}), '(logstep)\n', (5751, 5760), True, 'import numpy as np\n'), ((4659, 4677), 'numpy.log', 'np.log', (['(f / brkfrq)'], {}), '(f / brkfrq)\n', (4665, 4677), True, 'import numpy as np\n'), ((4677, 4692), 'numpy.log', 'np.log', (['logstep'], {}), '(logstep)\n', (4683, 4692), True, 'import numpy as np\n'), ((7497, 7524), 'numpy.int', 'np.int', (['(bef_aft[1] * new_fs)'], {}), '(bef_aft[1] * new_fs)\n', (7503, 7524), True, 'import numpy as np\n'), ((8522, 8551), 'numpy.atleast_2d', 'np.atleast_2d', (['specgram[:, 0]'], {}), '(specgram[:, 0])\n', (8535, 8551), True, 'import numpy as np\n'), ((3332, 3349), 'numpy.arange', 'np.arange', (['nfilts'], {}), '(nfilts)\n', (3341, 3349), True, 'import numpy as np\n'), ((3305, 3322), 'numpy.arange', 'np.arange', (['nfilts'], {}), '(nfilts)\n', (3314, 3322), True, 'import numpy as np\n'), ((7313, 7331), 'numpy.abs', 'np.abs', (['bef_aft[0]'], {}), '(bef_aft[0])\n', (7319, 7331), True, 'import numpy as np\n'), ((8418, 8447), 'numpy.atleast_2d', 'np.atleast_2d', (['specgram[:, 0]'], {}), '(specgram[:, 0])\n', (8431, 8447), True, 'import numpy as np\n'), ((8912, 8933), 'numpy.int', 'np.int', (['(0.15 * 
new_fs)'], {}), '(0.15 * new_fs)\n', (8918, 8933), True, 'import numpy as np\n')] |
import argparse
import gym
import numpy as np
import os
import torch
import BCQ
import BEAR
import utils
def train_PQL_BEAR(state_dim, action_dim, max_action, device, args):
    """Train a PQL-BEAR policy offline.

    Pipeline: (1) build the BEAR policy, (2) load the offline replay buffer,
    (3) pre-train the state VAE for `args.max_vae_trainstep` steps,
    (4) optionally pick beta (the paper's `b`) from a percentile of the VAE
    test loss, then (5) run the main training loop, evaluating the policy and
    saving results to ./results every `args.eval_freq` steps.
    """
    print("Training BEARState\n")
    log_name = f"{args.dataset}_{args.seed}"
    # Initialize policy
    policy = BEAR.BEAR(2, state_dim, action_dim, max_action, delta_conf=0.1, use_bootstrap=False,
                      version=args.version,
                      lambda_=0.0,
                      threshold=0.05,
                      mode=args.mode,
                      num_samples_match=args.num_samples_match,
                      mmd_sigma=args.mmd_sigma,
                      lagrange_thresh=args.lagrange_thresh,
                      use_kl=(True if args.distance_type == "KL" else False),
                      use_ensemble=(False if args.use_ensemble_variance == "False" else True),
                      kernel_type=args.kernel_type,
                      use_state_filter=True, actor_lr=args.actor_lr, beta=args.beta,
                      n_action=args.n_action, n_action_execute=args.n_action_execute,
                      backup=args.backup, ql_noise=args.ql_noise, vmin=args.vmin
                      )
    # Load offline data. bootstrap_dim presumably configures bootstrap masks
    # for the ensemble -- TODO confirm against utils.ReplayBuffer.load
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)
    replay_buffer.load(f"./buffers/{args.dataset}", args.load_buffer_size, bootstrap_dim=4)
    evaluations = []
    training_iters = 0
    # Stage 1: pre-train the VAE used by the state filter
    while training_iters < args.max_vae_trainstep:
        vae_loss = policy.train_vae(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        print(f"Training iterations: {training_iters}")
        print("VAE loss", vae_loss)
        training_iters += args.eval_freq
    if args.automatic_beta:  # pick beta automatically from a VAE-loss percentile
        test_loss = policy.test_vae(replay_buffer, batch_size=100000)
        beta = np.percentile(test_loss, args.beta_percentile)
        policy.beta = beta
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_bpercentile{args.beta_percentile}"
        print("Test vae",args.beta_percentile,"percentile:", beta)
    else:
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_beta{str(args.beta)}"
    if args.backup == "QL":
        hp_setting += f"_ql{args.ql_noise}"
    # Stage 2: main policy training loop; evaluate and checkpoint the
    # evaluation history every eval_freq steps
    training_iters = 0
    while training_iters < args.max_timesteps:
        pol_vals = policy.train(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        evaluations.append(eval_policy(policy, args.env, args.seed))
        np.save(f"./results/PQL_BEAR_{hp_setting}_{log_name}", evaluations)
        training_iters += args.eval_freq
        print(f"Training iterations: {training_iters}")
def train_PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args):
    """Train a PQL-BCQ policy offline.

    Pipeline mirrors train_PQL_BEAR: build the policy, load the offline
    replay buffer, pre-train the state VAE, choose beta (automatically from
    a VAE-loss percentile, or the hardcoded args.beta), then run the main
    training loop, evaluating and saving results every `args.eval_freq`
    steps.
    """
    # For saving files
    log_name = f"{args.dataset}_{args.seed}"
    print("=== Start Train ===\n")
    print("Args:\n",args)
    # Initialize policy
    policy = BCQ.PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args.discount, args.tau, args.lmbda, args.phi,
                        n_action=args.n_action, n_action_execute=args.n_action_execute,
                        backup=args.backup, ql_noise=args.ql_noise,
                        actor_lr=args.actor_lr, beta=args.beta, vmin=args.vmin)
    # Load offline data
    replay_buffer = utils.ReplayBuffer(state_dim, action_dim, device)
    replay_buffer.load(f"./buffers/{args.dataset}", args.load_buffer_size)
    evaluations = []
    training_iters = 0
    # Stage 1: pre-train the state VAE
    while training_iters < args.max_vae_trainstep:
        vae_loss = policy.train_vae(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        print(f"Training iterations: {training_iters}. State VAE loss: {vae_loss:.3f}.")
        training_iters += args.eval_freq
    if args.automatic_beta:
        # choose beta (the paper's b) as a percentile of the VAE test loss
        test_loss = policy.test_vae(replay_buffer, batch_size=100000)
        beta = np.percentile(test_loss, args.beta_percentile)
        policy.beta = beta
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_bpercentile{args.beta_percentile}"
        print("Test vae",args.beta_percentile,"percentile:", beta)
    else:
        hp_setting = f"N{args.load_buffer_size}_phi{args.phi}_n{args.n_action}_beta{str(args.beta)}"
    if args.backup == "QL":
        hp_setting += f"_ql{args.ql_noise}"
    # Start training
    # BUGFIX: the log message previously pointed at "BCQState_..." while the
    # results below are actually saved under "PQL_..." -- print the real path.
    print("Log files at:", f"./results/PQL_{hp_setting}_{log_name}")
    training_iters = 0
    while training_iters < args.max_timesteps:
        policy.train(replay_buffer, iterations=int(args.eval_freq), batch_size=args.batch_size)
        evaluations.append(eval_policy(policy, args.env, args.seed, eval_episodes=20))
        np.save(f"./results/PQL_{hp_setting}_{log_name}", evaluations)
        training_iters += args.eval_freq
        print(f"Training iterations: {training_iters}")
def eval_policy(policy, env_name, seed, eval_episodes=10):
    """Run `policy` for `eval_episodes` episodes and return the average
    undiscounted return.

    A fresh environment is created and seeded with `seed + 100` so that
    evaluation rollouts are decoupled from the training environment's RNG
    stream.
    """
    eval_env = gym.make(env_name)
    eval_env.seed(seed + 100)
    total_reward = 0.
    for _ in range(eval_episodes):
        state, done = eval_env.reset(), False
        while not done:
            chosen = policy.select_action(np.array(state))
            state, reward, done, _ = eval_env.step(chosen)
            total_reward += reward
    avg_reward = total_reward / eval_episodes
    print("---------------------------------------")
    print(f"Evaluation over {eval_episodes} episodes: {avg_reward:.3f}")
    print("---------------------------------------")
    return avg_reward
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--env", default="Hopper-v2") # OpenAI gym environment name (need to be consistent with the dataset name)
parser.add_argument("--dataset", default="d4rl-hopper-medium-v0") # D4RL dataset name
parser.add_argument("--seed", default=1, type=int) # Sets Gym, PyTorch and Numpy seeds
parser.add_argument("--eval_freq", default=1e4, type=float) # How often (time steps) we evaluate
parser.add_argument("--max_timesteps", default=5e5,
type=int) # Max time steps to run environment or train for (this defines buffer size)
parser.add_argument("--max_vae_trainstep", default=2e5, type=int)
# BCQ parameter
parser.add_argument("--batch_size", default=100, type=int) # Mini batch size for networks
parser.add_argument("--discount", default=0.99) # Discount factor
parser.add_argument("--tau", default=0.005) # Target network update rate
parser.add_argument("--lmbda", default=0.75) # Weighting for clipped double Q-learning in BCQ
parser.add_argument("--phi", default=0.1, type=float) # Max perturbation hyper-parameter for BCQ
parser.add_argument("--load_buffer_size", default=1000000, type=int) # number of samples to load into the buffer
parser.add_argument("--actor_lr", default=1e-3, type=float) # learning rate of actor
parser.add_argument("--n_action", default=100, type=int) # number of sampling action for policy (in backup)
parser.add_argument("--n_action_execute", default=100, type=int) # number of sampling action for policy (in execution)
# BEAR parameter
parser.add_argument("--bear", action="store_true") # If true, use BEAR
parser.add_argument("--version", default='0',
type=str) # Basically whether to do min(Q), max(Q), mean(Q)
parser.add_argument('--mode', default='hardcoded', #hardcoded
type=str) # Whether to do automatic lagrange dual descent or manually tune coefficient of the MMD loss (prefered "auto")
parser.add_argument('--num_samples_match', default=5, type=int) # number of samples to do matching in MMD
parser.add_argument('--mmd_sigma', default=20.0, type=float) # The bandwidth of the MMD kernel parameter default 10
parser.add_argument('--kernel_type', default='laplacian',
type=str) # kernel type for MMD ("laplacian" or "gaussian")
parser.add_argument('--lagrange_thresh', default=10.0,
type=float) # What is the threshold for the lagrange multiplier
parser.add_argument('--distance_type', default="MMD", type=str) # Distance type ("KL" or "MMD")
parser.add_argument('--use_ensemble_variance', default='False', type=str) # Whether to use ensemble variance or not
# Our parameter
parser.add_argument("--backup", type=str, default="QL") # "QL": q learning (Q-max) back up, "AC": actor-critic backup
parser.add_argument("--ql_noise", type=float, default=0.15) # Noise of next action in QL
parser.add_argument("--automatic_beta", type=bool, default=True) # If true, use percentile for b (beta is the b in paper)
parser.add_argument("--beta_percentile", type=float, default=2.0) # Use x-Percentile as the value of b
parser.add_argument("--beta", default=-0.4, type=float) # hardcoded b, only effective when automatic_beta = False
parser.add_argument("--vmin", default=0, type=float) # min value of the environment. Empirically I set it to be the min of 1000 random rollout.
args = parser.parse_args()
print("---------------------------------------")
if args.bear:
print(f"Setting: Training PQL-BEAR, Env: {args.env}, Seed: {args.seed}")
else:
print(f"Setting: Training PQL-BCQ, Env: {args.env}, Seed: {args.seed}")
print("---------------------------------------")
if not os.path.exists("./results"):
os.makedirs("./results")
if not os.path.exists("./models"):
os.makedirs("./models")
env = gym.make(args.env)
env.seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
max_action = float(env.action_space.high[0])
max_state = float(env.observation_space.high[0])
if max_state == np.inf:
max_state = None
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if args.bear:
train_PQL_BEAR(state_dim, action_dim, max_action, device, args)
else:
train_PQL_BCQ(state_dim, action_dim, max_state, max_action, device, args)
| [
"torch.manual_seed",
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"utils.ReplayBuffer",
"numpy.array",
"torch.cuda.is_available",
"BEAR.BEAR",
"numpy.random.seed",
"BCQ.PQL_BCQ",
"numpy.percentile",
"gym.make",
"numpy.save"
] | [((292, 928), 'BEAR.BEAR', 'BEAR.BEAR', (['(2)', 'state_dim', 'action_dim', 'max_action'], {'delta_conf': '(0.1)', 'use_bootstrap': '(False)', 'version': 'args.version', 'lambda_': '(0.0)', 'threshold': '(0.05)', 'mode': 'args.mode', 'num_samples_match': 'args.num_samples_match', 'mmd_sigma': 'args.mmd_sigma', 'lagrange_thresh': 'args.lagrange_thresh', 'use_kl': "(True if args.distance_type == 'KL' else False)", 'use_ensemble': "(False if args.use_ensemble_variance == 'False' else True)", 'kernel_type': 'args.kernel_type', 'use_state_filter': '(True)', 'actor_lr': 'args.actor_lr', 'beta': 'args.beta', 'n_action': 'args.n_action', 'n_action_execute': 'args.n_action_execute', 'backup': 'args.backup', 'ql_noise': 'args.ql_noise', 'vmin': 'args.vmin'}), "(2, state_dim, action_dim, max_action, delta_conf=0.1,\n use_bootstrap=False, version=args.version, lambda_=0.0, threshold=0.05,\n mode=args.mode, num_samples_match=args.num_samples_match, mmd_sigma=\n args.mmd_sigma, lagrange_thresh=args.lagrange_thresh, use_kl=True if \n args.distance_type == 'KL' else False, use_ensemble=False if args.\n use_ensemble_variance == 'False' else True, kernel_type=args.\n kernel_type, use_state_filter=True, actor_lr=args.actor_lr, beta=args.\n beta, n_action=args.n_action, n_action_execute=args.n_action_execute,\n backup=args.backup, ql_noise=args.ql_noise, vmin=args.vmin)\n", (301, 928), False, 'import BEAR\n'), ((1258, 1307), 'utils.ReplayBuffer', 'utils.ReplayBuffer', (['state_dim', 'action_dim', 'device'], {}), '(state_dim, action_dim, device)\n', (1276, 1307), False, 'import utils\n'), ((2990, 3280), 'BCQ.PQL_BCQ', 'BCQ.PQL_BCQ', (['state_dim', 'action_dim', 'max_state', 'max_action', 'device', 'args.discount', 'args.tau', 'args.lmbda', 'args.phi'], {'n_action': 'args.n_action', 'n_action_execute': 'args.n_action_execute', 'backup': 'args.backup', 'ql_noise': 'args.ql_noise', 'actor_lr': 'args.actor_lr', 'beta': 'args.beta', 'vmin': 'args.vmin'}), '(state_dim, action_dim, 
max_state, max_action, device, args.\n discount, args.tau, args.lmbda, args.phi, n_action=args.n_action,\n n_action_execute=args.n_action_execute, backup=args.backup, ql_noise=\n args.ql_noise, actor_lr=args.actor_lr, beta=args.beta, vmin=args.vmin)\n', (3001, 3280), False, 'import BCQ\n'), ((3529, 3578), 'utils.ReplayBuffer', 'utils.ReplayBuffer', (['state_dim', 'action_dim', 'device'], {}), '(state_dim, action_dim, device)\n', (3547, 3578), False, 'import utils\n'), ((5290, 5308), 'gym.make', 'gym.make', (['env_name'], {}), '(env_name)\n', (5298, 5308), False, 'import gym\n'), ((5893, 5918), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (5916, 5918), False, 'import argparse\n'), ((9901, 9919), 'gym.make', 'gym.make', (['args.env'], {}), '(args.env)\n', (9909, 9919), False, 'import gym\n'), ((9949, 9977), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (9966, 9977), False, 'import torch\n'), ((9982, 10007), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (9996, 10007), True, 'import numpy as np\n'), ((1879, 1925), 'numpy.percentile', 'np.percentile', (['test_loss', 'args.beta_percentile'], {}), '(test_loss, args.beta_percentile)\n', (1892, 1925), True, 'import numpy as np\n'), ((2575, 2642), 'numpy.save', 'np.save', (['f"""./results/PQL_BEAR_{hp_setting}_{log_name}"""', 'evaluations'], {}), "(f'./results/PQL_BEAR_{hp_setting}_{log_name}', evaluations)\n", (2582, 2642), True, 'import numpy as np\n'), ((4153, 4199), 'numpy.percentile', 'np.percentile', (['test_loss', 'args.beta_percentile'], {}), '(test_loss, args.beta_percentile)\n', (4166, 4199), True, 'import numpy as np\n'), ((4949, 5011), 'numpy.save', 'np.save', (['f"""./results/PQL_{hp_setting}_{log_name}"""', 'evaluations'], {}), "(f'./results/PQL_{hp_setting}_{log_name}', evaluations)\n", (4956, 5011), True, 'import numpy as np\n'), ((9756, 9783), 'os.path.exists', 'os.path.exists', (['"""./results"""'], {}), 
"('./results')\n", (9770, 9783), False, 'import os\n'), ((9793, 9817), 'os.makedirs', 'os.makedirs', (['"""./results"""'], {}), "('./results')\n", (9804, 9817), False, 'import os\n'), ((9830, 9856), 'os.path.exists', 'os.path.exists', (['"""./models"""'], {}), "('./models')\n", (9844, 9856), False, 'import os\n'), ((9866, 9889), 'os.makedirs', 'os.makedirs', (['"""./models"""'], {}), "('./models')\n", (9877, 9889), False, 'import os\n'), ((10291, 10316), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10314, 10316), False, 'import torch\n'), ((5507, 5522), 'numpy.array', 'np.array', (['state'], {}), '(state)\n', (5515, 5522), True, 'import numpy as np\n')] |
from numpy import sin, pi, cos
from objects.CSCG._3d.exact_solutions.status.Stokes.base import Stokes_Base
# noinspection PyAbstractClass
class Stokes_SinCos1(Stokes_Base):
    """Sin/cos manufactured solution #1 for the 3-D Stokes problem."""
    def __init__(self, es):
        super(Stokes_SinCos1, self).__init__(es)
        self._es_.standard_properties.name = 'Stokes-sin-cos-1'
    @property
    def valid_time(self):
        """This exact solution holds at every time, hence ``None``."""
        return None
    def p(self, t, x, y, z):
        """Pressure field."""
        k = 2 * pi
        return sin(k * x) * sin(k * y) * sin(k * z)
    def u(self, t, x, y, z):
        """x-component of the velocity field."""
        k = 2 * pi
        return cos(k * x) * sin(k * y) * sin(k * z)
    def v(self, t, x, y, z):
        """y-component of the velocity field."""
        k = 2 * pi
        return sin(k * x) * cos(k * y) * sin(k * z)
    def w(self, t, x, y, z):
        """z-component of the velocity field."""
        k = 2 * pi
        return -2 * sin(k * x) * sin(k * y) * cos(k * z)
"numpy.sin",
"numpy.cos"
] | [((573, 588), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (576, 588), False, 'from numpy import sin, pi, cos\n'), ((659, 674), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (662, 674), False, 'from numpy import sin, pi, cos\n'), ((743, 758), 'numpy.sin', 'sin', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (746, 758), False, 'from numpy import sin, pi, cos\n'), ((833, 848), 'numpy.cos', 'cos', (['(2 * pi * z)'], {}), '(2 * pi * z)\n', (836, 848), False, 'from numpy import sin, pi, cos\n'), ((545, 560), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (548, 560), False, 'from numpy import sin, pi, cos\n'), ((559, 574), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (562, 574), False, 'from numpy import sin, pi, cos\n'), ((631, 646), 'numpy.cos', 'cos', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (634, 646), False, 'from numpy import sin, pi, cos\n'), ((645, 660), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (648, 660), False, 'from numpy import sin, pi, cos\n'), ((715, 730), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (718, 730), False, 'from numpy import sin, pi, cos\n'), ((729, 744), 'numpy.cos', 'cos', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (732, 744), False, 'from numpy import sin, pi, cos\n'), ((819, 834), 'numpy.sin', 'sin', (['(2 * pi * y)'], {}), '(2 * pi * y)\n', (822, 834), False, 'from numpy import sin, pi, cos\n'), ((805, 820), 'numpy.sin', 'sin', (['(2 * pi * x)'], {}), '(2 * pi * x)\n', (808, 820), False, 'from numpy import sin, pi, cos\n')] |
#!/usr/bin/python
"""
Test that pairwise deletion mask (intersection) returns expected values
"""
from __future__ import print_function
from __future__ import division
from builtins import zip
from builtins import range
from past.utils import old_div
from pybraincompare.mr.datasets import get_pair_images, get_data_directory
from pybraincompare.compare.mrutils import make_binary_deletion_mask, make_binary_deletion_vector
from pybraincompare.mr.datasets import get_data_directory
from numpy.testing import assert_array_equal, assert_almost_equal, assert_equal
from nose.tools import assert_true, assert_false
from nilearn.image import resample_img
import nibabel
import random
import pandas
import numpy
import os
'''Test that binary deletion mask returns expected overlap given two images, nans and zeros'''
def test_binary_deletion_mask():
    """Check the pairwise-deletion mask against synthetic images with a known
    overlap fraction.

    For each target overlap percentage, two brain-shaped images are built so
    that exactly ``number_overlap_voxels`` voxels are nonzero in both, with the
    remaining voxels split into nan/zero combinations that must be excluded
    from the mask.  The mask's nonzero count must equal the planned overlap.
    """
    mr_directory = get_data_directory()
    standard = "%s/MNI152_T1_8mm_brain_mask.nii.gz" %(mr_directory)
    # NOTE(review): get_data()/get_affine()/get_header() below are the legacy
    # nibabel accessors; newer nibabel deprecates them in favor of
    # get_fdata()/.affine/.header -- confirm the pinned nibabel version.
    brain_mask = nibabel.load(standard)
    # Turn a list of (x, y, z) triples into a tuple of axis arrays usable as a
    # numpy fancy index.
    unzip = lambda l:tuple(zip(*l))
    # We will generate data with the following overlap percentages
    overlap_percents = [0.0,0.25,0.5,0.75,1.0]
    for overlap in overlap_percents:
        image1 = numpy.zeros(brain_mask.shape)
        image2 = numpy.zeros(brain_mask.shape)
        # All in-brain voxel coordinates, shuffled so group assignment is random.
        x,y,z = numpy.where(brain_mask.get_data()==1)
        idx = list(zip(x,y,z))
        numpy.random.shuffle(idx)
        number_voxels = len(idx)
        number_overlap_voxels = int(numpy.floor(overlap*number_voxels))
        remaining_voxels = int(number_voxels - number_overlap_voxels)
        # We break the remaining voxels into 4 groups:
        # - nans that will overlap
        # - zeros that will overlap (no change to images here, already zeros)
        # - nans in image1, random sample of values in image2
        # - zeros in image2, random sample of values in image1
        group_size = old_div(remaining_voxels,4)
        if overlap != 0.0:
            # Here are the overlapping voxels for each image
            overlap_idx = unzip(idx[0:number_overlap_voxels])
            image1[overlap_idx] = 1
            image2[overlap_idx] = 1
        if overlap != 1.0:
            # Nans that will overlap
            nans_overlap_idx = unzip(idx[number_overlap_voxels:(number_overlap_voxels+group_size)])
            image1[nans_overlap_idx] = numpy.nan
            image2[nans_overlap_idx] = numpy.nan
            # Nans in image1, random sample of values in image 2
            start = number_overlap_voxels+group_size
            end = number_overlap_voxels+2*group_size
            nans_image1 = idx[start:end]
            values_image2 = unzip(random.sample(nans_image1,int(old_div(group_size,2))))
            image1[unzip(nans_image1)] = numpy.nan
            image2[values_image2] = 0.5
            # Zeros in image2, random sample of values in image 1
            start = number_overlap_voxels+2*group_size
            end = number_overlap_voxels+3*group_size
            zeros_image2 = idx[start:end]
            values_image1 = unzip(random.sample(zeros_image2,int(old_div(group_size,2))))
            image1[values_image1] = 0.75
        # Create nifti images and pdmask
        nii1 = nibabel.Nifti1Image(image1,affine=brain_mask.get_affine(),header=brain_mask.get_header())
        nii2 = nibabel.Nifti1Image(image2,affine=brain_mask.get_affine(),header=brain_mask.get_header())
        pdmask = make_binary_deletion_mask([nii1,nii2])
        # The mask keeps only voxels that are nonzero and finite in BOTH images.
        actual_overlap = len(numpy.where(pdmask!=0)[0])
        print("Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap))
        assert_equal(actual_overlap,number_overlap_voxels)
'''Test that returned image is binary, no nans, infs'''
def test_binary_deletion_mask_values():
    """The pairwise deletion mask must be strictly binary with no NaN/Inf."""
    nii_paths = get_pair_images(voxdims=["2","2"])
    nii_a = nibabel.load(nii_paths[0])
    nii_b = nibabel.load(nii_paths[1])
    mask = make_binary_deletion_mask([nii_a,nii_b])
    # A single sorted-unique pass replaces the two separate numpy.unique calls.
    distinct = numpy.unique(mask)
    assert_equal(distinct[0],0.0)
    assert_equal(distinct[1],1.0)
    assert_false(numpy.isnan(mask).any())
    assert_false(numpy.isinf(mask).any())
'''Test that binary deletion mask returns expected overlap given two images, nans and zeros'''
def test_binary_deletion_vector():
    """Check the pairwise-deletion vector against 1-D synthetic data with a
    known overlap fraction, then verify the result is binary.

    The construction mirrors test_binary_deletion_mask but on flat vectors:
    the first ``number_overlap_voxels`` entries are nonzero in both vectors and
    the remainder is split into nan/zero combinations that must be dropped.
    """
    # NOTE(review): mr_directory is never used in this test -- likely a
    # leftover from the 3-D variant above.
    mr_directory = get_data_directory()
    # We will generate data with the following overlap percentages
    overlap_percents = [0.0,0.25,0.5,0.75,1.0]
    for overlap in overlap_percents:
        vector_length = 10000
        image_vector1 = numpy.zeros((vector_length))
        image_vector2 = numpy.zeros((vector_length))
        number_overlap_voxels = int(numpy.floor(overlap*vector_length))
        remaining_voxels = int(vector_length - number_overlap_voxels)
        idx = list(range(0,vector_length))
        # We break the remaining voxels into 4 groups:
        # - nans that will overlap
        # - zeros that will overlap (no change to images here, already zeros)
        # - nans in image1, random sample of values in image2
        # - zeros in image2, random sample of values in image1
        group_size = old_div(remaining_voxels,4)
        if overlap != 0.0:
            # Here are the overlapping voxels for each image
            overlap_idx = list(range(0,number_overlap_voxels))
            image_vector1[overlap_idx] = 1
            image_vector2[overlap_idx] = 1
        if overlap != 1.0:
            # Nans that will overlap
            nans_overlap_idx = list(range(number_overlap_voxels,(number_overlap_voxels+group_size)))
            image_vector1[nans_overlap_idx] = numpy.nan
            image_vector2[nans_overlap_idx] = numpy.nan
            # Nans in image1, random sample of values in image 2
            start = number_overlap_voxels+group_size
            end = number_overlap_voxels+2*group_size
            nans_image1 = idx[start:end]
            # NOTE(review): unlike the 3-D test this is not a random sample; it
            # takes group_size//2 consecutive indices starting at the END of
            # the nans_image1 range -- confirm this offset is intentional.
            values_image2 = list(range(nans_image1[-1],(nans_image1[-1] + int(old_div(group_size,2)))))
            image_vector1[nans_image1] = numpy.nan
            image_vector2[values_image2] = 0.5
            # Zeros in image2, random sample of values in image 1
            start = number_overlap_voxels+2*group_size
            end = number_overlap_voxels+3*group_size
            zeros_image2 = idx[start:end]
            values_image1 = list(range(zeros_image2[-1],(zeros_image2[-1] + int(old_div(group_size,2)))))
            image_vector1[values_image1] = 0.75
        # Create nifti images and pdmask
        pdmask = make_binary_deletion_vector([image_vector1,image_vector2])
        actual_overlap = len(numpy.where(pdmask!=0)[0])
        print("Overlap %s percent: should have %s, actual %s" %(overlap,number_overlap_voxels,actual_overlap))
        assert_equal(actual_overlap,number_overlap_voxels)
        # Also check that is binary
        if overlap != 0 and overlap != 1:
            assert_equal(numpy.unique(pdmask)[0],0)
            assert_equal(numpy.unique(pdmask)[1],1)
        if overlap == 0:
            assert_equal(numpy.unique(pdmask)[0],0)
        if overlap == 1:
            assert_equal(numpy.unique(pdmask)[0],1)
| [
"pybraincompare.mr.datasets.get_data_directory",
"numpy.testing.assert_equal",
"numpy.unique",
"nibabel.load",
"numpy.where",
"numpy.floor",
"past.utils.old_div",
"builtins.zip",
"numpy.zeros",
"pybraincompare.compare.mrutils.make_binary_deletion_vector",
"builtins.range",
"numpy.isnan",
"py... | [((864, 884), 'pybraincompare.mr.datasets.get_data_directory', 'get_data_directory', ([], {}), '()\n', (882, 884), False, 'from pybraincompare.mr.datasets import get_data_directory\n'), ((966, 988), 'nibabel.load', 'nibabel.load', (['standard'], {}), '(standard)\n', (978, 988), False, 'import nibabel\n'), ((3557, 3592), 'pybraincompare.mr.datasets.get_pair_images', 'get_pair_images', ([], {'voxdims': "['2', '2']"}), "(voxdims=['2', '2'])\n", (3572, 3592), False, 'from pybraincompare.mr.datasets import get_pair_images, get_data_directory\n'), ((3604, 3627), 'nibabel.load', 'nibabel.load', (['images[0]'], {}), '(images[0])\n', (3616, 3627), False, 'import nibabel\n'), ((3639, 3662), 'nibabel.load', 'nibabel.load', (['images[1]'], {}), '(images[1])\n', (3651, 3662), False, 'import nibabel\n'), ((3675, 3718), 'pybraincompare.compare.mrutils.make_binary_deletion_mask', 'make_binary_deletion_mask', (['[image1, image2]'], {}), '([image1, image2])\n', (3700, 3718), False, 'from pybraincompare.compare.mrutils import make_binary_deletion_mask, make_binary_deletion_vector\n'), ((4048, 4068), 'pybraincompare.mr.datasets.get_data_directory', 'get_data_directory', ([], {}), '()\n', (4066, 4068), False, 'from pybraincompare.mr.datasets import get_data_directory\n'), ((1185, 1214), 'numpy.zeros', 'numpy.zeros', (['brain_mask.shape'], {}), '(brain_mask.shape)\n', (1196, 1214), False, 'import numpy\n'), ((1228, 1257), 'numpy.zeros', 'numpy.zeros', (['brain_mask.shape'], {}), '(brain_mask.shape)\n', (1239, 1257), False, 'import numpy\n'), ((1339, 1364), 'numpy.random.shuffle', 'numpy.random.shuffle', (['idx'], {}), '(idx)\n', (1359, 1364), False, 'import numpy\n'), ((1820, 1848), 'past.utils.old_div', 'old_div', (['remaining_voxels', '(4)'], {}), '(remaining_voxels, 4)\n', (1827, 1848), False, 'from past.utils import old_div\n'), ((3191, 3230), 'pybraincompare.compare.mrutils.make_binary_deletion_mask', 'make_binary_deletion_mask', (['[nii1, nii2]'], {}), '([nii1, nii2])\n', 
(3216, 3230), False, 'from pybraincompare.compare.mrutils import make_binary_deletion_mask, make_binary_deletion_vector\n'), ((3394, 3445), 'numpy.testing.assert_equal', 'assert_equal', (['actual_overlap', 'number_overlap_voxels'], {}), '(actual_overlap, number_overlap_voxels)\n', (3406, 3445), False, 'from numpy.testing import assert_array_equal, assert_almost_equal, assert_equal\n'), ((4263, 4289), 'numpy.zeros', 'numpy.zeros', (['vector_length'], {}), '(vector_length)\n', (4274, 4289), False, 'import numpy\n'), ((4312, 4338), 'numpy.zeros', 'numpy.zeros', (['vector_length'], {}), '(vector_length)\n', (4323, 4338), False, 'import numpy\n'), ((4805, 4833), 'past.utils.old_div', 'old_div', (['remaining_voxels', '(4)'], {}), '(remaining_voxels, 4)\n', (4812, 4833), False, 'from past.utils import old_div\n'), ((6049, 6108), 'pybraincompare.compare.mrutils.make_binary_deletion_vector', 'make_binary_deletion_vector', (['[image_vector1, image_vector2]'], {}), '([image_vector1, image_vector2])\n', (6076, 6108), False, 'from pybraincompare.compare.mrutils import make_binary_deletion_mask, make_binary_deletion_vector\n'), ((6272, 6323), 'numpy.testing.assert_equal', 'assert_equal', (['actual_overlap', 'number_overlap_voxels'], {}), '(actual_overlap, number_overlap_voxels)\n', (6284, 6323), False, 'from numpy.testing import assert_array_equal, assert_almost_equal, assert_equal\n'), ((1015, 1022), 'builtins.zip', 'zip', (['*l'], {}), '(*l)\n', (1018, 1022), False, 'from builtins import zip\n'), ((1323, 1335), 'builtins.zip', 'zip', (['x', 'y', 'z'], {}), '(x, y, z)\n', (1326, 1335), False, 'from builtins import zip\n'), ((1427, 1463), 'numpy.floor', 'numpy.floor', (['(overlap * number_voxels)'], {}), '(overlap * number_voxels)\n', (1438, 1463), False, 'import numpy\n'), ((3734, 3754), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (3746, 3754), False, 'import numpy\n'), ((3778, 3798), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (3790, 
3798), False, 'import numpy\n'), ((4373, 4409), 'numpy.floor', 'numpy.floor', (['(overlap * vector_length)'], {}), '(overlap * vector_length)\n', (4384, 4409), False, 'import numpy\n'), ((4490, 4513), 'builtins.range', 'range', (['(0)', 'vector_length'], {}), '(0, vector_length)\n', (4495, 4513), False, 'from builtins import range\n'), ((3256, 3280), 'numpy.where', 'numpy.where', (['(pdmask != 0)'], {}), '(pdmask != 0)\n', (3267, 3280), False, 'import numpy\n'), ((3822, 3841), 'numpy.isnan', 'numpy.isnan', (['pdmask'], {}), '(pdmask)\n', (3833, 3841), False, 'import numpy\n'), ((3864, 3883), 'numpy.isinf', 'numpy.isinf', (['pdmask'], {}), '(pdmask)\n', (3875, 3883), False, 'import numpy\n'), ((4936, 4967), 'builtins.range', 'range', (['(0)', 'number_overlap_voxels'], {}), '(0, number_overlap_voxels)\n', (4941, 4967), False, 'from builtins import range\n'), ((5128, 5192), 'builtins.range', 'range', (['number_overlap_voxels', '(number_overlap_voxels + group_size)'], {}), '(number_overlap_voxels, number_overlap_voxels + group_size)\n', (5133, 5192), False, 'from builtins import range\n'), ((6134, 6158), 'numpy.where', 'numpy.where', (['(pdmask != 0)'], {}), '(pdmask != 0)\n', (6145, 6158), False, 'import numpy\n'), ((6416, 6436), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (6428, 6436), False, 'import numpy\n'), ((6462, 6482), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (6474, 6482), False, 'import numpy\n'), ((6530, 6550), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (6542, 6550), False, 'import numpy\n'), ((6598, 6618), 'numpy.unique', 'numpy.unique', (['pdmask'], {}), '(pdmask)\n', (6610, 6618), False, 'import numpy\n'), ((2524, 2546), 'past.utils.old_div', 'old_div', (['group_size', '(2)'], {}), '(group_size, 2)\n', (2531, 2546), False, 'from past.utils import old_div\n'), ((2879, 2901), 'past.utils.old_div', 'old_div', (['group_size', '(2)'], {}), '(group_size, 2)\n', (2886, 2901), False, 'from past.utils 
import old_div\n'), ((5553, 5575), 'past.utils.old_div', 'old_div', (['group_size', '(2)'], {}), '(group_size, 2)\n', (5560, 5575), False, 'from past.utils import old_div\n'), ((5931, 5953), 'past.utils.old_div', 'old_div', (['group_size', '(2)'], {}), '(group_size, 2)\n', (5938, 5953), False, 'from past.utils import old_div\n')] |
import numpy as np
from math import ceil
from scipy.stats import norm
from TaPR import compute_precision_recall
from data_loader import _count_anomaly_segments
n_thresholds = 1000
def _simulate_thresholds(rec_errors, n, verbose):
# maximum value of the anomaly score for all time steps in the test data
thresholds, step_size = [], abs(np.max(rec_errors) - np.min(rec_errors)) / n
th = np.min(rec_errors)
if verbose:
print(f'Threshold Range: ({np.max(rec_errors)}, {np.min(rec_errors)}) with Step Size: {step_size}')
for i in range(n):
thresholds.append(float(th))
th = th + step_size
return thresholds
def _flatten_anomaly_scores(values, stride, flatten=False):
flat_seq = []
if flatten:
for i, x in enumerate(values):
if i == len(values) - 1:
flat_seq = flat_seq + list(np.ravel(x).astype(float))
else:
flat_seq = flat_seq + list(np.ravel(x[:stride]).astype(float))
else:
flat_seq = list(np.ravel(values).astype(float))
return flat_seq
def compute_anomaly_scores(x, rec_x, scoring='square', x_val=None, rec_val=None):
    """Turn reconstruction errors into per-timestep anomaly scores.

    'absolute' and 'square' average the elementwise error magnitude over the
    last axis; 'normal' fits a Gaussian to the validation reconstruction error
    and scores the test error against it.
    """
    # average anomaly scores from different sensors/channels/metrics/variables (in case of multivariate time series)
    if scoring == 'absolute':
        return np.mean(np.abs(x - rec_x), axis=-1)
    elif scoring == 'square':
        return np.mean(np.square(x - rec_x), axis=-1)
    elif scoring == 'normal':
        if x_val is not None and rec_val is not None:
            val_rec_err = x_val - rec_val
            test_rec_err = x - rec_x
            # norm.fit returns the MLE mean and std of the flattened errors.
            mu, std = norm.fit(val_rec_err)
            # NOTE(review): this looks like a scalar Mahalanobis-style score
            # (err - mu)^2 / std, but with a transpose and elementwise ops the
            # result shape depends on the input rank -- verify against callers.
            return (test_rec_err - mu).T * std ** -1 * (test_rec_err - mu)
    # NOTE(review): implicitly returns None for unknown `scoring` values, or
    # when 'normal' is requested without validation data.
def compute_metrics(anomaly_scores, labels, label_segments=None, n=n_thresholds, delta=0.01, alpha=0.5, theta=0.5, stride=1, verbose=False):
    """Sweep ``n`` thresholds over the anomaly scores and collect time-aware
    precision/recall (TaPR) plus segment-level accuracy at each threshold.

    Args:
        anomaly_scores: per-timestep (or per-window) anomaly scores.
        labels: ground-truth binary labels, aligned with the flattened scores.
        label_segments: list of labelled anomaly segments (index lists).
        n: number of candidate thresholds to sweep.
        delta: detection-tolerance fraction of the mean segment length.
        alpha, theta: TaPR weighting/overlap parameters, forwarded as-is.
        stride: window stride used when flattening 2-D score arrays.
        verbose: forwarded to the threshold generator and TaPR.

    Returns:
        dict with mean 'precision'/'recall', best 'f1', per-threshold 'count'
        and 'ratio' lists, the 'thresholds' swept, and the flat 'anomaly_scores'.
    """
    if label_segments is None:
        label_segments = []
    thresholds = _simulate_thresholds(anomaly_scores, n, verbose)
    correct_count, correct_ratio = [], []
    precision, recall, f1 = [], [], []
    flat_seq = _flatten_anomaly_scores(anomaly_scores, stride, flatten=len(anomaly_scores.shape) == 2)
    # TaD (detection tolerance) depends only on the labelled segments and
    # delta, so compute it once instead of inside the threshold sweep.
    anomaly_lengths = [len(seg) for seg in label_segments]
    TaD = 0 if len(anomaly_lengths) == 0 else np.ceil(np.mean(anomaly_lengths) * delta).astype(int)
    for th in thresholds:
        pred_anomalies = np.zeros(len(flat_seq)).astype(int)  # default with no anomaly
        pred_anomalies[np.where(np.array(flat_seq) > th)[0]] = 1  # assign 1 if scores > threshold
        _, pred_segments = _count_anomaly_segments(pred_anomalies)
        if len(labels) != len(pred_anomalies):
            print(f'evaluating with unmatch shape: Labels: {len(labels)} vs. Preds: {len(pred_anomalies)}')
            labels = labels[-len(pred_anomalies):]  # ref. OmniAnomaly
            print(f'evaluating with unmatch shape: Labels: {len(labels)} vs. Preds: {len(pred_anomalies)}')
        TaP, TaR = compute_precision_recall(pred_anomalies, labels, theta=theta, delta=TaD, alpha=alpha, verbose=verbose)
        count, ratio = compute_accuracy(pred_segments, label_segments, delta)
        precision.append(float(TaP))
        recall.append(float(TaR))
        f1.append(float(2 * (TaP * TaR) / (TaP + TaR + 1e-7)))
        correct_count.append(int(count))
        correct_ratio.append(float(ratio))
    return {
        'precision': np.mean(precision),
        'recall': np.mean(recall),
        'f1': np.max(f1),
        'count': correct_count,
        'ratio': correct_ratio,
        'thresholds': thresholds,
        'anomaly_scores': flat_seq
    }
def visualization(anomaly_scores, x_test, labels):
    """Placeholder for plotting the raw series, labels, and anomaly scores
    side-by-side; currently a no-op."""
    # TODO: visualize original data + label and anomaly_scores side-by-side
    return None
def compute_accuracy(pred_segments, anomaly_segments, delta):
    """Count labelled segments detected by at least one predicted segment.

    A labelled segment counts as detected when the center index of some
    predicted segment falls strictly inside the segment widened by the larger
    of its own length and ceil(length * delta) on each side.

    Returns:
        (hits, hits / (len(anomaly_segments) + 1e-7))
    """
    hits = 0
    for segment in anomaly_segments:
        length = segment[-1] - segment[0]  # span of the labelled anomaly
        tolerance = ceil(length * delta)
        lower = min(segment[0] - length, segment[0] - tolerance)
        upper = max(segment[-1] + length, segment[-1] + tolerance)
        # A predicted segment is represented by its center element.
        if any(lower < pred[len(pred) // 2] < upper for pred in pred_segments):
            hits += 1
    return hits, hits / (len(anomaly_segments) + 1e-7)
| [
"numpy.mean",
"numpy.abs",
"data_loader._count_anomaly_segments",
"math.ceil",
"numpy.max",
"numpy.square",
"scipy.stats.norm.fit",
"numpy.array",
"TaPR.compute_precision_recall",
"numpy.min",
"numpy.ravel"
] | [((401, 419), 'numpy.min', 'np.min', (['rec_errors'], {}), '(rec_errors)\n', (407, 419), True, 'import numpy as np\n'), ((2459, 2498), 'data_loader._count_anomaly_segments', '_count_anomaly_segments', (['pred_anomalies'], {}), '(pred_anomalies)\n', (2482, 2498), False, 'from data_loader import _count_anomaly_segments\n'), ((3068, 3174), 'TaPR.compute_precision_recall', 'compute_precision_recall', (['pred_anomalies', 'labels'], {'theta': 'theta', 'delta': 'TaD', 'alpha': 'alpha', 'verbose': 'verbose'}), '(pred_anomalies, labels, theta=theta, delta=TaD,\n alpha=alpha, verbose=verbose)\n', (3092, 3174), False, 'from TaPR import compute_precision_recall\n'), ((3503, 3521), 'numpy.mean', 'np.mean', (['precision'], {}), '(precision)\n', (3510, 3521), True, 'import numpy as np\n'), ((3541, 3556), 'numpy.mean', 'np.mean', (['recall'], {}), '(recall)\n', (3548, 3556), True, 'import numpy as np\n'), ((3572, 3582), 'numpy.max', 'np.max', (['f1'], {}), '(f1)\n', (3578, 3582), True, 'import numpy as np\n'), ((4039, 4054), 'math.ceil', 'ceil', (['(L * delta)'], {}), '(L * delta)\n', (4043, 4054), False, 'from math import ceil\n'), ((1336, 1353), 'numpy.abs', 'np.abs', (['(x - rec_x)'], {}), '(x - rec_x)\n', (1342, 1353), True, 'import numpy as np\n'), ((1417, 1437), 'numpy.square', 'np.square', (['(x - rec_x)'], {}), '(x - rec_x)\n', (1426, 1437), True, 'import numpy as np\n'), ((347, 365), 'numpy.max', 'np.max', (['rec_errors'], {}), '(rec_errors)\n', (353, 365), True, 'import numpy as np\n'), ((368, 386), 'numpy.min', 'np.min', (['rec_errors'], {}), '(rec_errors)\n', (374, 386), True, 'import numpy as np\n'), ((472, 490), 'numpy.max', 'np.max', (['rec_errors'], {}), '(rec_errors)\n', (478, 490), True, 'import numpy as np\n'), ((494, 512), 'numpy.min', 'np.min', (['rec_errors'], {}), '(rec_errors)\n', (500, 512), True, 'import numpy as np\n'), ((1029, 1045), 'numpy.ravel', 'np.ravel', (['values'], {}), '(values)\n', (1037, 1045), True, 'import numpy as np\n'), ((1633, 
1654), 'scipy.stats.norm.fit', 'norm.fit', (['val_rec_err'], {}), '(val_rec_err)\n', (1641, 1654), False, 'from scipy.stats import norm\n'), ((2365, 2383), 'numpy.array', 'np.array', (['flat_seq'], {}), '(flat_seq)\n', (2373, 2383), True, 'import numpy as np\n'), ((3002, 3026), 'numpy.mean', 'np.mean', (['anomaly_lengths'], {}), '(anomaly_lengths)\n', (3009, 3026), True, 'import numpy as np\n'), ((871, 882), 'numpy.ravel', 'np.ravel', (['x'], {}), '(x)\n', (879, 882), True, 'import numpy as np\n'), ((959, 979), 'numpy.ravel', 'np.ravel', (['x[:stride]'], {}), '(x[:stride])\n', (967, 979), True, 'import numpy as np\n')] |
# import gym
# env = gym.make('FrozenLake8x8-v0')
# env.reset()
# for _ in range(10):
# env.render()
# env.step(env.action_space.sample()) # take a random action
# env.close()
# from gym import envs
# import gym
# frozen = gym.make('FrozenLake8x8-v0')
# numEpisodes = 10
# for episode in range(numEpisodes):
# observation = frozen.reset()
# for epochs in range(100):
# # frozen.render()
# # print(observation)
# action = frozen.action_space.sample()
# # print(action)
# obs, reward, done, info = frozen.step(action)
# if done: #this is needed to be changed
# print("Episode finished after {} timesteps".format(epochs+1))
# break
# frozen.close()
import gym
import numpy as np
import matplotlib.pyplot as plt
from tqdm import trange
# Roll out a purely random policy on FrozenLake8x8 and track how often it
# reaches the goal, plotting a running success rate every 10 episodes.
ngames = 1000
percentage = []   # running 10-episode average success rate, sampled every 10 games
scores = []       # per-episode return (FrozenLake reward is 1 on success, else 0)
env = gym.make('FrozenLake8x8-v0')
for i in trange(ngames):
    done = False
    obs = env.reset()
    score = 0
    while not done:
        # Uniformly random action; no learning happens in this script.
        action = env.action_space.sample()
        obs, reward, done, info = env.step(action=action)
        score+=reward
    scores.append(score)
    if i%10==0:
        # NOTE(review): at i == 0 this averages a single episode; the window
        # only fills up after 10 games.
        average = np.mean(scores[-10:])
        percentage.append(average)
plt.plot(percentage)
plt.show()
| [
"numpy.mean",
"matplotlib.pyplot.plot",
"gym.make",
"tqdm.trange",
"matplotlib.pyplot.show"
] | [((898, 926), 'gym.make', 'gym.make', (['"""FrozenLake8x8-v0"""'], {}), "('FrozenLake8x8-v0')\n", (906, 926), False, 'import gym\n'), ((937, 951), 'tqdm.trange', 'trange', (['ngames'], {}), '(ngames)\n', (943, 951), False, 'from tqdm import trange\n'), ((1269, 1289), 'matplotlib.pyplot.plot', 'plt.plot', (['percentage'], {}), '(percentage)\n', (1277, 1289), True, 'import matplotlib.pyplot as plt\n'), ((1290, 1300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1298, 1300), True, 'import matplotlib.pyplot as plt\n'), ((1211, 1232), 'numpy.mean', 'np.mean', (['scores[-10:]'], {}), '(scores[-10:])\n', (1218, 1232), True, 'import numpy as np\n')] |
# --- built in ---
import os
import sys
import time
import math
import logging
import functools
# --- 3rd party ---
import numpy as np
import tensorflow as tf
# --- my module ---
__all__ = [
'ToyMLP',
'Energy',
'Trainer',
]
# --- primitives ---
class ToyMLP(tf.keras.Model):
  def __init__(
    self,
    input_dim=2,
    output_dim=1,
    units=(300, 300),
    silu=True,
    dropout=False
  ):
    """Toy MLP from
    https://github.com/ermongroup/ncsn/blob/master/runners/toy_runner.py#L198

    Args:
        input_dim (int, optional): input dimensions. Defaults to 2.
        output_dim (int, optional): output dimensions. Defaults to 1.
        units (sequence of int, optional): hidden units per layer.
            Defaults to (300, 300).  (A tuple default replaces the original
            mutable-list default to avoid the shared-default pitfall.)
        silu (bool, optional): use SiLU as activation function. Set False to
            use softplus instead. Defaults to True.
        dropout (bool, optional): insert Dropout(0.5) after each activation.
            Defaults to False.
    """
    super().__init__()
    layers = [tf.keras.layers.Flatten()]
    for unit in units:
      layers.extend([
        tf.keras.layers.Dense(unit),
        tf.keras.layers.Activation('silu' if silu else 'softplus'),
        # An identity Layer keeps the Sequential structure uniform when
        # dropout is disabled.
        tf.keras.layers.Dropout(.5) if dropout else tf.keras.layers.Layer()
      ])
    layers.append(tf.keras.layers.Dense(output_dim))
    self.net = tf.keras.Sequential(layers)
    # Forward a symbolic input once so all weights are built eagerly.
    dummy = tf.keras.Input((input_dim,), dtype=tf.float32)
    self.net(dummy)

  def call(self, x, training=True):
    """Forward pass; returns a tensor of shape (batch, output_dim)."""
    return self.net(x, training=training)
# --- energy model ---
class Energy(tf.keras.Model):
  """Wraps an energy network E(x) and exposes the score grad_x(-E(x)).

  The wrapped network must map a batch of samples to energies of shape
  (b, 1).
  """

  def __init__(self, net):
    super().__init__()
    self.net = net

  def call(self, x, training=True):
    """Return the raw energies E(x)."""
    return self.net(x, training=training)

  def score(self, x, sigma=None, training=True):
    """Return the score d(-E(x))/dx via automatic differentiation."""
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(x)
      neg_energy = -tf.math.reduce_sum(self.net(x, training=training))
    return tape.gradient(neg_energy, x)
class Trainer():
  # Orchestrates score-matching training (SSM / SSM-VR / DEEN / DSM) of an
  # Energy model, including threshold-free epoch loops, optional evaluation,
  # TensorBoard logging, and visualization callbacks.
  def __init__(
    self,
    model,
    learning_rate = 1e-3,
    clipnorm = 100.,
    n_slices = 1,
    loss_type = 'ssm-vr',
    noise_type = 'gaussian',
  ):
    """Energy based model trainer
    Args:
      model (tf.keras.Model): energy-based model exposing a `score(x)` method
      learning_rate (float, optional): learning rate. Defaults to 1e-3.
      clipnorm (float, optional): gradient clip. Defaults to 100..
      n_slices (int, optional): number of slices for sliced score matching loss.
        Defaults to 1.
      loss_type (str, optional): type of loss. Can be 'ssm-vr', 'ssm', 'deen',
        'dsm'. Defaults to 'ssm-vr'.
      noise_type (str, optional): type of noise. Can be 'radermacher', 'sphere'
        or 'gaussian'. Defaults to 'gaussian'.
    """
    self.model = model
    self.learning_rate = learning_rate
    self.clipnorm = clipnorm
    self.n_slices = n_slices
    self.loss_type = loss_type.lower()
    self.noise_type = noise_type.lower()
    # setup optimizer
    self.optimizer = tf.keras.optimizers.Adam(
      learning_rate=learning_rate,
      clipnorm=clipnorm
    )
    self.num_gradsteps = 0
    self.num_epochs = 0
    self.progress = 0
    self.tb_writer = None

  def ssm_loss(self, x, v):
    """SSM loss from
    Sliced Score Matching: A Scalable Approach to Density and Score Estimation
    The loss is computed as
      s = -dE(x)/dx
      loss = vT*(ds/dx)*v + 1/2*(vT*s)^2
    Args:
      x (tf.Tensor): input samples
      v (tf.Tensor): sampled noises
    Returns:
      SSM loss
    """
    x = tf.repeat(x, self.n_slices, axis=0) # (n_slices*b, ...)
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(x)
      score = self.model.score(x) # (n_slices*b, ...)
      sv = tf.math.reduce_sum(score * v) # ()
    gsv = tape.gradient(sv, x) # (n_slices*b, ...)
    loss1 = tf.math.reduce_sum(score * v, axis=-1) ** 2 * 0.5 # (n_slices*b,)
    loss2 = tf.math.reduce_sum(v * gsv, axis=-1) # (n_slices*b,)
    loss = tf.math.reduce_mean(loss1 + loss2) # ()
    return loss

  def ssm_vr_loss(self, x, v):
    """SSM-VR (variance reduction) loss from
    Sliced Score Matching: A Scalable Approach to Density and Score Estimation
    The loss is computed as
      s = -dE(x)/dx
      loss = vT*(ds/dx)*v + 1/2*||s||^2
    Args:
      x (tf.Tensor): input samples
      v (tf.Tensor): sampled noises
    Returns:
      SSM-VR loss
    """
    x = tf.repeat(x, self.n_slices, axis=0) # (n_slices*b, ...)
    with tf.GradientTape(watch_accessed_variables=False) as tape:
      tape.watch(x)
      score = self.model.score(x) # (n_slices*b, ...)
      sv = tf.math.reduce_sum(score * v) # ()
    gsv = tape.gradient(sv, x) # (n_slices*b, ...)
    loss1 = tf.norm(score, axis=-1) ** 2 * 0.5 # (n_slices*b,)
    loss2 = tf.math.reduce_sum(v*gsv, axis=-1) # (n_slices*b,)
    loss = tf.math.reduce_mean(loss1 + loss2) # ()
    return loss

  def deen_loss(self, x, v, sigma=0.1):
    """DEEN loss from
    Deep Energy Estimator Networks
    The loss is computed as
      x_ = x + v # noisy samples
      s = -dE(x_)/dx_
      loss = 1/2*||x - x_ + sigma^2*s||^2
    Args:
      x (tf.Tensor): input samples
      v (tf.Tensor): sampled noises
      sigma (float, optional): noise scale. Defaults to 0.1.
    Returns:
      DEEN loss
    """
    v = v * sigma
    x_ = x + v
    s = (sigma ** 2) * self.model.score(x_)
    loss = tf.norm(s+v, axis=-1)**2
    loss = 0.5 * tf.math.reduce_mean(loss)
    return loss

  def dsm_loss(self, x, v, sigma=0.1):
    """DSM loss from
    A Connection Between Score Matching
      and Denoising Autoencoders
    The loss is computed as
      x_ = x + v # noisy samples
      s = -dE(x_)/dx_
      loss = 1/2*||s + (x-x_)/sigma^2||^2
    Args:
      x (tf.Tensor): input samples
      v (tf.Tensor): sampled noises
      sigma (float, optional): noise scale. Defaults to 0.1.
    Returns:
      DSM loss
    """
    v = v * sigma
    x_ = x + v
    s = self.model.score(x_)
    loss = tf.norm(s + v/(sigma**2), axis=-1) ** 2
    loss = 0.5 * tf.math.reduce_mean(loss)
    return loss

  def get_random_noise(self, x, n_slices=None):
    """Sampling random noises
    Args:
      x (tf.Tensor): input samples
      n_slices (int, optional): number of slices. Defaults to None.
    Returns:
      tf.Tensor: sampled noises, shape (n_slices*b, ...) when n_slices is set,
        otherwise the same shape as x
    """
    if n_slices is None:
      v = tf.random.normal(x.shape)
    else:
      v = tf.random.normal((n_slices, *x.shape))
      v = tf.reshape(v, (-1, *v.shape[2:])) # (n_slices*b, 2)
    if self.noise_type == 'radermacher':
      # Rademacher noise: +/-1 entries from the sign of a Gaussian draw.
      v = tf.math.sign(v)
    elif self.noise_type == 'sphere':
      # Uniform on the sphere of radius sqrt(dim).
      v = v/tf.norm(v, axis=-1, keepdims=True) * np.sqrt(v.shape[-1])
    elif self.noise_type == 'gaussian':
      pass
    else:
      raise NotImplementedError(
        f"Noise type '{self.noise_type}' not implemented."
      )
    return v

  def get_loss(self, x, v=None):
    """Compute loss
    Args:
      x (tf.Tensor): input samples
      v (tf.Tensor, optional): sampled noises. Defaults to None.
    Returns:
      loss
    """
    # NOTE(review): a caller-supplied `v` is ignored; every branch draws
    # fresh noise internally.
    if self.loss_type == 'ssm-vr':
      v = self.get_random_noise(x, self.n_slices)
      loss = self.ssm_vr_loss(x, v)
    elif self.loss_type == 'ssm':
      v = self.get_random_noise(x, self.n_slices)
      loss = self.ssm_loss(x, v)
    elif self.loss_type == 'deen':
      v = self.get_random_noise(x, None)
      loss = self.deen_loss(x, v)
    elif self.loss_type == 'dsm':
      v = self.get_random_noise(x, None)
      loss = self.dsm_loss(x, v)
    else:
      raise NotImplementedError(
        f"Loss type '{self.loss_type}' not implemented."
      )
    return loss

  @tf.function
  def train_step(self, batch, update=True):
    """Train one batch
    Args:
      batch (dict): batch data, expects key 'samples'
      update (bool, optional): whether to update networks.
        Defaults to True.
    Returns:
      loss
    """
    x = batch['samples']
    # move inputs to device
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    vars = self.model.variables
    with tf.GradientTape() as tape:
      tape.watch(vars)
      # compute losses
      loss = self.get_loss(x)
    # update model
    if update:
      # compute gradients
      grads = tape.gradient(loss, vars)
      self.optimizer.apply_gradients(zip(grads, vars))
    return loss

  def train(self, dataset, batch_size):
    """Train one epoch
    Args:
      dataset (tf.data.Dataset): Tensorflow dataset
      batch_size (int): batch size
    Returns:
      np.ndarray: mean loss
    """
    all_losses = []
    dataset = dataset.batch(batch_size)
    for batch_data in dataset.as_numpy_iterator():
      sample_batch = {
        'samples': batch_data
      }
      loss = self.train_step(sample_batch)
      self.num_gradsteps += 1
      all_losses.append(loss)
    m_loss = np.mean(all_losses).astype(np.float32)
    return m_loss

  def eval(self, dataset, batch_size):
    """Eval one epoch
    Args:
      dataset (tf.data.Dataset): Tensorflow dataset
      batch_size (int): batch size
    Returns:
      np.ndarray: mean loss
    """
    all_losses = []
    dataset = dataset.batch(batch_size)
    for batch_data in dataset.as_numpy_iterator():
      sample_batch = {
        'samples': batch_data
      }
      # update=False: forward pass only, no gradient application.
      loss = self.train_step(sample_batch, update=False)
      all_losses.append(loss)
    m_loss = np.mean(all_losses).astype(np.float32)
    return m_loss

  def learn(
    self,
    train_dataset,
    eval_dataset = None,
    n_epochs = 5,
    batch_size = 100,
    log_freq = 1,
    eval_freq = 1,
    vis_freq = 1,
    vis_callback = None,
    tb_logdir = None
  ):
    """Train the model
    Args:
      train_dataset (tf.data.Dataset): training dataset
      eval_dataset (tf.data.Dataset, optional): evaluation dataset.
        Defaults to None.
      n_epochs (int, optional): number of epochs to train. Defaults to 5.
      batch_size (int, optional): batch size. Defaults to 100.
      log_freq (int, optional): logging frequency (epoch). Defaults to 1.
      eval_freq (int, optional): evaluation frequency (epoch). Defaults to 1.
      vis_freq (int, optional): visualizing frequency (epoch). Defaults to 1.
      vis_callback (callable, optional): visualization function. Defaults to None.
      tb_logdir (str, optional): path to tensorboard files. Defaults to None.
    Returns:
      self
    """
    if tb_logdir is not None:
      self.tb_writer = tf.summary.create_file_writer(tb_logdir)
    # initialize
    # NOTE(review): time_start / time_spent are set but never used below.
    time_start = time.time()
    time_spent = 0
    total_epochs = n_epochs
    for epoch in range(n_epochs):
      self.num_epochs += 1
      self.progress = float(self.num_epochs) / float(n_epochs)
      # train one epoch
      loss = self.train(train_dataset, batch_size)
      # write tensorboard
      if self.tb_writer is not None:
        with self.tb_writer.as_default():
          tf.summary.scalar(f'train/loss', loss, step=self.num_epochs)
      if (log_freq is not None) and (self.num_epochs % log_freq == 0):
        logging.info(
          f"[Epoch {self.num_epochs}/{total_epochs}]: loss: {loss}"
        )
      if (eval_dataset is not None) and (self.num_epochs % eval_freq == 0):
        # evaluate
        eval_loss = self.eval(eval_dataset, batch_size)
        if self.tb_writer is not None:
          with self.tb_writer.as_default():
            tf.summary.scalar(f'eval/loss', eval_loss, step=self.num_epochs)
        logging.info(
          f"[Eval {self.num_epochs}/{total_epochs}]: loss: {eval_loss}"
        )
      if (vis_callback is not None) and (self.num_epochs % vis_freq == 0):
        logging.debug("Visualizing")
        vis_callback(self)
    return self
| [
"numpy.sqrt",
"logging.debug",
"tensorflow.GradientTape",
"tensorflow.keras.layers.Dense",
"logging.info",
"tensorflow.math.sign",
"tensorflow.random.normal",
"numpy.mean",
"tensorflow.keras.Sequential",
"tensorflow.math.reduce_mean",
"tensorflow.convert_to_tensor",
"tensorflow.repeat",
"ten... | [((1432, 1459), 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['layers'], {}), '(layers)\n', (1451, 1459), True, 'import tensorflow as tf\n'), ((1507, 1553), 'tensorflow.keras.Input', 'tf.keras.Input', (['(input_dim,)'], {'dtype': 'tf.float32'}), '((input_dim,), dtype=tf.float32)\n', (1521, 1553), True, 'import tensorflow as tf\n'), ((3544, 3616), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate', 'clipnorm': 'clipnorm'}), '(learning_rate=learning_rate, clipnorm=clipnorm)\n', (3568, 3616), True, 'import tensorflow as tf\n'), ((4173, 4208), 'tensorflow.repeat', 'tf.repeat', (['x', 'self.n_slices'], {'axis': '(0)'}), '(x, self.n_slices, axis=0)\n', (4182, 4208), True, 'import tensorflow as tf\n'), ((4593, 4629), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(v * gsv)'], {'axis': '(-1)'}), '(v * gsv, axis=-1)\n', (4611, 4629), True, 'import tensorflow as tf\n'), ((4661, 4695), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['(loss1 + loss2)'], {}), '(loss1 + loss2)\n', (4680, 4695), True, 'import tensorflow as tf\n'), ((5152, 5187), 'tensorflow.repeat', 'tf.repeat', (['x', 'self.n_slices'], {'axis': '(0)'}), '(x, self.n_slices, axis=0)\n', (5161, 5187), True, 'import tensorflow as tf\n'), ((5557, 5593), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(v * gsv)'], {'axis': '(-1)'}), '(v * gsv, axis=-1)\n', (5575, 5593), True, 'import tensorflow as tf\n'), ((5623, 5657), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['(loss1 + loss2)'], {}), '(loss1 + loss2)\n', (5642, 5657), True, 'import tensorflow as tf\n'), ((9240, 9281), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['x'], {'dtype': 'tf.float32'}), '(x, dtype=tf.float32)\n', (9260, 9281), True, 'import tensorflow as tf\n'), ((12189, 12200), 'time.time', 'time.time', ([], {}), '()\n', (12198, 12200), False, 'import time\n'), ((1053, 1078), 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', 
([], {}), '()\n', (1076, 1078), True, 'import tensorflow as tf\n'), ((1377, 1410), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['output_dim'], {}), '(output_dim)\n', (1398, 1410), True, 'import tensorflow as tf\n'), ((2187, 2234), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': '(False)'}), '(watch_accessed_variables=False)\n', (2202, 2234), True, 'import tensorflow as tf\n'), ((4242, 4289), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': '(False)'}), '(watch_accessed_variables=False)\n', (4257, 4289), True, 'import tensorflow as tf\n'), ((4405, 4434), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(score * v)'], {}), '(score * v)\n', (4423, 4434), True, 'import tensorflow as tf\n'), ((5221, 5268), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'watch_accessed_variables': '(False)'}), '(watch_accessed_variables=False)\n', (5236, 5268), True, 'import tensorflow as tf\n'), ((5384, 5413), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(score * v)'], {}), '(score * v)\n', (5402, 5413), True, 'import tensorflow as tf\n'), ((6250, 6273), 'tensorflow.norm', 'tf.norm', (['(s + v)'], {'axis': '(-1)'}), '(s + v, axis=-1)\n', (6257, 6273), True, 'import tensorflow as tf\n'), ((6296, 6321), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss'], {}), '(loss)\n', (6315, 6321), True, 'import tensorflow as tf\n'), ((6943, 6979), 'tensorflow.norm', 'tf.norm', (['(s + v / sigma ** 2)'], {'axis': '(-1)'}), '(s + v / sigma ** 2, axis=-1)\n', (6950, 6979), True, 'import tensorflow as tf\n'), ((7004, 7029), 'tensorflow.math.reduce_mean', 'tf.math.reduce_mean', (['loss'], {}), '(loss)\n', (7023, 7029), True, 'import tensorflow as tf\n'), ((7378, 7403), 'tensorflow.random.normal', 'tf.random.normal', (['x.shape'], {}), '(x.shape)\n', (7394, 7403), True, 'import tensorflow as tf\n'), ((7434, 7472), 'tensorflow.random.normal', 'tf.random.normal', (['(n_slices, *x.shape)'], {}), 
'((n_slices, *x.shape))\n', (7450, 7472), True, 'import tensorflow as tf\n'), ((7489, 7522), 'tensorflow.reshape', 'tf.reshape', (['v', '(-1, *v.shape[2:])'], {}), '(v, (-1, *v.shape[2:]))\n', (7499, 7522), True, 'import tensorflow as tf\n'), ((7603, 7618), 'tensorflow.math.sign', 'tf.math.sign', (['v'], {}), '(v)\n', (7615, 7618), True, 'import tensorflow as tf\n'), ((9331, 9348), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (9346, 9348), True, 'import tensorflow as tf\n'), ((12105, 12145), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['tb_logdir'], {}), '(tb_logdir)\n', (12134, 12145), True, 'import tensorflow as tf\n'), ((4511, 4549), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(score * v)'], {'axis': '(-1)'}), '(score * v, axis=-1)\n', (4529, 4549), True, 'import tensorflow as tf\n'), ((5490, 5513), 'tensorflow.norm', 'tf.norm', (['score'], {'axis': '(-1)'}), '(score, axis=-1)\n', (5497, 5513), True, 'import tensorflow as tf\n'), ((10251, 10270), 'numpy.mean', 'np.mean', (['all_losses'], {}), '(all_losses)\n', (10258, 10270), True, 'import numpy as np\n'), ((10886, 10905), 'numpy.mean', 'np.mean', (['all_losses'], {}), '(all_losses)\n', (10893, 10905), True, 'import numpy as np\n'), ((12796, 12867), 'logging.info', 'logging.info', (['f"""[Epoch {self.num_epochs}/{total_epochs}]: loss: {loss}"""'], {}), "(f'[Epoch {self.num_epochs}/{total_epochs}]: loss: {loss}')\n", (12808, 12867), False, 'import logging\n'), ((13304, 13379), 'logging.info', 'logging.info', (['f"""[Eval {self.num_epochs}/{total_epochs}]: loss: {eval_loss}"""'], {}), "(f'[Eval {self.num_epochs}/{total_epochs}]: loss: {eval_loss}')\n", (13316, 13379), False, 'import logging\n'), ((13516, 13544), 'logging.debug', 'logging.debug', (['"""Visualizing"""'], {}), "('Visualizing')\n", (13529, 13544), False, 'import logging\n'), ((1151, 1178), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['unit'], {}), '(unit)\n', (1172, 1178), 
True, 'import tensorflow as tf\n'), ((1196, 1254), 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (["('silu' if silu else 'softplus')"], {}), "('silu' if silu else 'softplus')\n", (1222, 1254), True, 'import tensorflow as tf\n'), ((7716, 7736), 'numpy.sqrt', 'np.sqrt', (['v.shape[-1]'], {}), '(v.shape[-1])\n', (7723, 7736), True, 'import numpy as np\n'), ((12629, 12689), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['f"""train/loss"""', 'loss'], {'step': 'self.num_epochs'}), "(f'train/loss', loss, step=self.num_epochs)\n", (12646, 12689), True, 'import tensorflow as tf\n'), ((1272, 1300), 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1295, 1300), True, 'import tensorflow as tf\n'), ((1316, 1339), 'tensorflow.keras.layers.Layer', 'tf.keras.layers.Layer', ([], {}), '()\n', (1337, 1339), True, 'import tensorflow as tf\n'), ((7679, 7713), 'tensorflow.norm', 'tf.norm', (['v'], {'axis': '(-1)', 'keepdims': '(True)'}), '(v, axis=-1, keepdims=True)\n', (7686, 7713), True, 'import tensorflow as tf\n'), ((13206, 13270), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['f"""eval/loss"""', 'eval_loss'], {'step': 'self.num_epochs'}), "(f'eval/loss', eval_loss, step=self.num_epochs)\n", (13223, 13270), True, 'import tensorflow as tf\n')] |
"""
Test `sinethesizer.effects.stereo` module.
Author: <NAME>
"""
import numpy as np
import pytest
from sinethesizer.effects.stereo import apply_haas_effect, apply_panning
from sinethesizer.synth.core import Event
@pytest.mark.parametrize(
    "sound, event, location, max_channel_delay, expected",
    [
        # Case 1: negative `location` (panned left) -> right channel gains one
        # frame of leading silence. (Inferred from the expected arrays:
        # 0.5 * 0.1 s * 20 fps = 1 frame — confirm against `apply_haas_effect`.)
        (
            # `sound`
            np.array([
                [1, 2, 3],
                [2, 3, 4],
            ]),
            # `event`
            Event(
                instrument='any_instrument',
                start_time=0.0,
                duration=1.0,
                frequency=440.0,
                velocity=0.0,
                effects='',
                frame_rate=20
            ),
            # `location`
            -0.5,
            # `max_channel_delay`
            0.1,
            # `expected`
            np.array([
                [1, 2, 3, 0],
                [0, 2, 3, 4],
            ]),
        ),
        # Case 2: positive `location` (panned right) -> left channel delayed
        # instead; the roles of the two rows are mirrored.
        (
            # `sound`
            np.array([
                [1, 2, 3],
                [2, 3, 4],
            ]),
            # `event`
            Event(
                instrument='any_instrument',
                start_time=0.0,
                duration=1.0,
                frequency=440.0,
                velocity=0.0,
                effects='',
                frame_rate=20
            ),
            # `location`
            0.5,
            # `max_channel_delay`
            0.1,
            # `expected`
            np.array([
                [0, 1, 2, 3],
                [2, 3, 4, 0],
            ]),
        ),
    ]
)
def test_apply_haas_effect(
    sound: np.ndarray, event: Event,
    location: float, max_channel_delay: float,
    expected: np.ndarray
) -> None:
    """Test `apply_haas_effect` function."""
    # Exact (integer) equality: the effect only shifts/pads samples here.
    result = apply_haas_effect(sound, event, location, max_channel_delay)
    np.testing.assert_equal(result, expected)
@pytest.mark.parametrize(
    "sound, event, left_volume_ratio, right_volume_ratio, expected",
    [
        # The left channel (row 0) is scaled by `left_volume_ratio` and the
        # right channel (row 1) by `right_volume_ratio`, as the expected
        # arrays show (0.5 * [1,2,3] and 0.1 * [2,3,4]).
        (
            # `sound`
            np.array([
                [1.0, 2, 3],
                [2, 3, 4],
            ]),
            # `event`
            Event(
                instrument='any_instrument',
                start_time=0.0,
                duration=1.0,
                frequency=440.0,
                velocity=0.0,
                effects='',
                frame_rate=20
            ),
            # `left_volume_ratio`
            0.5,
            # `right_volume_ratio`
            0.1,
            # `expected`
            np.array([
                [0.5, 1.0, 1.5],
                [0.2, 0.3, 0.4],
            ]),
        ),
    ]
)
def test_apply_panning(
    sound: np.ndarray, event: Event,
    left_volume_ratio: float, right_volume_ratio: float,
    expected: np.ndarray
) -> None:
    """Test `apply_panning` function."""
    # Approximate equality: float scaling introduces rounding error.
    result = apply_panning(sound, event, left_volume_ratio, right_volume_ratio)
    np.testing.assert_almost_equal(result, expected)
| [
"numpy.testing.assert_equal",
"sinethesizer.effects.stereo.apply_haas_effect",
"sinethesizer.effects.stereo.apply_panning",
"numpy.testing.assert_almost_equal",
"numpy.array",
"sinethesizer.synth.core.Event"
] | [((1812, 1872), 'sinethesizer.effects.stereo.apply_haas_effect', 'apply_haas_effect', (['sound', 'event', 'location', 'max_channel_delay'], {}), '(sound, event, location, max_channel_delay)\n', (1829, 1872), False, 'from sinethesizer.effects.stereo import apply_haas_effect, apply_panning\n'), ((1877, 1918), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['result', 'expected'], {}), '(result, expected)\n', (1900, 1918), True, 'import numpy as np\n'), ((2905, 2971), 'sinethesizer.effects.stereo.apply_panning', 'apply_panning', (['sound', 'event', 'left_volume_ratio', 'right_volume_ratio'], {}), '(sound, event, left_volume_ratio, right_volume_ratio)\n', (2918, 2971), False, 'from sinethesizer.effects.stereo import apply_haas_effect, apply_panning\n'), ((2976, 3024), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['result', 'expected'], {}), '(result, expected)\n', (3006, 3024), True, 'import numpy as np\n'), ((355, 387), 'numpy.array', 'np.array', (['[[1, 2, 3], [2, 3, 4]]'], {}), '([[1, 2, 3], [2, 3, 4]])\n', (363, 387), True, 'import numpy as np\n'), ((470, 597), 'sinethesizer.synth.core.Event', 'Event', ([], {'instrument': '"""any_instrument"""', 'start_time': '(0.0)', 'duration': '(1.0)', 'frequency': '(440.0)', 'velocity': '(0.0)', 'effects': '""""""', 'frame_rate': '(20)'}), "(instrument='any_instrument', start_time=0.0, duration=1.0, frequency=\n 440.0, velocity=0.0, effects='', frame_rate=20)\n", (475, 597), False, 'from sinethesizer.synth.core import Event\n'), ((851, 889), 'numpy.array', 'np.array', (['[[1, 2, 3, 0], [0, 2, 3, 4]]'], {}), '([[1, 2, 3, 0], [0, 2, 3, 4]])\n', (859, 889), True, 'import numpy as np\n'), ((993, 1025), 'numpy.array', 'np.array', (['[[1, 2, 3], [2, 3, 4]]'], {}), '([[1, 2, 3], [2, 3, 4]])\n', (1001, 1025), True, 'import numpy as np\n'), ((1108, 1235), 'sinethesizer.synth.core.Event', 'Event', ([], {'instrument': '"""any_instrument"""', 'start_time': '(0.0)', 'duration': '(1.0)', 'frequency': 
'(440.0)', 'velocity': '(0.0)', 'effects': '""""""', 'frame_rate': '(20)'}), "(instrument='any_instrument', start_time=0.0, duration=1.0, frequency=\n 440.0, velocity=0.0, effects='', frame_rate=20)\n", (1113, 1235), False, 'from sinethesizer.synth.core import Event\n'), ((1488, 1526), 'numpy.array', 'np.array', (['[[0, 1, 2, 3], [2, 3, 4, 0]]'], {}), '([[0, 1, 2, 3], [2, 3, 4, 0]])\n', (1496, 1526), True, 'import numpy as np\n'), ((2066, 2100), 'numpy.array', 'np.array', (['[[1.0, 2, 3], [2, 3, 4]]'], {}), '([[1.0, 2, 3], [2, 3, 4]])\n', (2074, 2100), True, 'import numpy as np\n'), ((2183, 2310), 'sinethesizer.synth.core.Event', 'Event', ([], {'instrument': '"""any_instrument"""', 'start_time': '(0.0)', 'duration': '(1.0)', 'frequency': '(440.0)', 'velocity': '(0.0)', 'effects': '""""""', 'frame_rate': '(20)'}), "(instrument='any_instrument', start_time=0.0, duration=1.0, frequency=\n 440.0, velocity=0.0, effects='', frame_rate=20)\n", (2188, 2310), False, 'from sinethesizer.synth.core import Event\n'), ((2573, 2617), 'numpy.array', 'np.array', (['[[0.5, 1.0, 1.5], [0.2, 0.3, 0.4]]'], {}), '([[0.5, 1.0, 1.5], [0.2, 0.3, 0.4]])\n', (2581, 2617), True, 'import numpy as np\n')] |
import enum
from typing import Any, Optional, Union, cast
import numpy as np
import scipy.special
import sklearn.metrics as skm
from . import util
from .util import TaskType
class PredictionType(enum.Enum):
    """How raw model outputs should be interpreted when deriving labels/probabilities."""
    LOGITS = 'logits'
    PROBS = 'probs'
def calculate_rmse(
    y_true: np.ndarray, y_pred: np.ndarray, std: Optional[float]
) -> float:
    """Root-mean-squared error, rescaled by `std` when the targets were standardized."""
    error = skm.mean_squared_error(y_true, y_pred) ** 0.5
    return error if std is None else error * std
def _get_labels_and_probs(
    y_pred: np.ndarray, task_type: TaskType, prediction_type: Optional[PredictionType]
) -> tuple[np.ndarray, Optional[np.ndarray]]:
    """Turn raw classification outputs into (hard labels, probabilities).

    When `prediction_type` is None the predictions are assumed to already be
    labels and are returned unchanged (with no probabilities).
    """
    assert task_type in (TaskType.BINCLASS, TaskType.MULTICLASS)
    if prediction_type is None:
        return y_pred, None
    # Map raw outputs to probabilities.
    if prediction_type == PredictionType.LOGITS:
        if task_type == TaskType.BINCLASS:
            probs = scipy.special.expit(y_pred)
        else:
            probs = scipy.special.softmax(y_pred, axis=1)
    elif prediction_type == PredictionType.PROBS:
        probs = y_pred
    else:
        util.raise_unknown('prediction_type', prediction_type)
    assert probs is not None
    # Hard labels: threshold at 0.5 for binary, argmax otherwise.
    if task_type == TaskType.BINCLASS:
        labels = np.round(probs)
    else:
        labels = probs.argmax(axis=1)
    return labels.astype('int64'), probs
def calculate_metrics(
    y_true: np.ndarray,
    y_pred: np.ndarray,
    task_type: Union[str, TaskType],
    prediction_type: Optional[Union[str, PredictionType]],
    y_info: dict[str, Any],
) -> dict[str, Any]:
    """Compute task-appropriate metrics: RMSE for regression, a
    classification report (plus ROC-AUC for binary) otherwise.

    Example: calculate_metrics(y_true, y_pred, 'binclass', 'logits', {})
    """
    task_type = TaskType(task_type)
    prediction_type = (
        None if prediction_type is None else PredictionType(prediction_type)
    )
    if task_type == TaskType.REGRESSION:
        # Regression predictions are raw values; `std` rescales standardized targets.
        assert prediction_type is None
        assert 'std' in y_info
        return {'rmse': calculate_rmse(y_true, y_pred, y_info['std'])}
    labels, probs = _get_labels_and_probs(y_pred, task_type, prediction_type)
    report = skm.classification_report(y_true, labels, output_dict=True)
    result = cast(dict[str, Any], report)
    if task_type == TaskType.BINCLASS:
        result['roc_auc'] = skm.roc_auc_score(y_true, probs)
    return result
| [
"sklearn.metrics.classification_report",
"sklearn.metrics.roc_auc_score",
"numpy.round",
"sklearn.metrics.mean_squared_error"
] | [((363, 401), 'sklearn.metrics.mean_squared_error', 'skm.mean_squared_error', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (385, 401), True, 'import sklearn.metrics as skm\n'), ((1165, 1180), 'numpy.round', 'np.round', (['probs'], {}), '(probs)\n', (1173, 1180), True, 'import numpy as np\n'), ((2053, 2112), 'sklearn.metrics.classification_report', 'skm.classification_report', (['y_true', 'labels'], {'output_dict': '(True)'}), '(y_true, labels, output_dict=True)\n', (2078, 2112), True, 'import sklearn.metrics as skm\n'), ((2198, 2230), 'sklearn.metrics.roc_auc_score', 'skm.roc_auc_score', (['y_true', 'probs'], {}), '(y_true, probs)\n', (2215, 2230), True, 'import sklearn.metrics as skm\n')] |
import matplotlib.pyplot as plt
import numpy as np
from matplotlib import cm
from matplotlib import colors
from matplotlib import patches
import os.path as path
from Synthesis.units import *
from tqdm import tqdm
from scipy.integrate import quad
def Power_Law(x, a, b):
    """Evaluate the power law ``a * x**b`` (vectorized over `x` via numpy)."""
    powered = np.power(x, b)
    return a * powered
def scatter_parameters(pop):
    """Scatter plot of total disk mass vs. surface-density power-law exponent,
    colored by the surface-density reference value at 1 au.

    Args:
        pop: population object exposing `SIMS` and plotting config
            (cmap_standart, figsize, fontsize, plot_config, PLOT, dpi).
    """
    TotalMasses = []
    SigmaCoeffs = []
    Reference = []
    for sim in pop.SIMS.values():
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # Debug prints of the exponent and one version of the reference value.
        print(sim.Sigma_Exponent)
        print(sim.Sigma_Norm * (R_S / au)**sim.Sigma_Exponent / denstos)
        # NOTE(review): this divides by (R_S/au)**exponent while the print above
        # multiplies, and `denstos` is applied twice — confirm which unit
        # conversion is actually intended.
        Reference.append(sim.Sigma_Norm / (R_S / au)**sim.Sigma_Exponent / denstos * pow(au/R_S, sim.Sigma_Exponent) / denstos)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    cmin = min(Reference)
    cmax = max(Reference)
    # Logarithmic color scale over the reference surface densities.
    norm = colors.LogNorm(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SigmaCoeffs, TotalMasses, c=Reference, cmap=cmap, norm=norm, s=12)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
    # Secondary y-axis: same masses expressed in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Reference Value at $1 \mathrm{au}$ [$\mathrm{g}\mathrm{cm}^{-2}$]', ax=ax2, pad=0.12)
    # ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_numbers(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of total disk mass vs. surface-density exponent, colored
    by the number of surviving planets that pass the mass/distance cuts.

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): lower planet-mass cut in Earth masses.
            Defaults to 0.
        a_up_lim (float, optional): upper orbital-distance cut in au.
            Defaults to 30.
    """
    # NOTE: removed unused locals (Reference, Means, Systems) and
    # commented-out mean-mass computation from the original.
    total_masses = []
    sigma_exponents = []
    counts = []
    for sim in pop.SIMS.values():
        total_masses.append(sim.Total_Mass)
        sigma_exponents.append(sim.Sigma_Exponent)
        final_satellites = sim.snaps[sim.N_snaps - 1].satellites
        masses = final_satellites['M'].values * M_S / M_E
        orb_dists = final_satellites['a'].values * R_S / au
        # Count planets passing both the mass and distance cuts.
        counts.append(sum(
            1 for m, a in zip(masses, orb_dists)
            if m >= m_low_lim and a <= a_up_lim
        ))
    print(counts)
    counts = np.array(counts)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    norm = colors.Normalize(min(counts), max(counts))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(sigma_exponents, total_masses, c=counts, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=sigma_exponents)
    # Secondary y-axis: same masses expressed in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Number of Planets', ax=ax2, pad=0.12)
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_numbers.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_lost_mass(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of total lost satellite mass vs. the surface-density
    reference value, colored by the power-law exponent.

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): unused here. Defaults to 0.
        a_up_lim (float, optional): unused here. Defaults to 30.
    """
    TotalMasses = []
    SigmaCoeffs = []
    lost_mass = []
    numbers = []
    Reference = []
    TM = []
    for id, sim in pop.SIMS.items():
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # NOTE(review): `TM` and `numbers` are filled but never used below.
        TM.append(np.sum([item for item in list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)]))
        Reference.append(sim.Sigma_Norm / (R_S / au)**sim.Sigma_Exponent / denstos * pow(au/R_S, sim.Sigma_Exponent) / denstos)
        masses = sim.lost_satellites['mass'].values * M_S / M_J
        cols = sim.lost_satellites['collision'].values
        # Keep only satellites lost without a collision (collision flag == 0).
        filter_null = cols == 0.0
        filtered = masses[filter_null]
        summed = np.sum(filtered)
        numbers.append(len(filtered))
        # summed = np.sum(filtered)
        lost_mass.append(summed)
    lost_mass = np.array(lost_mass)
    #Means = np.array(Means) / np.sum(Means)
    # print(Numbers / np.array(Means))
    # NOTE(review): `Numbers` is never used after this assignment.
    Numbers = np.array(SigmaCoeffs)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    # arr = np.unique(SigmaCoeffs)
    # Discrete colormap: one boundary bin per distinct exponent value.
    cmap = plt.get_cmap(pop.cmap_standart,len(SigmaCoeffs))
    norm = colors.BoundaryNorm(np.linspace(-1.625, -0.375, len(np.unique(SigmaCoeffs))+1, endpoint=True), cmap.N)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Reference, lost_mass, c=SigmaCoeffs, cmap=cmap, norm=norm, s=12)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(ylabel='Total Lost Mass [$\mathrm{M_J}$]', xlabel=r'Reference Value at 1 $\mathrm{au}$ [$\mathrm{g cm^{-2}}$]')
    # ax2 = ax.twinx()
    # mn, mx = ax.get_ylim()
    # ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    # ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap,norm=norm), orientation='vertical',
                 label=r'Power-Law Exponent', ax=ax, ticks=np.unique(SigmaCoeffs))
    ax.set_yscale('log')
    ax.set_xscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_reference_lost_mass.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_AMD(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of total disk mass vs. surface-density exponent, colored
    by each simulation's angular momentum deficit (AMD).

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): lower mass cut forwarded to `get_AMD`.
        a_up_lim (float, optional): upper distance cut forwarded to `get_AMD`.
    """
    TotalMasses = []
    SigmaCoeffs = []
    # NOTE(review): Reference/Masses/Orb_Dist/Numbers/Means/Systems stay empty
    # and are effectively dead; the Means/Numbers lines below therefore
    # compute and print empty arrays.
    Reference = []
    Masses = []
    Orb_Dist = []
    Numbers = []
    Means = []
    Systems = []
    AMDS = []
    for sim in tqdm(pop.SIMS.values()):
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # AMD is computed per simulation with the given cuts; N is unused.
        AMD, N = sim.get_AMD(m_low_lim, a_up_lim)
        AMDS.append(AMD)
    Means = np.array(Means) / np.sum(Means)
    print(Numbers * Means)
    Numbers = np.array(AMDS)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    cmin = min(Numbers)
    cmax = max(Numbers)
    # Logarithmic color scale over the AMD values.
    norm = colors.LogNorm(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SigmaCoeffs, TotalMasses, c=Numbers, cmap=cmap, norm=norm, s=12)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
    # Secondary y-axis: same masses expressed in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'AMD', ax=ax2, pad=0.12)
    # ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_amd.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_parameters_RMC(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of total disk mass vs. surface-density exponent, colored
    by each simulation's radial mass concentration (RMC).

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): lower mass cut forwarded to `get_RMC`.
        a_up_lim (float, optional): upper distance cut forwarded to `get_RMC`.
    """
    TotalMasses = []
    SigmaCoeffs = []
    # NOTE(review): Reference/Masses/Orb_Dist/Numbers/Means/Systems stay empty
    # and are effectively dead; the Means/Numbers lines below therefore
    # compute and print empty arrays.
    Reference = []
    Masses = []
    Orb_Dist = []
    Numbers = []
    Means = []
    Systems = []
    RMCS = []
    for sim in tqdm(pop.SIMS.values()):
        TotalMasses.append(sim.Total_Mass)
        SigmaCoeffs.append(sim.Sigma_Exponent)
        # RMC is computed per simulation with the given cuts; N is unused.
        RMC, N = sim.get_RMC(m_low_lim, a_up_lim)
        RMCS.append(RMC)
    Means = np.array(Means) / np.sum(Means)
    print(Numbers * Means)
    Numbers = np.array(RMCS)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    cmin = min(RMCS)
    cmax = max(RMCS)
    # Linear color scale (hence the '_nonlog' output filename).
    norm = colors.Normalize(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SigmaCoeffs, TotalMasses, c=RMCS, cmap=cmap, norm=norm, s=12)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SigmaCoeffs)
    # Secondary y-axis: same masses expressed in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'RMC', ax=ax2, pad=0.12)
    # ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_rmc_nonlog.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_collision_number(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of total disk mass vs. surface-density exponent, colored
    by the number of recorded collisions in each simulation.

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): unused here; kept for a uniform signature.
        a_up_lim (float, optional): unused here; kept for a uniform signature.
    """
    disk_masses = []
    exponents = []
    n_collisions = []
    for sim in tqdm(pop.SIMS.values()):
        disk_masses.append(sim.Total_Mass)
        exponents.append(sim.Sigma_Exponent)
        n_collisions.append(len(sim.collisions.index))
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    cmap = pop.cmap_standart
    norm = colors.Normalize(min(n_collisions), max(n_collisions))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(exponents, disk_masses, c=n_collisions, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=exponents)
    # Secondary y-axis: same masses expressed in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Number of Collisions', ax=ax2, pad=0.12)
    # ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Synthesis Parameters')
    fig.savefig(path.join(pop.PLOT, 'scatter_parameters_collision_number.png'), transparent=False, dpi=pop.dpi,
                bbox_inches="tight")
    plt.close(fig)
def scatter_ecc_inc(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of eccentricity vs. sin(inclination) for all final
    planets, colored by orbital distance.

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): lower planet-mass cut in Earth masses.
        a_up_lim (float, optional): upper orbital-distance cut in au.
    """
    Masses = []
    Orb_Dist = []
    Ecc = []
    Inc = []
    Types = []
    # Pool the final-snapshot satellites of every simulation.
    for sim in pop.SIMS.values():
        Masses += list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
        Orb_Dist += list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
        Ecc += list(sim.snaps[sim.N_snaps - 1].satellites['e'].values)
        Inc += list(sim.snaps[sim.N_snaps - 1].satellites['i'].values)
        Types += list(sim.snaps[sim.N_snaps - 1].satellites['Type'].values)
    data = zip(Masses, Orb_Dist, Ecc, Inc, Types)
    # Apply mass and orbital-distance cuts.
    data = [item for item in data if item[0] >= m_low_lim and item[1] <= a_up_lim]
    Masses, Orb_Dist, Ecc, Inc, Types = zip(*data)
    # Bodies of Type 1 whose mass is within 1% of 0.01 M_E — presumably
    # embryos that never accreted significantly (confirm 0.01 M_E is the
    # embryo seed mass).
    number_of_no_accretion = len([item for item in data if np.abs(0.01-item[0])/item[0] < 0.01 and item[-1] == 1])
    print(f'Number of Object: {len(Masses)}')
    print(f'Number of Embryos with no significant accretion: {number_of_no_accretion}, {number_of_no_accretion/len(Masses)}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    cmin = min(Orb_Dist)
    cmax = max(Orb_Dist)
    norm = colors.LogNorm(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    # Inc/360 * 2*pi converts degrees to radians — assumes 'i' is in degrees.
    ax.scatter(Ecc, np.sin(np.array(Inc)/360 * 2 * np.pi), c=Orb_Dist, cmap=cmap, norm=norm, s=3)
    # ax.scatter(Ecc, np.sin(np.array(Inc)), c=Orb_Dist, cmap=cmap, norm=norm, s=3)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel='Eccentricity', ylabel=r'$\sin(\mathrm{inclination})$')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Orbital Distance [$\mathrm{au}$]',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        ax.set(title=r'Eccentricity and Inclination')
    # Mark the output as limited when non-default cuts are active.
    save_name = 'scatter_ecc_inc'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
def scatter_a_mass(pop, m_low_lim=0, a_up_lim=30):
    """Scatter plot of planet mass vs. orbital distance, colored by the
    total water mass fraction (liquid + solid).

    Args:
        pop: population object exposing `SIMS` and plotting config.
        m_low_lim (float, optional): lower planet-mass cut in Earth masses.
        a_up_lim (float, optional): upper orbital-distance cut in au.
    """
    Masses = []
    Orb_Dist = []
    WM = []
    SWM = []
    # Pool the final-snapshot satellites of every simulation.
    for sim in pop.SIMS.values():
        Masses += list(sim.snaps[sim.N_snaps - 1].satellites['M'].values * M_S / M_E)
        Orb_Dist += list(sim.snaps[sim.N_snaps - 1].satellites['a'].values * R_S / au)
        WM += list(sim.snaps[sim.N_snaps - 1].satellites['WM'].values * M_S / M_E)
        SWM += list(sim.snaps[sim.N_snaps - 1].satellites['SWM'].values * M_S / M_E)
    data = zip(Masses, Orb_Dist, WM, SWM)
    # Apply the cuts and convert absolute water masses to mass fractions.
    data = [(m, a, wm / m, swm / m) for (m, a, wm, swm) in data if m >= m_low_lim and a <= a_up_lim]
    Masses, Orb_Dist, WMF, SWMF = zip(*data)
    # Total water mass fraction = liquid (WMF) + solid (SWMF).
    TWMF = np.array(WMF) + np.array(SWMF)
    print(f'Number of Object: {len(Masses)}')
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    cmin = min(TWMF)
    cmax = max(TWMF)
    norm = colors.Normalize(cmin, cmax)
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Orb_Dist, Masses, c=TWMF, cmap=cmap, norm=norm, s=2, alpha=1)
    x_labels = ax.get_xticklabels()
    plt.setp(x_labels, horizontalalignment='center')
    ax.set(xlabel=r'Orbital Distance [$\mathrm{au}$]', ylabel=r'Mass [$\mathrm{M_{\oplus}}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Total WMF',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    if pop.plot_config == 'presentation':
        # NOTE(review): title looks copy-pasted from scatter_ecc_inc — confirm.
        ax.set(title=r'Eccentricity and Inclination')
    # Mark the output as limited when non-default cuts are active.
    save_name = 'scatter_a_mass'
    if a_up_lim < 30 and m_low_lim > 0:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)
def _log_bins(values, n_bins=15):
    """Return logarithmically spaced histogram bins spanning *values*."""
    return 10 ** np.linspace(np.log10(min(values)), np.log10(max(values)), n_bins)


def _finish_figure(pop, fig, ax, save_name, lim, title):
    """Apply the presentation title (if configured), save *fig* and close it.

    *lim* appends the '_lim' file-name suffix used when mass/distance
    limits are active.  The figure is written to ``pop.PLOT``.
    """
    if pop.plot_config == 'presentation':
        ax.set(title=title)
    if lim:
        save_name += '_lim'
    fig.savefig(path.join(pop.PLOT, save_name + '.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
    plt.close(fig)


def _hist_fraction(pop, values, vline, save_name, lim, xlabel=r'Mass Fraction', log_y=False):
    """Histogram of *values* on log-spaced bins with a red reference line at *vline*."""
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.hist(values, bins=_log_bins(values), rwidth=0.95)
    ax.axvline(vline, color='red', linewidth=1)
    ax.set(xlabel=xlabel, ylabel=r'Counts')
    ax.set_xscale('log')
    if log_y:
        ax.set_yscale('log')
    _finish_figure(pop, fig, ax, save_name, lim,
                   r'Histogram of Terrestrial Planet Water Mass Fractions')


def scatter_radial_twmf(pop, m_low_lim=0, a_up_lim=30):
    """Statistics and plots of the water content of the final planet population.

    Collects every satellite from the last snapshot of each simulation in
    ``pop.SIMS``, restricts to roughly Earth-mass planets (0.3-3 Earth
    masses), prints survival statistics for several water-mass-fraction
    (WMF) cuts, and writes a series of histograms and scatter plots to
    ``pop.PLOT``.

    Parameters
    ----------
    pop : population object providing SIMS, NSIMS, plot configuration and paths.
    m_low_lim, a_up_lim : kept for interface compatibility; they only
        control the '_lim' suffix of the output file names.
    """
    lim = a_up_lim < 30 and m_low_lim > 0
    Masses = []
    Orb_Dist = []
    WM = []
    SWM = []
    Ecc = []
    System = []
    for key, sim in pop.SIMS.items():
        final = sim.snaps[sim.N_snaps - 1].satellites
        Masses += list(final['M'].values * M_S / M_E)
        # Eccentricity is dimensionless; the original scaled it by M_S / M_E
        # (copy-paste from the mass conversion) — removed here.
        Ecc += list(final['e'].values)
        Orb_Dist += list(final['a'].values * R_S / au)
        WM += list(final['WM'].values * M_S / M_E)
        SWM += list(final['SWM'].values * M_S / M_E)
        System += [key] * len(final['M'].values)
    WMF = np.array(WM) / np.array(Masses)
    SWMF = np.array(SWM) / np.array(Masses)
    TWMF = WMF + SWMF
    total_number = len(Masses)
    print(f'Total Number of planets: {total_number}')
    # Restrict to roughly Earth-mass planets (0.3 - 3 Earth masses).
    # Tuple layout: (mass, orb_dist, wmf, swmf, twmf, ecc, system).
    data = [item for item in zip(Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System)
            if 0.3 <= item[0] <= 3]
    mass_lim_number = len(data)
    print(f'Number of planets in mass limit: {mass_lim_number}, {mass_lim_number/total_number}')

    # Histogram: liquid WMF of planets with nonzero liquid water.
    data_wmf = [item for item in data if item[2] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_wmf)
    n_ea_ml_nz_wmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero liquid watermass fraction: {n_ea_ml_nz_wmf}, {n_ea_ml_nz_wmf/mass_lim_number}')
    _hist_fraction(pop, WMF, OE / M_E, 'histogram_earth_analogs_wmf', lim)

    # Histogram: liquid WMF with an additional hydrated-solids cut.
    data_wmf_lim = [item for item in data if item[2] > 0.0 and item[3] > 0.00075]
    _, _, WMF, SWMF, _, _, _ = zip(*data_wmf_lim)
    _hist_fraction(pop, WMF, OE / M_E, 'histogram_earth_analogs_wmf_lim', lim)

    # Histogram: liquid WMF with a liquid-water cut and nonzero solids.
    data_swmf_lim = [item for item in data if item[3] > 0.0 and item[2] > 0.00025]
    _, _, WMF, SWMF, _, _, _ = zip(*data_swmf_lim)
    _hist_fraction(pop, WMF, OE / M_E, 'histogram_earth_analogs_swmf_lim', lim)

    # Histogram: hydrated-solids WMF of planets with nonzero solid water.
    # Fixed: the original computed the bins from SWMF but histogrammed WMF.
    data_swmf = [item for item in data if item[3] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_swmf)
    n_ea_ml_nz_swmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero hydrated solids watermass fraction: {n_ea_ml_nz_swmf}, {n_ea_ml_nz_swmf/mass_lim_number}')
    _hist_fraction(pop, SWMF, 3 * OE / M_E, 'histogram_earth_analogs_swmf', lim)

    # Histogram: ratio of liquid to solid WMF for planets that have both.
    data_twmf = [item for item in data if item[2] > 0.0 and item[3] > 0.0]
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data_twmf)
    n_ea_ml_nz_twmf = len(Masses)
    print(f'Number of planets in mass limit with nonzero wmf and swmf: {n_ea_ml_nz_twmf}, {n_ea_ml_nz_twmf/mass_lim_number}')
    ratios = np.array(WMF) / np.array(SWMF)
    _hist_fraction(pop, ratios, 1 / 3, 'histogram_earth_analogs_twmf', lim,
                   xlabel=r'Ratio', log_y=True)

    # Radial scatter of total WMF for planets with any water at all.
    data = [item for item in data if item[4] > 0]
    non_zero_wm = data.copy()
    non_zero_wmf_number = len(data)
    print(f'Number of planets in mass limit with non zero TWMF: {non_zero_wmf_number}, {non_zero_wmf_number/mass_lim_number} ({non_zero_wmf_number/total_number})')
    Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*data)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    norm = colors.LogNorm(min(TWMF), max(TWMF))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(Orb_Dist, Masses, c=TWMF, cmap=cmap, norm=norm, s=7, alpha=1)
    # Cross-hairs at 1 au / 1 Earth mass for orientation.
    ax.axvline(1, color='black', linewidth=0.7, linestyle='--')
    ax.axhline(1, color='black', linewidth=0.7, linestyle='--')
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel=r'Orbital Distance [$\mathrm{au}$]', ylabel=r'Mass [$\mathrm{M_{\oplus}}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical', label=r'Total WMF', ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    _finish_figure(pop, fig, ax, 'scatter_radial_twmf', lim,
                   r'Total WMF Radial Distribution')

    def scatter_pie(earth_analogs):
        """Scatter of total WMF vs. orbital distance with pie markers showing
        the liquid (blue) vs. hydrated-solids (red) share of each planet's
        water budget.  Earth is added as a green-backed reference point.
        """
        plt.rcParams.update({'figure.autolayout': True})
        plt.style.use('seaborn-paper')
        plt.rcParams.update({'font.size': pop.fontsize})
        plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
        fig, ax = plt.subplots(figsize=pop.figsize)
        handles = [patches.Patch(color='red', label='Hydrated Silica'),
                   patches.Patch(color='blue', label='Water/Ice')]
        Masses, Orb_Dist, WMF, SWMF, TWMF, Ecc, System = zip(*earth_analogs)

        def pie_1d(r1):
            """Marker polygons for a two-slice pie split at fraction *r1*."""
            # First slice: arc from angle 0 to 2*pi*r1 plus the origin.
            x1 = np.cos(2 * np.pi * np.linspace(0, r1))
            y1 = np.sin(2 * np.pi * np.linspace(0, r1))
            # np.vstack replaces np.row_stack, which was removed in NumPy 2.0.
            xy1 = np.vstack([[0, 0], np.column_stack([x1, y1])])
            # Second slice: the remainder of the circle.
            x2 = np.cos(2 * np.pi * np.linspace(r1, 1))
            y2 = np.sin(2 * np.pi * np.linspace(r1, 1))
            xy2 = np.vstack([[0, 0], np.column_stack([x2, y2])])
            return xy1, np.abs(xy1).max(), xy2, np.abs(xy2).max()

        def normalize_mass(m):
            """Position of *m* within the log-mass range of the sample, in [0, 1]."""
            return ((np.log10(m) - np.log10(np.min(Masses)))
                    / (np.log10(np.max(Masses)) - np.log10(np.min(Masses))))

        # Reference Earth: (mass, a, wmf, swmf, twmf, ecc, system).
        earth_point = (1, 1, 0.00025, 0.00075, 0.001, 0, 0)

        def plot_one(row, earth=False):
            wmf_ratio = row[2] / row[4]
            xy1, s1, xy2, s2 = pie_1d(wmf_ratio)
            scale = normalize_mass(row[0]) * 50
            if earth:
                # Green halo marking the Earth reference point.
                ax.scatter(row[1], row[4], s=s2 * scale * 2, facecolor='green')
            ax.scatter(row[1], row[4], marker=xy1, s=s1 * scale, facecolor='blue')
            ax.scatter(row[1], row[4], marker=xy2, s=s2 * scale, facecolor='red')

        for row in earth_analogs:
            plot_one(row)
        plot_one(earth_point, True)
        ax.set_xlabel(r'Orbital Distance [$\mathrm{au}$]')
        ax.set_ylabel('Total Water Mass Fractions')
        ax.set_xscale('log')
        ax.set_yscale('log')
        ax.legend(handles=handles, title='Components')
        fig.savefig(path.join(pop.PLOT, 'scatter_ratios.png'), transparent=False, dpi=pop.dpi, bbox_inches="tight")
        plt.close(fig)

    # Earth candidates: both water reservoirs at or above the Earth reference.
    data = [item for item in data if item[2] >= 0.00025 and item[3] >= 0.00075]
    systems_id = [item[-1] for item in data]
    print(f'Number of systems with Earth Candidate {len(np.unique(systems_id))}, {len(np.unique(systems_id))/pop.NSIMS} ')
    scatter_pie(non_zero_wm)
    earth_analogs2 = data.copy()
    wmf_sim_number = len(data)
    print(f'Number of planets in mass limit and WMF above 0.00025 and SWMF above 0.00075: {len(data)}, {wmf_sim_number/mass_lim_number} ({wmf_sim_number/total_number})')

    # Scatter of liquid vs. hydrated-solids WMF of the Earth candidates.
    ms, obs, wmf, swmf, twmf, ecc, system = zip(*earth_analogs2)
    plt.rcParams.update({'figure.autolayout': True})
    plt.style.use('seaborn-paper')
    plt.rcParams.update({'font.size': pop.fontsize})
    plt.rcParams.update({"legend.title_fontsize": pop.legend_fontsize})
    cmap = pop.cmap_standart
    norm = colors.Normalize(min(twmf), max(twmf))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(wmf, swmf, c=twmf, cmap=cmap, norm=norm, s=2, alpha=1)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    # Fixed: stray ']' removed from the x label.
    ax.set(xlabel=r'Water Mass Fraction', ylabel=r'Solids Water Mass Fraction')
    # Dashed lines mark the Earth-candidate thresholds.
    ax.axvline(0.00025, color='black', linewidth=0.7, linestyle='--')
    ax.axhline(0.00075, color='black', linewidth=0.7, linestyle='--')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Total WMF',
                 ax=ax)
    ax.set_xscale('log')
    ax.set_yscale('log')
    # Fixed: the original title was copy-pasted from the ecc/inc plot.
    _finish_figure(pop, fig, ax, 'scatter_wmf_swmf', lim,
                   r'Liquid vs. Hydrated-Solids Water Mass Fraction')

    # Candidates that additionally sit within 2 au.
    data = [item for item in data if item[1] <= 2]
    earth_like_number = len(data)
    print(f'Number of planets in mass limit and WMF above 0.00025 and SWMF above 0.00075 at correct positions: {earth_like_number}, {earth_like_number/wmf_sim_number} ({earth_like_number/total_number})')

    # Disk parameters of the systems that produced Earth candidates.
    SE = []
    TM = []
    RE = []
    for sim_id in np.unique([item[-1] for item in earth_analogs2]):
        SE.append(pop.SIMS[sim_id].Sigma_Exponent)
        TM.append(pop.SIMS[sim_id].Total_Mass)
        # NOTE(review): denstos is divided out twice and the (R_S/au)
        # exponent is applied twice; kept verbatim from the original —
        # verify the intended unit conversion.
        RE.append(pop.SIMS[sim_id].Sigma_Norm / (R_S / au)**pop.SIMS[sim_id].Sigma_Exponent / denstos * pow(au/R_S, pop.SIMS[sim_id].Sigma_Exponent) / denstos)
    SE = np.array(SE)
    TM = np.array(TM)
    cmap = pop.cmap_standart
    norm = colors.LogNorm(min(RE), max(RE))
    fig, ax = plt.subplots(figsize=pop.figsize)
    ax.scatter(SE, TM, c=RE, cmap=cmap, norm=norm, s=12)
    plt.setp(ax.get_xticklabels(), horizontalalignment='center')
    ax.set(xlabel='Surface Density Power Law Exponent', ylabel=r'Total Disk Mass [$M_{\odot}$]', xticks=SE)
    # Secondary y axis showing the disk mass in Jupiter masses.
    ax2 = ax.twinx()
    mn, mx = ax.get_ylim()
    ax2.set_ylim(M_S / M_J * mn, M_S / M_J * mx)
    ax2.set_ylabel('Total Disk Mass [$M_{J}$]')
    fig.colorbar(cm.ScalarMappable(cmap=cmap, norm=norm), orientation='vertical',
                 label=r'Reference Value at $1 \mathrm{au}$ [$\mathrm{g}\mathrm{cm}^{-2}$]', ax=ax2, pad=0.12)
    _finish_figure(pop, fig, ax, 'scatter_parameters_earth_analogs', False,
                   r'Synthesis Parameters')
| [
"numpy.log10",
"numpy.column_stack",
"numpy.array",
"matplotlib.colors.LogNorm",
"matplotlib.pyplot.style.use",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.linspace",
"matplotlib.cm.ScalarMappable",
"numpy.min",
"numpy.abs",
"matplotlib.patches.Patch",
"matplotlib.colors.Normalize",
"ma... | [((758, 806), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (777, 806), True, 'import matplotlib.pyplot as plt\n'), ((811, 841), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (824, 841), True, 'import matplotlib.pyplot as plt\n'), ((846, 894), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (865, 894), True, 'import matplotlib.pyplot as plt\n'), ((989, 1015), 'matplotlib.colors.LogNorm', 'colors.LogNorm', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (1003, 1015), False, 'from matplotlib import colors\n'), ((1031, 1064), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (1043, 1064), True, 'import matplotlib.pyplot as plt\n'), ((1187, 1235), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (1195, 1235), True, 'import matplotlib.pyplot as plt\n'), ((1938, 1952), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (1947, 1952), True, 'import matplotlib.pyplot as plt\n'), ((2832, 2849), 'numpy.array', 'np.array', (['Numbers'], {}), '(Numbers)\n', (2840, 2849), True, 'import numpy as np\n'), ((2854, 2902), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (2873, 2902), True, 'import matplotlib.pyplot as plt\n'), ((2907, 2937), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (2920, 2937), True, 'import matplotlib.pyplot as plt\n'), ((2942, 2990), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (2961, 2990), True, 'import matplotlib.pyplot as plt\n'), ((3080, 
3108), 'matplotlib.colors.Normalize', 'colors.Normalize', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (3096, 3108), False, 'from matplotlib import colors\n'), ((3124, 3157), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (3136, 3157), True, 'import matplotlib.pyplot as plt\n'), ((3278, 3326), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (3286, 3326), True, 'import matplotlib.pyplot as plt\n'), ((3994, 4008), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (4003, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4903, 4922), 'numpy.array', 'np.array', (['lost_mass'], {}), '(lost_mass)\n', (4911, 4922), True, 'import numpy as np\n'), ((5022, 5043), 'numpy.array', 'np.array', (['SigmaCoeffs'], {}), '(SigmaCoeffs)\n', (5030, 5043), True, 'import numpy as np\n'), ((5048, 5096), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (5067, 5096), True, 'import matplotlib.pyplot as plt\n'), ((5101, 5131), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (5114, 5131), True, 'import matplotlib.pyplot as plt\n'), ((5136, 5184), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (5155, 5184), True, 'import matplotlib.pyplot as plt\n'), ((5413, 5446), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (5425, 5446), True, 'import matplotlib.pyplot as plt\n'), ((5567, 5615), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (5575, 5615), True, 'import matplotlib.pyplot as plt\n'), ((6340, 6354), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), 
'(fig)\n', (6349, 6354), True, 'import matplotlib.pyplot as plt\n'), ((6868, 6882), 'numpy.array', 'np.array', (['AMDS'], {}), '(AMDS)\n', (6876, 6882), True, 'import numpy as np\n'), ((6887, 6935), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (6906, 6935), True, 'import matplotlib.pyplot as plt\n'), ((6940, 6970), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (6953, 6970), True, 'import matplotlib.pyplot as plt\n'), ((6975, 7023), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (6994, 7023), True, 'import matplotlib.pyplot as plt\n'), ((7113, 7139), 'matplotlib.colors.LogNorm', 'colors.LogNorm', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (7127, 7139), False, 'from matplotlib import colors\n'), ((7155, 7188), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (7167, 7188), True, 'import matplotlib.pyplot as plt\n'), ((7309, 7357), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (7317, 7357), True, 'import matplotlib.pyplot as plt\n'), ((8007, 8021), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (8016, 8021), True, 'import matplotlib.pyplot as plt\n'), ((8534, 8548), 'numpy.array', 'np.array', (['RMCS'], {}), '(RMCS)\n', (8542, 8548), True, 'import numpy as np\n'), ((8553, 8601), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (8572, 8601), True, 'import matplotlib.pyplot as plt\n'), ((8606, 8636), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (8619, 8636), True, 'import matplotlib.pyplot as plt\n'), ((8641, 8689), 
'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (8660, 8689), True, 'import matplotlib.pyplot as plt\n'), ((8773, 8801), 'matplotlib.colors.Normalize', 'colors.Normalize', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (8789, 8801), False, 'from matplotlib import colors\n'), ((8817, 8850), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (8829, 8850), True, 'import matplotlib.pyplot as plt\n'), ((8968, 9016), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (8976, 9016), True, 'import matplotlib.pyplot as plt\n'), ((9673, 9687), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9682, 9687), True, 'import matplotlib.pyplot as plt\n'), ((9994, 10042), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (10013, 10042), True, 'import matplotlib.pyplot as plt\n'), ((10047, 10077), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (10060, 10077), True, 'import matplotlib.pyplot as plt\n'), ((10082, 10130), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (10101, 10130), True, 'import matplotlib.pyplot as plt\n'), ((10216, 10244), 'matplotlib.colors.Normalize', 'colors.Normalize', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (10232, 10244), False, 'from matplotlib import colors\n'), ((10260, 10293), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (10272, 10293), True, 'import matplotlib.pyplot as plt\n'), ((10412, 10460), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, 
horizontalalignment='center')\n", (10420, 10460), True, 'import matplotlib.pyplot as plt\n'), ((11140, 11154), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (11149, 11154), True, 'import matplotlib.pyplot as plt\n'), ((12192, 12240), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (12211, 12240), True, 'import matplotlib.pyplot as plt\n'), ((12245, 12275), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (12258, 12275), True, 'import matplotlib.pyplot as plt\n'), ((12280, 12328), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (12299, 12328), True, 'import matplotlib.pyplot as plt\n'), ((12333, 12400), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.title_fontsize': pop.legend_fontsize}"], {}), "({'legend.title_fontsize': pop.legend_fontsize})\n", (12352, 12400), True, 'import matplotlib.pyplot as plt\n'), ((12493, 12519), 'matplotlib.colors.LogNorm', 'colors.LogNorm', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (12507, 12519), False, 'from matplotlib import colors\n'), ((12535, 12568), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (12547, 12568), True, 'import matplotlib.pyplot as plt\n'), ((12791, 12839), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (12799, 12839), True, 'import matplotlib.pyplot as plt\n'), ((13442, 13456), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (13451, 13456), True, 'import matplotlib.pyplot as plt\n'), ((14231, 14279), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (14250, 14279), True, 'import matplotlib.pyplot as 
plt\n'), ((14284, 14314), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (14297, 14314), True, 'import matplotlib.pyplot as plt\n'), ((14319, 14367), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (14338, 14367), True, 'import matplotlib.pyplot as plt\n'), ((14372, 14439), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.title_fontsize': pop.legend_fontsize}"], {}), "({'legend.title_fontsize': pop.legend_fontsize})\n", (14391, 14439), True, 'import matplotlib.pyplot as plt\n'), ((14524, 14552), 'matplotlib.colors.Normalize', 'colors.Normalize', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (14540, 14552), False, 'from matplotlib import colors\n'), ((14568, 14601), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (14580, 14601), True, 'import matplotlib.pyplot as plt\n'), ((14719, 14767), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (14727, 14767), True, 'import matplotlib.pyplot as plt\n'), ((15367, 15381), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (15376, 15381), True, 'import matplotlib.pyplot as plt\n'), ((16864, 16912), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (16883, 16912), True, 'import matplotlib.pyplot as plt\n'), ((16917, 16947), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (16930, 16947), True, 'import matplotlib.pyplot as plt\n'), ((16952, 17000), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (16971, 17000), True, 'import matplotlib.pyplot as plt\n'), ((17109, 17142), 
'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (17121, 17142), True, 'import matplotlib.pyplot as plt\n'), ((17786, 17800), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (17795, 17800), True, 'import matplotlib.pyplot as plt\n'), ((18168, 18216), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (18187, 18216), True, 'import matplotlib.pyplot as plt\n'), ((18221, 18251), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (18234, 18251), True, 'import matplotlib.pyplot as plt\n'), ((18256, 18304), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (18275, 18304), True, 'import matplotlib.pyplot as plt\n'), ((18413, 18446), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (18425, 18446), True, 'import matplotlib.pyplot as plt\n'), ((19094, 19108), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (19103, 19108), True, 'import matplotlib.pyplot as plt\n'), ((19478, 19526), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (19497, 19526), True, 'import matplotlib.pyplot as plt\n'), ((19531, 19561), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (19544, 19561), True, 'import matplotlib.pyplot as plt\n'), ((19566, 19614), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (19585, 19614), True, 'import matplotlib.pyplot as plt\n'), ((19723, 19756), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (19735, 19756), True, 'import 
matplotlib.pyplot as plt\n'), ((20405, 20419), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (20414, 20419), True, 'import matplotlib.pyplot as plt\n'), ((20767, 20815), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (20786, 20815), True, 'import matplotlib.pyplot as plt\n'), ((20820, 20850), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (20833, 20850), True, 'import matplotlib.pyplot as plt\n'), ((20855, 20903), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (20874, 20903), True, 'import matplotlib.pyplot as plt\n'), ((21014, 21047), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (21026, 21047), True, 'import matplotlib.pyplot as plt\n'), ((21696, 21710), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (21705, 21710), True, 'import matplotlib.pyplot as plt\n'), ((22054, 22102), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (22073, 22102), True, 'import matplotlib.pyplot as plt\n'), ((22107, 22137), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (22120, 22137), True, 'import matplotlib.pyplot as plt\n'), ((22142, 22190), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (22161, 22190), True, 'import matplotlib.pyplot as plt\n'), ((22346, 22379), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (22358, 22379), True, 'import matplotlib.pyplot as plt\n'), ((23014, 23028), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (23023, 23028), True, 'import 
matplotlib.pyplot as plt\n'), ((23519, 23567), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (23538, 23567), True, 'import matplotlib.pyplot as plt\n'), ((23572, 23602), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (23585, 23602), True, 'import matplotlib.pyplot as plt\n'), ((23607, 23655), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (23626, 23655), True, 'import matplotlib.pyplot as plt\n'), ((23660, 23727), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.title_fontsize': pop.legend_fontsize}"], {}), "({'legend.title_fontsize': pop.legend_fontsize})\n", (23679, 23727), True, 'import matplotlib.pyplot as plt\n'), ((23828, 23854), 'matplotlib.colors.LogNorm', 'colors.LogNorm', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (23842, 23854), False, 'from matplotlib import colors\n'), ((23870, 23903), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (23882, 23903), True, 'import matplotlib.pyplot as plt\n'), ((24211, 24259), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (24219, 24259), True, 'import matplotlib.pyplot as plt\n'), ((24833, 24847), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (24842, 24847), True, 'import matplotlib.pyplot as plt\n'), ((29369, 29417), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (29388, 29417), True, 'import matplotlib.pyplot as plt\n'), ((29422, 29452), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (29435, 29452), True, 'import matplotlib.pyplot as plt\n'), ((29457, 29505), 
'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (29476, 29505), True, 'import matplotlib.pyplot as plt\n'), ((29510, 29577), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.title_fontsize': pop.legend_fontsize}"], {}), "({'legend.title_fontsize': pop.legend_fontsize})\n", (29529, 29577), True, 'import matplotlib.pyplot as plt\n'), ((29662, 29690), 'matplotlib.colors.Normalize', 'colors.Normalize', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (29678, 29690), False, 'from matplotlib import colors\n'), ((29706, 29739), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (29718, 29739), True, 'import matplotlib.pyplot as plt\n'), ((29850, 29898), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], {'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (29858, 29898), True, 'import matplotlib.pyplot as plt\n'), ((30626, 30640), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (30635, 30640), True, 'import matplotlib.pyplot as plt\n'), ((31054, 31100), 'numpy.unique', 'np.unique', (['[sys[-1] for sys in earth_analogs2]'], {}), '([sys[-1] for sys in earth_analogs2])\n', (31063, 31100), True, 'import numpy as np\n'), ((31350, 31362), 'numpy.array', 'np.array', (['SE'], {}), '(SE)\n', (31358, 31362), True, 'import numpy as np\n'), ((31372, 31384), 'numpy.array', 'np.array', (['TM'], {}), '(TM)\n', (31380, 31384), True, 'import numpy as np\n'), ((31465, 31491), 'matplotlib.colors.LogNorm', 'colors.LogNorm', (['cmin', 'cmax'], {}), '(cmin, cmax)\n', (31479, 31491), False, 'from matplotlib import colors\n'), ((31506, 31539), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (31518, 31539), True, 'import matplotlib.pyplot as plt\n'), ((31637, 31685), 'matplotlib.pyplot.setp', 'plt.setp', (['x_labels'], 
{'horizontalalignment': '"""center"""'}), "(x_labels, horizontalalignment='center')\n", (31645, 31685), True, 'import matplotlib.pyplot as plt\n'), ((32398, 32412), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (32407, 32412), True, 'import matplotlib.pyplot as plt\n'), ((286, 300), 'numpy.power', 'np.power', (['x', 'b'], {}), '(x, b)\n', (294, 300), True, 'import numpy as np\n'), ((1510, 1549), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (1527, 1549), False, 'from matplotlib import cm\n'), ((1818, 1863), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters.png"""'], {}), "(pop.PLOT, 'scatter_parameters.png')\n", (1827, 1863), True, 'import os.path as path\n'), ((3606, 3645), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (3623, 3645), False, 'from matplotlib import cm\n'), ((3866, 3919), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters_numbers.png"""'], {}), "(pop.PLOT, 'scatter_parameters_numbers.png')\n", (3875, 3919), True, 'import os.path as path\n'), ((4762, 4778), 'numpy.sum', 'np.sum', (['filtered'], {}), '(filtered)\n', (4768, 4778), True, 'import numpy as np\n'), ((5909, 5948), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (5926, 5948), False, 'from matplotlib import cm\n'), ((6211, 6265), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_reference_lost_mass.png"""'], {}), "(pop.PLOT, 'scatter_reference_lost_mass.png')\n", (6220, 6265), True, 'import os.path as path\n'), ((6795, 6810), 'numpy.array', 'np.array', (['Means'], {}), '(Means)\n', (6803, 6810), True, 'import numpy as np\n'), ((6813, 6826), 'numpy.sum', 'np.sum', (['Means'], {}), '(Means)\n', (6819, 6826), True, 'import numpy as np\n'), ((7637, 7676), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 
'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (7654, 7676), False, 'from matplotlib import cm\n'), ((7883, 7932), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters_amd.png"""'], {}), "(pop.PLOT, 'scatter_parameters_amd.png')\n", (7892, 7932), True, 'import os.path as path\n'), ((8461, 8476), 'numpy.array', 'np.array', (['Means'], {}), '(Means)\n', (8469, 8476), True, 'import numpy as np\n'), ((8479, 8492), 'numpy.sum', 'np.sum', (['Means'], {}), '(Means)\n', (8485, 8492), True, 'import numpy as np\n'), ((9296, 9335), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (9313, 9335), False, 'from matplotlib import cm\n'), ((9542, 9598), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters_rmc_nonlog.png"""'], {}), "(pop.PLOT, 'scatter_parameters_rmc_nonlog.png')\n", (9551, 9598), True, 'import os.path as path\n'), ((10740, 10779), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (10757, 10779), False, 'from matplotlib import cm\n'), ((11003, 11065), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters_collision_number.png"""'], {}), "(pop.PLOT, 'scatter_parameters_collision_number.png')\n", (11012, 11065), True, 'import os.path as path\n'), ((12931, 12970), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (12948, 12970), False, 'from matplotlib import cm\n'), ((13344, 13383), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (13353, 13383), True, 'import os.path as path\n'), ((14148, 14161), 'numpy.array', 'np.array', (['WMF'], {}), '(WMF)\n', (14156, 14161), True, 'import numpy as np\n'), ((14164, 14178), 'numpy.array', 'np.array', (['SWMF'], {}), '(SWMF)\n', (14172, 14178), True, 'import numpy as np\n'), ((14880, 14919), 
'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (14897, 14919), False, 'from matplotlib import cm\n'), ((15269, 15308), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (15278, 15308), True, 'import os.path as path\n'), ((16085, 16097), 'numpy.array', 'np.array', (['WM'], {}), '(WM)\n', (16093, 16097), True, 'import numpy as np\n'), ((16100, 16116), 'numpy.array', 'np.array', (['Masses'], {}), '(Masses)\n', (16108, 16116), True, 'import numpy as np\n'), ((16128, 16141), 'numpy.array', 'np.array', (['SWM'], {}), '(SWM)\n', (16136, 16141), True, 'import numpy as np\n'), ((16144, 16160), 'numpy.array', 'np.array', (['Masses'], {}), '(Masses)\n', (16152, 16160), True, 'import numpy as np\n'), ((17688, 17727), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (17697, 17727), True, 'import os.path as path\n'), ((18996, 19035), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (19005, 19035), True, 'import os.path as path\n'), ((20307, 20346), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (20316, 20346), True, 'import os.path as path\n'), ((21598, 21637), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (21607, 21637), True, 'import os.path as path\n'), ((22204, 22217), 'numpy.array', 'np.array', (['WMF'], {}), '(WMF)\n', (22212, 22217), True, 'import numpy as np\n'), ((22218, 22232), 'numpy.array', 'np.array', (['SWMF'], {}), '(SWMF)\n', (22226, 22232), True, 'import numpy as np\n'), ((22916, 22955), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (22925, 22955), True, 'import os.path as path\n'), ((24374, 24413), 
'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (24391, 24413), False, 'from matplotlib import cm\n'), ((24735, 24774), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (24744, 24774), True, 'import os.path as path\n'), ((24894, 24942), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'figure.autolayout': True}"], {}), "({'figure.autolayout': True})\n", (24913, 24942), True, 'import matplotlib.pyplot as plt\n'), ((24951, 24981), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn-paper"""'], {}), "('seaborn-paper')\n", (24964, 24981), True, 'import matplotlib.pyplot as plt\n'), ((24990, 25038), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'font.size': pop.fontsize}"], {}), "({'font.size': pop.fontsize})\n", (25009, 25038), True, 'import matplotlib.pyplot as plt\n'), ((25047, 25114), 'matplotlib.pyplot.rcParams.update', 'plt.rcParams.update', (["{'legend.title_fontsize': pop.legend_fontsize}"], {}), "({'legend.title_fontsize': pop.legend_fontsize})\n", (25066, 25114), True, 'import matplotlib.pyplot as plt\n'), ((25134, 25167), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': 'pop.figsize'}), '(figsize=pop.figsize)\n', (25146, 25167), True, 'import matplotlib.pyplot as plt\n'), ((25273, 25324), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""red"""', 'label': '"""Hydrated Silica"""'}), "(color='red', label='Hydrated Silica')\n", (25286, 25324), False, 'from matplotlib import patches\n'), ((25346, 25392), 'matplotlib.patches.Patch', 'patches.Patch', ([], {'color': '"""blue"""', 'label': '"""Water/Ice"""'}), "(color='blue', label='Water/Ice')\n", (25359, 25392), False, 'from matplotlib import patches\n'), ((25534, 25548), 'numpy.min', 'np.min', (['Masses'], {}), '(Masses)\n', (25540, 25548), True, 'import numpy as np\n'), ((28304, 28318), 'matplotlib.pyplot.close', 
'plt.close', (['fig'], {}), '(fig)\n', (28313, 28318), True, 'import matplotlib.pyplot as plt\n'), ((30137, 30176), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (30154, 30176), False, 'from matplotlib import cm\n'), ((30528, 30567), 'os.path.join', 'path.join', (['pop.PLOT', "(save_name + '.png')"], {}), "(pop.PLOT, save_name + '.png')\n", (30537, 30567), True, 'import os.path as path\n'), ((31956, 31995), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'cmap': 'cmap', 'norm': 'norm'}), '(cmap=cmap, norm=norm)\n', (31973, 31995), False, 'from matplotlib import cm\n'), ((32264, 32323), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_parameters_earth_analogs.png"""'], {}), "(pop.PLOT, 'scatter_parameters_earth_analogs.png')\n", (32273, 32323), True, 'import os.path as path\n'), ((6032, 6054), 'numpy.unique', 'np.unique', (['SigmaCoeffs'], {}), '(SigmaCoeffs)\n', (6041, 6054), True, 'import numpy as np\n'), ((28200, 28241), 'os.path.join', 'path.join', (['pop.PLOT', '"""scatter_ratios.png"""'], {}), "(pop.PLOT, 'scatter_ratios.png')\n", (28209, 28241), True, 'import os.path as path\n'), ((5345, 5367), 'numpy.unique', 'np.unique', (['SigmaCoeffs'], {}), '(SigmaCoeffs)\n', (5354, 5367), True, 'import numpy as np\n'), ((25833, 25851), 'numpy.linspace', 'np.linspace', (['(0)', 'r1'], {}), '(0, r1)\n', (25844, 25851), True, 'import numpy as np\n'), ((25889, 25907), 'numpy.linspace', 'np.linspace', (['(0)', 'r1'], {}), '(0, r1)\n', (25900, 25907), True, 'import numpy as np\n'), ((25949, 25974), 'numpy.column_stack', 'np.column_stack', (['[x1, y1]'], {}), '([x1, y1])\n', (25964, 25974), True, 'import numpy as np\n'), ((25994, 26005), 'numpy.abs', 'np.abs', (['xy1'], {}), '(xy1)\n', (26000, 26005), True, 'import numpy as np\n'), ((26049, 26067), 'numpy.linspace', 'np.linspace', (['r1', '(1)'], {}), '(r1, 1)\n', (26060, 26067), True, 'import numpy as np\n'), ((26105, 26123), 
'numpy.linspace', 'np.linspace', (['r1', '(1)'], {}), '(r1, 1)\n', (26116, 26123), True, 'import numpy as np\n'), ((26165, 26190), 'numpy.column_stack', 'np.column_stack', (['[x2, y2]'], {}), '([x2, y2])\n', (26180, 26190), True, 'import numpy as np\n'), ((26210, 26221), 'numpy.abs', 'np.abs', (['xy2'], {}), '(xy2)\n', (26216, 26221), True, 'import numpy as np\n'), ((26741, 26752), 'numpy.log10', 'np.log10', (['m'], {}), '(m)\n', (26749, 26752), True, 'import numpy as np\n'), ((28531, 28552), 'numpy.unique', 'np.unique', (['systems_id'], {}), '(systems_id)\n', (28540, 28552), True, 'import numpy as np\n'), ((12596, 12609), 'numpy.array', 'np.array', (['Inc'], {}), '(Inc)\n', (12604, 12609), True, 'import numpy as np\n'), ((26764, 26778), 'numpy.min', 'np.min', (['Masses'], {}), '(Masses)\n', (26770, 26778), True, 'import numpy as np\n'), ((26793, 26807), 'numpy.max', 'np.max', (['Masses'], {}), '(Masses)\n', (26799, 26807), True, 'import numpy as np\n'), ((26820, 26834), 'numpy.min', 'np.min', (['Masses'], {}), '(Masses)\n', (26826, 26834), True, 'import numpy as np\n'), ((28561, 28582), 'numpy.unique', 'np.unique', (['systems_id'], {}), '(systems_id)\n', (28570, 28582), True, 'import numpy as np\n'), ((11957, 11979), 'numpy.abs', 'np.abs', (['(0.01 - item[0])'], {}), '(0.01 - item[0])\n', (11963, 11979), True, 'import numpy as np\n')] |
from .vec3 import vec3
from .geometry import isnear
import numpy as np
class quat:
    """Quaternion (w, x, y, z) used to represent 3D rotations.

    ``w`` is the scalar part and (x, y, z) the vector part.  The axis/angle
    and vector-to-vector constructors expect ``vec3`` arguments.
    """

    def __repr__(self):
        return f'quat({self.w:.4f}, {self.x:.4f}, {self.y:.4f}, {self.z:.4f})'

    def __init__(self, w, x, y, z):
        self.w = w
        self.x = x
        self.y = y
        self.z = z

    @classmethod
    def O(cls):
        """Identity quaternion (no rotation)."""
        return cls(1, 0, 0, 0)

    @classmethod
    def av(cls, a, v):
        """Quaternion for a rotation of angle ``a`` (radians) about axis ``v``."""
        # Normalize v and scale by sin(a/2) to form the vector part.
        v = v * (np.sin(a / 2.0) / v.mag())
        return cls(np.cos(a / 2.0), v.x, v.y, v.z)

    @classmethod
    def uu(cls, x, y):
        """Quaternion rotating direction ``x`` onto direction ``y``."""
        a = x.ang(y)
        if a == 0.0:
            return cls(1, 0, 0, 0)
        else:
            v = x.crs(y).nrm()
            # x and y are (anti-)parallel, so the cross product vanished;
            # fall back to axes perpendicular to x.
            if isnear(v.dot(v), 0):
                v = x.crs(vec3.X()).nrm()
            if isnear(v.dot(v), 0):
                v = x.crs(vec3.Y()).nrm()
            if isnear(v.dot(v), 0):
                # BUG FIX: a bare ``raise`` with no active exception would
                # itself fail with an opaque RuntimeError; raise a clear error.
                raise ValueError('cannot determine a rotation axis')
            return cls.av(a, v)

    @classmethod
    def toxy(cls, v):
        """Quaternion rotating ``v`` onto the +Z axis (into the XY plane)."""
        vz = v.nrm().z
        if isnear(vz, -1):
            # v points straight down: rotate pi about the X axis.
            return cls(0, 1, 0, 0)
        elif not isnear(vz, 1):
            return cls.uu(v, vec3.Z())
        else:
            # v already points along +Z: identity rotation.
            return cls.av(0, vec3.Z())

    @classmethod
    def rotz(cls, a):
        """Quaternion for a rotation of ``a`` radians about the Z axis."""
        return cls.av(a, vec3.Z())

    def fp(self):
        """Return a copy of this quaternion with the scalar part negated."""
        return quat(-self.w, self.x, self.y, self.z)

    def rot(self, ps):
        """Rotate each point in ``ps`` in place by this quaternion; return ``ps``."""
        for p in ps:
            p.rot(self)
        return ps
| [
"numpy.sin",
"numpy.cos"
] | [((471, 486), 'numpy.cos', 'np.cos', (['(a / 2.0)'], {}), '(a / 2.0)\n', (477, 486), True, 'import numpy as np\n'), ((425, 440), 'numpy.sin', 'np.sin', (['(a / 2.0)'], {}), '(a / 2.0)\n', (431, 440), True, 'import numpy as np\n')] |
import threading
import time
import numpy as np
from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds
import pandas as pd
import tkinter as tk
from tkinter import filedialog
from queue import Queue
from threading import Thread
import streamlit as st
from streamlit.scriptrunner import add_script_run_ctx
class Client():
    """Front-end for acquiring EEG samples, either live (brainflow board)
    or replayed from a previously recorded file."""

    def __init__(self, datatype):
        # Hardware configuration for the brainflow board.
        self.params = BrainFlowInputParams()
        # NOTE(review): serial port is hard-coded to a Windows COM port;
        # board id 0 — presumably an OpenBCI board, confirm against setup.
        self.params.serial_port = 'com3'
        self.params.board_id = 0
        self.board = BoardShim(0, self.params)
        self.datatype = datatype          # 'real' (live) or 'fake' (replay)
        self.file_path = None             # path of the chosen replay file
        self.fake_matrix = None           # replay data loaded from file
        self.df = None
        self.fake_df = None
        self.times_to_go_over = 0         # number of 256-row chunks in replay

    def collect_data(self, datatype):
        """Dispatch to live or replay collection.

        NOTE(review): Real(self)/Fake(self) pass this Client instance as the
        ``datatype`` constructor argument — likely unintended; confirm.
        """
        if datatype == 'real':
            start_real = Real(self)
            # NOTE(review): Real defines no collect_data_live() method below,
            # so this path raises AttributeError; also returns None, unlike
            # the 'fake' branch which returns a tuple.
            start_real.collect_data_live()
        else:
            start_fake = Fake(self)
            self.file_path = start_fake.choose_file()
            self.fake_matrix = start_fake.read_file()
            self.times_to_go_over = start_fake.passes_calc()
            return self.fake_matrix, self.times_to_go_over

    def real_data_collection(self):
        """Read from the board once, then append one read per second for
        10 seconds; returns the concatenated sample matrix."""
        start_real = Real(self)
        m = start_real.read_data()
        for i in range(10):
            time.sleep(1)
            d = start_real.read_data()
            # Samples are columns, so concatenate along axis 1.
            m = np.append(m, d, axis=1)
        return m
class Real(Client):
    """Live data source: streams samples from the board via brainflow."""
    # (Removed a stray dead ``pass`` statement that preceded the methods.)

    def start_stream(self):
        """Open the board session and start streaming."""
        self.board.prepare_session()
        self.board.start_stream()

    def read_data(self):
        """Return all samples buffered on the board since the last read."""
        data = self.board.get_board_data()
        return data

    def stop_stream(self):
        """Stop streaming and release the board session."""
        self.board.stop_stream()
        self.board.release_session()
class Fake(Client):
    """Replay data source: reads a previously recorded file instead of
    talking to the hardware."""

    def choose_file(self):
        """Ask the user to pick a recording via a file-open dialog."""
        dialog_root = tk.Tk()
        dialog_root.withdraw()  # hide the empty root window behind the dialog
        self.file_path = filedialog.askopenfilename()
        return self.file_path

    def read_file(self):
        """Load the chosen recording into a DataFrame with fixed column names."""
        self.df = pd.read_csv(self.file_path, sep=" ", header=None,
                              names=["samples", "channel 1", "channel 2", "channel 3",
                                     "channel 4", "channel 5"])
        return self.df

    def passes_calc(self):
        """Number of whole 256-row chunks available in the recording."""
        rows = len(self.df.index)
        self.times_to_go_over = int(np.floor(rows / 256))
        return self.times_to_go_over
def the_data(datatype, out_q):
    """Producer: push DataFrames of samples onto ``out_q``.

    'real' streams 600 frames from the live board; 'fake' replays a recorded
    file in 256-row chunks.
    """
    if datatype == 'real':
        source = Real(datatype)
        source.start_stream()
        counter = 0
        time.sleep(1)  # let the board buffer its first samples
        for _ in range(600):
            frame = pd.DataFrame(source.read_data()).transpose()
            out_q.put(frame)
            counter += 1
    if datatype == 'fake':
        client = Client(datatype)
        replay_matrix, passes = client.collect_data(datatype)
        time.sleep(1)
        for chunk_index in range(passes):
            start = chunk_index * 256
            out_q.put(replay_matrix[start:start + 256])
def get_all_queue_result(queue):
    """Drain ``queue``, returning every currently-queued item in FIFO order."""
    items = []
    while True:
        if queue.empty():
            return items
        items.append(queue.get())
def testing_queue(in_q, all_data):
    # Consumer loop: every 5 seconds, drain everything currently queued and
    # append it to the running DataFrame.  Runs forever.
    while True:
        time.sleep(5)
        temporary_df = pd.DataFrame()
        for i in range(in_q.qsize()):
            temporary_df = pd.concat([temporary_df, in_q.get()])
        # NOTE(review): this rebinds the *local* ``all_data`` only — the
        # caller's DataFrame is never updated, so the accumulated data is
        # effectively lost each call.
        all_data = pd.concat([all_data, temporary_df], axis=0)
        # NOTE(review): task_done() is called once per drain cycle instead of
        # once per get(); Queue.join() in main() may therefore never unblock.
        in_q.task_done()
def streamlitapp(q):
    """Render the Streamlit dashboard (header, dataset, features, training).

    NOTE(review): the ``q`` parameter is never read in this body — presumably
    the frames queued by the producer were meant to feed the chart; confirm.
    """
    header = st.container()
    dataset = st.container()
    features = st.container()
    modelTraining = st.container()
    with header:
        st.title('welcome to my project')
        st.text('description')
    with features:
        st.header('features')
        st.text('info about features')
    with dataset:
        st.header('Dataset')
        st.text('info about dataset')
        # data =
        # NOTE(review): ``data`` is never assigned (the line above is
        # commented out), so the next line raises NameError at runtime.
        data = abs(data) / 1000
        placeholder = st.empty()
        placeholder.line_chart(data.iloc[1:50, 1:5])
        with placeholder.container():
            # Redraw a sliding 50-row window every ~58 ms to animate the chart.
            for i in range(1, len(data), 2):
                time.sleep(0.058)
                placeholder.line_chart(data.iloc[i:i + 50, 1:5])
    with modelTraining:
        st.header('time to train the model')
        st.text('info about training the model')
def main():
    """Wire up the producer, consumer and UI threads, then wait on the queue."""
    datatype = 'real'
    frame_queue = Queue()
    collected = pd.DataFrame()
    producer = Thread(target=the_data, args=(datatype, frame_queue))
    consumer = Thread(target=testing_queue, args=(frame_queue, collected))
    producer.start()
    consumer.start()
    ui_thread = Thread(target=streamlitapp, args=(frame_queue,))
    add_script_run_ctx(ui_thread)  # attach the Streamlit script context
    ui_thread.start()
    frame_queue.join()


if __name__ == '__main__':
    main()
| [
"brainflow.board_shim.BoardShim",
"pandas.DataFrame",
"brainflow.board_shim.BrainFlowInputParams",
"pandas.read_csv",
"numpy.floor",
"time.sleep",
"streamlit.title",
"numpy.append",
"streamlit.text",
"tkinter.Tk",
"streamlit.container",
"threading.Thread",
"queue.Queue",
"streamlit.empty",... | [((3451, 3465), 'streamlit.container', 'st.container', ([], {}), '()\n', (3463, 3465), True, 'import streamlit as st\n'), ((3480, 3494), 'streamlit.container', 'st.container', ([], {}), '()\n', (3492, 3494), True, 'import streamlit as st\n'), ((3510, 3524), 'streamlit.container', 'st.container', ([], {}), '()\n', (3522, 3524), True, 'import streamlit as st\n'), ((3545, 3559), 'streamlit.container', 'st.container', ([], {}), '()\n', (3557, 3559), True, 'import streamlit as st\n'), ((4303, 4310), 'queue.Queue', 'Queue', ([], {}), '()\n', (4308, 4310), False, 'from queue import Queue\n'), ((4326, 4340), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (4338, 4340), True, 'import pandas as pd\n'), ((4350, 4393), 'threading.Thread', 'Thread', ([], {'target': 'the_data', 'args': '(datatype, q)'}), '(target=the_data, args=(datatype, q))\n', (4356, 4393), False, 'from threading import Thread\n'), ((4403, 4451), 'threading.Thread', 'Thread', ([], {'target': 'testing_queue', 'args': '(q, all_data)'}), '(target=testing_queue, args=(q, all_data))\n', (4409, 4451), False, 'from threading import Thread\n'), ((4491, 4529), 'threading.Thread', 'Thread', ([], {'target': 'streamlitapp', 'args': '(q,)'}), '(target=streamlitapp, args=(q,))\n', (4497, 4529), False, 'from threading import Thread\n'), ((4534, 4556), 'streamlit.scriptrunner.add_script_run_ctx', 'add_script_run_ctx', (['t3'], {}), '(t3)\n', (4552, 4556), False, 'from streamlit.scriptrunner import add_script_run_ctx\n'), ((400, 422), 'brainflow.board_shim.BrainFlowInputParams', 'BrainFlowInputParams', ([], {}), '()\n', (420, 422), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds\n'), ((518, 543), 'brainflow.board_shim.BoardShim', 'BoardShim', (['(0)', 'self.params'], {}), '(0, self.params)\n', (527, 543), False, 'from brainflow.board_shim import BoardShim, BrainFlowInputParams, BoardIds\n'), ((1788, 1795), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1793, 1795), 
True, 'import tkinter as tk\n'), ((1845, 1873), 'tkinter.filedialog.askopenfilename', 'filedialog.askopenfilename', ([], {}), '()\n', (1871, 1873), False, 'from tkinter import filedialog\n'), ((1948, 2085), 'pandas.read_csv', 'pd.read_csv', (['self.file_path'], {'sep': '""" """', 'header': 'None', 'names': "['samples', 'channel 1', 'channel 2', 'channel 3', 'channel 4', 'channel 5']"}), "(self.file_path, sep=' ', header=None, names=['samples',\n 'channel 1', 'channel 2', 'channel 3', 'channel 4', 'channel 5'])\n", (1959, 2085), True, 'import pandas as pd\n'), ((2487, 2500), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2497, 2500), False, 'import time\n'), ((2825, 2838), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2835, 2838), False, 'import time\n'), ((3172, 3185), 'time.sleep', 'time.sleep', (['(5)'], {}), '(5)\n', (3182, 3185), False, 'import time\n'), ((3209, 3223), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (3221, 3223), True, 'import pandas as pd\n'), ((3346, 3389), 'pandas.concat', 'pd.concat', (['[all_data, temporary_df]'], {'axis': '(0)'}), '([all_data, temporary_df], axis=0)\n', (3355, 3389), True, 'import pandas as pd\n'), ((3586, 3619), 'streamlit.title', 'st.title', (['"""welcome to my project"""'], {}), "('welcome to my project')\n", (3594, 3619), True, 'import streamlit as st\n'), ((3628, 3650), 'streamlit.text', 'st.text', (['"""description"""'], {}), "('description')\n", (3635, 3650), True, 'import streamlit as st\n'), ((3678, 3699), 'streamlit.header', 'st.header', (['"""features"""'], {}), "('features')\n", (3687, 3699), True, 'import streamlit as st\n'), ((3708, 3738), 'streamlit.text', 'st.text', (['"""info about features"""'], {}), "('info about features')\n", (3715, 3738), True, 'import streamlit as st\n'), ((3765, 3785), 'streamlit.header', 'st.header', (['"""Dataset"""'], {}), "('Dataset')\n", (3774, 3785), True, 'import streamlit as st\n'), ((3794, 3823), 'streamlit.text', 'st.text', (['"""info about 
dataset"""'], {}), "('info about dataset')\n", (3801, 3823), True, 'import streamlit as st\n'), ((3895, 3905), 'streamlit.empty', 'st.empty', ([], {}), '()\n', (3903, 3905), True, 'import streamlit as st\n'), ((4173, 4209), 'streamlit.header', 'st.header', (['"""time to train the model"""'], {}), "('time to train the model')\n", (4182, 4209), True, 'import streamlit as st\n'), ((4218, 4258), 'streamlit.text', 'st.text', (['"""info about training the model"""'], {}), "('info about training the model')\n", (4225, 4258), True, 'import streamlit as st\n'), ((1296, 1309), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1306, 1309), False, 'import time\n'), ((1365, 1388), 'numpy.append', 'np.append', (['m', 'd'], {'axis': '(1)'}), '(m, d, axis=1)\n', (1374, 1388), True, 'import numpy as np\n'), ((2270, 2290), 'numpy.floor', 'np.floor', (['(rows / 256)'], {}), '(rows / 256)\n', (2278, 2290), True, 'import numpy as np\n'), ((2585, 2600), 'pandas.DataFrame', 'pd.DataFrame', (['d'], {}), '(d)\n', (2597, 2600), True, 'import pandas as pd\n'), ((4058, 4075), 'time.sleep', 'time.sleep', (['(0.058)'], {}), '(0.058)\n', (4068, 4075), False, 'import time\n')] |
#!/usr/bin/env python
from __future__ import print_function
from soma import aims
import numpy as np
import glob
import os
import json
def get_scale(img, divisions=21, x_shift=-5):
    """Sample the color-scale column of ``img``.

    Reads one pixel per scale division at column ``x_shift`` (negative
    values count from the right edge) and returns the samples in reversed
    (bottom-to-top) order.
    """
    width = img.getSize()[0]
    height = img.getSize()[1]
    column = x_shift if x_shift >= 0 else width + x_shift
    sample_rows = [height * ((i + 0.5) / divisions) for i in range(divisions)]
    samples = [img.at(column, row) for row in sample_rows]
    samples.reverse()
    return samples
def scale_image(img):
    """Normalize pixel values in place, dispatching on the pixel type."""
    handler = scale_image_hsv if isinstance(img.at(0), aims.AimsHSV) \
        else scale_image_rgb
    return handler(img)
def scale_image_rgb(img):
    """Rescale every RGB pixel in place to a constant vector norm of 128."""
    for row in range(img.getSize()[1]):
        for col in range(img.getSize()[0]):
            pixel = img.at(col, row)
            norm = np.sqrt(pixel[0] * pixel[0]
                            + pixel[1] * pixel[1]
                            + pixel[2] * pixel[2])
            if norm == 0:
                # Black stays black; avoids dividing by zero.
                img.setValue([0, 0, 0], col, row)
            else:
                factor = float(128. / norm)
                img.setValue(aims.AimsRGB(int(pixel[0] * factor),
                                          int(pixel[1] * factor),
                                          int(pixel[2] * factor)), col, row)
def scale_image_hsv(img):
    # Normalize every HSV pixel in place: force full brightness and boost
    # saturation so hue information dominates.
    for y in range(img.getSize()[1]):
        for x in range(img.getSize()[0]):
            # aims.AimsHSV(...) copies the pixel; the mutation below only
            # takes effect via the setValue() call at the end.
            hsv = aims.AimsHSV(img.at(x, y))
            #if hsv[1] < 10 or hsv[0] > 200:
            hsv[2] = 255
            # NOTE(review): magic thresholds — hue > 200 (wrapped reds) and
            # saturation < 10 (near-greys) are both collapsed to hue 0;
            # confirm against the altitude color scale being decoded.
            if hsv[0] > 200:
                hsv[0] = 0
            if hsv[1] < 10:
                hsv[0] = 0
            if hsv[0] > 10:
                hsv[1] = 255
            else:
                # Low hues: ramp saturation between a 100-255 baseline and
                # full saturation, proportional to hue/10.
                scl = float(hsv[0]) / 10.
                hsv[1] = int(round((100. + hsv[1] * 155. / 255.) * (1. - scl) + 255. * scl))
            img.setValue(hsv, x, y)
def get_float_altitude(img, src_scl, dst_scl):
    """Convert a color-coded altitude image into a float altitude volume.

    Each pixel color is matched against the color scale ``dst_scl`` and the
    corresponding altitude is linearly interpolated from ``src_scl``.

    Parameters
    ----------
    img: color image whose pixels encode altitude
    src_scl: sequence of altitudes, one per color-scale entry
    dst_scl: sequence of scale colors, same length/order as ``src_scl``

    Returns
    -------
    aims FLOAT volume of the same size as ``img`` with interpolated altitudes.
    """
    def interpolate(rgb, src_scl, dst_scl):
        # Find the two closest scale colors and project rgb onto the segment
        # between them to obtain a fractional position.
        rgb = np.asarray(rgb).astype(float)
        dist = np.sum((dst_scl - rgb) ** 2, axis=1)
        dmin = np.argmin(dist)
        if dmin == 0:
            ind = [dmin, dmin + 1]
        elif dmin == dst_scl.shape[0] - 1:
            ind = [dmin - 1, dmin]
        else:
            if dist[dmin - 1] < dist[dmin + 1]:
                ind = [dmin - 1, dmin]
            else:
                ind = [dmin, dmin + 1]
        # project rgb onto the segment between the two scale colors
        axis = dst_scl[ind[1]] - dst_scl[ind[0]]
        d2 = np.sum(axis ** 2)
        if d2 == 0:
            # Degenerate scale segment: fall back to the last altitude.
            return src_scl[-1]
        x = (rgb - dst_scl[ind[0]]).dot(axis) / d2
        # Clamp to the segment so we never extrapolate past either entry.
        if x < 0:
            x = 0.
        elif x > 1:
            x = 1.
        return src_scl[ind[0]] + (src_scl[ind[1]] - src_scl[ind[0]]) * x

    # BUG FIX: the original converted the module-global ``scl_map`` here,
    # silently ignoring the ``dst_scl`` argument — it only worked because the
    # caller happened to pass that same global.
    dst_scl = np.asarray(dst_scl).astype(float)
    new_img = aims.Volume(img.getSize(), dtype='FLOAT')
    new_img.header()['voxel_size'] = img.header()['voxel_size']
    for y in range(img.getSize()[1]):
        for x in range(img.getSize()[0]):
            rgb = img.at(x, y)
            alt = interpolate(rgb, src_scl, dst_scl)
            new_img.setValue(alt, x, y)
    return new_img
# --- Pipeline driver (runs at import time) ----------------------------------
# For every raw altitude screenshot: normalize its colors, sample its color
# scale, and — when a hand-made .json altitude list exists — rebuild a float
# altitude map plus a globally-rescaled preview image.
images = sorted(glob.glob('altitude/raw/*.jpg'))
if not os.path.exists('altitude/intens'):
    os.mkdir('altitude/intens')
if not os.path.exists('altitude/real'):
    os.mkdir('altitude/real')
scales = {}
# Running global altitude range over all annotated images (sentinel init).
scl_min = 1000
scl_max = -10
print('read scales')
for image in images:
    # A sibling .json file lists the altitudes of the image's scale ticks.
    src_scl = image.replace('.jpg', '.json')
    if os.path.exists(src_scl):
        scale = json.load(open(src_scl))['altitudes']
        scales[image] = scale
        m = min(scale)
        if m < scl_min:
            scl_min = m
        m = max(scale)
        if m > scl_max:
            scl_max = m
print('global min/max:', scl_min, '/', scl_max)
glob_scale = {'scale_min': scl_min, 'scale_max': scl_max}
# NOTE(review): files opened inline for json.dump are never explicitly
# closed (relies on GC).
json.dump(glob_scale, open('altitude/real/global.json', 'w'))
for image in images:
    print('read:', image)
    img_rgb = aims.read(image)
    ## scale in RGB space
    #scale_image(img_rgb)
    out_img = image.replace('/raw/', '/intens/')
    #print('write:', out_img)
    #aims.write(img_rgb, out_img)
    # re-scale in HSV space
    c = aims.Converter_Volume_RGB_Volume_HSV()
    img = c(img_rgb)
    scale_image(img)
    # go back to RGB
    #c = aims.Converter_Volume_HSV_Volume_RGB()
    #img = c(img)
    out_img = out_img.replace('.jpg', '.ima')
    print('write:', out_img)
    aims.write(img, out_img)
    # Sample the color scale of the normalized image and save it as JSON.
    scl_map = get_scale(img)
    json_d = {'scale_map': [list(x) for x in scl_map]}
    out_img_json = out_img.replace('.ima', '.json')
    json.dump(json_d, open(out_img_json, 'w'))
    scale = scales.get(image)
    if scale is not None:
        print('build real alt')
        #c = aims.Converter_Volume_HSV_Volume_RGB()
        # Map colors back to float altitudes using this image's own scale.
        flt_alt = get_float_altitude(img, scale, scl_map)
        out_flt_alt_file = image.replace('/raw/', '/real/').replace(
            '.jpg', '.ima')
        print('write:', out_flt_alt_file)
        aims.write(flt_alt, out_flt_alt_file)
        # write as jpeg
        # Rescale to 0-255 over the *global* range so previews are comparable.
        flt_alt = (flt_alt - scl_min) * 255.49 / (scl_max - scl_min)
        c = aims.Converter_Volume_FLOAT_Volume_U16()
        u16_alt = c(flt_alt)
        out_u16_alt_file = out_flt_alt_file.replace('.ima', '.jpg')
        print('write:', out_u16_alt_file)
        aims.write(u16_alt, out_u16_alt_file, format='JPG')
| [
"os.path.exists",
"soma.aims.write",
"numpy.sqrt",
"soma.aims.Converter_Volume_RGB_Volume_HSV",
"numpy.asarray",
"soma.aims.Converter_Volume_FLOAT_Volume_U16",
"numpy.sum",
"os.mkdir",
"soma.aims.read",
"numpy.argmin",
"glob.glob"
] | [((3336, 3367), 'glob.glob', 'glob.glob', (['"""altitude/raw/*.jpg"""'], {}), "('altitude/raw/*.jpg')\n", (3345, 3367), False, 'import glob\n'), ((3376, 3409), 'os.path.exists', 'os.path.exists', (['"""altitude/intens"""'], {}), "('altitude/intens')\n", (3390, 3409), False, 'import os\n'), ((3415, 3442), 'os.mkdir', 'os.mkdir', (['"""altitude/intens"""'], {}), "('altitude/intens')\n", (3423, 3442), False, 'import os\n'), ((3450, 3481), 'os.path.exists', 'os.path.exists', (['"""altitude/real"""'], {}), "('altitude/real')\n", (3464, 3481), False, 'import os\n'), ((3487, 3512), 'os.mkdir', 'os.mkdir', (['"""altitude/real"""'], {}), "('altitude/real')\n", (3495, 3512), False, 'import os\n'), ((3650, 3673), 'os.path.exists', 'os.path.exists', (['src_scl'], {}), '(src_scl)\n', (3664, 3673), False, 'import os\n'), ((4132, 4148), 'soma.aims.read', 'aims.read', (['image'], {}), '(image)\n', (4141, 4148), False, 'from soma import aims\n'), ((4350, 4388), 'soma.aims.Converter_Volume_RGB_Volume_HSV', 'aims.Converter_Volume_RGB_Volume_HSV', ([], {}), '()\n', (4386, 4388), False, 'from soma import aims\n'), ((4599, 4623), 'soma.aims.write', 'aims.write', (['img', 'out_img'], {}), '(img, out_img)\n', (4609, 4623), False, 'from soma import aims\n'), ((2072, 2108), 'numpy.sum', 'np.sum', (['((dst_scl - rgb) ** 2)'], {'axis': '(1)'}), '((dst_scl - rgb) ** 2, axis=1)\n', (2078, 2108), True, 'import numpy as np\n'), ((2199, 2214), 'numpy.argmin', 'np.argmin', (['dist'], {}), '(dist)\n', (2208, 2214), True, 'import numpy as np\n'), ((2588, 2605), 'numpy.sum', 'np.sum', (['(axis ** 2)'], {}), '(axis ** 2)\n', (2594, 2605), True, 'import numpy as np\n'), ((5153, 5190), 'soma.aims.write', 'aims.write', (['flt_alt', 'out_flt_alt_file'], {}), '(flt_alt, out_flt_alt_file)\n', (5163, 5190), False, 'from soma import aims\n'), ((5296, 5336), 'soma.aims.Converter_Volume_FLOAT_Volume_U16', 'aims.Converter_Volume_FLOAT_Volume_U16', ([], {}), '()\n', (5334, 5336), False, 'from soma import 
aims\n'), ((5484, 5535), 'soma.aims.write', 'aims.write', (['u16_alt', 'out_u16_alt_file'], {'format': '"""JPG"""'}), "(u16_alt, out_u16_alt_file, format='JPG')\n", (5494, 5535), False, 'from soma import aims\n'), ((798, 858), 'numpy.sqrt', 'np.sqrt', (['(rgb[0] * rgb[0] + rgb[1] * rgb[1] + rgb[2] * rgb[2])'], {}), '(rgb[0] * rgb[0] + rgb[1] * rgb[1] + rgb[2] * rgb[2])\n', (805, 858), True, 'import numpy as np\n'), ((2890, 2909), 'numpy.asarray', 'np.asarray', (['scl_map'], {}), '(scl_map)\n', (2900, 2909), True, 'import numpy as np\n'), ((1867, 1882), 'numpy.asarray', 'np.asarray', (['rgb'], {}), '(rgb)\n', (1877, 1882), True, 'import numpy as np\n')] |
from __future__ import absolute_import, division
import logging
import time
from builtins import int
import numpy as np
from future.utils import raise_with_traceback
from scipy.stats import ks_2samp
from sklearn.metrics import silhouette_score
from .mediods import k_medoids
from .tfidf import get_n_top_keywords
from .utils import split_to_chunks
from .word2vec import text2ids, train_word2vec
# Default tuning parameters for the segmentation helpers below.
DEFAULT_T = 10                 # KS window length (in chunks) for ZV distances
DEFAULT_CHUNK_SIZE = 50        # number of words per similarity chunk
DEFAULT_N_TOP_KEYWORDS = 1000  # presumably for get_n_top_keywords — confirm
DEFAULT_CLUSTERING_K = 2       # presumably k for k_medoids — not used in visible code
DEFAULT_CLUSTERING_SPAWN = 10  # presumably clustering restarts — not used in visible code
def _calculate_similarity_matrix(model, words, chunk_size=DEFAULT_CHUNK_SIZE):
    """Build a matrix of pairwise word2vec similarities, one row per chunk.

    Each row holds the flattened upper-triangular (i < j) similarities of
    one chunk's words; trailing entries of a partial last chunk stay zero.
    """
    word_chunks = split_to_chunks(words, chunk_size)
    logging.debug("No. of words: {}, Chunk size: {}".format(
        len(words), chunk_size))
    n_chunks = int(
        len(words) / chunk_size) + (1 if (len(words) % chunk_size != 0) else 0)
    logging.debug("No. of chunks {}".format(n_chunks))
    # number of unordered word pairs inside one full chunk
    row_length = (chunk_size * (chunk_size - 1)) // 2
    sim_matrix = np.zeros((n_chunks, row_length))
    pair_similarity = np.vectorize(
        lambda a, b: model.similarity(a, b), otypes=[float])
    for row, chunk in enumerate(word_chunks):
        offset = 0
        for left in range(len(chunk)):
            sims = pair_similarity(chunk[left], chunk[left + 1:len(chunk)])
            sim_matrix[row, offset:offset + len(sims)] = sims
            offset += len(sims)
    logging.debug("Similarity matrix shape {}".format(
        sim_matrix.shape))
    return sim_matrix
def _calculate_zv(first_mat, i, second_mat, j, T=DEFAULT_T):
    """Mean Kolmogorov-Smirnov statistic between row *i* of `first_mat`
    and the T-1 rows of `second_mat` preceding row *j*."""
    ks_statistics = [
        ks_2samp(row, first_mat[i])[0]
        for row in second_mat[j - T + 1:j]
    ]
    return np.average(ks_statistics)
def _calculate_zv_distances(similarites_matrix, T=DEFAULT_T):
    """ZV distance between consecutive chunk windows, one value per position.

    Raises if there are fewer chunks than the window size T.
    """
    n_chunks = similarites_matrix.shape[0]
    if n_chunks < T:
        err_msg = "The are to few chunks for calculate ZV distance, chunks number:{} must be bigger the T:{}.".format(
            n_chunks, T)
        logging.error(err_msg)
        raise_with_traceback(Exception(err_msg))
    distances = np.zeros(n_chunks - T - 1)
    for idx in range(distances.shape[0]):
        distances[idx] = _calculate_zv(similarites_matrix, T + idx,
                                       similarites_matrix, T + idx - 1, T)
    return distances
def _calculate_dzv_distances(first_similarites_matrix,
                             second_similarites_matrix,
                             T=DEFAULT_T):
    """Pairwise DZV distance matrix between two documents' chunk sequences.

    Entry (i, j) combines within- and cross-document ZV terms:
    |ZV11 + ZV22 - ZV12 - ZV21|.
    """
    n_rows = first_similarites_matrix.shape[0] - T - 1
    n_cols = second_similarites_matrix.shape[0] - T - 1
    DZV = np.zeros((n_rows, n_cols))
    for i in range(n_rows):
        # within-first term depends only on i; hoist it out of the inner loop
        zv_11 = _calculate_zv(first_similarites_matrix, T + i,
                              first_similarites_matrix, T + i - 1, T)
        for j in range(n_cols):
            zv_22 = _calculate_zv(second_similarites_matrix, T + j,
                                  second_similarites_matrix, T + j - 1, T)
            zv_12 = _calculate_zv(first_similarites_matrix, T + i,
                                  second_similarites_matrix, T + j - 1, T)
            zv_21 = _calculate_zv(second_similarites_matrix, T + j,
                                  first_similarites_matrix, T + i - 1, T)
            DZV[i, j] = abs(zv_11 + zv_22 - zv_12 - zv_21)
    return DZV
def zv_process(text,
               model,
               stop_words,
               keywords,
               T=DEFAULT_T,
               chunk_size=DEFAULT_CHUNK_SIZE):
    """Tokenise *text*, build its chunk similarity matrix and return the
    ZV distance series (timings are logged at debug level)."""
    t0 = time.time()
    ids, words = text2ids(
        model=model,
        text=text,
        stop_words=stop_words,
        acceptable_tokens=keywords,
        remove_skipped_tokens=True)
    t1 = time.time()
    logging.debug("word2vec runs {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    sim_mat = _calculate_similarity_matrix(model, words, chunk_size)
    t1 = time.time()
    logging.debug(
        "similarity matrix calculation took {:.4f} seconds".format(t1 - t0))
    del ids, words
    t0 = time.time()
    ZVs = _calculate_zv_distances(sim_mat, T)
    t1 = time.time()
    logging.debug(
        "ZV distances calculation took {:.4f} seconds".format(t1 - t0))
    del sim_mat
    return ZVs
def dzv_process(first_text,
                second_text,
                model,
                stop_words,
                keywords,
                T=DEFAULT_T,
                chunk_size=DEFAULT_CHUNK_SIZE):
    """Compute the DZV distance matrix between two texts.

    Each text is tokenised and turned into a chunk similarity matrix; the
    two matrices are then combined into the DZV matrix. Intermediate data
    is deleted as soon as it is no longer needed to bound memory use.
    """
    t0 = time.time()
    first_ids, first_words = text2ids(
        model=model,
        text=first_text,
        stop_words=stop_words,
        acceptable_tokens=keywords,
        remove_skipped_tokens=True)
    t1 = time.time()
    logging.debug("first text word2vec runs {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    first_sim_mat = _calculate_similarity_matrix(model, first_words,
                                                 chunk_size)
    t1 = time.time()
    logging.debug(
        "first text similarity matrix calculation took {:.4f} seconds".format(
            t1 - t0))
    del first_ids, first_words
    t0 = time.time()
    second_ids, second_words = text2ids(
        model=model,
        text=second_text,
        stop_words=stop_words,
        acceptable_tokens=keywords,
        remove_skipped_tokens=True)
    t1 = time.time()
    logging.debug(
        "second text word2vec runs {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    second_sim_mat = _calculate_similarity_matrix(model, second_words,
                                                  chunk_size)
    t1 = time.time()
    logging.debug(
        "second text similarity matrix calculation took {:.4f} seconds".format(
            t1 - t0))
    del second_ids, second_words
    t0 = time.time()
    DZV = _calculate_dzv_distances(
        first_similarites_matrix=first_sim_mat,
        second_similarites_matrix=second_sim_mat,
        T=T)
    t1 = time.time()
    logging.debug(
        "DZV matrix calculation took {:.4f} seconds".format(t1 - t0))
    del first_sim_mat, second_sim_mat
    return DZV
def _preprocess(texts, model=None, n_top_keywords=DEFAULT_N_TOP_KEYWORDS):
    """Train (or reuse) a word2vec model and pick the top TF-IDF keywords
    that actually appear in the model vocabulary."""
    stop_words = []
    if model is None:
        model = train_word2vec(texts, stop_words, iter=20)
    # over-fetch candidates so enough survive the vocabulary filter
    candidates = get_n_top_keywords(texts, stop_words, int(n_top_keywords * 1.5))
    known_keywords = [
        candidate[0] for candidate in candidates
        if candidate[0] in model.wv.vocab
    ]
    known_keywords = known_keywords[:n_top_keywords]
    return stop_words, model, known_keywords
def execute_algorithm(first_text,
                      second_text,
                      model=None,
                      T=DEFAULT_T,
                      chunk_size=DEFAULT_CHUNK_SIZE,
                      n_top_keywords=DEFAULT_N_TOP_KEYWORDS):
    """Run the full pipeline: ZV on the concatenated texts, DZV between the
    two texts, and clustering of the DZV matrix.

    Returns (ZV, DZV, clustering_result).
    """
    del_model = model is None
    t0 = time.time()
    stop_words, model, n_top_keyword = _preprocess(
        texts=[first_text, second_text],
        model=model,
        n_top_keywords=n_top_keywords)
    t1 = time.time()
    logging.debug(
        "Preprocessing took {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    ZV = zv_process(
        text=first_text + " " + second_text,
        model=model,
        stop_words=stop_words,
        keywords=n_top_keyword,
        T=T,
        chunk_size=chunk_size)
    t1 = time.time()
    logging.debug(
        "ZV calculation took {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    DZV = dzv_process(
        first_text=first_text,
        second_text=second_text,
        model=model,
        stop_words=stop_words,
        keywords=n_top_keyword,
        T=T,
        chunk_size=chunk_size)
    t1 = time.time()
    logging.debug(
        "DZV calculation took {:.4f} seconds".format(t1 - t0))
    # drop the model only if it was trained here, not supplied by the caller
    if del_model:
        del model
    clustering_result = execute_dzv_clustering(DZV)
    return ZV, DZV, clustering_result
def execute_zv(text,
               model=None,
               T=DEFAULT_T,
               chunk_size=DEFAULT_CHUNK_SIZE,
               n_top_keywords=DEFAULT_N_TOP_KEYWORDS):
    """Preprocess a single text and compute its ZV distance series."""
    del_model = model is None
    t0 = time.time()
    stop_words, model, n_top_keyword = _preprocess(
        texts=[text], model=model, n_top_keywords=n_top_keywords)
    t1 = time.time()
    logging.debug(
        "Preprocessing took {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    ZV = zv_process(
        text=text,
        model=model,
        stop_words=stop_words,
        keywords=n_top_keyword,
        T=T,
        chunk_size=chunk_size)
    t1 = time.time()
    logging.debug(
        "ZV calculation took {:.4f} seconds".format(t1 - t0))
    # drop the model only if it was trained here, not supplied by the caller
    if del_model:
        del model
    return ZV
def execute_dzv(first_text,
                second_text,
                model=None,
                T=DEFAULT_T,
                chunk_size=DEFAULT_CHUNK_SIZE,
                n_top_keywords=DEFAULT_N_TOP_KEYWORDS):
    """Preprocess two texts and compute their DZV distance matrix."""
    del_model = model is None
    t0 = time.time()
    stop_words, model, n_top_keyword = _preprocess(
        texts=[first_text, second_text],
        model=model,
        n_top_keywords=n_top_keywords)
    t1 = time.time()
    logging.debug(
        "Preprocessing took {:.4f} seconds".format(t1 - t0))
    t0 = time.time()
    DZV = dzv_process(
        first_text=first_text,
        second_text=second_text,
        model=model,
        stop_words=stop_words,
        keywords=n_top_keyword,
        T=T,
        chunk_size=chunk_size)
    t1 = time.time()
    logging.debug(
        "DZV calculation took {:.4f} seconds".format(t1 - t0))
    # drop the model only if it was trained here, not supplied by the caller
    if del_model:
        del model
    return DZV
def execute_dzv_clustering(dzv,
                           k=DEFAULT_CLUSTERING_K,
                           spawn=DEFAULT_CLUSTERING_SPAWN):
    """Cluster the rows of a DZV matrix with k-medoids.

    Args:
        dzv: DZV distance matrix; each row is treated as a point.
        k: number of clusters (clamped to at least 2).
        spawn: number of random restarts for k-medoids (clamped to at least 1).

    Returns:
        (labels, distances, silhouette): 1-based cluster label per row,
        distance of each row to its medoid, and the silhouette score of
        the clustering.
    """
    if k < 2:
        k = 2
    if spawn < 1:
        spawn = 1

    def distance(a, b):
        return np.linalg.norm(a - b)

    start_time = time.time()
    diameter, medoids = k_medoids(
        k=k,
        points=dzv,
        distance=distance,
        spawn=spawn,
        equality=distance,
        verbose=True)
    end_time = time.time()
    logging.debug(
        "DZV clustering took {:.4f} seconds".format(end_time - start_time))
    logging.debug(
        "Clustering to {} clusters with spawn of {} gives diameter of {:.4f}".
        format(k, spawn, diameter))
    # Fix: np.int was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin int is the documented replacement.
    labels = np.zeros(dzv.shape[0], dtype=int)
    distances = np.zeros(dzv.shape[0])
    for i, row in enumerate(dzv):
        # assign each row to the nearest medoid (labels are 1-based)
        row_distances = [distance(medoid.kernel, row) for medoid in medoids]
        min_index = np.argmin(row_distances)
        labels[i] = min_index + 1
        distances[i] = row_distances[min_index]
    silhouette = silhouette_score(dzv, labels, distance)
    logging.debug(
        "Clustering to {} clusters gives silhouette score of {:.4f}".format(
            k, silhouette))
    return labels, distances, silhouette
| [
"numpy.average",
"numpy.zeros",
"numpy.apply_along_axis",
"numpy.linalg.norm",
"numpy.argmin",
"builtins.int",
"sklearn.metrics.silhouette_score",
"time.time",
"logging.error"
] | [((1002, 1049), 'numpy.zeros', 'np.zeros', (['(chunks_num, similarites_vector_size)'], {}), '((chunks_num, similarites_vector_size))\n', (1010, 1049), True, 'import numpy as np\n'), ((1744, 1815), 'numpy.apply_along_axis', 'np.apply_along_axis', (['ks_2samp', '(1)', 'second_mat[j - T + 1:j]', 'first_mat[i]'], {}), '(ks_2samp, 1, second_mat[j - T + 1:j], first_mat[i])\n', (1763, 1815), True, 'import numpy as np\n'), ((1891, 1911), 'numpy.average', 'np.average', (['ks_stats'], {}), '(ks_stats)\n', (1901, 1911), True, 'import numpy as np\n'), ((2282, 2310), 'numpy.zeros', 'np.zeros', (['(chunks_num - T - 1)'], {}), '(chunks_num - T - 1)\n', (2290, 2310), True, 'import numpy as np\n'), ((2652, 2754), 'numpy.zeros', 'np.zeros', (['(first_similarites_matrix.shape[0] - T - 1, second_similarites_matrix.shape\n [0] - T - 1)'], {}), '((first_similarites_matrix.shape[0] - T - 1, \n second_similarites_matrix.shape[0] - T - 1))\n', (2660, 2754), True, 'import numpy as np\n'), ((3657, 3668), 'time.time', 'time.time', ([], {}), '()\n', (3666, 3668), False, 'import time\n'), ((3854, 3865), 'time.time', 'time.time', ([], {}), '()\n', (3863, 3865), False, 'import time\n'), ((3964, 3975), 'time.time', 'time.time', ([], {}), '()\n', (3973, 3975), False, 'import time\n'), ((4060, 4071), 'time.time', 'time.time', ([], {}), '()\n', (4069, 4071), False, 'import time\n'), ((4287, 4298), 'time.time', 'time.time', ([], {}), '()\n', (4296, 4298), False, 'import time\n'), ((4360, 4371), 'time.time', 'time.time', ([], {}), '()\n', (4369, 4371), False, 'import time\n'), ((4803, 4814), 'time.time', 'time.time', ([], {}), '()\n', (4812, 4814), False, 'import time\n'), ((5028, 5039), 'time.time', 'time.time', ([], {}), '()\n', (5037, 5039), False, 'import time\n'), ((5216, 5227), 'time.time', 'time.time', ([], {}), '()\n', (5225, 5227), False, 'import time\n'), ((5378, 5389), 'time.time', 'time.time', ([], {}), '()\n', (5387, 5389), False, 'import time\n'), ((5584, 5595), 'time.time', 
'time.time', ([], {}), '()\n', (5593, 5595), False, 'import time\n'), ((5812, 5823), 'time.time', 'time.time', ([], {}), '()\n', (5821, 5823), False, 'import time\n'), ((6001, 6012), 'time.time', 'time.time', ([], {}), '()\n', (6010, 6012), False, 'import time\n'), ((6166, 6177), 'time.time', 'time.time', ([], {}), '()\n', (6175, 6177), False, 'import time\n'), ((6375, 6386), 'time.time', 'time.time', ([], {}), '()\n', (6384, 6386), False, 'import time\n'), ((6550, 6561), 'time.time', 'time.time', ([], {}), '()\n', (6559, 6561), False, 'import time\n'), ((7569, 7580), 'time.time', 'time.time', ([], {}), '()\n', (7578, 7580), False, 'import time\n'), ((7749, 7760), 'time.time', 'time.time', ([], {}), '()\n', (7758, 7760), False, 'import time\n'), ((7873, 7884), 'time.time', 'time.time', ([], {}), '()\n', (7882, 7884), False, 'import time\n'), ((8094, 8105), 'time.time', 'time.time', ([], {}), '()\n', (8103, 8105), False, 'import time\n'), ((8219, 8230), 'time.time', 'time.time', ([], {}), '()\n', (8228, 8230), False, 'import time\n'), ((8461, 8472), 'time.time', 'time.time', ([], {}), '()\n', (8470, 8472), False, 'import time\n'), ((8926, 8937), 'time.time', 'time.time', ([], {}), '()\n', (8935, 8937), False, 'import time\n'), ((9071, 9082), 'time.time', 'time.time', ([], {}), '()\n', (9080, 9082), False, 'import time\n'), ((9195, 9206), 'time.time', 'time.time', ([], {}), '()\n', (9204, 9206), False, 'import time\n'), ((9390, 9401), 'time.time', 'time.time', ([], {}), '()\n', (9399, 9401), False, 'import time\n'), ((9817, 9828), 'time.time', 'time.time', ([], {}), '()\n', (9826, 9828), False, 'import time\n'), ((9997, 10008), 'time.time', 'time.time', ([], {}), '()\n', (10006, 10008), False, 'import time\n'), ((10121, 10132), 'time.time', 'time.time', ([], {}), '()\n', (10130, 10132), False, 'import time\n'), ((10363, 10374), 'time.time', 'time.time', ([], {}), '()\n', (10372, 10374), False, 'import time\n'), ((10814, 10825), 'time.time', 'time.time', ([], {}), 
'()\n', (10823, 10825), False, 'import time\n'), ((11007, 11018), 'time.time', 'time.time', ([], {}), '()\n', (11016, 11018), False, 'import time\n'), ((11263, 11299), 'numpy.zeros', 'np.zeros', (['dzv.shape[0]'], {'dtype': 'np.int'}), '(dzv.shape[0], dtype=np.int)\n', (11271, 11299), True, 'import numpy as np\n'), ((11316, 11338), 'numpy.zeros', 'np.zeros', (['dzv.shape[0]'], {}), '(dzv.shape[0])\n', (11324, 11338), True, 'import numpy as np\n'), ((11595, 11634), 'sklearn.metrics.silhouette_score', 'silhouette_score', (['dzv', 'labels', 'distance'], {}), '(dzv, labels, distance)\n', (11611, 11634), False, 'from sklearn.metrics import silhouette_score\n'), ((2199, 2221), 'logging.error', 'logging.error', (['err_msg'], {}), '(err_msg)\n', (2212, 2221), False, 'import logging\n'), ((7014, 7039), 'builtins.int', 'int', (['(n_top_keywords * 1.5)'], {}), '(n_top_keywords * 1.5)\n', (7017, 7039), False, 'from builtins import int\n'), ((10774, 10795), 'numpy.linalg.norm', 'np.linalg.norm', (['(a - b)'], {}), '(a - b)\n', (10788, 10795), True, 'import numpy as np\n'), ((11470, 11494), 'numpy.argmin', 'np.argmin', (['row_distances'], {}), '(row_distances)\n', (11479, 11494), True, 'import numpy as np\n')] |
# encoding: utf-8
# butterfly.py
# TODO fix documentation
import numpy as np
from math import sqrt
from numba import jit
from scipy.linalg import block_diag
from scipy.stats import chi
from .basics import get_D, radius, rnsimp
NQ = 3
@jit(nopython=True)
def butterfly_generating_vector(n):
    '''
    Generates a vector `u` used to construct random butterfly orthogonal
    matrices.

    Args:
    =====
    n: size of generating vector, n = N + 1 to construct random
       butterfly orthogonal matrix of size N x N.

    Returns:
    ========
    u: generating vector used to calculate angles for random butterfly
       orthogonal matrices.
    '''
    # NOTE(review): the sampling scheme below appears to draw `u` so that
    # the induced butterfly rotation is appropriately distributed; the
    # code is numba-nopython compiled, so keep constructs simple. TODO:
    # confirm against the butterfly-matrix reference this was ported from.
    l = n // 2 - 1
    r = np.random.rand(n-1)
    u = np.zeros(n)
    for i in range(l):
        # fill the component pair (u[m-2], u[m-1]) working from the end inwards
        m = n - 2*i
        s = np.sin(2 * np.pi * r[m-2])
        c = np.cos(2 * np.pi * r[m-2])
        # product of fractional powers of earlier uniform draws -- presumably
        # the radial scaling of the pair; TODO confirm.
        pos = n - 2*np.arange(1, i+1) - 1
        ds = 1. / (pos + 1)
        p = np.prod(r[pos]**ds)
        u[m - 1] = np.sqrt(1. - r[m-3]**(2./(m-3))) * p * s
        u[m - 2] = np.sqrt(1. - r[m-3]**(2./(m-3))) * p * c
    # the last two (n even) or three (n odd) components use the first draws
    s = np.sin(2 * np.pi * r[0])
    c = np.cos(2 * np.pi * r[0])
    pos = n - 2*np.arange(1, l+1) - 1
    ds = 1. / (pos + 1)
    p = np.prod(r[pos]**ds)
    if n % 2 == 0:
        u[0] = c * p
        u[1] = s * p
    else:
        u[2] = (2 * r[1] - 1) * p
        u[1] = 2 * np.sqrt(r[1] * (1 - r[1])) * p * s
        u[0] = 2 * np.sqrt(r[1] * (1 - r[1])) * p * c
    return u
@jit(nopython=True)
def butterfly_angles(u):
    '''
    Computes angles (in radians) from components of the generating vector
    `u` for random butterfly orthogonal matrices.

    Args:
    =====
    u: a generating vector for random butterfly orthogonal matrices, use
       butterfly_generating_vector() to obtain one.

    Returns:
    ========
    thetas: a 1-D array of angles, one per consecutive component pair of `u`.
    '''
    return np.arctan2(u[:-1], u[1:])
@jit(nopython=True)
def cos_sin(N):
    '''
    Draws the cosines and sines of the butterfly angles for a size-N matrix.
    The generating vector is built for the next power of two; when N is not
    a power of two, the angle list is truncated to N-1 and zero-padded.
    '''
    log_n = np.log2(N)
    ceil_log = np.ceil(log_n)
    n = int(2 ** ceil_log)
    thetas = butterfly_angles(butterfly_generating_vector(n))
    if log_n != ceil_log:
        # pad with zero angles when N is not a power of two
        thetas = np.concatenate((thetas[:N-1], np.array([0.] * (n - N))))
    return np.cos(thetas), np.sin(thetas)
@jit(nopython=True)
def butterfly_params(n, k):
    '''
    Draws ceil(k) independent sets of butterfly angle tables and random
    permutations for matrices of size n.

    Returns (cos, sin, perm): angle cosines/sines of length next_power-1
    per row, and one random permutation of range(n) per row.
    '''
    n_sets = int(np.ceil(k))
    next_power = 2 ** int(np.ceil(np.log2(n)))
    cos = np.empty((n_sets, next_power-1))
    sin = np.empty((n_sets, next_power-1))
    perm = np.empty((n_sets, n), np.int32)
    for row in range(n_sets):
        cos[row], sin[row] = cos_sin(n)
        shuffled = np.arange(n)
        np.random.shuffle(shuffled)
        perm[row] = shuffled
    return cos, sin, perm
#@jit(nopython=True)
def butterfly_block(n, cos, sin):
    '''
    Generates the n x n rotation block of a butterfly matrix: cos[n/2-1]
    on the main diagonal, -sin[n/2-1] on the n/2-th superdiagonal and
    +sin[n/2-1] mirrored on the corresponding subdiagonal.
    '''
    half = n // 2
    block = np.diagflat(np.repeat(-sin[half - 1], half), half)
    block -= block.T
    np.fill_diagonal(block, cos[half - 1])
    return block
#@jit(nopython=True)
def butterfly_factors(n, cos, sin, N):
    '''
    Generates a sequence of log_2(n) factors for butterfly orthogonal matrix of
    size n x n.

    Args:
    =====
    n: the next power of two in case the desired size N is not one.
    cos: cosines of generating angles.
    sin: sines of generating angles.
    N: the size of butterfly matrix.

    Returns:
    ========
    Qs: sequence of log_2(n) random butterfly orthogonal matrix factors, each
        of size n x n.
    '''
    if n == 1:
        return np.array(1)
    c = np.log2(n)
    f = np.ceil(c)
    if c != f:
        raise Exception('n is not power of two.')
    Qs = []
    for i in range(int(f)):
        # factor i is block-diagonal with blocks of size 2^(i+1)
        blockn = 2**(i+1)
        nblocks = n//blockn
        blocks = [butterfly_block(blockn, cos[blockn*j:], sin[blockn*j:]) \
            for j in range(nblocks)]
        Qs.append(block_diag(*blocks))
        # When N is not a power of two, the factor's block that straddles
        # row/column N is partially reset to the identity so that the
        # top-left N x N submatrix stays orthogonal.
        # NOTE(review): the index arithmetic below is delicate -- verify
        # with a non-power-of-two N before modifying.
        l = (N // blockn) * blockn
        h = l + blockn
        if N > (h - blockn//2) and N < h:
            j = blockn//2 - h + N
            ll = l+j
            hh = -j
            di = np.arange(N+hh-ll) + ll
            Qs[i][di, di] = 1.
    return Qs
#@jit(nopython=True)
def butterfly(N, cos=None, sin=None, perm=None):
    '''
    Generates dense random butterfly orthogonal matrix from its factors.

    Args:
    =====
    N: size of the matrix.
    cos, sin: optional precomputed angle tables (both given or both None).
    perm: optional precomputed permutation of range(N); drawn at random
          when None.

    Returns:
    ========
    QP: random butterfly orthogonal matrix of size N x N: QPQP...QP (product of
        NQ QP matrices).
    Qs: butterfly factors
    cs: a tuple of cosines and sines
    perm: the permutation that was used
    '''
    if N == 1:
        return np.array(1)
    if cos is not None:
        cs = (cos, sin)
    else:
        cs = cos_sin(N)
    cos, sin = cs
    # Bug fix: honour a caller-supplied permutation. Previously `perm` was
    # regenerated unconditionally, silently discarding the argument that
    # e.g. butterfly_transform() passes in from butterfly_params().
    if perm is None:
        perm = np.arange(N)
        np.random.shuffle(perm)
    n = len(cos) + 1
    Qs = butterfly_factors(n, cos, sin, N)
    Q = np.eye(N)
    Qs = [q[:N, :N] for q in Qs]
    for q in Qs:
        Q = Q.dot(q)
    QP = Q[:, perm]
    for _ in range(NQ - 1):
        QP = QP.dot(Q[:, perm])
    return QP, Qs, cs, perm
#@jit(nopython=True)
def butterfly_transform(S, cos, sin, perm):
    '''
    Rotates a random simplex by a random butterfly orthogonal matrix.

    Args:
    =====
    S: a matrix [n x n+1] of a random n-simplex.
    cos, sin, perm: optional precomputed butterfly parameters; fresh ones
                    are drawn when cos is None.

    Returns:
    ========
    QS: QS, where Q is a random butterfly matrix.
    '''
    N = S.shape[0]
    if cos is None:
        Q, _, _, _ = butterfly(N)
    else:
        Q, _, _, _ = butterfly(N, cos, sin, perm)
    return Q.dot(S)
#@jit(nopython=True)
def generate_butterfly_weights(d, n, r=None, b_params=None, even=False):
    '''
    Generates weighted points from butterfly-rotated simplexes.

    Args:
    =====
    d: dimension of the points.
    n: requested number of points (t = ceil(n) simplexes are stacked,
       doubled when `even` is False by mirroring through the origin).
    r: optional radii, one per simplex; drawn via radius(d, t) when None.
    b_params: optional (cos, sin, perm) tables from butterfly_params.
    even: when True only the positive points are returned.

    Returns:
    ========
    (M, w): point matrix [D x d] and the matching weight vector of length D.
    '''
    D = get_D(d, n)
    if even:
        t = int(np.ceil(2*n))
    else:
        t = int(np.ceil(n))
    if r is None:
        r = radius(d, t)
    if b_params is None:
        b_params = butterfly_params(d, t)
    S = rnsimp(d)
    cos, sin, perm = b_params
    # stack t independently rotated copies of the base simplex
    M = butterfly_transform(S, cos[0], sin[0], perm[0]).T
    for i in range(1, t):
        L = butterfly_transform(S, cos[i], sin[i], perm[i])
        M = np.vstack((M, L.T))
    # scale each row by its radius; weights are sqrt(d)/r
    M = np.einsum('i,ij->ij', r, M)
    w = sqrt(d) / r
    if even is False:
        M = np.vstack((M, -M))
        w = np.concatenate((w, w))
    # Fix: the original return line was corrupted by a stray "| [" suffix
    # (dataset concatenation artifact); this is the intended statement.
    return M[:D, :], w[:D]
return M[:D, :], w[:D] | [
"numpy.prod",
"numpy.sqrt",
"numpy.random.rand",
"math.sqrt",
"numpy.array",
"numpy.arctan2",
"numpy.einsum",
"numpy.sin",
"numpy.arange",
"numpy.repeat",
"numpy.empty",
"numpy.vstack",
"numpy.concatenate",
"numpy.diagflat",
"numpy.ceil",
"numpy.eye",
"numpy.fill_diagonal",
"numba.... | [((244, 262), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (247, 262), False, 'from numba import jit\n'), ((1468, 1486), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1471, 1486), False, 'from numba import jit\n'), ((1980, 1998), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (1983, 1998), False, 'from numba import jit\n'), ((2307, 2325), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (2310, 2325), False, 'from numba import jit\n'), ((696, 717), 'numpy.random.rand', 'np.random.rand', (['(n - 1)'], {}), '(n - 1)\n', (710, 717), True, 'import numpy as np\n'), ((724, 735), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (732, 735), True, 'import numpy as np\n'), ((1090, 1114), 'numpy.sin', 'np.sin', (['(2 * np.pi * r[0])'], {}), '(2 * np.pi * r[0])\n', (1096, 1114), True, 'import numpy as np\n'), ((1123, 1147), 'numpy.cos', 'np.cos', (['(2 * np.pi * r[0])'], {}), '(2 * np.pi * r[0])\n', (1129, 1147), True, 'import numpy as np\n'), ((1219, 1240), 'numpy.prod', 'np.prod', (['(r[pos] ** ds)'], {}), '(r[pos] ** ds)\n', (1226, 1240), True, 'import numpy as np\n'), ((1933, 1958), 'numpy.arctan2', 'np.arctan2', (['u[:-1]', 'u[1:]'], {}), '(u[:-1], u[1:])\n', (1943, 1958), True, 'import numpy as np\n'), ((2023, 2033), 'numpy.log2', 'np.log2', (['N'], {}), '(N)\n', (2030, 2033), True, 'import numpy as np\n'), ((2042, 2052), 'numpy.ceil', 'np.ceil', (['c'], {}), '(c)\n', (2049, 2052), True, 'import numpy as np\n'), ((2244, 2258), 'numpy.cos', 'np.cos', (['thetas'], {}), '(thetas)\n', (2250, 2258), True, 'import numpy as np\n'), ((2269, 2283), 'numpy.sin', 'np.sin', (['thetas'], {}), '(thetas)\n', (2275, 2283), True, 'import numpy as np\n'), ((2388, 2398), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (2395, 2398), True, 'import numpy as np\n'), ((2447, 2476), 'numpy.empty', 'np.empty', (['(h, next_power - 1)'], {}), '((h, next_power - 1))\n', (2455, 2476), True, 
'import numpy as np\n'), ((2485, 2514), 'numpy.empty', 'np.empty', (['(h, next_power - 1)'], {}), '((h, next_power - 1))\n', (2493, 2514), True, 'import numpy as np\n'), ((2524, 2550), 'numpy.empty', 'np.empty', (['(h, n)', 'np.int32'], {}), '((h, n), np.int32)\n', (2532, 2550), True, 'import numpy as np\n'), ((2887, 2911), 'numpy.repeat', 'np.repeat', (['sin[i - 1]', 'i'], {}), '(sin[i - 1], i)\n', (2896, 2911), True, 'import numpy as np\n'), ((2918, 2944), 'numpy.diagflat', 'np.diagflat', (['(-1 * sdiag)', 'i'], {}), '(-1 * sdiag, i)\n', (2929, 2944), True, 'import numpy as np\n'), ((2962, 2993), 'numpy.fill_diagonal', 'np.fill_diagonal', (['Q', 'cos[i - 1]'], {}), '(Q, cos[i - 1])\n', (2978, 2993), True, 'import numpy as np\n'), ((3564, 3574), 'numpy.log2', 'np.log2', (['n'], {}), '(n)\n', (3571, 3574), True, 'import numpy as np\n'), ((3583, 3593), 'numpy.ceil', 'np.ceil', (['c'], {}), '(c)\n', (3590, 3593), True, 'import numpy as np\n'), ((4902, 4911), 'numpy.eye', 'np.eye', (['N'], {}), '(N)\n', (4908, 4911), True, 'import numpy as np\n'), ((6104, 6131), 'numpy.einsum', 'np.einsum', (['"""i,ij->ij"""', 'r', 'M'], {}), "('i,ij->ij', r, M)\n", (6113, 6131), True, 'import numpy as np\n'), ((792, 820), 'numpy.sin', 'np.sin', (['(2 * np.pi * r[m - 2])'], {}), '(2 * np.pi * r[m - 2])\n', (798, 820), True, 'import numpy as np\n'), ((831, 859), 'numpy.cos', 'np.cos', (['(2 * np.pi * r[m - 2])'], {}), '(2 * np.pi * r[m - 2])\n', (837, 859), True, 'import numpy as np\n'), ((940, 961), 'numpy.prod', 'np.prod', (['(r[pos] ** ds)'], {}), '(r[pos] ** ds)\n', (947, 961), True, 'import numpy as np\n'), ((2366, 2376), 'numpy.ceil', 'np.ceil', (['k'], {}), '(k)\n', (2373, 2376), True, 'import numpy as np\n'), ((2650, 2662), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (2659, 2662), True, 'import numpy as np\n'), ((2671, 2691), 'numpy.random.shuffle', 'np.random.shuffle', (['p'], {}), '(p)\n', (2688, 2691), True, 'import numpy as np\n'), ((3544, 3555), 'numpy.array', 
'np.array', (['(1)'], {}), '(1)\n', (3552, 3555), True, 'import numpy as np\n'), ((4653, 4664), 'numpy.array', 'np.array', (['(1)'], {}), '(1)\n', (4661, 4664), True, 'import numpy as np\n'), ((4784, 4796), 'numpy.arange', 'np.arange', (['N'], {}), '(N)\n', (4793, 4796), True, 'import numpy as np\n'), ((4805, 4828), 'numpy.random.shuffle', 'np.random.shuffle', (['perm'], {}), '(perm)\n', (4822, 4828), True, 'import numpy as np\n'), ((6076, 6095), 'numpy.vstack', 'np.vstack', (['(M, L.T)'], {}), '((M, L.T))\n', (6085, 6095), True, 'import numpy as np\n'), ((6140, 6147), 'math.sqrt', 'sqrt', (['d'], {}), '(d)\n', (6144, 6147), False, 'from math import sqrt\n'), ((6186, 6204), 'numpy.vstack', 'np.vstack', (['(M, -M)'], {}), '((M, -M))\n', (6195, 6204), True, 'import numpy as np\n'), ((6217, 6239), 'numpy.concatenate', 'np.concatenate', (['(w, w)'], {}), '((w, w))\n', (6231, 6239), True, 'import numpy as np\n'), ((2423, 2435), 'numpy.ceil', 'np.ceil', (['log'], {}), '(log)\n', (2430, 2435), True, 'import numpy as np\n'), ((3892, 3911), 'scipy.linalg.block_diag', 'block_diag', (['*blocks'], {}), '(*blocks)\n', (3902, 3911), False, 'from scipy.linalg import block_diag\n'), ((5710, 5724), 'numpy.ceil', 'np.ceil', (['(2 * n)'], {}), '(2 * n)\n', (5717, 5724), True, 'import numpy as np\n'), ((5750, 5760), 'numpy.ceil', 'np.ceil', (['n'], {}), '(n)\n', (5757, 5760), True, 'import numpy as np\n'), ((980, 1022), 'numpy.sqrt', 'np.sqrt', (['(1.0 - r[m - 3] ** (2.0 / (m - 3)))'], {}), '(1.0 - r[m - 3] ** (2.0 / (m - 3)))\n', (987, 1022), True, 'import numpy as np\n'), ((1040, 1082), 'numpy.sqrt', 'np.sqrt', (['(1.0 - r[m - 3] ** (2.0 / (m - 3)))'], {}), '(1.0 - r[m - 3] ** (2.0 / (m - 3)))\n', (1047, 1082), True, 'import numpy as np\n'), ((1164, 1183), 'numpy.arange', 'np.arange', (['(1)', '(l + 1)'], {}), '(1, l + 1)\n', (1173, 1183), True, 'import numpy as np\n'), ((2207, 2232), 'numpy.array', 'np.array', (['([0.0] * (n - N))'], {}), '([0.0] * (n - N))\n', (2215, 2232), True, 
'import numpy as np\n'), ((4105, 4127), 'numpy.arange', 'np.arange', (['(N + hh - ll)'], {}), '(N + hh - ll)\n', (4114, 4127), True, 'import numpy as np\n'), ((878, 897), 'numpy.arange', 'np.arange', (['(1)', '(i + 1)'], {}), '(1, i + 1)\n', (887, 897), True, 'import numpy as np\n'), ((1363, 1389), 'numpy.sqrt', 'np.sqrt', (['(r[1] * (1 - r[1]))'], {}), '(r[1] * (1 - r[1]))\n', (1370, 1389), True, 'import numpy as np\n'), ((1417, 1443), 'numpy.sqrt', 'np.sqrt', (['(r[1] * (1 - r[1]))'], {}), '(r[1] * (1 - r[1]))\n', (1424, 1443), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 1 23:29:40 2021
@author: <NAME>
Converted from Yi Jiang's 'postProcess.m''
"""
import numpy as np
from skimage.transform import rotate, rescale
def sind(x):
    """Sine of *x* given in degrees."""
    return np.sin(np.deg2rad(x))
def cosd(x):
    """Cosine of *x* given in degrees."""
    return np.cos(np.deg2rad(x))
def removePhaseRamp(input, dx):
    """Remove the best-fit linear phase ramp from a complex field.

    Fits phase(y, x) ~ c0 + c1*x + c2*y by least squares on a centred
    pixel grid scaled by *dx*, then subtracts the planar part. The constant
    offset c0 is deliberately not removed.

    Args:
        input: 2-D complex array.
        dx: pixel size used to scale the coordinate grids.

    Returns:
        2-D real array of the ramp-corrected phase.
    """
    Ny = input.shape[0]
    Nx = input.shape[1]
    y = np.linspace(-np.floor(Ny/2), np.ceil(Ny/2)-1, Ny)
    x = np.linspace(-np.floor(Nx/2), np.ceil(Nx/2)-1, Nx)
    [X, Y] = np.meshgrid(x, y)
    X = X*dx
    Y = Y*dx
    phase_image = np.angle(input)
    # fit plane: [1, x, y] @ coeff = phase
    Xf = X.flatten()
    Yf = Y.flatten()
    A = np.array([Xf*0+1, Xf, Yf]).T
    B = phase_image.flatten()
    # Fix: pass rcond=None explicitly -- omitting it raises a FutureWarning
    # and falls back to the old (machine-epsilon based) cutoff.
    coeff, _, _, _ = np.linalg.lstsq(A, B, rcond=None)
    background = X*coeff[1] + Y*coeff[2]
    output = phase_image - background
    return output
def postProcess(obj, rot_angle, px, py, dx):
    """Post-process a reconstructed object: rotate back to 0 degrees, crop
    to the scan extent and remove the residual phase ramp.

    Converted from Matlab postProcess.m by Yi Jiang.

    Args:
        obj: 2-D complex reconstructed object.
        rot_angle: scan rotation angle in degrees.
        px, py: scan position coordinate arrays.
        dx: pixel size of the reconstruction.

    Returns:
        Cropped, ramp-corrected complex object at the original pixel size.
    """
    # rotate scan positions back to 0 degrees
    py_rot = px*-sind(-rot_angle) + py*cosd(-rot_angle)
    px_rot = px*cosd(-rot_angle) + py*sind(-rot_angle)
    # upsample 2x to reduce rotation artifacts (real/imag rotated separately)
    obj_rot_r = rescale(obj.real, 2)
    obj_rot_i = rescale(obj.imag, 2)
    obj_rot_r = rotate(obj_rot_r, -rot_angle)
    obj_rot_i = rotate(obj_rot_i, -rot_angle)
    obj_rot = obj_rot_r + 1j*obj_rot_i
    cen_rot = np.floor(np.size(obj_rot, 1)/2)+1
    dx = dx/2  # pixel size halves after the 2x upsampling
    # crop to the rotated scan bounding box
    y_lb = np.ceil(min(py_rot[0])/dx+cen_rot)
    y_ub = np.floor(max(py_rot[0])/dx+cen_rot)
    x_lb = np.ceil(min(px_rot[0])/dx+cen_rot)
    x_ub = np.floor(max(px_rot[0])/dx+cen_rot)
    obj_crop = obj_rot[int(y_lb):int(y_ub), int(x_lb):int(x_ub)]
    # remove phase ramp while keeping the magnitude
    obj_crop_phase = removePhaseRamp(obj_crop, dx)
    obj_crop = abs(obj_crop) * np.exp(1j*obj_crop_phase)
    # scale back to original pixel size
    obj_crop_r = rescale(obj_crop.real, 1/2)
    obj_crop_i = rescale(obj_crop.imag, 1/2)
    obj_crop = obj_crop_r + 1j*obj_crop_i
    # Fix: the original return line was corrupted by a stray "| [" suffix
    # (dataset concatenation artifact); this is the intended statement.
    return obj_crop
return obj_crop | [
"numpy.ceil",
"skimage.transform.rotate",
"numpy.size",
"numpy.floor",
"numpy.angle",
"numpy.exp",
"numpy.array",
"numpy.cos",
"numpy.linalg.lstsq",
"numpy.sin",
"numpy.meshgrid",
"skimage.transform.rescale"
] | [((218, 241), 'numpy.sin', 'np.sin', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (224, 241), True, 'import numpy as np\n'), ((267, 290), 'numpy.cos', 'np.cos', (['(x * np.pi / 180)'], {}), '(x * np.pi / 180)\n', (273, 290), True, 'import numpy as np\n'), ((496, 513), 'numpy.meshgrid', 'np.meshgrid', (['x', 'y'], {}), '(x, y)\n', (507, 513), True, 'import numpy as np\n'), ((557, 572), 'numpy.angle', 'np.angle', (['input'], {}), '(input)\n', (565, 572), True, 'import numpy as np\n'), ((721, 742), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['A', 'B'], {}), '(A, B)\n', (736, 742), True, 'import numpy as np\n'), ((1143, 1163), 'skimage.transform.rescale', 'rescale', (['obj.real', '(2)'], {}), '(obj.real, 2)\n', (1150, 1163), False, 'from skimage.transform import rotate, rescale\n'), ((1180, 1200), 'skimage.transform.rescale', 'rescale', (['obj.imag', '(2)'], {}), '(obj.imag, 2)\n', (1187, 1200), False, 'from skimage.transform import rotate, rescale\n'), ((1254, 1283), 'skimage.transform.rotate', 'rotate', (['obj_rot_r', '(-rot_angle)'], {}), '(obj_rot_r, -rot_angle)\n', (1260, 1283), False, 'from skimage.transform import rotate, rescale\n'), ((1300, 1329), 'skimage.transform.rotate', 'rotate', (['obj_rot_i', '(-rot_angle)'], {}), '(obj_rot_i, -rot_angle)\n', (1306, 1329), False, 'from skimage.transform import rotate, rescale\n'), ((1847, 1876), 'skimage.transform.rescale', 'rescale', (['obj_crop.real', '(1 / 2)'], {}), '(obj_crop.real, 1 / 2)\n', (1854, 1876), False, 'from skimage.transform import rotate, rescale\n'), ((1892, 1921), 'skimage.transform.rescale', 'rescale', (['obj_crop.imag', '(1 / 2)'], {}), '(obj_crop.imag, 1 / 2)\n', (1899, 1921), False, 'from skimage.transform import rotate, rescale\n'), ((638, 668), 'numpy.array', 'np.array', (['[Xf * 0 + 1, Xf, Yf]'], {}), '([Xf * 0 + 1, Xf, Yf])\n', (646, 668), True, 'import numpy as np\n'), ((1804, 1833), 'numpy.exp', 'np.exp', (['(1.0j * obj_crop_phase)'], {}), '(1.0j * obj_crop_phase)\n', (1810, 
1833), True, 'import numpy as np\n'), ((393, 409), 'numpy.floor', 'np.floor', (['(Ny / 2)'], {}), '(Ny / 2)\n', (401, 409), True, 'import numpy as np\n'), ((408, 423), 'numpy.ceil', 'np.ceil', (['(Ny / 2)'], {}), '(Ny / 2)\n', (415, 423), True, 'import numpy as np\n'), ((449, 465), 'numpy.floor', 'np.floor', (['(Nx / 2)'], {}), '(Nx / 2)\n', (457, 465), True, 'import numpy as np\n'), ((464, 479), 'numpy.ceil', 'np.ceil', (['(Nx / 2)'], {}), '(Nx / 2)\n', (471, 479), True, 'import numpy as np\n'), ((1393, 1412), 'numpy.size', 'np.size', (['obj_rot', '(1)'], {}), '(obj_rot, 1)\n', (1400, 1412), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `fpipy.raw` module."""
import numpy as np
import xarray as xr
import xarray.testing as xrt
import fpipy.raw as fpr
import fpipy.conventions as c
from fpipy.raw import BayerPattern
def test_read_calibration_format(calib_seq):
    """Calibration data must be an xr.Dataset with the expected structure."""
    assert type(calib_seq) is xr.Dataset
    # dimensions every calibration dataset must provide
    expected_dims = (
        c.image_index,
        c.peak_coord,
        c.colour_coord,
        c.setpoint_coord,
    )
    for dim in expected_dims:
        assert dim in calib_seq.dims
    # variables every calibration dataset must provide
    expected_vars = (
        c.number_of_peaks,
        c.wavelength_data,
        c.fwhm_data,
        c.setpoint_data,
        c.sinv_data,
    )
    for var in expected_vars:
        assert var in calib_seq.variables
def test_pattern_strings():
    """Lowercase, uppercase and GenICam aliases must resolve to the pattern."""
    for p in ['gbrg', 'GBRG', 'BayerGB']:
        assert BayerPattern.get(p) is BayerPattern.GBRG
    for p in ['bggr', 'BGGR', 'BayerBG']:
        assert BayerPattern.get(p) is BayerPattern.BGGR
    # Fix: the uppercase alias was mistakenly listed as 'rggb' twice,
    # leaving 'RGGB' untested unlike every other pattern group.
    for p in ['rggb', 'RGGB', 'BayerRG']:
        assert BayerPattern.get(p) is BayerPattern.RGGB
    for p in ['grbg', 'GRBG', 'BayerGR']:
        assert BayerPattern.get(p) is BayerPattern.GRBG
def test_genicam_patterns():
    """GenICam pattern names must alias the matching classic Bayer members."""
    pairs = (
        (BayerPattern.BayerGB, BayerPattern.GBRG),
        (BayerPattern.BayerGR, BayerPattern.GRBG),
        (BayerPattern.BayerBG, BayerPattern.BGGR),
        (BayerPattern.BayerRG, BayerPattern.RGGB),
    )
    for genicam, classic in pairs:
        assert genicam is classic
def test_raw_format(raw):
    """A raw dataset must carry everything radiance calculation needs."""
    assert type(raw) is xr.Dataset
    # Dimensions required for radiance calculations to be possible
    required_dims = [c.peak_coord, c.colour_coord, *c.cfa_dims]
    for dim in required_dims:
        assert dim in raw.dims
    # Variables required for radiance calculations to be possible
    required_vars = (
        c.number_of_peaks,
        c.camera_exposure,
        c.camera_gain,
        c.cfa_pattern_data,
        c.wavelength_data,
        c.sinv_data,
        c.cfa_data,
    )
    for var in required_vars:
        assert var in raw.variables
def test_raw_to_radiance_format(rad_computed):
    """Radiance computed from CFA data must have the expected structure."""
    assert type(rad_computed) is xr.Dataset
    # Dimensions expected of computed radiance
    for dim in c.radiance_dims:
        assert dim in rad_computed.dims
    # Variables expected of computed radiance
    required_vars = (
        c.radiance_data,
        c.image_index,
        c.peak_coord,
        c.number_of_peaks,
        c.camera_exposure,
        c.camera_gain,
        c.cfa_pattern_data,
        c.wavelength_data,
        c.sinv_data,
    )
    for var in required_vars:
        assert var in rad_computed.variables
def test_all_peaks_computed(raw, rad_computed):
    """Each raw image must contribute one radiance band per passband peak."""
    band_counts = np.bincount(rad_computed[c.image_index])
    for idx in raw[c.image_index]:
        expected = raw.sel(**{c.image_index: idx})[c.number_of_peaks]
        assert band_counts[idx] == expected
def test_all_wavelengths_in_order(raw, rad_computed):
    """Radiance wavelengths are the sorted positive raw wavelengths."""
    raw_wls = raw.where(raw[c.wavelength_data] > 0)[c.wavelength_data].values
    # NaNs come from the where() mask; only finite wavelengths remain.
    expected = np.sort(raw_wls[~np.isnan(raw_wls)].ravel())
    actual = rad_computed[c.wavelength_data].values
    assert np.all(expected == actual)
def test_raw_to_radiance_correctness(rad_expected, rad_computed):
    # Demosaicing is actually not interpolation on edges currently
    # so only the interior is compared (a 1-2 pixel border is dropped
    # on both x and y before the element-wise comparison).
    expected = rad_expected[c.radiance_data].isel(
        x=slice(1, -2), y=slice(1, -2)
        ).transpose(*c.radiance_dims).compute()
    actual = rad_computed[c.radiance_data].isel(
        x=slice(1, -2), y=slice(1, -2)
        ).transpose(*c.radiance_dims).compute()
    xrt.assert_equal(expected, actual)
def test_subtract_dark_keep_variables(raw):
    """keep_variables controls which inputs survive dark subtraction."""
    variables = [
        c.dark_reference_data,
        c.cfa_data,
    ]
    # By default the intermediate inputs are dropped; listing them keeps them.
    dropped = fpr.subtract_dark(raw)
    kept_all = fpr.subtract_dark(raw, keep_variables=variables)
    for var in variables:
        assert var not in dropped.variables
        assert var in kept_all.variables
        # Keeping a single variable must not leak the others.
        kept_one = fpr.subtract_dark(raw, keep_variables=[var])
        assert var in kept_one.variables
        for other in variables:
            if other is not var:
                assert other not in kept_one.variables
def test_raw_to_radiance_keep_variables(raw, rad_computed):
    """keep_variables controls which inputs survive radiance computation."""
    variables = [
        c.cfa_data,
        c.dark_corrected_cfa_data,
        c.dark_reference_data,
        c.rgb_data,
    ]
    dropped = rad_computed
    kept_all = fpr.raw_to_radiance(raw, keep_variables=variables)
    for var in variables:
        assert var not in dropped.variables
        assert var in kept_all.variables
        # Keeping a single variable must not leak the others.
        kept_one = fpr.raw_to_radiance(raw, keep_variables=[var])
        assert var in kept_one.variables
        for other in variables:
            if other is not var:
                assert other not in kept_one.variables
def test_radiance_to_reflectance_keep_variables(rad_expected):
    """keep_variables controls which inputs survive reflectance computation."""
    variables = [
        c.radiance_data
    ]
    dropped = fpr.radiance_to_reflectance(rad_expected, rad_expected)
    kept_all = fpr.radiance_to_reflectance(
        rad_expected, rad_expected, keep_variables=variables)
    for var in variables:
        assert var not in dropped.variables
        assert var in kept_all.variables
        # Keeping a single variable must not leak the others.
        kept_one = fpr.radiance_to_reflectance(
            rad_expected, rad_expected, keep_variables=[var])
        assert var in kept_one.variables
        for other in variables:
            if other is not var:
                assert other not in kept_one.variables
def test_reflectance_is_sensible(rad_expected):
    """Reflectance should be 1 if dataset is used as its own white reference
    except where reflectance is 0 / 0, resulting in NaN.
    """
    actual = fpr.radiance_to_reflectance(rad_expected, rad_expected)
    refl = actual[c.reflectance_data]
    expected = xr.DataArray(
        np.ones(refl.shape),
        dims=refl.dims,
        coords=refl.coords
    )
    # 0 / 0 positions become NaN instead of 1.
    expected.data[rad_expected[c.radiance_data].values == 0] = np.nan
    xrt.assert_equal(refl, expected)
| [
"fpipy.raw.BayerPattern.get",
"numpy.ones",
"fpipy.raw.subtract_dark",
"numpy.isnan",
"fpipy.raw.raw_to_radiance",
"numpy.all",
"numpy.bincount",
"xarray.testing.assert_equal",
"fpipy.raw.radiance_to_reflectance"
] | [((2619, 2659), 'numpy.bincount', 'np.bincount', (['rad_computed[c.image_index]'], {}), '(rad_computed[c.image_index])\n', (2630, 2659), True, 'import numpy as np\n'), ((3046, 3072), 'numpy.all', 'np.all', (['(expected == actual)'], {}), '(expected == actual)\n', (3052, 3072), True, 'import numpy as np\n'), ((3510, 3544), 'xarray.testing.assert_equal', 'xrt.assert_equal', (['expected', 'actual'], {}), '(expected, actual)\n', (3526, 3544), True, 'import xarray.testing as xrt\n'), ((3685, 3707), 'fpipy.raw.subtract_dark', 'fpr.subtract_dark', (['raw'], {}), '(raw)\n', (3702, 3707), True, 'import fpipy.raw as fpr\n'), ((3723, 3771), 'fpipy.raw.subtract_dark', 'fpr.subtract_dark', (['raw'], {'keep_variables': 'variables'}), '(raw, keep_variables=variables)\n', (3740, 3771), True, 'import fpipy.raw as fpr\n'), ((4338, 4388), 'fpipy.raw.raw_to_radiance', 'fpr.raw_to_radiance', (['raw'], {'keep_variables': 'variables'}), '(raw, keep_variables=variables)\n', (4357, 4388), True, 'import fpipy.raw as fpr\n'), ((4850, 4905), 'fpipy.raw.radiance_to_reflectance', 'fpr.radiance_to_reflectance', (['rad_expected', 'rad_expected'], {}), '(rad_expected, rad_expected)\n', (4877, 4905), True, 'import fpipy.raw as fpr\n'), ((4921, 5007), 'fpipy.raw.radiance_to_reflectance', 'fpr.radiance_to_reflectance', (['rad_expected', 'rad_expected'], {'keep_variables': 'variables'}), '(rad_expected, rad_expected, keep_variables=\n variables)\n', (4948, 5007), True, 'import fpipy.raw as fpr\n'), ((5598, 5653), 'fpipy.raw.radiance_to_reflectance', 'fpr.radiance_to_reflectance', (['rad_expected', 'rad_expected'], {}), '(rad_expected, rad_expected)\n', (5625, 5653), True, 'import fpipy.raw as fpr\n'), ((5915, 5969), 'xarray.testing.assert_equal', 'xrt.assert_equal', (['actual[c.reflectance_data]', 'expected'], {}), '(actual[c.reflectance_data], expected)\n', (5931, 5969), True, 'import xarray.testing as xrt\n'), ((3900, 3942), 'fpipy.raw.subtract_dark', 'fpr.subtract_dark', (['raw'], 
{'keep_variables': '[v]'}), '(raw, keep_variables=[v])\n', (3917, 3942), True, 'import fpipy.raw as fpr\n'), ((4517, 4561), 'fpipy.raw.raw_to_radiance', 'fpr.raw_to_radiance', (['raw'], {'keep_variables': '[v]'}), '(raw, keep_variables=[v])\n', (4536, 4561), True, 'import fpipy.raw as fpr\n'), ((5144, 5219), 'fpipy.raw.radiance_to_reflectance', 'fpr.radiance_to_reflectance', (['rad_expected', 'rad_expected'], {'keep_variables': '[v]'}), '(rad_expected, rad_expected, keep_variables=[v])\n', (5171, 5219), True, 'import fpipy.raw as fpr\n'), ((5692, 5733), 'numpy.ones', 'np.ones', (['actual[c.reflectance_data].shape'], {}), '(actual[c.reflectance_data].shape)\n', (5699, 5733), True, 'import numpy as np\n'), ((860, 879), 'fpipy.raw.BayerPattern.get', 'BayerPattern.get', (['p'], {}), '(p)\n', (876, 879), False, 'from fpipy.raw import BayerPattern\n'), ((958, 977), 'fpipy.raw.BayerPattern.get', 'BayerPattern.get', (['p'], {}), '(p)\n', (974, 977), False, 'from fpipy.raw import BayerPattern\n'), ((1056, 1075), 'fpipy.raw.BayerPattern.get', 'BayerPattern.get', (['p'], {}), '(p)\n', (1072, 1075), False, 'from fpipy.raw import BayerPattern\n'), ((1154, 1173), 'fpipy.raw.BayerPattern.get', 'BayerPattern.get', (['p'], {}), '(p)\n', (1170, 1173), False, 'from fpipy.raw import BayerPattern\n'), ((2959, 2972), 'numpy.isnan', 'np.isnan', (['wls'], {}), '(wls)\n', (2967, 2972), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue May 25 11:41:02 2021
@author: ike
"""
import numpy as np
import pandas as pd
from ..utils.csvreader import CSVReader
from ..utils.csvcolumns import STIM
voltage=2.437588
def count_frames(
        filename, threshold=1, volCol="AIN4", fCol="frames",
        gtCol="global_time", dtCol="dt"):
    """
    Reads in a stimulus output file and assigns an image frame number to each
    stimulus frame

    Arguments:
        filename: path to the stimulus CSV file read via CSVReader
        threshold: minimum absolute voltage change that counts as a frame flip
        volCol: column holding the frame-sync voltage signal
        fCol: name of the column to write image frame numbers into
        gtCol: column holding global (wall-clock) time
        dtCol: name of the column to write the per-frame time values into

    Returns:
        CSVReader reduced to one row per detected image frame (frames >= 1)
    """
    stim = CSVReader.fromFile(filename)
    # R = np.asarray(rows, dtype='float') # convert stim file list to an array
    # output_array=np.zeros((R.shape[0],R.shape[1]+2))
    # header.extend(['dt','frames'])
    # First difference of the voltage trace; index 0 has no predecessor.
    vs = stim.getColumn(volCol)
    vs[1:] = (vs[1:] - vs[:-1])
    vs[0] = 0
    # count image frames based on the change in voltage signal
    # A local maximum above +threshold marks a frame turning on; a local
    # minimum below -threshold marks one turning off.
    count_on, count_off = 0, 0
    frame_labels = [0]
    # F_on = [0]; F_off = [0]
    for n in range(1, len(vs) - 1, 1):
        if all((
                vs[n] > vs[n - 1],
                vs[n] > vs[n + 1],
                vs[n] > threshold)):
            count_on += 1
        elif all((
                vs[n] < vs[n - 1],
                vs[n] < vs[n + 1],
                vs[n] < -threshold)):
            count_off -= 1
        # F_on.extend([count_on]); F_off.extend([count_off])
        # count_off is negative, so the label is zero while on/off counts
        # balance — presumably labelling rows that fall between frames;
        # NOTE(review): confirm intended labelling scheme.
        frame_labels += [count_on * (count_on + count_off)]
    # Keep one row per frame number >= 1, sorted by frame.
    stim = stim.setColumn(fCol, frame_labels + [0])
    stim = stim.sortColumn(fCol)
    stim = stim.thresholdColumn(fCol, 1, ">=")
    stim = stim.dropDuplicates(fCol)
    # NOTE(review): dtCol is filled with global time (first entry zeroed),
    # not per-frame differences — confirm this is intended.
    gt = stim.getColumn(gtCol)
    gt[0] = 0
    stim = stim.setColumn(dtCol, gt)
    return stim
def find_dropped_frames(
        frames, time_interval, oldStim, newStim, fCol="frames",
        gtCol="global_time", dtCol="dt", ):
    """
    Diagnostic check comparing the number of imaging frames against the
    number of stimulus frames, printing likely dropped-frame locations.

    Arguments:
        frames: number of imaging frames acquired
        time_interval: expected duration of one frame in seconds
        oldStim: original (un-reduced) stimulus CSVReader
        newStim: frame-counted stimulus CSVReader (one row per frame)
        fCol/gtCol/dtCol: column names for frames, global time, and dt

    Returns:
        None; all output is printed.
    """
    sFrames = newStim[-1][fCol]
    print("N image frames: {:d} \nN stim frames: {:d}".format(frames, sFrames))
    if sFrames == frames:
        print("looks fine!")
        return
    print("uh oh!")
    # Compare the expected total duration against the recorded one.
    target_T = frames * time_interval
    stim_T = np.sum(newStim.getColumn(dtCol))
    print("total time should be {:.3f}s, got {:.3f}s ".format(
        target_T, stim_T))
    # A time step of ~2 frame intervals or more implies a dropped frame.
    max_t_step = np.max(newStim.getColumn(dtCol))
    if np.round(max_t_step / time_interval) < 2:
        print(
            ("stim frames and image frames do not match, but no dropped "
             "frames found... double check the stim file :-( "))
        return
    print("stimulus dropped at least one frame!")
    # NOTE(review): OUT is accumulated but never returned or used.
    OUT = []
    num_df = 0
    for rowDict in newStim:
        if np.round(rowDict[dtCol] / time_interval) >= 2:
            num_df = num_df + 1
            # Locate the row of the original stim file just before the gap.
            gt_dropped = rowDict[gtCol] - time_interval
            stim_frame = np.searchsorted(
                oldStim.getColumn(gtCol), gt_dropped)
            print("check row {} of original stim file (maybe)".format(
                stim_frame))
            OUT.append(rowDict)
    print("found {} potential dropped frames".format(num_df))
def parse_stim_file(
        newStim, fCol="frames", rtCol="rel_time", stCol="stim_type"):
    """
    Get frame numbers, global time, relative time per epoch, and stim_state
    (if it's in the stim_file)

    Arguments:
        newStim: CSVReader with one row per counted image frame
        fCol: column with image frame numbers
        rtCol: column with time relative to the start of each epoch
        stCol: optional column with a per-frame stimulus type code

    Returns:
        (frames, rel_time, stim_type); stim_type defaults to an array of
        ones when the stimulus file has no stCol column.
    """
    frames = newStim.getColumn(fCol)
    rel_time = newStim.getColumn(rtCol)
    # BUGFIX: was newStim.getCol(stCol) — CSVReader exposes getColumn
    # (used everywhere else in this module), so getCol would raise
    # AttributeError whenever the column is present.
    stim_type = (
        newStim.getColumn(stCol) if stCol in newStim else np.ones(frames.size))
    return frames, rel_time, stim_type
def splitEpochs(newStim, rtCol="rel_time"):
    """Number epochs by detecting resets (large negative jumps) in rel_time."""
    rel = newStim.getColumn(rtCol)
    peak = np.max(rel)
    # Replace the trace by its first difference (index 0 has no predecessor).
    rel[1:] = rel[1:] - rel[:-1]
    rel[0] = 0
    # A drop of more than half the peak marks the start of a new epoch;
    # the cumulative count of such drops is the epoch index per row.
    resets = (rel < (peak * -0.5)).astype(int)
    return np.cumsum(resets)
def define_stim_state(newStim, on_time, off_time, rtCol="rel_time"):
    """
    Define stimulus state (1 = ON; 0 = OFF) based on relative stimulus time
    """
    rel = newStim.getColumn(rtCol)
    # ON strictly between on_time and off_time.
    on_mask = (rel > on_time) & (rel < off_time)
    return on_mask.astype(int)
def stimswitch(newStim, on_time, off_time, rtCol="rel_time"):
    """
    Identify stimulus switch points.

    Arguments:
        newStim: CSVReader-like object exposing getColumn
        on_time/off_time: relative-time window during which the stimulus is ON
        rtCol: column with time relative to the start of each epoch

    Returns:
        (ON_indices, OFF_indices): lists of row indices at which the
        stimulus turns on and off, respectively.
    """
    rel_time = newStim.getColumn(rtCol)
    stim_state = (on_time < rel_time) * (rel_time < off_time)
    stim_state = stim_state.astype(int)
    # BUGFIX: np.where(cond) returns a tuple of index arrays, so the
    # original `np.where(...) + 1` raised TypeError; index [0] first.
    ON_indices = list(np.where(np.diff(stim_state) == 1)[0] + 1)
    OFF_indices = list(np.where(np.diff(stim_state) == -1)[0] + 1)
    return ON_indices, OFF_indices
def _addEpochNumber(self):
    # add a dummy "epoch_number" column to csv files that lack one
    # NOTE(review): written as a method (takes `self`) but defined at module
    # level; inserts the constant 1 as the first column of self.dfs.
    self.dfs.insert(0, STIM["enm"], 1)
def _extractImagingTimings(
newStim, fCol="frames", gtCol="global_time", rtCol="rel_time"):
"""
Extract frequency, time per frame, and epoch length information from
stimulus CSV
@return: tuple with following entries as floating-point numbers:
0: temporal duration of each epoch in seconds
1: number of frames in each epoch
2: total number of frames
3: temporal duration of each frame in seconds
4: imaging frequency, averagenumber of frames per second
@rtype: tuple
"""
# extract epoch length as the maximum relative time within an epoch
epochTime = int(np.ceil(np.max(newStim.getColumn(rtCol))))
frames = int(np.max(newStim.getColumn(fCol)))
# extract time per frame as the average change in global time
dfs = newStim.dfs.copy().groupby(fCol).mean().reset_index()
dfs = dfs[gtCol].copy().to_numpy()
frameTime = np.mean(dfs[1:] - dfs[:-1])
epochFrames = int(epochTime // frameTime)
# extract imaging frequency as the reciprocal of time per frame
frequency = 1 / frameTime
return epochTime, epochFrames, frames, frameTime, frequency
def generateSplitEpochs(self):
    # NOTE(review): written as a method (takes `self`) but defined at module
    # level; expects self.dfs, self.epochFrames and self.getColumn to exist.
    # Yields (epoch, epoch_number, frames) for every epoch except the first
    # and last, padding/truncating each frame list to exactly epochFrames.
    for e in self.getColumn(STIM["epc"], unique=True)[1:-1]:
        sub = self.dfs[self.dfs[STIM["epc"]] == e].copy()
        frames = sub[STIM["frm"]].tolist()
        # Pad with the final frame if the epoch is short, then truncate.
        frames = frames + ([frames[-1]] * (self.epochFrames - len(frames)))
        frames = frames[:self.epochFrames]
        # The most common epoch_number within the epoch labels it.
        number = sub.mode()[STIM["enm"]][0]
        yield e, number, frames
# def binFrames(self, scalar):
# """
# Extract an ordering of frames needed to bin a corresponding image
# at a scalar multiple of the frameTime. Equivalent to resampling the
# image at a scalar^-1 multiple of the imaging frequency
#
# Note: "binning" a sample with a scalar of 1 is an identical operation
    # to averaging between, but not within, all identical epochs
#
# @param dfs: stimulus CSV file stored as pandas dataframe. This
# CSV should already have imaging frames counted
# @type dfs: pandas.DataFrame
# @param scalar: multiple of interval at which to conduct binning
# @type scalar: float
#
# @return: list of imaging frames in each bin. Each index in list is a
# sublist of all frames that should be averaged to yield the index-th
# frame in the binned image. ie the following list of lists:
# [[1, 5, 8, 9]
# [2, 3, 6, 10]
# [4, 7, 11]]
# indicates that there are 3 binned frames from an imaging array with
# 11 unbinned frames. The first binned image in this example includes all
# frames in bins[0] -- frames 1, 5, 8, and 9.
#
# Second returned entity is a second list that tracks the epoch frame
# corresponding to each bin in the previous list
# @rtype: list, list
# """
# binFrames = list()
# stmFrames = list()
# width = self.frameTime * scalar
# # for every unique epoch type in "epoch_number" column
# for epoch in sorted(self.dfs[STIM["enm"]].unique().tolist()):
    #         # isolate all stimulus rows corresponding to a particular epoch
# dfn = self.dfs[self.dfs[STIM["enm"]] == epoch].copy()
# for t in np.arange(0, self.epochTime, width):
# # a bin is the relative time windown [t, t + width)
# dff = dfn[
# (dfn[STIM["rlt"]] >= t) & (dfn[STIM["rlt"]] < (t + width))]
# if dff.empty:
# continue
#
# frm = np.squeeze(
# (dff[STIM["frm"]].to_numpy(dtype=int)) - 1).tolist()
# binFrames += ([frm] if type(frm) == list else [[frm]])
# stmFrames += [int(dff[STIM["enm"]].max())]
# if STIM["smt"] in self:
# stmFrames[-1] = [int(dff["stim_type"].max())]
#
# return binFrames, stmFrames
| [
"numpy.mean",
"numpy.ones",
"numpy.diff",
"numpy.max",
"numpy.round"
] | [((3491, 3502), 'numpy.max', 'np.max', (['rel'], {}), '(rel)\n', (3497, 3502), True, 'import numpy as np\n'), ((5390, 5417), 'numpy.mean', 'np.mean', (['(dfs[1:] - dfs[:-1])'], {}), '(dfs[1:] - dfs[:-1])\n', (5397, 5417), True, 'import numpy as np\n'), ((2217, 2253), 'numpy.round', 'np.round', (['(max_t_step / time_interval)'], {}), '(max_t_step / time_interval)\n', (2225, 2253), True, 'import numpy as np\n'), ((3335, 3355), 'numpy.ones', 'np.ones', (['frames.size'], {}), '(frames.size)\n', (3342, 3355), True, 'import numpy as np\n'), ((2546, 2586), 'numpy.round', 'np.round', (['(rowDict[dtCol] / time_interval)'], {}), '(rowDict[dtCol] / time_interval)\n', (2554, 2586), True, 'import numpy as np\n'), ((4210, 4229), 'numpy.diff', 'np.diff', (['stim_state'], {}), '(stim_state)\n', (4217, 4229), True, 'import numpy as np\n'), ((4273, 4292), 'numpy.diff', 'np.diff', (['stim_state'], {}), '(stim_state)\n', (4280, 4292), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import cv2
import os
import dlib
from scipy import misc
import numpy as np
from PIL import Image
def getBound(img, shape):
    """Return the bounding box (xMin, xMax, yMin, yMax) of dlib landmarks."""
    # Start from extreme values so the first landmark always updates them.
    xMin, yMin = len(img[0]), len(img)
    xMax = yMax = 0
    for i in range(shape.num_parts):
        part = shape.part(i)
        xMin = min(xMin, part.x)
        xMax = max(xMax, part.x)
        yMin = min(yMin, part.y)
        yMax = max(yMax, part.y)
    return xMin, xMax, yMin, yMax
def combineImg(imga, imgb):
    """
    Paste two equally sized images side by side into one RGB image.

    Arguments:
        imga: left image as an HxWx3 numpy array
        imgb: right image as an HxWx3 numpy array of the same height

    Returns:
        PIL.Image of size (Wa + Wb, H) with imga on the left and imgb
        immediately to its right.
    """
    height, width_a = imga.shape[0], imga.shape[1]
    # BUGFIX: PIL sizes are (width, height); the original used shape[0]
    # (rows) for the width — only correct for square images — and pasted
    # at x = shape[0] + 1, leaving a 1-px gap and clipping imgb's last
    # column.
    target = Image.new('RGB', (width_a + imgb.shape[1], height))
    target.paste(Image.fromarray(np.uint8(imga)), (0, 0))
    target.paste(Image.fromarray(np.uint8(imgb)), (width_a, 0))
    return target
def getFace(detector, shapePredict, img):
    """
    Run face detection plus landmark prediction and return landmark bounds.

    Returns (xmin, xmax, ymin, ymax), or four Nones when no face is found
    or any bound is negative.
    """
    detections = detector(img, 1)
    if len(detections) == 0:
        return None, None, None, None
    # Only the first detected face is used.
    landmarks = shapePredict(img, detections[0])
    xmin, xmax, ymin, ymax = getBound(img, landmarks)
    # Negative coordinates mean the landmarks fell outside the image.
    if min(xmin, xmax, ymin, ymax) < 0:
        return None, None, None, None
    return xmin, xmax, ymin, ymax
def headFromDir(inDir, outDir, shape_model, size, faceSize, outBleed_x=0, outBleed_y=0):
    """
    Detect the face in every .jpg in inDir, crop a square head region,
    and save a side-by-side image (padded head crop | masked face area)
    to outDir.

    Arguments:
        inDir: directory of input .jpg images
        outDir: output directory (created if missing)
        shape_model: path to a dlib shape-predictor model file
        size: output crop side length in pixels
        faceSize: unused  # NOTE(review): parameter is never referenced
        outBleed_x: extra pixels kept left/right of the detected face box
        outBleed_y: extra pixels kept above the detected face box
    """
    shapePredict = dlib.shape_predictor(shape_model)
    detector = dlib.get_frontal_face_detector()
    if not os.path.exists(outDir):
        os.mkdir(outDir)
    count = 0
    fileList = os.listdir(inDir)
    for name in fileList:
        count += 1
        print("processing %s, current %d of total %d" % (name, count, len(fileList)))
        fileName = os.path.join(inDir, name)
        if not fileName.endswith('.jpg'):
            continue
        # OpenCV loads BGR; convert to RGB for dlib/PIL.
        img = cv2.cvtColor(cv2.imread(fileName), cv2.COLOR_BGR2RGB)
        dets = detector(img, 1)
        if (len(dets) == 0):
            print("file %s has no face" % name)
            continue
        det = dets[0]
        shape = shapePredict(img, det)
        xmin, xmax, ymin, ymax = getBound(img, shape)
        if xmin < 0 or xmax < 0 or ymin < 0 or ymax < 0:
            print("file %s can't get bound" % name)
            continue
        left = xmin
        right = xmax
        top = ymin
        bottom = ymax
        # Expand the landmark box into a roughly square head crop, using
        # whichever edge of the landmark box is longer.
        longEdge = xmax - xmin
        shortEdge = ymax - ymin
        if longEdge < (ymax - ymin):
            longEdge = ymax - ymin
            shortEdge = xmax - xmin
            # To get square crop area, begin from face middle, take 1 facesize to the upward
            # take 0.5 facesize to the downward, take 1.5/2 facesize to the left and right respectively.
            top = int(ymin - longEdge)
            bottom = int(ymax + longEdge / 2)
            left = int(xmin - longEdge * 1.5 / 2)
            right = int(xmax + longEdge * 1.5 / 2)
        else:
            left = int(xmin - shortEdge * 1.5 / 2)
            right = int(xmax + shortEdge * 1.5 / 2)
            top = int(ymin - shortEdge)
            bottom = int(ymax + shortEdge / 2)
        # When the crop extends past the image, remember how much margin
        # (in output pixels) each side needs, then clamp the crop box.
        fullImg = np.zeros((size, size, 3))
        marginLeft = 0
        if left < 0:
            marginLeft = -int(left * size / (right - left))
            left = 0
        marginTop = 0
        if top < 0:
            marginTop = -int(top * size / (bottom - top))
            top = 0
        marginRight = 0
        if right > img.shape[1]:
            marginRight = int((right - img.shape[1]) * size / (right - left))
            right = img.shape[1]
        marginBottom = 0
        if bottom > img.shape[0]:
            marginBottom = int((bottom - img.shape[0]) * size / (bottom - top))
            bottom = img.shape[0]
        # Resize the in-image part of the crop into the non-margin area.
        cropedImg = img[top:bottom, left:right, :]
        cropedImg = cv2.resize(cropedImg, dsize=(size - marginLeft - marginRight, size - marginTop - marginBottom))
        fullImg[marginTop : size - marginBottom, marginLeft : size - marginRight, :] = cropedImg
        # Fill the margins by replicating the nearest edge row/column.
        if marginLeft > 0:
            fullImg[marginTop:(size - marginBottom), 0:marginLeft, :] = np.tile(np.reshape(cropedImg[:,0,:], (size - marginTop - marginBottom, 1, 3)), (1, marginLeft, 1))
        if marginRight > 0:
            fullImg[marginTop:(size - marginBottom), (size - marginRight):size, :] = np.tile(np.reshape(cropedImg[:, cropedImg.shape[1] - 1, :], (size - marginTop - marginBottom, 1, 3)), (1, marginRight, 1))
        if marginTop > 0:
            fullImg[0:marginTop, :, :] = np.tile(np.reshape(fullImg[marginTop, :, :], (1, size, 3)), (marginTop, 1, 1))
        if marginBottom > 0:
            fullImg[(size - marginBottom):size, :, :] = np.tile(np.reshape(fullImg[(size - marginBottom), :, :], (1, size, 3)), (marginBottom, 1, 1))
        # Re-detect the face within the padded crop and black out everything
        # outside the (optionally bled) face box.
        fullFace = np.zeros((size, size, 3))
        xminFace, xmaxFace, yminFace, ymaxFace = getFace(detector, shapePredict, fullImg.astype(np.uint8))
        if xminFace == None:
            print("file %s can't get face in fullImg" % name)
            continue
        if outBleed_x > 0:
            xminFace -= outBleed_x
            if xminFace < 0:
                xminFace = 0
            xmaxFace += outBleed_x
            if xmaxFace > fullImg.shape[1]:
                xmaxFace = fullImg.shape[1]
        if outBleed_y > 0:
            yminFace -= outBleed_y
            if yminFace < 0:
                yminFace = 0
        fullFace[yminFace:ymaxFace, xminFace:xmaxFace, :] = fullImg[yminFace:ymaxFace, xminFace:xmaxFace, :]
        combine = combineImg(fullImg, fullFace)
        outPath = os.path.join(outDir, str(count).zfill(6) + '.jpg')
        # NOTE(review): scipy.misc.imsave was removed in SciPy 1.2 — this
        # requires an old SciPy; consider imageio.imwrite.
        misc.imsave(outPath, combine)
| [
"numpy.uint8",
"os.path.exists",
"os.listdir",
"numpy.reshape",
"PIL.Image.new",
"scipy.misc.imsave",
"os.path.join",
"dlib.shape_predictor",
"dlib.get_frontal_face_detector",
"numpy.zeros",
"os.mkdir",
"cv2.resize",
"cv2.imread"
] | [((648, 700), 'PIL.Image.new', 'Image.new', (['"""RGB"""', '(imga.shape[0] * 2, imga.shape[1])'], {}), "('RGB', (imga.shape[0] * 2, imga.shape[1]))\n", (657, 700), False, 'from PIL import Image\n'), ((1337, 1370), 'dlib.shape_predictor', 'dlib.shape_predictor', (['shape_model'], {}), '(shape_model)\n', (1357, 1370), False, 'import dlib\n'), ((1387, 1419), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (1417, 1419), False, 'import dlib\n'), ((1513, 1530), 'os.listdir', 'os.listdir', (['inDir'], {}), '(inDir)\n', (1523, 1530), False, 'import os\n'), ((1432, 1454), 'os.path.exists', 'os.path.exists', (['outDir'], {}), '(outDir)\n', (1446, 1454), False, 'import os\n'), ((1465, 1481), 'os.mkdir', 'os.mkdir', (['outDir'], {}), '(outDir)\n', (1473, 1481), False, 'import os\n'), ((1685, 1710), 'os.path.join', 'os.path.join', (['inDir', 'name'], {}), '(inDir, name)\n', (1697, 1710), False, 'import os\n'), ((3124, 3149), 'numpy.zeros', 'np.zeros', (['(size, size, 3)'], {}), '((size, size, 3))\n', (3132, 3149), True, 'import numpy as np\n'), ((3835, 3934), 'cv2.resize', 'cv2.resize', (['cropedImg'], {'dsize': '(size - marginLeft - marginRight, size - marginTop - marginBottom)'}), '(cropedImg, dsize=(size - marginLeft - marginRight, size -\n marginTop - marginBottom))\n', (3845, 3934), False, 'import cv2\n'), ((4818, 4843), 'numpy.zeros', 'np.zeros', (['(size, size, 3)'], {}), '((size, size, 3))\n', (4826, 4843), True, 'import numpy as np\n'), ((5693, 5722), 'scipy.misc.imsave', 'misc.imsave', (['outPath', 'combine'], {}), '(outPath, combine)\n', (5704, 5722), False, 'from scipy import misc\n'), ((733, 747), 'numpy.uint8', 'np.uint8', (['imga'], {}), '(imga)\n', (741, 747), True, 'import numpy as np\n'), ((792, 806), 'numpy.uint8', 'np.uint8', (['imgb'], {}), '(imgb)\n', (800, 806), True, 'import numpy as np\n'), ((1814, 1834), 'cv2.imread', 'cv2.imread', (['fileName'], {}), '(fileName)\n', (1824, 1834), False, 'import cv2\n'), 
((4138, 4209), 'numpy.reshape', 'np.reshape', (['cropedImg[:, 0, :]', '(size - marginTop - marginBottom, 1, 3)'], {}), '(cropedImg[:, 0, :], (size - marginTop - marginBottom, 1, 3))\n', (4148, 4209), True, 'import numpy as np\n'), ((4352, 4448), 'numpy.reshape', 'np.reshape', (['cropedImg[:, cropedImg.shape[1] - 1, :]', '(size - marginTop - marginBottom, 1, 3)'], {}), '(cropedImg[:, cropedImg.shape[1] - 1, :], (size - marginTop -\n marginBottom, 1, 3))\n', (4362, 4448), True, 'import numpy as np\n'), ((4544, 4594), 'numpy.reshape', 'np.reshape', (['fullImg[marginTop, :, :]', '(1, size, 3)'], {}), '(fullImg[marginTop, :, :], (1, size, 3))\n', (4554, 4594), True, 'import numpy as np\n'), ((4710, 4770), 'numpy.reshape', 'np.reshape', (['fullImg[size - marginBottom, :, :]', '(1, size, 3)'], {}), '(fullImg[size - marginBottom, :, :], (1, size, 3))\n', (4720, 4770), True, 'import numpy as np\n')] |
# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle
import numpy as np
class VQATokenPad(object):
    """
    Pad (or, in inference mode, truncate) tokenized VQA samples to a fixed
    sequence length.

    Pads input_ids, token_type_ids, bbox and labels — plus attention_mask /
    special_tokens_mask when requested — out to max_seq_len on the side
    given by the tokenizer parameters, then casts the sequence fields to
    int64 numpy arrays.
    """

    def __init__(self,
                 max_seq_len=512,
                 pad_to_max_seq_len=True,
                 return_attention_mask=True,
                 return_token_type_ids=True,
                 truncation_strategy="longest_first",
                 return_overflowing_tokens=False,
                 return_special_tokens_mask=False,
                 infer_mode=False,
                 **kwargs):
        self.max_seq_len = max_seq_len
        # BUGFIX: this attribute was assigned max_seq_len, silently ignoring
        # the pad_to_max_seq_len argument (both are truthy by default, so
        # default behaviour is unchanged).
        self.pad_to_max_seq_len = pad_to_max_seq_len
        self.return_attention_mask = return_attention_mask
        self.return_token_type_ids = return_token_type_ids
        self.truncation_strategy = truncation_strategy
        self.return_overflowing_tokens = return_overflowing_tokens
        self.return_special_tokens_mask = return_special_tokens_mask
        # Label value ignored by the loss at padded positions.
        self.pad_token_label_id = paddle.nn.CrossEntropyLoss().ignore_index
        self.infer_mode = infer_mode

    def __call__(self, data):
        """Pad `data` (a dict of token fields) in place and cast to int64."""
        needs_to_be_padded = self.pad_to_max_seq_len and len(data[
            "input_ids"]) < self.max_seq_len

        if needs_to_be_padded:
            # Padding side / pad token ids come from the tokenizer if given.
            if 'tokenizer_params' in data:
                tokenizer_params = data.pop('tokenizer_params')
            else:
                tokenizer_params = dict(
                    padding_side='right', pad_token_type_id=0, pad_token_id=1)

            difference = self.max_seq_len - len(data["input_ids"])
            if tokenizer_params['padding_side'] == 'right':
                if self.return_attention_mask:
                    data["attention_mask"] = [1] * len(data[
                        "input_ids"]) + [0] * difference
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        data["token_type_ids"] +
                        [tokenizer_params['pad_token_type_id']] * difference)
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = data[
                        "special_tokens_mask"] + [1] * difference
                data["input_ids"] = data["input_ids"] + [
                    tokenizer_params['pad_token_id']
                ] * difference
                if not self.infer_mode:
                    data["labels"] = data[
                        "labels"] + [self.pad_token_label_id] * difference
                data["bbox"] = data["bbox"] + [[0, 0, 0, 0]] * difference
            elif tokenizer_params['padding_side'] == 'left':
                if self.return_attention_mask:
                    data["attention_mask"] = [0] * difference + [
                        1
                    ] * len(data["input_ids"])
                if self.return_token_type_ids:
                    data["token_type_ids"] = (
                        [tokenizer_params['pad_token_type_id']] * difference +
                        data["token_type_ids"])
                if self.return_special_tokens_mask:
                    data["special_tokens_mask"] = [
                        1
                    ] * difference + data["special_tokens_mask"]
                data["input_ids"] = [tokenizer_params['pad_token_id']
                                     ] * difference + data["input_ids"]
                if not self.infer_mode:
                    data["labels"] = [self.pad_token_label_id
                                      ] * difference + data["labels"]
                data["bbox"] = [[0, 0, 0, 0]] * difference + data["bbox"]
        else:
            if self.return_attention_mask:
                data["attention_mask"] = [1] * len(data["input_ids"])

        for key in data:
            if key in [
                    'input_ids', 'labels', 'token_type_ids', 'bbox',
                    'attention_mask'
            ]:
                if self.infer_mode:
                    # At inference there are no labels; truncate the rest.
                    if key != 'labels':
                        length = min(len(data[key]), self.max_seq_len)
                        data[key] = data[key][:length]
                    else:
                        continue
                data[key] = np.array(data[key], dtype='int64')
        return data
| [
"numpy.array",
"paddle.nn.CrossEntropyLoss"
] | [((1505, 1533), 'paddle.nn.CrossEntropyLoss', 'paddle.nn.CrossEntropyLoss', ([], {}), '()\n', (1531, 1533), False, 'import paddle\n'), ((4704, 4738), 'numpy.array', 'np.array', (['data[key]'], {'dtype': '"""int64"""'}), "(data[key], dtype='int64')\n", (4712, 4738), True, 'import numpy as np\n')] |
'''
This contains a number of methods and functions to calculate the iRep metric
https://github.com/christophertbrown/iRep
https://www.nature.com/articles/nbt.3704
'''
import os
import sys
import glob
import scipy
import lmfit
import scipy.signal
import numpy as np
import pandas as pd
import seaborn as sns
from Bio import SeqIO
from collections import defaultdict
def calculate_iRep_from_coverage_array(rcov, num_contigs, gc_windows=None):
    '''
    Calculate the iRep replication metric from a genome-wide coverage array.

    Arguments:
        rcov: genome-wide array of coverage values
        num_contigs: number of contigs in genome
        gc_windows: GC content of windows to do GC coverage correction
            (DataFrame with 'index' and 'GC_content' columns; optional)

    Returns:
        iRep: iRep value or np.nan if filtered out
        FilterChriteria: Dictionary of other iRep outputs and filter values
    '''
    FilterChriteria = {'kept_windows':np.nan, # Calculated in coverage_windows
                   'avg_cov':np.nan, # Calculated on raw array
                   'r2':np.nan, # Calculated from iRep itself
                   'fragMbp':np.nan } # Calculated from iRep itself

    # Filter chriteria
    length = len(rcov)
    FilterChriteria['avg_cov'] = np.mean(rcov)
    FilterChriteria['fragMbp'] = num_contigs/(float(length)/1000000)

    # Calculate the windows
    oIdb = _iRep_windows(rcov)

    # Add GC if appropriate
    if gc_windows is not None:
        oIdb = pd.merge(oIdb, gc_windows, on='index')

    # Filter out junk windows
    Idb = _iRep_filter_windows(oIdb, on='coverage')
    FilterChriteria['kept_windows'] = len(Idb) / len(oIdb)

    # Get raw iRep values
    Idb.loc[:,'coverage_OLT'] = _iRep_log_transform(Idb['coverage'])
    iRep = _calc_iRep(Idb, length, on='coverage_OLT', FilterChriteria=FilterChriteria)
    FilterChriteria['unfiltered_raw_iRep'] = iRep

    # Get gc-corrected iRep values
    FilterChriteria['iRep_GC_corrected'] = False
    if gc_windows is not None:
        Idb = _iRep_gc_bias(Idb)
        Idb.loc[:,'coverage_LT'] = _iRep_log_transform(Idb['corrected_coverage'])
        iRep = _calc_iRep(Idb, length, on='coverage_LT')
        FilterChriteria['unfiltered_iRep'] = iRep
        FilterChriteria['iRep_GC_corrected'] = True

    # Get raw iRep values
    # NOTE(review): the raw (uncorrected) iRep is recomputed here, which
    # overwrites r2/unfiltered_raw_iRep and makes the returned iRep the
    # raw value even after GC correction — confirm this is intended.
    Idb.loc[:,'coverage_OLT'] = _iRep_log_transform(Idb['coverage'])
    iRep = _calc_iRep(Idb, length, on='coverage_OLT', FilterChriteria=FilterChriteria)
    FilterChriteria['unfiltered_raw_iRep'] = iRep

    # See if iRep passes
    # Reject (return NaN) when any quality threshold fails.
    if (FilterChriteria['kept_windows'] < 0.98) or \
        (FilterChriteria['avg_cov'] < 5) or \
        (FilterChriteria['r2'] < 0.9) or \
        (FilterChriteria['fragMbp'] > 175):
        iRep = np.nan

    return iRep, FilterChriteria
def generate_gc_windows(order, scaff2sequence, mask_edges=100):
    '''
    Calculate the GC content for windows across a .fasta file

    Arguments:
        order: list of scaffolds in the order they should be in
        scaff2sequence: scaffold -> sequence mapping
            (scaff2sequence = SeqIO.to_dict(SeqIO.parse(fasta_loc, "fasta")))
        mask_edges: remove this many bp from the start and end of each
            contig; a falsy value disables the masking

    Returns:
        pandas DataFrame of windowed GC content (see _iRep_gc_content)
    '''
    # Concatenate the scaffolds into one genome-wide sequence, trimming
    # contig edges where coverage is unreliable.
    splits = []
    for scaff in order:
        seq = scaff2sequence[scaff]
        if mask_edges:
            # BUGFIX: the trim width was hard-coded to 100, silently
            # ignoring the mask_edges argument.
            seq = seq[mask_edges:len(seq) - mask_edges]
        splits.append(seq)
    genome_seq = sum(splits, [])

    # Calculate GC content
    gcdb = _iRep_gc_content(genome_seq)
    return gcdb
def _iRep_gc_content(seq, window = 5000, slide = 100):
"""
iRep gc_content message
calculate gc content over sequence windows
"""
# convert GC
replacements = {'G':1, 'C':1, 'A':0, 'T':0, 'N':0}
GC = [] # G - C
for base in seq:
try:
GC.append(replacements[base.upper()])
except:
GC.append(0)
# calculate gc content over sliding windows
i = 0
weights = np.ones(window)
table = defaultdict(list)
for gc in scipy.signal.fftconvolve(GC, weights, 'valid').tolist()[0::slide]:
table['index'].append(i)
table['GC_content'].append(gc/window)
i += slide
return pd.DataFrame(table)
def _iRep_windows(cov, window=5000, slide=100, FilterChriteria=None):
'''
Replicates *coverage_windows()*
'''
table = defaultdict(list)
i = 0
weights = np.ones(window)
for c in scipy.signal.fftconvolve(cov, weights, 'valid').tolist()[0::slide]:
table['index'].append(i)
table['coverage'].append(c/window)
i += slide
return pd.DataFrame(table)
def _iRep_filter_windows(cov, on='coverage', mdif = float(8)):
'''
Replicates *filter_windows()*
Remove windows with weird coverage
Edited to improve speed in version 1.3.0l
'''
med = np.median(cov[on])
return cov[[True if \
((y > 0) and (med > 0) and
(abs(float(max([y, med])) / float(min([y, med]))) <= mdif))
else False for y in cov[on]]]
# keeper_index = []
# for i, row in cov.iterrows():
# y = row[on]
# if y <= 0 or med <= 0:
# continue
# if abs(float(max([y, med])) / float(min([y, med]))) > mdif:
# continue
# keeper_index.append(row['index'])
#
# return cov[cov['index'].isin(keeper_index)]
def _iRep_log_transform(array):
lt = []
eps = 1e-50
for i in array:
if i < eps:
lt.append(np.log2(eps))
else:
lt.append(np.log2(i))
return lt
def _calc_iRep(db, length, on='coverage_OLT', FilterChriteria=None):
    '''
    Replicates iRep_from_windows: fit a line to the sorted, trimmed
    coverage values and convert the fitted slope into an iRep estimate.
    '''
    values = sorted(list(db[on]))
    n_windows = len(values)
    if n_windows == 0:
        return np.nan
    # Spread window positions evenly across the genome length.
    step = float(length) / float(n_windows)
    positions = [int(j * step) + 1 for j in range(n_windows)]
    Xt, Yt = trim_data((positions, values), xy = True)
    db = pd.DataFrame({'index': Xt, 'cov': Yt})
    slope, intercept, fit, r2, info = fit_coverage((Xt, Yt, None, True))
    iRep = 2**(slope * length)
    if FilterChriteria is not None:
        FilterChriteria['r2'] = r2
    return iRep
def trim_data(data, xy, p = 0.1):
    """
    RIGHT FROM iREP
    remove a fraction ``p`` of entries, split evenly between both ends of
    a sorted list (or of a paired (X, Y) tuple when ``xy`` is True)
    """
    if xy is False:
        n_cut = int(len(data) * (p / 2))
        return data[n_cut:len(data) - n_cut]
    X, Y = data
    n_cut = int(len(X) * (p / 2))
    stop = len(X) - n_cut
    return X[n_cut:stop], Y[n_cut:stop]
def fit_coverage(pars):
    """
    RIGHT FROM iREP
    fit line y = mx + b to sorted coverage values to get the slope
    """
    x, y, info, return_fit = pars
    # need more data points than free parameters (m, b)
    if len(x) <= 2:
        if return_fit is False:
            return (False, False, False, info)
        return (False, False, False, False, info)
    # set up the linear model parameters
    params = lmfit.Parameters()
    params.add('m', value = 1, vary = True)  # slope
    params.add('b', value = 1)               # intercept
    # least-squares fit of the model to the data
    result = lmfit.minimize(coverage_function, params, args = (x,),
                            kws = {'data':y, 'printPs':False}, method = 'leastsq')
    # coefficient of determination
    r2 = 1 - (result.residual.var() / np.var(y))
    if return_fit is False:
        return (result.params['m'].value, result.params['b'].value, r2, info)
    # also hand back the fitted values
    fit = [x, coverage_function(result.params, x)]
    return (result.params['m'].value, result.params['b'].value, fit, r2, info)
def coverage_function(pars, X, data = None, printPs = False):
    """
    RIGHT FROM iREP
    linear function for sorted coverage profile
    y = mx + b
    """
    m, b = pars['m'].value, pars['b'].value
    if printPs is True:
        print('m: %s b: %s' % \
                ('{:,}'.format(int(m)), '{:,}'.format(int(b))))
    model = [float(m * x) + b for x in X]
    if data is not None:
        # residuals for the least-squares fitter: model - data
        return np.asarray([model[i] - data[i] for i in range(len(model))])
    return np.asarray(model)
def _iRep_gc_bias(Idb, correction_threshold=0.0):
    '''
    iRep gc_bias method: regress coverage against GC content and, when
    the (outlier-filtered) fit is good enough, add a GC-corrected
    coverage column to ``Idb``.

    Parameters
    ----------
    Idb : DataFrame with 'GC_content' and 'coverage' columns
    correction_threshold : minimum r^2 required to apply the correction;
        below it the coverage is copied through unchanged

    Returns
    -------
    Idb with a 'corrected_coverage' column added (modified in place)
    '''
    # initial linear fit of coverage vs. GC content
    m, b, fit, r2, l = fit_coverage((Idb['GC_content'].tolist(), Idb['coverage'].tolist(), False, True))
    # absolute deviation of each window from the fitted line
    Idb.loc[:,'error'] = [abs(cov - (m * gc + b)) for gc, cov in zip(Idb['GC_content'], Idb['coverage'])]
    # drop the worst 1% of windows before re-fitting; the index lookup
    # fails on an empty error column, in which case nothing is filtered
    try:
        cutoff = sorted(Idb['error'].tolist(), reverse = True)[int(len(Idb['error'])*0.01)]
    except IndexError:
        cutoff = 0
    FIdb = Idb[~(Idb['error'] >= cutoff)]
    # re-fit with the filtered data
    m, b, fit, r2, l = fit_coverage((FIdb['GC_content'].tolist(), FIdb['coverage'].tolist(), False, True))
    if r2 < correction_threshold:
        # fit too poor to trust -- pass coverage through unchanged
        Idb['corrected_coverage'] = Idb['coverage']
        return Idb
    # shift each window's coverage by its offset from the mean prediction
    av = np.average(Idb['coverage'])
    Idb['corrected_coverage'] = [cov + (av - (m * gc + b)) for cov, gc in zip(Idb['coverage'], Idb['GC_content'])]
    return Idb
| [
"numpy.mean",
"numpy.median",
"numpy.ones",
"numpy.average",
"pandas.merge",
"numpy.asarray",
"scipy.signal.fftconvolve",
"numpy.var",
"collections.defaultdict",
"pandas.DataFrame",
"numpy.log2",
"lmfit.Parameters",
"lmfit.minimize"
] | [((1130, 1143), 'numpy.mean', 'np.mean', (['rcov'], {}), '(rcov)\n', (1137, 1143), True, 'import numpy as np\n'), ((3977, 3992), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (3984, 3992), True, 'import numpy as np\n'), ((4005, 4022), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4016, 4022), False, 'from collections import defaultdict\n'), ((4213, 4232), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (4225, 4232), True, 'import pandas as pd\n'), ((4368, 4385), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (4379, 4385), False, 'from collections import defaultdict\n'), ((4411, 4426), 'numpy.ones', 'np.ones', (['window'], {}), '(window)\n', (4418, 4426), True, 'import numpy as np\n'), ((4615, 4634), 'pandas.DataFrame', 'pd.DataFrame', (['table'], {}), '(table)\n', (4627, 4634), True, 'import pandas as pd\n'), ((4846, 4864), 'numpy.median', 'np.median', (['cov[on]'], {}), '(cov[on])\n', (4855, 4864), True, 'import numpy as np\n'), ((5944, 5982), 'pandas.DataFrame', 'pd.DataFrame', (["{'index': Xt, 'cov': Yt}"], {}), "({'index': Xt, 'cov': Yt})\n", (5956, 5982), True, 'import pandas as pd\n'), ((6898, 6916), 'lmfit.Parameters', 'lmfit.Parameters', ([], {}), '()\n', (6914, 6916), False, 'import lmfit\n'), ((7039, 7146), 'lmfit.minimize', 'lmfit.minimize', (['coverage_function', 'Pars'], {'args': '(x,)', 'kws': "{'data': y, 'printPs': False}", 'method': '"""leastsq"""'}), "(coverage_function, Pars, args=(x,), kws={'data': y,\n 'printPs': False}, method='leastsq')\n", (7053, 7146), False, 'import lmfit\n'), ((8798, 8825), 'numpy.average', 'np.average', (["Idb['coverage']"], {}), "(Idb['coverage'])\n", (8808, 8825), True, 'import numpy as np\n'), ((1348, 1386), 'pandas.merge', 'pd.merge', (['oIdb', 'gc_windows'], {'on': '"""index"""'}), "(oIdb, gc_windows, on='index')\n", (1356, 1386), True, 'import pandas as pd\n'), ((7879, 7898), 'numpy.asarray', 'np.asarray', (['results'], {}), 
'(results)\n', (7889, 7898), True, 'import numpy as np\n'), ((7221, 7230), 'numpy.var', 'np.var', (['y'], {}), '(y)\n', (7227, 7230), True, 'import numpy as np\n'), ((4037, 4083), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['GC', 'weights', '"""valid"""'], {}), "(GC, weights, 'valid')\n", (4061, 4083), False, 'import scipy\n'), ((4440, 4487), 'scipy.signal.fftconvolve', 'scipy.signal.fftconvolve', (['cov', 'weights', '"""valid"""'], {}), "(cov, weights, 'valid')\n", (4464, 4487), False, 'import scipy\n'), ((5499, 5511), 'numpy.log2', 'np.log2', (['eps'], {}), '(eps)\n', (5506, 5511), True, 'import numpy as np\n'), ((5549, 5559), 'numpy.log2', 'np.log2', (['i'], {}), '(i)\n', (5556, 5559), True, 'import numpy as np\n')] |
import numpy as np
import torch.optim as optim
import networks.networks as net
from networks.gtsrb import *
from networks.svhn import *
import torchvision as tv
from torchvision import transforms
from torch.utils.data import DataLoader
from data.idadataloader import DoubleDataset
from config import get_transform
from data.mnist_m import MNISTM
import argparse
# Command-line interface: the positional 'setting' argument selects the
# regime ('SO' = source only, 'TO' = target only, anything else runs the
# domain-adversarial training loop -- see dispatch in __main__).
parser = argparse.ArgumentParser(description='Sanity Checks Only')
parser.add_argument('setting', default="SO", help='Setting to run (see config.py)')
args = parser.parse_args()
root = '/home/fcdl/dataset/'
#target_path = root + "GTSRB/Final_Training/Images"
#source_path = root + "synthetic_data"
#test_path = root + "GTSRB/Final_Test"
#target_path = root + 'sketchy/photo_train'
#source_path = root + 'sketchy/sketch'
#test_path = root + 'sketchy/photo_test'
EPOCHS = 40  # number of training epochs
NUM_CLASSES = 10  # number of classes (SVHN/MNIST digits)
device = 'cuda' if torch.cuda.is_available() else 'cpu'
const = 1  # weight of the domain-confusion loss in the total loss
def train_epoch_single(network, train_loader, optimizer):
    """Run one supervised training epoch on a single domain.

    Uses the module-level ``device``; expects ``network`` to expose
    ``forward`` (returning (logits, features)) and ``predict``.
    Returns (mean loss per batch, accuracy in percent).
    """
    src_criterion = nn.CrossEntropyLoss()
    network.train()
    train_loss = 0
    train_correct = 0
    train_total = 0
    batch_idx = 0
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        logits, feat = network.forward(inputs)  # backbone features
        prediction = network.predict(logits)    # class scores
        loss_bx = src_criterion(prediction, targets)  # CE loss
        loss_bx.backward()
        optimizer.step()
        # accumulate batch statistics
        _, predicted = prediction.max(1)
        train_total += targets.size(0)
        train_correct += predicted.eq(targets).sum().item()
        train_loss += loss_bx.item()
        batch_idx += 1
        if batch_idx % 200 == 0:
            print(f"{batch_idx:3d} | Source Loss: {loss_bx:.6f} "
                  f"Source Acc : {100.0 * train_correct / train_total:.2f}")
    train_acc = 100. * train_correct / train_total
    return train_loss/batch_idx, train_acc
def train_epoch(network, train_loader, optimizer):
    """Run one epoch of domain-adversarial (DANN-style) training.

    ``train_loader`` yields (source_batch, target_batch) pairs. The source
    batch trains classifier + domain discriminator; the target batch's
    labels are NOT used in the loss (see the commented-out alternative
    below) -- only its domain label is.

    Reads module-level globals ``start_steps`` / ``total_steps`` (set in
    the __main__ training loop) to anneal the gradient-reversal factor
    ``lam``, plus ``const`` and ``device``.

    Returns (mean total loss per batch, target-domain accuracy in %).
    """
    src_criterion = nn.CrossEntropyLoss()
    dom_criterion = nn.BCEWithLogitsLoss()
    network.train()
    train_loss = 0
    train_correct = 0
    train_total = 0
    train_total_src = 0
    train_correct_src = 0
    batch_idx = 0
    # scheduler.step()
    for source_batch, target_batch in train_loader:
        # anneal lambda from 0 toward 1 over the whole run (DANN schedule)
        p = float(batch_idx + start_steps) / total_steps
        lam = 2. / (1. + np.exp(-10 * p)) - 1
        optimizer.zero_grad()
        # ---- source domain: classification + domain discrimination ----
        inputs, targets = source_batch
        inputs = inputs.to(device)
        targets = targets.to(device) # ground truth class scores
        domains = torch.zeros(inputs.shape[0], 1).to(device) # source is index 0
        logits, feat = network.forward(inputs) # feature vector only
        prediction = network.predict(logits) # class scores
        s_prediction = network.discriminate_domain(feat, lam) # domain score
        loss_bx_src = src_criterion(prediction, targets) # CE loss
        loss_bx_dom_s = dom_criterion(s_prediction, domains)
        _, predicted = prediction.max(1)
        tr_tot = targets.size(0) # only on target
        tr_crc = predicted.eq(targets).sum().item() # only on target
        train_total_src += tr_tot
        train_correct_src += tr_crc
        # train the target
        inputs, targets = target_batch
        inputs, targets = inputs.to(device), targets.to(device) # class gt
        domains = torch.ones(inputs.shape[0], 1).to(device) # target is index 1
        logits, feat = network.forward(inputs) # feature vector only
        prediction = network.predict(logits) # class scores
        d_prediction = network.discriminate_domain(feat, lam) # domain score
        loss_bx_tar = src_criterion(prediction, targets)
        loss_bx_dom_t = dom_criterion(d_prediction, domains)
        # sum the losses and do backward propagation
        loss_dom = (loss_bx_dom_s + loss_bx_dom_t)
        #loss_bx = loss_bx_src + loss_bx_tar + const * lam * loss_dom # using target labels
        loss_bx = loss_bx_src + const * loss_dom # don't use target labels
        loss_bx.backward()
        optimizer.step()
        _, predicted = prediction.max(1)
        tr_tot = targets.size(0) # only on target
        tr_crc = predicted.eq(targets).sum().item() # only on target
        # compute statistics
        train_loss += loss_bx.item()
        train_total += tr_tot
        train_correct += tr_crc
        batch_idx += 1
        if batch_idx % 200 == 0:
            print(f"Batch {batch_idx} / {len(train_loader)}\n\t"
                  f"Lambda {lam:.4f} "
                  f"Domain Loss: {loss_dom:.6f}\n\t"
                  f"Source Loss: {loss_bx_src:.6f} "
                  f"Source Acc : {100.0 * train_correct_src / train_total_src:.2f} "
                  f"SrcDom Acc : {1 - torch.sigmoid(s_prediction.detach()).mean().cpu().item():.3f}\n\t"
                  f"Target Loss: {loss_bx_tar:.6f} "
                  f"Target Acc : {100.0 * train_correct / train_total:.2f} "
                  f"TarDom Acc : {torch.sigmoid(d_prediction.detach()).cpu().mean().item():.3f}"
                  )
    train_acc = 100. * train_correct / train_total
    return train_loss/batch_idx, train_acc
def valid(network, valid_loader):
criterion = nn.CrossEntropyLoss()
# make validation
network.eval()
test_loss = 0
test_correct = 0
test_total = 0
domain_acc = 0
with torch.no_grad():
for inputs, targets in valid_loader:
inputs = inputs.to(device)
targets = targets.to(device)
outputs, feats = network.forward(inputs)
predictions = network.predict(outputs) # class score
domains = network.discriminate_domain(feats, 0) # domain score (correct if 1., 0.5 is wanted)
loss_bx = criterion(predictions, targets)
test_loss += loss_bx.item()
_, predicted = predictions.max(1)
test_total += targets.size(0)
test_correct += predicted.eq(targets).sum().item()
domain_acc += torch.sigmoid(domains.cpu().detach()).sum().item()
# normalize and print stats
test_acc = 100. * test_correct / test_total
domain_acc = 100. * domain_acc / test_total
test_loss /= len(valid_loader)
return test_loss, test_acc, domain_acc
if __name__ == '__main__':
    # define transform
    transform, augmentation = get_transform('svhn')
    augmentation = transforms.Compose([augmentation, transform])
    print(transform, augmentation)
    # define dataset: SVHN is the labelled source, MNIST (replicated to
    # 3 channels to match SVHN) is the target and test set
    #target = tv.datasets.ImageFolder(target_path, transform=augmentation)
    #source = tv.datasets.ImageFolder(source_path, transform=augmentation)
    #test = tv.datasets.ImageFolder(test_path, transform=transform)
    source = tv.datasets.SVHN(root, transform=augmentation)
    target = tv.datasets.MNIST(root, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    test = tv.datasets.MNIST(root, train=False, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    #source = tv.datasets.MNIST(root, transform=tv.transforms.Compose([tv.transforms.Grayscale(3), transform]))
    #target = MNISTM(root, transform=transform)
    #test = MNISTM(root, train=False, transform=transform)
    train = DoubleDataset(source, target)  # yields (source_batch, target_batch) pairs
    # define dataloader
    train_loader = DataLoader(train, 128, True, num_workers=8)
    source_loader = DataLoader(source, 128, True, num_workers=8)
    target_loader = DataLoader(target, 128, True, num_workers=8)
    test_loader = DataLoader(test, 128, False, num_workers=8)
    # get network
    # NOTE(review): this rebinds the module alias ``net`` (imported above as
    # networks.networks) to the model instance -- the alias is unusable below.
    #net = net.cifar_resnet_revgrad(None, NUM_CLASSES).to(device)
    #net = GTSRB_net(43).to(device)
    net = SVHN_net(10).to(device)
    #net = net.wide_resnet_revgrad(None, 125).to(device)
    #net = net.resnet50(True, 125).to(device)
    #net = LeNet().to(device)
    #optimizer = optim.SGD(net.parameters(), lr=0.1)
    #scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [int(0.7*EPOCHS), int(0.9*EPOCHS)], gamma=0.1)
    # total step count drives the lambda annealing inside train_epoch
    total_steps = EPOCHS * len(train_loader)
    print("Do a validation before starting to check it is ok...")
    val_loss, val_acc, dom_acc = valid(net, valid_loader=test_loader)
    print(f"Epoch {-1:03d} : Test Loss {val_loss:.6f}, Test Acc {val_acc:.2f}, Domain Acc {dom_acc:.2f}")
    print("Result should be random guessing, i.e. 10% accuracy")
    # define training steps
    for epoch in range(EPOCHS):
        # steps: global step offset read by train_epoch via module scope
        start_steps = epoch * len(train_loader)
        # train epoch: learning rate decays with the standard DANN schedule
        learning_rate = 0.01 / ((1 + 10 * (epoch)/EPOCHS)**0.75)
        optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9)
        # scheduler.step()
        print(f"Learning rate: {learning_rate}")
        if args.setting == 'SO':
            train_loss, train_acc = train_epoch_single(net, train_loader=source_loader, optimizer=optimizer)
        elif args.setting == 'TO':
            train_loss, train_acc = train_epoch_single(net, train_loader=target_loader, optimizer=optimizer)
        else:
            train_loss, train_acc = train_epoch(net, train_loader=train_loader, optimizer=optimizer)
        # valid!
        val_loss, val_acc, dom_acc = valid(net, valid_loader=test_loader)
        print(f"\nEpoch {epoch+1:03d} : Test Loss {val_loss:.6f}, Test Acc {val_acc:.2f}, Domain Acc {dom_acc:.2f}\n")
        # early stop once the training loss has essentially vanished
        if train_loss < 1e-4:
            break
    print(".... END")
| [
"config.get_transform",
"argparse.ArgumentParser",
"torchvision.transforms.Grayscale",
"numpy.exp",
"torchvision.datasets.SVHN",
"networks.networks.parameters",
"torch.utils.data.DataLoader",
"data.idadataloader.DoubleDataset",
"torchvision.transforms.Compose"
] | [((372, 429), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Sanity Checks Only"""'}), "(description='Sanity Checks Only')\n", (395, 429), False, 'import argparse\n'), ((6591, 6612), 'config.get_transform', 'get_transform', (['"""svhn"""'], {}), "('svhn')\n", (6604, 6612), False, 'from config import get_transform\n'), ((6632, 6677), 'torchvision.transforms.Compose', 'transforms.Compose', (['[augmentation, transform]'], {}), '([augmentation, transform])\n', (6650, 6677), False, 'from torchvision import transforms\n'), ((6967, 7013), 'torchvision.datasets.SVHN', 'tv.datasets.SVHN', (['root'], {'transform': 'augmentation'}), '(root, transform=augmentation)\n', (6983, 7013), True, 'import torchvision as tv\n'), ((7480, 7509), 'data.idadataloader.DoubleDataset', 'DoubleDataset', (['source', 'target'], {}), '(source, target)\n', (7493, 7509), False, 'from data.idadataloader import DoubleDataset\n'), ((7554, 7597), 'torch.utils.data.DataLoader', 'DataLoader', (['train', '(128)', '(True)'], {'num_workers': '(8)'}), '(train, 128, True, num_workers=8)\n', (7564, 7597), False, 'from torch.utils.data import DataLoader\n'), ((7618, 7662), 'torch.utils.data.DataLoader', 'DataLoader', (['source', '(128)', '(True)'], {'num_workers': '(8)'}), '(source, 128, True, num_workers=8)\n', (7628, 7662), False, 'from torch.utils.data import DataLoader\n'), ((7683, 7727), 'torch.utils.data.DataLoader', 'DataLoader', (['target', '(128)', '(True)'], {'num_workers': '(8)'}), '(target, 128, True, num_workers=8)\n', (7693, 7727), False, 'from torch.utils.data import DataLoader\n'), ((7747, 7790), 'torch.utils.data.DataLoader', 'DataLoader', (['test', '(128)', '(False)'], {'num_workers': '(8)'}), '(test, 128, False, num_workers=8)\n', (7757, 7790), False, 'from torch.utils.data import DataLoader\n'), ((8836, 8852), 'networks.networks.parameters', 'net.parameters', ([], {}), '()\n', (8850, 8852), True, 'import networks.networks as net\n'), ((2569, 2584), 
'numpy.exp', 'np.exp', (['(-10 * p)'], {}), '(-10 * p)\n', (2575, 2584), True, 'import numpy as np\n'), ((7084, 7110), 'torchvision.transforms.Grayscale', 'tv.transforms.Grayscale', (['(3)'], {}), '(3)\n', (7107, 7110), True, 'import torchvision as tv\n'), ((7206, 7232), 'torchvision.transforms.Grayscale', 'tv.transforms.Grayscale', (['(3)'], {}), '(3)\n', (7229, 7232), True, 'import torchvision as tv\n')] |
"""Inversion Tools
This file contains the classes that compute least squares inversions using data
stored in DesignMatrix and DataArray objects.
This file can also be imported as a module and contains the following
classes:
* Inversion
"""
import numpy as np
from typing import Union
from scipy.linalg import lstsq
from scipy.sparse import coo_matrix, vstack, hstack
from .operations import compress_matrices, apply_constraints
from threadpoolctl import threadpool_limits
from .constructors import DesignMatrix, DataArray, ModelArray
from .constraints import Constraints
from .regularisation import Regularisation
from .equation import Equation
from .utils import get_timestamp_now
# TODO: Write constraints containter, add_constraints and __collect_constraints
class Inversion():
    """
    A class used to perform least squares inversions from DesignMatrix
    and DataArray objects.
    ...
    Attributes
    ----------
    G : .constructors.DesignMatrix
        A sparse matrix of coefficients for an arbitrary linear equation
        of form Gm=d.
    d : .constructors.DataArray
        An array of data for an arbitrary linear equation.
    m : .constructors.ModelArray
        An array of recovered model parameters as returned from
        scipy.linalg.lstsq.
    Methods
    -------
    invert, forward
    """
    def __init__(self, name: str):
        self.name = name
        # unique identifier for this inversion run: "<name>-<timestamp>"
        self.id = "-".join((name, get_timestamp_now()))

    def invert(self,
               G: DesignMatrix,
               d: DataArray,
               inplace: bool = True,
               constraints: Union[Constraints, None] = None,
               regularisation: Union[Regularisation, None] = None,
               ) -> Union[None, ModelArray]:
        """
        This function takes the DesignMatrix (G) and DataArray (d) and passes
        them to a least-squares inversion (scipy.linalg.lstsq) function to
        recover the ModelArray (m) of a Gm=d type matrix equation. It can
        return m if inplace=False, which can be useful in certain cases
        (e.g. when bootstrapping). The constraints must be passed as a
        Constraints (see .constraints.Constraints) object.

        Parameters
        ----------
        G : .constructors.DesignMatrix
            A sparse matrix of coefficients for an arbitrary linear equation
            of form Gm=d.
        d : .constructors.DataArray
            An array of data for an arbitrary linear equation.
        inplace : bool = True
            Specifies whether to assign G, m and d back to the current
            instance of Inversion (True) or to return the array of model
            parameters as a .constructors.ModelArray (False).
        constraints : .constraints.Constraints
            An object that handles the constraint conditions used to
            constrain the inversion. The constraints will be solved
            for exactly using the method of Lagrange multipliers.
        regularisation : .regularisation.Regularisation
            An object that handles populating the regularisation matrices
            (Γ) and the regularisation coefficients (α) for all terms in
            the Equation. It is stored as a single matrix composed of
            sub-matrices of Γ applied to each term in the equation as they
            occur in the Equation, i.e. each term has its own separate
            regulariser.

        Returns
        -------
        None or .constructors.ModelArray
        """
        g, D = G.matrix, d.array
        if regularisation is not None:
            # build the Tikhonov term Γ^T Γ, scale per-term by α, and
            # append it to the system with zero data rows
            gamma = regularisation.gamma.matrix.T \
                @ regularisation.gamma.matrix
            gamma = coo_matrix(gamma)
            self.__apply_alpha(gamma, regularisation.term_map)
            g = vstack((g, gamma))
            D = vstack((D, np.zeros((gamma.shape[0], 1))))
        GTG, GTd = compress_matrices(g, D)
        if constraints is not None:  # apply constraints from Constraints.
            F = constraints.F.matrix
            h = constraints.h.array
            GTG, GTd = apply_constraints(GTG, GTd, F, h)
        # pin BLAS to one thread so the dense solve is deterministic and
        # does not oversubscribe cores
        with threadpool_limits(limits=1, user_api='blas'):
            invout = lstsq(GTG.toarray(), GTd.toarray())
        m = ModelArray(G.term_map, invout[0])
        if not inplace:
            return m
        else:
            self.m = m
            self.G = G
            self.d = d
            if constraints is not None:
                self.constraints = constraints

    def forward(self, G: DesignMatrix, m: ModelArray) -> DataArray:
        """Compute the forward model d = G @ m as a DataArray."""
        return DataArray(np.dot(G.matrix.toarray(),
                                m.array.toarray()[:G.matrix.shape[1]]
                                .reshape((G.matrix.shape[1], 1))))

    def __apply_alpha(self, reg: coo_matrix, term_map: Equation):
        """Scale each term's rows of the regularisation matrix by its α."""
        for term, stuff in term_map.values.items():
            # print(term)
            if stuff['regularisation']:
                alpha = stuff['regularisation']['alpha']
                # scale in place only the entries whose row belongs to
                # this term's model indices
                reg.data[np.isin(reg.row, stuff['model_indices'])] *= alpha

    # GETTERS AND SETTERS
    @property
    def constraints(self) -> Constraints:
        return self._constraints

    @constraints.setter
    def constraints(self, cons):
        assert type(cons) is Constraints, \
            f"constraints must be type {Constraints} not {type(cons)}."
        # BUG FIX: the original setter validated the value but never stored
        # it, so reading ``self.constraints`` always raised AttributeError.
        self._constraints = cons

    @property
    def name(self) -> str:
        return self._name

    @name.setter
    def name(self, nme: str):
        assert type(nme) is str, "name must be a string."
        self._name = nme

    @property
    def id(self) -> str:
        return self._id

    @id.setter
    def id(self, identifier: str):
        assert type(identifier) is str, "id must be a unique string."
        self._id = identifier

    @property
    def G(self) -> DesignMatrix:
        return self._G

    @G.setter
    def G(self, g):
        assert type(g) is DesignMatrix, \
            f"G must be type {DesignMatrix} not {type(g)}."
        self._G = g

    @property
    def d(self) -> DataArray:
        return self._d

    @d.setter
    def d(self, data: DataArray):
        assert type(data) is DataArray, \
            f"d must be type {DataArray} not {type(data)}."
        self._d = data

    @property
    def m(self) -> ModelArray:
        return self._m

    @m.setter
    def m(self, model: ModelArray):
        assert type(model) is ModelArray, \
            f"m must be type {ModelArray} not {type(model)}."
        self._m = model
| [
"numpy.isin",
"numpy.zeros",
"scipy.sparse.coo_matrix",
"threadpoolctl.threadpool_limits",
"scipy.sparse.vstack"
] | [((3718, 3735), 'scipy.sparse.coo_matrix', 'coo_matrix', (['gamma'], {}), '(gamma)\n', (3728, 3735), False, 'from scipy.sparse import coo_matrix, vstack, hstack\n'), ((3817, 3835), 'scipy.sparse.vstack', 'vstack', (['(g, gamma)'], {}), '((g, gamma))\n', (3823, 3835), False, 'from scipy.sparse import coo_matrix, vstack, hstack\n'), ((4160, 4204), 'threadpoolctl.threadpool_limits', 'threadpool_limits', ([], {'limits': '(1)', 'user_api': '"""blas"""'}), "(limits=1, user_api='blas')\n", (4177, 4204), False, 'from threadpoolctl import threadpool_limits\n'), ((3863, 3892), 'numpy.zeros', 'np.zeros', (['(gamma.shape[0], 1)'], {}), '((gamma.shape[0], 1))\n', (3871, 3892), True, 'import numpy as np\n'), ((5037, 5077), 'numpy.isin', 'np.isin', (['reg.row', "stuff['model_indices']"], {}), "(reg.row, stuff['model_indices'])\n", (5044, 5077), True, 'import numpy as np\n')] |
#!/usr/bin/python3.6
import argparse
import multiprocessing
import os
import sys
from typing import List
from functools import partial
import numpy as np
from tqdm import tqdm
def read_confidences(s: str) -> List[float]:
    """Extract the confidence values from a prediction line.

    Confidences sit at token index 7 and every 6th token thereafter.
    """
    tokens = s.split()
    return [float(c) for c in tokens[7::6]]
def trim_line(threshold: float, s: str) -> str:
    """Drop low-confidence prediction groups from a submission line.

    Header lines (starting with 'ImageID') pass through unchanged. The
    leading group of six tokens is always kept; each subsequent group of
    six is kept only when its confidence (2nd token in the group) exceeds
    ``threshold``. Returns the rebuilt line terminated with a newline.
    """
    if s.startswith('ImageID'):
        return s
    tokens = s.split()
    kept = []
    for start in range(0, len(tokens), 6):
        if start < 6 or float(tokens[start + 1]) > threshold:
            kept.extend(tokens[start:start + 6])
    return ' '.join(kept) + '\n'
if '__main__' == __name__:
    parser = argparse.ArgumentParser()
    parser.add_argument('result', help='result filename', type=str)
    parser.add_argument('filename', help='submission', type=str)
    parser.add_argument('--max_num', help='maximum number of predictions', type=int,
                        default=150 * 10**6)
    parser.add_argument('-f', help='force overwrite', action='store_true')
    args = parser.parse_args()
    print(args)
    # refuse to clobber an existing result unless -f was given
    if os.path.exists(args.result) and not args.f:
        print(args.result, 'already exists, exiting')
        sys.exit()
    pool = multiprocessing.Pool()
    print('reading predictions')
    # first pass: collect every confidence value in the submission
    all_confs: List[float] = []
    with open(args.filename) as f:
        for confs in tqdm(pool.imap(read_confidences, f), total=100000):
            all_confs.extend(confs)
    print(f'sorting scores (total {len(all_confs)})')
    assert len(all_confs) >= args.max_num
    # partial sort: the value at ``pos`` is the cutoff that keeps exactly
    # the top max_num confidences
    pos = len(all_confs) - args.max_num
    threshold = np.partition(all_confs, pos)[pos]
    print('applying threshold', threshold)
    # second pass: rewrite the submission with low-confidence groups removed
    with open(args.filename) as f:
        with open(args.result, 'w') as out:
            for line in tqdm(pool.imap(partial(trim_line, threshold), f), total=100000):
                out.write(line)
| [
"os.path.exists",
"argparse.ArgumentParser",
"numpy.partition",
"functools.partial",
"multiprocessing.Pool",
"sys.exit"
] | [((617, 642), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (640, 642), False, 'import argparse\n'), ((1165, 1187), 'multiprocessing.Pool', 'multiprocessing.Pool', ([], {}), '()\n', (1185, 1187), False, 'import multiprocessing\n'), ((1036, 1063), 'os.path.exists', 'os.path.exists', (['args.result'], {}), '(args.result)\n', (1050, 1063), False, 'import os\n'), ((1142, 1152), 'sys.exit', 'sys.exit', ([], {}), '()\n', (1150, 1152), False, 'import sys\n'), ((1552, 1580), 'numpy.partition', 'np.partition', (['all_confs', 'pos'], {}), '(all_confs, pos)\n', (1564, 1580), True, 'import numpy as np\n'), ((1748, 1777), 'functools.partial', 'partial', (['trim_line', 'threshold'], {}), '(trim_line, threshold)\n', (1755, 1777), False, 'from functools import partial\n')] |
'''
Assign stellar mass/magnitude to subhalos via abundance matching.
Masses in log {M_sun}, luminosities in log {L_sun / h^2}, distances in {Mpc comoving}.
'''
# system -----
#from __future__ import division
import numpy as np
from numpy import log10, Inf
from scipy import integrate, interpolate, ndimage
# local -----
#from visualize import plot_sm
try:
from utilities import utility as ut
except ImportError:
pass
def assign(sub, m_kind='m.star', scat=0, dis_mf=0.007, source='', sham_prop='m.max', zis=None):
    '''
    Assign Mag_r or M_star via abundance matching.
    Import catalog of subhalo [at snapshot], mass kind (mag.r, m.star),
    1-sigma mass scatter at fixed sham prop [dex], disruption mass fraction (for both cens & sats),
    mass source, property to abundance match against, [snapshot index[s]].
    '''
    # normalize input: accept either a list of snapshot catalogs (tree)
    # with explicit snapshot indices, or a single-snapshot catalog (dict)
    if isinstance(sub, list):
        if zis is None:
            raise ValueError('subhalo catalog is a tree list, but no input snapshot index[s]')
    elif isinstance(sub, dict):
        if zis is not None:
            raise ValueError('input snapshot index[s], but input catalog of subhalo at snapshot')
        sub = [sub]
        zis = [0]
    subz = sub[zis[0]]
    # comoving simulation volume used to convert counts <-> number density
    vol = subz.info['box.length'] ** 3
    print('Box Length', subz.info['box.length'])
    print('Box Hubble', subz.Cosmo['hubble'])
    zis = ut.array.arrayize(zis)
    # build the mass/luminosity function object; redshifts below 0.1 are
    # clamped to 0.1 (the lowest redshift the observed fits cover)
    if m_kind == 'm.star':
        if not source:
            source = 'li-drory-march'
        redshift = subz.snap['z']
        if redshift < 0.1:
            redshift = 0.1
        MF = SMFClass(source, redshift, scat, subz.Cosmo['hubble'])
    elif m_kind == 'mag.r':
        if source == 'cool_ages':
            redshift = subz.snap['z']
            if redshift < 0.1:
                redshift = 0.1
            MF = LFClass(source, scat, subz.Cosmo['hubble'], redshift)
        else:
            if not source:
                source = 'blanton'
            MF = LFClass(source, scat, subz.Cosmo['hubble'])
    else:
        raise ValueError('not recognize m_kind = %s' % m_kind)
    for zi in zis:
        subz = sub[zi]
        # output array: one mass/magnitude per subhalo (0 = unassigned)
        subz[m_kind] = np.zeros(subz[sham_prop].size, np.float32)
        if m_kind == 'm.star':
            z = subz.snap['z']
            if z < 0.1:
                z = 0.1
            MF.initialize_redshift(z)
        elif m_kind == 'mag.r':
            if source == 'cool_ages':
                z = subz.snap['z']
                if z < 0.1:
                    z = 0.1
                MF.initialize_redshift(z)
        # maximum number of objects in volume to assign given SMF/LF threshold
        num_max = int(round(MF.numden(MF.mmin) * vol))
        # select subhalos eligible for assignment; optionally drop ones
        # whose mass fraction fell below the disruption threshold
        sis = ut.array.elements(subz[sham_prop], [0.001, Inf])
        if dis_mf:
            sis = ut.array.elements(subz['m.frac.min'], [dis_mf, Inf], sis)
        # rank by sham_prop (descending) and keep only the top num_max
        siis_sort = np.argsort(subz[sham_prop][sis]).astype(sis.dtype)[::-1][:num_max]
        # cumulative counts 1..num_max -> number densities for matching
        num_sums = ut.array.arange_length(num_max) + 1
        if scat:
            # draw log-normal scatter; mag.r scatter is in magnitudes
            # (2.5 x dex), hence the factor
            if m_kind == 'm.star':
                scats = np.random.normal(np.zeros(num_max), MF.scat).astype(np.float32)
            elif m_kind == 'mag.r':
                scats = np.random.normal(np.zeros(num_max), 2.5 * MF.scat).astype(np.float32)
            #print MF.m_scat(num_sums / vol) + scats
            subz[m_kind][sis[siis_sort]] = MF.m_scat(num_sums / vol) + scats
        else:
            subz[m_kind][sis[siis_sort]] = MF.m(num_sums / vol)
class SMFClass:
'''
Relate number density [dnumden / dlog(M_star/M_sun)] <-> stellar mass [log10(M_star/M_sun)]
using fits to observed stellar mass functions.
All SMFs assume input Hubble constant.
'''
    def __init__(self, source='li-march', redshift=0.1, scat=0, hubble=0.7):
        '''
        Import SMF source, redshift, log scatter in M_star at fixed Msub.

        source : str : which published stellar-mass-function fit to use
            ('li', 'baldry', 'cole-march', 'li-march', 'li-march-extreme',
            'constant-li', 'fontana', 'li-drory-march',
            'li-drory-march_sameslope', 'perez')
        redshift : float : redshift at which to evaluate the SMF
        scat : float : 1-sigma log scatter [dex] in M_star at fixed
            subhalo property
        hubble : float : Hubble constant / 100; all masses/amplitudes are
            converted to this value
        '''
        self.source = source
        self.scat = scat
        self.hubble = hubble
        if source == 'li':
            '''
            Li & White 2009. z = 0.1 from SDSS. Chabrier IMF. Complete to 1e8 M_sun/h^2.
            '''
            self.redshifts = np.array([0.1])
            self.mchars = np.array([10.525]) - 2 * log10(hubble)  # {M_sun}
            self.amplitudes = np.array([0.0083]) * hubble ** 3  # {Mpc ^ -3 / log(M/M_sun)}
            self.slopes = np.array([-1.155])
            self.initialize_redshift(redshift)
        elif source == 'baldry':
            '''
            Baldry et al 2008. z = 0.1 from SDSS. diet Salpeter IMF = 0.7 Salpeter.
            Complete to 1e8 M_sun.
            '''
            h_them = 0.7  # their assumed hubble constant
            self.redshifts = np.array([0.1])
            # convert to Chabrier
            self.mchars = (np.array([10.525]) + 2 * log10(h_them / hubble) + log10(1 / 1.6 / 0.7))
            self.amplitudes = np.array([0.00426]) * (hubble / h_them) ** 3
            self.amplitudes2 = np.array([0.00058]) * (hubble / h_them) ** 3
            self.slopes = np.array([-0.46])
            self.slopes2 = np.array([-1.58])
            self.initialize_redshift(redshift)
        elif source == 'cole-march':
            '''
            Marchesini et al 2009. 1.3 < z < 4.0. Kroupa IMF.
            z = 0.1 from Cole et al 2001 (2dF), converting their Salpeter to Kroupa.
            *** In order to use out to z ~ 4, made evolution flat from z = 3.5 to 4.
            '''
            self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
            self.mchars = np.array([10.65, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
            # converted to {Mpc ^ -3 dex ^ -1}
            self.amplitudes = np.array([90.00, 29.65, 11.52, 1.55, 1.55]) * 1e-4 * hubble ** 3
            self.slopes = np.array([-1.18, -1.00, -1.01, -1.39, -1.39])
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'li-march':
            '''
            Marchesini et al 2009, using Li & White at z = 0.1.
            '''
            self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
            self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
            self.amplitudes = (np.array([0.0083, 0.002965, 0.00115, 0.000155, 0.000155]) *
                               hubble ** 3)
            self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'li-march-extreme':
            '''
            More extreme version of Marchesini et al 2009, using Li & White at z = 0.1.
            '''
            self.redshifts = np.array([0.1, 1.6, 2.5, 3.56, 4.03])
            self.mchars = np.array([10.525, 10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble)
            self.amplitudes = (np.array([0.0083, 0.00001, 0.00001, 0.00001, 0.000001]) *
                               hubble ** 3)
            self.slopes = np.array([-1.155, -1.00, -1.01, -1.39, -1.39])
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'constant-li':
            '''
            Li & White at all redshifts
            '''
            self.redshifts = np.arange(0.1, 4.03, 0.1)
            self.mchars = np.repeat(10.525, len(self.redshifts)) - 2 * log10(hubble)
            self.amplitudes = (np.repeat(0.0083, len(self.redshifts))* hubble ** 3)
            self.slopes = np.repeat(-1.155, len(self.redshifts))
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'fontana':
            '''
            Fontana et al 2006. 0.4 < z < 4 from GOODS-MUSIC. Salpeter IMF.
            z = 0.1 from Cole et al 2001.
            '''
            h_them = 0.7  # their assumed hubble constant
            self.redshifts = np.array([0.1, 4.0])  # store redshift range of validity
            self.amplitude0 = 0.0035 * (hubble / h_them) ** 3  # to {Mpc ^ -3 / log10(M/M_sun)}
            self.amplitude1 = -2.2
            self.slope0 = -1.18
            self.slope1 = -0.082
            self.mchar0 = 11.16  # log10(M/M_sun)
            self.mchar1 = 0.17  # log10(M/M_sun)
            self.mchar2 = -0.07  # log10(M/M_sun)
            # convert to my hubble & Chabrier IMF
            self.mchar0 += 2 * log10(h_them / hubble) - log10(1.6)
            self.initialize_redshift(redshift)
        elif source == 'li-drory-march':
            '''
            Drory et al 2009. 0.3 < z < 1.0 from COSMOS.
            Chabrier IMF limited to 0.1 - 100 M_sun.
            Complete to (8.0, 8.6, 8.9, 9.1) M_sun/h^2 at z = (0.3, 0.5, 0.7, 0.9).
            Anchor to Li & White at z = 0.1, Marchesini et al at higher redshift.
            See Ilbert et al 2010 for alternate COSMOS version.
            '''
            h_them = 0.72  # their assumed hubble constant
            self.redshifts = np.array([0.3, 0.5, 0.7, 0.9])
            self.mchars = np.array([10.90, 10.91, 10.95, 10.92]) + 2 * log10(h_them / hubble)
            # convert to [Mpc ^ -3 dex^-1]
            self.amplitudes = (np.array([0.00289, 0.00174, 0.00216, 0.00294]) *
                               (hubble / h_them) ** 3)
            self.slopes = np.array([-1.06, -1.05, -0.93, -0.91])
            self.mchars2 = np.array([9.63, 9.70, 9.75, 9.85]) + 2 * log10(h_them / hubble)
            self.amplitudes2 = (np.array([0.00180, 0.00143, 0.00289, 0.00212]) *
                                (hubble / h_them) ** 3)
            self.slopes2 = np.array([-1.73, -1.76, -1.65, -1.65])
            # add li & white
            self.redshifts = np.append(0.1, self.redshifts)
            self.mchars = np.append(10.525 - 2 * log10(hubble), self.mchars)
            self.amplitudes = np.append(0.0083 * hubble ** 3, self.amplitudes)
            self.slopes = np.append(-1.155, self.slopes)
            self.mchars2 = np.append(self.mchars2[0], self.mchars2)
            self.amplitudes2 = np.append(0, self.amplitudes2)
            self.slopes2 = np.append(self.slopes2[0], self.slopes2)
            # add marchesini et al
            h_them = 0.7  # their assumed hubble constant
            self.redshifts = np.append(self.redshifts, [1.6, 2.5, 3.56, 4.03])
            self.mchars = np.append(self.mchars,
                                    np.array([10.60, 10.65, 11.07, 11.07]) - 2 * log10(hubble))
            self.amplitudes = np.append(self.amplitudes,
                                        np.array([0.002965, 0.00115, 0.000155, 0.000155]) *
                                        hubble ** 3)
            self.slopes = np.append(self.slopes, [-1.00, -1.01, -1.39, -1.39])
            self.mchars2 = np.append(self.mchars2, np.zeros(4) + self.mchars2[0])
            self.amplitudes2 = np.append(self.amplitudes2, np.zeros(4))
            self.slopes2 = np.append(self.slopes2, np.zeros(4) + self.slopes2[0])
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'li-drory-march_sameslope':
            '''
            Apply low-mass slope from Drory et al 2009 to Li & White, Marchesini et al.
            '''
            self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03])
            # NOTE(review): the '- 2 * log10(hubble)' below sits INSIDE
            # np.array(...) and is applied to a plain Python list, which
            # raises TypeError if this branch runs; compare the
            # 'li-drory-march' branch where it is applied after np.array().
            self.mchars = np.array([10.525, 10.61, 10.62, 10.66, 10.63, 10.60, 10.65, 11.07,
                                    11.07] - 2 * log10(hubble))
            self.amplitudes = np.array([0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297,
                                        0.00115, 0.000155, 0.000155]) * hubble ** 3
            self.slopes = np.array([-1.155, -1.06, -1.05, -0.93, -0.91, -1.00, -1.01, -1.39, -1.39])
            self.mchars2 = (np.array([9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83]) -
                            2 * log10(hubble))
            self.amplitudes2 = np.array([0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962,
                                         0.000375, 0.0000503, 0.0000503]) * hubble ** 3
            self.slopes2 = np.array([-1.70, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39])
            self.make_splines()
            self.initialize_redshift(redshift)
        elif source == 'perez':
            '''
            Perez-Gonzalez et al 2008. 0.1 < z < 4.0 from Spitzer, Hubble, Chandra.
            Salpeter IMF.
            Complete to (8, 9.5, 10, 11) M_star at z = (0, 1, 2, 3).
            '''
            h_them = 0.7  # their assumed hubble constant
            self.redshifts = np.array([0.1, 0.3, 0.5, 0.7, 0.9, 1.15, 1.45, 1.8, 2.25, 2.75, 3.25,
                                       3.75])
            self.mchars = np.array([11.16, 11.20, 11.26, 11.25, 11.27, 11.31, 11.34, 11.40, 11.46,
                                    11.34, 11.33, 11.36]) + 2 * log10(h_them / hubble)
            # convert to Chabrier IMF
            self.mchars -= log10(1.6)
            # convert to [Mpc ^ -3 dex ^ -1]
            self.amplitudes = (10 ** np.array([-2.47, -2.65, -2.76, -2.82, -2.91, -3.06, -3.27,
                                               - 3.49, -3.69, -3.64, -3.74, -3.94]) *
                               (hubble / h_them) ** 3)
            self.slopes = np.array([-1.18, -1.19, -1.22, -1.26, -1.23, -1.26, -1.29, -1.27, -1.26,
                                    - 1.20, -1.14, -1.23])
            self.make_splines()
            self.initialize_redshift(redshift)
        else:
            raise ValueError('not recognize source = %s' % source)
def make_splines(self):
    '''
    Fit 1st-order (k=1) splines to each SMF fit parameter as a function of redshift.
    Linear splines are used deliberately to avoid ringing between knots.
    Sets self.mchar_z_spl, self.slope_z_spl, self.amplitude_z_spl; double-Schechter
    sources additionally get the *2 (second-component) splines.
    '''
    zs = self.redshifts
    for spl_name, values in (('mchar_z_spl', self.mchars),
                             ('slope_z_spl', self.slopes),
                             ('amplitude_z_spl', self.amplitudes)):
        setattr(self, spl_name, interpolate.splrep(zs, values, k=1))
    # double-Schechter sources carry a second set of fit parameters
    if self.source in ('li-drory-march', 'li-drory-march_sameslope'):
        for spl_name, values in (('mchar2_z_spl', self.mchars2),
                                 ('slope2_z_spl', self.slopes2),
                                 ('amplitude2_z_spl', self.amplitudes2)):
            setattr(self, spl_name, interpolate.splrep(zs, values, k=1))
def initialize_redshift(self, redshift=0.1):
    '''
    Set the current redshift and cache SMF fit parameters at that redshift.

    The amplitude is pre-multiplied by ln(10) and the slope pre-incremented
    by 1 so that dndm() avoids repeating that work per call.
    Finishes by rebuilding the number-density <-> mass splines.

    Import redshift.
    Raises ValueError if redshift falls outside the tabulated range.
    '''
    if redshift < self.redshifts.min() - 1e-5 or redshift > self.redshifts.max() + 1e-5:
        raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
    self.redshift = redshift
    # BUG FIX: original tested `self.source in ('li')` / `in ('baldry')` — a missing
    # trailing comma made these *substring* checks against a str, not tuple
    # membership; they only worked by accident for exact matches.
    if self.source == 'li':
        # single Schechter with redshift-independent parameters
        self.m_char = self.mchars[0]
        self.amplitude = self.amplitudes[0] * np.log(10)
        self.slope = self.slopes[0] + 1
    elif self.source == 'baldry':
        # double Schechter, redshift-independent
        self.m_char = self.mchars[0]
        # NOTE(review): second component reuses mchars[0] (not mchars2[0]) —
        # may be intentional (shared M*) but confirm against the Baldry fit.
        self.mchar2 = self.mchars[0]
        self.amplitude = self.amplitudes[0] * np.log(10)
        self.amplitude2 = self.amplitudes2[0] * np.log(10)
        self.slope = self.slopes[0] + 1
        self.slope2 = self.slopes2[0] + 1
    elif self.source in ('cole-march', 'li-march', 'perez', 'constant-li', 'li-march-extreme'):
        # single Schechter, parameters interpolated from spline fits v redshift
        self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
        self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
        self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
    elif self.source == 'fontana':
        # parameters are analytic functions of redshift
        # NOTE(review): mchar2 here is a polynomial coefficient, colliding in name
        # with the double-Schechter mchar2 used elsewhere — confirm no cross-talk.
        self.m_char = self.mchar0 + self.mchar1 * redshift + self.mchar2 * redshift ** 2
        self.amplitude = (self.amplitude0 * (1 + redshift) ** self.amplitude1) * np.log(10)
        self.slope = (self.slope0 + self.slope1 * redshift) + 1
    elif self.source in ('li-drory-march', 'li-drory-march_sameslope'):
        # double Schechter: interpolate both components from the redshift splines
        self.m_char = interpolate.splev(redshift, self.mchar_z_spl)
        self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl) * np.log(10)
        self.slope = interpolate.splev(redshift, self.slope_z_spl) + 1
        self.mchar2 = interpolate.splev(redshift, self.mchar2_z_spl)
        self.amplitude2 = interpolate.splev(redshift, self.amplitude2_z_spl) * np.log(10)
        self.slope2 = interpolate.splev(redshift, self.slope2_z_spl) + 1
    self.make_numden_m_spline(self.redshift, self.scat)
def dndm(self, m_star):
    '''
    Schechter differential number density:
    d(num-den) / d(log m) = ln(10) * amplitude * (10^(m_star - m_char)) ** (1 + slope) *
    exp(-10^(m_star - m_char)).
    Double-Schechter sources ('drory' variants, 'baldry') add a second component.
    Import stellar mass [log].
    '''
    ratio1 = 10 ** (m_star - self.m_char)
    term1 = self.amplitude * ratio1 ** self.slope * np.exp(-ratio1)
    # single-Schechter sources: done
    if 'drory' not in self.source and self.source != 'baldry':
        return term1
    # second Schechter component
    ratio2 = 10 ** (m_star - self.mchar2)
    term2 = self.amplitude2 * ratio2 ** self.slope2 * np.exp(-ratio2)
    return term1 + term2
def numden(self, m_min, m_max=14):
    '''
    Integrated number density within the given [log] stellar-mass range.
    Import stellar mass range.
    '''
    # quad returns (value, abs-error); only the value is wanted
    value, _abs_err = integrate.quad(self.dndm, m_min, m_max)
    return value
def make_numden_m_spline(self, redshift=0.1, scat=0):
    '''
    Make splines to relate d(num-den) / d[log]m & num-den(> m) to m.
    Import redshift (if want to change), mass scatter [dex].

    Builds self.log_numden_m_spl and self.m_log_numden_spl (no-scatter), and,
    when scat > 0, the *_scat counterparts from a deconvolved mass function.
    '''
    # iteration count for ut.math.deconvolute below
    iter_num = 30
    if redshift != self.redshift:
        self.initialize_redshift(redshift)
    if scat != self.scat:
        self.scat = scat
    # tabulation step in log-mass [dex]
    dm = 0.01
    dm_scat_lo = 3 * scat  # extend fit for deconvolute b.c.'s
    dm_scat_hi = 0.5 * scat  # extend fit for deconvolute b.c.'s
    self.mmin = 7.3
    self.mmax = 12.3
    # NOTE(review): xrange is Python 2 only — this module predates Python 3.
    m_stars = np.arange(self.mmin - dm_scat_lo, self.mmax + dm_scat_hi, dm, np.float32)
    numdens = np.zeros(m_stars.size)
    dndms = np.zeros(m_stars.size)
    for mi in xrange(m_stars.size):
        # make sure numdens are monotonically decreasing even if = -infinity
        # (the tiny mi-dependent epsilon keeps log10 finite and values strictly ordered)
        numdens[mi] = self.numden(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
        dndms[mi] = self.dndm(m_stars[mi]) + 1e-9 * (1 - mi * 0.001)
    # make no scatter splines (inverse spline needs ascending x, hence [::-1])
    self.log_numden_m_spl = interpolate.splrep(m_stars, log10(numdens))
    self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], m_stars[::-1])
    # at high z, smf not monotonically decreasing, so spline not work on below
    # self.m_log_dndm_spl = interpolate.splrep(log10(dndms)[::-1], m_stars[::-1])
    # make scatter splines
    if scat:
        # deconvolve observed smf assuming scatter to find unscattered one
        # NOTE(review): presumably an iterative (Lucy-style) deconvolution —
        # confirm against ut.math.deconvolute.
        dndms_scat = ut.math.deconvolute(dndms, scat, dm, iter_num)
        # chop off lower boundaries, unreliable
        m_stars = m_stars[int(dm_scat_lo / dm):]
        dndms_scat = dndms_scat[int(dm_scat_lo / dm):]
        # find spline to integrate over
        self.dndm_m_scat_spl = interpolate.splrep(m_stars, dndms_scat)
        numdens_scat = np.zeros(m_stars.size)
        for mi in xrange(m_stars.size):
            # cumulative number density above each mass via spline integral
            numdens_scat[mi] = interpolate.splint(m_stars[mi], m_stars.max(),
                                                 self.dndm_m_scat_spl)
            numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
        self.log_numden_m_scat_spl = interpolate.splrep(m_stars, log10(numdens_scat))
        self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1],
                                                        m_stars[::-1])
def m(self, num_den):
    '''
    Look up mass at a threshold number density (no-scatter spline).
    Import threshold number density.
    '''
    log_num_den = log10(num_den)
    masses = interpolate.splev(log_num_den, self.m_log_numden_spl)
    # cast the spline result down to float32
    return masses.astype(np.float32)
def m_scat(self, num_den):
    '''
    Look up mass at a threshold number density, using the de-scattered spline.
    Import threshold number density.
    '''
    lookup = interpolate.splev(log10(num_den), self.m_log_numden_scat_spl)
    # cast the spline result down to float32
    return lookup.astype(np.float32)
def m_dndm(self, dn_dm):
    '''
    Look up mass at a given d(num-den)/d[log]m.
    Import d(num-den) / d[log]m.
    NOTE(review): creation of self.m_log_dndm_spl is commented out in
    make_numden_m_spline, so this raises AttributeError unless that spline
    is built elsewhere — confirm before use.
    '''
    log_dndm = log10(dn_dm)
    return interpolate.splev(log_dndm, self.m_log_dndm_spl)
def dndm_scat(self, m):
    '''
    Look up d(num-den) / d[log]m at mass m, using the de-scattered spline.
    Import mass.
    '''
    spline = self.dndm_m_scat_spl
    return interpolate.splev(m, spline)
def numden_scat(self, m):
    '''
    Look up num-den(> [log]m) at mass m, using the de-scattered spline.
    Import mass.
    '''
    log_numden = interpolate.splev(m, self.log_numden_m_scat_spl)
    # spline stores log10 of the number density
    return 10 ** log_numden
class LFClass(SMFClass):
    '''
    Relate number density [Mpc ^ -3] <-> magnitude/luminosity using spline fit to luminosity
    functions.
    Import spline querying functions from SMFClass.
    '''
    def __init__(self, source='blanton', scat=0, hubble=0.7, redshift=0.1):
        '''
        Import source (fit catalog name), log-normal scatter, hubble constant, redshift.
        Sets Schechter parameters (m_char, amplitude, slope) for the chosen source;
        'cool_ages' instead tabulates them v redshift and builds splines.
        '''
        self.source = source
        self.scat = scat
        self.hubble = hubble
        if source == 'norberg':
            # Norberg et al 2002: 2dF r-band at z ~ 0.1.
            self.m_char = -19.66
            self.amplitude = 1.61e-2 * hubble ** 3  # Mpc ^ -3
            self.slope = -1.21
        elif source == 'blanton':
            # Blanton et al 03: SDSS r-band z ~ 0.1.
            self.m_char = -20.44
            self.amplitude = 1.49e-2 * hubble ** 3  # Mpc ^ -3
            self.slope = -1.05
        elif source == 'sheldon':
            # Sheldon et al 07: SDSS i-band z = 0.25. Valid for Mag < -19.08 (0.19L*).
            self.m_char = -20.9  # Hansen et al 09 catalog has -20.8
            self.amplitude = 1.02e-2 * hubble ** 3  # Mpc ^ -3
            self.slope = -1.21
        elif source == 'cool_ages':
            # Cool et al 2012: AGES. Redshift-dependent fit parameters.
            self.redshifts = np.array([0.1, 0.2, 0.3, 0.4, 0.5, 0.65])
            self.mchars = np.array([-20.58, -20.81, -20.81, -20.99, -21.29, -21.38])
            self.amplitudes = (np.array([1.59e-2, 1.52e-2, 1.24e-2, 1.44e-2, 1.08e-2, 1.05e-2]) * hubble ** 3)  # Mpc ^ -3
            # faint-end slope held fixed across redshift
            self.slopes = np.repeat(-1.05, len(self.redshifts))
            self.make_splines()
            self.initialize_redshift(redshift)
        else:
            raise ValueError('not recognize source = %s in LFClass' % source)
        if source != 'cool_ages':
            # redshift=None: the AttributeError guard in make_numden_m_spline
            # absorbs the missing self.redshift on this first call
            self.make_numden_m_spline(scat, redshift=None)
    def dndm(self, mag):
        '''
        Get d(num-den) / d(mag).
        Import (positive) magnitude.
        '''
        # NOTE(review): in-place negation — mutates the caller's array if mag
        # is a numpy array; confirm callers pass scalars or throwaway arrays.
        mag *= -1.
        return (np.log(10) / 2.5 * self.amplitude *
                10 ** ((self.slope + 1) / 2.5 * (self.m_char - mag)) *
                np.exp(-10 ** ((self.m_char - mag) / 2.5)))
    def numden(self, m_min, m_max=25):
        '''
        Get number density within range.
        Import (positive) magnitude range.
        '''
        return integrate.quad(self.dndm, m_min, m_max)[0]
    def initialize_redshift(self, redshift=0.1):
        '''
        Make spline to get mass from number density.
        Import redshift.
        Find LF fit parameters at redshift (spline evaluation; ext=0 extrapolates
        beyond the last knot), then rebuild the number-density <-> mag splines.
        '''
        # only the lower bound is enforced; upper-bound check is deliberately disabled
        if redshift < self.redshifts.min() - 1e-5:  # or redshift > self.redshifts.max() + 1e-5:
            raise ValueError('z = %.2f out of range for %s' % (redshift, self.source))
        self.redshift = redshift
        self.m_char = interpolate.splev(redshift, self.mchar_z_spl, ext=0)
        self.amplitude = interpolate.splev(redshift, self.amplitude_z_spl, ext=0)
        self.slope = interpolate.splev(redshift, self.slope_z_spl, ext=0)
        self.make_numden_m_spline(scat = self.scat, redshift = self.redshift)
    def make_numden_m_spline(self, scat=0, redshift=0.1):
        '''
        Make splines to relate d(num-den)/d(mag) & num-den(> mag) to mag.
        Import scatter [dex], redshift.
        '''
        try:
            if redshift != self.redshift:
                self.initialize_redshift(redshift)
        except AttributeError:
            # self.redshift not set yet (first call from __init__)
            pass
        if scat != self.scat:
            self.scat = scat  # convert scatter in log(lum) to scatter in magnitude
        mag_scat = 2.5 * self.scat
        deconvol_iter_num = 20
        # tabulation step in magnitude
        dmag = 0.01
        dmag_scat_lo = 2 * mag_scat  # extend fit for b.c.'s of deconvolute
        dmag_scat_hi = 1 * mag_scat
        self.mmin = 17.0
        self.mmax = 23.3
        # NOTE(review): xrange is Python 2 only — this module predates Python 3.
        mags = np.arange(self.mmin - dmag_scat_lo, self.mmax + dmag_scat_hi, dmag, np.float32)
        numdens = np.zeros(mags.size)
        dndms = np.zeros(mags.size)
        for mi in xrange(len(mags)):
            numdens[mi] = np.abs(self.numden(mags[mi]))
            dndms[mi] = self.dndm(mags[mi])
        #print 'numden ', numdens[:10]
        #print mags[:10]
        # make no scatter splines (inverse spline needs ascending x, hence [::-1])
        self.log_numden_m_spl = interpolate.splrep(mags, log10(numdens))
        self.dndm_m_spl = interpolate.splrep(mags, dndms)
        self.m_log_numden_spl = interpolate.splrep(log10(numdens)[::-1], mags[::-1])
        # make scatter splines
        if self.scat:
            # deconvolve observed lf assuming scatter to find unscattered one
            dndms_scat = ut.math.deconvolute(dndms, mag_scat, dmag, deconvol_iter_num)
            # chop off boundaries, unreliable
            # NOTE(review): float slice indices (Python 2-era); raise TypeError on
            # Python 3 — the SMFClass version wraps the same expressions in int().
            mags = mags[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
            dndms_scat = dndms_scat[dmag_scat_lo / dmag:-dmag_scat_hi / dmag]
            # find spline to integrate over
            self.dndm_m_scat_spl = interpolate.splrep(mags, dndms_scat)
            numdens_scat = np.zeros(mags.size)
            for mi in xrange(mags.size):
                # cumulative number density above each mag via spline integral
                numdens_scat[mi] = np.abs(interpolate.splint(mags[mi], mags.max(), self.dndm_m_scat_spl))
                numdens_scat[mi] += 1e-9 * (1 - mi * 0.001)
            self.log_numden_m_scat_spl = interpolate.splrep(mags, log10(numdens_scat))
            self.m_log_numden_scat_spl = interpolate.splrep(log10(numdens_scat)[::-1], mags[::-1])
#===================================================================================================
# test/plot
#===================================================================================================
def test_sham(sub, zi, m_kind, m_min, m_max, scat=0.2, mfracmin=0, m_wid=0.1, source='',
              sham_kind='m.max'):
    '''
    Plot mass functions.
    Import subhalo catalog, snapshot index,
    mass kind (m.star, mag.r) & range & scatter at fixed m_max,
    disruption mass fraction, bin size, GMF source, subhalo property to assign against.

    Compares the analytic mass/luminosity function against the one recovered from
    the SHAM-assigned catalog; when scat > 0, also tests convolve/deconvolve recovery.
    '''
    # pad bins so deconvolution boundary effects can be trimmed later
    m_wid_scat = 3 * scat
    m_bins = np.arange(m_min - m_wid_scat, m_max + m_wid_scat, m_wid, np.float32) + 0.5 * m_wid
    if m_kind == 'm.star':
        if not source:
            source = 'li-march'
        Sf = SMFClass(source, sub.snap['z'][zi], scat, sub.Cosmo['hubble'])
    elif m_kind == 'mag.r':
        if not source:
            source = 'blanton'
        Sf = LFClass(source, scat, sub.Cosmo['hubble'])
    # analytic gmf, no scatter
    dndm_anal = Sf.dndm(m_bins)
    if scat:
        # convolve above gmf with scatter, then deconvolve, to see if can recover
        dndm_anal_conv = ndimage.filters.gaussian_filter1d(dndm_anal, Sf.scat / m_wid)
        dndm_anal_decon = ut.math.deconvolute(dndm_anal_conv, Sf.scat, m_wid, 30)
        # mean (underlying) relation
        dndm_anal_pre = Sf.dndm_scat(m_bins)
        # observed gmf after convolution (no random noise)
        dndm_anal_recov = ndimage.filters.gaussian_filter1d(dndm_anal_pre, Sf.scat / m_wid)
        # cut out extremes, unreliable
        cutoff = int(round(m_wid_scat / m_wid))
        if cutoff > 0:
            m_bins = m_bins[cutoff:-cutoff]
            dndm_anal = dndm_anal[cutoff:-cutoff]
            dndm_anal_conv = dndm_anal_conv[cutoff:-cutoff]
            dndm_anal_pre = dndm_anal_pre[cutoff:-cutoff]
            dndm_anal_decon = dndm_anal_decon[cutoff:-cutoff]
            dndm_anal_recov = dndm_anal_recov[cutoff:-cutoff]
    # shift bins back from centers to edges
    m_bins -= 0.5 * m_wid
    # assign mass to subhalo, with or without scatter (random noise at high mass end)
    assign(sub, zi, m_kind, scat, mfracmin, source, sham_kind)
    # histogram the assigned masses into the analytic bins
    ims = ut.bin.idigitize(sub[zi][m_kind], m_bins)
    gal_nums = np.zeros(m_bins.size)
    # NOTE(review): xrange is Python 2 only — this module predates Python 3.
    for mi in xrange(m_bins.size):
        gal_nums[mi] = ims[ims == mi].size
    print('bin count min %d' % np.min(gal_nums))
    # convert counts to comoving number density per dex
    dndm_sham = gal_nums / sub.info['box.length'] ** 3 / m_wid
    print('assign ratio ave %.3f' % np.mean(abs(dndm_sham / dndm_anal)))
    if scat:
        print('recov ratio ave %.3f' % np.mean(abs(dndm_anal_recov / dndm_anal)))
    # plot ----------
    Plot = plot_sm.PlotClass()
    Plot.set_axis('lin', 'lin', [m_min, m_max], log10(dndm_anal))
    Plot.make_window()
    Plot.draw('c', m_bins, log10(dndm_anal))
    Plot.draw('c', m_bins, log10(dndm_sham), ct='red')
    if scat:
        Plot.draw('c', m_bins, log10(dndm_anal_pre), ct='green')
        Plot.draw('c', m_bins, log10(dndm_anal_recov), ct='blue')
def plot_source_compare(sources=['li-march', 'perez'], redshifts=0.1, m_lim=[8.0, 11.7], m_wid=0.1,
                        plot_kind='value'):
    '''
    Plot each source at each redshift.
    Import mass functions, redshifts, plotting mass range & bin width, plot kind (value, ratio).
    '''
    sources = ut.array.arrayize(sources)
    redshifts = ut.array.arrayize(redshifts)
    Mbin = ut.bin.BinClass(m_lim, m_wid)
    # log_dn_dlogms[source][redshift] -> log10 dn/dlog(m) at the bin midpoints
    log_dn_dlogms = []
    # NOTE(review): xrange is Python 2 only — this module predates Python 3.
    for src_i in xrange(sources.size):
        log_dn_dlogms_so = []
        for zi in xrange(redshifts.size):
            Smf = SMFClass(sources[src_i], redshifts[zi], scat=0, hubble=0.7)
            log_dn_dlogms_so.append(log10(Smf.dndm(Mbin.mids)))
        log_dn_dlogms.append(log_dn_dlogms_so)
    # plot ----------
    Plot = plot_sm.PlotClass()
    if plot_kind == 'ratio':
        # NOTE(review): log_dn_dlogms is a plain Python list here, so this
        # subtraction raises TypeError — likely needs np.array(log_dn_dlogms).
        ys = 10 ** (log_dn_dlogms - log_dn_dlogms[0][0])
        Plot.axis.space_y = 'lin'
    elif plot_kind == 'value':
        ys = log_dn_dlogms
        Plot.axis.space_y = 'log'
    Plot.set_axis('log', '', Mbin.mids, ys, tick_lab_kind='log')
    Plot.set_axis_label('m.star', 'dn/dlog(M_{star}) [h^{3}Mpc^{-3}]')
    Plot.make_window()
    Plot.set_label(pos_y=0.4)
    for src_i in xrange(sources.size):
        for zi in xrange(redshifts.size):
            Plot.draw('c', Mbin.mids, log_dn_dlogms[src_i][zi], ct=src_i, lt=zi)
            Plot.make_label(sources[src_i] + ' z=%.1f' % redshifts[zi])
    # add in cosmos at z = 0.35
    '''
    cosmos = [[8.8, 0.015216 , 1.250341e-03],
        [9, 0.01257, 1.210321e-03],
        [9.2, 0.01009, 1.047921e-03],
        [9.4, 0.007941, 8.908445e-04],
        [9.6, 0.006871, 7.681928e-04],
        [9.8, 0.005688, 6.825634e-04],
        [10, 0.005491, 6.136567e-04],
        [10.2, 0.004989, 6.004422e-04],
        [10.4, 0.00478, 5.917784e-04],
        [10.6, 0.00423, 5.851342e-04],
        [10.8, 0.003651, 4.919025e-04],
        [11, 0.002253, 3.562664e-04],
        [11.2, 0.001117, 2.006811e-04],
        [11.4, 0.0004182, 8.486049e-05],
        [11.6, 8.365e-05, 2.802892e-05],
        [11.8, 1.195e-05, 8.770029e-06]]
    '''
    # cosmos at z = 0.87
    # columns: [log mass, dn/dlog(m), uncertainty]
    cosmos = [[9.8, 0.005377, 3.735001e-04],
              [10, 0.004206, 3.443666e-04],
              [10.2, 0.003292, 3.235465e-04],
              [10.4, 0.003253, 3.318173e-04],
              [10.6, 0.002985, 3.198681e-04],
              [10.8, 0.002994, 2.735925e-04],
              [11, 0.002218, 1.922526e-04],
              [11.2, 0.001202, 1.067172e-04],
              [11.4, 0.0005681, 3.983348e-05],
              [11.6, 0.0001837, 1.195015e-05],
              [11.8, 4.214e-05, 3.200856e-06],
              [12, 1.686e-06, 7.463160e-07]]
    cosmos = np.array(cosmos)
    cosmos = cosmos.transpose()
    # convert to h-free volume units (their h = 0.72) and take log
    cosmos[1] = log10(cosmos[1] * 0.72 ** -3)
    # Plot.draw('pp', cosmos[0], cosmos[1], pt=123)
| [
"numpy.log10",
"numpy.log",
"numpy.argsort",
"numpy.array",
"utilities.utility.math.deconvolute",
"numpy.arange",
"utilities.utility.bin.idigitize",
"numpy.exp",
"scipy.interpolate.splev",
"numpy.min",
"utilities.utility.array.elements",
"scipy.integrate.quad",
"scipy.ndimage.filters.gaussia... | [((1353, 1375), 'utilities.utility.array.arrayize', 'ut.array.arrayize', (['zis'], {}), '(zis)\n', (1370, 1375), True, 'from utilities import utility as ut\n'), ((28456, 28497), 'utilities.utility.bin.idigitize', 'ut.bin.idigitize', (['sub[zi][m_kind]', 'm_bins'], {}), '(sub[zi][m_kind], m_bins)\n', (28472, 28497), True, 'from utilities import utility as ut\n'), ((28513, 28534), 'numpy.zeros', 'np.zeros', (['m_bins.size'], {}), '(m_bins.size)\n', (28521, 28534), True, 'import numpy as np\n'), ((29593, 29619), 'utilities.utility.array.arrayize', 'ut.array.arrayize', (['sources'], {}), '(sources)\n', (29610, 29619), True, 'from utilities import utility as ut\n'), ((29636, 29664), 'utilities.utility.array.arrayize', 'ut.array.arrayize', (['redshifts'], {}), '(redshifts)\n', (29653, 29664), True, 'from utilities import utility as ut\n'), ((29676, 29705), 'utilities.utility.bin.BinClass', 'ut.bin.BinClass', (['m_lim', 'm_wid'], {}), '(m_lim, m_wid)\n', (29691, 29705), True, 'from utilities import utility as ut\n'), ((32485, 32501), 'numpy.array', 'np.array', (['cosmos'], {}), '(cosmos)\n', (32493, 32501), True, 'import numpy as np\n'), ((32550, 32579), 'numpy.log10', 'log10', (['(cosmos[1] * 0.72 ** -3)'], {}), '(cosmos[1] * 0.72 ** -3)\n', (32555, 32579), False, 'from numpy import log10, Inf\n'), ((2128, 2170), 'numpy.zeros', 'np.zeros', (['subz[sham_prop].size', 'np.float32'], {}), '(subz[sham_prop].size, np.float32)\n', (2136, 2170), True, 'import numpy as np\n'), ((2670, 2718), 'utilities.utility.array.elements', 'ut.array.elements', (['subz[sham_prop]', '[0.001, Inf]'], {}), '(subz[sham_prop], [0.001, Inf])\n', (2687, 2718), True, 'from utilities import utility as ut\n'), ((13612, 13664), 'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.mchars'], {'k': '(1)'}), '(self.redshifts, self.mchars, k=1)\n', (13630, 13664), False, 'from scipy import integrate, interpolate, ndimage\n'), ((13692, 13744), 
'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.slopes'], {'k': '(1)'}), '(self.redshifts, self.slopes, k=1)\n', (13710, 13744), False, 'from scipy import integrate, interpolate, ndimage\n'), ((13776, 13832), 'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.amplitudes'], {'k': '(1)'}), '(self.redshifts, self.amplitudes, k=1)\n', (13794, 13832), False, 'from scipy import integrate, interpolate, ndimage\n'), ((17872, 17945), 'numpy.arange', 'np.arange', (['(self.mmin - dm_scat_lo)', '(self.mmax + dm_scat_hi)', 'dm', 'np.float32'], {}), '(self.mmin - dm_scat_lo, self.mmax + dm_scat_hi, dm, np.float32)\n', (17881, 17945), True, 'import numpy as np\n'), ((17964, 17986), 'numpy.zeros', 'np.zeros', (['m_stars.size'], {}), '(m_stars.size)\n', (17972, 17986), True, 'import numpy as np\n'), ((18003, 18025), 'numpy.zeros', 'np.zeros', (['m_stars.size'], {}), '(m_stars.size)\n', (18011, 18025), True, 'import numpy as np\n'), ((20534, 20576), 'scipy.interpolate.splev', 'interpolate.splev', (['m', 'self.dndm_m_scat_spl'], {}), '(m, self.dndm_m_scat_spl)\n', (20551, 20576), False, 'from scipy import integrate, interpolate, ndimage\n'), ((23679, 23731), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.mchar_z_spl'], {'ext': '(0)'}), '(redshift, self.mchar_z_spl, ext=0)\n', (23696, 23731), False, 'from scipy import integrate, interpolate, ndimage\n'), ((23757, 23813), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.amplitude_z_spl'], {'ext': '(0)'}), '(redshift, self.amplitude_z_spl, ext=0)\n', (23774, 23813), False, 'from scipy import integrate, interpolate, ndimage\n'), ((23836, 23888), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.slope_z_spl'], {'ext': '(0)'}), '(redshift, self.slope_z_spl, ext=0)\n', (23853, 23888), False, 'from scipy import integrate, interpolate, ndimage\n'), ((24692, 24771), 'numpy.arange', 'np.arange', (['(self.mmin - dmag_scat_lo)', 
'(self.mmax + dmag_scat_hi)', 'dmag', 'np.float32'], {}), '(self.mmin - dmag_scat_lo, self.mmax + dmag_scat_hi, dmag, np.float32)\n', (24701, 24771), True, 'import numpy as np\n'), ((24790, 24809), 'numpy.zeros', 'np.zeros', (['mags.size'], {}), '(mags.size)\n', (24798, 24809), True, 'import numpy as np\n'), ((24826, 24845), 'numpy.zeros', 'np.zeros', (['mags.size'], {}), '(mags.size)\n', (24834, 24845), True, 'import numpy as np\n'), ((25180, 25211), 'scipy.interpolate.splrep', 'interpolate.splrep', (['mags', 'dndms'], {}), '(mags, dndms)\n', (25198, 25211), False, 'from scipy import integrate, interpolate, ndimage\n'), ((26886, 26954), 'numpy.arange', 'np.arange', (['(m_min - m_wid_scat)', '(m_max + m_wid_scat)', 'm_wid', 'np.float32'], {}), '(m_min - m_wid_scat, m_max + m_wid_scat, m_wid, np.float32)\n', (26895, 26954), True, 'import numpy as np\n'), ((27448, 27509), 'scipy.ndimage.filters.gaussian_filter1d', 'ndimage.filters.gaussian_filter1d', (['dndm_anal', '(Sf.scat / m_wid)'], {}), '(dndm_anal, Sf.scat / m_wid)\n', (27481, 27509), False, 'from scipy import integrate, interpolate, ndimage\n'), ((27536, 27591), 'utilities.utility.math.deconvolute', 'ut.math.deconvolute', (['dndm_anal_conv', 'Sf.scat', 'm_wid', '(30)'], {}), '(dndm_anal_conv, Sf.scat, m_wid, 30)\n', (27555, 27591), True, 'from utilities import utility as ut\n'), ((27759, 27824), 'scipy.ndimage.filters.gaussian_filter1d', 'ndimage.filters.gaussian_filter1d', (['dndm_anal_pre', '(Sf.scat / m_wid)'], {}), '(dndm_anal_pre, Sf.scat / m_wid)\n', (27792, 27824), False, 'from scipy import integrate, interpolate, ndimage\n'), ((28995, 29011), 'numpy.log10', 'log10', (['dndm_anal'], {}), '(dndm_anal)\n', (29000, 29011), False, 'from numpy import log10, Inf\n'), ((29063, 29079), 'numpy.log10', 'log10', (['dndm_anal'], {}), '(dndm_anal)\n', (29068, 29079), False, 'from numpy import log10, Inf\n'), ((29108, 29124), 'numpy.log10', 'log10', (['dndm_sham'], {}), '(dndm_sham)\n', (29113, 29124), False, 'from 
numpy import log10, Inf\n'), ((2756, 2813), 'utilities.utility.array.elements', 'ut.array.elements', (["subz['m.frac.min']", '[dis_mf, Inf]', 'sis'], {}), "(subz['m.frac.min'], [dis_mf, Inf], sis)\n", (2773, 2813), True, 'from utilities import utility as ut\n'), ((2920, 2951), 'utilities.utility.array.arange_length', 'ut.array.arange_length', (['num_max'], {}), '(num_max)\n', (2942, 2951), True, 'from utilities import utility as ut\n'), ((4095, 4110), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (4103, 4110), True, 'import numpy as np\n'), ((4309, 4327), 'numpy.array', 'np.array', (['[-1.155]'], {}), '([-1.155])\n', (4317, 4327), True, 'import numpy as np\n'), ((13939, 13992), 'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.mchars2'], {'k': '(1)'}), '(self.redshifts, self.mchars2, k=1)\n', (13957, 13992), False, 'from scipy import integrate, interpolate, ndimage\n'), ((14025, 14078), 'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.slopes2'], {'k': '(1)'}), '(self.redshifts, self.slopes2, k=1)\n', (14043, 14078), False, 'from scipy import integrate, interpolate, ndimage\n'), ((14115, 14172), 'scipy.interpolate.splrep', 'interpolate.splrep', (['self.redshifts', 'self.amplitudes2'], {'k': '(1)'}), '(self.redshifts, self.amplitudes2, k=1)\n', (14133, 14172), False, 'from scipy import integrate, interpolate, ndimage\n'), ((17215, 17254), 'scipy.integrate.quad', 'integrate.quad', (['self.dndm', 'm_min', 'm_max'], {}), '(self.dndm, m_min, m_max)\n', (17229, 17254), False, 'from scipy import integrate, interpolate, ndimage\n'), ((18391, 18405), 'numpy.log10', 'log10', (['numdens'], {}), '(numdens)\n', (18396, 18405), False, 'from numpy import log10, Inf\n'), ((18817, 18863), 'utilities.utility.math.deconvolute', 'ut.math.deconvolute', (['dndms', 'scat', 'dm', 'iter_num'], {}), '(dndms, scat, dm, iter_num)\n', (18836, 18863), True, 'from utilities import utility as ut\n'), ((19107, 19146), 
'scipy.interpolate.splrep', 'interpolate.splrep', (['m_stars', 'dndms_scat'], {}), '(m_stars, dndms_scat)\n', (19125, 19146), False, 'from scipy import integrate, interpolate, ndimage\n'), ((19174, 19196), 'numpy.zeros', 'np.zeros', (['m_stars.size'], {}), '(m_stars.size)\n', (19182, 19196), True, 'import numpy as np\n'), ((20343, 20355), 'numpy.log10', 'log10', (['dn_dm'], {}), '(dn_dm)\n', (20348, 20355), False, 'from numpy import log10, Inf\n'), ((20738, 20786), 'scipy.interpolate.splev', 'interpolate.splev', (['m', 'self.log_numden_m_scat_spl'], {}), '(m, self.log_numden_m_scat_spl)\n', (20755, 20786), False, 'from scipy import integrate, interpolate, ndimage\n'), ((22906, 22948), 'numpy.exp', 'np.exp', (['(-10 ** ((self.m_char - mag) / 2.5))'], {}), '(-10 ** ((self.m_char - mag) / 2.5))\n', (22912, 22948), True, 'import numpy as np\n'), ((23114, 23153), 'scipy.integrate.quad', 'integrate.quad', (['self.dndm', 'm_min', 'm_max'], {}), '(self.dndm, m_min, m_max)\n', (23128, 23153), False, 'from scipy import integrate, interpolate, ndimage\n'), ((25138, 25152), 'numpy.log10', 'log10', (['numdens'], {}), '(numdens)\n', (25143, 25152), False, 'from numpy import log10, Inf\n'), ((25453, 25514), 'utilities.utility.math.deconvolute', 'ut.math.deconvolute', (['dndms', 'mag_scat', 'dmag', 'deconvol_iter_num'], {}), '(dndms, mag_scat, dmag, deconvol_iter_num)\n', (25472, 25514), True, 'from utilities import utility as ut\n'), ((25784, 25820), 'scipy.interpolate.splrep', 'interpolate.splrep', (['mags', 'dndms_scat'], {}), '(mags, dndms_scat)\n', (25802, 25820), False, 'from scipy import integrate, interpolate, ndimage\n'), ((25848, 25867), 'numpy.zeros', 'np.zeros', (['mags.size'], {}), '(mags.size)\n', (25856, 25867), True, 'import numpy as np\n'), ((28644, 28660), 'numpy.min', 'np.min', (['gal_nums'], {}), '(gal_nums)\n', (28650, 28660), True, 'import numpy as np\n'), ((29180, 29200), 'numpy.log10', 'log10', (['dndm_anal_pre'], {}), '(dndm_anal_pre)\n', (29185, 29200), 
False, 'from numpy import log10, Inf\n'), ((29245, 29267), 'numpy.log10', 'log10', (['dndm_anal_recov'], {}), '(dndm_anal_recov)\n', (29250, 29267), False, 'from numpy import log10, Inf\n'), ((4137, 4155), 'numpy.array', 'np.array', (['[10.525]'], {}), '([10.525])\n', (4145, 4155), True, 'import numpy as np\n'), ((4219, 4237), 'numpy.array', 'np.array', (['[0.0083]'], {}), '([0.0083])\n', (4227, 4237), True, 'import numpy as np\n'), ((4648, 4663), 'numpy.array', 'np.array', (['[0.1]'], {}), '([0.1])\n', (4656, 4663), True, 'import numpy as np\n'), ((4973, 4990), 'numpy.array', 'np.array', (['[-0.46]'], {}), '([-0.46])\n', (4981, 4990), True, 'import numpy as np\n'), ((5018, 5035), 'numpy.array', 'np.array', (['[-1.58]'], {}), '([-1.58])\n', (5026, 5035), True, 'import numpy as np\n'), ((14792, 14802), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (14798, 14802), True, 'import numpy as np\n'), ((17039, 17054), 'numpy.exp', 'np.exp', (['(-m_rats)'], {}), '(-m_rats)\n', (17045, 17054), True, 'import numpy as np\n'), ((18458, 18472), 'numpy.log10', 'log10', (['numdens'], {}), '(numdens)\n', (18463, 18472), False, 'from numpy import log10, Inf\n'), ((19528, 19547), 'numpy.log10', 'log10', (['numdens_scat'], {}), '(numdens_scat)\n', (19533, 19547), False, 'from numpy import log10, Inf\n'), ((25263, 25277), 'numpy.log10', 'log10', (['numdens'], {}), '(numdens)\n', (25268, 25277), False, 'from numpy import log10, Inf\n'), ((26141, 26160), 'numpy.log10', 'log10', (['numdens_scat'], {}), '(numdens_scat)\n', (26146, 26160), False, 'from numpy import log10, Inf\n'), ((4162, 4175), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (4167, 4175), False, 'from numpy import log10, Inf\n'), ((4774, 4794), 'numpy.log10', 'log10', (['(1 / 1.6 / 0.7)'], {}), '(1 / 1.6 / 0.7)\n', (4779, 4794), False, 'from numpy import log10, Inf\n'), ((4826, 4845), 'numpy.array', 'np.array', (['[0.00426]'], {}), '([0.00426])\n', (4834, 4845), True, 'import numpy as np\n'), ((4902, 4921), 
'numpy.array', 'np.array', (['[0.00058]'], {}), '([0.00058])\n', (4910, 4921), True, 'import numpy as np\n'), ((5413, 5450), 'numpy.array', 'np.array', (['[0.1, 1.6, 2.5, 3.56, 4.03]'], {}), '([0.1, 1.6, 2.5, 3.56, 4.03])\n', (5421, 5450), True, 'import numpy as np\n'), ((5711, 5755), 'numpy.array', 'np.array', (['[-1.18, -1.0, -1.01, -1.39, -1.39]'], {}), '([-1.18, -1.0, -1.01, -1.39, -1.39])\n', (5719, 5755), True, 'import numpy as np\n'), ((15019, 15029), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (15025, 15029), True, 'import numpy as np\n'), ((15082, 15092), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (15088, 15092), True, 'import numpy as np\n'), ((15309, 15354), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.mchar_z_spl'], {}), '(redshift, self.mchar_z_spl)\n', (15326, 15354), False, 'from scipy import integrate, interpolate, ndimage\n'), ((16873, 16888), 'numpy.exp', 'np.exp', (['(-m_rats)'], {}), '(-m_rats)\n', (16879, 16888), True, 'import numpy as np\n'), ((16951, 16964), 'numpy.exp', 'np.exp', (['(-dm2s)'], {}), '(-dm2s)\n', (16957, 16964), True, 'import numpy as np\n'), ((19609, 19628), 'numpy.log10', 'log10', (['numdens_scat'], {}), '(numdens_scat)\n', (19614, 19628), False, 'from numpy import log10, Inf\n'), ((19868, 19882), 'numpy.log10', 'log10', (['num_den'], {}), '(num_den)\n', (19873, 19882), False, 'from numpy import log10, Inf\n'), ((20115, 20129), 'numpy.log10', 'log10', (['num_den'], {}), '(num_den)\n', (20120, 20129), False, 'from numpy import log10, Inf\n'), ((26222, 26241), 'numpy.log10', 'log10', (['numdens_scat'], {}), '(numdens_scat)\n', (26227, 26241), False, 'from numpy import log10, Inf\n'), ((2834, 2866), 'numpy.argsort', 'np.argsort', (['subz[sham_prop][sis]'], {}), '(subz[sham_prop][sis])\n', (2844, 2866), True, 'import numpy as np\n'), ((4724, 4742), 'numpy.array', 'np.array', (['[10.525]'], {}), '([10.525])\n', (4732, 4742), True, 'import numpy as np\n'), ((5477, 5521), 'numpy.array', 'np.array', 
(['[10.65, 10.6, 10.65, 11.07, 11.07]'], {}), '([10.65, 10.6, 10.65, 11.07, 11.07])\n', (5485, 5521), True, 'import numpy as np\n'), ((5996, 6033), 'numpy.array', 'np.array', (['[0.1, 1.6, 2.5, 3.56, 4.03]'], {}), '([0.1, 1.6, 2.5, 3.56, 4.03])\n', (6004, 6033), True, 'import numpy as np\n'), ((6288, 6333), 'numpy.array', 'np.array', (['[-1.155, -1.0, -1.01, -1.39, -1.39]'], {}), '([-1.155, -1.0, -1.01, -1.39, -1.39])\n', (6296, 6333), True, 'import numpy as np\n'), ((15384, 15433), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.amplitude_z_spl'], {}), '(redshift, self.amplitude_z_spl)\n', (15401, 15433), False, 'from scipy import integrate, interpolate, ndimage\n'), ((15436, 15446), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (15442, 15446), True, 'import numpy as np\n'), ((15472, 15517), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.slope_z_spl'], {}), '(redshift, self.slope_z_spl)\n', (15489, 15517), False, 'from scipy import integrate, interpolate, ndimage\n'), ((22043, 22084), 'numpy.array', 'np.array', (['[0.1, 0.2, 0.3, 0.4, 0.5, 0.65]'], {}), '([0.1, 0.2, 0.3, 0.4, 0.5, 0.65])\n', (22051, 22084), True, 'import numpy as np\n'), ((22111, 22169), 'numpy.array', 'np.array', (['[-20.58, -20.81, -20.81, -20.99, -21.29, -21.38]'], {}), '([-20.58, -20.81, -20.81, -20.99, -21.29, -21.38])\n', (22119, 22169), True, 'import numpy as np\n'), ((22783, 22793), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (22789, 22793), True, 'import numpy as np\n'), ((3050, 3067), 'numpy.zeros', 'np.zeros', (['num_max'], {}), '(num_max)\n', (3058, 3067), True, 'import numpy as np\n'), ((4749, 4771), 'numpy.log10', 'log10', (['(h_them / hubble)'], {}), '(h_them / hubble)\n', (4754, 4771), False, 'from numpy import log10, Inf\n'), ((5529, 5542), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (5534, 5542), False, 'from numpy import log10, Inf\n'), ((5620, 5662), 'numpy.array', 'np.array', (['[90.0, 29.65, 11.52, 1.55, 1.55]'], 
{}), '([90.0, 29.65, 11.52, 1.55, 1.55])\n', (5628, 5662), True, 'import numpy as np\n'), ((6060, 6105), 'numpy.array', 'np.array', (['[10.525, 10.6, 10.65, 11.07, 11.07]'], {}), '([10.525, 10.6, 10.65, 11.07, 11.07])\n', (6068, 6105), True, 'import numpy as np\n'), ((6158, 6215), 'numpy.array', 'np.array', (['[0.0083, 0.002965, 0.00115, 0.000155, 0.000155]'], {}), '([0.0083, 0.002965, 0.00115, 0.000155, 0.000155])\n', (6166, 6215), True, 'import numpy as np\n'), ((6607, 6644), 'numpy.array', 'np.array', (['[0.1, 1.6, 2.5, 3.56, 4.03]'], {}), '([0.1, 1.6, 2.5, 3.56, 4.03])\n', (6615, 6644), True, 'import numpy as np\n'), ((6897, 6942), 'numpy.array', 'np.array', (['[-1.155, -1.0, -1.01, -1.39, -1.39]'], {}), '([-1.155, -1.0, -1.01, -1.39, -1.39])\n', (6905, 6942), True, 'import numpy as np\n'), ((15739, 15749), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (15745, 15749), True, 'import numpy as np\n'), ((15920, 15965), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.mchar_z_spl'], {}), '(redshift, self.mchar_z_spl)\n', (15937, 15965), False, 'from scipy import integrate, interpolate, ndimage\n'), ((16159, 16205), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.mchar2_z_spl'], {}), '(redshift, self.mchar2_z_spl)\n', (16176, 16205), False, 'from scipy import integrate, interpolate, ndimage\n'), ((22202, 22260), 'numpy.array', 'np.array', (['[0.0159, 0.0152, 0.0124, 0.0144, 0.0108, 0.0105]'], {}), '([0.0159, 0.0152, 0.0124, 0.0144, 0.0108, 0.0105])\n', (22210, 22260), True, 'import numpy as np\n'), ((3175, 3192), 'numpy.zeros', 'np.zeros', (['num_max'], {}), '(num_max)\n', (3183, 3192), True, 'import numpy as np\n'), ((6113, 6126), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (6118, 6126), False, 'from numpy import log10, Inf\n'), ((6671, 6716), 'numpy.array', 'np.array', (['[10.525, 10.6, 10.65, 11.07, 11.07]'], {}), '([10.525, 10.6, 10.65, 11.07, 11.07])\n', (6679, 6716), True, 'import numpy as np\n'), ((6769, 
6815), 'numpy.array', 'np.array', (['[0.0083, 1e-05, 1e-05, 1e-05, 1e-06]'], {}), '([0.0083, 1e-05, 1e-05, 1e-05, 1e-06])\n', (6777, 6815), True, 'import numpy as np\n'), ((7164, 7189), 'numpy.arange', 'np.arange', (['(0.1)', '(4.03)', '(0.1)'], {}), '(0.1, 4.03, 0.1)\n', (7173, 7189), True, 'import numpy as np\n'), ((15995, 16044), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.amplitude_z_spl'], {}), '(redshift, self.amplitude_z_spl)\n', (16012, 16044), False, 'from scipy import integrate, interpolate, ndimage\n'), ((16047, 16057), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16053, 16057), True, 'import numpy as np\n'), ((16083, 16128), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.slope_z_spl'], {}), '(redshift, self.slope_z_spl)\n', (16100, 16128), False, 'from scipy import integrate, interpolate, ndimage\n'), ((16236, 16286), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.amplitude2_z_spl'], {}), '(redshift, self.amplitude2_z_spl)\n', (16253, 16286), False, 'from scipy import integrate, interpolate, ndimage\n'), ((16289, 16299), 'numpy.log', 'np.log', (['(10)'], {}), '(10)\n', (16295, 16299), True, 'import numpy as np\n'), ((16326, 16372), 'scipy.interpolate.splev', 'interpolate.splev', (['redshift', 'self.slope2_z_spl'], {}), '(redshift, self.slope2_z_spl)\n', (16343, 16372), False, 'from scipy import integrate, interpolate, ndimage\n'), ((6724, 6737), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (6729, 6737), False, 'from numpy import log10, Inf\n'), ((7778, 7798), 'numpy.array', 'np.array', (['[0.1, 4.0]'], {}), '([0.1, 4.0])\n', (7786, 7798), True, 'import numpy as np\n'), ((7262, 7275), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (7267, 7275), False, 'from numpy import log10, Inf\n'), ((8296, 8306), 'numpy.log10', 'log10', (['(1.6)'], {}), '(1.6)\n', (8301, 8306), False, 'from numpy import log10, Inf\n'), ((8857, 8887), 'numpy.array', 'np.array', (['[0.3, 0.5, 
0.7, 0.9]'], {}), '([0.3, 0.5, 0.7, 0.9])\n', (8865, 8887), True, 'import numpy as np\n'), ((9186, 9224), 'numpy.array', 'np.array', (['[-1.06, -1.05, -0.93, -0.91]'], {}), '([-1.06, -1.05, -0.93, -0.91])\n', (9194, 9224), True, 'import numpy as np\n'), ((9480, 9518), 'numpy.array', 'np.array', (['[-1.73, -1.76, -1.65, -1.65]'], {}), '([-1.73, -1.76, -1.65, -1.65])\n', (9488, 9518), True, 'import numpy as np\n'), ((9577, 9607), 'numpy.append', 'np.append', (['(0.1)', 'self.redshifts'], {}), '(0.1, self.redshifts)\n', (9586, 9607), True, 'import numpy as np\n'), ((9715, 9763), 'numpy.append', 'np.append', (['(0.0083 * hubble ** 3)', 'self.amplitudes'], {}), '(0.0083 * hubble ** 3, self.amplitudes)\n', (9724, 9763), True, 'import numpy as np\n'), ((9790, 9820), 'numpy.append', 'np.append', (['(-1.155)', 'self.slopes'], {}), '(-1.155, self.slopes)\n', (9799, 9820), True, 'import numpy as np\n'), ((9848, 9888), 'numpy.append', 'np.append', (['self.mchars2[0]', 'self.mchars2'], {}), '(self.mchars2[0], self.mchars2)\n', (9857, 9888), True, 'import numpy as np\n'), ((9920, 9950), 'numpy.append', 'np.append', (['(0)', 'self.amplitudes2'], {}), '(0, self.amplitudes2)\n', (9929, 9950), True, 'import numpy as np\n'), ((9978, 10018), 'numpy.append', 'np.append', (['self.slopes2[0]', 'self.slopes2'], {}), '(self.slopes2[0], self.slopes2)\n', (9987, 10018), True, 'import numpy as np\n'), ((10143, 10192), 'numpy.append', 'np.append', (['self.redshifts', '[1.6, 2.5, 3.56, 4.03]'], {}), '(self.redshifts, [1.6, 2.5, 3.56, 4.03])\n', (10152, 10192), True, 'import numpy as np\n'), ((10546, 10597), 'numpy.append', 'np.append', (['self.slopes', '[-1.0, -1.01, -1.39, -1.39]'], {}), '(self.slopes, [-1.0, -1.01, -1.39, -1.39])\n', (10555, 10597), True, 'import numpy as np\n'), ((8271, 8293), 'numpy.log10', 'log10', (['(h_them / hubble)'], {}), '(h_them / hubble)\n', (8276, 8293), False, 'from numpy import log10, Inf\n'), ((8914, 8951), 'numpy.array', 'np.array', (['[10.9, 10.91, 10.95, 
10.92]'], {}), '([10.9, 10.91, 10.95, 10.92])\n', (8922, 8951), True, 'import numpy as np\n'), ((9056, 9102), 'numpy.array', 'np.array', (['[0.00289, 0.00174, 0.00216, 0.00294]'], {}), '([0.00289, 0.00174, 0.00216, 0.00294])\n', (9064, 9102), True, 'import numpy as np\n'), ((9252, 9285), 'numpy.array', 'np.array', (['[9.63, 9.7, 9.75, 9.85]'], {}), '([9.63, 9.7, 9.75, 9.85])\n', (9260, 9285), True, 'import numpy as np\n'), ((9348, 9393), 'numpy.array', 'np.array', (['[0.0018, 0.00143, 0.00289, 0.00212]'], {}), '([0.0018, 0.00143, 0.00289, 0.00212])\n', (9356, 9393), True, 'import numpy as np\n'), ((10740, 10751), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10748, 10751), True, 'import numpy as np\n'), ((11114, 11171), 'numpy.array', 'np.array', (['[0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03]'], {}), '([0.1, 0.3, 0.5, 0.7, 0.9, 1.6, 2.5, 3.56, 4.03])\n', (11122, 11171), True, 'import numpy as np\n'), ((11532, 11605), 'numpy.array', 'np.array', (['[-1.155, -1.06, -1.05, -0.93, -0.91, -1.0, -1.01, -1.39, -1.39]'], {}), '([-1.155, -1.06, -1.05, -0.93, -0.91, -1.0, -1.01, -1.39, -1.39])\n', (11540, 11605), True, 'import numpy as np\n'), ((11960, 12032), 'numpy.array', 'np.array', (['[-1.7, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39]'], {}), '([-1.7, -1.73, -1.76, -1.65, -1.65, -1.72, -1.74, -2.39, -2.39])\n', (11968, 12032), True, 'import numpy as np\n'), ((8959, 8981), 'numpy.log10', 'log10', (['(h_them / hubble)'], {}), '(h_them / hubble)\n', (8964, 8981), False, 'from numpy import log10, Inf\n'), ((9293, 9315), 'numpy.log10', 'log10', (['(h_them / hubble)'], {}), '(h_them / hubble)\n', (9298, 9315), False, 'from numpy import log10, Inf\n'), ((10258, 10295), 'numpy.array', 'np.array', (['[10.6, 10.65, 11.07, 11.07]'], {}), '([10.6, 10.65, 11.07, 11.07])\n', (10266, 10295), True, 'import numpy as np\n'), ((10415, 10464), 'numpy.array', 'np.array', (['[0.002965, 0.00115, 0.000155, 0.000155]'], {}), '([0.002965, 0.00115, 0.000155, 0.000155])\n', 
(10423, 10464), True, 'import numpy as np\n'), ((10650, 10661), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10658, 10661), True, 'import numpy as np\n'), ((10804, 10815), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (10812, 10815), True, 'import numpy as np\n'), ((11359, 11456), 'numpy.array', 'np.array', (['[0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297, 0.00115, 0.000155, \n 0.000155]'], {}), '([0.0083, 0.00774, 0.00466, 0.00579, 0.00787, 0.00297, 0.00115, \n 0.000155, 0.000155])\n', (11367, 11456), True, 'import numpy as np\n'), ((11635, 11699), 'numpy.array', 'np.array', (['[9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83]'], {}), '([9.35, 9.34, 9.41, 9.46, 9.56, 9.41, 9.46, 9.83, 9.83])\n', (11643, 11699), True, 'import numpy as np\n'), ((11780, 11880), 'numpy.array', 'np.array', (['[0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962, 0.000375, 5.03e-05,\n 5.03e-05]'], {}), '([0.00269, 0.00482, 0.00383, 0.00774, 0.00568, 0.000962, 0.000375, \n 5.03e-05, 5.03e-05])\n', (11788, 11880), True, 'import numpy as np\n'), ((12445, 12521), 'numpy.array', 'np.array', (['[0.1, 0.3, 0.5, 0.7, 0.9, 1.15, 1.45, 1.8, 2.25, 2.75, 3.25, 3.75]'], {}), '([0.1, 0.3, 0.5, 0.7, 0.9, 1.15, 1.45, 1.8, 2.25, 2.75, 3.25, 3.75])\n', (12453, 12521), True, 'import numpy as np\n'), ((12812, 12822), 'numpy.log10', 'log10', (['(1.6)'], {}), '(1.6)\n', (12817, 12822), False, 'from numpy import log10, Inf\n'), ((13130, 13228), 'numpy.array', 'np.array', (['[-1.18, -1.19, -1.22, -1.26, -1.23, -1.26, -1.29, -1.27, -1.26, -1.2, -1.14,\n -1.23]'], {}), '([-1.18, -1.19, -1.22, -1.26, -1.23, -1.26, -1.29, -1.27, -1.26, -\n 1.2, -1.14, -1.23])\n', (13138, 13228), True, 'import numpy as np\n'), ((9657, 9670), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (9662, 9670), False, 'from numpy import log10, Inf\n'), ((10303, 10316), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (10308, 10316), False, 'from numpy import log10, Inf\n'), ((11734, 11747), 
'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (11739, 11747), False, 'from numpy import log10, Inf\n'), ((12587, 12684), 'numpy.array', 'np.array', (['[11.16, 11.2, 11.26, 11.25, 11.27, 11.31, 11.34, 11.4, 11.46, 11.34, 11.33,\n 11.36]'], {}), '([11.16, 11.2, 11.26, 11.25, 11.27, 11.31, 11.34, 11.4, 11.46, \n 11.34, 11.33, 11.36])\n', (12595, 12684), True, 'import numpy as np\n'), ((11314, 11327), 'numpy.log10', 'log10', (['hubble'], {}), '(hubble)\n', (11319, 11327), False, 'from numpy import log10, Inf\n'), ((12724, 12746), 'numpy.log10', 'log10', (['(h_them / hubble)'], {}), '(h_them / hubble)\n', (12729, 12746), False, 'from numpy import log10, Inf\n'), ((12905, 13004), 'numpy.array', 'np.array', (['[-2.47, -2.65, -2.76, -2.82, -2.91, -3.06, -3.27, -3.49, -3.69, -3.64, -\n 3.74, -3.94]'], {}), '([-2.47, -2.65, -2.76, -2.82, -2.91, -3.06, -3.27, -3.49, -3.69, -\n 3.64, -3.74, -3.94])\n', (12913, 13004), True, 'import numpy as np\n')] |
"""
Morphology operations on multi-label ANTsImage types
"""
__all__ = ['multi_label_morphology']
import numpy as np
def multi_label_morphology(image, operation, radius, dilation_mask=None, label_list=None, force=False):
    """
    Morphology on multi label images.
    Wraps calls to iMath binary morphology. Additionally, dilation and closing operations preserve
    pre-existing labels. The choices of operation are:
    Dilation: dilates all labels sequentially, but does not overwrite original labels.
    This reduces dependence on the intensity ordering of adjoining labels. Ordering dependence
    can still arise if two or more labels dilate into the same space - in this case, the label
    with the lowest intensity is retained. With a mask, dilated labels are multiplied by the
    mask and then added to the original label, thus restricting dilation to the mask region.
    Erosion: Erodes labels independently, equivalent to calling iMath iteratively.
    Closing: Close holes in each label sequentially, but does not overwrite original labels.
    Opening: Opens each label independently, equivalent to calling iMath iteratively.
    Arguments
    ---------
    image : ANTsImage
        Input image should contain only 0 for background and positive integers for labels.
    operation : string
        One of MD, ME, MC, MO, passed to iMath.
    radius : integer
        radius of the morphological operation.
    dilation_mask : ANTsImage
        Optional binary mask to constrain dilation only (eg dilate cortical label into WM).
    label_list : list or tuple or numpy.ndarray
        Optional list of labels, to perform operation upon. Defaults to all unique
        intensities in image.
    force : boolean
        Proceed even when more than 200 distinct labels are found (guards against
        accidentally running on a non-discrete image).
    Returns
    -------
    ANTsImage
    Raises
    ------
    ValueError
        If more than 200 labels are found without `force`, or if `dilation_mask`
        is empty or not binary.
    Example
    -------
    >>> import ants
    >>> img = ants.image_read(ants.get_data('r16'))
    >>> labels = ants.get_mask(img,1,150) + ants.get_mask(img,151,225) * 2
    >>> labels_dilated = ants.multi_label_morphology(labels, 'MD', 2)
    >>> # should see original label regions preserved in dilated version
    >>> # label N should have mean N and 0 variance
    >>> print(ants.label_stats(labels_dilated, labels))
    """
    # NOTE(review): a single-element label_list is discarded and replaced by all
    # labels found in the image -- confirm this is the intended behavior.
    if (label_list is None) or (len(label_list) == 1):
        label_list = np.sort(np.unique(image[image > 0]))
    if (len(label_list) > 200) and (not force):
        raise ValueError('More than 200 labels... Make sure the image is discrete'
                         ' and call this function again with `force=True` if you'
                         ' really want to do this.')
    # Erosion / opening is simply a case of looping over the input labels,
    # since those operations act on each label independently.
    if (operation == 'ME') or (operation == 'MO'):
        output = image.clone()
        for current_label in label_list:
            output = output.iMath(operation, radius, current_label)
        return output
    if dilation_mask is not None:
        if int(dilation_mask.max()) != 1:
            raise ValueError('Mask is either empty or not binary')
    output = image.clone()
    for current_label in label_list:
        # Binary mask of just this label, and of everything else already labeled.
        current_label_region = image.threshold_image(current_label, current_label)
        other_labels = output - current_label_region
        clab_binary_morphed = current_label_region.iMath(operation, radius, 1)
        if (operation == 'MD') and (dilation_mask is not None):
            # Restrict dilation to the mask, keep the original region, and
            # subtract other labels so they are never overwritten.
            clab_binary_morphed_nooverlap = current_label_region + dilation_mask * clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 2)
        else:
            # Keep only morphed voxels that do not collide with other labels.
            clab_binary_morphed_nooverlap = clab_binary_morphed - other_labels
            clab_binary_morphed_nooverlap = clab_binary_morphed_nooverlap.threshold_image(1, 1)
        output = output + clab_binary_morphed_nooverlap * current_label
    return output
| [
"numpy.unique"
] | [((2317, 2344), 'numpy.unique', 'np.unique', (['image[image > 0]'], {}), '(image[image > 0])\n', (2326, 2344), True, 'import numpy as np\n')] |
import numpy as np
from conftest import EPS
from testutils import (
CLUSTER_LABEL_FIRST_CLUSTER,
CLUSTER_LABEL_NOISE,
assert_cluster_labels,
assert_label_of_object_is_among_possible_ones,
assert_two_objects_are_in_same_cluster,
insert_objects_then_assert_cluster_labels,
reflect_horizontally
)
def test_new_single_object_is_labeled_as_noise(incdbscan4, object_far_away):
    """A lone inserted object has no neighbors and must be labeled noise."""
    clusterer = incdbscan4
    clusterer.insert(object_far_away)
    assert_cluster_labels(clusterer, object_far_away, CLUSTER_LABEL_NOISE)
def test_new_object_far_from_cluster_is_labeled_as_noise(
        incdbscan4,
        blob_in_middle,
        object_far_away):
    """An object far from an existing cluster stays noise after insertion."""
    for objects in (blob_in_middle, object_far_away):
        incdbscan4.insert(objects)
    assert_cluster_labels(incdbscan4, object_far_away, CLUSTER_LABEL_NOISE)
def test_new_border_object_gets_label_from_core(incdbscan4):
    """A new object within EPS of a core object joins that core's cluster.

    Fix: removed leftover debug ``print`` statements from the test body.
    """
    # Four mutually close objects: dense enough that each is a core object.
    cluster = np.array([
        [1., 1.],
        [0., 1.],
        [1., 0.],
        [0., 0.],
    ])
    # Border object: within EPS of the core at (1, 1) only.
    new_border_object = np.array([[1 + EPS, 1]])

    incdbscan4.insert(cluster)
    incdbscan4.insert(new_border_object)

    assert_two_objects_are_in_same_cluster(
        incdbscan4, cluster[[0]], new_border_object)
def test_labels_are_noise_only_until_not_enough_objects_in_cluster(
        incdbscan4,
        blob_in_middle):
    """Objects remain noise until min_pts accumulate, then all become a cluster."""
    for count in range(1, len(blob_in_middle) + 1):
        incdbscan4.insert(blob_in_middle[[count - 1]])
        if count < incdbscan4.min_pts:
            expected_label = CLUSTER_LABEL_NOISE
        else:
            expected_label = CLUSTER_LABEL_FIRST_CLUSTER
        assert_cluster_labels(incdbscan4, blob_in_middle[:count], expected_label)
def test_more_than_two_clusters_can_be_created(incdbscan4, blob_in_middle):
    """Three well-separated blobs each receive their own distinct label."""
    blob = blob_in_middle
    expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    for _ in range(3):
        insert_objects_then_assert_cluster_labels(
            incdbscan4, blob, expected_label)
        # Shift far enough away that the next blob cannot touch this one.
        blob = blob + 10
        expected_label = expected_label + 1
def test_two_clusters_can_be_born_at_the_same_time(
        incdbscan4,
        point_at_origin):
    """A single insertion can promote two noise groups into two new clusters."""
    right_group = np.array([
        [EPS * 1, 0],
        [EPS * 2, 0],
        [EPS * 2, 0],
    ])
    left_group = reflect_horizontally(right_group)

    incdbscan4.insert(right_group)
    incdbscan4.insert(left_group)

    # Neither side alone is dense enough yet: everything is still noise.
    assert_cluster_labels(incdbscan4, right_group, CLUSTER_LABEL_NOISE)
    assert_cluster_labels(incdbscan4, left_group, CLUSTER_LABEL_NOISE)

    new_object = point_at_origin
    incdbscan4.insert(new_object)

    right_label = incdbscan4.get_cluster_labels(right_group[[0]])[0]
    assert_cluster_labels(incdbscan4, right_group, right_label)

    # The two fresh labels are consecutive; the left side gets the other one.
    left_label = CLUSTER_LABEL_FIRST_CLUSTER + 1 - right_label
    assert_cluster_labels(incdbscan4, left_group, left_label)

    # The bridging object may legitimately end up in either new cluster.
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        new_object,
        {right_label, left_label}
    )
def test_absorption_with_noise(incdbscan3, point_at_origin):
    """A noise object adjacent to a growing cluster gets absorbed into it."""
    cluster_label = CLUSTER_LABEL_FIRST_CLUSTER
    chain = np.array([
        [EPS, 0],
        [EPS * 2, 0],
        [EPS * 3, 0],
    ])
    insert_objects_then_assert_cluster_labels(
        incdbscan3, chain, cluster_label)

    # A lone object near the origin: noise for now.
    lone_object = np.array([[0, EPS]])
    insert_objects_then_assert_cluster_labels(
        incdbscan3, lone_object, CLUSTER_LABEL_NOISE)

    # Inserting the origin links the lone object to the cluster.
    insert_objects_then_assert_cluster_labels(
        incdbscan3, point_at_origin, cluster_label)
    assert_cluster_labels(incdbscan3, lone_object, cluster_label)
def test_merge_two_clusters(incdbscan3, point_at_origin):
    """An object bridging two clusters merges them under the larger label."""
    right_cluster = np.array([
        [EPS, 0],
        [EPS * 2, 0],
        [EPS * 3, 0],
        [EPS * 4, 0],
    ])
    right_label = CLUSTER_LABEL_FIRST_CLUSTER
    insert_objects_then_assert_cluster_labels(
        incdbscan3, right_cluster, right_label)

    left_cluster = reflect_horizontally(right_cluster)
    left_label = right_label + 1
    insert_objects_then_assert_cluster_labels(
        incdbscan3, left_cluster, left_label)

    # The merged cluster keeps the larger of the two labels.
    merged_label = max([right_label, left_label])
    insert_objects_then_assert_cluster_labels(
        incdbscan3, point_at_origin, merged_label)
    assert_cluster_labels(incdbscan3, right_cluster, merged_label)
    assert_cluster_labels(incdbscan3, left_cluster, merged_label)
def test_merger_and_creation_can_happen_at_the_same_time(
        incdbscan4,
        point_at_origin,
        hourglass_on_the_right):
    """One insertion can merge two clusters and create a new one simultaneously.

    Fix: the possible-label set for the bridge point originally contained
    ``bottom_right_expected_label`` twice; the bridge borders BOTH right-side
    clusters, so the intended set is {top, bottom} (mirroring the analogous
    two-label check at the end of this test).
    """
    # Insert objects to the right: two clusters joined by a bridge point.
    hourglass = hourglass_on_the_right
    top_right = hourglass[:3]
    top_right_expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    bottom_right = hourglass[-3:]
    bottom_right_expected_label = top_right_expected_label + 1
    bridge_point = hourglass[[3]]

    incdbscan4.insert(top_right)
    incdbscan4.insert(bridge_point)
    incdbscan4.insert(bottom_right)

    assert_cluster_labels(incdbscan4, top_right, top_right_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, bottom_right_expected_label)
    # The bridge point may belong to either adjacent cluster.
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point,
        {top_right_expected_label, bottom_right_expected_label}
    )

    merged_cluster_expected_label = \
        incdbscan4.get_cluster_labels(bridge_point)[0]

    # Insert objects to the left: not yet dense enough to form a cluster.
    left_pre_cluster = np.array([
        [-EPS, 0],
        [-EPS * 2, 0],
        [-EPS * 2, 0],
    ])
    left_cluster_expected_label = bottom_right_expected_label + 1
    insert_objects_then_assert_cluster_labels(
        incdbscan4,
        left_pre_cluster,
        CLUSTER_LABEL_NOISE
    )

    # Insert object to the center: merges the right-side clusters and, in the
    # same step, promotes the left pre-cluster into a new cluster.
    new_object = point_at_origin
    incdbscan4.insert(new_object)

    assert_cluster_labels(
        incdbscan4, top_right, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, bridge_point, merged_cluster_expected_label)
    assert_cluster_labels(
        incdbscan4, left_pre_cluster, left_cluster_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        new_object,
        {merged_cluster_expected_label, left_cluster_expected_label}
    )
def test_two_mergers_can_happen_at_the_same_time(
        incdbscan4,
        point_at_origin,
        hourglass_on_the_right):
    """A single insertion can trigger two independent cluster mergers at once.

    Fix: the possible-label set for the right-side bridge point originally
    contained ``bottom_right_expected_label`` twice; the bridge borders BOTH
    right-side clusters, so the intended set is {top, bottom} (mirroring the
    left-side check below).
    """
    # Insert objects to the right: two clusters joined by a bridge point.
    top_right = hourglass_on_the_right[:3]
    top_right_expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    bottom_right = hourglass_on_the_right[-3:]
    bottom_right_expected_label = top_right_expected_label + 1
    bridge_point_right = hourglass_on_the_right[[3]]

    incdbscan4.insert(top_right)
    incdbscan4.insert(bridge_point_right)
    incdbscan4.insert(bottom_right)

    assert_cluster_labels(incdbscan4, top_right, top_right_expected_label)
    assert_cluster_labels(
        incdbscan4, bottom_right, bottom_right_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_right,
        {top_right_expected_label, bottom_right_expected_label}
    )

    # Insert objects to the left: mirror image of the right-side hourglass.
    hourglass_on_the_left = reflect_horizontally(hourglass_on_the_right)
    top_left = hourglass_on_the_left[:3]
    top_left_expected_label = bottom_right_expected_label + 1
    bottom_left = hourglass_on_the_left[-3:]
    bottom_left_expected_label = top_left_expected_label + 1
    bridge_point_left = hourglass_on_the_left[[3]]

    incdbscan4.insert(top_left)
    incdbscan4.insert(bridge_point_left)
    incdbscan4.insert(bottom_left)

    assert_cluster_labels(incdbscan4, top_left, top_left_expected_label)
    assert_cluster_labels(incdbscan4, bottom_left, bottom_left_expected_label)
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_left,
        {top_left_expected_label, bottom_left_expected_label}
    )

    # Insert object to the center: both hourglasses merge in one step.
    new_object = point_at_origin
    incdbscan4.insert(new_object)

    assert_cluster_labels(
        incdbscan4,
        np.vstack([top_right, bottom_right]),
        bottom_right_expected_label
    )
    assert_cluster_labels(
        incdbscan4,
        np.vstack([top_left, bottom_left]),
        bottom_left_expected_label
    )
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_right,
        {bottom_left_expected_label, bottom_right_expected_label}
    )
    assert_label_of_object_is_among_possible_ones(
        incdbscan4,
        bridge_point_left,
        {top_left_expected_label, bottom_left_expected_label}
    )
def test_object_is_core_if_it_has_more_than_enough_neighors(
        incdbscan3,
        point_at_origin):
    """An object surrounded by more than min_pts neighbors becomes a core."""
    # Four neighbors at distance EPS from the origin, one per axis direction.
    surrounding = np.array([
        [0, EPS],
        [0, -EPS],
        [EPS, 0],
        [-EPS, 0],
    ])
    incdbscan3.insert(surrounding)
    incdbscan3.insert(point_at_origin)

    expected_label = CLUSTER_LABEL_FIRST_CLUSTER
    assert_cluster_labels(incdbscan3, surrounding, expected_label)
    assert_cluster_labels(incdbscan3, point_at_origin, expected_label)
| [
"testutils.assert_label_of_object_is_among_possible_ones",
"testutils.assert_cluster_labels",
"numpy.array",
"numpy.vstack",
"testutils.assert_two_objects_are_in_same_cluster",
"testutils.insert_objects_then_assert_cluster_labels",
"testutils.reflect_horizontally"
] | [((445, 516), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'object_far_away', 'CLUSTER_LABEL_NOISE'], {}), '(incdbscan4, object_far_away, CLUSTER_LABEL_NOISE)\n', (466, 516), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((730, 801), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'object_far_away', 'CLUSTER_LABEL_NOISE'], {}), '(incdbscan4, object_far_away, CLUSTER_LABEL_NOISE)\n', (751, 801), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((879, 937), 'numpy.array', 'np.array', (['[[1.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 0.0]]'], {}), '([[1.0, 1.0], [0.0, 1.0], [1.0, 0.0], [0.0, 0.0]])\n', (887, 937), True, 'import numpy as np\n'), ((994, 1018), 'numpy.array', 'np.array', (['[[1 + EPS, 1]]'], {}), '([[1 + EPS, 1]])\n', (1002, 1018), True, 'import numpy as np\n'), ((1213, 1300), 'testutils.assert_two_objects_are_in_same_cluster', 'assert_two_objects_are_in_same_cluster', (['incdbscan4', 'cluster[[0]]', 'new_border_object'], {}), '(incdbscan4, cluster[[0]],\n new_border_object)\n', (1251, 1300), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((1909, 2003), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan4', 'cluster_1', 'cluster_1_expected_label'], {}), '(incdbscan4, cluster_1,\n cluster_1_expected_label)\n', 
(1950, 2003), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((2106, 2200), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan4', 'cluster_2', 'cluster_2_expected_label'], {}), '(incdbscan4, cluster_2,\n cluster_2_expected_label)\n', (2147, 2200), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((2303, 2397), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan4', 'cluster_3', 'cluster_3_expected_label'], {}), '(incdbscan4, cluster_3,\n cluster_3_expected_label)\n', (2344, 2397), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((2520, 2572), 'numpy.array', 'np.array', (['[[EPS * 1, 0], [EPS * 2, 0], [EPS * 2, 0]]'], {}), '([[EPS * 1, 0], [EPS * 2, 0], [EPS * 2, 0]])\n', (2528, 2572), True, 'import numpy as np\n'), ((2621, 2652), 'testutils.reflect_horizontally', 'reflect_horizontally', (['cluster_1'], {}), '(cluster_1)\n', (2641, 2652), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((2725, 2790), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'cluster_1', 'CLUSTER_LABEL_NOISE'], {}), 
'(incdbscan4, cluster_1, CLUSTER_LABEL_NOISE)\n', (2746, 2790), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((2795, 2860), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'cluster_2', 'CLUSTER_LABEL_NOISE'], {}), '(incdbscan4, cluster_2, CLUSTER_LABEL_NOISE)\n', (2816, 2860), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3014, 3084), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'cluster_1', 'cluster_1_label_expected'], {}), '(incdbscan4, cluster_1, cluster_1_label_expected)\n', (3035, 3084), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3190, 3260), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'cluster_2', 'cluster_2_label_expected'], {}), '(incdbscan4, cluster_2, cluster_2_label_expected)\n', (3211, 3260), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3266, 3394), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'new_object', '{cluster_1_label_expected, cluster_2_label_expected}'], {}), '(incdbscan4, new_object, {\n cluster_1_label_expected, 
cluster_2_label_expected})\n', (3311, 3394), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3562, 3610), 'numpy.array', 'np.array', (['[[EPS, 0], [EPS * 2, 0], [EPS * 3, 0]]'], {}), '([[EPS, 0], [EPS * 2, 0], [EPS * 3, 0]])\n', (3570, 3610), True, 'import numpy as np\n'), ((3647, 3744), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'cluster_values', 'expected_cluster_label'], {}), '(incdbscan3, cluster_values,\n expected_cluster_label)\n', (3688, 3744), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3763, 3783), 'numpy.array', 'np.array', (['[[0, EPS]]'], {}), '([[0, EPS]])\n', (3771, 3783), True, 'import numpy as np\n'), ((3789, 3874), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'noise', 'CLUSTER_LABEL_NOISE'], {}), '(incdbscan3, noise,\n CLUSTER_LABEL_NOISE)\n', (3830, 3874), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((3925, 4024), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'new_object_value', 'expected_cluster_label'], {}), '(incdbscan3, new_object_value,\n expected_cluster_label)\n', (3966, 4024), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, 
assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4035, 4099), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan3', 'noise', 'expected_cluster_label'], {}), '(incdbscan3, noise, expected_cluster_label)\n', (4056, 4099), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4176, 4238), 'numpy.array', 'np.array', (['[[EPS, 0], [EPS * 2, 0], [EPS * 3, 0], [EPS * 4, 0]]'], {}), '([[EPS, 0], [EPS * 2, 0], [EPS * 3, 0], [EPS * 4, 0]])\n', (4184, 4238), True, 'import numpy as np\n'), ((4342, 4436), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'cluster_1', 'cluster_1_expected_label'], {}), '(incdbscan3, cluster_1,\n cluster_1_expected_label)\n', (4383, 4436), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4459, 4490), 'testutils.reflect_horizontally', 'reflect_horizontally', (['cluster_1'], {}), '(cluster_1)\n', (4479, 4490), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4556, 4650), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'cluster_2', 'cluster_2_expected_label'], {}), '(incdbscan3, cluster_2,\n cluster_2_expected_label)\n', (4597, 4650), False, 'from testutils import 
CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4799, 4899), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan3', 'new_object', 'merged_cluster_expected_label'], {}), '(incdbscan3, new_object,\n merged_cluster_expected_label)\n', (4840, 4899), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4910, 4985), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan3', 'cluster_1', 'merged_cluster_expected_label'], {}), '(incdbscan3, cluster_1, merged_cluster_expected_label)\n', (4931, 4985), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((4990, 5065), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan3', 'cluster_2', 'merged_cluster_expected_label'], {}), '(incdbscan3, cluster_2, merged_cluster_expected_label)\n', (5011, 5065), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((5612, 5682), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'top_right', 'top_right_expected_label'], {}), '(incdbscan4, top_right, top_right_expected_label)\n', (5633, 5682), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, 
assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((5687, 5763), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'bottom_right', 'bottom_right_expected_label'], {}), '(incdbscan4, bottom_right, bottom_right_expected_label)\n', (5708, 5763), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((5778, 5914), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'bridge_point', '{bottom_right_expected_label, bottom_right_expected_label}'], {}), '(incdbscan4, bridge_point, {\n bottom_right_expected_label, bottom_right_expected_label})\n', (5823, 5914), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6091, 6142), 'numpy.array', 'np.array', (['[[-EPS, 0], [-EPS * 2, 0], [-EPS * 2, 0]]'], {}), '([[-EPS, 0], [-EPS * 2, 0], [-EPS * 2, 0]])\n', (6099, 6142), True, 'import numpy as np\n'), ((6245, 6341), 'testutils.insert_objects_then_assert_cluster_labels', 'insert_objects_then_assert_cluster_labels', (['incdbscan4', 'left_pre_cluster', 'CLUSTER_LABEL_NOISE'], {}), '(incdbscan4, left_pre_cluster,\n CLUSTER_LABEL_NOISE)\n', (6286, 6341), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6475, 6550), 'testutils.assert_cluster_labels', 
'assert_cluster_labels', (['incdbscan4', 'top_right', 'merged_cluster_expected_label'], {}), '(incdbscan4, top_right, merged_cluster_expected_label)\n', (6496, 6550), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6564, 6642), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'bottom_right', 'merged_cluster_expected_label'], {}), '(incdbscan4, bottom_right, merged_cluster_expected_label)\n', (6585, 6642), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6656, 6734), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'bridge_point', 'merged_cluster_expected_label'], {}), '(incdbscan4, bridge_point, merged_cluster_expected_label)\n', (6677, 6734), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6748, 6833), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'left_pre_cluster', 'left_cluster_expected_label'], {}), '(incdbscan4, left_pre_cluster, left_cluster_expected_label\n )\n', (6769, 6833), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((6843, 6979), 'testutils.assert_label_of_object_is_among_possible_ones', 
'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'new_object', '{merged_cluster_expected_label, left_cluster_expected_label}'], {}), '(incdbscan4, new_object, {\n merged_cluster_expected_label, left_cluster_expected_label})\n', (6888, 6979), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((7554, 7624), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'top_right', 'top_right_expected_label'], {}), '(incdbscan4, top_right, top_right_expected_label)\n', (7575, 7624), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((7629, 7705), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'bottom_right', 'bottom_right_expected_label'], {}), '(incdbscan4, bottom_right, bottom_right_expected_label)\n', (7650, 7705), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((7720, 7865), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'bridge_point_right', '{bottom_right_expected_label, bottom_right_expected_label}'], {}), '(incdbscan4,\n bridge_point_right, {bottom_right_expected_label,\n bottom_right_expected_label})\n', (7765, 7865), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, 
insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((7950, 7994), 'testutils.reflect_horizontally', 'reflect_horizontally', (['hourglass_on_the_right'], {}), '(hourglass_on_the_right)\n', (7970, 7994), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((8372, 8440), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'top_left', 'top_left_expected_label'], {}), '(incdbscan4, top_left, top_left_expected_label)\n', (8393, 8440), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((8445, 8519), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'bottom_left', 'bottom_left_expected_label'], {}), '(incdbscan4, bottom_left, bottom_left_expected_label)\n', (8466, 8519), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((8525, 8660), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'bridge_point_left', '{top_left_expected_label, bottom_left_expected_label}'], {}), '(incdbscan4, bridge_point_left,\n {top_left_expected_label, bottom_left_expected_label})\n', (8570, 8660), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, 
reflect_horizontally\n'), ((9063, 9207), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'bridge_point_right', '{bottom_left_expected_label, bottom_right_expected_label}'], {}), '(incdbscan4,\n bridge_point_right, {bottom_left_expected_label,\n bottom_right_expected_label})\n', (9108, 9207), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((9235, 9370), 'testutils.assert_label_of_object_is_among_possible_ones', 'assert_label_of_object_is_among_possible_ones', (['incdbscan4', 'bridge_point_left', '{top_left_expected_label, bottom_left_expected_label}'], {}), '(incdbscan4, bridge_point_left,\n {top_left_expected_label, bottom_left_expected_label})\n', (9280, 9370), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((9522, 9574), 'numpy.array', 'np.array', (['[[0, EPS], [0, -EPS], [EPS, 0], [-EPS, 0]]'], {}), '([[0, EPS], [0, -EPS], [EPS, 0], [-EPS, 0]])\n', (9530, 9574), True, 'import numpy as np\n'), ((9740, 9799), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan3', 'neigbors', 'expected_label'], {}), '(incdbscan3, neigbors, expected_label)\n', (9761, 9799), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((9804, 9870), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan3', 'point_at_origin', 'expected_label'], {}), 
'(incdbscan3, point_at_origin, expected_label)\n', (9825, 9870), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((1664, 1737), 'testutils.assert_cluster_labels', 'assert_cluster_labels', (['incdbscan4', 'blob_in_middle[:i + 1]', 'expected_label'], {}), '(incdbscan4, blob_in_middle[:i + 1], expected_label)\n', (1685, 1737), False, 'from testutils import CLUSTER_LABEL_FIRST_CLUSTER, CLUSTER_LABEL_NOISE, assert_cluster_labels, assert_label_of_object_is_among_possible_ones, assert_two_objects_are_in_same_cluster, insert_objects_then_assert_cluster_labels, reflect_horizontally\n'), ((8845, 8881), 'numpy.vstack', 'np.vstack', (['[top_right, bottom_right]'], {}), '([top_right, bottom_right])\n', (8854, 8881), True, 'import numpy as np\n'), ((8981, 9015), 'numpy.vstack', 'np.vstack', (['[top_left, bottom_left]'], {}), '([top_left, bottom_left])\n', (8990, 9015), True, 'import numpy as np\n')] |
import socket
import cv2
import numpy
import os
import win32serviceutil
import win32service
import win32event
import servicemanager
import socket
from datetime import datetime, date, time
class AppServerSvc (win32serviceutil.ServiceFramework):
    """Windows service wrapper that runs the module-level client() routine."""
    _svc_name_ = "TestService"
    _svc_display_name_ = "Test Service"
    def __init__(self,args):
        win32serviceutil.ServiceFramework.__init__(self,args)
        # Event object used to signal this service to stop.
        self.hWaitStop = win32event.CreateEvent(None,0,0,None)
        # Default timeout for all sockets created by the service (seconds).
        socket.setdefaulttimeout(60)
    def SvcStop(self):
        """Handle a stop request from the Windows service manager."""
        self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
        win32event.SetEvent(self.hWaitStop)
    def SvcDoRun(self):
        """Service entry point: log the start event, then run the client."""
        servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
                          servicemanager.PYS_SERVICE_STARTED,
                          (self._svc_name_,''))
        self.main()
    def main(self):
        # Delegate to the module-level client() routine defined below.
        client()
#if __name__ == '__main__':
    #win32serviceutil.HandleCommandLine(AppServerSvc)
# --- Module-level client setup (runs at import time) ---
login = os.environ.get("USERNAME")    # current Windows user name
computer = socket.gethostname()       # local machine name
signature = login + '#' + computer    # identifier sent with every upload
# Server address is read from config.txt: line 1 = IP, line 2 = port.
f = open('config.txt', 'r')
config = f.read()
config = config.split('\n')
TCP_IP = config[0]
TCP_PORT = int(config[1])
sock = socket.socket()
try:
    sock.connect((TCP_IP, TCP_PORT))
except:
    # NOTE(review): bare except hides every connection error; winlock is a
    # project-local module, presumably locking the workstation -- verify.
    import winlock
def take_photo():
    """Capture a single frame from the default camera and return it as
    JPEG-encoded bytes.

    Returns:
        bytes: the JPEG-compressed frame (quality 90).
    """
    capture = cv2.VideoCapture(0)
    try:
        ret, frame = capture.read()
    finally:
        # fix: the capture handle was never released (camera stayed locked).
        capture.release()
    encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
    result, imgencode = cv2.imencode('.jpg', frame, encode_param)
    data = numpy.array(imgencode)
    # fix: ndarray.tostring() is deprecated (removed in numpy 2.0);
    # tobytes() returns the identical byte string.
    return data.tobytes()
def send_data(data, signature):
    """Send the signature and then the payload over the module-level socket,
    each prefixed with a 16-byte left-justified ASCII length header."""
    sig_header = str(len(signature)).ljust(16).encode()
    sock.send(sig_header)
    sock.send(signature.encode())
    payload_header = str(len(data)).ljust(16).encode()
    sock.send(payload_header)
    sock.send(data)
def recvall(sock, count):
    """Read exactly `count` bytes from `sock`.

    Returns the accumulated bytes, or None if the peer closes the
    connection before `count` bytes arrive.
    """
    chunks = []
    remaining = count
    while remaining:
        piece = sock.recv(remaining)
        if not piece:
            return None
        chunks.append(piece)
        remaining -= len(piece)
    return b''.join(chunks)
def get_data():
    """Accept one TCP connection on TCP_PORT+1 and receive a single
    length-prefixed string from the server.

    Returns:
        str: the decoded payload (the server's verdict).
    """
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((TCP_IP, TCP_PORT+1))
    listener.listen(True)
    conn, addr = listener.accept()
    # 16-byte ASCII header carries the payload length.
    length = recvall(conn, 16)
    payload = recvall(conn, int(length))
    return payload.decode()
def client():
    """Take one photo, upload it with the machine signature, wait for the
    server's verdict, and react: print the host name on approval, otherwise
    import the project-local winlock module (presumably locks the session)."""
    #msg = computer + '#' + login
    #send_data(msg)
    photo = take_photo()
    send_data(photo,signature)
    answer = get_data()
    sock.close()
    #decimg=cv2.imdecode(data,1)
    #cv2.imshow('CLIENT',decimg)
    #cv2.waitKey(0)
    #cv2.destroyAllWindows()
    if answer == '1':
        # Server accepted the photo/signature pair.
        print(computer)
    else:
        # Rejected: winlock is a project module -- side effect on import.
        import winlock
# NOTE(review): dead code -- the service entry point is disabled and the
# script always runs the client directly at import time.
if False:
    win32serviceutil.HandleCommandLine(AppServerSvc)
client()
| [
"cv2.imencode",
"socket.socket",
"os.environ.get",
"win32serviceutil.HandleCommandLine",
"servicemanager.LogMsg",
"numpy.array",
"win32serviceutil.ServiceFramework.__init__",
"cv2.VideoCapture",
"win32event.SetEvent",
"socket.gethostname",
"win32event.CreateEvent",
"socket.setdefaulttimeout"
] | [((1049, 1075), 'os.environ.get', 'os.environ.get', (['"""USERNAME"""'], {}), "('USERNAME')\n", (1063, 1075), False, 'import os\n'), ((1088, 1108), 'socket.gethostname', 'socket.gethostname', ([], {}), '()\n', (1106, 1108), False, 'import socket\n'), ((1281, 1296), 'socket.socket', 'socket.socket', ([], {}), '()\n', (1294, 1296), False, 'import socket\n'), ((1408, 1427), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (1424, 1427), False, 'import cv2\n'), ((1541, 1582), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'frame', 'encode_param'], {}), "('.jpg', frame, encode_param)\n", (1553, 1582), False, 'import cv2\n'), ((1595, 1617), 'numpy.array', 'numpy.array', (['imgencode'], {}), '(imgencode)\n', (1606, 1617), False, 'import numpy\n'), ((2134, 2183), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_STREAM'], {}), '(socket.AF_INET, socket.SOCK_STREAM)\n', (2147, 2183), False, 'import socket\n'), ((2925, 2973), 'win32serviceutil.HandleCommandLine', 'win32serviceutil.HandleCommandLine', (['AppServerSvc'], {}), '(AppServerSvc)\n', (2959, 2973), False, 'import win32serviceutil\n'), ((372, 426), 'win32serviceutil.ServiceFramework.__init__', 'win32serviceutil.ServiceFramework.__init__', (['self', 'args'], {}), '(self, args)\n', (414, 426), False, 'import win32serviceutil\n'), ((452, 492), 'win32event.CreateEvent', 'win32event.CreateEvent', (['None', '(0)', '(0)', 'None'], {}), '(None, 0, 0, None)\n', (474, 492), False, 'import win32event\n'), ((499, 527), 'socket.setdefaulttimeout', 'socket.setdefaulttimeout', (['(60)'], {}), '(60)\n', (523, 527), False, 'import socket\n'), ((632, 667), 'win32event.SetEvent', 'win32event.SetEvent', (['self.hWaitStop'], {}), '(self.hWaitStop)\n', (651, 667), False, 'import win32event\n'), ((704, 830), 'servicemanager.LogMsg', 'servicemanager.LogMsg', (['servicemanager.EVENTLOG_INFORMATION_TYPE', 'servicemanager.PYS_SERVICE_STARTED', "(self._svc_name_, '')"], {}), 
"(servicemanager.EVENTLOG_INFORMATION_TYPE,\n servicemanager.PYS_SERVICE_STARTED, (self._svc_name_, ''))\n", (725, 830), False, 'import servicemanager\n')] |
import tensorflow as tf
import numpy as np
import os
import sys
import time
import cv2
from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
# Helper code
def load_image_into_numpy_array(image):
    """Convert a PIL image into an (height, width, 3) uint8 numpy array."""
    width, height = image.size
    pixels = np.array(image.getdata())
    return pixels.reshape((height, width, 3)).astype(np.uint8)
def gen():
    """Yield nine multipart boundary chunks numbered 1..9.

    Each chunk has the form
    b'--frame\\r\\nContent-Type: text/plain\\r\\n\\r\\n<i>\\r\\n'.
    """
    i = 1
    while i < 10:
        # fix: str(i) is text -- concatenating bytes + str raises TypeError
        # on the very first next(); the counter must be encoded to bytes.
        yield (b'--frame\r\n'
               b'Content-Type: text/plain\r\n\r\n' + str(i).encode() + b'\r\n')
        i += 1
def get_frame():
    """Generator: run TensorFlow object detection on every frame of the
    video named in sys.argv[1] and yield multipart streaming chunks.

    Yields:
        bytes: b'--frame\\r\\nContent-Type: text/plain\\r\\n\\r\\n<jpeg>\\r\\n'
        with the detection overlay drawn on each JPEG-encoded frame.
    """
    video = sys.argv[1]  # video file path, ex) *.mp4
    # video = "rtsp://192.168.0.128:8091/test1.mp4" # To artik710
    # video = "rtsp://192.168.0.145:8091/test1.mp4" # To raspberryPi3
    cap = cv2.VideoCapture(video)
    # Needed so the object_detection package (one directory up) is importable.
    sys.path.append("..")
    # Object detection imports from the object detection module.
    from object_detection.utils import label_map_util
    from object_detection.utils import visualization_utils as vis_util
    # --- Model preparation ---
    MODEL_NAME = 'object_detection/export_models/inference_graph_rfcn_resnet101_30000'
    # Path to frozen detection graph: the actual model used for detection.
    PATH_TO_CKPT = MODEL_NAME + '/frozen_inference_graph.pb'
    # Label map file: maps class indices to category names.
    PATH_TO_LABELS = os.path.join('object_detection/data', 'object-detection.pbtxt')
    NUM_CLASSES = 1
    # Load the frozen TensorFlow graph into memory.
    detection_graph = tf.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.GraphDef()
        with tf.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')
    # Build the index used to translate predicted class ids to names.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    # --- Detection loop ---
    with detection_graph.as_default():
        with tf.Session(graph=detection_graph) as sess:
            prevTime = 0  # timestamp of the previous frame, for FPS display
            i = 1
            while True:
                ret, image_np = cap.read()
                if not ret:
                    # fix: stop cleanly at end of stream instead of
                    # crashing on a None frame further down.
                    break
                # Model expects images of shape [1, None, None, 3].
                image_np_expanded = np.expand_dims(image_np, axis=0)
                image_tensor = detection_graph.get_tensor_by_name('image_tensor:0')
                # Each box is a region where an object was detected.
                boxes = detection_graph.get_tensor_by_name('detection_boxes:0')
                # Each score is the confidence for the matching box.
                scores = detection_graph.get_tensor_by_name('detection_scores:0')
                classes = detection_graph.get_tensor_by_name('detection_classes:0')
                num_detections = detection_graph.get_tensor_by_name('num_detections:0')
                # Actual detection.
                (boxes, scores, classes, num_detections) = sess.run(
                    [boxes, scores, classes, num_detections],
                    feed_dict={image_tensor: image_np_expanded})
                # Draw boxes and labels onto the frame in place.
                vis_util.visualize_boxes_and_labels_on_image_array(
                    image_np,
                    np.squeeze(boxes),
                    np.squeeze(classes).astype(np.int32),
                    np.squeeze(scores),
                    category_index,
                    use_normalized_coordinates=True,
                    min_score_thresh=.5,
                    line_thickness=2)
                ################### Data analysis ###################
                final_score = np.squeeze(scores)  # scores
                r_count = 0   # number of detections above threshold
                r_score = []  # their scores
                final_category = np.array([category_index.get(c) for c in classes[0]])
                r_category = np.array([])  # their categories
                # fix: loop variable renamed so it no longer shadows the
                # outer frame counter i.
                for det in range(100):
                    if scores is None or final_score[det] > 0.5:
                        r_count = r_count + 1
                        r_score = np.append(r_score, final_score[det])
                        r_category = np.append(r_category, final_category[det])
                if r_count > 0:
                    for k in range(len(r_score)):
                        data = "Object Num: {} || Category: {} || Score: {}%".format(k + 1, r_category[k]['name'], 100 * r_score[k])
                        cv2.putText(image_np, data, (5, 60), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
                        final_boxes = np.squeeze(boxes)[k]  # ymin, xmin, ymax, xmax
                        xmin = final_boxes[1]
                        ymin = final_boxes[0]
                        xmax = final_boxes[3]
                        ymax = final_boxes[2]
                        # Box centre in normalized coordinates.
                        location_x = (xmax + xmin) / 2
                        location_y = (ymax + ymin) / 2
                        cv2.putText(image_np, "Location (x: {}, y: {})".format(location_x, location_y), (5, 80), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
                else:
                    cv2.putText(image_np, "Not Detect", (5, 60), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
                #####################################################
                # Frame-rate overlay.
                curTime = time.time()
                sec = curTime - prevTime
                prevTime = curTime
                # fix: guard against a zero time delta, and do not shadow
                # the builtin str() (was `str = "FPS : ..."`).
                fps = 1 / sec if sec > 0 else 0.0
                fps_label = "FPS : %0.1f" % fps
                cv2.putText(image_np, fps_label, (5, 40), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
                # Trained model name overlay.
                model_name = MODEL_NAME.split('/')[2]
                cv2.putText(image_np, model_name, (5, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))
                imgencode = cv2.imencode('.jpg', image_np)[1]
                # fix: ndarray.tostring() is deprecated; tobytes() is identical.
                stringData = imgencode.tobytes()
                yield (b'--frame\r\n'
                       b'Content-Type: text/plain\r\n\r\n' + stringData + b'\r\n')
                i += 1
    # fix: release the capture device explicitly (was just del(cap)).
    cap.release()
| [
"tensorflow.Graph",
"cv2.imencode",
"tensorflow.Session",
"time.time",
"os.path.join",
"tensorflow.GraphDef",
"numpy.squeeze",
"cv2.putText",
"numpy.array",
"numpy.append",
"cv2.VideoCapture",
"object_detection.utils.label_map_util.convert_label_map_to_categories",
"tensorflow.import_graph_d... | [((723, 746), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (739, 746), False, 'import cv2\n'), ((830, 851), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (845, 851), False, 'import sys\n'), ((1422, 1485), 'os.path.join', 'os.path.join', (['"""object_detection/data"""', '"""object-detection.pbtxt"""'], {}), "('object_detection/data', 'object-detection.pbtxt')\n", (1434, 1485), False, 'import os\n'), ((1576, 1586), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (1584, 1586), True, 'import tensorflow as tf\n'), ((2166, 2210), 'object_detection.utils.label_map_util.load_labelmap', 'label_map_util.load_labelmap', (['PATH_TO_LABELS'], {}), '(PATH_TO_LABELS)\n', (2194, 2210), False, 'from object_detection.utils import label_map_util\n'), ((2226, 2340), 'object_detection.utils.label_map_util.convert_label_map_to_categories', 'label_map_util.convert_label_map_to_categories', (['label_map'], {'max_num_classes': 'NUM_CLASSES', 'use_display_name': '(True)'}), '(label_map, max_num_classes=\n NUM_CLASSES, use_display_name=True)\n', (2272, 2340), False, 'from object_detection.utils import label_map_util\n'), ((2417, 2465), 'object_detection.utils.label_map_util.create_category_index', 'label_map_util.create_category_index', (['categories'], {}), '(categories)\n', (2453, 2465), False, 'from object_detection.utils import label_map_util\n'), ((1643, 1656), 'tensorflow.GraphDef', 'tf.GraphDef', ([], {}), '()\n', (1654, 1656), True, 'import tensorflow as tf\n'), ((1666, 1700), 'tensorflow.gfile.GFile', 'tf.gfile.GFile', (['PATH_TO_CKPT', '"""rb"""'], {}), "(PATH_TO_CKPT, 'rb')\n", (1680, 1700), True, 'import tensorflow as tf\n'), ((1804, 1846), 'tensorflow.import_graph_def', 'tf.import_graph_def', (['od_graph_def'], {'name': '""""""'}), "(od_graph_def, name='')\n", (1823, 1846), True, 'import tensorflow as tf\n'), ((2527, 2560), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'detection_graph'}), 
'(graph=detection_graph)\n', (2537, 2560), True, 'import tensorflow as tf\n'), ((2796, 2828), 'numpy.expand_dims', 'np.expand_dims', (['image_np'], {'axis': '(0)'}), '(image_np, axis=0)\n', (2810, 2828), True, 'import numpy as np\n'), ((4105, 4123), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (4115, 4123), True, 'import numpy as np\n'), ((4338, 4350), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (4346, 4350), True, 'import numpy as np\n'), ((5626, 5637), 'time.time', 'time.time', ([], {}), '()\n', (5635, 5637), False, 'import time\n'), ((5764, 5839), 'cv2.putText', 'cv2.putText', (['image_np', 'str', '(5, 40)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 0)'], {}), '(image_np, str, (5, 40), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n', (5775, 5839), False, 'import cv2\n'), ((5923, 6009), 'cv2.putText', 'cv2.putText', (['image_np', 'model_name', '(5, 20)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 0)'], {}), '(image_np, model_name, (5, 20), cv2.FONT_HERSHEY_PLAIN, 1, (255,\n 0, 0))\n', (5934, 6009), False, 'import cv2\n'), ((3795, 3812), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (3805, 3812), True, 'import numpy as np\n'), ((3872, 3890), 'numpy.squeeze', 'np.squeeze', (['scores'], {}), '(scores)\n', (3882, 3890), True, 'import numpy as np\n'), ((5444, 5533), 'cv2.putText', 'cv2.putText', (['image_np', '"""Not Detect"""', '(5, 60)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 0)'], {}), "(image_np, 'Not Detect', (5, 60), cv2.FONT_HERSHEY_PLAIN, 1, (\n 255, 0, 0))\n", (5455, 5533), False, 'import cv2\n'), ((6025, 6055), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'image_np'], {}), "('.jpg', image_np)\n", (6037, 6055), False, 'import cv2\n'), ((4507, 4541), 'numpy.append', 'np.append', (['r_score', 'final_score[i]'], {}), '(r_score, final_score[i])\n', (4516, 4541), True, 'import numpy as np\n'), ((4567, 4607), 'numpy.append', 'np.append', (['r_category', 'final_category[i]'], {}), '(r_category, final_category[i])\n', (4576, 
4607), True, 'import numpy as np\n'), ((4830, 4906), 'cv2.putText', 'cv2.putText', (['image_np', 'data', '(5, 60)', 'cv2.FONT_HERSHEY_PLAIN', '(1)', '(255, 0, 0)'], {}), '(image_np, data, (5, 60), cv2.FONT_HERSHEY_PLAIN, 1, (255, 0, 0))\n', (4841, 4906), False, 'import cv2\n'), ((3824, 3843), 'numpy.squeeze', 'np.squeeze', (['classes'], {}), '(classes)\n', (3834, 3843), True, 'import numpy as np\n'), ((4933, 4950), 'numpy.squeeze', 'np.squeeze', (['boxes'], {}), '(boxes)\n', (4943, 4950), True, 'import numpy as np\n')] |
import flask
import numpy as np
import os
import requests
import sys
from cv2 import cv2 as cv
from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error
import struct
from threading import Thread
import imagehash
from PIL import Image
class Request():
    """A pending frame-processing request tracked by sequence number."""

    def __init__(self, frame, method):
        # Frame sequence number, requested operation name, and the
        # (initially empty) checksum of the processed result.
        self.frame = frame
        self.method = method
        self.checksum = ""

    def update_checksum(self, checksum):
        """Record the checksum of the processed frame."""
        self.checksum = checksum

    def get_frame(self):
        """Return the frame sequence number."""
        return self.frame

    def get_method(self):
        """Return the processing method name."""
        return self.method

    def get_checksum(self):
        """Return the recorded checksum (empty string until set)."""
        return self.checksum
replica_number = 1                # identifier of this replica in the group
host = "localhost"
multicast_group = "172.16.31.10"  # multicast group shared by all replicas
multicast_port = 20000
#sequencer_port = 20000
timeout = 3                       # intended socket timeout, seconds
buf = 1024                        # UDP receive buffer size
app = flask.Flask(__name__)
requests_awaiting = {}            # seq_num -> Request, processed but not yet delivered
requests_finished = []            # seq_nums already delivered
success_req = {}                  # seq_num -> [replica_num, ...] success reports
delivered_req = {}                # seq_num -> [replica_num, ...] delivery reports
fail_req = {}                     # seq_num -> [replica_num, ...] failure reports
# TODO : Figure out how to synchronize sequence count between sequencer and implementation
seq_count = 1                     # next sequence number expected for delivery
@app.route('/getUDPPort', methods=['GET'])
def getUDPPort():
    """Return, as text, the UDP port the multicast listener is bound to."""
    # serv is the module-level UDPServer instance; get_port() returns the
    # socket's (host, port) pair.
    _, temp_port = serv.get_port()
    return str(temp_port)
@app.route('/getJob/<seq_num>', methods=['GET'])
def publishFrame(seq_num):
    """Serve the processed output image for job `seq_num`.

    Falls back to a stock failure image when the job file does not exist.
    """
    print(str(seq_num))
    job_path = "../python/jobs/f" + str(seq_num) + ".jpg"
    if not os.path.isfile(job_path):
        return flask.send_file("images/color_fail.jpg", mimetype='image/jpg')
    return flask.send_file(job_path, mimetype='image/jpg')
def getFrame(frame_num):
    """Download the raw static frame from the OpenISS REST endpoint and
    return it as a uint8 numpy byte buffer."""
    img_url = "http://localhost:8080/rest/openiss/getStaticFrame/" + str(frame_num)
    payload = requests.get(img_url).content
    return np.frombuffer(payload, dtype=np.uint8)
def deliverFrame(frame_num):
    """Multicast a 'delivered' notification for `frame_num`, tagged with
    this replica's number, to the replica group."""
    udp_string = str(frame_num) + ",delivered," + str(replica_number)
    announcer = socket(AF_INET,SOCK_DGRAM)
    announcer.sendto(udp_string.encode(), (multicast_group, multicast_port))
    print("Sending %s ..." % udp_string)
    announcer.close()
def processFrame(frame_num):
    """Dispatch the awaiting request for `frame_num` to its processor;
    requests with an unknown method are dropped from the queue."""
    method = requests_awaiting[frame_num].get_method()
    if method == "canny":
        doCanny(frame_num)
    elif method == "contour":
        doContour(frame_num)
    else:
        print("Method called does not exist on web service! Skipping...")
        requests_awaiting.pop(frame_num, None)
def checkRequestsAwaiting():
    """Deliver and retire all consecutively-numbered finished requests,
    starting at the global sequence counter, preserving total order."""
    global seq_count
    while seq_count in requests_awaiting:
        deliverFrame(seq_count)                 # notify the group this frame is done
        requests_awaiting.pop(seq_count, None)  # retire the pending request
        requests_finished.append(seq_count)     # remember it as completed
        seq_count += 1
def addToSharedQueues(frame_num, method, replica_num):
    """Record a status report (`success`, `fail`, or anything else treated
    as `delivered`) from another replica under its frame number."""
    global success_req, seq_count
    if method == "success":
        success_req.setdefault(frame_num, []).append(replica_num)
    elif method == "fail":
        fail_req.setdefault(frame_num, []).append(replica_num)
    else:
        delivered_req.setdefault(frame_num, []).append(replica_num)
def doCanny(seq_num):
    """Fetch frame `seq_num`, run Canny edge detection, save the result to
    the jobs folder and record its perceptual hash on the awaiting request."""
    raw = getFrame(seq_num)
    img = cv.imdecode(raw, cv.IMREAD_UNCHANGED)
    if img is None:
        print("Error loading image")
        return
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    edges = cv.Canny(gray, 50, 150, 3, L2gradient=False)
    # Back to 3 channels so the JPEG matches the colour pipeline.
    edges = cv.cvtColor(edges, cv.COLOR_GRAY2BGR)
    print("Saving canny...")
    file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
    sys.stdout.flush()
    cv.imwrite(file_name, edges)
    requests_awaiting[seq_num].checksum = imagehash.average_hash(Image.open(file_name))
def doContour(seq_num):
    """Fetch frame `seq_num`, binarize it with a fixed threshold, save the
    result to the jobs folder and record its perceptual hash on the
    awaiting request."""
    raw = getFrame(seq_num)
    img = cv.imdecode(raw, cv.IMREAD_UNCHANGED)
    if img is None:
        print("Error loading image")
        return
    gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, binary = cv.threshold(gray, 100, 255, cv.THRESH_BINARY)
    print("Saving contour...")
    # Back to 3 channels so the JPEG matches the colour pipeline.
    binary = cv.cvtColor(binary, cv.COLOR_GRAY2BGR)
    file_name = "../python/jobs/f" + str(seq_num) + ".jpg"
    cv.imwrite(file_name, binary)
    requests_awaiting[seq_num].checksum = imagehash.average_hash(Image.open(file_name))
class UDPServer():
    """Multicast UDP listener that receives job and status messages from
    the sequencer / other replicas and dispatches frame processing."""
    def __init__(self):
        self._running = True
        self.sock = socket(AF_INET, SOCK_DGRAM)
        self.buf = buf          # receive buffer size (module constant)
        self.timeout = timeout  # kept for reference; not applied to the socket
        # Join the multicast group on all interfaces ("0.0.0.0").
        self.group = inet_aton(multicast_group) + inet_aton("0.0.0.0")
        self.sock.setsockopt(IPPROTO_IP, IP_ADD_MEMBERSHIP, self.group)
        self.sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        self.sock.bind(("", multicast_port))
    def terminate(self):
        """Stop the listener and close its socket (unblocks recvfrom)."""
        self._running = False
        self.sock.shutdown(0)
        self.sock.close()
    def is_running(self):
        """Return True while the server has not been terminated."""
        return self._running
    def get_port(self):
        """Return the listening socket's (host, port) pair."""
        return self.sock.getsockname()
    def run(self):
        """Receive loop: parse '<seq>,<method>[,<replica>]' messages.

        Status messages (success/fail/delivered) from other replicas go to
        the shared queues; new job messages are processed and then delivered
        in sequence order. Exits when the socket is shut down.
        """
        global seq_count
        while True:
            try:
                print("Waiting to receive data...")
                sys.stdout.flush()
                data,address = self.sock.recvfrom(self.buf)
                if data:
                    strings = data.decode('utf-8')
                    seq_num = int(strings.split(',')[0])
                    method = strings.split(',')[1]
                    print("Message:", method, seq_num, "Address: ", address)
                    if(method == "success" or method == "fail" or method == "delivered"):
                        # Status report from a replica; ignore our own echoes.
                        replica_num = int(strings.split(',')[2])
                        if replica_num != replica_number:
                            addToSharedQueues(seq_num, method, replica_num)
                    elif(seq_num >= seq_count and seq_num not in requests_finished and seq_num not in requests_awaiting):
                        # New, not-yet-seen job: process it and then deliver
                        # everything that is now consecutive.
                        requests_awaiting[seq_num] = Request(seq_num, method)
                        processFrame(seq_num)
                        checkRequestsAwaiting()
                    else:
                        print("Packet with sequence number ", seq_num, " already received!")
                sys.stdout.flush()
            except socket_error:
                # Socket was shut down via terminate(); leave the loop.
                self.sock.close()
                break
# Main execution
# Start the multicast listener in a background thread, then run the Flask
# app in the main thread; tear both down when app.run() returns.
serv = UDPServer()
t = Thread(target=serv.run)
t.start()
if __name__ == '__main__':
    app.run(host='127.0.0.1', port=8001)
# If here, ctrl+c was called
serv.terminate()
t.join()
sys.exit()
| [
"sys.stdout.flush",
"numpy.frombuffer",
"cv2.cv2.threshold",
"PIL.Image.open",
"socket.socket",
"flask.Flask",
"requests.get",
"os.path.isfile",
"cv2.cv2.imdecode",
"flask.send_file",
"socket.inet_aton",
"sys.exit",
"cv2.cv2.Canny",
"threading.Thread",
"cv2.cv2.cvtColor",
"cv2.cv2.imwr... | [((858, 879), 'flask.Flask', 'flask.Flask', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import flask\n'), ((6565, 6588), 'threading.Thread', 'Thread', ([], {'target': 'serv.run'}), '(target=serv.run)\n', (6571, 6588), False, 'from threading import Thread\n'), ((6723, 6733), 'sys.exit', 'sys.exit', ([], {}), '()\n', (6731, 6733), False, 'import sys\n'), ((1372, 1397), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1386, 1397), False, 'import os\n'), ((1676, 1697), 'requests.get', 'requests.get', (['img_url'], {}), '(img_url)\n', (1688, 1697), False, 'import requests\n'), ((1739, 1776), 'numpy.frombuffer', 'np.frombuffer', (['result'], {'dtype': 'np.uint8'}), '(result, dtype=np.uint8)\n', (1752, 1776), True, 'import numpy as np\n'), ((1994, 2021), 'socket.socket', 'socket', (['AF_INET', 'SOCK_DGRAM'], {}), '(AF_INET, SOCK_DGRAM)\n', (2000, 2021), False, 'from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error\n'), ((3331, 3366), 'cv2.cv2.imdecode', 'cv.imdecode', (['x', 'cv.IMREAD_UNCHANGED'], {}), '(x, cv.IMREAD_UNCHANGED)\n', (3342, 3366), True, 'from cv2 import cv2 as cv\n'), ((3454, 3489), 'cv2.cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (3465, 3489), True, 'from cv2 import cv2 as cv\n'), ((3502, 3550), 'cv2.cv2.Canny', 'cv.Canny', (['img_gray', '(50)', '(150)', '(3)'], {'L2gradient': '(False)'}), '(img_gray, 50, 150, 3, L2gradient=False)\n', (3510, 3550), True, 'from cv2 import cv2 as cv\n'), ((3563, 3600), 'cv2.cv2.cvtColor', 'cv.cvtColor', (['edges', 'cv.COLOR_GRAY2BGR'], {}), '(edges, cv.COLOR_GRAY2BGR)\n', (3574, 3600), True, 'from cv2 import cv2 as cv\n'), ((3745, 3763), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (3761, 3763), False, 'import sys\n'), ((3768, 3796), 'cv2.cv2.imwrite', 'cv.imwrite', (['file_name', 'edges'], {}), '(file_name, 
edges)\n', (3778, 3796), True, 'from cv2 import cv2 as cv\n'), ((3970, 4005), 'cv2.cv2.imdecode', 'cv.imdecode', (['x', 'cv.IMREAD_UNCHANGED'], {}), '(x, cv.IMREAD_UNCHANGED)\n', (3981, 4005), True, 'from cv2 import cv2 as cv\n'), ((4093, 4128), 'cv2.cv2.cvtColor', 'cv.cvtColor', (['img', 'cv.COLOR_BGR2GRAY'], {}), '(img, cv.COLOR_BGR2GRAY)\n', (4104, 4128), True, 'from cv2 import cv2 as cv\n'), ((4149, 4199), 'cv2.cv2.threshold', 'cv.threshold', (['img_gray', '(100)', '(255)', 'cv.THRESH_BINARY'], {}), '(img_gray, 100, 255, cv.THRESH_BINARY)\n', (4161, 4199), True, 'from cv2 import cv2 as cv\n'), ((4248, 4290), 'cv2.cv2.cvtColor', 'cv.cvtColor', (['img_thresh', 'cv.COLOR_GRAY2BGR'], {}), '(img_thresh, cv.COLOR_GRAY2BGR)\n', (4259, 4290), True, 'from cv2 import cv2 as cv\n'), ((4397, 4430), 'cv2.cv2.imwrite', 'cv.imwrite', (['file_name', 'img_thresh'], {}), '(file_name, img_thresh)\n', (4407, 4430), True, 'from cv2 import cv2 as cv\n'), ((1414, 1462), 'flask.send_file', 'flask.send_file', (['file_path'], {'mimetype': '"""image/jpg"""'}), "(file_path, mimetype='image/jpg')\n", (1429, 1462), False, 'import flask\n'), ((1488, 1550), 'flask.send_file', 'flask.send_file', (['"""images/color_fail.jpg"""'], {'mimetype': '"""image/jpg"""'}), "('images/color_fail.jpg', mimetype='image/jpg')\n", (1503, 1550), False, 'import flask\n'), ((3835, 3856), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (3845, 3856), False, 'from PIL import Image\n'), ((4469, 4490), 'PIL.Image.open', 'Image.open', (['file_name'], {}), '(file_name)\n', (4479, 4490), False, 'from PIL import Image\n'), ((4636, 4663), 'socket.socket', 'socket', (['AF_INET', 'SOCK_DGRAM'], {}), '(AF_INET, SOCK_DGRAM)\n', (4642, 4663), False, 'from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error\n'), ((4739, 4765), 'socket.inet_aton', 'inet_aton', (['multicast_group'], {}), '(multicast_group)\n', (4748, 
4765), False, 'from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error\n'), ((4768, 4788), 'socket.inet_aton', 'inet_aton', (['"""0.0.0.0"""'], {}), "('0.0.0.0')\n", (4777, 4788), False, 'from socket import AF_INET, SOCK_DGRAM, INADDR_ANY, IPPROTO_IP, IP_ADD_MEMBERSHIP, SOL_SOCKET, SO_REUSEADDR, socket, inet_aton, error as socket_error\n'), ((5354, 5372), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (5370, 5372), False, 'import sys\n'), ((6416, 6434), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (6432, 6434), False, 'import sys\n')] |
import os
import cv2
import numpy as np
from PIL import Image
from keras.engine.topology import Layer, InputSpec
import keras.utils.conv_utils as conv_utils
import tensorflow as tf
import keras.backend as K
import skimage.io as io
class DenseDepthAnalysis:
def __init__(self, image_id, model, object_attr, width, height, depth_to_distance, min_depth=10, max_depth=1000, batch_size=1):
self.object_attr = object_attr
self.image_id = image_id
self.model = model
self.width = width
self.height = height
self.min_depth = min_depth
self.max_depth = max_depth
self.batch_size = batch_size
self.depth_to_distance = depth_to_distance
def rescale_img(self, imgs, width, height):
return imgs.resize((width, height))
def read_image(self):
img = Image.open(os.path.join(os.getenv('TEST_IMAGES_PATH'), self.image_id+'.jpg'))
return img
def depth_norm(self, x, max_depth_):
return max_depth_ / x
def get_prediction(self):
inp = self.read_image()
img_width = np.asarray(inp).shape[0]
img_height = np.asarray(inp).shape[1]
rescaled_img = self.rescale_img(inp, self.width, self.height)
inp = np.clip(np.asarray(rescaled_img, dtype=float) / 255, 0, 1)
inp = np.expand_dims(inp, axis=0)
predictions = self.model.predict(inp, batch_size=self.batch_size)
outputs = np.clip(self.depth_norm(predictions, max_depth_=self.max_depth), self.min_depth, self.max_depth) / self.max_depth
rescaled_output = cv2.resize(outputs.copy()[0], (img_height, img_width), interpolation=cv2.INTER_AREA)
# io.imsave('test/results/xyz.png', rescaled_output)
return rescaled_output
def find_distance(self, depth):
return int(depth * self.depth_to_distance)
def revise_object_attr(self, depth_map):
revised_object_attr = list()
for attr in self.object_attr:
object_depth = depth_map[attr[0]:attr[2], attr[1]:attr[3]].mean()
if object_depth < int(os.getenv('DEPTH_THRESHOLD')):
revised_object_attr.append(tuple((attr[0], attr[1], attr[2], attr[3], attr[4], self.find_distance(object_depth))))
return revised_object_attr
class BilinearUpSampling2D(Layer):
def __init__(self, size=(2, 2), data_format=None, **kwargs):
super(BilinearUpSampling2D, self).__init__(**kwargs)
self.data_format = K.normalize_data_format(data_format)
self.size = conv_utils.normalize_tuple(size, 2, 'size')
self.input_spec = InputSpec(ndim=4)
def compute_output_shape(self, input_shape):
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
return (input_shape[0],
input_shape[1],
height,
width)
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return (input_shape[0],
height,
width,
input_shape[3])
def call(self, inputs):
input_shape = K.shape(inputs)
if self.data_format == 'channels_first':
height = self.size[0] * input_shape[2] if input_shape[2] is not None else None
width = self.size[1] * input_shape[3] if input_shape[3] is not None else None
elif self.data_format == 'channels_last':
height = self.size[0] * input_shape[1] if input_shape[1] is not None else None
width = self.size[1] * input_shape[2] if input_shape[2] is not None else None
return tf.image.resize_images(inputs, [height, width], method=tf.image.ResizeMethod.BILINEAR, align_corners=True)
def get_config(self):
config = {'size': self.size, 'data_format': self.data_format}
base_config = super(BilinearUpSampling2D, self).get_config()
return dict(list(base_config.items()) + list(config.items())) | [
"keras.engine.topology.InputSpec",
"tensorflow.image.resize_images",
"keras.backend.shape",
"os.getenv",
"numpy.asarray",
"numpy.expand_dims",
"keras.backend.normalize_data_format",
"keras.utils.conv_utils.normalize_tuple"
] | [((1363, 1390), 'numpy.expand_dims', 'np.expand_dims', (['inp'], {'axis': '(0)'}), '(inp, axis=0)\n', (1377, 1390), True, 'import numpy as np\n'), ((2548, 2584), 'keras.backend.normalize_data_format', 'K.normalize_data_format', (['data_format'], {}), '(data_format)\n', (2571, 2584), True, 'import keras.backend as K\n'), ((2606, 2649), 'keras.utils.conv_utils.normalize_tuple', 'conv_utils.normalize_tuple', (['size', '(2)', '"""size"""'], {}), "(size, 2, 'size')\n", (2632, 2649), True, 'import keras.utils.conv_utils as conv_utils\n'), ((2677, 2694), 'keras.engine.topology.InputSpec', 'InputSpec', ([], {'ndim': '(4)'}), '(ndim=4)\n', (2686, 2694), False, 'from keras.engine.topology import Layer, InputSpec\n'), ((3530, 3545), 'keras.backend.shape', 'K.shape', (['inputs'], {}), '(inputs)\n', (3537, 3545), True, 'import keras.backend as K\n'), ((4039, 4150), 'tensorflow.image.resize_images', 'tf.image.resize_images', (['inputs', '[height, width]'], {'method': 'tf.image.ResizeMethod.BILINEAR', 'align_corners': '(True)'}), '(inputs, [height, width], method=tf.image.\n ResizeMethod.BILINEAR, align_corners=True)\n', (4061, 4150), True, 'import tensorflow as tf\n'), ((893, 922), 'os.getenv', 'os.getenv', (['"""TEST_IMAGES_PATH"""'], {}), "('TEST_IMAGES_PATH')\n", (902, 922), False, 'import os\n'), ((1129, 1144), 'numpy.asarray', 'np.asarray', (['inp'], {}), '(inp)\n', (1139, 1144), True, 'import numpy as np\n'), ((1176, 1191), 'numpy.asarray', 'np.asarray', (['inp'], {}), '(inp)\n', (1186, 1191), True, 'import numpy as np\n'), ((1297, 1334), 'numpy.asarray', 'np.asarray', (['rescaled_img'], {'dtype': 'float'}), '(rescaled_img, dtype=float)\n', (1307, 1334), True, 'import numpy as np\n'), ((2151, 2179), 'os.getenv', 'os.getenv', (['"""DEPTH_THRESHOLD"""'], {}), "('DEPTH_THRESHOLD')\n", (2160, 2179), False, 'import os\n')] |
# coding: utf-8
# In[ ]:
# 0. 執行指令:
# !python predict.py -c config.json -i /path/to/image/or/video
# 輸入為 圖片: !python predict.py -c config.json -i ./o_input
# 輸入為 影片: !python predict.py -c config.json -i ./o_input/Produce.mp4
# 1. 輸入檔案擺放位置:
# 將要偵測的 影片或圖片 放到 資料夾 o_input (影片必須為mp4格式;圖片可以多張,必須為 '.jpg','.JPG','.png','JPEG' 格式)。
# 2. 程式設定:
# (第17行) 假設 影片名稱為Produce.mp4,則 input_path = './o_input/Produce.mp4'。
# (第17行) 假設 要偵測圖片(可以多張),則 input_path = './o_input/' 。
#
# (第34行) infer_model = load_model('kholes_448_an_ne4.h5')
# model 為 kholes_448_an_ne4.h5,大於100M,無法上傳github。
# 下載點: https://drive.google.com/file/d/1wbhtz99RANQ2-EDhSCW3hKhsHSrHWXw3/view?usp=sharing。
# 3. 輸出結果:
# 執行結束後,輸出會在 資料夾 output。6秒鐘的影片,大約需要9分鐘;一張圖片,約3秒鐘(在很普通的筆電)。
# 4. 資料蒐集:
# 使用 A8+ 手機。
# 5. 測試環境:
# windows。
# 6. 取消utils/bbox.py的所有註解,會輸出bounding box的座標與 類別(["hole", "square", "repair"] # ["圓孔蓋", "方孔蓋", "修補"])。
# In[6]:
# -*- coding: utf-8 -*-
# predict
import os
import argparse
import json
import cv2
from utils.utils import get_yolo_boxes, makedirs
from utils.bbox import draw_boxes
from keras.models import load_model
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import time
# get_ipython().run_line_magic('matplotlib', 'inline')
# ./o_input/
input_path = './o_input/' # 影片輸入設定: input_path = './o_input/test.mp4'
output_path = 'output/'
makedirs(output_path)
# Set some parameter
net_h, net_w = 416, 416 # a multiple of 32, the smaller the faster
obj_thresh, nms_thresh = 0.5, 0.45
# [9,13, 10,7, 19,20, 38,36, 57,22, 90,81, 91,41, 144,67, 209,119] # kholes1
# anchors = [55,69, 75,234, 133,240, 136,129, 142,363, 203,290, 228,184, 285,359, 341,260]
anchors = [15,15, 19,46, 40,101, 42,22, 81,41, 84,15, 125,71, 181,33, 196,118]
# labels = ["1_mediumcircle", "4_mediumsquare", "6_patch"] # TIGER
labels = ["hole", "square", "repair"] # ["圓孔蓋", "方孔蓋", "修補"]
# labels = ["aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"] # TIGER
# Load the model
# print("b")
infer_model = load_model('kholes_448_an_ne4.h5')
start_time = time.time()
# Predict bounding boxes
if input_path[-4:] == '.mp4': # do detection on a video
video_out = output_path + input_path.split('/')[-1]
video_reader = cv2.VideoCapture(input_path)
nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
# fourcc = cv2.VideoWriter_fourcc(*'DIVX')
video_writer = cv2.VideoWriter(video_out,
cv2.VideoWriter_fourcc(*'H264'),
30.0,
(frame_w, frame_h))
# the main loop
batch_size = 1
images = []
start_point = 0 #%
show_window = False
for i in tqdm(range(nb_frames)):
_, image = video_reader.read()
if image is None:
continue
if (float(i+1)/nb_frames) > start_point/100.:
images += [image]
if (i%batch_size == 0) or (i == (nb_frames-1) and len(images) > 0):
# predict the bounding boxes
batch_boxes = get_yolo_boxes(infer_model, images, net_h, net_w, anchors, obj_thresh, nms_thresh)
for i in range(len(images)):
# draw bounding boxes on the image using labels
draw_boxes(images[i], batch_boxes[i], labels, obj_thresh)
# show the video with detection bounding boxes
if show_window: cv2.imshow('video with bboxes', images[i])
# write result to the output video
video_writer.write(images[i])
images = []
if show_window and cv2.waitKey(1) == 27: break # esc to quit
if show_window: cv2.destroyAllWindows()
video_reader.release()
video_writer.release()
else: # do detection on an image or a set of images
image_paths = []
if os.path.isdir(input_path):
for inp_file in os.listdir(input_path):
image_paths += [input_path + inp_file]
else:
image_paths += [input_path]
image_paths = [inp_file for inp_file in image_paths if (inp_file[-4:] in ['.jpg','.JPG', '.png', 'JPEG'])]
# the main loop
for image_path in image_paths:
image = cv2.imread(image_path)
print(image_path)
# predict the bounding boxes
boxes = get_yolo_boxes(infer_model, [image], net_h, net_w, anchors, obj_thresh, nms_thresh)[0]
# draw bounding boxes on the image using labels
draw_boxes(image, boxes, labels, obj_thresh)
# write the image with bounding boxes to file
output_img_path = output_path + image_path.split('/')[-1]
cv2.imwrite(output_img_path, np.uint8(image))
img = cv2.imread(output_img_path)[:,:,::-1]
plt.imshow(img)
elapsed_time = time.time() - start_time
print("執行時間: " + time.strftime("%H:%M:%S", time.gmtime(elapsed_time)) )
| [
"matplotlib.pyplot.imshow",
"numpy.uint8",
"os.listdir",
"keras.models.load_model",
"time.gmtime",
"utils.utils.get_yolo_boxes",
"cv2.imshow",
"utils.bbox.draw_boxes",
"os.path.isdir",
"utils.utils.makedirs",
"cv2.VideoCapture",
"cv2.VideoWriter_fourcc",
"cv2.destroyAllWindows",
"cv2.waitK... | [((1453, 1474), 'utils.utils.makedirs', 'makedirs', (['output_path'], {}), '(output_path)\n', (1461, 1474), False, 'from utils.utils import get_yolo_boxes, makedirs\n'), ((2241, 2275), 'keras.models.load_model', 'load_model', (['"""kholes_448_an_ne4.h5"""'], {}), "('kholes_448_an_ne4.h5')\n", (2251, 2275), False, 'from keras.models import load_model\n'), ((2292, 2303), 'time.time', 'time.time', ([], {}), '()\n', (2301, 2303), False, 'import time\n'), ((2463, 2491), 'cv2.VideoCapture', 'cv2.VideoCapture', (['input_path'], {}), '(input_path)\n', (2479, 2491), False, 'import cv2\n'), ((4226, 4251), 'os.path.isdir', 'os.path.isdir', (['input_path'], {}), '(input_path)\n', (4239, 4251), False, 'import os\n'), ((5159, 5170), 'time.time', 'time.time', ([], {}), '()\n', (5168, 5170), False, 'import time\n'), ((2803, 2834), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'H264'"], {}), "(*'H264')\n", (2825, 2834), False, 'import cv2\n'), ((4056, 4079), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (4077, 4079), False, 'import cv2\n'), ((4278, 4300), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (4288, 4300), False, 'import os\n'), ((4583, 4605), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (4593, 4605), False, 'import cv2\n'), ((4838, 4882), 'utils.bbox.draw_boxes', 'draw_boxes', (['image', 'boxes', 'labels', 'obj_thresh'], {}), '(image, boxes, labels, obj_thresh)\n', (4848, 4882), False, 'from utils.bbox import draw_boxes\n'), ((5119, 5134), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (5129, 5134), True, 'import matplotlib.pyplot as plt\n'), ((4686, 4773), 'utils.utils.get_yolo_boxes', 'get_yolo_boxes', (['infer_model', '[image]', 'net_h', 'net_w', 'anchors', 'obj_thresh', 'nms_thresh'], {}), '(infer_model, [image], net_h, net_w, anchors, obj_thresh,\n nms_thresh)\n', (4700, 4773), False, 'from utils.utils import get_yolo_boxes, makedirs\n'), ((5042, 5057), 
'numpy.uint8', 'np.uint8', (['image'], {}), '(image)\n', (5050, 5057), True, 'import numpy as np\n'), ((5073, 5100), 'cv2.imread', 'cv2.imread', (['output_img_path'], {}), '(output_img_path)\n', (5083, 5100), False, 'import cv2\n'), ((5235, 5260), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (5246, 5260), False, 'import time\n'), ((3389, 3475), 'utils.utils.get_yolo_boxes', 'get_yolo_boxes', (['infer_model', 'images', 'net_h', 'net_w', 'anchors', 'obj_thresh', 'nms_thresh'], {}), '(infer_model, images, net_h, net_w, anchors, obj_thresh,\n nms_thresh)\n', (3403, 3475), False, 'from utils.utils import get_yolo_boxes, makedirs\n'), ((3606, 3663), 'utils.bbox.draw_boxes', 'draw_boxes', (['images[i]', 'batch_boxes[i]', 'labels', 'obj_thresh'], {}), '(images[i], batch_boxes[i], labels, obj_thresh)\n', (3616, 3663), False, 'from utils.bbox import draw_boxes\n'), ((3992, 4006), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (4003, 4006), False, 'import cv2\n'), ((3781, 3823), 'cv2.imshow', 'cv2.imshow', (['"""video with bboxes"""', 'images[i]'], {}), "('video with bboxes', images[i])\n", (3791, 3823), False, 'import cv2\n')] |
import mne
import numpy as np
size = (600, 600)
renderer = mne.viz.backends.renderer.create_3d_figure(bgcolor='w', size=size, scene=False)
mne.viz.set_3d_backend('pyvista')
print("Creating image")
renderer.sphere((0, 0, 0), 'k', 1, resolution=1000)
renderer.plotter.camera.enable_parallel_projection(True)
renderer.figure.plotter.camera.SetParallelScale(1)
renderer.show()
data = (renderer.screenshot() / 255.).mean(-1) # colors
renderer.close()
print("Validating image")
want = np.ones(size)
dists = np.sqrt(
np.linspace(-1, 1, size[0])[:, np.newaxis] ** 2 +
np.linspace(-1, 1, size[1]) ** 2)
want = (dists > 0.5).astype(float)
corr = np.corrcoef(want.ravel(), data.ravel())[0, 1]
assert 0.99 <= corr <= 1
print("Tests passed!")
| [
"mne.viz.set_3d_backend",
"numpy.linspace",
"mne.viz.backends.renderer.create_3d_figure",
"numpy.ones"
] | [((59, 138), 'mne.viz.backends.renderer.create_3d_figure', 'mne.viz.backends.renderer.create_3d_figure', ([], {'bgcolor': '"""w"""', 'size': 'size', 'scene': '(False)'}), "(bgcolor='w', size=size, scene=False)\n", (101, 138), False, 'import mne\n'), ((139, 172), 'mne.viz.set_3d_backend', 'mne.viz.set_3d_backend', (['"""pyvista"""'], {}), "('pyvista')\n", (161, 172), False, 'import mne\n'), ((480, 493), 'numpy.ones', 'np.ones', (['size'], {}), '(size)\n', (487, 493), True, 'import numpy as np\n'), ((569, 596), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'size[1]'], {}), '(-1, 1, size[1])\n', (580, 596), True, 'import numpy as np\n'), ((515, 542), 'numpy.linspace', 'np.linspace', (['(-1)', '(1)', 'size[0]'], {}), '(-1, 1, size[0])\n', (526, 542), True, 'import numpy as np\n')] |
import pystan
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
from pystan_vb_extract import pystan_vb_extract
### Model Name ###
model_name = 'mix'
# model_name = 'mix-dp'
### Use variational inference? ###
# use_vb = True
use_vb = False
# Compile stan model, if needed. Otherwise, load model.
if os.path.exists('{}.pickle'.format(model_name)):
# Load model if it is cached.
sm = pickle.load(open('{}.pickle'.format(model_name), 'rb'))
else:
# compile model
sm = pystan.StanModel(file='{}.stan'.format(model_name))
# save model for later use.
with open('{}.pickle'.format(model_name), 'wb') as f:
pickle.dump(sm, f)
# Set random seed
np.random.seed(0)
# Simulate data
N = 500
y = np.random.randn(N) * .3 + 2
# y = np.random.randn(N) * .3 # doesn't work as well
K = 5
# Fit STAN model (NUTS)
data = dict(N=N, K=K, y=y, alpha=np.ones(K) / (10 * K),
m_mu=0, s_mu=.1, m_sig=1, s_sig=.5)
if model_name == 'mix-dp':
data['alpha'] = .5
if use_vb:
fit = sm.vb(data=data, iter=10000, seed=1)
else:
fit = sm.sampling(data=data, iter=1000, chains=1, seed=0)
# Extract samples
if use_vb:
samples = pystan_vb_extract(fit)
else:
samples = fit.extract()
# store params
mu = samples['mu']
sig = samples['sig']
w = samples['w']
print('mu: {}'.format(mu.mean(0)))
print('w: {}'.format(w.mean(0)))
print('sig: {}'.format(sig.mean()))
| [
"pickle.dump",
"numpy.ones",
"pystan_vb_extract.pystan_vb_extract",
"numpy.random.seed",
"numpy.random.randn"
] | [((699, 716), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (713, 716), True, 'import numpy as np\n'), ((1186, 1208), 'pystan_vb_extract.pystan_vb_extract', 'pystan_vb_extract', (['fit'], {}), '(fit)\n', (1203, 1208), False, 'from pystan_vb_extract import pystan_vb_extract\n'), ((660, 678), 'pickle.dump', 'pickle.dump', (['sm', 'f'], {}), '(sm, f)\n', (671, 678), False, 'import pickle\n'), ((746, 764), 'numpy.random.randn', 'np.random.randn', (['N'], {}), '(N)\n', (761, 764), True, 'import numpy as np\n'), ((892, 902), 'numpy.ones', 'np.ones', (['K'], {}), '(K)\n', (899, 902), True, 'import numpy as np\n')] |
import numpy as np
from test.util import generate_kernel_test_case
from webdnn.graph.graph import Graph
from webdnn.graph.operators.col2im import Col2Im
from webdnn.graph.order import OrderNHWC
from webdnn.graph.variable import Variable
def generate_data_311():
v_col = np.array([[[[[
[0, 0, 0],
[0, 1, 2],
[0, 3, 4],
], [
[0, 0, 0],
[1, 2, 0],
[3, 4, 0],
]], [[
[0, 1, 2],
[0, 3, 4],
[0, 0, 0],
], [
[1, 2, 0],
[3, 4, 0],
[0, 0, 0],
]]], [[[
[0, 0, 0],
[0, 5, 6],
[0, 7, 8],
], [
[0, 0, 0],
[5, 6, 0],
[7, 8, 0],
]], [[
[0, 5, 6],
[0, 7, 8],
[0, 0, 0],
], [
[5, 6, 0],
[7, 8, 0],
[0, 0, 0],
]]]]]).astype(np.float) # Order: (N, C, H, W, KH, KW)
v_col = np.rollaxis(v_col, 1, 6).reshape(1, 2, 2, 3 * 3 * 2) # Order: NHWC
v_col /= 4
v_im = np.array([[[
[1, 2],
[3, 4]
], [
[5, 6],
[7, 8]
]]]).astype(np.float) # Order: NCHW
v_im = np.rollaxis(v_im, 1, 4) # Order: NHWC
return v_im, v_col
def generate_data_212():
v_col = np.array([[[[[
[0, 0],
[0, 1]
], [
[0, 0],
[2, 0]
]], [[
[0, 3],
[0, 0],
], [
[4, 0],
[0, 0]
]]], [[[
[0, 0],
[0, 5]
], [
[0, 0],
[6, 0]
]], [[
[0, 7],
[0, 0]
], [
[8, 0],
[0, 0],
]]]]]).astype(np.float) # Order: (N, C, H, W, KH, KW)
v_col = np.rollaxis(v_col, 1, 6).reshape(1, 2, 2, 2 * 2 * 2) # Order: NHWC
v_im = np.array([[[
[1, 2],
[3, 4]
], [
[5, 6],
[7, 8]
]]]).astype(np.float) # Order: NCHW
v_im = np.rollaxis(v_im, 1, 4) # Order: NHWC
return v_im, v_col
def test_NHWC():
v_im, v_col = generate_data_311()
col = Variable(v_col.shape, order=OrderNHWC)
im, = Col2Im(None, ksize=3, padding=1, stride=1)(col)
im.change_order(OrderNHWC)
generate_kernel_test_case(
description=f"Col2Im output=NHWC",
backend=["webgpu", "webgl", "webassembly"],
graph=Graph([col], [im]),
inputs={col: v_col},
expected={im: v_im}
)
def test_wide_stride_NHWC():
v_im, v_col = generate_data_212()
col = Variable(v_col.shape, order=OrderNHWC)
im, = Col2Im(None, ksize=2, padding=1, stride=2)(col)
generate_kernel_test_case(
description=f"Col2Im output=NHWC stride=2",
backend=["webgpu", "webgl", "webassembly"],
graph=Graph([col], [im]),
inputs={col: v_col},
expected={im: v_im}
)
| [
"webdnn.graph.variable.Variable",
"numpy.rollaxis",
"numpy.array",
"webdnn.graph.operators.col2im.Col2Im",
"webdnn.graph.graph.Graph"
] | [((1121, 1144), 'numpy.rollaxis', 'np.rollaxis', (['v_im', '(1)', '(4)'], {}), '(v_im, 1, 4)\n', (1132, 1144), True, 'import numpy as np\n'), ((1846, 1869), 'numpy.rollaxis', 'np.rollaxis', (['v_im', '(1)', '(4)'], {}), '(v_im, 1, 4)\n', (1857, 1869), True, 'import numpy as np\n'), ((1977, 2015), 'webdnn.graph.variable.Variable', 'Variable', (['v_col.shape'], {'order': 'OrderNHWC'}), '(v_col.shape, order=OrderNHWC)\n', (1985, 2015), False, 'from webdnn.graph.variable import Variable\n'), ((2410, 2448), 'webdnn.graph.variable.Variable', 'Variable', (['v_col.shape'], {'order': 'OrderNHWC'}), '(v_col.shape, order=OrderNHWC)\n', (2418, 2448), False, 'from webdnn.graph.variable import Variable\n'), ((2027, 2069), 'webdnn.graph.operators.col2im.Col2Im', 'Col2Im', (['None'], {'ksize': '(3)', 'padding': '(1)', 'stride': '(1)'}), '(None, ksize=3, padding=1, stride=1)\n', (2033, 2069), False, 'from webdnn.graph.operators.col2im import Col2Im\n'), ((2460, 2502), 'webdnn.graph.operators.col2im.Col2Im', 'Col2Im', (['None'], {'ksize': '(2)', 'padding': '(1)', 'stride': '(2)'}), '(None, ksize=2, padding=1, stride=2)\n', (2466, 2502), False, 'from webdnn.graph.operators.col2im import Col2Im\n'), ((277, 599), 'numpy.array', 'np.array', (['[[[[[[0, 0, 0], [0, 1, 2], [0, 3, 4]], [[0, 0, 0], [1, 2, 0], [3, 4, 0]]],\n [[[0, 1, 2], [0, 3, 4], [0, 0, 0]], [[1, 2, 0], [3, 4, 0], [0, 0, 0]]]],\n [[[[0, 0, 0], [0, 5, 6], [0, 7, 8]], [[0, 0, 0], [5, 6, 0], [7, 8, 0]]],\n [[[0, 5, 6], [0, 7, 8], [0, 0, 0]], [[5, 6, 0], [7, 8, 0], [0, 0, 0]]]]]]'], {}), '([[[[[[0, 0, 0], [0, 1, 2], [0, 3, 4]], [[0, 0, 0], [1, 2, 0], [3, \n 4, 0]]], [[[0, 1, 2], [0, 3, 4], [0, 0, 0]], [[1, 2, 0], [3, 4, 0], [0,\n 0, 0]]]], [[[[0, 0, 0], [0, 5, 6], [0, 7, 8]], [[0, 0, 0], [5, 6, 0], [\n 7, 8, 0]]], [[[0, 5, 6], [0, 7, 8], [0, 0, 0]], [[5, 6, 0], [7, 8, 0],\n [0, 0, 0]]]]]])\n', (285, 599), True, 'import numpy as np\n'), ((890, 914), 'numpy.rollaxis', 'np.rollaxis', (['v_col', '(1)', '(6)'], {}), '(v_col, 1, 
6)\n', (901, 914), True, 'import numpy as np\n'), ((985, 1033), 'numpy.array', 'np.array', (['[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]'], {}), '([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]])\n', (993, 1033), True, 'import numpy as np\n'), ((1223, 1400), 'numpy.array', 'np.array', (['[[[[[[0, 0], [0, 1]], [[0, 0], [2, 0]]], [[[0, 3], [0, 0]], [[4, 0], [0, 0]\n ]]], [[[[0, 0], [0, 5]], [[0, 0], [6, 0]]], [[[0, 7], [0, 0]], [[8, 0],\n [0, 0]]]]]]'], {}), '([[[[[[0, 0], [0, 1]], [[0, 0], [2, 0]]], [[[0, 3], [0, 0]], [[4, 0\n ], [0, 0]]]], [[[[0, 0], [0, 5]], [[0, 0], [6, 0]]], [[[0, 7], [0, 0]],\n [[8, 0], [0, 0]]]]]])\n', (1231, 1400), True, 'import numpy as np\n'), ((1630, 1654), 'numpy.rollaxis', 'np.rollaxis', (['v_col', '(1)', '(6)'], {}), '(v_col, 1, 6)\n', (1641, 1654), True, 'import numpy as np\n'), ((1710, 1758), 'numpy.array', 'np.array', (['[[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]]'], {}), '([[[[1, 2], [3, 4]], [[5, 6], [7, 8]]]])\n', (1718, 1758), True, 'import numpy as np\n'), ((2247, 2265), 'webdnn.graph.graph.Graph', 'Graph', (['[col]', '[im]'], {}), '([col], [im])\n', (2252, 2265), False, 'from webdnn.graph.graph import Graph\n'), ((2658, 2676), 'webdnn.graph.graph.Graph', 'Graph', (['[col]', '[im]'], {}), '([col], [im])\n', (2663, 2676), False, 'from webdnn.graph.graph import Graph\n')] |
# -*- coding: utf-8 -*-
"""DECONVOLUTION FILE INPUT/OUTPUT
This module defines methods for file input and output for
deconvolution_script.py.
:Author: <NAME> <<EMAIL>>
:Version: 1.0
:Date: 13/03/2017
"""
import numpy as np
from os.path import splitext
from astropy.io import fits
from .types import check_npndarray
def check_data_format(data, n_dim):
"""Check data format
This method checks that the input data has the correct number of dimensions
Parameters
----------
data : np.ndarray
Input data array
n_dim : int or list of ints
Expected number of dimensions
Raises
------
ValueError
For invalid array dimensions
"""
check_npndarray(data, dtype=float, writeable=False, verbose=False)
if data.ndim not in list(n_dim):
raise ValueError('Input data array has an invalid number of '
'dimensions.')
def read_from_fits(file_name):
"""Read FITS file
This method reads image array data from a FITS file.
Parameters
----------
file_name : str
Name of file with path
Returns
-------
np.ndarray array of image data
"""
return fits.getdata(file_name)
def write_to_fits(file_name, data):
"""Write FITS file
This method writes the output image array data to a FITS file.
Parameters
----------
file_name : str
Name of file with path
data : np.ndarray
Image data array
"""
fits.PrimaryHDU(data).writeto(file_name)
def read_file(file_name):
"""Read file
This method reads image array data from a file.
Parameters
----------
file_name : str
Name of file with path
Returns
-------
np.ndarray array of image data
Raises
------
ValueError
For invalid file extension
"""
if file_name.endswith('.npy'):
data = np.load(file_name)
elif file_name.endswith(('.fits', '.fit', '.FITS', '.FIT', '.mr')):
data = read_from_fits(file_name)
else:
raise ValueError(('Invalid file extension [{}]. Files must be FITS or .mr or '
'numpy binary.').format(splitext(file_name)[-1]))
check_data_format(data, [2, 3])
return data
def read_input_files(data_file_name, psf_file_name, current_file_name=None):
"""Read input files
This method reads image array data from the specified input files.
Parameters
----------
data_file_name : str
Name of file with path for the noisy image data
psf_file_name : str
Name of file with path for the PSF image data
current_file_name : str, optional
Name of file with path for the current results
Returns
-------
tuple of np.ndarray arrays of image data
Raises
------
ValueError
If number of noisy images less than the number of PSFs
ValueError
If the shape of the current results does not match the input data
"""
input_data = read_file(data_file_name)
if input_data.ndim == 2:
input_data = input_data.reshape(1, *input_data.shape)
psf_data = read_file(psf_file_name)
if psf_data.ndim == 3 and input_data.shape[0] < psf_data.shape[0]:
raise ValueError('The number of input images must be greater than or '
'or equal to the number of PSF images.')
if not isinstance(current_file_name, type(None)):
current_data = read_file(current_file_name)
if current_data.shape != input_data.shape:
raise ValueError('The number of current rescontruction images '
'must match the number of input images.')
else:
current_data = None
return input_data, psf_data, current_data
def write_output_files(output_file_name, primal_res, dual_res=None,
psf_res=None, output_format='npy'):
"""Write output files
This method writes the image data results to the specified output file(s)
Parameters
----------
output_file_name : str
Name of file with path for the output data
primal_res : np.ndarray
Array of primal output results
dual_res : np.ndarray, optional
Array of dual output results
psf_res : np.ndarray, optional
Array of PSF output results
output_format : str, optional
Output file format (numpy binary or FITS)
"""
if output_format == 'fits':
write_to_fits(output_file_name + '_primal.fits', primal_res)
if not isinstance(dual_res, type(None)):
write_to_fits(output_file_name + '_dual.fits', dual_res)
if not isinstance(psf_res, type(None)):
write_to_fits(output_file_name + '_psf.fits', psf_res)
else:
np.save(output_file_name + '_primal', primal_res)
if not isinstance(dual_res, type(None)):
np.save(output_file_name + '_dual', dual_res)
if not isinstance(psf_res, type(None)):
np.save(output_file_name + '_psf', psf_res)
| [
"astropy.io.fits.PrimaryHDU",
"os.path.splitext",
"astropy.io.fits.getdata",
"numpy.load",
"numpy.save"
] | [((1194, 1217), 'astropy.io.fits.getdata', 'fits.getdata', (['file_name'], {}), '(file_name)\n', (1206, 1217), False, 'from astropy.io import fits\n'), ((1904, 1922), 'numpy.load', 'np.load', (['file_name'], {}), '(file_name)\n', (1911, 1922), True, 'import numpy as np\n'), ((4783, 4832), 'numpy.save', 'np.save', (["(output_file_name + '_primal')", 'primal_res'], {}), "(output_file_name + '_primal', primal_res)\n", (4790, 4832), True, 'import numpy as np\n'), ((1490, 1511), 'astropy.io.fits.PrimaryHDU', 'fits.PrimaryHDU', (['data'], {}), '(data)\n', (1505, 1511), False, 'from astropy.io import fits\n'), ((4895, 4940), 'numpy.save', 'np.save', (["(output_file_name + '_dual')", 'dual_res'], {}), "(output_file_name + '_dual', dual_res)\n", (4902, 4940), True, 'import numpy as np\n'), ((5002, 5045), 'numpy.save', 'np.save', (["(output_file_name + '_psf')", 'psf_res'], {}), "(output_file_name + '_psf', psf_res)\n", (5009, 5045), True, 'import numpy as np\n'), ((2186, 2205), 'os.path.splitext', 'splitext', (['file_name'], {}), '(file_name)\n', (2194, 2205), False, 'from os.path import splitext\n')] |
import cv2
import gc
import numpy as np
from ctypes import *
__all__ = ['darknet_resize']
class IMAGE(Structure):
    # ctypes mirror of darknet's C `image` struct: width, height, channel
    # count, and a float* pointing at the CHW pixel buffer.
    _fields_ = [("w", c_int),
                ("h", c_int),
                ("c", c_int),
                ("data", POINTER(c_float))]
# Load the darknet shared library and declare the C signatures of the two
# functions this module calls (resize_image allocates; free_image releases).
lib = CDLL("darknet/libdarknet.so", RTLD_GLOBAL)
resize_image = lib.resize_image
resize_image.argtypes = [IMAGE, c_int, c_int]
resize_image.restype = IMAGE
free_image = lib.free_image
free_image.argtypes = [IMAGE]
@profile
def array_to_image(arr):
    """Wrap an HWC numpy array as a darknet IMAGE that shares its memory.

    Returns (image, backing_array). The caller must keep backing_array
    alive while the IMAGE is in use, because the IMAGE's data pointer
    refers into that array's buffer (no copy is made by ctypes here).
    """
    chw = arr.transpose(2, 0, 1)
    c, h, w = chw.shape[:3]
    backing = np.ascontiguousarray(chw.flat, dtype=np.float32)
    ptr = backing.ctypes.data_as(POINTER(c_float))
    return IMAGE(w, h, c, ptr), backing
@profile
def image_to_array(im, shape):
    """View a darknet IMAGE's pixel buffer as an HWC numpy array (zero copy).

    shape is (h, w, c). The returned array is a view over C-owned memory;
    Python will not free that buffer.
    """
    h, w, c = shape[0], shape[1], shape[2]
    chw = np.ctypeslib.as_array(im.data, shape=(c, h, w))
    return chw.transpose((1, 2, 0))
@profile
def darknet_resize(im, shape):
    """Resize an HWC image using darknet's C resize_image.

    Parameters
    ----------
    im : np.ndarray -- HWC image to resize
    shape : tuple -- target (h, w, c)

    Returns
    -------
    np.ndarray -- the resized image, owning its own memory.

    Bug fix: image_to_array returns a zero-copy *view* into the C buffer,
    and the original code freed that buffer with free_image before
    returning -- a use-after-free. We now copy the data out first.
    """
    image, _ = array_to_image(im)
    resized = resize_image(image, shape[1], shape[0])
    # Copy out of the C buffer BEFORE freeing it; otherwise the returned
    # array would be a dangling view into memory released by free_image.
    resized_np = image_to_array(resized, shape).copy()
    free_image(resized)
    return resized_np
@profile
def test_darknet_resize():
    # Deliberately unrolled: three independent load/resize/free cycles so
    # memory_profiler's line-by-line report exposes any per-call growth.
    image_path = 'darknet/data/dog.jpg'
    a = cv2.imread(image_path)
    ar = darknet_resize(a, (416, 416, 3))
    del a
    del ar
    gc.collect()
    b = cv2.imread(image_path)
    br = darknet_resize(b, (416, 416, 3))
    del b
    del br
    gc.collect()
    c = cv2.imread(image_path)
    cr = darknet_resize(c, (416, 416, 3))
    del c
    del cr
    gc.collect()
    """
    image_resize_cv2 = cv2.resize(image, (416, 416), interpolation=cv2.INTER_LINEAR)
    print(image_resize_cv2.shape)
    """
"""
python3 -m memory_profiler models/darknet_utils.py
"""
if __name__ == '__main__':
    test_darknet_resize()
| [
"cv2.imread",
"numpy.ctypeslib.as_array",
"gc.collect",
"numpy.ascontiguousarray"
] | [((595, 643), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['arr.flat'], {'dtype': 'np.float32'}), '(arr.flat, dtype=np.float32)\n', (615, 643), True, 'import numpy as np\n'), ((846, 914), 'numpy.ctypeslib.as_array', 'np.ctypeslib.as_array', (['im.data'], {'shape': '(shape[2], shape[0], shape[1])'}), '(im.data, shape=(shape[2], shape[0], shape[1]))\n', (867, 914), True, 'import numpy as np\n'), ((1321, 1343), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1331, 1343), False, 'import cv2\n'), ((1411, 1423), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1421, 1423), False, 'import gc\n'), ((1433, 1455), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1443, 1455), False, 'import cv2\n'), ((1523, 1535), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1533, 1535), False, 'import gc\n'), ((1545, 1567), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (1555, 1567), False, 'import cv2\n'), ((1635, 1647), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1645, 1647), False, 'import gc\n')] |
import numpy as np
import os
''' This is a simple script to collect all of the file outputs from individual CorrCal runs (saved in an `output_runs' directory), and create a 2D numpy array (full_runs.npy)
containing all recovered gains.'''
data_path = '/data/zahrakad/hirax_corrcal/output_runs/'
# Load every run's result once, flatten each to 1-D, and stack them into an
# (n_runs, n_values) array. This replaces the original quadratic
# np.append-in-a-loop pattern, and removes the reshape that relied on the
# loop variable leaking out of the for statement.
run_results = [np.load(os.path.join(data_path, fname)).ravel()
               for fname in os.listdir(data_path)]
full_runs_output = np.stack(run_results)
np.save('/data/zahrakad/hirax_corrcal/full_runs.npy', full_runs_output)
| [
"os.listdir",
"os.path.join",
"numpy.append",
"numpy.array",
"numpy.save"
] | [((260, 272), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (268, 272), True, 'import numpy as np\n'), ((606, 677), 'numpy.save', 'np.save', (['"""/data/zahrakad/hirax_corrcal/full_runs.npy"""', 'full_runs_output'], {}), "('/data/zahrakad/hirax_corrcal/full_runs.npy', full_runs_output)\n", (613, 677), True, 'import numpy as np\n'), ((478, 526), 'numpy.append', 'np.append', (['full_runs_output', 'inf_from_every_file'], {}), '(full_runs_output, inf_from_every_file)\n', (487, 526), True, 'import numpy as np\n'), ((352, 373), 'os.listdir', 'os.listdir', (['data_path'], {}), '(data_path)\n', (362, 373), False, 'import os\n'), ((425, 454), 'os.path.join', 'os.path.join', (['data_path', 'data'], {}), '(data_path, data)\n', (437, 454), False, 'import os\n')] |
from collections import Counter, defaultdict, OrderedDict
from sklearn.neighbors.kde import KernelDensity
import itertools
import numpy as np
import os
import pysam
import random as rnd
import sys
import matplotlib
matplotlib.use('Agg') # required if X11 display is not present
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from arcsv.constants import CIGAR_SOFT_CLIP
from arcsv.conditional_mappable_model import process_aggregate_mapstats
from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, \
add_time_checkpoint, normpdf, is_read_through, len_without_gaps
from arcsv.invertedreads import get_inverted_pair
from arcsv.pecluster import process_discordant_pair
from arcsv.softclip import process_softclip
from arcsv.splitreads import parse_splits, splits_are_mirrored
def extract_approximate_library_stats(opts, bam, rough_insert_median):
    """Estimate per-library insert-size statistics from random genome chunks.

    Samples random windows of the chromosome (each 10x the maximum plausible
    insert size) and accumulates insert lengths and read lengths from pairs
    fully contained in a window, until `approx_stats_nreads` pairs are seen.

    Returns a tuple:
        (insert_mean, insert_sd, insert_pmf, insert_lower, insert_upper,
         rlen_medians)
    each a per-library list; rlen_medians pairs (shorter, longer) medians.
    """
    reads_per_chunk = int(np.floor(opts['approx_stats_nreads'] / opts['approx_stats_nchunks']))
    # lib_patterns, lib_stats = parse_library_stats(meta)
    # maps read groups matching lib_patterns to indices in lib_stats
    # lib_dict = {}
    # MULTILIB
    nlib = opts['nlib']
    insert_len = [[] for i in range(nlib)]
    read_len_shorter = [[] for i in range(nlib)]
    read_len_longer = [[] for i in range(nlib)]
    chrom_name = opts['chromosome']
    chrom_size = get_chrom_size_from_bam(chrom_name, bam)
    # window is much larger than any insert so mate-in-window conditioning
    # (below) introduces only negligible bias
    chunk_size = 10 * opts['insert_max_mu_multiple'] * rough_insert_median
    rough_insert_max = opts['insert_max_mu_multiple'] * rough_insert_median
    reads_processed = [0 for i in range(nlib)]
    chunks_processed = 0
    # MINOR reads_per_chunk should mean completed
    while min(reads_processed) < opts['approx_stats_nreads']:
        # extract random chunk
        start = np.random.randint(0, chrom_size - chunk_size)
        end = start + chunk_size
        # parse reads
        seen_aln = {}
        chunk_reads_seen = 0
        alns = list(bam.fetch_unsorted(chrom_name, start, end))
        if bam.num_bam > 1:
            alns.sort(key=lambda a: a.pos)
        for aln in list(bam.fetch_unsorted(chrom_name, start, end)):
            # conditioning on mate position introduces slight bias,
            # but insignificant if chunk_size >> insert size
            if not_primary(aln) or aln.is_duplicate or aln.is_unmapped or \
               aln.mpos < start or aln.mpos >= end or aln.mate_is_unmapped:
                continue
            if aln.qname not in seen_aln:
                if chunk_reads_seen < reads_per_chunk:
                    seen_aln[aln.qname] = aln
                    chunk_reads_seen += 1
                    continue
                else:
                    continue
            # pair completed
            mate = seen_aln[aln.qname]
            pair = (aln, mate)
            del seen_aln[aln.qname]
            lib_idx = 0  # get_lib_idx(aln.get_tag('RG'), lib_dict, lib_patterns)
            process_insert_len(pair, insert_len[lib_idx], opts['min_mapq_reads'],
                               opts['read_len'], maximum_insert_size=rough_insert_max)
            process_read_len(pair, read_len_shorter[lib_idx], read_len_longer[lib_idx])
            reads_processed[lib_idx] += 1
            if min(reads_processed) % 200000 == 0 and opts['verbosity'] > 0:
                print('[library_stats] processed {0} reads ({1} chunks) for each lib'.
                      format(min(reads_processed), chunks_processed))
        chunks_processed += 1
    # summary statistics per library; 0.15/99.85 percentiles correspond to
    # +/- ~3 sigma under a normal distribution
    insert_mean = [np.median(il) for il in insert_len]
    insert_sd = [robust_sd(il) for il in insert_len]
    insert_lower = [np.percentile(il, 0.15) for il in insert_len]
    insert_upper = [np.percentile(il, 99.85) for il in insert_len]
    insert_pmf = [pmf_kernel_smooth(il, 0, opts['insert_max_mu_multiple'] * mu,
                                    opts['max_kde_samples'])
                  for (il, mu) in zip(insert_len, insert_mean)]
    rlen_short = [round(np.median(rl)) for rl in read_len_shorter]
    rlen_long = [round(np.median(rl)) for rl in read_len_longer]
    rlen_medians = list(zip(rlen_short, rlen_long))
    return insert_mean, insert_sd, insert_pmf, insert_lower, insert_upper, rlen_medians
# parse a single bam file, extracting breakpoints,
# insert size distribution, and/or visualization tracks in bed/bigwig format
def parse_bam(opts, reference_files, bamfiles):
    """Single pass over the BAM(s): extract SV evidence and library stats.

    Estimates insert-size distributions and concordance cutoffs, then
    streams all alignments in the target region, pairing mates by qname
    and collecting soft clips, split reads, mapping stats, and (optionally)
    discordant pairs. Also writes per-library insert pmfs and plots, and
    stores coverage estimates back into `opts` (side effect).

    Returns a 10-tuple:
        (softclips, splits, mapstats, rlen_medians, insert_len_dist,
         insert_mean, insert_sd,
         discordant_pairs, min_concordant_insert, max_concordant_insert)
    where the last three are None when opts['do_pecluster'] is False.
    """
    chrom_name = opts['chromosome']
    start, end = opts['region_start'], opts['region_end']
    outdir = opts['outdir']
    min_mapq_reads = opts['min_mapq_reads']
    nlib = opts['nlib']  # MULTILIB
    # lib_patterns, lib_stats = parse_library_stats(meta)
    # lib_dict = {}
    bam = BamGroup(bamfiles)
    opts['read_len'] = bam_read_len(bam)
    # bam_has_unmapped = has_unmapped_records(bam)
    # if opts['verbosity'] > 0:
    #     if bam_has_unmapped:
    #         print('[parse_bam] bam file DOES contain unmapped records')
    #     else:
    #         print('[parse_bam] bam file DOES NOT contain unmapped records')
    if opts['verbosity'] > 0:
        print('\n[parse_bam] extracting approximate library stats')
    rough_insert_median = get_rough_insert_median(opts, bam)
    if opts['verbosity'] > 0:
        print('[parse_bam] read_len: {0}; rough_insert_median: {1}'.
              format(opts['read_len'], rough_insert_median))
    als = extract_approximate_library_stats(opts, bam, rough_insert_median)
    mean_approx, sd_approx, pmf_approx, qlower, qupper, rlen_medians = als
    # dump each library's estimated insert pmf for logging/debugging
    for i in range(len(pmf_approx)):
        with open(os.path.join(outdir, 'logging', '{0}_insert_pmf.txt'
                               .format(opts['library_names'][i])), 'w') as f:
            for j in range(len(pmf_approx[i])):
                f.write('{0}\t{1}\n'.format(j, pmf_approx[i][j]))
    if opts['verbosity'] > 0:
        print('[parse_bam] library stats:\n\tmu = {0}\n\tsigma = {1}'
              .format(mean_approx, sd_approx))
    add_time_checkpoint(opts, 'lib. stats')

    def get_lr_cutoff(opts, pmf, do_min=False):
        # Concordant-insert cutoff via likelihood ratio against the pmf mode:
        # the threshold equals the density drop of a normal at
        # opts['insert_cutoff'] sigmas. do_min scans up from the left for the
        # lower cutoff; otherwise scans down from the right for the upper.
        cutoff_normal_equivalent = opts['insert_cutoff']
        lr_cutoff = normpdf(0) - normpdf(cutoff_normal_equivalent)
        mode = max(pmf)
        logmode = np.log(mode)
        which_mode = [i for i in range(len(pmf)) if pmf[i] == mode]
        cutoff = None
        if do_min:
            for i in range(1, len(pmf)):
                if pmf[i] != 0 and logmode - np.log(pmf[i]) < lr_cutoff:
                    cutoff = i - 1
                    break
        else:
            for i in range(len(pmf) - 2, -1, -1):
                if pmf[i] != 0 and logmode - np.log(pmf[i]) < lr_cutoff:
                    cutoff = i + 1
                    break
        if opts['verbosity'] > 0:
            print('[insert_cutoff] lr_cutoff is {0}'.format(lr_cutoff))
            print('[insert_cutoff] mode (log) {0} at {1}'.format(logmode, which_mode))
            print('[insert_cutoff] cutoff ratio (log) {0} at {1}'.
                  format(logmode - np.log(pmf[i]), cutoff))
        return cutoff
    min_concordant_insert = [get_lr_cutoff(opts, pmf, do_min=True) for pmf in pmf_approx]
    max_concordant_insert = [get_lr_cutoff(opts, pmf) for pmf in pmf_approx]
    if opts['verbosity'] > 0:
        print('[parse_bam] insert size cutoffs:')
        print('[parse_bam]' + '\n'
              .join(['{0}-{1}'.format(min_concordant_insert[i], max_concordant_insert[i])
                     for i in range(len(mean_approx))]))
        print('[parse_bam] equivalent to mu +/- 3 sigma in normal:\n\t{0}\n\t{1}\n'
              .format(qlower, qupper))
    # main pass: pair mates by qname as they stream by
    seen_aln = {}
    nreads, npairs = 0, 0
    num_read_through = 0
    insert_len = [[] for i in range(nlib)]
    softclips = [(defaultdict(list), defaultdict(list)) for i in range(nlib)]
    splits = [[] for i in range(nlib)]
    if opts['do_pecluster']:
        discordant_pairs = [OrderedDict() for i in range(nlib)]
    if not opts['use_mate_tags']:  # need to estimate mappability proportions
        mapstats = [defaultdict(int) for i in range(nlib)]
    else:
        mapstats = None
    if opts['verbosity'] > 0:
        print('[parse_bam] starting alignment parsing. . .')
    alignments = bam.fetch_unsorted(chrom_name, start, end)
    for aln in alignments:
        if not_primary(aln) or aln.is_unmapped or aln.is_duplicate:
            continue
        nreads += 1
        if opts['verbosity'] > 0 and nreads % (1000000) == 0:
            print('[parse_bam] %d reads processed' % nreads)
        # TODO this can be done cleaner -- check for is_unmapped above
        # and use handle_unpaired for everything with mate_is_unmapped
        if aln.qname not in seen_aln:
            # read is not going to pair, so handle now
            if aln.mate_is_unmapped or aln.rname != aln.mrnm:
                handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats)
            # waiting for this read's pair
            else:
                seen_aln[aln.qname] = aln
            continue
        # Completed a pair!
        npairs += 1
        mate = seen_aln[aln.qname]
        pair = (aln, mate)
        del seen_aln[aln.qname]
        if opts['filter_read_through'] and is_read_through(opts, pair):
            num_read_through += 1
            continue
        # MULTILIB
        lib_idx = 0
        # handle softclip information, insert len, mapping stats, splits/discordants
        if not opts['use_mate_tags']:
            process_aggregate_mapstats(pair, mapstats[lib_idx],
                                       min_mapq_reads, opts['max_pair_distance'])
        ilen = process_insert_len(pair, insert_len[lib_idx],
                                  opts['min_mapq_reads'], opts['read_len'])
        if opts['do_pecluster']:
            process_discordant_pair(pair[0], pair[1], chrom_name,
                                    discordant_pairs[lib_idx], min_mapq_reads,
                                    ilen, min_concordant_insert[lib_idx],
                                    max_concordant_insert[lib_idx],
                                    opts['library_is_rf'])
        if any(op == CIGAR_SOFT_CLIP for (op, oplen) in
               itertools.chain(aln.cigartuples, mate.cigartuples)):
            if opts['do_splits']:
                a1_split = process_splits(pair[0], splits[lib_idx],
                                          bam, min_mapq=min_mapq_reads,
                                          mate=pair[1])
                a2_split = process_splits(pair[1], splits[lib_idx],
                                          bam, min_mapq=min_mapq_reads,
                                          mate=pair[0])
            else:
                a1_split, a2_split = False, False
            # if we found the same breakpoint in both reads,
            # it's quite likely that the reads were overlapping due to a short insert
            if a1_split and a2_split and splits_are_mirrored(splits[lib_idx][-1],
                                                             splits[lib_idx][-2]):
                if opts['verbosity'] > 1:
                    print('[bamparser] mirrored split: {0} {1} {2}'.
                          format(chrom_name, splits[lib_idx][-1].bp2, pair[0].qname))
                del splits[lib_idx][-1]
            process_softclip(opts, pair, (a1_split, a2_split), softclips[lib_idx], lib_idx)
    # handle unpaired reads
    if opts['verbosity'] > 0:
        print('[parse_bam] handling unpaired reads')
    for aln in seen_aln.values():
        handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats)
    if any(len(ins) == 0 for ins in insert_len):  # MULTILIB should only fail if all()
        print('Error: region specified contains no reads!')
        sys.exit(1)
    # report stats
    if opts['verbosity'] > 0:
        print('[parse_bam] processed a total of {0} reads'.format(nreads))
        if opts['filter_read_through']:
            print('[parse_bam] found {0} read-through pairs out of {1} total'
                  .format(num_read_through, npairs))
    add_time_checkpoint(opts, 'parse bam')
    # compute insert length distributions and save plots
    if opts['verbosity'] > 1:
        print('[parse_bam] observed insert size min:')
        print('\n'.join([str(min(insert_len[i])) for i in range(nlib)]))
        print('\n'.join([str(Counter(sorted(insert_len[i]))) for i in range(nlib)]))
        print('[parse_bam] insert 25-50-75 percentiles by library:')
        percentiles = [np.percentile(ins, (25, 50, 75)) for ins in insert_len]
        print(''.join(['{0}: {1}\n'.
                       format(opts['library_names'][l], tuple(percentiles[l]))
                       for l in range(nlib)]))
    if opts['verbosity'] > 0:
        print('[parse_bam] computing insert length pmfs')
    insert_mean = [np.median(il) for il in insert_len]
    insert_sd = [robust_sd(il) for il in insert_len]
    max_mult = opts['insert_max_mu_multiple']
    insert_len_dist = [pmf_kernel_smooth(insert_len[i], 0,
                                         max_mult * mu, opts['max_kde_samples'])
                       for (i, mu) in zip(range(nlib), insert_mean)]
    if opts['verbosity'] > 1:
        for i in range(nlib):
            print('[parse_bam] lib {0} mu {1} sigma {2}'
                  .format(i, insert_mean[i], insert_sd[i]))
    # insert dist plots
    plot_insert_dist(opts, insert_len_dist, outdir)
    # compute average coverage
    # MULTILIB this needs adjusting -- keeping track of nreads from each bamgroup
    region_len = len_without_gaps(chrom_name, start, end, reference_files['gap'])
    opts['seq_coverage'] = [nreads * opts['read_len'] / (nlib * region_len) for _ in range(nlib)]
    opts['phys_coverage'] = [npairs * m / region_len for m in insert_mean]
    opts['max_pecluster_size'] = [pc * opts['pecluster_size_coverage_ratio']
                                  for pc in opts['phys_coverage']]
    if opts['verbosity'] > 0:
        print('[parse_bam] average sequence coverage: %.1fx' % opts['seq_coverage'][0])
        print('[parse_bam] average physical coverage: %.1fx' % opts['phys_coverage'][0])
    if opts['do_pecluster']:
        return (softclips, splits, mapstats, rlen_medians, insert_len_dist,
                insert_mean, insert_sd,
                discordant_pairs, min_concordant_insert, max_concordant_insert)
    else:
        return (softclips, splits, mapstats, rlen_medians, insert_len_dist,
                insert_mean, insert_sd,
                None, None, None)
def process_coverage(aln, coverage):
    """Increment per-base coverage counts at every reference position aln covers."""
    for pos in aln.get_reference_positions():
        coverage[pos] = coverage[pos] + 1
def process_inverted(pair, inverted_pairs, bam):
    """Collect a same-orientation ("inverted") pair; see invertedreads.py.

    Appends the pair record and returns 1 when both mates share a strand;
    returns 0 otherwise.
    """
    if pair[0].is_reverse != pair[1].is_reverse:
        return 0
    inverted_pairs.append(get_inverted_pair(pair, bam))
    return 1
def process_hanging(anchor_aln, hanging_plus, hanging_minus):
    """Record the anchor position of a hanging pair by anchor strand.

    Minus-strand anchors contribute their reference end; plus-strand
    anchors contribute their reference start.
    """
    if anchor_aln.is_reverse:
        hanging_minus.add(anchor_aln.reference_end)
    else:
        hanging_plus.add(anchor_aln.reference_start)
def process_splits(aln, splits, bam, min_mapq, mate):
    """Append the parsed split-read record for aln, if one exists.

    Returns 1 when a split was found and recorded, 0 otherwise.
    """
    record = parse_splits(aln, bam, min_mapq, mate)
    if record is None:
        return 0
    splits.append(record)
    return 1
# Input: pair of reads on the same chromosome
# Output: none if read pair invalid (mapq or orientation), else insert length
# Side effects: adds to len_array (checking truncate = True)
def process_insert_len(pair, len_array, min_mapq, read_len,
                       truncate=True, maximum_insert_size=np.inf,
                       lib_is_rf=False, lib_insert_is_inner=False):
    """Compute the insert length of a same-chromosome read pair.

    Parameters
    ----------
    pair : tuple
        The two mates of one pair (pysam-like alignment records).
    len_array : list
        Valid insert lengths are appended here (side effect).
    min_mapq : int
        Both mates must have mapq >= min_mapq.
    read_len : int
        Nominal untrimmed read length; 0 disables the trimming adjustment.
    truncate : bool
        If True, only lengths in [0, maximum_insert_size] are appended.
    maximum_insert_size : float
        Upper bound used when truncate is True. (Default changed from the
        ``np.Inf`` alias, removed in NumPy 2.0, to ``np.inf`` -- same value.)
    lib_is_rf : bool
        Library has reverse-forward (mate-pair) orientation.
    lib_insert_is_inner : bool
        Insert is measured between the inner edges of the two reads.

    Returns
    -------
    int or None
        The computed insert length (returned even when not appended), or
        None for invalid pairs (same strand, or mapq below min_mapq).
    """
    # if not fully_aligned(pair[0]) or \
    #    not fully_aligned(pair[1]) or \
    if pair[0].is_reverse == pair[1].is_reverse or \
       min(pair[0].mapq, pair[1].mapq) < min_mapq:
        return None
    which_minus = 0 if pair[0].is_reverse else 1
    which_first = which_minus if lib_is_rf else (1 - which_minus)
    which_last = 1 - which_first
    if lib_insert_is_inner:
        ilen = pair[which_last].reference_start - pair[which_first].reference_end
    else:
        ilen = pair[which_last].reference_end - pair[which_first].reference_start
    # adjust for read trimming
    if read_len != 0:
        ilen += 2 * read_len - pair[0].query_length - pair[1].query_length
    # adjust for soft-clipping of 5' end (3' end of MP)
    ilen += pair[which_first].query_alignment_start + \
        pair[which_last].query_length - pair[which_last].query_alignment_end
    if (not truncate) or (ilen <= maximum_insert_size and ilen >= 0):
        len_array.append(ilen)
    return ilen
def process_insert_viz(pair, insert_plus, insert_minus, library_info):
    """Record a pair's insert length into the plus/minus visualization tracks.

    Returns 1 when the pair was recorded, 0 for same-strand pairs.
    library_info keys used: 'is_rf', 'inner_insert', 'readlen'.
    """
    if pair[0].is_reverse == pair[1].is_reverse:
        return 0
    minus_idx = 0 if pair[0].is_reverse else 1
    first_idx = minus_idx if library_info['is_rf'] else 1 - minus_idx
    first, last = pair[first_idx], pair[1 - first_idx]
    if library_info['inner_insert']:
        ilen = (last.reference_start - first.reference_end
                - last.query_alignment_start
                - (last.query_length - last.query_alignment_end))
    else:
        ilen = (last.reference_end - first.reference_start
                + (last.query_length - last.query_alignment_end)
                + first.query_alignment_start)
    rlen = library_info['readlen']
    if rlen != 0:
        # compensate for trimmed reads
        ilen += 2 * rlen - pair[0].query_length - pair[1].query_length
    insert_plus.add(first.reference_start, ilen)
    insert_minus.add(last.reference_end, ilen)
    return 1
def handle_unpaired_read(opts, aln, softclips, splits, bam, mapstats):
    """Process a read whose mate is unmapped or maps to another chromosome.

    Updates aggregate mapping stats (unless mate tags are in use) and, when
    the read is soft-clipped, records split and soft-clip evidence.
    """
    lib_idx = 0  # MULTILIB: single-library assumption
    pair = (aln, None)
    if not opts['use_mate_tags']:
        process_aggregate_mapstats(pair, mapstats[lib_idx],
                                   opts['min_mapq_reads'],
                                   opts['max_pair_distance'])
    is_clipped = any(op == CIGAR_SOFT_CLIP for op, _ in aln.cigartuples)
    if not is_clipped:
        return
    has_split = (opts['do_splits']
                 and process_splits(aln, splits[lib_idx], bam,
                                    min_mapq=opts['min_mapq_reads'], mate=None))
    process_softclip(opts, pair, (has_split, False), softclips[lib_idx], lib_idx)
# assume no hard-clipping so sequence length is calculated correctly by pysam
def process_read_len(pair, len_short_array, len_long_array):
    """Record the shorter and longer query lengths of a read pair.

    Assumes no hard clipping, so pysam computes query_length from the full
    sequence.
    """
    first_len = pair[0].query_length
    second_len = pair[1].query_length
    len_short_array.append(min(first_len, second_len))
    len_long_array.append(max(first_len, second_len))
# abstraction for a group of bam files
class BamGroup:
    """Present several BAM files as a single alignment source.

    Fetches are chained across all files (no global coordinate sorting);
    metadata queries (reference names, tids, lengths) are answered by the
    first BAM, which is assumed representative of the group.
    """

    def __init__(self, bamfiles):
        self.bamlist = [pysam.AlignmentFile(path) for path in bamfiles]

    def fetch_unsorted(self, *args, **kwargs):
        # concatenate each file's fetch results in file order
        return itertools.chain.from_iterable(
            bamfile.fetch(*args, **kwargs) for bamfile in self.bamlist)

    def fetch_sorted(self, *args, **kwargs):
        raise Warning('fetch_sorted not implemented')
        # fs = [b.fetch(*o1, **o2) for b in self.bamlist]

    def getrname(self, *args, **kwargs):
        return self.bamlist[0].getrname(*args, **kwargs)

    def gettid(self, *args, **kwargs):
        return self.bamlist[0].gettid(*args, **kwargs)

    @property
    def references(self):
        return self.bamlist[0].references

    @property
    def nreferences(self):
        return self.bamlist[0].nreferences

    @property
    def lengths(self):
        return self.bamlist[0].lengths

    @property
    def num_bam(self):
        return len(self.bamlist)
def pmf_kernel_smooth(a, xmin, xmax, max_kde_samples):
    """Estimate a discrete pmf over the integer grid [xmin, xmax] from samples.

    Subsamples `a` down to max_kde_samples for speed, fits a Gaussian
    KernelDensity with an IQR-based bandwidth, evaluates it on the integer
    grid xmin..xmax, and renormalizes so the returned list sums to 1.
    """
    if len(a) == 0:
        # NOTE(review): raising the Warning *class* as an exception is
        # unconventional; a ValueError would be more typical (kept as-is).
        raise Warning('[pmf_kernel_smooth] array is empty -- there are no insert lengths!')
    if len(a) > max_kde_samples:
        a = rnd.sample(a, max_kde_samples)
    # Siverman's rule of thumb to choose bandwidth
    # NOTE(review): np.matrix is deprecated in NumPy; a 2-D ndarray
    # (e.g. np.asarray(...)[:, None]) would be the modern equivalent.
    a_trunc = np.matrix([x for x in a if x >= xmin and x <= xmax]).T
    pct = np.percentile(a_trunc, (25, 75))
    IQR = pct[1] - pct[0]
    bw = max(1.0, .785 * IQR / a_trunc.shape[0]**(1/5))
    kde = KernelDensity(kernel='gaussian', bandwidth=bw, rtol=1e-6).fit(a_trunc)
    # evaluate log-density at each integer in [xmin, xmax], then exponentiate
    pmf = np.exp(kde.score_samples(np.matrix(np.linspace(xmin, xmax, xmax-xmin+1)).T))
    S = sum(pmf)
    return [p/S for p in pmf]
# def has_unmapped_records(bam, pairs_to_check=10):
# alignments = bam.fetch_unsorted()
# # find several reads with mates unmapped
# hanging = []
# for aln in alignments:
# if not (aln.is_unmapped or aln.is_supplementary or
# aln.is_secondary or aln.is_duplicate) and \
# aln.mate_is_unmapped:
# hanging.append(aln)
# if len(hanging) >= pairs_to_check:
# break
# # do all hanging reads have mates?
# for aln in hanging:
# alns = bam.fetch_unsorted(bam.getrname(aln.rname), aln.mpos, aln.mpos + 1)
# if any([a.is_unmapped and a.qname == aln.qname for a in alns]):
# continue
# else:
# return False
# return True
def bam_read_len(bam, reads_to_check=1000):
    """Estimate the read length of a BAM as the maximum query length seen.

    Scans up to reads_to_check usable alignments, skipping unmapped records
    and hard-clipped records (whose query_length would be truncated).

    Returns
    -------
    int or float
        The maximum observed query length, or -inf if no usable read is
        found before the stream ends.
    """
    # np.Inf was removed in NumPy 2.0; np.inf is the supported spelling
    rlen = -np.inf
    nreads = 0
    for aln in bam.fetch_unsorted():
        if aln.is_unmapped or 'H' in aln.cigarstring:
            continue
        rlen = max(rlen, aln.query_length)
        nreads += 1
        if nreads > reads_to_check:
            break
    return rlen
def get_rough_insert_median(opts, bam, pairs_to_check=10000):
    """Median insert length over roughly the first pairs_to_check good pairs.

    A pair contributes only if both mates are mapped, primary, and have
    mapq >= opts['min_mapq_reads']. Used as a coarse scale estimate before
    the full library-stats pass.
    """
    # check min_mapq, neither unmapped, neither supp
    ilen = []
    seen = {}  # qname -> first-seen mate, awaiting its pair
    rej = set()  # NOTE(review): populated below but never read -- dead bookkeeping
    for aln in bam.fetch_unsorted():
        if aln.qname in seen:
            if aln.mapq < opts['min_mapq_reads'] or aln.is_unmapped or not_primary(aln):
                del seen[aln.qname]
            else:
                pair = (aln, seen[aln.qname])
                process_insert_len(pair, ilen, opts['min_mapq_reads'],
                                   opts['read_len'], truncate=False)
                del seen[aln.qname]
        else:
            if aln.mapq < opts['min_mapq_reads'] or aln.is_unmapped or not_primary(aln):
                rej.add(aln.qname)
            else:
                seen[aln.qname] = aln
        if len(ilen) >= pairs_to_check:
            break
    return np.median(ilen)
def plot_insert_dist(opts, insert_len_dists, outdir):
    """Write each library's insert-length pmf as a one-page PDF under outdir."""
    for lib in range(opts['nlib']):
        lib_name = opts['library_names'][lib]
        pdf_path = os.path.join(outdir, 'insert_' + lib_name + '.pdf')
        pdf = PdfPages(pdf_path)
        plt.figure()
        plt.plot(insert_len_dists[lib])
        plt.title(lib_name)
        pdf.savefig()
        plt.close()
        pdf.close()
| [
"itertools.chain",
"numpy.log",
"pysam.AlignmentFile",
"arcsv.helper.is_read_through",
"arcsv.softclip.process_softclip",
"arcsv.helper.normpdf",
"sys.exit",
"arcsv.helper.add_time_checkpoint",
"arcsv.helper.not_primary",
"arcsv.pecluster.process_discordant_pair",
"arcsv.helper.len_without_gaps"... | [((216, 237), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (230, 237), False, 'import matplotlib\n'), ((1394, 1434), 'arcsv.helper.get_chrom_size_from_bam', 'get_chrom_size_from_bam', (['chrom_name', 'bam'], {}), '(chrom_name, bam)\n', (1417, 1434), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((12095, 12133), 'arcsv.helper.add_time_checkpoint', 'add_time_checkpoint', (['opts', '"""parse bam"""'], {}), "(opts, 'parse bam')\n", (12114, 12133), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((13583, 13647), 'arcsv.helper.len_without_gaps', 'len_without_gaps', (['chrom_name', 'start', 'end', "reference_files['gap']"], {}), "(chrom_name, start, end, reference_files['gap'])\n", (13599, 13647), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((15243, 15281), 'arcsv.splitreads.parse_splits', 'parse_splits', (['aln', 'bam', 'min_mapq', 'mate'], {}), '(aln, bam, min_mapq, mate)\n', (15255, 15281), False, 'from arcsv.splitreads import parse_splits, splits_are_mirrored\n'), ((20058, 20090), 'numpy.percentile', 'np.percentile', (['a_trunc', '(25, 75)'], {}), '(a_trunc, (25, 75))\n', (20071, 20090), True, 'import numpy as np\n'), ((22339, 22354), 'numpy.median', 'np.median', (['ilen'], {}), '(ilen)\n', (22348, 22354), True, 'import numpy as np\n'), ((943, 1011), 'numpy.floor', 'np.floor', (["(opts['approx_stats_nreads'] / opts['approx_stats_nchunks'])"], {}), "(opts['approx_stats_nreads'] / opts['approx_stats_nchunks'])\n", (951, 1011), True, 'import numpy as np\n'), ((1818, 1863), 'numpy.random.randint', 'np.random.randint', (['(0)', '(chrom_size - chunk_size)'], {}), '(0, chrom_size - chunk_size)\n', (1835, 
1863), True, 'import numpy as np\n'), ((3546, 3559), 'numpy.median', 'np.median', (['il'], {}), '(il)\n', (3555, 3559), True, 'import numpy as np\n'), ((3599, 3612), 'arcsv.helper.robust_sd', 'robust_sd', (['il'], {}), '(il)\n', (3608, 3612), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((3655, 3678), 'numpy.percentile', 'np.percentile', (['il', '(0.15)'], {}), '(il, 0.15)\n', (3668, 3678), True, 'import numpy as np\n'), ((3721, 3745), 'numpy.percentile', 'np.percentile', (['il', '(99.85)'], {}), '(il, 99.85)\n', (3734, 3745), True, 'import numpy as np\n'), ((5989, 6028), 'arcsv.helper.add_time_checkpoint', 'add_time_checkpoint', (['opts', '"""lib. stats"""'], {}), "(opts, 'lib. stats')\n", (6008, 6028), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((6244, 6256), 'numpy.log', 'np.log', (['mode'], {}), '(mode)\n', (6250, 6256), True, 'import numpy as np\n'), ((11783, 11794), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (11791, 11794), False, 'import sys\n'), ((12853, 12866), 'numpy.median', 'np.median', (['il'], {}), '(il)\n', (12862, 12866), True, 'import numpy as np\n'), ((12906, 12919), 'arcsv.helper.robust_sd', 'robust_sd', (['il'], {}), '(il)\n', (12915, 12919), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((17976, 18082), 'arcsv.conditional_mappable_model.process_aggregate_mapstats', 'process_aggregate_mapstats', (['pair', 'mapstats[lib_idx]', "opts['min_mapq_reads']", "opts['max_pair_distance']"], {}), "(pair, mapstats[lib_idx], opts['min_mapq_reads'],\n opts['max_pair_distance'])\n", (18002, 18082), False, 'from arcsv.conditional_mappable_model import process_aggregate_mapstats\n'), ((18416, 18493), 'arcsv.softclip.process_softclip', 
'process_softclip', (['opts', 'pair', '(has_split, False)', 'softclips[lib_idx]', 'lib_idx'], {}), '(opts, pair, (has_split, False), softclips[lib_idx], lib_idx)\n', (18432, 18493), False, 'from arcsv.softclip import process_softclip\n'), ((19896, 19926), 'random.sample', 'rnd.sample', (['a', 'max_kde_samples'], {}), '(a, max_kde_samples)\n', (19906, 19926), True, 'import random as rnd\n'), ((19992, 20044), 'numpy.matrix', 'np.matrix', (['[x for x in a if x >= xmin and x <= xmax]'], {}), '([x for x in a if x >= xmin and x <= xmax])\n', (20001, 20044), True, 'import numpy as np\n'), ((22463, 22530), 'os.path.join', 'os.path.join', (['outdir', "('insert_' + opts['library_names'][l] + '.pdf')"], {}), "(outdir, 'insert_' + opts['library_names'][l] + '.pdf')\n", (22475, 22530), False, 'import os\n'), ((22544, 22561), 'matplotlib.backends.backend_pdf.PdfPages', 'PdfPages', (['outfile'], {}), '(outfile)\n', (22552, 22561), False, 'from matplotlib.backends.backend_pdf import PdfPages\n'), ((22570, 22582), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (22580, 22582), True, 'import matplotlib.pyplot as plt\n'), ((22591, 22620), 'matplotlib.pyplot.plot', 'plt.plot', (['insert_len_dists[l]'], {}), '(insert_len_dists[l])\n', (22599, 22620), True, 'import matplotlib.pyplot as plt\n'), ((22629, 22664), 'matplotlib.pyplot.title', 'plt.title', (["opts['library_names'][l]"], {}), "(opts['library_names'][l])\n", (22638, 22664), True, 'import matplotlib.pyplot as plt\n'), ((22694, 22705), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22703, 22705), True, 'import matplotlib.pyplot as plt\n'), ((3997, 4010), 'numpy.median', 'np.median', (['rl'], {}), '(rl)\n', (4006, 4010), True, 'import numpy as np\n'), ((4063, 4076), 'numpy.median', 'np.median', (['rl'], {}), '(rl)\n', (4072, 4076), True, 'import numpy as np\n'), ((6155, 6165), 'arcsv.helper.normpdf', 'normpdf', (['(0)'], {}), '(0)\n', (6162, 6165), False, 'from arcsv.helper import get_chrom_size_from_bam, 
not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((6168, 6201), 'arcsv.helper.normpdf', 'normpdf', (['cutoff_normal_equivalent'], {}), '(cutoff_normal_equivalent)\n', (6175, 6201), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((7764, 7781), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7775, 7781), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((7783, 7800), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (7794, 7800), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((7920, 7933), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7931, 7933), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((8059, 8075), 'collections.defaultdict', 'defaultdict', (['int'], {}), '(int)\n', (8070, 8075), False, 'from collections import Counter, defaultdict, OrderedDict\n'), ((8322, 8338), 'arcsv.helper.not_primary', 'not_primary', (['aln'], {}), '(aln)\n', (8333, 8338), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((9237, 9264), 'arcsv.helper.is_read_through', 'is_read_through', (['opts', 'pair'], {}), '(opts, pair)\n', (9252, 9264), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((9497, 9596), 'arcsv.conditional_mappable_model.process_aggregate_mapstats', 'process_aggregate_mapstats', (['pair', 'mapstats[lib_idx]', 'min_mapq_reads', "opts['max_pair_distance']"], {}), "(pair, mapstats[lib_idx], min_mapq_reads, opts[\n 'max_pair_distance'])\n", (9523, 9596), False, 'from arcsv.conditional_mappable_model import process_aggregate_mapstats\n'), ((9813, 10011), 'arcsv.pecluster.process_discordant_pair', 
'process_discordant_pair', (['pair[0]', 'pair[1]', 'chrom_name', 'discordant_pairs[lib_idx]', 'min_mapq_reads', 'ilen', 'min_concordant_insert[lib_idx]', 'max_concordant_insert[lib_idx]', "opts['library_is_rf']"], {}), "(pair[0], pair[1], chrom_name, discordant_pairs[\n lib_idx], min_mapq_reads, ilen, min_concordant_insert[lib_idx],\n max_concordant_insert[lib_idx], opts['library_is_rf'])\n", (9836, 10011), False, 'from arcsv.pecluster import process_discordant_pair\n'), ((11327, 11406), 'arcsv.softclip.process_softclip', 'process_softclip', (['opts', 'pair', '(a1_split, a2_split)', 'softclips[lib_idx]', 'lib_idx'], {}), '(opts, pair, (a1_split, a2_split), softclips[lib_idx], lib_idx)\n', (11343, 11406), False, 'from arcsv.softclip import process_softclip\n'), ((12527, 12559), 'numpy.percentile', 'np.percentile', (['ins', '(25, 50, 75)'], {}), '(ins, (25, 50, 75))\n', (12540, 12559), True, 'import numpy as np\n'), ((14857, 14885), 'arcsv.invertedreads.get_inverted_pair', 'get_inverted_pair', (['pair', 'bam'], {}), '(pair, bam)\n', (14874, 14885), False, 'from arcsv.invertedreads import get_inverted_pair\n'), ((18871, 18894), 'pysam.AlignmentFile', 'pysam.AlignmentFile', (['bf'], {}), '(bf)\n', (18890, 18894), False, 'import pysam\n'), ((20184, 20242), 'sklearn.neighbors.kde.KernelDensity', 'KernelDensity', ([], {'kernel': '"""gaussian"""', 'bandwidth': 'bw', 'rtol': '(1e-06)'}), "(kernel='gaussian', bandwidth=bw, rtol=1e-06)\n", (20197, 20242), False, 'from sklearn.neighbors.kde import KernelDensity\n'), ((2318, 2334), 'arcsv.helper.not_primary', 'not_primary', (['aln'], {}), '(aln)\n', (2329, 2334), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((10953, 11014), 'arcsv.splitreads.splits_are_mirrored', 'splits_are_mirrored', (['splits[lib_idx][-1]', 'splits[lib_idx][-2]'], {}), '(splits[lib_idx][-1], splits[lib_idx][-2])\n', (10972, 11014), False, 'from 
arcsv.splitreads import parse_splits, splits_are_mirrored\n'), ((21782, 21798), 'arcsv.helper.not_primary', 'not_primary', (['aln'], {}), '(aln)\n', (21793, 21798), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((22161, 22177), 'arcsv.helper.not_primary', 'not_primary', (['aln'], {}), '(aln)\n', (22172, 22177), False, 'from arcsv.helper import get_chrom_size_from_bam, not_primary, robust_sd, add_time_checkpoint, normpdf, is_read_through, len_without_gaps\n'), ((10218, 10268), 'itertools.chain', 'itertools.chain', (['aln.cigartuples', 'mate.cigartuples'], {}), '(aln.cigartuples, mate.cigartuples)\n', (10233, 10268), False, 'import itertools\n'), ((20300, 20340), 'numpy.linspace', 'np.linspace', (['xmin', 'xmax', '(xmax - xmin + 1)'], {}), '(xmin, xmax, xmax - xmin + 1)\n', (20311, 20340), True, 'import numpy as np\n'), ((7034, 7048), 'numpy.log', 'np.log', (['pmf[i]'], {}), '(pmf[i])\n', (7040, 7048), True, 'import numpy as np\n'), ((6452, 6466), 'numpy.log', 'np.log', (['pmf[i]'], {}), '(pmf[i])\n', (6458, 6466), True, 'import numpy as np\n'), ((6650, 6664), 'numpy.log', 'np.log', (['pmf[i]'], {}), '(pmf[i])\n', (6656, 6664), True, 'import numpy as np\n')] |
# sinh() function
import math

import numpy as np

# Evaluate the hyperbolic sine element-wise over a small list of angles.
in_array = [0, math.pi / 2, np.pi / 3, np.pi]
print("Input array : \n", in_array)

Sinh_Values = np.sinh(in_array)
print("\nSine Hyperbolic values : \n", Sinh_Values)
"numpy.sinh"
] | [((152, 169), 'numpy.sinh', 'np.sinh', (['in_array'], {}), '(in_array)\n', (159, 169), True, 'import numpy as np\n')] |
import numpy as np
from edutorch.nn import SpatialGroupNorm
from tests.gradient_check import estimate_gradients
def test_spatial_groupnorm_forward() -> None:
    """With identity affine parameters (gamma=1, beta=0), the forward pass
    of SpatialGroupNorm must yield zero-mean, unit-std activations within
    each (sample, group) slice."""
    num, channels, height, width, groups = 2, 6, 4, 5, 2
    inputs = 4 * np.random.randn(num, channels, height, width) + 10
    layer = SpatialGroupNorm(channels, groups)
    layer.gamma = np.ones((1, channels, 1, 1))
    layer.beta = np.zeros((1, channels, 1, 1))
    normed = layer(inputs)
    # one row per (sample, group) pair: N * G = 4 rows
    flattened = normed.reshape((num * groups, -1))
    assert np.allclose(
        flattened.mean(axis=1), np.zeros(4)
    ), "After batch norm (gamma=1, beta=0), means should be close to 0."
    assert np.allclose(
        flattened.std(axis=1), np.ones(4)
    ), "After batch norm (gamma=1, beta=0), stds should be close to 1."
def test_spatial_groupnorm_backward() -> None:
    """Analytic gradients returned by SpatialGroupNorm.backward must match
    numerical estimates for the input, gamma and beta."""
    num, channels, height, width, groups = 2, 6, 4, 5, 2
    inputs = 5 * np.random.randn(num, channels, height, width) + 12
    gamma = np.random.randn(1, channels, 1, 1)
    beta = np.random.randn(1, channels, 1, 1)
    upstream = np.random.randn(num, channels, height, width)
    layer = SpatialGroupNorm(channels, groups)
    affine = {"gamma": gamma, "beta": beta}
    dx_num, dgamma_num, dbeta_num = estimate_gradients(layer, upstream, inputs, affine)
    # run a forward pass to populate the layer's cached intermediates
    _ = layer(inputs)
    dx, dgamma, dbeta = layer.backward(upstream)
    assert np.allclose(dx_num, dx)
    assert np.allclose(dgamma_num, dgamma)
    assert np.allclose(dbeta_num, dbeta)
| [
"numpy.allclose",
"numpy.ones",
"edutorch.nn.SpatialGroupNorm",
"numpy.zeros",
"tests.gradient_check.estimate_gradients",
"numpy.random.randn"
] | [((252, 274), 'edutorch.nn.SpatialGroupNorm', 'SpatialGroupNorm', (['C', 'G'], {}), '(C, G)\n', (268, 274), False, 'from edutorch.nn import SpatialGroupNorm\n'), ((293, 314), 'numpy.ones', 'np.ones', (['(1, C, 1, 1)'], {}), '((1, C, 1, 1))\n', (300, 314), True, 'import numpy as np\n'), ((332, 354), 'numpy.zeros', 'np.zeros', (['(1, C, 1, 1)'], {}), '((1, C, 1, 1))\n', (340, 354), True, 'import numpy as np\n'), ((824, 851), 'numpy.random.randn', 'np.random.randn', (['(1)', 'C', '(1)', '(1)'], {}), '(1, C, 1, 1)\n', (839, 851), True, 'import numpy as np\n'), ((863, 890), 'numpy.random.randn', 'np.random.randn', (['(1)', 'C', '(1)', '(1)'], {}), '(1, C, 1, 1)\n', (878, 890), True, 'import numpy as np\n'), ((902, 929), 'numpy.random.randn', 'np.random.randn', (['N', 'C', 'H', 'W'], {}), '(N, C, H, W)\n', (917, 929), True, 'import numpy as np\n'), ((943, 965), 'edutorch.nn.SpatialGroupNorm', 'SpatialGroupNorm', (['C', 'G'], {}), '(C, G)\n', (959, 965), False, 'from edutorch.nn import SpatialGroupNorm\n'), ((1047, 1089), 'tests.gradient_check.estimate_gradients', 'estimate_gradients', (['model', 'dout', 'x', 'params'], {}), '(model, dout, x, params)\n', (1065, 1089), False, 'from tests.gradient_check import estimate_gradients\n'), ((1165, 1188), 'numpy.allclose', 'np.allclose', (['dx_num', 'dx'], {}), '(dx_num, dx)\n', (1176, 1188), True, 'import numpy as np\n'), ((1200, 1231), 'numpy.allclose', 'np.allclose', (['dgamma_num', 'dgamma'], {}), '(dgamma_num, dgamma)\n', (1211, 1231), True, 'import numpy as np\n'), ((1243, 1272), 'numpy.allclose', 'np.allclose', (['dbeta_num', 'dbeta'], {}), '(dbeta_num, dbeta)\n', (1254, 1272), True, 'import numpy as np\n'), ((465, 476), 'numpy.zeros', 'np.zeros', (['(4)'], {}), '(4)\n', (473, 476), True, 'import numpy as np\n'), ((601, 611), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (608, 611), True, 'import numpy as np\n'), ((207, 234), 'numpy.random.randn', 'np.random.randn', (['N', 'C', 'H', 'W'], {}), '(N, C, H, W)\n', (222, 
234), True, 'import numpy as np\n'), ((779, 806), 'numpy.random.randn', 'np.random.randn', (['N', 'C', 'H', 'W'], {}), '(N, C, H, W)\n', (794, 806), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Emotion-Analysis.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1Hg2TKSPhWyjQZJDEyHaQxuRiD-A7_WtQ
#Imports
"""
import pandas as pd
import numpy as np
import os
import random
import re
import nltk
nltk.download('punkt')
nltk.download('wordnet')
nltk.download('stopwords')
"""# Data / Paragraph"""
# Load the pre-split emotion corpora; each file has "sentence;label" lines.
train=pd.read_csv(r'C:\Users\20065\OneDrive\Documents/train.txt',sep=';',names=['Sentences','Emotion'])
test=pd.read_csv(r'C:\Users\20065\OneDrive\Documents/test.txt',sep=';',names=['Sentences','Emotion'])
val=pd.read_csv(r'C:\Users\20065\OneDrive\Documents/val.txt',sep=';',names=['Sentences','Emotion'])
# De-duplicate and drop missing rows (training split only).
train.drop_duplicates(inplace=True)
train.dropna(inplace=True)
"""#Data Cleaning"""
from nltk.corpus import stopwords
# English stopword list and WordNet lemmatizer consumed by process() below.
stoplist=stopwords.words('english')
from nltk.stem import WordNetLemmatizer
lemmatizer=WordNetLemmatizer()
def expand(phrase):
    """Expand apostrophe-stripped English contractions in *phrase*.

    The caller (process) lower-cases text and replaces every non-alphabetic
    character with a space first, so "don't" arrives as "dont" or "don t";
    this maps such fragments back to full words ("wont" -> "will not",
    "im" -> "i am", ...).

    Args:
        phrase: pre-cleaned, lower-cased string.

    Returns:
        The string with known contraction fragments rewritten.
    """
    phrase = re.sub(r"wont", "will not", phrase)
    phrase = re.sub(r"wouldnt", "would not", phrase)
    phrase = re.sub(r"shouldnt", "should not", phrase)
    phrase = re.sub(r"couldnt", "could not", phrase)
    phrase = re.sub(r"cudnt", "could not", phrase)
    phrase = re.sub(r"cant", "can not", phrase)
    phrase = re.sub(r"dont", "do not", phrase)
    phrase = re.sub(r"doesnt", "does not", phrase)
    phrase = re.sub(r"didnt", "did not", phrase)
    phrase = re.sub(r"wasnt", "was not", phrase)
    phrase = re.sub(r"werent", "were not", phrase)
    phrase = re.sub(r"havent", "have not", phrase)
    phrase = re.sub(r"hadnt", "had not", phrase)
    phrase = re.sub(r"n\ t", " not", phrase)  # "don t"-style leftovers
    # BUGFIX: r"\re" matched carriage-return + "e" (\r is the CR escape),
    # never the " re" fragment left behind by "'re"; anchor on the space.
    phrase = re.sub(r"\ re\b", " are", phrase)
    phrase = re.sub(r"\ s ", " is ", phrase)
    phrase = re.sub(r"\ d ", " would ", phrase)
    phrase = re.sub(r"\ ll ", " will ", phrase)
    # BUGFIX: r"\dunno" matched a digit followed by "unno" (\d is the digit
    # class); match the word "dunno" itself.
    phrase = re.sub(r"\bdunno", "do not ", phrase)
    phrase = re.sub(r"ive ", "i have ", phrase)
    phrase = re.sub(r"im ", "i am ", phrase)
    phrase = re.sub(r"i m ", "i am ", phrase)
    phrase = re.sub(r" w ", " with ", phrase)
    return phrase
def process(sentences):
    """Clean and normalise a sequence of raw sentences.

    For each sentence: strip non-alphabetic characters, expand
    apostrophe-stripped contractions, lower-case, drop English stopwords
    and lemmatize the remaining tokens.

    Args:
        sentences: sequence (e.g. np.ndarray) of raw sentence strings.

    Returns:
        List of cleaned sentence strings, one per input sentence.
    """
    # Hoisted out of the loop: the original rebuilt set(stoplist) for
    # every single token, and shadowed the builtin name `list`.
    stop_set = set(stoplist)
    cleaned = []
    for sentence in sentences:
        # Keep alphabetic characters only; everything else becomes a space.
        text = re.sub('[^a-zA-Z]', ' ', sentence)
        # Expand contraction fragments (e.g. "wont" -> "will not").
        text = expand(text)
        words = text.lower().split()
        # Lemmatize only the words that are not stopwords.
        words = [lemmatizer.lemmatize(word) for word in words if word not in stop_set]
        cleaned.append(' '.join(words))
    return cleaned
train['Sentences']=process(np.array(train['Sentences']))
test['Sentences']=process(np.array(test['Sentences']))
val['Sentences']=process(np.array(val['Sentences']))
# Integer-encode the emotion labels: each distinct label (in order of first
# appearance in the training split) maps to its index.  The original code
# shadowed the builtin name `dict`; use a dict comprehension instead.
emotion=np.array(train['Emotion'].unique())
label_map = {label: idx for idx, label in enumerate(emotion)}
train['Emotion']=train['Emotion'].replace(label_map)
test['Emotion']=test['Emotion'].replace(label_map)
val['Emotion']=val['Emotion'].replace(label_map)
"""#Word Embedding
##TF-IDF
"""
from sklearn.feature_extraction.text import TfidfVectorizer
# Vectorise the cleaned sentences; vocabulary capped at 8000 terms.
tfidf=TfidfVectorizer(max_features=8000)
# Fit on the training split only; test/val reuse the fitted vocabulary.
train_data=tfidf.fit_transform(train['Sentences'])
test_data=tfidf.transform(test['Sentences'])
val_data=tfidf.transform(val['Sentences'])
train_label=train.Emotion.values
test_label=test.Emotion.values
val_label=val.Emotion.values
"""# Machine Learning Implementation"""
from sklearn.linear_model import LogisticRegression
clf=LogisticRegression(max_iter=100000)
clf.fit(train_data,train_label)
pred=clf.predict(test_data)
from sklearn.metrics import classification_report ,confusion_matrix,accuracy_score
print('Accuracy : ',accuracy_score(test_label,pred))
print('\nConfusion Matrix : \n',confusion_matrix(test_label,pred))
print('\n\nClassification Report : ',classification_report(test_label,pred))
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): this hard-coded label order assumes it matches the integer
# encoding derived earlier from train['Emotion'].unique() — confirm before
# trusting the heatmap axes.
label=['Sadness','Anger','Love','Surprise','Fear','Joy']
matrix=confusion_matrix(test_label,pred)
matrix=pd.DataFrame(matrix,columns=label,index=label)
fig, ax = plt.subplots(figsize=(15,15))
ax.set(title='Confusion Matrix')
sns.heatmap(matrix,cmap='Blues',annot=True,ax=ax)
"""#Saving the model"""
import joblib
# Persist the trained classifier (the fitted vectorizer is saved next).
joblib.dump(clf,'mymodel.pkl')
joblib.dump(tfidf,'TF-IDF_vectorizer.pkl') | [
"sklearn.metrics.accuracy_score",
"nltk.corpus.stopwords.words",
"nltk.download",
"pandas.read_csv",
"sklearn.metrics.classification_report",
"nltk.stem.WordNetLemmatizer",
"seaborn.heatmap",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"sklearn.feature_extraction.text.TfidfVectorizer... | [((301, 323), 'nltk.download', 'nltk.download', (['"""punkt"""'], {}), "('punkt')\n", (314, 323), False, 'import nltk\n'), ((324, 348), 'nltk.download', 'nltk.download', (['"""wordnet"""'], {}), "('wordnet')\n", (337, 348), False, 'import nltk\n'), ((349, 375), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (362, 375), False, 'import nltk\n'), ((410, 517), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/train.txt"""'], {'sep': '""";"""', 'names': "['Sentences', 'Emotion']"}), "('C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/train.txt', sep=';',\n names=['Sentences', 'Emotion'])\n", (421, 517), True, 'import pandas as pd\n'), ((513, 619), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/test.txt"""'], {'sep': '""";"""', 'names': "['Sentences', 'Emotion']"}), "('C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/test.txt', sep=';',\n names=['Sentences', 'Emotion'])\n", (524, 619), True, 'import pandas as pd\n'), ((614, 720), 'pandas.read_csv', 'pd.read_csv', (['"""C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/val.txt"""'], {'sep': '""";"""', 'names': "['Sentences', 'Emotion']"}), "('C:\\\\Users\\\\20065\\\\OneDrive\\\\Documents/val.txt', sep=';', names\n =['Sentences', 'Emotion'])\n", (625, 720), True, 'import pandas as pd\n'), ((842, 868), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (857, 868), False, 'from nltk.corpus import stopwords\n'), ((922, 941), 'nltk.stem.WordNetLemmatizer', 'WordNetLemmatizer', ([], {}), '()\n', (939, 941), False, 'from nltk.stem import WordNetLemmatizer\n'), ((3387, 3421), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'max_features': '(8000)'}), '(max_features=8000)\n', (3402, 3421), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((3754, 3789), 
'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'max_iter': '(100000)'}), '(max_iter=100000)\n', (3772, 3789), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4252, 4286), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'pred'], {}), '(test_label, pred)\n', (4268, 4286), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((4294, 4342), 'pandas.DataFrame', 'pd.DataFrame', (['matrix'], {'columns': 'label', 'index': 'label'}), '(matrix, columns=label, index=label)\n', (4306, 4342), True, 'import pandas as pd\n'), ((4351, 4381), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(15, 15)'}), '(figsize=(15, 15))\n', (4363, 4381), True, 'import matplotlib.pyplot as plt\n'), ((4415, 4467), 'seaborn.heatmap', 'sns.heatmap', (['matrix'], {'cmap': '"""Blues"""', 'annot': '(True)', 'ax': 'ax'}), "(matrix, cmap='Blues', annot=True, ax=ax)\n", (4426, 4467), True, 'import seaborn as sns\n'), ((4506, 4537), 'joblib.dump', 'joblib.dump', (['clf', '"""mymodel.pkl"""'], {}), "(clf, 'mymodel.pkl')\n", (4517, 4537), False, 'import joblib\n'), ((4537, 4580), 'joblib.dump', 'joblib.dump', (['tfidf', '"""TF-IDF_vectorizer.pkl"""'], {}), "(tfidf, 'TF-IDF_vectorizer.pkl')\n", (4548, 4580), False, 'import joblib\n'), ((981, 1015), 're.sub', 're.sub', (['"""wont"""', '"""will not"""', 'phrase'], {}), "('wont', 'will not', phrase)\n", (987, 1015), False, 'import re\n'), ((1030, 1068), 're.sub', 're.sub', (['"""wouldnt"""', '"""would not"""', 'phrase'], {}), "('wouldnt', 'would not', phrase)\n", (1036, 1068), False, 'import re\n'), ((1083, 1123), 're.sub', 're.sub', (['"""shouldnt"""', '"""should not"""', 'phrase'], {}), "('shouldnt', 'should not', phrase)\n", (1089, 1123), False, 'import re\n'), ((1138, 1176), 're.sub', 're.sub', (['"""couldnt"""', '"""could not"""', 'phrase'], {}), "('couldnt', 'could not', phrase)\n", (1144, 1176), False, 'import re\n'), ((1191, 
1227), 're.sub', 're.sub', (['"""cudnt"""', '"""could not"""', 'phrase'], {}), "('cudnt', 'could not', phrase)\n", (1197, 1227), False, 'import re\n'), ((1242, 1275), 're.sub', 're.sub', (['"""cant"""', '"""can not"""', 'phrase'], {}), "('cant', 'can not', phrase)\n", (1248, 1275), False, 'import re\n'), ((1290, 1322), 're.sub', 're.sub', (['"""dont"""', '"""do not"""', 'phrase'], {}), "('dont', 'do not', phrase)\n", (1296, 1322), False, 'import re\n'), ((1337, 1373), 're.sub', 're.sub', (['"""doesnt"""', '"""does not"""', 'phrase'], {}), "('doesnt', 'does not', phrase)\n", (1343, 1373), False, 'import re\n'), ((1388, 1422), 're.sub', 're.sub', (['"""didnt"""', '"""did not"""', 'phrase'], {}), "('didnt', 'did not', phrase)\n", (1394, 1422), False, 'import re\n'), ((1437, 1471), 're.sub', 're.sub', (['"""wasnt"""', '"""was not"""', 'phrase'], {}), "('wasnt', 'was not', phrase)\n", (1443, 1471), False, 'import re\n'), ((1486, 1522), 're.sub', 're.sub', (['"""werent"""', '"""were not"""', 'phrase'], {}), "('werent', 'were not', phrase)\n", (1492, 1522), False, 'import re\n'), ((1537, 1573), 're.sub', 're.sub', (['"""havent"""', '"""have not"""', 'phrase'], {}), "('havent', 'have not', phrase)\n", (1543, 1573), False, 'import re\n'), ((1588, 1622), 're.sub', 're.sub', (['"""hadnt"""', '"""had not"""', 'phrase'], {}), "('hadnt', 'had not', phrase)\n", (1594, 1622), False, 'import re\n'), ((1643, 1674), 're.sub', 're.sub', (['"""n\\\\ t"""', '""" not"""', 'phrase'], {}), "('n\\\\ t', ' not', phrase)\n", (1649, 1674), False, 'import re\n'), ((1688, 1718), 're.sub', 're.sub', (['"""\\\\re"""', '""" are"""', 'phrase'], {}), "('\\\\re', ' are', phrase)\n", (1694, 1718), False, 'import re\n'), ((1732, 1763), 're.sub', 're.sub', (['"""\\\\ s """', '""" is """', 'phrase'], {}), "('\\\\ s ', ' is ', phrase)\n", (1738, 1763), False, 'import re\n'), ((1778, 1812), 're.sub', 're.sub', (['"""\\\\ d """', '""" would """', 'phrase'], {}), "('\\\\ d ', ' would ', phrase)\n", (1784, 
1812), False, 'import re\n'), ((1826, 1860), 're.sub', 're.sub', (['"""\\\\ ll """', '""" will """', 'phrase'], {}), "('\\\\ ll ', ' will ', phrase)\n", (1832, 1860), False, 'import re\n'), ((1874, 1910), 're.sub', 're.sub', (['"""\\\\dunno"""', '"""do not """', 'phrase'], {}), "('\\\\dunno', 'do not ', phrase)\n", (1880, 1910), False, 'import re\n'), ((1924, 1957), 're.sub', 're.sub', (['"""ive """', '"""i have """', 'phrase'], {}), "('ive ', 'i have ', phrase)\n", (1930, 1957), False, 'import re\n'), ((1972, 2002), 're.sub', 're.sub', (['"""im """', '"""i am """', 'phrase'], {}), "('im ', 'i am ', phrase)\n", (1978, 2002), False, 'import re\n'), ((2017, 2048), 're.sub', 're.sub', (['"""i m """', '"""i am """', 'phrase'], {}), "('i m ', 'i am ', phrase)\n", (2023, 2048), False, 'import re\n'), ((2063, 2094), 're.sub', 're.sub', (['""" w """', '""" with """', 'phrase'], {}), "(' w ', ' with ', phrase)\n", (2069, 2094), False, 'import re\n'), ((2907, 2935), 'numpy.array', 'np.array', (["train['Sentences']"], {}), "(train['Sentences'])\n", (2915, 2935), True, 'import numpy as np\n'), ((2963, 2990), 'numpy.array', 'np.array', (["test['Sentences']"], {}), "(test['Sentences'])\n", (2971, 2990), True, 'import numpy as np\n'), ((3017, 3043), 'numpy.array', 'np.array', (["val['Sentences']"], {}), "(val['Sentences'])\n", (3025, 3043), True, 'import numpy as np\n'), ((3956, 3988), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['test_label', 'pred'], {}), '(test_label, pred)\n', (3970, 3988), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((4021, 4055), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_label', 'pred'], {}), '(test_label, pred)\n', (4037, 4055), False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((4093, 4132), 'sklearn.metrics.classification_report', 'classification_report', (['test_label', 'pred'], {}), '(test_label, pred)\n', (4114, 4132), 
False, 'from sklearn.metrics import classification_report, confusion_matrix, accuracy_score\n'), ((2253, 2291), 're.sub', 're.sub', (['"""[^a-zA-Z]"""', '""" """', 'sentences[i]'], {}), "('[^a-zA-Z]', ' ', sentences[i])\n", (2259, 2291), False, 'import re\n')] |
from igraph import *
import numpy as np
# Build a small directed toy graph: vertices 0-3 and 4-6 are each fully
# inter-connected, joined only by the one-way edge (2, 4).
# Create the graph
vertices = [i for i in range(7)]
edges = [(0,2),(0,1),(0,3),(1,0),(1,2),(1,3),(2,0),(2,1),(2,3),(3,0),(3,1),(3,2),(2,4),(4,5),(4,6),(5,4),(5,6),(6,4),(6,5)]
g = Graph(vertex_attrs={"label":vertices}, edges=edges, directed=True)
visual_style = {}
# Scale vertices based on degree
outdegree = g.outdegree()
visual_style["vertex_size"] = [x/max(outdegree)*25+50 for x in outdegree]
# Set bbox and margin
visual_style["bbox"] = (800,800)
visual_style["margin"] = 100
# Define colors used for outdegree visualization
colours = ['#fecc5c', '#a31a1c']
# Order vertices in bins based on outdegree
bins = np.linspace(0, max(outdegree), len(colours))
digitized_degrees = np.digitize(outdegree, bins)
# Set colors according to bins
g.vs["color"] = [colours[x-1] for x in digitized_degrees]
# Also color the edges
# NOTE(review): this loop rebinds `edges` (previously the edge list) to an
# igraph edge sequence; harmless here, but the name is reused.
for ind, color in enumerate(g.vs["color"]):
    edges = g.es.select(_source=ind)
    edges["color"] = [color]
# Don't curve the edges
visual_style["edge_curved"] = False
# Community detection
communities = g.community_edge_betweenness(directed=True)
clusters = communities.as_clustering()
# Set edge weights based on communities
# weight = community size of source + community size of target
weights = {v: len(c) for c in clusters for v in c}
g.es["weight"] = [weights[e.tuple[0]] + weights[e.tuple[1]] for e in g.es]
# Choose the layout
N = len(vertices)
visual_style["layout"] = g.layout_fruchterman_reingold(weights=g.es["weight"], maxiter=1000, area=N**3, repulserad=N**3)
# Plot the graph
plot(g, **visual_style)
| [
"numpy.digitize"
] | [((730, 758), 'numpy.digitize', 'np.digitize', (['outdegree', 'bins'], {}), '(outdegree, bins)\n', (741, 758), True, 'import numpy as np\n')] |
from flask import session
from flask import render_template
import os
from flask import Blueprint, request
import numpy as np
from flaskr.auth import login_required
from flask import g
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import base64
import io
from .classes.preProcessClass import PreProcess
from pathlib import Path
import matplotlib.gridspec as gridspec
ROOT_PATH = Path.cwd()
USER_PATH = ROOT_PATH / "flaskr" / "upload" / "users"
bp = Blueprint("visualization", __name__, url_prefix="/vis")
@bp.route("/")
@login_required
def index():
    """Render the visualization landing page.

    Lists the current user's uploaded files (when their upload directory
    exists) and caches the file names in the session.
    """
    user_dir = USER_PATH / str(g.user["id"])
    if not os.path.exists(user_dir):
        return render_template("visualization/index.html")
    file_names = []
    for entry in os.listdir(user_dir):
        if os.path.isfile(user_dir / entry):
            file_names.append(entry)
    session['files'] = file_names
    return render_template("visualization/index.html", available_list=file_names)
@bp.route("/img/", methods=['GET'])
@login_required
def get_image_src():
    """Return a base64-encoded diagnostic plot for the requested
    file/feature pair, or an empty string when it cannot be produced."""
    requested_file = request.args.get('available_file')
    requested_feature = request.args.get('feature').lstrip()
    encoded = getPlot(requested_file, requested_feature)
    return str(encoded) if encoded else ""
@bp.route("/js/", methods=["GET"])
@login_required
def get_col_names_js():
    """Return the column names of an uploaded file as a single
    comma-separated string (consumed by the front-end JavaScript)."""
    file_name = request.args.get('available_files')
    user_id = request.args.get('user_id')
    path = USER_PATH / str(user_id) / file_name
    df = PreProcess.getDF(path)
    # join() accepts the column index directly; the original built an
    # intermediate list and a redundant generator expression.
    return ','.join(df.columns)
def getPlot(file_name, feature):
    """Render diagnostic plots for one gene/feature of an uploaded file.

    Builds a 2x2 figure (line plot across samples, histogram, boxplot and
    — when a 'class' column exists — a per-class boxplot) and returns it
    base64-encoded as a UTF-8 PNG string, or None if *feature* is not a
    column of the file.
    """
    path = USER_PATH / str(g.user["id"]) / file_name
    df = PreProcess.getDF(path)
    # expose the index (sample IDs) as a regular column for plotting
    df = df.reset_index()
    if feature not in df.columns:
        return None
    np.warnings.filterwarnings('ignore')
    if 'class' in df.columns:
        # labelled data: 4 panels including a boxplot grouped by class
        fig, axs = plt.subplots(2, 2, figsize=(10, 8))
        df.plot(kind='line', x=df.columns[0], y=df.columns[1], ax=axs[0, 0])
        axs[0, 0].get_legend().remove()
        axs[0, 0].set_ylabel('Gene Expression Values')
        axs[0, 0].set_xlabel('Sample ID')
        axs[0, 0].tick_params(labelrotation=45)
        axs[0, 0].set_title('Variation of gene expression values across samples')
        axs[0, 1].hist(df[feature])
        axs[0, 1].set_title('Histogram')
        axs[0, 1].set_ylabel('Frequency')
        axs[0, 1].set_xlabel('Gene Expression Values')
        df.boxplot(column=[feature], ax=axs[1, 0])
        axs[1, 0].set_title('Boxplot')
        axs[1, 0].set_ylabel('Gene Expression Values')
        axs[1, 0].set_xlabel('Gene Symbol')
        df.boxplot(column=[feature], by='class', ax=axs[1, 1])
        axs[1, 1].set_title('Boxplot group by class')
        axs[1, 1].set_ylabel('Gene Expression Values')
        axs[1, 1].set_xlabel('Different Status')
    else:
        # unlabelled data: histogram + boxplot on top, line plot spanning
        # the full bottom row
        gs = gridspec.GridSpec(2, 2)
        fig = plt.figure(figsize=(10, 8))
        ax1 = plt.subplot(gs[0, 0])
        ax2 = plt.subplot(gs[0, 1])
        ax3 = plt.subplot(gs[1, :])
        ax1.hist(df[feature])
        ax1.set_title('Histogram')
        ax1.set_ylabel('Frequency')
        ax1.set_xlabel('Gene Expression Values')
        df.boxplot(column=[feature], ax=ax2)
        ax2.set_title('Boxplot')
        ax2.set_ylabel('Gene Expression Values')
        ax2.set_xlabel('Gene Symbol')
        df.plot(kind='line', x=df.columns[0], y=df.columns[1], ax=ax3)
        ax3.get_legend().remove()
        ax3.set_ylabel('Gene Expression Values')
        ax3.set_xlabel('Sample ID')
        ax3.tick_params(labelrotation=45)
        ax3.set_title('Variation of gene expression values across samples')
    plt.tight_layout(rect=[0, 0.03, 1, 0.95])
    plt.subplots_adjust(hspace=0.6, wspace=0.25)
    fig.suptitle(file_name + ": " + feature, fontsize=16)
    # serialize the figure to an in-memory PNG, then base64-encode it
    pic_IObytes = io.BytesIO()
    fig.savefig(pic_IObytes, format='png')
    pic_IObytes.seek(0)
    pic_hash = base64.b64encode(pic_IObytes.read())
    pic_hash = pic_hash.decode("utf-8")
    # close the figure to free matplotlib memory between requests
    plt.close(fig)
return pic_hash | [
"flask.render_template",
"os.path.exists",
"flask.request.args.get",
"os.listdir",
"matplotlib.use",
"pathlib.Path.cwd",
"numpy.warnings.filterwarnings",
"io.BytesIO",
"matplotlib.pyplot.close",
"os.path.isfile",
"matplotlib.gridspec.GridSpec",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.... | [((206, 227), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (220, 227), False, 'import matplotlib\n'), ((412, 422), 'pathlib.Path.cwd', 'Path.cwd', ([], {}), '()\n', (420, 422), False, 'from pathlib import Path\n'), ((483, 538), 'flask.Blueprint', 'Blueprint', (['"""visualization"""', '__name__'], {'url_prefix': '"""/vis"""'}), "('visualization', __name__, url_prefix='/vis')\n", (492, 538), False, 'from flask import Blueprint, request\n'), ((633, 653), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (647, 653), False, 'import os\n'), ((873, 916), 'flask.render_template', 'render_template', (['"""visualization/index.html"""'], {}), "('visualization/index.html')\n", (888, 916), False, 'from flask import render_template\n'), ((1007, 1041), 'flask.request.args.get', 'request.args.get', (['"""available_file"""'], {}), "('available_file')\n", (1023, 1041), False, 'from flask import Blueprint, request\n'), ((1294, 1329), 'flask.request.args.get', 'request.args.get', (['"""available_files"""'], {}), "('available_files')\n", (1310, 1329), False, 'from flask import Blueprint, request\n'), ((1344, 1371), 'flask.request.args.get', 'request.args.get', (['"""user_id"""'], {}), "('user_id')\n", (1360, 1371), False, 'from flask import Blueprint, request\n'), ((1747, 1783), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1773, 1783), True, 'import numpy as np\n'), ((3633, 3674), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {'rect': '[0, 0.03, 1, 0.95]'}), '(rect=[0, 0.03, 1, 0.95])\n', (3649, 3674), True, 'import matplotlib.pyplot as plt\n'), ((3679, 3723), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.6)', 'wspace': '(0.25)'}), '(hspace=0.6, wspace=0.25)\n', (3698, 3723), True, 'import matplotlib.pyplot as plt\n'), ((3802, 3814), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (3812, 3814), False, 'import io\n'), 
((3980, 3994), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (3989, 3994), True, 'import matplotlib.pyplot as plt\n'), ((790, 860), 'flask.render_template', 'render_template', (['"""visualization/index.html"""'], {'available_list': 'list_names'}), "('visualization/index.html', available_list=list_names)\n", (805, 860), False, 'from flask import render_template\n'), ((1835, 1870), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(10, 8)'}), '(2, 2, figsize=(10, 8))\n', (1847, 1870), True, 'import matplotlib.pyplot as plt\n'), ((2827, 2850), 'matplotlib.gridspec.GridSpec', 'gridspec.GridSpec', (['(2)', '(2)'], {}), '(2, 2)\n', (2844, 2850), True, 'import matplotlib.gridspec as gridspec\n'), ((2865, 2892), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 8)'}), '(figsize=(10, 8))\n', (2875, 2892), True, 'import matplotlib.pyplot as plt\n'), ((2908, 2929), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 0]'], {}), '(gs[0, 0])\n', (2919, 2929), True, 'import matplotlib.pyplot as plt\n'), ((2944, 2965), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[0, 1]'], {}), '(gs[0, 1])\n', (2955, 2965), True, 'import matplotlib.pyplot as plt\n'), ((2980, 3001), 'matplotlib.pyplot.subplot', 'plt.subplot', (['gs[1, :]'], {}), '(gs[1, :])\n', (2991, 3001), True, 'import matplotlib.pyplot as plt\n'), ((1056, 1083), 'flask.request.args.get', 'request.args.get', (['"""feature"""'], {}), "('feature')\n", (1072, 1083), False, 'from flask import Blueprint, request\n'), ((688, 704), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (698, 704), False, 'import os\n'), ((708, 732), 'os.path.isfile', 'os.path.isfile', (['(path / f)'], {}), '(path / f)\n', (722, 732), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 29 13:42:29 2018
@author: <NAME>
"""
import numpy as np
import cv2
# Frame-differencing demo: shows the live camera feed plus the absolute
# difference between consecutive frames, until 'q'/'Q' is pressed.
captura = cv2.VideoCapture(0)
isFirstFrame = True
# Creates the window where slider will be placed
cv2.namedWindow("Salida")
while True:
    isDisponible, fotograma = captura.read()
    if isDisponible:  # idiomatic truthiness instead of "== True"
        cv2.imshow('Camera', fotograma)
        # On the very first frame there is no previous frame yet: use black.
        if isFirstFrame:
            lastFotograma = np.zeros(fotograma.shape, np.uint8)
            isFirstFrame = False
        shader = cv2.absdiff(lastFotograma, fotograma)
        cv2.imshow('Last', lastFotograma)
        cv2.imshow('Actual', fotograma)
        cv2.imshow('Shader', shader)
        lastFotograma = fotograma
    else:
        print('Camera not available')
    # Wait up to 10 ms for a key press (the original comment said 25 ms)
    wait = 0xFF & cv2.waitKey(10)
    if wait in (ord('q'), ord('Q')):
        print('Here we go')
        break
captura.release()
cv2.destroyAllWindows()
| [
"cv2.imshow",
"numpy.zeros",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"cv2.waitKey",
"cv2.namedWindow",
"cv2.absdiff"
] | [((127, 146), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (143, 146), False, 'import cv2\n'), ((285, 310), 'cv2.namedWindow', 'cv2.namedWindow', (['"""Salida"""'], {}), "('Salida')\n", (300, 310), False, 'import cv2\n'), ((1135, 1158), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (1156, 1158), False, 'import cv2\n'), ((414, 445), 'cv2.imshow', 'cv2.imshow', (['"""Camera"""', 'fotograma'], {}), "('Camera', fotograma)\n", (424, 445), False, 'import cv2\n'), ((653, 690), 'cv2.absdiff', 'cv2.absdiff', (['lastFotograma', 'fotograma'], {}), '(lastFotograma, fotograma)\n', (664, 690), False, 'import cv2\n'), ((764, 797), 'cv2.imshow', 'cv2.imshow', (['"""Last"""', 'lastFotograma'], {}), "('Last', lastFotograma)\n", (774, 797), False, 'import cv2\n'), ((806, 837), 'cv2.imshow', 'cv2.imshow', (['"""Actual"""', 'fotograma'], {}), "('Actual', fotograma)\n", (816, 837), False, 'import cv2\n'), ((846, 874), 'cv2.imshow', 'cv2.imshow', (['"""Shader"""', 'shader'], {}), "('Shader', shader)\n", (856, 874), False, 'import cv2\n'), ((1011, 1026), 'cv2.waitKey', 'cv2.waitKey', (['(10)'], {}), '(10)\n', (1022, 1026), False, 'import cv2\n'), ((549, 584), 'numpy.zeros', 'np.zeros', (['fotograma.shape', 'np.uint8'], {}), '(fotograma.shape, np.uint8)\n', (557, 584), True, 'import numpy as np\n')] |
import colorsys
import numpy as np
def random_colors(N, bright=True):
    """Generate N visually distinct RGB colors.

    Hues are evenly spaced around the HSV wheel at full saturation, then
    converted to RGB.

    Args:
        N: number of colors to produce.
        bright: full value (1.0) when True, dimmer (0.7) otherwise.

    Returns:
        List of (r, g, b) float tuples, each component in [0, 1].
    """
    value = 1.0 if bright else 0.7
    return [colorsys.hsv_to_rgb(step / N, 1, value) for step in range(N)]
def apply_mask(image, mask, color, alpha=0.5):
    """Blend *color* into *image* (in place) wherever ``mask == 1``.

    Each of the first three channels becomes
    ``image * (1 - alpha) + alpha * color * 255`` inside the mask and is
    left untouched elsewhere.  Returns the same array object.
    """
    for channel in range(3):
        blended = image[:, :, channel] * (1 - alpha) + alpha * color[channel] * 255
        image[:, :, channel] = np.where(mask == 1, blended, image[:, :, channel])
    return image
def apply_contour(image, mask, color, thickness=4):
    """Paint the outline of *mask* onto *image* in the given color.

    The mask interior is hollowed out by subtracting the product (AND) of
    two diagonally shifted copies of the mask, leaving a band roughly
    *thickness* pixels wide, which is then drawn fully opaque.
    """
    half = thickness // 2
    outline = mask.copy()
    shifted_a = outline[thickness:, thickness:]
    shifted_b = outline[:-thickness, :-thickness]
    outline[half:-half, half:-half] -= shifted_a * shifted_b
    # re-binarise: everything non-zero becomes part of the contour
    outline = np.where(outline == 0, 0., 1.)
    return apply_mask(image, outline, color, alpha=1.)
| [
"numpy.where",
"colorsys.hsv_to_rgb"
] | [((684, 713), 'numpy.where', 'np.where', (['(mask == 0)', '(0.0)', '(1.0)'], {}), '(mask == 0, 0.0, 1.0)\n', (692, 713), True, 'import numpy as np\n'), ((343, 437), 'numpy.where', 'np.where', (['(mask == 1)', '(image[:, :, i] * (1 - alpha) + alpha * color[i] * 255)', 'image[:, :, i]'], {}), '(mask == 1, image[:, :, i] * (1 - alpha) + alpha * color[i] * 255,\n image[:, :, i])\n', (351, 437), True, 'import numpy as np\n'), ((197, 220), 'colorsys.hsv_to_rgb', 'colorsys.hsv_to_rgb', (['*c'], {}), '(*c)\n', (216, 220), False, 'import colorsys\n')] |
from __future__ import annotations
from typing import TYPE_CHECKING, List
import numpy as np
if TYPE_CHECKING:
import napari
def make_sample_data() -> List[napari.types.LayerData]:
    """Simulate uneven illumination: a radial flatfield gradient plus a
    constant darkfield offset of 8, corrupted by Poisson shot noise."""
    np.random.seed(42)  # deterministic sample data
    stack_depth = 8
    side = 128
    # Radial distance from the image centre
    axis = np.linspace(-side // 2 + 1, side // 2, side)
    xx, yy = np.meshgrid(axis, axis)
    illum = (xx ** 2 + yy ** 2) ** (1 / 2) + 8
    illum_u8 = illum.astype(np.uint8)  # type: ignore
    # One Poisson draw per pixel per image, then restack to (depth, y, x)
    noisy = np.random.poisson(lam=illum_u8.flatten(), size=(stack_depth, side ** 2))
    noisy = noisy.transpose().reshape((side, side, stack_depth))
    noisy = np.moveaxis(noisy, -1, 0)
    noisy = 255 - noisy
    return [(noisy, {"name": "Uneven Illumination"}, "image")]
make_sample_data()
| [
"numpy.moveaxis",
"numpy.linspace",
"numpy.random.seed"
] | [((266, 284), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (280, 284), True, 'import numpy as np\n'), ((821, 847), 'numpy.moveaxis', 'np.moveaxis', (['images', '(-1)', '(0)'], {}), '(images, -1, 0)\n', (832, 847), True, 'import numpy as np\n'), ((373, 417), 'numpy.linspace', 'np.linspace', (['(-size // 2 + 1)', '(size // 2)', 'size'], {}), '(-size // 2 + 1, size // 2, size)\n', (384, 417), True, 'import numpy as np\n')] |
import argparse
import glob
import os
import random
import logging
import numpy as np
import math
from tqdm import tqdm
import time
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM
from transformers import DataCollatorForLanguageModeling
from transformers.optimization import AdamW, get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
import pytorch_lightning as ptl
from pytorch_lightning.logging.test_tube import TestTubeLogger
from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateLogger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# DONE: reproduce RoBERTa numbers on the Longformer corpus
# DONE: testing ddp single machine
# DONE: testing ddp multiple machines
# DONE: testing resume from checkpoint
# TODO: try on a TPU-pod
# TODO: run on beaker on ai2-server1/2
try:
import torch_xla.core.xla_model as xm
except ImportError:
XLA_AVAILABLE = False
else:
XLA_AVAILABLE = True
class MMapTextDataset(Dataset):
    """Memory-mapped dataset of token ids stored as ``np.uint16``.

    Each item is a fixed-size chunk of ``chunk_size - 2`` token ids read from
    the memmap, wrapped with the bos/eos token ids in ``__getitem__``. The
    class also hosts the static preprocessing pipeline (``raw_text_to_mmap``)
    that builds the memmap cache from raw text files.

    NOTE(review): the static preprocessing methods read a module-level
    ``args`` global (not a parameter of this class) and a class attribute
    ``MMapTextDataset.tokenizer`` set inside ``raw_text_to_mmap``.
    """

    def __init__(self, mmap_filename, chunk_size, bos_token_id, eos_token_id):
        # `chunk_size - 2` to reserve space for <s> and </s>
        self.num_instances = np.memmap(mmap_filename, mode='r', dtype=np.uint16).shape[0] // (chunk_size - 2)
        # defer loading the token_ids memmap until after the first __getitem__ call.
        # when spawning new processes for ddp, there is a hard limit in python < 3.8 that
        # pickle files need to be < 4GB. By waiting until after the first __getitem__ we
        # don't have to pickle the memmap
        self.token_ids = None
        self._mmap_filename = mmap_filename
        self._chunk_size = chunk_size
        self._bos_token_id = bos_token_id
        self._eos_token_id = eos_token_id

    def __len__(self):
        # Number of complete (chunk_size - 2)-token chunks in the memmap.
        return self.num_instances

    def __getitem__(self, i):
        if self.token_ids is None:
            # Lazy-load the memmap (see the pickling note in __init__).
            self.token_ids = np.memmap(self._mmap_filename, mode='r', dtype=np.uint16)
        from_index = i * (self._chunk_size - 2)
        to_index = (i + 1) * (self._chunk_size - 2)
        # Wrap the raw chunk with bos/eos so the final length is chunk_size.
        data = np.concatenate(([self._bos_token_id], self.token_ids[from_index:to_index], [self._eos_token_id]))
        return torch.tensor(data, dtype=torch.long)

    # ========================= preprocessing code ========================= #
    @staticmethod
    def _process_file(full_fname):
        "Step 1: tokenize an input text file then save token ids into `np.memmap` shards of size `args.shard_size`"
        fname = full_fname.split('/')[-1]
        # A per-file log acts as a "done" marker so preprocessing can resume.
        log_filename = f'{args.input_dir}/logs-{args.shard_size}/{fname}.log'
        if os.path.isfile(log_filename):
            logging.info(f'Skipping {full_fname} ...')
            return # log file already exists. Skip current file.
        logging.info(f'Processing {full_fname} ...')
        with open(full_fname, 'r') as fin:
            token_list = []
            shard_count = 0
            tokens_count = 0
            # Closure over token_list/fname/shard_count; it mutates token_list
            # (may append a trailing sep) but never rebinds the outer names —
            # rebinding/reset happens in the loop below.
            def _write_shard():
                if len(token_list) == 0:
                    return
                if token_list[-1] != MMapTextDataset.tokenizer.sep_token_id: # handle a rare case
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
                shared_filename = f'{args.input_dir}/shards-{args.shard_size}/{fname}-{shard_count}.bin'
                logging.info(f'Writing {len(token_list)} tokens to shared {shared_filename}')
                fp = np.memmap(shared_filename, dtype=np.uint16, mode='w+', shape=len(token_list))
                fp[:] = token_list[:]
                del fp # flush and close file
            for line in tqdm(fin):
                line = line.strip()
                if line == '': # drop empty lines
                    continue
                tokens = MMapTextDataset.tokenizer.encode(line, add_special_tokens=False) # `__getitem__` adds special tokens
                token_list.extend(tokens)
                if len(token_list) > args.shard_size:
                    _write_shard()
                    tokens_count += len(token_list)
                    token_list = []
                    shard_count += 1
                else:
                    # Separate documents/lines with the sep token inside a shard.
                    token_list.append(MMapTextDataset.tokenizer.sep_token_id)
            # Flush whatever is left in the final (possibly partial) shard.
            _write_shard()
            tokens_count += len(token_list)
        with open(log_filename, 'w') as f:
            f.write(f'Generated {tokens_count} tokens in {shard_count + 1} shards')

    @staticmethod
    def _combine_shards(output_fname, shards_list):
        "Step 2: combining memmap shards into one `train.bin` or `val.bin` file"
        # First pass: compute the total token count to size the output memmap.
        total_size = 0
        for filename in shards_list:
            total_size += np.memmap(filename, mode='r', dtype=np.uint16).shape[0]
        logging.info(f'Writing {total_size} tokens to {output_fname}')
        all_token_ids = np.empty(total_size, dtype=np.uint16)
        last_token_index = 0
        # Second pass: copy each shard into the in-memory buffer sequentially.
        for filename in tqdm(shards_list):
            shared = np.memmap(filename, mode='r', dtype=np.uint16)
            all_token_ids[last_token_index:last_token_index+len(shared)] = shared[:]
            last_token_index += len(shared)
        fp = np.memmap(output_fname, dtype=np.uint16, mode='w+', shape=total_size)
        fp[:] = all_token_ids[:]
        del fp  # flush and close the output memmap

    @staticmethod
    def raw_text_to_mmap(args):
        """This is the main preprocessing function. It processes all the text files in `args.input_dir` and
        outputs two np.memmap files, one for training and one for validation with ratio `args.train_dev_split`.
        Processing each input file involves tokenizing it, sharding it into shards of size `args.shard_size`,
        then writing each shard as an np.memmap file. The stream of tokens in the memmap file represents documents
        separated with `tokenizer.sep_token`. In `__getitem__`, the `tokenizer.bos_token` and `tokenizer.eos_token`
        are added. The reason for not adding them at preprocessing time is to allow different sequence lengths
        later on. Notice that this is the "FULL-SENTENCES" setting in the RoBERTa paper, Table2.
        """
        MMapTextDataset.tokenizer = AutoTokenizer.from_pretrained(args.tokenizer, use_fast=True)
        assert len(MMapTextDataset.tokenizer) < 65535  # will use uint16 to store token ids
        all_files = glob.glob(f'{args.input_dir}/*.txt')
        if os.path.exists(f'{args.input_dir}/cache/train.bin') and os.path.exists(f'{args.input_dir}/cache/val.bin'):
            logger.info("Cache already exists. Remove the cache directory to regenerate")
            return
        # Create working directories; FileExistsError means a previous run
        # already made them, which is fine (preprocessing is resumable).
        try:
            os.mkdir(f'{args.input_dir}/cache/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/shards-{args.shard_size}/')
        except FileExistsError:
            pass
        try:
            os.mkdir(f'{args.input_dir}/logs-{args.shard_size}/')  # log progrss to be able to resume
        except FileExistsError:
            pass
        # STEP1: tokenizing and saving to shards
        if args.num_preprocessing_workers > 1:
            from multiprocessing.pool import Pool
            with Pool(args.num_preprocessing_workers) as p:
                list(tqdm(p.imap(MMapTextDataset._process_file, all_files), total=len(all_files)))
        else:
            [MMapTextDataset._process_file(f) for f in tqdm(all_files)]
        # STEP2: shuffling shards and combining them into train.bin and val.bin files
        all_shards = glob.glob(f'{args.input_dir}/shards-{args.shard_size}/*.bin')
        random.shuffle(all_shards)  # shuffling based on shards not individual lines
        val_shards_count = int(args.train_dev_split * len(all_shards))
        val_shards = all_shards[:val_shards_count]
        train_shards = all_shards[val_shards_count:]
        # TODO: if MMapTextDataset._combining_shards is very slow for large files, it can be skipped but we nned to
        # update the dataset to read from multiple shards directly
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/val.bin', val_shards)
        MMapTextDataset._combine_shards(f'{args.input_dir}/cache/train.bin', train_shards)
        # The tokenizer is only needed during preprocessing; free it here.
        del MMapTextDataset.tokenizer
# ========================= end preprocessing code ========================= #
class Pretrainer(ptl.LightningModule):
    """PyTorch-Lightning module for masked-language-model pretraining.

    Wraps a HuggingFace ``AutoModelForMaskedLM``, builds MLM batches with
    ``DataCollatorForLanguageModeling``, and implements the PTL hooks for
    training, validation, optimization, and data loading.
    """

    def __init__(self, hparams):
        super().__init__()
        self.args = hparams
        self.hparams = self.args
        # NOTE(review): uses the module-level `args` global here rather than
        # `hparams`/`self.args` — presumably the same namespace; confirm.
        self.model = AutoModelForMaskedLM.from_pretrained(args.model)
        self.config = self.model.config
        tokenizer = AutoTokenizer.from_pretrained(args.tokenizer)
        self.pad_token_id = tokenizer.pad_token_id
        self.eos_token_id = tokenizer.eos_token_id
        self.bos_token_id = tokenizer.bos_token_id
        logger.info(f'Creating dataset cache from dir {self.args.input_dir}. This could be slow the first time.')
        MMapTextDataset.raw_text_to_mmap(args)
        # TODO: add support for other objective functions (whole word masking, BART objectives)
        self.data_collator = DataCollatorForLanguageModeling(
            tokenizer=tokenizer, mlm=True, mlm_probability=self.args.mlm_prob
        )
        # Timestamp of the previous training step; 0 means "not started yet".
        self.start_time = 0

    def to(self, *args, **kwargs):
        # Sanity check: moving devices must not change the parameter count.
        param_count_before_to = len(list(self.parameters()))
        super().to(*args, **kwargs)
        if self.trainer.use_tpu:
            # need to re-tie the weights after moving to XLA!
            self.model.tie_weights()
            if 'roberta' in self.args.model:
                self.model.lm_head.bias = self.model.lm_head.decoder.bias
        param_count_after_to = len(list(self.parameters()))
        assert param_count_before_to == param_count_after_to

    def forward(self, input_ids=None, labels=None):
        """Run the MLM forward pass and return only the loss."""
        # get the padding mask - 1 for NOT masked, 0 for MASKED/PAD
        attention_mask = (input_ids != self.pad_token_id).int()
        # output is loss, prediction_scores, hidden_states
        output = self.model(input_ids=input_ids, attention_mask=attention_mask, labels=labels)
        return output[0]  # loss

    def training_step(self, batch, batch_nb):
        """One optimization step; returns the loss plus tensorboard metrics."""
        loss = self(**batch)
        input_ids = batch['input_ids']
        tensorboard_logs = {
            'input_size': input_ids.numel(),
            'mlm_loss': loss,
            'mlm_bpc': loss/math.log(2),
            'mlm_perplexity': torch.exp(loss),
            'token_per_step': input_ids.numel() * self.args.grad_accum * self.trainer.world_size,
        }
        if self.start_time != 0:
            # Wall-clock time since the previous step (skipped on the first step).
            elapsed_time = time.time() - self.start_time
            tensorboard_logs['second_per_batch'] = elapsed_time
        self.start_time = time.time()
        if self.on_gpu:
            # GPU memory in GiB.
            tensorboard_logs['memory'] = torch.cuda.memory_allocated(loss.device) / 1024 ** 3
        return {'loss': loss, 'log': tensorboard_logs}

    def validation_step(self, batch, batch_nb):
        # TODO: log how long evaluation takes
        self.start_time = 0  # reset training_step timer
        loss = self(**batch)
        tensorboard_logs = {
            'val_mlm_loss': loss.detach(),
        }
        return {'val_loss': tensorboard_logs["val_mlm_loss"], 'log': tensorboard_logs}

    def validation_epoch_end(self, outputs):
        """Average per-batch validation losses, reducing across workers for ddp/tpu."""
        avg_loss = torch.stack([x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in x['log']]).mean()
        if self.use_ddp:
            # TODO: PTL is already doing this. Is it still needed here?
            # https://github.com/PyTorchLightning/pytorch-lightning/blob/0.8.5/pytorch_lightning/metrics/converters.py#L251
            torch.distributed.all_reduce(avg_loss, op=torch.distributed.ReduceOp.SUM)
            avg_loss /= torch.distributed.get_world_size()
        elif self.use_tpu:
            avg_loss = xm.all_reduce(xm.REDUCE_SUM, avg_loss) / xm.xrt_world_size()
        logs = {'val_mlm_loss': avg_loss}
        return {'log': logs, 'progress_bar': logs, "val_loss": avg_loss}

    def configure_optimizers(self):
        """AdamW with linear warmup/decay; weight decay excludes bias/LayerNorm."""
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay) and p.requires_grad],
                "weight_decay": self.args.weight_decay,
            },
            {
                "params": [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay) and p.requires_grad],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=self.args.train_steps
        )
        # "interval": "step" => scheduler advances every optimizer step, not every epoch.
        return [optimizer], [{"scheduler": scheduler, "interval": "step"}]

    def _get_loader(self, fname, is_train):
        """Build a DataLoader over `fname`, choosing a sampler for ddp/tpu/local runs."""
        dataset = MMapTextDataset(fname, chunk_size=self.args.seqlen,
                                  bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id)
        # TODO: consider `replace_sampler_ddp=True` and removing the following if statement
        if self.trainer.use_ddp:
            sampler = torch.utils.data.distributed.DistributedSampler(dataset, shuffle=is_train)
            shuffle = False
        elif self.trainer.use_tpu:
            sampler = torch.utils.data.distributed.DistributedSampler(
                dataset,
                num_replicas=xm.xrt_world_size(),
                rank=xm.get_ordinal(),
                shuffle=is_train,
            )
            shuffle = False
        else:
            sampler = None
            shuffle = is_train
        loader = DataLoader(
            dataset,
            batch_size=self.args.batch_size,
            shuffle=shuffle,
            sampler=sampler,
            num_workers=self.args.num_workers,
            collate_fn=self.data_collator,
            drop_last=is_train,
        )
        return loader

    def train_dataloader(self):
        """DataLoader over the preprocessed training memmap."""
        return self._get_loader(f'{self.args.input_dir}/cache/train.bin', True)

    def val_dataloader(self):
        """DataLoader over the preprocessed validation memmap."""
        return self._get_loader(f'{self.args.input_dir}/cache/val.bin', False)

    def grad_norm(self, norm_type):
        # Override PTL `grad_norm` function to only return `total_grad_norm` instead norms of individual params
        # TODO: grad_norm reporting needs to take fp16 loss scale into account
        parameters = [p for p in self.parameters() if p.grad is not None]
        # NOTE(review): `parameters[0].device` raises IndexError on an empty
        # list before the `if parameters` guard below can help — confirm that
        # PTL only calls this after a backward pass has populated grads.
        device = parameters[0].device
        total_norm = torch.zeros([], device=device if parameters else None)
        norm_type = float(norm_type)
        for p in parameters:
            param_norm = p.grad.data.pow(norm_type).sum()
            total_norm.add_(param_norm)
        total_norm = (total_norm ** (1.0 / norm_type))
        return {'total_grad_norm': total_norm}

    @staticmethod
    def add_args(parser):
        """Register all command-line arguments on `parser` and return it."""
        parser.add_argument("--seed", type=int, default=3)
        # Dataset. Some of these params are only useful when generating the dataset cache
        parser.add_argument("--input_dir", type=str, default='/net/nfs.corp/s2-research/beltagy/longformer/data/')
        # Used only at the preprocessing phase
        parser.add_argument("--train_dev_split", type=float, default=0.05)
        parser.add_argument("--shard_size", type=int, default=1024 ** 3 // 4)  # 250MB
        parser.add_argument("--num_preprocessing_workers", type=int, default=1)
        # Used only at the training phase
        parser.add_argument("--seqlen", type=int, default=512)
        parser.add_argument("--mlm_prob", type=float, default=0.15)
        # HF model loading
        parser.add_argument("--tokenizer", type=str, default='roberta-base')
        parser.add_argument("--model", type=str, default='roberta-base')
        # Checkpointing and logging
        parser.add_argument("--save_dir", type=str, default='/runs/')
        parser.add_argument("--save_prefix", type=str, default='test',
                            help="path of output directory is --save_dir/--save_prefix")
        parser.add_argument("--resume", type=str, default=None,  # It is better to use a different output dir.
                            help="Path to a checkpoint to load model weights and training state. It overwrites args")
        parser.add_argument("--resume_model_only", type=str, default=None,
                            help="Path to a checkpoint to load model weights but not training state")
        parser.add_argument("--log_rate", type=int, default=10)
        parser.add_argument("--disable_checkpointing", type=bool, default=False)
        # Training hyperparams
        parser.add_argument("--lr", type=float, default=1e-5)
        parser.add_argument("--train_steps", type=int, default=3000, help='# training grad. updates')
        parser.add_argument("--warmup_steps", type=int, default=1000, help='# warmup grad. updates')
        parser.add_argument("--val_every", type=int, default=1000, help='# training grad. updates between evaluations')
        parser.add_argument("--val_batches", type=int, default=1000, help='# evaluation **batches**')
        parser.add_argument("--weight_decay", type=float, default=0.01)
        parser.add_argument("--adam_epsilon", type=float, default=1e-6)
        parser.add_argument("--grad_clip", type=float, default=0)  # TODO: test this with fp16. Likely not working
        # RoBERTa's tokens_per_step = 2^18 = 512(seqlen) x 1(gpu_count) x 32(batch_size) x 16(grad_accum)
        parser.add_argument("--batch_size", type=int, default=32)
        parser.add_argument("--grad_accum", type=int, default=1)
        # Compute resources
        parser.add_argument("--fp16", type=bool, default=False)
        parser.add_argument("--num_workers", type=int, default=0)
        parser.add_argument("--gpu_count", type=int, default=1,  # `--gpus` is reserved for internal use by PTL
                            help="Number of gpus. This respects `CUDA_VISIBLE_DEVICES`")
        # For multi-node training, use the PyTorch launch script. The script and instructions can be found here:
        # https://github.com/pytorch/pytorch/blob/master/torch/distributed/launch.py.
        # To run PTL in a mode compatible with the launch script, two things are needed:
        #   - pass the argument `--use_env` to `torch.distributed.launch`
        #   - make sure `--nproc_per_node` matches `--gpu_count` and `--nnodes` matches `--node_count`.
        # For example, to run on 2 nodes, 3 gpus each, the command line on node rank 1 would be like:
        #   >>>> python -m torch.distributed.launch  \
        #               --use_env  --nnodes 2  --nproc_per_node 3  \
        #               --node_rank 1  --master_addr s2-server4  --master_port 12343  \
        #               scripts/pretrain.py  \
        #               --gpu_count 2  --node_count 2  \
        #               --input_dir my_data_dir --save_prefix test_multinode
        parser.add_argument("--node_count", type=int, default=1,
                            help="Number of nodes. It needs to match --nnodes of torch.distributed.launch")
        parser.add_argument("--tpu_core_count", type=int, default=None)
        return parser
def main(args):
    """Seed all RNGs, build the Pretrainer and a PTL Trainer, and run training.

    Args:
        args: parsed argparse namespace from `Pretrainer.add_args`. Note that
            the same namespace is also read as a module-level global by
            `MMapTextDataset` and `Pretrainer`.
    """
    # Seed every RNG source with distinct (but deterministic) values.
    random.seed(args.seed * 10)
    np.random.seed(args.seed * 100)
    torch.manual_seed(args.seed * 1000)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed * 10000)

    if args.resume_model_only is not None:
        # Restore model weights only; optimizer/step state is not resumed
        # (use --resume for full training-state resumption).
        pretrainer = Pretrainer.load_from_checkpoint(args.resume_model_only, args)
    else:
        pretrainer = Pretrainer(args)

    # Tensorboard-style logger used by the trainer; certain return values from
    # the model's step methods are automatically logged to it.
    # Renamed from `logger` so it no longer shadows the module-level
    # `logging.getLogger(__name__)` logger.
    tt_logger = TestTubeLogger(
        save_dir=args.save_dir,
        name=args.save_prefix,
        version=0  # always use version=0
    )

    checkpoint_callback = ModelCheckpoint(
        # model saved to filepath/prefix_....
        filepath=os.path.join(args.save_dir, args.save_prefix, 'checkpoint'),
        prefix='',
        save_top_k=1,
        save_last=True,
        verbose=True,
        monitor='val_loss',
        mode='min',
        period=-1,  # to allow multiple checkpoints per epoch
    )

    args.val_every *= args.grad_accum  # PTL is expecting number of batches_per_gpu

    trainer = ptl.Trainer(
        gpus=args.gpu_count,
        num_nodes=args.node_count,
        num_tpu_cores=args.tpu_core_count,
        distributed_backend='ddp' if (args.gpu_count > 1 or args.node_count > 1) else None,
        replace_sampler_ddp=False,
        track_grad_norm=2,
        max_epochs=10000, min_epochs=0, max_steps=args.train_steps,  # run for many epochs, but stop after max_steps
        val_check_interval=args.val_every, limit_val_batches=args.val_batches,
        early_stop_callback=None,
        row_log_interval=args.log_rate,
        progress_bar_refresh_rate=args.log_rate,
        logger=tt_logger,
        checkpoint_callback=checkpoint_callback if not args.disable_checkpointing else None,
        accumulate_grad_batches=args.grad_accum,
        resume_from_checkpoint=args.resume,
        gradient_clip_val=args.grad_clip,
        precision=16 if args.fp16 else 32, amp_level='O2',
        num_sanity_val_steps=2,
        callbacks=[LearningRateLogger()],
    )
    trainer.fit(pretrainer)
if __name__ == "__main__":
parser = Pretrainer.add_args(argparse.ArgumentParser(description="pretrain"))
args = parser.parse_args()
main(args)
| [
"logging.getLogger",
"torch.exp",
"math.log",
"torch.utils.data.distributed.DistributedSampler",
"torch.cuda.is_available",
"transformers.AutoTokenizer.from_pretrained",
"torch_xla.core.xla_model.all_reduce",
"logging.info",
"pytorch_lightning.logging.test_tube.TestTubeLogger",
"transformers.optim... | [((563, 602), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (582, 602), False, 'import logging\n'), ((612, 639), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (629, 639), False, 'import logging\n'), ((19355, 19382), 'random.seed', 'random.seed', (['(args.seed * 10)'], {}), '(args.seed * 10)\n', (19366, 19382), False, 'import random\n'), ((19387, 19418), 'numpy.random.seed', 'np.random.seed', (['(args.seed * 100)'], {}), '(args.seed * 100)\n', (19401, 19418), True, 'import numpy as np\n'), ((19423, 19458), 'torch.manual_seed', 'torch.manual_seed', (['(args.seed * 1000)'], {}), '(args.seed * 1000)\n', (19440, 19458), False, 'import torch\n'), ((19466, 19491), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (19489, 19491), False, 'import torch\n'), ((19898, 19970), 'pytorch_lightning.logging.test_tube.TestTubeLogger', 'TestTubeLogger', ([], {'save_dir': 'args.save_dir', 'name': 'args.save_prefix', 'version': '(0)'}), '(save_dir=args.save_dir, name=args.save_prefix, version=0)\n', (19912, 19970), False, 'from pytorch_lightning.logging.test_tube import TestTubeLogger\n'), ((2114, 2215), 'numpy.concatenate', 'np.concatenate', (['([self._bos_token_id], self.token_ids[from_index:to_index], [self.\n _eos_token_id])'], {}), '(([self._bos_token_id], self.token_ids[from_index:to_index],\n [self._eos_token_id]))\n', (2128, 2215), True, 'import numpy as np\n'), ((2227, 2263), 'torch.tensor', 'torch.tensor', (['data'], {'dtype': 'torch.long'}), '(data, dtype=torch.long)\n', (2239, 2263), False, 'import torch\n'), ((2644, 2672), 'os.path.isfile', 'os.path.isfile', (['log_filename'], {}), '(log_filename)\n', (2658, 2672), False, 'import os\n'), ((2804, 2848), 'logging.info', 'logging.info', (['f"""Processing {full_fname} ..."""'], {}), "(f'Processing {full_fname} ...')\n", (2816, 2848), False, 'import logging\n'), ((4772, 4834), 'logging.info', 
'logging.info', (['f"""Writing {total_size} tokens to {output_fname}"""'], {}), "(f'Writing {total_size} tokens to {output_fname}')\n", (4784, 4834), False, 'import logging\n'), ((4859, 4896), 'numpy.empty', 'np.empty', (['total_size'], {'dtype': 'np.uint16'}), '(total_size, dtype=np.uint16)\n', (4867, 4896), True, 'import numpy as np\n'), ((4950, 4967), 'tqdm.tqdm', 'tqdm', (['shards_list'], {}), '(shards_list)\n', (4954, 4967), False, 'from tqdm import tqdm\n'), ((5179, 5248), 'numpy.memmap', 'np.memmap', (['output_fname'], {'dtype': 'np.uint16', 'mode': '"""w+"""', 'shape': 'total_size'}), "(output_fname, dtype=np.uint16, mode='w+', shape=total_size)\n", (5188, 5248), True, 'import numpy as np\n'), ((6165, 6225), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.tokenizer'], {'use_fast': '(True)'}), '(args.tokenizer, use_fast=True)\n', (6194, 6225), False, 'from transformers import AutoTokenizer, AutoModelForMaskedLM\n'), ((6338, 6374), 'glob.glob', 'glob.glob', (['f"""{args.input_dir}/*.txt"""'], {}), "(f'{args.input_dir}/*.txt')\n", (6347, 6374), False, 'import glob\n'), ((7508, 7569), 'glob.glob', 'glob.glob', (['f"""{args.input_dir}/shards-{args.shard_size}/*.bin"""'], {}), "(f'{args.input_dir}/shards-{args.shard_size}/*.bin')\n", (7517, 7569), False, 'import glob\n'), ((7578, 7604), 'random.shuffle', 'random.shuffle', (['all_shards'], {}), '(all_shards)\n', (7592, 7604), False, 'import random\n'), ((8499, 8547), 'transformers.AutoModelForMaskedLM.from_pretrained', 'AutoModelForMaskedLM.from_pretrained', (['args.model'], {}), '(args.model)\n', (8535, 8547), False, 'from transformers import AutoTokenizer, AutoModelForMaskedLM\n'), ((8608, 8653), 'transformers.AutoTokenizer.from_pretrained', 'AutoTokenizer.from_pretrained', (['args.tokenizer'], {}), '(args.tokenizer)\n', (8637, 8653), False, 'from transformers import AutoTokenizer, AutoModelForMaskedLM\n'), ((9095, 9197), 'transformers.DataCollatorForLanguageModeling', 
'DataCollatorForLanguageModeling', ([], {'tokenizer': 'tokenizer', 'mlm': '(True)', 'mlm_probability': 'self.args.mlm_prob'}), '(tokenizer=tokenizer, mlm=True,\n mlm_probability=self.args.mlm_prob)\n', (9126, 9197), False, 'from transformers import DataCollatorForLanguageModeling\n'), ((10717, 10728), 'time.time', 'time.time', ([], {}), '()\n', (10726, 10728), False, 'import time\n'), ((12561, 12646), 'transformers.optimization.AdamW', 'AdamW', (['optimizer_grouped_parameters'], {'lr': 'self.args.lr', 'eps': 'self.args.adam_epsilon'}), '(optimizer_grouped_parameters, lr=self.args.lr, eps=self.args.adam_epsilon\n )\n', (12566, 12646), False, 'from transformers.optimization import AdamW, get_linear_schedule_with_warmup\n'), ((12662, 12792), 'transformers.optimization.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'self.args.warmup_steps', 'num_training_steps': 'self.args.train_steps'}), '(optimizer, num_warmup_steps=self.args.\n warmup_steps, num_training_steps=self.args.train_steps)\n', (12693, 12792), False, 'from transformers.optimization import AdamW, get_linear_schedule_with_warmup\n'), ((13735, 13916), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'self.args.batch_size', 'shuffle': 'shuffle', 'sampler': 'sampler', 'num_workers': 'self.args.num_workers', 'collate_fn': 'self.data_collator', 'drop_last': 'is_train'}), '(dataset, batch_size=self.args.batch_size, shuffle=shuffle,\n sampler=sampler, num_workers=self.args.num_workers, collate_fn=self.\n data_collator, drop_last=is_train)\n', (13745, 13916), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((14637, 14691), 'torch.zeros', 'torch.zeros', (['[]'], {'device': '(device if parameters else None)'}), '([], device=device if parameters else None)\n', (14648, 14691), False, 'import torch\n'), ((19501, 19546), 'torch.cuda.manual_seed_all', 'torch.cuda.manual_seed_all', (['(args.seed * 10000)'], {}), '(args.seed * 
10000)\n', (19527, 19546), False, 'import torch\n'), ((21568, 21615), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""pretrain"""'}), "(description='pretrain')\n", (21591, 21615), False, 'import argparse\n'), ((1941, 1998), 'numpy.memmap', 'np.memmap', (['self._mmap_filename'], {'mode': '"""r"""', 'dtype': 'np.uint16'}), "(self._mmap_filename, mode='r', dtype=np.uint16)\n", (1950, 1998), True, 'import numpy as np\n'), ((2686, 2728), 'logging.info', 'logging.info', (['f"""Skipping {full_fname} ..."""'], {}), "(f'Skipping {full_fname} ...')\n", (2698, 2728), False, 'import logging\n'), ((3662, 3671), 'tqdm.tqdm', 'tqdm', (['fin'], {}), '(fin)\n', (3666, 3671), False, 'from tqdm import tqdm\n'), ((4990, 5036), 'numpy.memmap', 'np.memmap', (['filename'], {'mode': '"""r"""', 'dtype': 'np.uint16'}), "(filename, mode='r', dtype=np.uint16)\n", (4999, 5036), True, 'import numpy as np\n'), ((6387, 6438), 'os.path.exists', 'os.path.exists', (['f"""{args.input_dir}/cache/train.bin"""'], {}), "(f'{args.input_dir}/cache/train.bin')\n", (6401, 6438), False, 'import os\n'), ((6443, 6492), 'os.path.exists', 'os.path.exists', (['f"""{args.input_dir}/cache/val.bin"""'], {}), "(f'{args.input_dir}/cache/val.bin')\n", (6457, 6492), False, 'import os\n'), ((6628, 6664), 'os.mkdir', 'os.mkdir', (['f"""{args.input_dir}/cache/"""'], {}), "(f'{args.input_dir}/cache/')\n", (6636, 6664), False, 'import os\n'), ((6739, 6794), 'os.mkdir', 'os.mkdir', (['f"""{args.input_dir}/shards-{args.shard_size}/"""'], {}), "(f'{args.input_dir}/shards-{args.shard_size}/')\n", (6747, 6794), False, 'import os\n'), ((6869, 6922), 'os.mkdir', 'os.mkdir', (['f"""{args.input_dir}/logs-{args.shard_size}/"""'], {}), "(f'{args.input_dir}/logs-{args.shard_size}/')\n", (6877, 6922), False, 'import os\n'), ((10412, 10427), 'torch.exp', 'torch.exp', (['loss'], {}), '(loss)\n', (10421, 10427), False, 'import torch\n'), ((11645, 11718), 'torch.distributed.all_reduce', 
'torch.distributed.all_reduce', (['avg_loss'], {'op': 'torch.distributed.ReduceOp.SUM'}), '(avg_loss, op=torch.distributed.ReduceOp.SUM)\n', (11673, 11718), False, 'import torch\n'), ((11743, 11777), 'torch.distributed.get_world_size', 'torch.distributed.get_world_size', ([], {}), '()\n', (11775, 11777), False, 'import torch\n'), ((13246, 13320), 'torch.utils.data.distributed.DistributedSampler', 'torch.utils.data.distributed.DistributedSampler', (['dataset'], {'shuffle': 'is_train'}), '(dataset, shuffle=is_train)\n', (13293, 13320), False, 'import torch\n'), ((20132, 20191), 'os.path.join', 'os.path.join', (['args.save_dir', 'args.save_prefix', '"""checkpoint"""'], {}), "(args.save_dir, args.save_prefix, 'checkpoint')\n", (20144, 20191), False, 'import os\n'), ((7172, 7208), 'multiprocessing.pool.Pool', 'Pool', (['args.num_preprocessing_workers'], {}), '(args.num_preprocessing_workers)\n', (7176, 7208), False, 'from multiprocessing.pool import Pool\n'), ((10369, 10380), 'math.log', 'math.log', (['(2)'], {}), '(2)\n', (10377, 10380), False, 'import math\n'), ((10597, 10608), 'time.time', 'time.time', ([], {}), '()\n', (10606, 10608), False, 'import time\n'), ((10794, 10834), 'torch.cuda.memory_allocated', 'torch.cuda.memory_allocated', (['loss.device'], {}), '(loss.device)\n', (10821, 10834), False, 'import torch\n'), ((11318, 11408), 'torch.stack', 'torch.stack', (["[x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in x['log']]"], {}), "([x['log']['val_mlm_loss'] for x in outputs if 'val_mlm_loss' in\n x['log']])\n", (11329, 11408), False, 'import torch\n'), ((21449, 21469), 'pytorch_lightning.callbacks.LearningRateLogger', 'LearningRateLogger', ([], {}), '()\n', (21467, 21469), False, 'from pytorch_lightning.callbacks import ModelCheckpoint, LearningRateLogger\n'), ((1205, 1256), 'numpy.memmap', 'np.memmap', (['mmap_filename'], {'mode': '"""r"""', 'dtype': 'np.uint16'}), "(mmap_filename, mode='r', dtype=np.uint16)\n", (1214, 1256), True, 'import numpy 
as np\n'), ((4708, 4754), 'numpy.memmap', 'np.memmap', (['filename'], {'mode': '"""r"""', 'dtype': 'np.uint16'}), "(filename, mode='r', dtype=np.uint16)\n", (4717, 4754), True, 'import numpy as np\n'), ((7383, 7398), 'tqdm.tqdm', 'tqdm', (['all_files'], {}), '(all_files)\n', (7387, 7398), False, 'from tqdm import tqdm\n'), ((11828, 11866), 'torch_xla.core.xla_model.all_reduce', 'xm.all_reduce', (['xm.REDUCE_SUM', 'avg_loss'], {}), '(xm.REDUCE_SUM, avg_loss)\n', (11841, 11866), True, 'import torch_xla.core.xla_model as xm\n'), ((11869, 11888), 'torch_xla.core.xla_model.xrt_world_size', 'xm.xrt_world_size', ([], {}), '()\n', (11886, 11888), True, 'import torch_xla.core.xla_model as xm\n'), ((13509, 13528), 'torch_xla.core.xla_model.xrt_world_size', 'xm.xrt_world_size', ([], {}), '()\n', (13526, 13528), True, 'import torch_xla.core.xla_model as xm\n'), ((13551, 13567), 'torch_xla.core.xla_model.get_ordinal', 'xm.get_ordinal', ([], {}), '()\n', (13565, 13567), True, 'import torch_xla.core.xla_model as xm\n')] |
# ==================================================================================================
# A minimal example that renders a triangle mesh into a depth image using predefined perspective projection matrix
# Copyright 2021 <NAME>
#
# Please run script from repository root, i.e.:
# python3 ./tsdf_management/rendering_test.py
# ==================================================================================================
import sys
import open3d as o3d
import cv2
import numpy as np
import pytorch3d.renderer.cameras
import torch
import os
from pytorch3d.renderer.cameras import PerspectiveCameras
from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex
from pytorch3d.renderer.lighting import PointLights
from pytorch3d.structures.meshes import Meshes
from settings import Parameters, process_arguments
from data import camera
PROGRAM_EXIT_SUCCESS = 0
def main():
    """Render a triangle mesh to depth and shaded RGB images with PyTorch3D
    and compare against a reference depth frame loaded from disk.

    Loads a mesh and camera intrinsics from the paths configured in
    ``Parameters``, builds a ``PerspectiveCameras`` either from a hand-built
    NDC projection matrix (``use_direct_matrix_solution``) or from
    screen-space intrinsics, rasterizes the mesh on CUDA, then shows and
    writes the rendered depth/RGB images next to the source depth frame.

    Returns:
        PROGRAM_EXIT_SUCCESS (0) on completion.
    """
    use_direct_matrix_solution = True
    mesh: o3d.geometry.TriangleMesh = o3d.io.read_triangle_mesh(os.path.join(Parameters.path.output_directory.value, "mesh_000000_red_shorts.ply"))
    depth_intrinsics_path = os.path.join(Parameters.path.dataset_base_directory.value, "val/seq014/intrinsics.txt")
    torch_device = torch.device("cuda:0")
    vertices_numpy = np.array(mesh.vertices, dtype=np.float32)
    # fliplr swaps the color channel order (Open3D RGB -> the order the shader expects here)
    vertex_colors_numpy = np.fliplr(np.array(mesh.vertex_colors, dtype=np.float32)).copy()
    faces_numpy = np.array(mesh.triangles, dtype=np.int64)
    # unsqueeze(0) adds the batch dimension PyTorch3D structures expect
    vertices_torch = torch.from_numpy(vertices_numpy).cuda().unsqueeze(0)
    vertices_rgb = torch.from_numpy(vertex_colors_numpy).cuda().unsqueeze(0)
    textures = TexturesVertex(verts_features=vertices_rgb)
    faces_torch = torch.from_numpy(faces_numpy).cuda().unsqueeze(0)
    meshes_torch3d = Meshes(vertices_torch, faces_torch, textures)
    camera_translation = torch.zeros((1, 3), dtype=torch.float32, device=torch_device)
    if use_direct_matrix_solution:
        # TODO: see next TODO, after that bug is fixed, restore the true rotation identity
        # camera_rotation = torch.eye(3, dtype=torch.float32, device=torch_device).unsqueeze(0)
        # diag(-1, -1, 1): flips x and y — presumably compensating for the
        # screen-vs-NDC axis convention difference; TODO confirm once the
        # K-matrix bug below is fixed.
        camera_rotation = np.array([[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]], dtype=np.float32)
    else:
        camera_rotation = np.array([[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]], dtype=np.float32)
    # Pure ambient light so the vertex colors are rendered unshaded.
    lights = PointLights(ambient_color=((1.0, 1.0, 1.0),), diffuse_color=((0.0, 0.0, 0.0),),
                        specular_color=((0.0, 0.0, 0.0),), device=torch_device, location=[[0.0, 0.0, -3.0]])
    # region ===== CAMERA SETUP =====
    fx_screen, fy_screen, px_screen, py_screen = camera.load_intrinsic_matrix_entries_from_text_4x4_matrix(depth_intrinsics_path)
    image_width = 640
    image_height = 480
    half_image_width = image_width // 2
    half_image_height = image_height // 2
    if use_direct_matrix_solution:
        # Convert screen-space intrinsics to NDC units (scaled by half the
        # shorter image side, as PyTorch3D does for non-square images).
        fx = fx_screen / half_image_height
        fy = fy_screen / half_image_height
        px = - (px_screen - half_image_width) / half_image_height
        py = - (py_screen - half_image_height) / half_image_height
        # TODO: report PyTorch3D bug that forces us to use the other K
        # K = torch.tensor([[[fx, 0.0, px, 0.0],
        #                   [0.0, fy, py, 0.0],
        #                   [0.0, 0.0, 0.0, -1.0],
        #                   [0.0, 0.0, -1.0, 0.0]]], dtype=torch.float32)
        K = torch.tensor([[[fx, 0.0, px, 0.0],
                           [0.0, fy, py, 0.0],
                           [0.0, 0.0, 0.0, 1.0],
                           [0.0, 0.0, 1.0, 0.0]]], dtype=torch.float32)
        cameras: pytorch3d.renderer.cameras.PerspectiveCameras \
            = PerspectiveCameras(device=torch_device,
                                 R=camera_rotation,
                                 T=camera_translation,
                                 K=K)
        print(cameras.get_projection_transform().get_matrix())
    else:
        # Let PyTorch3D build the projection from screen-space entries instead.
        cameras: pytorch3d.renderer.cameras.PerspectiveCameras \
            = PerspectiveCameras(device=torch_device,
                                 R=camera_rotation,
                                 T=camera_translation,
                                 focal_length=[(fx_screen, fy_screen)],
                                 principal_point=[(px_screen - (half_image_width - half_image_height), py_screen)],
                                 image_size=[(image_height, image_height)])
        print(cameras.get_projection_transform().get_matrix())
    # endregion
    rasterization_settings = RasterizationSettings(image_size=(480, 640),
                                                  cull_backfaces=True,
                                                  cull_to_frustum=True,
                                                  z_clip_value=0.5,
                                                  faces_per_pixel=1)
    rasterizer = MeshRasterizer(cameras, raster_settings=rasterization_settings)
    renderer = MeshRenderer(
        rasterizer=MeshRasterizer(
            cameras=cameras,
            raster_settings=rasterization_settings
        ),
        shader=SoftPhongShader(
            device=torch_device,
            cameras=cameras,
            lights=lights
        )
    )
    # Rasterize once explicitly to get the z-buffer for the depth image.
    fragments = rasterizer.forward(meshes_torch3d)
    z_buffer = fragments.zbuf.cpu().numpy().reshape(480, 640, 1)
    rendered_depth = z_buffer
    # -1 marks pixels with no face coverage; map them to depth 0.
    rendered_depth[rendered_depth == -1.0] = 0.0
    # /4.0 maps the depth range into [0, 1] for visualization — presumably
    # assuming a ~4 m working range; matches the /4000 (mm) scaling below.
    rendered_depth /= 4.0
    rendered_depth_uint8 = (rendered_depth * 255).astype(np.uint8)
    depth_image_path = os.path.join(Parameters.path.dataset_base_directory.value, "val/seq014/depth/000000.png")
    depth_image = cv2.imread(depth_image_path, cv2.IMREAD_UNCHANGED).astype(np.float32) / 4000
    depth_image_uint8 = (depth_image * 255).astype(np.uint8)
    cv2.imshow("source depth", depth_image_uint8)
    cv2.waitKey()
    cv2.imwrite(os.path.join(Parameters.path.output_directory.value, "source_depth.png"), depth_image_uint8)
    images = renderer(meshes_torch3d)
    rendered_mesh = images[0, ..., :3].cpu().numpy()
    rendered_mesh_uint8 = (rendered_mesh * 255).astype(np.uint8)
    cv2.imshow("rendered mesh", rendered_mesh_uint8)
    cv2.waitKey()
    cv2.imwrite(os.path.join(Parameters.path.output_directory.value, "rendered_mesh.png"), rendered_mesh_uint8)
    cv2.imshow("rendered depth", rendered_depth_uint8)
    cv2.waitKey()
    cv2.imwrite(os.path.join(Parameters.path.output_directory.value, "rendered_depth.png"), rendered_depth_uint8)
    return PROGRAM_EXIT_SUCCESS
if __name__ == "__main__":
    # Apply command-line overrides to Parameters before rendering.
    process_arguments()
    sys.exit(main())
| [
"pytorch3d.renderer.mesh.MeshRasterizer",
"cv2.imread",
"pytorch3d.renderer.mesh.TexturesVertex",
"pytorch3d.renderer.mesh.SoftPhongShader",
"os.path.join",
"settings.process_arguments",
"torch.from_numpy",
"cv2.imshow",
"numpy.array",
"cv2.waitKey",
"data.camera.load_intrinsic_matrix_entries_fr... | [((1166, 1257), 'os.path.join', 'os.path.join', (['Parameters.path.dataset_base_directory.value', '"""val/seq014/intrinsics.txt"""'], {}), "(Parameters.path.dataset_base_directory.value,\n 'val/seq014/intrinsics.txt')\n", (1178, 1257), False, 'import os\n'), ((1274, 1296), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1286, 1296), False, 'import torch\n'), ((1319, 1360), 'numpy.array', 'np.array', (['mesh.vertices'], {'dtype': 'np.float32'}), '(mesh.vertices, dtype=np.float32)\n', (1327, 1360), True, 'import numpy as np\n'), ((1470, 1510), 'numpy.array', 'np.array', (['mesh.triangles'], {'dtype': 'np.int64'}), '(mesh.triangles, dtype=np.int64)\n', (1478, 1510), True, 'import numpy as np\n'), ((1678, 1721), 'pytorch3d.renderer.mesh.TexturesVertex', 'TexturesVertex', ([], {'verts_features': 'vertices_rgb'}), '(verts_features=vertices_rgb)\n', (1692, 1721), False, 'from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex\n'), ((1812, 1857), 'pytorch3d.structures.meshes.Meshes', 'Meshes', (['vertices_torch', 'faces_torch', 'textures'], {}), '(vertices_torch, faces_torch, textures)\n', (1818, 1857), False, 'from pytorch3d.structures.meshes import Meshes\n'), ((1884, 1945), 'torch.zeros', 'torch.zeros', (['(1, 3)'], {'dtype': 'torch.float32', 'device': 'torch_device'}), '((1, 3), dtype=torch.float32, device=torch_device)\n', (1895, 1945), False, 'import torch\n'), ((2377, 2551), 'pytorch3d.renderer.lighting.PointLights', 'PointLights', ([], {'ambient_color': '((1.0, 1.0, 1.0),)', 'diffuse_color': '((0.0, 0.0, 0.0),)', 'specular_color': '((0.0, 0.0, 0.0),)', 'device': 'torch_device', 'location': '[[0.0, 0.0, -3.0]]'}), '(ambient_color=((1.0, 1.0, 1.0),), diffuse_color=((0.0, 0.0, 0.0\n ),), specular_color=((0.0, 0.0, 0.0),), device=torch_device, location=[\n [0.0, 0.0, -3.0]])\n', (2388, 2551), False, 'from 
pytorch3d.renderer.lighting import PointLights\n'), ((2655, 2740), 'data.camera.load_intrinsic_matrix_entries_from_text_4x4_matrix', 'camera.load_intrinsic_matrix_entries_from_text_4x4_matrix', (['depth_intrinsics_path'], {}), '(depth_intrinsics_path\n )\n', (2712, 2740), False, 'from data import camera\n'), ((4567, 4695), 'pytorch3d.renderer.mesh.RasterizationSettings', 'RasterizationSettings', ([], {'image_size': '(480, 640)', 'cull_backfaces': '(True)', 'cull_to_frustum': '(True)', 'z_clip_value': '(0.5)', 'faces_per_pixel': '(1)'}), '(image_size=(480, 640), cull_backfaces=True,\n cull_to_frustum=True, z_clip_value=0.5, faces_per_pixel=1)\n', (4588, 4695), False, 'from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex\n'), ((4913, 4976), 'pytorch3d.renderer.mesh.MeshRasterizer', 'MeshRasterizer', (['cameras'], {'raster_settings': 'rasterization_settings'}), '(cameras, raster_settings=rasterization_settings)\n', (4927, 4976), False, 'from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex\n'), ((5582, 5675), 'os.path.join', 'os.path.join', (['Parameters.path.dataset_base_directory.value', '"""val/seq014/depth/000000.png"""'], {}), "(Parameters.path.dataset_base_directory.value,\n 'val/seq014/depth/000000.png')\n", (5594, 5675), False, 'import os\n'), ((5832, 5877), 'cv2.imshow', 'cv2.imshow', (['"""source depth"""', 'depth_image_uint8'], {}), "('source depth', depth_image_uint8)\n", (5842, 5877), False, 'import cv2\n'), ((5882, 5895), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (5893, 5895), False, 'import cv2\n'), ((6166, 6214), 'cv2.imshow', 'cv2.imshow', (['"""rendered mesh"""', 'rendered_mesh_uint8'], {}), "('rendered mesh', rendered_mesh_uint8)\n", (6176, 6214), False, 'import cv2\n'), ((6219, 6232), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (6230, 6232), False, 'import cv2\n'), ((6350, 6400), 'cv2.imshow', 
'cv2.imshow', (['"""rendered depth"""', 'rendered_depth_uint8'], {}), "('rendered depth', rendered_depth_uint8)\n", (6360, 6400), False, 'import cv2\n'), ((6405, 6418), 'cv2.waitKey', 'cv2.waitKey', ([], {}), '()\n', (6416, 6418), False, 'import cv2\n'), ((6599, 6618), 'settings.process_arguments', 'process_arguments', ([], {}), '()\n', (6616, 6618), False, 'from settings import Parameters, process_arguments\n'), ((1054, 1140), 'os.path.join', 'os.path.join', (['Parameters.path.output_directory.value', '"""mesh_000000_red_shorts.ply"""'], {}), "(Parameters.path.output_directory.value,\n 'mesh_000000_red_shorts.ply')\n", (1066, 1140), False, 'import os\n'), ((2195, 2260), 'numpy.array', 'np.array', (['[[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]]'], {'dtype': 'np.float32'}), '([[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]], dtype=np.float32)\n', (2203, 2260), True, 'import numpy as np\n'), ((2297, 2362), 'numpy.array', 'np.array', (['[[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]]'], {'dtype': 'np.float32'}), '([[[-1, 0, 0], [0, -1, 0], [0, 0, 1]]], dtype=np.float32)\n', (2305, 2362), True, 'import numpy as np\n'), ((3427, 3552), 'torch.tensor', 'torch.tensor', (['[[[fx, 0.0, px, 0.0], [0.0, fy, py, 0.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, \n 1.0, 0.0]]]'], {'dtype': 'torch.float32'}), '([[[fx, 0.0, px, 0.0], [0.0, fy, py, 0.0], [0.0, 0.0, 0.0, 1.0],\n [0.0, 0.0, 1.0, 0.0]]], dtype=torch.float32)\n', (3439, 3552), False, 'import torch\n'), ((3709, 3799), 'pytorch3d.renderer.cameras.PerspectiveCameras', 'PerspectiveCameras', ([], {'device': 'torch_device', 'R': 'camera_rotation', 'T': 'camera_translation', 'K': 'K'}), '(device=torch_device, R=camera_rotation, T=\n camera_translation, K=K)\n', (3727, 3799), False, 'from pytorch3d.renderer.cameras import PerspectiveCameras\n'), ((4046, 4304), 'pytorch3d.renderer.cameras.PerspectiveCameras', 'PerspectiveCameras', ([], {'device': 'torch_device', 'R': 'camera_rotation', 'T': 'camera_translation', 'focal_length': '[(fx_screen, fy_screen)]', 
'principal_point': '[(px_screen - (half_image_width - half_image_height), py_screen)]', 'image_size': '[(image_height, image_height)]'}), '(device=torch_device, R=camera_rotation, T=\n camera_translation, focal_length=[(fx_screen, fy_screen)],\n principal_point=[(px_screen - (half_image_width - half_image_height),\n py_screen)], image_size=[(image_height, image_height)])\n', (4064, 4304), False, 'from pytorch3d.renderer.cameras import PerspectiveCameras\n'), ((5912, 5984), 'os.path.join', 'os.path.join', (['Parameters.path.output_directory.value', '"""source_depth.png"""'], {}), "(Parameters.path.output_directory.value, 'source_depth.png')\n", (5924, 5984), False, 'import os\n'), ((6249, 6322), 'os.path.join', 'os.path.join', (['Parameters.path.output_directory.value', '"""rendered_mesh.png"""'], {}), "(Parameters.path.output_directory.value, 'rendered_mesh.png')\n", (6261, 6322), False, 'import os\n'), ((6435, 6509), 'os.path.join', 'os.path.join', (['Parameters.path.output_directory.value', '"""rendered_depth.png"""'], {}), "(Parameters.path.output_directory.value, 'rendered_depth.png')\n", (6447, 6509), False, 'import os\n'), ((5026, 5097), 'pytorch3d.renderer.mesh.MeshRasterizer', 'MeshRasterizer', ([], {'cameras': 'cameras', 'raster_settings': 'rasterization_settings'}), '(cameras=cameras, raster_settings=rasterization_settings)\n', (5040, 5097), False, 'from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex\n'), ((5148, 5216), 'pytorch3d.renderer.mesh.SoftPhongShader', 'SoftPhongShader', ([], {'device': 'torch_device', 'cameras': 'cameras', 'lights': 'lights'}), '(device=torch_device, cameras=cameras, lights=lights)\n', (5163, 5216), False, 'from pytorch3d.renderer.mesh import MeshRasterizer, RasterizationSettings, MeshRenderer, SoftPhongShader, TexturesVertex\n'), ((1397, 1443), 'numpy.array', 'np.array', (['mesh.vertex_colors'], {'dtype': 'np.float32'}), '(mesh.vertex_colors, 
dtype=np.float32)\n', (1405, 1443), True, 'import numpy as np\n'), ((5690, 5740), 'cv2.imread', 'cv2.imread', (['depth_image_path', 'cv2.IMREAD_UNCHANGED'], {}), '(depth_image_path, cv2.IMREAD_UNCHANGED)\n', (5700, 5740), False, 'import cv2\n'), ((1533, 1565), 'torch.from_numpy', 'torch.from_numpy', (['vertices_numpy'], {}), '(vertices_numpy)\n', (1549, 1565), False, 'import torch\n'), ((1605, 1642), 'torch.from_numpy', 'torch.from_numpy', (['vertex_colors_numpy'], {}), '(vertex_colors_numpy)\n', (1621, 1642), False, 'import torch\n'), ((1740, 1769), 'torch.from_numpy', 'torch.from_numpy', (['faces_numpy'], {}), '(faces_numpy)\n', (1756, 1769), False, 'import torch\n')] |
import cv2
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mping
import time
import imutils
import os
focalLength = None
def load_custom_names():
    """Read the class labels (one per line) from the custom .names file."""
    with open("./object_detection/custom.names", "r") as names_file:
        labels = [entry.strip() for entry in names_file]
    print("Loaded custom names")
    return labels
def load_custom_yolo():
    """Load the tiny custom-trained YOLOv3 network.

    Returns:
        (output_layers, net): the names of the unconnected output layers
        and the loaded cv2.dnn network.
    """
    print("started loading yolov3...")
    network = cv2.dnn.readNet("./object_detection/yolov3-tiny-custom.weights", "./object_detection/yolov3-tiny-custom.cfg")
    print("Loaded custom yolo")
    all_layer_names = network.getLayerNames()
    # getUnconnectedOutLayers() returns 1-based indices wrapped in arrays here.
    unconnected = [all_layer_names[idx[0] - 1] for idx in network.getUnconnectedOutLayers()]
    print("...loaded yolov3 sucessfully")
    return unconnected, network
# loading necessary files for further use
# (runs at import time: reads the .names file and the YOLO weights from disk)
# load custom names
class_list = load_custom_names()
# generate color palette
# one random BGR color per class, used when drawing detection boxes
colors = np.random.uniform(0, 255, size=(len(class_list), 3))
# load network
output_layers, net = load_custom_yolo()
def information_cal(outs, height, width):
    """Convert raw YOLO layer outputs into pixel-space detections.

    Each detection vector holds normalized (cx, cy, w, h) in its first four
    entries and the per-class scores from index 5 on. Detections whose best
    class score exceeds 0.5 are kept.

    Returns:
        (boxes, confidences, class_ids) where each box is [x, y, w, h]
        with a top-left anchor in pixel coordinates.
    """
    min_confidence = 0.5
    boxes, confidences, class_ids = [], [], []
    for layer_output in outs:
        for detection in layer_output:
            class_scores = detection[5:]
            best_class = np.argmax(class_scores)
            best_score = class_scores[best_class]
            if best_score <= min_confidence:
                continue
            # Scale normalized center/size to pixels, then convert to a
            # top-left anchored box.
            center_x = int(detection[0] * width)
            center_y = int(detection[1] * height)
            box_w = int(detection[2] * width)
            box_h = int(detection[3] * height)
            boxes.append([int(center_x - box_w / 2), int(center_y - box_h / 2), box_w, box_h])
            confidences.append(float(best_score))
            class_ids.append(best_class)
    return boxes, confidences, class_ids
def check_reaction(label):
    """Return True when *label* is a class that must trigger a stop reaction."""
    return label in ("PlaymoPerson",)
def calculate_distance(image, object_width):
    """Estimate the distance (cm) to an object from its apparent pixel width.

    Uses the pinhole relation distance = real_width * focal_length / pixel_width,
    with the calibration marker's real width as the reference. The focal length
    is computed lazily from the reference marker photo on the first call.

    Args:
        image: current frame (unused here; kept for interface compatibility).
        object_width: detected object width in pixels.

    Returns:
        Distance in centimeters, rounded to 2 decimals.
    """
    # Real-world width (cm) of the calibration marker and its distance (cm)
    # from the camera when the reference photo was taken.
    marker_width = 16
    marker_distance = 50
    # Fix: use `is None` for the sentinel check, and compute the distance once
    # instead of duplicating the formula in both branches.
    if focalLength is None:
        # NOTE(review): calibrate() must assign the module-level focalLength
        # (via a `global` statement) for this lazy initialization to work.
        calibrate("./object_detection/test-images/marker.jpg", marker_width, marker_distance)
    return round((marker_width * focalLength) / object_width, 2)
def calibrate(image, marker_width, marker_distance):
    """Compute the camera focal length (in pixels) from a reference marker photo.

    Detects the largest contour in the image — assumed to be the marker of
    known real width photographed at a known distance — and derives
    focal = pixel_width * distance / real_width, storing the result in the
    module-level ``focalLength`` used by calculate_distance().

    Args:
        image: path to the reference marker image.
        marker_width: real marker width in cm.
        marker_distance: camera-to-marker distance in cm for the reference photo.
    """
    # Bug fix: without this declaration the computed focal length was bound to
    # a function-local variable, so the module-level focalLength stayed None
    # and calculate_distance() failed on its first use.
    global focalLength
    # load image
    image = mping.imread(image)
    # convert the image to grayscale, blur it, detect edges
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (5, 5), 0)
    edged = cv2.Canny(gray, 35, 125)
    # find contours and keep the largest one (assumed to be the marker)
    cnts = cv2.findContours(edged.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)
    c = max(cnts, key=cv2.contourArea)
    # minAreaRect gives ((cx, cy), (w, h), angle); marker[1][0] is the width.
    marker = cv2.minAreaRect(c)
    # calculate focalLength
    focalLength = (marker[1][0] * marker_distance) / marker_width
    # sanity check: recomputing the marker distance must reproduce the input
    distance = round((marker_width * focalLength) / marker[1][0], 2)
    print("marker distance ckeck", distance)
def information_draw(boxes, confidences, class_ids, colors, class_list, img):
    """Draw NMS-filtered detection boxes and labels onto *img*.

    For classes that require a reaction (see check_reaction), also prints
    the estimated distance and a stop warning.

    Returns:
        The annotated image.
    """
    # Non-maximum suppression keeps only the best box per overlapping cluster.
    kept = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    font = cv2.FONT_HERSHEY_PLAIN
    line_width = 1
    for index, box in enumerate(boxes):
        if index not in kept:
            continue
        x, y, w, h = box
        label = str(class_list[class_ids[index]])
        caption = label + ", " + str(round(confidences[index] * 100, 2))
        color = colors[class_ids[index]]
        cv2.rectangle(img, (x, y), (x + w, y + h), color, line_width)
        cv2.putText(img, caption, (x, y - 5), font, 1, color, 1)
        # send information if specific object in image
        if check_reaction(label):
            distance = calculate_distance(img, w)
            print("distance to ", label, "is ", distance, "cm")
            print("STOP!")
    # return edited image
    return img
def detection_on_image(frame):
    """Run the YOLO detector on one frame and draw the results onto it.

    Uses the module-level ``net``, ``output_layers``, ``colors`` and
    ``class_list``. On any failure the error is printed and the frame is
    returned unmodified (best-effort behaviour for the camera loop).
    """
    try:
        height, width, channels = frame.shape
        # preprocess
        # scale pixel values to [0, 1] and resize to the 320x320 network input
        blob = cv2.dnn.blobFromImage(frame, 1 / 255.0, (320, 320), swapRB=True, crop=False)
        # detect objects
        net.setInput(blob)
        outs = net.forward(output_layers)
        # calculate boxes and confidences
        boxes, confidences, class_ids = information_cal(outs, height, width)
        # draw boxes and classification
        frame = information_draw(boxes, confidences, class_ids, colors, class_list, frame)
    except Exception as e:
        # Deliberate best-effort: keep the video loop alive on bad frames.
        print("Error in det")
        print(e)
    return frame
def show_image_detection(image_path):
    """Show an image and its detection result side by side with matplotlib.

    Args:
        image_path: project-relative path starting with '.' (e.g. './x.jpg').
    """
    # set figure
    fig = plt.figure(figsize=(20, 10))
    ax2 = fig.add_subplot(1,2,1, xticks = [], yticks = [])
    # load and show original image
    # NOTE(review): stripping the first character assumes the path starts
    # with '.'; this breaks for absolute paths — confirm callers always
    # pass './...'-style paths.
    image_path=os.path.abspath(os.getcwd())+image_path[1:]
    img_original = mping.imread(image_path)
    ax2.imshow(img_original)
    ax2.set_title("Original")
    # detect objects in image
    img = detection_on_image(img_original)
    # show edited image
    ax = fig.add_subplot(1,2,2, xticks = [], yticks = [])
    ax.set_title("Detected")
    ax.imshow(img)
    plt.show()
def show_image_detection_on_cam():
    """Continuously grab camera frames, run detection, and display each result.

    Loops until 'q' is pressed in an OpenCV window.
    """
    # get camera feed
    # NOTE(review): PiVideoStream is never imported in this module (it lives
    # in imutils.video) — this raises NameError as written.
    video_capture = PiVideoStream().start()
    while True:
        # get frame
        # throttle to roughly one detection every 2 seconds
        time.sleep(2)
        frame = video_capture.read()
        # detect objects in image
        img = detection_on_image(frame)
        # show edited image
        # NOTE(review): a new matplotlib figure is created per frame and never
        # closed — long runs will accumulate figures.
        fig = plt.figure(figsize=(20, 10))
        #ax2 = fig.add_subplot(1,2,1, xticks = [], yticks = [])
        ax = fig.add_subplot(1,2,2, xticks = [], yticks = [])
        ax.set_title("Detected")
        ax.imshow(img)
        plt.show()
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # NOTE(review): PiVideoStream-style streams usually expose stop(), not
    # release() — verify this call against the actual stream class.
    video_capture.release()
def detect_raspberry_cam_delay(frame):
    """Run detection on one frame, then pause 10 s before returning it."""
    annotated = detection_on_image(frame)
    time.sleep(10)
    return annotated
"cv2.dnn.blobFromImage",
"cv2.rectangle",
"matplotlib.image.imread",
"numpy.argmax",
"time.sleep",
"cv2.putText",
"cv2.minAreaRect",
"os.getcwd",
"matplotlib.pyplot.figure",
"imutils.grab_contours",
"cv2.cvtColor",
"cv2.dnn.readNet",
"cv2.dnn.NMSBoxes",
"cv2.Canny",
"cv2.waitKey",
"cv2... | [((747, 860), 'cv2.dnn.readNet', 'cv2.dnn.readNet', (['"""./object_detection/yolov3-tiny-custom.weights"""', '"""./object_detection/yolov3-tiny-custom.cfg"""'], {}), "('./object_detection/yolov3-tiny-custom.weights',\n './object_detection/yolov3-tiny-custom.cfg')\n", (762, 860), False, 'import cv2\n'), ((3537, 3556), 'matplotlib.image.imread', 'mping.imread', (['image'], {}), '(image)\n', (3549, 3556), True, 'import matplotlib.image as mping\n'), ((3629, 3668), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3641, 3668), False, 'import cv2\n'), ((3680, 3713), 'cv2.GaussianBlur', 'cv2.GaussianBlur', (['gray', '(5, 5)', '(0)'], {}), '(gray, (5, 5), 0)\n', (3696, 3713), False, 'import cv2\n'), ((3726, 3750), 'cv2.Canny', 'cv2.Canny', (['gray', '(35)', '(125)'], {}), '(gray, 35, 125)\n', (3735, 3750), False, 'import cv2\n'), ((3888, 3915), 'imutils.grab_contours', 'imutils.grab_contours', (['cnts'], {}), '(cnts)\n', (3909, 3915), False, 'import imutils\n'), ((3989, 4007), 'cv2.minAreaRect', 'cv2.minAreaRect', (['c'], {}), '(c)\n', (4004, 4007), False, 'import cv2\n'), ((4551, 4613), 'cv2.dnn.NMSBoxes', 'cv2.dnn.NMSBoxes', (['boxes', 'confidences', 'threshold', 'nms_threshold'], {}), '(boxes, confidences, threshold, nms_threshold)\n', (4567, 4613), False, 'import cv2\n'), ((6261, 6289), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (6271, 6289), True, 'import matplotlib.pyplot as plt\n'), ((6461, 6485), 'matplotlib.image.imread', 'mping.imread', (['image_path'], {}), '(image_path)\n', (6473, 6485), True, 'import matplotlib.image as mping\n'), ((6754, 6764), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6762, 6764), True, 'import matplotlib.pyplot as plt\n'), ((7543, 7557), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (7553, 7557), False, 'import time\n'), ((5652, 5728), 'cv2.dnn.blobFromImage', 'cv2.dnn.blobFromImage', (['frame', '(1 
/ 255.0)', '(320, 320)'], {'swapRB': '(True)', 'crop': '(False)'}), '(frame, 1 / 255.0, (320, 320), swapRB=True, crop=False)\n', (5673, 5728), False, 'import cv2\n'), ((6918, 6931), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6928, 6931), False, 'import time\n'), ((7104, 7132), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (7114, 7132), True, 'import matplotlib.pyplot as plt\n'), ((7324, 7334), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7332, 7334), True, 'import matplotlib.pyplot as plt\n'), ((1740, 1757), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (1749, 1757), True, 'import numpy as np\n'), ((5012, 5072), 'cv2.rectangle', 'cv2.rectangle', (['img', '(x, y)', '(x + w, y + h)', 'color', 'rec_width'], {}), '(img, (x, y), (x + w, y + h), color, rec_width)\n', (5025, 5072), False, 'import cv2\n'), ((5085, 5162), 'cv2.putText', 'cv2.putText', (['img', 'full_label', '(x, y - 5)', 'font', 'txt_height', 'color', 'text_width'], {}), '(img, full_label, (x, y - 5), font, txt_height, color, text_width)\n', (5096, 5162), False, 'import cv2\n'), ((6414, 6425), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6423, 6425), False, 'import os\n'), ((7355, 7369), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (7366, 7369), False, 'import cv2\n')] |
import numpy as np
import numba as nb
from Bio import pairwise2
from Bio.Seq import Seq
from Bio.SubsMat import MatrixInfo
from Bio.Alphabet import generic_dna
from Bio import SeqUtils
def initialStep(V0, V1, InSeq, In, M, Dir, isLocal = False):
    """Fill the first column of the alignment DP matrix into V1.

    Row i receives the accumulated gap penalty (i * -8). In local-alignment
    mode negative scores are clamped to 0 and marked terminal (Dir 3);
    otherwise each cell points "up" (Dir 2).
    """
    gap_penalty = 8
    for row in range(V0.shape[0]):
        score = V1[row - 1] - gap_penalty if row > 0 else 0
        if isLocal and score < 0:
            V1[row] = 0
            Dir[row] = 3
        else:
            V1[row] = score
            Dir[row] = 2
def nextStep(V0, V1, InSeq, In, M, Dir, isLocal = False):
    """Fill one DP column V1 from the previous column V0.

    For each row i, the cell score is the max of
      * left   (gap): V0[i] - 8
      * top    (gap): V1[i-1] - 8
      * corner (substitution): V0[i-1] + M[(In, InSeq[i-1])] — M stores
        unordered residue pairs, so both key orders are tried.
    Dir records the winning move (0=left, 1=corner, 2=top, 3=local stop),
    and local alignment clamps negative scores to 0.
    """
    d = 8  # linear gap penalty
    INT_MIN = np.iinfo(np.int16).min
    for i in range(V0.shape[0]):
        left = V0[i] - d
        top = INT_MIN
        corner = INT_MIN
        if i > 0:
            top = V1[i-1] - d
            # Bug fix: the corner score was previously computed even at
            # i == 0, where InSeq[i-1] silently wrapped (negative indexing)
            # to the LAST residue and corrupted the boundary cell.
            if (In, InSeq[i-1]) in M:
                corner = V0[i-1] + M[In, InSeq[i-1]]
            elif (InSeq[i-1], In) in M:
                corner = V0[i-1] + M[InSeq[i-1], In]
        V1[i] = max(left, top, corner)
        if isLocal:
            V1[i] = max(V1[i], 0)
        if V1[i] == top:
            Dir[i] = 2
        elif V1[i] == corner:
            Dir[i] = 1
        elif V1[i] == left:
            Dir[i] = 0
        else:
            Dir[i] = 3
def pad_seq(sequence):
    """Pad *sequence* with trailing N's so its length is a multiple of 3."""
    overhang = len(sequence) % 3
    if overhang == 0:
        return sequence
    return sequence + Seq('N' * (3 - overhang))
def alignment_traceback(codonsA, codonsB, alignment, dnaARange = (0, -1), dnaBRange = (0, -1), ):
    """Walk the direction matrix back from the end cell and rebuild the
    two aligned sequences (with '-' for gaps).

    Args:
        codonsA, codonsB: the original (translated) sequences.
        alignment: (scoreMatrix, dirMatrix) pair from codons_alignment().
        dnaARange, dnaBRange: (start, end) cell ranges to trace between;
            an end of -1 means "start from the last matrix cell".

    Returns:
        (alignedA, alignedB, score, 0, len(alignedA)).
    """
    (scoreMatrix, dirMatrix) = alignment
    colsCount = dirMatrix.shape[0]
    rowsCount = dirMatrix.shape[1]
    # Start one past the requested end cell (matrices have a boundary row/col).
    colIndex = dnaARange[1] + 1
    rowIndex = dnaBRange[1] + 1
    colIndexEnd = dnaARange[0]
    rowIndexEnd = dnaBRange[0]
    # `shift` trims the leading unused slots of the accumulators below.
    shift = 1
    if dnaARange[1] < 0:
        colIndex = colsCount - 1
        colIndexEnd = 0
        shift=2
    if dnaBRange[1] < 0:
        #rowIndex = np.argmax(scoreMatrix[colIndex])
        rowIndex = rowsCount - 1
        rowIndexEnd = 0
        shift=2
    score = scoreMatrix[colIndex, rowIndex];
    # Accumulators sized for the worst case (all gaps), filled back-to-front.
    # NOTE(review): np.chararray is deprecated in NumPy — consider plain lists.
    accA = np.chararray(colsCount + rowsCount)
    accB = np.chararray(colsCount + rowsCount)
    accAIter = colsCount + rowsCount - 1
    accBIter = colsCount + rowsCount - 1
    while True:
        if (colIndex < colIndexEnd or rowIndex < rowIndexEnd):
            break
        # Dir codes: 0 = left (gap in B), 1 = corner (aligned pair),
        # 2 = top (gap in A), anything else terminates the walk.
        if dirMatrix[colIndex, rowIndex] == 0:
            colIndex = colIndex - 1
            accA[accAIter] = codonsA[colIndex]
            accAIter = accAIter - 1
            accB[accBIter] = '-'
            accBIter = accBIter - 1
        elif dirMatrix[colIndex, rowIndex] == 1:
            colIndex = colIndex - 1
            rowIndex = rowIndex - 1
            accA[accAIter] = codonsA[colIndex]
            accAIter = accAIter - 1
            accB[accBIter] = codonsB[rowIndex]
            accBIter = accBIter - 1
        elif dirMatrix[colIndex, rowIndex] == 2:
            rowIndex = rowIndex - 1
            accA[accAIter] = '-'
            accAIter = accAIter - 1
            accB[accBIter] = codonsB[rowIndex]
            accBIter = accBIter - 1
        else:
            break
    strA = np.chararray.tostring(accA[accAIter+shift:])
    strB = np.chararray.tostring(accB[accBIter+shift:])
    return (strA, strB, score, 0, len(strA))
def codons_alignment(codonsA, codonsB, isLocal = False):
    """Run the column-by-column alignment dynamic program.

    The longer sequence becomes the column axis (inputs are swapped if
    needed), BLOSUM50 supplies substitution scores, and two rolling int16
    vectors keep the per-column state while the full score and direction
    matrices are recorded for traceback.

    Returns:
        (scoreMatrix, dirMatrix) as (lenA+1) x (lenB+1) int16 arrays.
    """
    if len(codonsA) < len(codonsB):
        codonsA, codonsB = codonsB, codonsA
    lenA, lenB = len(codonsA), len(codonsB)
    previous_col = np.zeros(lenB + 1, dtype=np.int16)
    current_col = np.zeros(lenB + 1, dtype=np.int16)
    dirMatrix = np.zeros((lenA + 1, lenB + 1), dtype=np.int16)
    scoreMatrix = np.zeros((lenA + 1, lenB + 1), dtype=np.int16)
    initialStep(previous_col, current_col, codonsB, codonsA[0], MatrixInfo.blosum50, dirMatrix[0], isLocal=isLocal)
    previous_col, current_col = current_col, previous_col
    for col in range(lenA):
        scoreMatrix[col] = previous_col
        nextStep(previous_col, current_col, codonsB, codonsA[col], MatrixInfo.blosum50, dirMatrix[col + 1], isLocal=isLocal)
        previous_col, current_col = current_col, previous_col
    scoreMatrix[lenA] = previous_col
    return (scoreMatrix, dirMatrix)
def codons_align(codonsA, codonsB, isLocal = False, dnaARange = (0, -1), dnaBRange = (0, -1)):
    """Align two codon strings and return the traceback tuple
    (alignedA, alignedB, score, start, length)."""
    dp_result = codons_alignment(codonsA, codonsB, isLocal=isLocal)
    return alignment_traceback(codonsA, codonsB, dp_result, dnaBRange=dnaBRange, dnaARange=dnaARange)
def dna_local_align3(dnaA, dnaB):
    """Translate dnaA and dnaB in all 3x3 reading-frame combinations, align
    each protein pair, and return the non-empty alignments best-score first."""
    hits = []
    for frame_a in range(3):
        for frame_b in range(3):
            protein_a = Seq.translate(pad_seq(dnaA[frame_a:]))
            protein_b = Seq.translate(pad_seq(dnaB[frame_b:]))
            print(protein_a)
            print(protein_b)
            aligned = codons_align(protein_a, protein_b)
            # Keep only alignments that actually matched something.
            if aligned[4] > 0:
                hits.append(aligned)
    return sorted(hits, key=lambda hit: -hit[2])
# Demo: align two short DNA fragments across every reading-frame combination
# and print the ranked results.
dnaA = Seq("GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG", generic_dna)
dnaB = Seq("GTGGCCATTGTAATGGAAAGGGTGAAAGAT", generic_dna)
print(dna_local_align3(dnaA, dnaB))
"Bio.Seq.Seq",
"numpy.iinfo",
"numpy.chararray.tostring",
"numpy.zeros",
"numpy.chararray"
] | [((5095, 5154), 'Bio.Seq.Seq', 'Seq', (['"""GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG"""', 'generic_dna'], {}), "('GTGGCCATTGTAATGGGCCGCTGAAAGGGTGCCCGATAG', generic_dna)\n", (5098, 5154), False, 'from Bio.Seq import Seq\n'), ((5162, 5212), 'Bio.Seq.Seq', 'Seq', (['"""GTGGCCATTGTAATGGAAAGGGTGAAAGAT"""', 'generic_dna'], {}), "('GTGGCCATTGTAATGGAAAGGGTGAAAGAT', generic_dna)\n", (5165, 5212), False, 'from Bio.Seq import Seq\n'), ((2139, 2174), 'numpy.chararray', 'np.chararray', (['(colsCount + rowsCount)'], {}), '(colsCount + rowsCount)\n', (2151, 2174), True, 'import numpy as np\n'), ((2186, 2221), 'numpy.chararray', 'np.chararray', (['(colsCount + rowsCount)'], {}), '(colsCount + rowsCount)\n', (2198, 2221), True, 'import numpy as np\n'), ((3218, 3264), 'numpy.chararray.tostring', 'np.chararray.tostring', (['accA[accAIter + shift:]'], {}), '(accA[accAIter + shift:])\n', (3239, 3264), True, 'import numpy as np\n'), ((3274, 3320), 'numpy.chararray.tostring', 'np.chararray.tostring', (['accB[accBIter + shift:]'], {}), '(accB[accBIter + shift:])\n', (3295, 3320), True, 'import numpy as np\n'), ((3591, 3625), 'numpy.zeros', 'np.zeros', (['(lenB + 1)'], {'dtype': 'np.int16'}), '(lenB + 1, dtype=np.int16)\n', (3599, 3625), True, 'import numpy as np\n'), ((3640, 3674), 'numpy.zeros', 'np.zeros', (['(lenB + 1)'], {'dtype': 'np.int16'}), '(lenB + 1, dtype=np.int16)\n', (3648, 3674), True, 'import numpy as np\n'), ((3689, 3735), 'numpy.zeros', 'np.zeros', (['(lenA + 1, lenB + 1)'], {'dtype': 'np.int16'}), '((lenA + 1, lenB + 1), dtype=np.int16)\n', (3697, 3735), True, 'import numpy as np\n'), ((3750, 3796), 'numpy.zeros', 'np.zeros', (['(lenA + 1, lenB + 1)'], {'dtype': 'np.int16'}), '((lenA + 1, lenB + 1), dtype=np.int16)\n', (3758, 3796), True, 'import numpy as np\n'), ((304, 322), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (312, 322), True, 'import numpy as np\n'), ((684, 702), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (692, 702), 
True, 'import numpy as np\n'), ((724, 742), 'numpy.iinfo', 'np.iinfo', (['np.int16'], {}), '(np.int16)\n', (732, 742), True, 'import numpy as np\n'), ((1423, 1449), 'Bio.Seq.Seq', 'Seq', (["('N' * (3 - remainder))"], {}), "('N' * (3 - remainder))\n", (1426, 1449), False, 'from Bio.Seq import Seq\n')] |
from ..gui.main_window import Ui_EditorMainWindow
from PySide.QtGui import QApplication, QMainWindow, QPixmap
from PySide import QtGui, QtCore
from PySide.QtCore import QObject
import sys
import numpy as np
from .. import util
from .brush_dialog import BrushDialog
from .about_dialog import AboutDialog
from .new_image_dialog import NewImageDialog
from .helper_threads import IFTThread
class EditorMainWindow(QMainWindow):
    def __init__(self, parent=None):
        """Build the main window, wire menu/button signals, and init state."""
        super(EditorMainWindow, self).__init__(parent)
        self.ui = Ui_EditorMainWindow()
        self.ui.setupUi(self)
        # Menu actions.
        self.ui.action_open.triggered.connect(self.open_file)
        self.ui.action_save_spatial.triggered.connect(self.save_spatial)
        self.ui.action_new_image.triggered.connect(self.new_image)
        self.ui.action_linked_zoom.triggered.connect(self.link_zoom)
        self.ui.action_save_both.triggered.connect(self.save_both)
        self.ui.action_brush.triggered.connect(self.show_brush)
        self.ui.action_website.triggered.connect(self.show_website)
        self.ui.action_about.triggered.connect(self.show_about)
        self.ui.action_none.triggered.connect(self.remove_brush)
        # Zoom buttons for the spatial and frequency views.
        self.ui.image_zoom_in_btn.clicked.connect(self.image_zoom_in)
        self.ui.image_zoom_out_btn.clicked.connect(self.image_zoom_out)
        self.ui.freq_zoom_in_btn.clicked.connect(self.freq_zoom_in)
        self.ui.freq_zoom_out_btn.clicked.connect(self.freq_zoom_out)
        # Event filters let this window intercept mouse events on the labels.
        self.ui.image_label.installEventFilter(self)
        self.ui.freq_label.installEventFilter(self)
        self.ui.image_label.setMouseTracking(True)
        self.ui.freq_label.setMouseTracking(True)
        self.spatial_image = None
        # This will store the shifted frequency image
        self.frequency_array_magnitude = None
        self.frequency_array_angle = None
        # Raw and zoom-scaled pixmaps for the two views.
        self.freq_pixmap = None
        self.scaled_freq_pixmap = None
        self.image_pixmap = None
        self.scaled_image_pixmap = None
        # Independent zoom factors; may be linked via the menu toggle.
        self.spatial_scale = 1.0
        self.frequency_scale = 1.0
        self.current_brush = None
        self.is_zoom_linked = False
def open_file(self):
""" Signal handler for the Open Menu """
filters = "Image Files (*.png *.jpg *.bmp)"
file_name = QtGui.QFileDialog.getOpenFileName(self, "Open File",
filter=filters)[0]
if file_name:
image = QtGui.QImage(file_name)
filters = "Image Files (*.png *.jpg *.bmp)"
if image.isNull():
QtGui.QMessageBox.information(self, "Image Viewer",
"Cannot load %s." % file_name)
return
array = util.qimage_to_numpy(image)
self.load_image_from_array(array)
def load_image_from_array(self, array):
""" Loads an array as spatial domain image.
This function recomputes the fft and updates both the UIs. """
image = util.rgb_to_yuv(array)
garray = image[..., 0]
farray = np.fft.fft2(garray)
farray = np.fft.fftshift(farray)
self.set_yuv_image(image)
self.set_freq_image_angle(np.angle(farray))
self.set_freq_image_magnitude(np.absolute(farray))
def set_freq_image_magnitude(self, fimg):
""" Sets a numpy array as a frequncy domain image magnitude.
This function expects an appropriately shifted numpy array as input.
Except taking log, no manipulation to the values is done before
rendering. The function updates recomputes all internal intermediate
values and re renders the frequency UI.
"""
self.frequency_array_magnitude = fimg
qimage = util.fft_to_qimage(self.frequency_array_magnitude)
pixmap = QPixmap.fromImage(qimage)
self.set_freq_pixmap(pixmap)
self.invalidate_freq_scale()
self.render_freq()
def set_freq_pixmap(self, pixmap):
"""Sets the pixmap to be shown for frequency image.
This function only caches the pixmap, not computation or UI updation
is done.
"""
self.freq_pixmap = pixmap
def invalidate_freq_scale(self):
"""Implies scale has changed and recomputes internal fields
This function is to be called when either `self.freq_pixmap` changes
or `self.frequency_scale` changes. This function merely caches the
scaled pixmap, no UI updation is done.
"""
w, h = self.freq_pixmap.width(), self.freq_pixmap.height()
sw, sh = int(w*self.frequency_scale), int(h*self.frequency_scale)
self.scaled_freq_pixmap = self.freq_pixmap.scaled(sw, sh)
def render_freq(self, pixmap=None):
"""Render `pixmap` as the frequency image. If not given display last
known sclaed spatial image pixmap.
This function does not perform any computations internally. The
function is to be called to update the UI to reflect the state of the
internal fields, when called without the 2nd argument. When a brush
is set, a pixmap with the brush drawn on it can supplied as the 2nd
argument.
"""
if not pixmap:
pixmap = self.scaled_freq_pixmap
self.ui.freq_label.setPixmap(pixmap)
def set_freq_image_angle(self, fimg):
" Sets a numpy array as a frequncy domain image angle. "
self.frequency_array_angle = fimg
def set_yuv_image(self, img):
""" Sets the spatial image as YUV array.
The function expects a `uint8` array and will set the spatial domain
image in the UI along with updating all internal fields.
"""
self.spatial_image = img
img = util.yuv_to_rgb(self.spatial_image)
qimage = util.numpy_to_qimage(img)
pixmap = QPixmap.fromImage(qimage)
self.set_image_pixmap(pixmap)
self.invalidate_image_scale()
self.render_image()
def set_image_pixmap(self, pixmap):
"""Sets the pixmap to be shown for spatial image.
This function only caches the pixmap, not computation or UI updation
is done.
"""
self.image_pixmap = pixmap
def invalidate_image_scale(self):
"""Implies scale has changed and recomputes internal fields.
This function is to be called when either `self.image_pixmap` changes
or `self.spatial_scale` changes. This function merely caches the
scaled pixmap, no UI updation is done.
"""
w, h = self.image_pixmap.width(), self.image_pixmap.height()
sw, sh = int(w*self.spatial_scale), int(h*self.spatial_scale)
self.scaled_image_pixmap = self.image_pixmap.scaled(sw, sh)
def render_image(self, pixmap=None):
"""Render the pixmap as spatial image. If not given, display last known
sclaed spatial image pixmap.
"""
if not pixmap:
pixmap = self.scaled_image_pixmap
self.ui.image_label.setPixmap(pixmap)
def image_zoom_in(self):
" Zoom in the spatial domain image "
if self.spatial_image is None:
return
self.spatial_scale += 0.1
self.invalidate_image_scale()
self.render_image()
if self.is_zoom_linked:
self.frequency_scale = self.spatial_scale
self.invalidate_freq_scale()
self.render_freq()
def image_zoom_out(self):
" Zoom out the spatial domain image "
if self.spatial_image is None:
return
self.spatial_scale -= 0.1
self.invalidate_image_scale()
self.render_image()
if self.is_zoom_linked:
self.frequency_scale = self.spatial_scale
self.invalidate_freq_scale()
self.render_freq()
def freq_zoom_out(self):
"Zoom out the frequency domain image."
if self.frequency_array_magnitude is None:
return
self.frequency_scale -= 0.1
self.invalidate_freq_scale()
self.render_freq()
if self.is_zoom_linked:
self.spatial_scale = self.frequency_scale
self.invalidate_image_scale()
self.render_image()
def freq_zoom_in(self):
"Zoom out the frequency domain image."
if self.frequency_array_magnitude is None:
return
self.frequency_scale += 0.1
self.invalidate_freq_scale()
self.render_freq()
if self.is_zoom_linked:
self.spatial_scale = self.frequency_scale
self.invalidate_image_scale()
self.render_image()
def handle_image_move(self, event):
"Handle mouse move on the spatial image."
if self.spatial_image is None:
return
self.handle_image_stats(event)
def handle_image_stats(self, event):
"""Given an event, take care of displaying stats for spatial image.
The assumption made here is that the QLabel is exactly the size of the
image.
"""
pos = event.pos()
x, y = pos.x(), pos.y()
x, y = int(x/self.spatial_scale), int(y/self.spatial_scale)
r, c = y, x
r = np.clip(r, 0, self.spatial_image.shape[0])
c = np.clip(c, 0, self.spatial_image.shape[1])
value = self.spatial_image[r, c].astype(np.int)
msg = "X:%d Y:%d Value:" % (x, y)
msg += str(value)
self.ui.image_info_label.setText(msg)
def handle_freq_move(self, event):
"""Handle mouse move on the frequency domain image.
"""
if self.frequency_array_magnitude is None:
return
self.handle_freq_stats(event)
if self.current_brush:
pixmap = self.scaled_freq_pixmap.copy()
self.current_brush.draw_marker(event.x(), event.y(), pixmap,
self.frequency_scale)
if event.buttons() & QtCore.Qt.MouseButton.LeftButton:
self.handle_freq_modify(event)
# We use the pre computed scaled pixmap and mark the brush on it
# before displaying
self.render_freq(pixmap)
def handle_freq_stats(self, event):
"""Given an event, show frequency image stats.
The assumption made here is that the QLabel is exactly the size of the
image.
"""
pos = event.pos()
x, y = pos.x(), pos.y()
x, y = int(x/self.frequency_scale), int(y/self.frequency_scale)
r, c = y, x
r = np.clip(r, 0, self.frequency_array_magnitude.shape[0] - 1)
c = np.clip(c, 0, self.frequency_array_magnitude.shape[1] - 1)
value = self.frequency_array_magnitude[r, c]
msg = "X:%d Y:%d Value:%d" % (x, y, value)
self.ui.freq_info_label.setText(msg)
def eventFilter(self, obj, event):
"Call to handle relevant events."
if obj == self.ui.image_label:
if event.type() == QtCore.QEvent.MouseMove:
self.handle_image_move(event)
return True
elif obj == self.ui.freq_label:
if not self.ui.freq_label.isEnabled():
return False
if event.type() == QtCore.QEvent.MouseMove:
self.handle_freq_move(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonPress:
if event.button() == QtCore.Qt.MouseButton.LeftButton:
self.handle_freq_modify(event)
return True
elif event.type() == QtCore.QEvent.MouseButtonRelease:
if event.button() == QtCore.Qt.MouseButton.LeftButton:
if self.current_brush:
self.recompute_spatial_image()
return True
return QObject.eventFilter(self, obj, event)
def handle_freq_modify(self, event):
"Handle an event which will modify the frequency image."
if not self.current_brush is None:
x, y = event.x(), event.y()
x /= self.frequency_scale
y /= self.frequency_scale
h, w = self.frequency_array_magnitude.shape
magnitude = self.frequency_array_magnitude
angle = self.frequency_array_angle
self.current_brush.apply(x, y, magnitude, angle)
self.set_freq_image_magnitude(self.frequency_array_magnitude)
self.render_freq()
def show_brush(self):
"Show the brush dialog box."
d = BrushDialog(self, self.current_brush)
d.exec_()
if d.get_brush():
self.current_brush = d.get_brush()
def remove_brush(self):
"Deselcts a brush."
self.current_brush = None
self.render_freq()
def recompute_spatial_image(self):
"""Recompute the spatial image from the frequency image and render it.
This function just launches a thread to do the task.
"""
magnitude = self.frequency_array_magnitude
angle = self.frequency_array_angle
self.ift_thread = IFTThread(magnitude, angle)
self.ift_thread.ift_done.connect(self.ift_done_recv)
# To prevent mutiple threads modifying images
# we disable is while one thread is working
self.ui.freq_label.setEnabled(False)
self.ift_thread.start()
def ift_done_recv(self, array):
"The reciever for the ift_done signal"
self.spatial_image[:, :, 0] = array
self.set_yuv_image(self.spatial_image)
self.ui.freq_label.setEnabled(True)
def save_spatial(self):
"Save the spatial domain image."
if self.spatial_image is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
image = util.numpy_to_qimage(arr)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
def save_both(self):
"Save image and its transofrm."
if self.spatial_image is None or \
self.frequency_array_magnitude is None:
QtGui.QMessageBox.information(self, "Error", "No Image to Save")
return
filters = "Image Files (*.png)"
filename = QtGui.QFileDialog.getSaveFileName(self, "Save Image",
filter=filters)[0]
if not filename.lower().endswith('.png'):
filename += '.png'
arr = util.yuv_to_rgb(self.spatial_image)
r, c, ch = arr.shape
out = np.zeros((r, c*2, ch), dtype=arr.dtype)
out[:, :c, :] = arr
freq_img = util.fft_to_qimage(self.frequency_array_magnitude)
freq_arr = util.qimage_to_numpy(freq_img)
out[:, c:, :] = freq_arr
image = util.numpy_to_qimage(out)
success = image.save(filename)
if not success:
msg = "Could not save image at the location."
QtGui.QMessageBox.information(self, "Error", msg)
def show_about(self):
"Display the about dialog."
d = AboutDialog(self)
d.exec_()
def show_website(self):
"Open the website in a browser."
QtGui.QDesktopServices.openUrl("http://fredo-editor.github.io")
def new_image(self):
"Shows a dialog to create a new blank image."
d = NewImageDialog()
d.exec_()
if d.get_size():
w, h = d.get_size()
array = np.zeros((h, w, 3), dtype=np.uint8)
self.load_image_from_array(array)
def link_zoom(self):
"Ensures that both images are at the same scale."
if self.ui.action_linked_zoom.isChecked():
self.is_zoom_linked = True
self.spatial_scale = 1.0
self.invalidate_image_scale()
self.render_image()
self.frequency_scale = 1.0
self.invalidate_freq_scale()
self.render_freq()
else:
self.is_zoom_linked = False
def run():
app = QApplication(sys.argv)
editor = EditorMainWindow()
editor.show()
sys.exit(app.exec_())
if __name__ == '__main__':
run()
| [
"numpy.clip",
"PySide.QtGui.QPixmap.fromImage",
"PySide.QtGui.QFileDialog.getOpenFileName",
"numpy.absolute",
"numpy.fft.fft2",
"numpy.angle",
"numpy.zeros",
"PySide.QtGui.QDesktopServices.openUrl",
"PySide.QtGui.QFileDialog.getSaveFileName",
"PySide.QtGui.QApplication",
"PySide.QtGui.QImage",
... | [((16472, 16494), 'PySide.QtGui.QApplication', 'QApplication', (['sys.argv'], {}), '(sys.argv)\n', (16484, 16494), False, 'from PySide.QtGui import QApplication, QMainWindow, QPixmap\n'), ((3087, 3106), 'numpy.fft.fft2', 'np.fft.fft2', (['garray'], {}), '(garray)\n', (3098, 3106), True, 'import numpy as np\n'), ((3124, 3147), 'numpy.fft.fftshift', 'np.fft.fftshift', (['farray'], {}), '(farray)\n', (3139, 3147), True, 'import numpy as np\n'), ((3830, 3855), 'PySide.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['qimage'], {}), '(qimage)\n', (3847, 3855), False, 'from PySide.QtGui import QApplication, QMainWindow, QPixmap\n'), ((5868, 5893), 'PySide.QtGui.QPixmap.fromImage', 'QPixmap.fromImage', (['qimage'], {}), '(qimage)\n', (5885, 5893), False, 'from PySide.QtGui import QApplication, QMainWindow, QPixmap\n'), ((9238, 9280), 'numpy.clip', 'np.clip', (['r', '(0)', 'self.spatial_image.shape[0]'], {}), '(r, 0, self.spatial_image.shape[0])\n', (9245, 9280), True, 'import numpy as np\n'), ((9293, 9335), 'numpy.clip', 'np.clip', (['c', '(0)', 'self.spatial_image.shape[1]'], {}), '(c, 0, self.spatial_image.shape[1])\n', (9300, 9335), True, 'import numpy as np\n'), ((10578, 10636), 'numpy.clip', 'np.clip', (['r', '(0)', '(self.frequency_array_magnitude.shape[0] - 1)'], {}), '(r, 0, self.frequency_array_magnitude.shape[0] - 1)\n', (10585, 10636), True, 'import numpy as np\n'), ((10649, 10707), 'numpy.clip', 'np.clip', (['c', '(0)', '(self.frequency_array_magnitude.shape[1] - 1)'], {}), '(c, 0, self.frequency_array_magnitude.shape[1] - 1)\n', (10656, 10707), True, 'import numpy as np\n'), ((11870, 11907), 'PySide.QtCore.QObject.eventFilter', 'QObject.eventFilter', (['self', 'obj', 'event'], {}), '(self, obj, event)\n', (11889, 11907), False, 'from PySide.QtCore import QObject\n'), ((15003, 15044), 'numpy.zeros', 'np.zeros', (['(r, c * 2, ch)'], {'dtype': 'arr.dtype'}), '((r, c * 2, ch), dtype=arr.dtype)\n', (15011, 15044), True, 'import numpy as np\n'), ((15643, 
15706), 'PySide.QtGui.QDesktopServices.openUrl', 'QtGui.QDesktopServices.openUrl', (['"""http://fredo-editor.github.io"""'], {}), "('http://fredo-editor.github.io')\n", (15673, 15706), False, 'from PySide import QtGui, QtCore\n'), ((2287, 2355), 'PySide.QtGui.QFileDialog.getOpenFileName', 'QtGui.QFileDialog.getOpenFileName', (['self', '"""Open File"""'], {'filter': 'filters'}), "(self, 'Open File', filter=filters)\n", (2320, 2355), False, 'from PySide import QtGui, QtCore\n'), ((2455, 2478), 'PySide.QtGui.QImage', 'QtGui.QImage', (['file_name'], {}), '(file_name)\n', (2467, 2478), False, 'from PySide import QtGui, QtCore\n'), ((3217, 3233), 'numpy.angle', 'np.angle', (['farray'], {}), '(farray)\n', (3225, 3233), True, 'import numpy as np\n'), ((3273, 3292), 'numpy.absolute', 'np.absolute', (['farray'], {}), '(farray)\n', (3284, 3292), True, 'import numpy as np\n'), ((13753, 13817), 'PySide.QtGui.QMessageBox.information', 'QtGui.QMessageBox.information', (['self', '"""Error"""', '"""No Image to Save"""'], {}), "(self, 'Error', 'No Image to Save')\n", (13782, 13817), False, 'from PySide import QtGui, QtCore\n'), ((13897, 13966), 'PySide.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', '"""Save Image"""'], {'filter': 'filters'}), "(self, 'Save Image', filter=filters)\n", (13930, 13966), False, 'from PySide import QtGui, QtCore\n'), ((14332, 14381), 'PySide.QtGui.QMessageBox.information', 'QtGui.QMessageBox.information', (['self', '"""Error"""', 'msg'], {}), "(self, 'Error', msg)\n", (14361, 14381), False, 'from PySide import QtGui, QtCore\n'), ((14556, 14620), 'PySide.QtGui.QMessageBox.information', 'QtGui.QMessageBox.information', (['self', '"""Error"""', '"""No Image to Save"""'], {}), "(self, 'Error', 'No Image to Save')\n", (14585, 14620), False, 'from PySide import QtGui, QtCore\n'), ((14700, 14769), 'PySide.QtGui.QFileDialog.getSaveFileName', 'QtGui.QFileDialog.getSaveFileName', (['self', '"""Save Image"""'], {'filter': 
'filters'}), "(self, 'Save Image', filter=filters)\n", (14733, 14769), False, 'from PySide import QtGui, QtCore\n'), ((15402, 15451), 'PySide.QtGui.QMessageBox.information', 'QtGui.QMessageBox.information', (['self', '"""Error"""', 'msg'], {}), "(self, 'Error', msg)\n", (15431, 15451), False, 'from PySide import QtGui, QtCore\n'), ((15913, 15948), 'numpy.zeros', 'np.zeros', (['(h, w, 3)'], {'dtype': 'np.uint8'}), '((h, w, 3), dtype=np.uint8)\n', (15921, 15948), True, 'import numpy as np\n'), ((2583, 2669), 'PySide.QtGui.QMessageBox.information', 'QtGui.QMessageBox.information', (['self', '"""Image Viewer"""', "('Cannot load %s.' % file_name)"], {}), "(self, 'Image Viewer', 'Cannot load %s.' %\n file_name)\n", (2612, 2669), False, 'from PySide import QtGui, QtCore\n')] |
# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import paddle.fluid as fluid
import unittest
import numpy as np
import six
import paddle
from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode
def numpy_cov(np_arr, rowvar=True, ddof=1, fweights=None, aweights=None):
return np.cov(np_arr,
rowvar=rowvar,
ddof=int(ddof),
fweights=fweights,
aweights=aweights)
class Cov_Test(unittest.TestCase):
def setUp(self):
self.shape = [20, 10]
self.weightshape = [10]
def func_test_tensor_cov_default(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
tensor = paddle.to_tensor(np_arr, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=True,
fweights=None,
aweights=None)
np_cov = numpy_cov(
np_arr, rowvar=True, ddof=1, fweights=None, aweights=None)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_default(self):
with _test_eager_guard():
self.func_test_tensor_cov_default()
self.func_test_tensor_cov_default()
def func_test_tensor_cov_rowvar(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
tensor = paddle.to_tensor(np_arr, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=False,
ddof=True,
fweights=None,
aweights=None)
np_cov = numpy_cov(
np_arr, rowvar=False, ddof=1, fweights=None, aweights=None)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_rowvar(self):
with _test_eager_guard():
self.func_test_tensor_cov_rowvar()
self.func_test_tensor_cov_rowvar()
def func_test_tensor_cov_ddof(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
tensor = paddle.to_tensor(np_arr, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=False,
fweights=None,
aweights=None)
np_cov = numpy_cov(
np_arr, rowvar=True, ddof=0, fweights=None, aweights=None)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_ddof(self):
with _test_eager_guard():
self.func_test_tensor_cov_ddof()
self.func_test_tensor_cov_ddof()
def func_test_tensor_cov_fweights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
np_fw = np.random.randint(
10, size=self.weightshape).astype('int32')
tensor = paddle.to_tensor(np_arr, place=p)
fweights = paddle.to_tensor(np_fw, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=True,
fweights=fweights,
aweights=None)
np_cov = numpy_cov(
np_arr, rowvar=True, ddof=1, fweights=np_fw, aweights=None)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_fweights(self):
with _test_eager_guard():
self.func_test_tensor_cov_fweights()
self.func_test_tensor_cov_fweights()
def func_test_tensor_cov_aweights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
np_aw = np.random.randint(
10, size=self.weightshape).astype('int32')
tensor = paddle.to_tensor(np_arr, place=p)
aweights = paddle.to_tensor(np_aw, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=True,
fweights=None,
aweights=aweights)
np_cov = numpy_cov(
np_arr, rowvar=True, ddof=1, fweights=None, aweights=np_aw)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_aweights(self):
with _test_eager_guard():
self.func_test_tensor_cov_aweights()
self.func_test_tensor_cov_aweights()
def func_test_tensor_cov_weights(self):
typelist = ['float64']
places = [fluid.CPUPlace()]
if fluid.core.is_compiled_with_cuda():
places.append(fluid.CUDAPlace(0))
for idx, p in enumerate(places):
if idx == 0:
paddle.set_device('cpu')
else:
paddle.set_device('gpu')
for dtype in typelist:
np_arr = np.random.rand(*self.shape).astype(dtype)
np_fw = np.random.randint(
10, size=self.weightshape).astype('int64')
np_aw = np.random.rand(*self.weightshape).astype('float64')
tensor = paddle.to_tensor(np_arr, place=p)
fweights = paddle.to_tensor(np_fw, place=p)
aweights = paddle.to_tensor(np_aw, place=p)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=True,
fweights=fweights,
aweights=aweights)
np_cov = numpy_cov(
np_arr, rowvar=True, ddof=1, fweights=np_fw, aweights=np_aw)
self.assertTrue(np.allclose(np_cov, cov.numpy()))
def test_tensor_cov_weights(self):
with _test_eager_guard():
self.func_test_tensor_cov_weights()
self.func_test_tensor_cov_weights()
class Cov_Test2(Cov_Test):
def setUp(self):
self.shape = [10]
self.weightshape = [10]
# Input(x) only support N-D (1<=N<=2) tensor
class Cov_Test3(unittest.TestCase):
def setUp(self):
self.shape = [2, 5, 10]
self.fweightshape = [10]
self.aweightshape = [10]
self.fw_s = 1.
self.aw_s = 1.
def func_test_errors(self):
def test_err():
np_arr = np.random.rand(*self.shape).astype('float64')
np_fw = self.fw_s * np.random.rand(
*self.fweightshape).astype('int32')
np_aw = self.aw_s * np.random.rand(
*self.aweightshape).astype('float64')
tensor = paddle.to_tensor(np_arr)
fweights = paddle.to_tensor(np_fw)
aweights = paddle.to_tensor(np_aw)
cov = paddle.linalg.cov(tensor,
rowvar=True,
ddof=True,
fweights=fweights,
aweights=aweights)
self.assertRaises(ValueError, test_err)
def test_errors(self):
with _test_eager_guard():
self.func_test_errors()
self.func_test_errors()
#Input(fweights) only support N-D (N<=1) tensor
class Cov_Test4(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [2, 10]
self.aweightshape = [10]
self.fw_s = 1.
self.aw_s = 1.
#The number of Input(fweights) should equal to x's dim[1]
class Cov_Test5(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [5]
self.aweightshape = [10]
self.fw_s = 1.
self.aw_s = 1.
#The value of Input(fweights) cannot be negtive
class Cov_Test6(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [10]
self.aweightshape = [10]
self.fw_s = -1.
self.aw_s = 1.
#Input(aweights) only support N-D (N<=1) tensor
class Cov_Test7(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [10]
self.aweightshape = [2, 10]
self.fw_s = 1.
self.aw_s = 1.
#The number of Input(aweights) should equal to x's dim[1]
class Cov_Test8(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [10]
self.aweightshape = [5]
self.fw_s = 1.
self.aw_s = 1.
#The value of Input(aweights) cannot be negtive
class Cov_Test9(Cov_Test3):
def setUp(self):
self.shape = [5, 10]
self.fweightshape = [10]
self.aweightshape = [10]
self.fw_s = 1.
self.aw_s = -1.
if __name__ == '__main__':
unittest.main()
| [
"paddle.fluid.framework._test_eager_guard",
"numpy.random.rand",
"paddle.fluid.CPUPlace",
"numpy.random.randint",
"paddle.to_tensor",
"paddle.linalg.cov",
"paddle.fluid.CUDAPlace",
"unittest.main",
"paddle.fluid.core.is_compiled_with_cuda",
"paddle.set_device"
] | [((11390, 11405), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11403, 11405), False, 'import unittest\n'), ((1256, 1290), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (1288, 1290), True, 'import paddle.fluid as fluid\n'), ((2398, 2432), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (2430, 2432), True, 'import paddle.fluid as fluid\n'), ((3537, 3571), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (3569, 3571), True, 'import paddle.fluid as fluid\n'), ((4673, 4707), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (4705, 4707), True, 'import paddle.fluid as fluid\n'), ((5991, 6025), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (6023, 6025), True, 'import paddle.fluid as fluid\n'), ((7308, 7342), 'paddle.fluid.core.is_compiled_with_cuda', 'fluid.core.is_compiled_with_cuda', ([], {}), '()\n', (7340, 7342), True, 'import paddle.fluid as fluid\n'), ((1227, 1243), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (1241, 1243), True, 'import paddle.fluid as fluid\n'), ((2163, 2182), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (2180, 2182), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((2369, 2385), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (2383, 2385), True, 'import paddle.fluid as fluid\n'), ((3306, 3325), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (3323, 3325), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((3508, 3524), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (3522, 3524), True, 'import paddle.fluid as fluid\n'), ((4442, 4461), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), 
'()\n', (4459, 4461), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((4644, 4660), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (4658, 4660), True, 'import paddle.fluid as fluid\n'), ((5752, 5771), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (5769, 5771), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((5962, 5978), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (5976, 5978), True, 'import paddle.fluid as fluid\n'), ((7070, 7089), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (7087, 7089), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((7279, 7295), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (7293, 7295), True, 'import paddle.fluid as fluid\n'), ((8527, 8546), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (8544, 8546), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((9343, 9367), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {}), '(np_arr)\n', (9359, 9367), False, 'import paddle\n'), ((9391, 9414), 'paddle.to_tensor', 'paddle.to_tensor', (['np_fw'], {}), '(np_fw)\n', (9407, 9414), False, 'import paddle\n'), ((9438, 9461), 'paddle.to_tensor', 'paddle.to_tensor', (['np_aw'], {}), '(np_aw)\n', (9454, 9461), False, 'import paddle\n'), ((9480, 9571), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(True)', 'fweights': 'fweights', 'aweights': 'aweights'}), '(tensor, rowvar=True, ddof=True, fweights=fweights,\n aweights=aweights)\n', (9497, 9571), False, 'import paddle\n'), ((9802, 9821), 'paddle.fluid.framework._test_eager_guard', '_test_eager_guard', ([], {}), '()\n', (9819, 9821), False, 'from paddle.fluid.framework import _test_eager_guard, in_dygraph_mode\n'), ((1318, 1336), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', 
(['(0)'], {}), '(0)\n', (1333, 1336), True, 'import paddle.fluid as fluid\n'), ((1421, 1445), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (1438, 1445), False, 'import paddle\n'), ((1480, 1504), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (1497, 1504), False, 'import paddle\n'), ((1633, 1666), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (1649, 1666), False, 'import paddle\n'), ((1689, 1768), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(True)', 'fweights': 'None', 'aweights': 'None'}), '(tensor, rowvar=True, ddof=True, fweights=None, aweights=None)\n', (1706, 1768), False, 'import paddle\n'), ((2460, 2478), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (2475, 2478), True, 'import paddle.fluid as fluid\n'), ((2563, 2587), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (2580, 2587), False, 'import paddle\n'), ((2622, 2646), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (2639, 2646), False, 'import paddle\n'), ((2775, 2808), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (2791, 2808), False, 'import paddle\n'), ((2831, 2916), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(False)', 'ddof': '(True)', 'fweights': 'None', 'aweights': 'None'}), '(tensor, rowvar=False, ddof=True, fweights=None, aweights=None\n )\n', (2848, 2916), False, 'import paddle\n'), ((3599, 3617), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (3614, 3617), True, 'import paddle.fluid as fluid\n'), ((3702, 3726), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (3719, 3726), False, 'import paddle\n'), ((3761, 3785), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (3778, 3785), False, 'import paddle\n'), ((3914, 3947), 
'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (3930, 3947), False, 'import paddle\n'), ((3970, 4055), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(False)', 'fweights': 'None', 'aweights': 'None'}), '(tensor, rowvar=True, ddof=False, fweights=None, aweights=None\n )\n', (3987, 4055), False, 'import paddle\n'), ((4735, 4753), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (4750, 4753), True, 'import paddle.fluid as fluid\n'), ((4838, 4862), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (4855, 4862), False, 'import paddle\n'), ((4897, 4921), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (4914, 4921), False, 'import paddle\n'), ((5156, 5189), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (5172, 5189), False, 'import paddle\n'), ((5217, 5249), 'paddle.to_tensor', 'paddle.to_tensor', (['np_fw'], {'place': 'p'}), '(np_fw, place=p)\n', (5233, 5249), False, 'import paddle\n'), ((5272, 5359), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(True)', 'fweights': 'fweights', 'aweights': 'None'}), '(tensor, rowvar=True, ddof=True, fweights=fweights,\n aweights=None)\n', (5289, 5359), False, 'import paddle\n'), ((6053, 6071), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (6068, 6071), True, 'import paddle.fluid as fluid\n'), ((6156, 6180), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (6173, 6180), False, 'import paddle\n'), ((6215, 6239), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (6232, 6239), False, 'import paddle\n'), ((6474, 6507), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (6490, 6507), False, 'import paddle\n'), ((6535, 6567), 'paddle.to_tensor', 'paddle.to_tensor', (['np_aw'], {'place': 
'p'}), '(np_aw, place=p)\n', (6551, 6567), False, 'import paddle\n'), ((6590, 6678), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(True)', 'fweights': 'None', 'aweights': 'aweights'}), '(tensor, rowvar=True, ddof=True, fweights=None, aweights=\n aweights)\n', (6607, 6678), False, 'import paddle\n'), ((7370, 7388), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (7385, 7388), True, 'import paddle.fluid as fluid\n'), ((7473, 7497), 'paddle.set_device', 'paddle.set_device', (['"""cpu"""'], {}), "('cpu')\n", (7490, 7497), False, 'import paddle\n'), ((7532, 7556), 'paddle.set_device', 'paddle.set_device', (['"""gpu"""'], {}), "('gpu')\n", (7549, 7556), False, 'import paddle\n'), ((7867, 7900), 'paddle.to_tensor', 'paddle.to_tensor', (['np_arr'], {'place': 'p'}), '(np_arr, place=p)\n', (7883, 7900), False, 'import paddle\n'), ((7928, 7960), 'paddle.to_tensor', 'paddle.to_tensor', (['np_fw'], {'place': 'p'}), '(np_fw, place=p)\n', (7944, 7960), False, 'import paddle\n'), ((7988, 8020), 'paddle.to_tensor', 'paddle.to_tensor', (['np_aw'], {'place': 'p'}), '(np_aw, place=p)\n', (8004, 8020), False, 'import paddle\n'), ((8043, 8134), 'paddle.linalg.cov', 'paddle.linalg.cov', (['tensor'], {'rowvar': '(True)', 'ddof': '(True)', 'fweights': 'fweights', 'aweights': 'aweights'}), '(tensor, rowvar=True, ddof=True, fweights=fweights,\n aweights=aweights)\n', (8060, 8134), False, 'import paddle\n'), ((9074, 9101), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (9088, 9101), True, 'import numpy as np\n'), ((1566, 1593), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (1580, 1593), True, 'import numpy as np\n'), ((2708, 2735), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (2722, 2735), True, 'import numpy as np\n'), ((3847, 3874), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (3861, 3874), 
True, 'import numpy as np\n'), ((4983, 5010), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (4997, 5010), True, 'import numpy as np\n'), ((5049, 5093), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'self.weightshape'}), '(10, size=self.weightshape)\n', (5066, 5093), True, 'import numpy as np\n'), ((6301, 6328), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (6315, 6328), True, 'import numpy as np\n'), ((6367, 6411), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'self.weightshape'}), '(10, size=self.weightshape)\n', (6384, 6411), True, 'import numpy as np\n'), ((7618, 7645), 'numpy.random.rand', 'np.random.rand', (['*self.shape'], {}), '(*self.shape)\n', (7632, 7645), True, 'import numpy as np\n'), ((7684, 7728), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'self.weightshape'}), '(10, size=self.weightshape)\n', (7701, 7728), True, 'import numpy as np\n'), ((7790, 7823), 'numpy.random.rand', 'np.random.rand', (['*self.weightshape'], {}), '(*self.weightshape)\n', (7804, 7823), True, 'import numpy as np\n'), ((9152, 9186), 'numpy.random.rand', 'np.random.rand', (['*self.fweightshape'], {}), '(*self.fweightshape)\n', (9166, 9186), True, 'import numpy as np\n'), ((9252, 9286), 'numpy.random.rand', 'np.random.rand', (['*self.aweightshape'], {}), '(*self.aweightshape)\n', (9266, 9286), True, 'import numpy as np\n')] |
"""
Description: A python 2.7 implementation of gcForest proposed in [1]. A demo implementation of gcForest library as well as some demo client scripts to demostrate how to use the code. The implementation is flexible enough for modifying the model or
fit your own datasets.
Reference: [1] <NAME> and <NAME>. Deep Forest: Towards an Alternative to Deep Neural Networks. In IJCAI-2017. (https://arxiv.org/abs/1702.08835v2 )
Requirements: This package is developed with Python 2.7, please make sure all the demendencies are installed, which is specified in requirements.txt
ATTN: This package is free for academic usage. You can run it at your own risk. For other purposes, please contact Prof. <NAME>(<EMAIL>)
ATTN2: This package was developed by <NAME>(<EMAIL>). The readme file and demo roughly explains how to use the codes. For any problem concerning the codes, please feel free to contact Mr.Feng.
"""
import numpy as np
from scipy.sparse import issparse
from .utils.log_utils import get_logger
LOGGER = get_logger('gcforest.exp_utils')
def load_model_config(model_path, log_name=None):
    """Load a model configuration (JSON) from ``model_path``.

    When ``log_name`` is given, the parsed configuration is also
    pretty-printed to a logger of that name.  Returns the config dict.
    """
    import json
    from .utils.config_utils import load_json
    config = load_json(model_path)
    if log_name is None:
        return config
    logger = get_logger(log_name)
    logger.info(log_name)
    dumped = json.dumps(config, sort_keys=True, indent=4, separators=(',', ':'))
    logger.info("\n" + dumped)
    return config
def concat_datas(datas):
    """Flatten each array in ``datas`` to 2-D and concatenate column-wise.

    Parameters
    ----------
    datas : ndarray or list of ndarray
        A non-list input is returned unchanged.  For a list, each item is
        reshaped to ``(n_samples, -1)`` and the items are concatenated
        along axis 1.

    Returns
    -------
    ndarray
        The horizontally stacked data (or ``datas`` itself if not a list).
    """
    if not isinstance(datas, list):
        return datas
    # Build a new list instead of mutating the caller's argument in place
    # (the original implementation overwrote datas[i]).
    flattened = [data.reshape((data.shape[0], -1)) for data in datas]
    return np.concatenate(flattened, axis=1)
def data_norm(X_train, X_test):
    """Standardise ``X_train`` and ``X_test`` in place using train statistics.

    Both arrays are modified in place (zero mean, unit variance per column,
    computed from the training data).  The (mean, std) pair is returned so
    callers can reuse or undo the scaling.
    """
    mean = np.mean(X_train, axis=0)
    std = np.std(X_train, axis=0)
    for data in (X_train, X_test):
        data -= mean
        data /= std
    return mean, std
def append_origin(X, X_origin):
    """Concatenate ``X`` with the original features ``X_origin`` column-wise.

    Both inputs are flattened to ``(n_samples, -1)`` before stacking.
    """
    flat = X.reshape(X.shape[0], -1)
    flat_origin = X_origin.reshape(X_origin.shape[0], -1)
    return np.hstack((flat, flat_origin))
def prec_ets(n_trees, X_train, y_train, X_test, y_test, random_state=None):
    """
    Fit an ExtraTreesClassifier and report its test-set accuracy.

    Parameters
    ----------
    n_trees : int
        Number of trees (n_estimators) in the ensemble.
    X_train, X_test : ndarray or sparse matrix
        Feature data; dense arrays are flattened to (n_samples, -1).
    y_train, y_test : array-like
        Class labels.
    random_state : int or None
        Seed forwarded to the classifier for reproducibility.

    Returns
    -------
    tuple
        (clf, y_pred): the fitted classifier and its predictions on X_test.
    """
    from sklearn.ensemble import ExtraTreesClassifier
    # Only dense inputs are reshaped; sparse matrices are used as-is.
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = ExtraTreesClassifier(n_estimators=n_trees, max_depth=None, n_jobs=-1, verbose=1, random_state=random_state)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Accuracy = fraction of correctly classified test samples.
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_ets{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def prec_rf(n_trees, X_train, y_train, X_test, y_test, random_state=None):
    """
    Fit a RandomForestClassifier and report its test-set accuracy.

    (The original docstring incorrectly said "ExtraTrees".)

    Parameters
    ----------
    n_trees : int
        Number of trees (n_estimators) in the forest.
    X_train, X_test : ndarray or sparse matrix
        Feature data; dense arrays are flattened to (n_samples, -1).
    y_train, y_test : array-like
        Class labels.
    random_state : int or None
        Seed forwarded to the classifier, mirroring prec_ets for
        reproducibility.  Defaults to None (previous behaviour).

    Returns
    -------
    tuple
        (clf, y_pred): the fitted classifier and its predictions on X_test.
    """
    from sklearn.ensemble import RandomForestClassifier
    # Only dense inputs are reshaped; sparse matrices are used as-is.
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = RandomForestClassifier(n_estimators=n_trees, max_depth=None, n_jobs=-1, verbose=1,
                                 random_state=random_state)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Accuracy = fraction of correctly classified test samples.
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_rf{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def xgb_eval_accuracy(y_pred_proba, y_true):
    """Custom xgboost eval metric: negated accuracy (lower is better).

    ``y_true`` is an xgboost DMatrix; its labels are fetched through
    ``get_label()``.  Returns the ('accuracy', -acc) pair that xgboost's
    eval API expects.
    """
    predictions = np.argmax(y_pred_proba, axis=1)
    labels = y_true.get_label()
    n_correct = np.sum(predictions == labels)
    accuracy = float(n_correct) / len(predictions)
    return 'accuracy', -accuracy
def prec_xgb(n_trees, max_depth, X_train, y_train, X_test, y_test, learning_rate=0.1):
    """
    Fit an xgboost XGBClassifier (multi:softprob) and report test accuracy.

    (The original docstring incorrectly said "ExtraTrees".)

    Parameters
    ----------
    n_trees : int
        Number of boosting rounds (n_estimators).
    max_depth : int
        Maximum depth of each tree.
    X_train, X_test : ndarray
        Feature data; flattened to (n_samples, -1).
    y_train, y_test : array-like
        Class labels.
    learning_rate : float
        Boosting learning rate (default 0.1).

    Returns
    -------
    tuple
        (clf, y_pred): the fitted classifier and its predictions on X_test.
    """
    import xgboost as xgb
    X_train = X_train.reshape((X_train.shape[0], -1))
    X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: n_trees={},X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        n_trees, X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = xgb.XGBClassifier(n_estimators=n_trees, max_depth=max_depth, objective='multi:softprob',
            seed=0, silent=True, nthread=-1, learning_rate=learning_rate)
    # Monitor multiclass error on the held-out test set during boosting.
    eval_set = [(X_test, y_test)]
    clf.fit(X_train, y_train, eval_set=eval_set, eval_metric="merror")
    y_pred = clf.predict(X_test)
    # Accuracy = fraction of correctly classified test samples.
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_xgb_{}={:.6f}%'.format(n_trees, prec*100.0))
    return clf, y_pred
def prec_log(X_train, y_train, X_test, y_test):
    """
    Fit a LogisticRegression (SAG solver) and report its test-set accuracy.

    Parameters
    ----------
    X_train, X_test : ndarray or sparse matrix
        Feature data; dense arrays are flattened to (n_samples, -1).
    y_train, y_test : array-like
        Class labels.

    Returns
    -------
    tuple
        (clf, y_pred): the fitted classifier and its predictions on X_test.
    """
    from sklearn.linear_model import LogisticRegression
    # Flatten only dense inputs; the original code reshaped a second time
    # unconditionally, which defeated this issparse() guard for sparse data.
    if not issparse(X_train):
        X_train = X_train.reshape((X_train.shape[0], -1))
    if not issparse(X_test):
        X_test = X_test.reshape((X_test.shape[0], -1))
    LOGGER.info('start predict: X_train.shape={},y_train.shape={},X_test.shape={},y_test.shape={}'.format(
        X_train.shape, y_train.shape, X_test.shape, y_test.shape))
    clf = LogisticRegression(solver='sag', n_jobs=-1, verbose=1)
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Accuracy = fraction of correctly classified test samples.
    prec = float(np.sum(y_pred == y_test)) / len(y_test)
    LOGGER.info('prec_log={:.6f}%'.format(prec*100.0))
    return clf, y_pred
def plot_forest_all_proba(y_proba_all, y_gt):
    """Visualise, per tree/forest, the probability assigned to the true class.

    Parameters
    ----------
    y_proba_all : sequence of ndarray
        One (N, n_classes) probability matrix per tree/forest.
    y_gt : array-like of int
        Ground-truth class index for each of the N samples.

    Displays an (n_trees, N) heat map where cell (i, j) is the probability
    that estimator i assigned to sample j's true class.
    """
    from matplotlib import pylab
    N = len(y_gt)
    num_tree = len(y_proba_all)
    pylab.clf()
    mat = np.zeros((num_tree, N))
    LOGGER.info('mat.shape={}'.format(mat.shape))
    for i in range(num_tree):
        # Fancy-index out p(true class) for every sample from estimator i.
        mat[i,:] = y_proba_all[i][(range(N), y_gt)]
    pylab.matshow(mat, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
    pylab.grid(False)
    pylab.show()
def plot_confusion_matrix(cm, label_list, title='Confusion matrix', cmap=None):
    """Plot a row-normalised confusion matrix and save it to 'test.jpg'.

    Parameters
    ----------
    cm : array-like
        Raw confusion-matrix counts; each row is normalised to sum to 1.
    label_list : sequence of str
        Class names used as tick labels on both axes.
    title : str
        Figure title.
    cmap :
        Currently unused; the colour map is hard-coded to 'Blues'.
    """
    from matplotlib import pylab
    cm = np.asarray(cm, dtype=np.float32)
    for i, row in enumerate(cm):
        # Normalise each row so cells show per-true-class fractions.
        cm[i] = cm[i] / np.sum(cm[i])
    #import matplotlib.pyplot as plt
    #plt.ion()
    pylab.clf()
    pylab.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
    ax = pylab.axes()
    ax.set_xticks(range(len(label_list)))
    ax.set_xticklabels(label_list, rotation='vertical')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_yticks(range(len(label_list)))
    ax.set_yticklabels(label_list)
    pylab.title(title)
    pylab.colorbar()
    pylab.grid(False)
    pylab.xlabel('Predicted class')
    pylab.ylabel('True class')
    pylab.grid(False)
    pylab.savefig('test.jpg')
    pylab.show()
| [
"matplotlib.pylab.savefig",
"sklearn.ensemble.ExtraTreesClassifier",
"matplotlib.pylab.show",
"numpy.mean",
"matplotlib.pylab.clf",
"matplotlib.pylab.grid",
"matplotlib.pylab.title",
"json.dumps",
"numpy.asarray",
"numpy.concatenate",
"matplotlib.pylab.axes",
"sklearn.ensemble.RandomForestClas... | [((1582, 1611), 'numpy.concatenate', 'np.concatenate', (['datas'], {'axis': '(1)'}), '(datas, axis=1)\n', (1596, 1611), True, 'import numpy as np\n'), ((1658, 1682), 'numpy.mean', 'np.mean', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (1665, 1682), True, 'import numpy as np\n'), ((1695, 1718), 'numpy.std', 'np.std', (['X_train'], {'axis': '(0)'}), '(X_train, axis=0)\n', (1701, 1718), True, 'import numpy as np\n'), ((2496, 2607), 'sklearn.ensemble.ExtraTreesClassifier', 'ExtraTreesClassifier', ([], {'n_estimators': 'n_trees', 'max_depth': 'None', 'n_jobs': '(-1)', 'verbose': '(1)', 'random_state': 'random_state'}), '(n_estimators=n_trees, max_depth=None, n_jobs=-1,\n verbose=1, random_state=random_state)\n', (2516, 2607), False, 'from sklearn.ensemble import ExtraTreesClassifier\n'), ((3333, 3419), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': 'n_trees', 'max_depth': 'None', 'n_jobs': '(-1)', 'verbose': '(1)'}), '(n_estimators=n_trees, max_depth=None, n_jobs=-1,\n verbose=1)\n', (3355, 3419), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((3720, 3751), 'numpy.argmax', 'np.argmax', (['y_pred_proba'], {'axis': '(1)'}), '(y_pred_proba, axis=1)\n', (3729, 3751), True, 'import numpy as np\n'), ((4322, 4482), 'xgboost.XGBClassifier', 'xgb.XGBClassifier', ([], {'n_estimators': 'n_trees', 'max_depth': 'max_depth', 'objective': '"""multi:softprob"""', 'seed': '(0)', 'silent': '(True)', 'nthread': '(-1)', 'learning_rate': 'learning_rate'}), "(n_estimators=n_trees, max_depth=max_depth, objective=\n 'multi:softprob', seed=0, silent=True, nthread=-1, learning_rate=\n learning_rate)\n", (4339, 4482), True, 'import xgboost as xgb\n'), ((5336, 5390), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'solver': '"""sag"""', 'n_jobs': '(-1)', 'verbose': '(1)'}), "(solver='sag', n_jobs=-1, verbose=1)\n", (5354, 5390), False, 'from sklearn.linear_model 
import LogisticRegression\n'), ((5723, 5734), 'matplotlib.pylab.clf', 'pylab.clf', ([], {}), '()\n', (5732, 5734), False, 'from matplotlib import pylab\n'), ((5745, 5768), 'numpy.zeros', 'np.zeros', (['(num_tree, N)'], {}), '((num_tree, N))\n', (5753, 5768), True, 'import numpy as np\n'), ((5905, 5969), 'matplotlib.pylab.matshow', 'pylab.matshow', (['mat'], {'fignum': '(False)', 'cmap': '"""Blues"""', 'vmin': '(0)', 'vmax': '(1.0)'}), "(mat, fignum=False, cmap='Blues', vmin=0, vmax=1.0)\n", (5918, 5969), False, 'from matplotlib import pylab\n'), ((5974, 5991), 'matplotlib.pylab.grid', 'pylab.grid', (['(False)'], {}), '(False)\n', (5984, 5991), False, 'from matplotlib import pylab\n'), ((5996, 6008), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (6006, 6008), False, 'from matplotlib import pylab\n'), ((6132, 6164), 'numpy.asarray', 'np.asarray', (['cm'], {'dtype': 'np.float32'}), '(cm, dtype=np.float32)\n', (6142, 6164), True, 'import numpy as np\n'), ((6292, 6303), 'matplotlib.pylab.clf', 'pylab.clf', ([], {}), '()\n', (6301, 6303), False, 'from matplotlib import pylab\n'), ((6308, 6371), 'matplotlib.pylab.matshow', 'pylab.matshow', (['cm'], {'fignum': '(False)', 'cmap': '"""Blues"""', 'vmin': '(0)', 'vmax': '(1.0)'}), "(cm, fignum=False, cmap='Blues', vmin=0, vmax=1.0)\n", (6321, 6371), False, 'from matplotlib import pylab\n'), ((6381, 6393), 'matplotlib.pylab.axes', 'pylab.axes', ([], {}), '()\n', (6391, 6393), False, 'from matplotlib import pylab\n'), ((6615, 6633), 'matplotlib.pylab.title', 'pylab.title', (['title'], {}), '(title)\n', (6626, 6633), False, 'from matplotlib import pylab\n'), ((6638, 6654), 'matplotlib.pylab.colorbar', 'pylab.colorbar', ([], {}), '()\n', (6652, 6654), False, 'from matplotlib import pylab\n'), ((6659, 6676), 'matplotlib.pylab.grid', 'pylab.grid', (['(False)'], {}), '(False)\n', (6669, 6676), False, 'from matplotlib import pylab\n'), ((6681, 6712), 'matplotlib.pylab.xlabel', 'pylab.xlabel', (['"""Predicted class"""'], 
{}), "('Predicted class')\n", (6693, 6712), False, 'from matplotlib import pylab\n'), ((6717, 6743), 'matplotlib.pylab.ylabel', 'pylab.ylabel', (['"""True class"""'], {}), "('True class')\n", (6729, 6743), False, 'from matplotlib import pylab\n'), ((6748, 6765), 'matplotlib.pylab.grid', 'pylab.grid', (['(False)'], {}), '(False)\n', (6758, 6765), False, 'from matplotlib import pylab\n'), ((6770, 6795), 'matplotlib.pylab.savefig', 'pylab.savefig', (['"""test.jpg"""'], {}), "('test.jpg')\n", (6783, 6795), False, 'from matplotlib import pylab\n'), ((6800, 6812), 'matplotlib.pylab.show', 'pylab.show', ([], {}), '()\n', (6810, 6812), False, 'from matplotlib import pylab\n'), ((2131, 2148), 'scipy.sparse.issparse', 'issparse', (['X_train'], {}), '(X_train)\n', (2139, 2148), False, 'from scipy.sparse import issparse\n'), ((2219, 2235), 'scipy.sparse.issparse', 'issparse', (['X_test'], {}), '(X_test)\n', (2227, 2235), False, 'from scipy.sparse import issparse\n'), ((2968, 2985), 'scipy.sparse.issparse', 'issparse', (['X_train'], {}), '(X_train)\n', (2976, 2985), False, 'from scipy.sparse import issparse\n'), ((3056, 3072), 'scipy.sparse.issparse', 'issparse', (['X_test'], {}), '(X_test)\n', (3064, 3072), False, 'from scipy.sparse import issparse\n'), ((4886, 4903), 'scipy.sparse.issparse', 'issparse', (['X_train'], {}), '(X_train)\n', (4894, 4903), False, 'from scipy.sparse import issparse\n'), ((4974, 4990), 'scipy.sparse.issparse', 'issparse', (['X_test'], {}), '(X_test)\n', (4982, 4990), False, 'from scipy.sparse import issparse\n'), ((2684, 2708), 'numpy.sum', 'np.sum', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (2690, 2708), True, 'import numpy as np\n'), ((3496, 3520), 'numpy.sum', 'np.sum', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (3502, 3520), True, 'import numpy as np\n'), ((3800, 3824), 'numpy.sum', 'np.sum', (['(y_pred == y_true)'], {}), '(y_pred == y_true)\n', (3806, 3824), True, 'import numpy as np\n'), ((4640, 4664), 'numpy.sum', 
'np.sum', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (4646, 4664), True, 'import numpy as np\n'), ((5471, 5495), 'numpy.sum', 'np.sum', (['(y_pred == y_test)'], {}), '(y_pred == y_test)\n', (5477, 5495), True, 'import numpy as np\n'), ((6222, 6235), 'numpy.sum', 'np.sum', (['cm[i]'], {}), '(cm[i])\n', (6228, 6235), True, 'import numpy as np\n'), ((1318, 1385), 'json.dumps', 'json.dumps', (['config'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ':')"}), "(config, sort_keys=True, indent=4, separators=(',', ':'))\n", (1328, 1385), False, 'import json\n')] |
from __future__ import print_function
import unittest
import numpy as np
from simpegEM1D import (
GlobalEM1DProblemFD, GlobalEM1DSurveyFD,
get_vertical_discretization_frequency
)
from SimPEG import (
regularization, Inversion, InvProblem,
DataMisfit, Utils, Mesh, Maps, Optimization,
Tests
)
np.random.seed(41)
class GlobalEM1DFD(unittest.TestCase):
    """Derivative, adjoint and data-misfit tests for the stitched
    frequency-domain EM1D problem with a conductivity-only model."""
    def setUp(self, parallel=True):
        """Build a 10-sounding synthetic FD survey over a three-layer
        conductivity model and wire up the SimPEG inversion objects."""
        frequency = np.array([900, 7200, 56000], dtype=float)
        hz = get_vertical_discretization_frequency(
            frequency, sigma_background=1./10.
        )
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        mesh = Mesh.TensorMesh([hx, hz], x0='00')
        # Three-layer model: 1/100 S/m background, 1/10 below 50, 1/50 below 25.
        inds = mesh.gridCC[:, 1] < 25
        inds_1 = mesh.gridCC[:, 1] < 50
        sigma = np.ones(mesh.nC) * 1./100.
        sigma[inds_1] = 1./10.
        sigma[inds] = 1./50.
        sigma_em1d = sigma.reshape(mesh.vnC, order='F').flatten()
        # Model parameters are log-conductivities (paired with ExpMap below).
        mSynth = np.log(sigma_em1d)
        # Receivers/sources 30 m above the (flat) topography.
        x = mesh.vectorCCx
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        mapping = Maps.ExpMap(mesh)
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="Hz",
            field_type='secondary',
            topo=topo
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=mapping, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        survey.makeSyntheticData(mSynth)
        # Now set up the problem to do some minimization
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        # beta=0 so the inverse problem reduces to the data misfit alone.
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        self.m0 = mSynth
        self.survey = survey
        self.dmis = dmis
    def test_misfit(self):
        """Check dpred/Jvec consistency with a finite-difference derivative test."""
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
    def test_adjoint(self):
        """Verify the adjoint identity w.(J v) == v.(J^T w) for random v, w."""
        # Adjoint Test
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)
    def test_dataObj(self):
        """Derivative test for the data-misfit functional and its gradient."""
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
class GlobalEM1DFD_Height(unittest.TestCase):
    """Same derivative/adjoint/data-misfit tests as GlobalEM1DFD, but for a
    half-space model where both conductivity and sensor height are inverted
    (model vector = [log-sigma per sounding, height per sounding])."""
    def setUp(self, parallel=True):
        """Build a 10-sounding half-space FD survey with a sigma+height
        model and wire up the SimPEG inversion objects."""
        frequency = np.array([900, 7200, 56000], dtype=float)
        # Single layer (half-space) vertical discretization.
        hz = np.r_[1.]
        n_sounding = 10
        dx = 20.
        hx = np.ones(n_sounding) * dx
        e = np.ones(n_sounding)
        # Model vector: log-conductivity block followed by height block (20 m).
        mSynth = np.r_[e*np.log(1./100.), e*20]
        x = np.arange(n_sounding)
        y = np.zeros_like(x)
        z = np.ones_like(x) * 30.
        rx_locations = np.c_[x, y, z]
        src_locations = np.c_[x, y, z]
        topo = np.c_[x, y, z-30.].astype(float)
        # Split the model vector into its sigma and h parts.
        wires = Maps.Wires(('sigma', n_sounding),('h', n_sounding))
        expmap = Maps.ExpMap(nP=n_sounding)
        sigmaMap = expmap * wires.sigma
        survey = GlobalEM1DSurveyFD(
            rx_locations=rx_locations,
            src_locations=src_locations,
            frequency=frequency,
            offset=np.ones_like(frequency) * 8.,
            src_type="VMD",
            rx_type="ppm",
            field_type='secondary',
            topo=topo,
            half_switch=True
        )
        problem = GlobalEM1DProblemFD(
            [], sigmaMap=sigmaMap, hMap=wires.h, hz=hz,
            parallel=parallel, n_cpu=2
        )
        problem.pair(survey)
        survey.makeSyntheticData(mSynth)
        # Now set up the problem to do some minimization
        mesh = Mesh.TensorMesh([int(n_sounding * 2)])
        dmis = DataMisfit.l2_DataMisfit(survey)
        reg = regularization.Tikhonov(mesh)
        opt = Optimization.InexactGaussNewton(
            maxIterLS=20, maxIter=10, tolF=1e-6,
            tolX=1e-6, tolG=1e-6, maxIterCG=6
        )
        # beta=0 so the inverse problem reduces to the data misfit alone.
        invProb = InvProblem.BaseInvProblem(dmis, reg, opt, beta=0.)
        inv = Inversion.BaseInversion(invProb)
        self.inv = inv
        self.reg = reg
        self.p = problem
        self.mesh = mesh
        # Perturb the synthetic model so derivative tests start off-truth.
        self.m0 = mSynth * 1.2
        self.survey = survey
        self.dmis = dmis
    def test_misfit(self):
        """Check dpred/Jvec consistency with a finite-difference derivative test."""
        passed = Tests.checkDerivative(
            lambda m: (
                self.survey.dpred(m),
                lambda mx: self.p.Jvec(self.m0, mx)
            ),
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
    def test_adjoint(self):
        """Verify the adjoint identity w.(J v) == v.(J^T w) for random v, w."""
        # Adjoint Test
        # u = np.random.rand(self.mesh.nC * self.survey.nSrc)
        v = np.random.rand(self.mesh.nC)
        w = np.random.rand(self.survey.dobs.shape[0])
        wtJv = w.dot(self.p.Jvec(self.m0, v))
        vtJtw = v.dot(self.p.Jtvec(self.m0, w))
        passed = np.abs(wtJv - vtJtw) < 1e-10
        print('Adjoint Test', np.abs(wtJv - vtJtw), passed)
        self.assertTrue(passed)
    def test_dataObj(self):
        """Derivative test for the data-misfit functional and its gradient."""
        passed = Tests.checkDerivative(
            lambda m: [self.dmis(m), self.dmis.deriv(m)],
            self.m0,
            plotIt=False,
            num=3
        )
        self.assertTrue(passed)
# Run the test suite when this file is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"numpy.random.rand",
"SimPEG.Maps.ExpMap",
"numpy.log",
"SimPEG.DataMisfit.l2_DataMisfit",
"numpy.array",
"unittest.main",
"numpy.arange",
"simpegEM1D.get_vertical_discretization_frequency",
"numpy.random.seed",
"SimPEG.Optimization.InexactGaussNewton",
"SimPEG.Inversion.BaseInversion",
"numpy... | [((313, 331), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (327, 331), True, 'import numpy as np\n'), ((6276, 6291), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6289, 6291), False, 'import unittest\n'), ((430, 471), 'numpy.array', 'np.array', (['[900, 7200, 56000]'], {'dtype': 'float'}), '([900, 7200, 56000], dtype=float)\n', (438, 471), True, 'import numpy as np\n'), ((485, 562), 'simpegEM1D.get_vertical_discretization_frequency', 'get_vertical_discretization_frequency', (['frequency'], {'sigma_background': '(1.0 / 10.0)'}), '(frequency, sigma_background=1.0 / 10.0)\n', (522, 562), False, 'from simpegEM1D import GlobalEM1DProblemFD, GlobalEM1DSurveyFD, get_vertical_discretization_frequency\n'), ((675, 709), 'SimPEG.Mesh.TensorMesh', 'Mesh.TensorMesh', (['[hx, hz]'], {'x0': '"""00"""'}), "([hx, hz], x0='00')\n", (690, 709), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((974, 992), 'numpy.log', 'np.log', (['sigma_em1d'], {}), '(sigma_em1d)\n', (980, 992), True, 'import numpy as np\n'), ((1033, 1049), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (1046, 1049), True, 'import numpy as np\n'), ((1227, 1244), 'SimPEG.Maps.ExpMap', 'Maps.ExpMap', (['mesh'], {}), '(mesh)\n', (1238, 1244), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((1585, 1661), 'simpegEM1D.GlobalEM1DProblemFD', 'GlobalEM1DProblemFD', (['[]'], {'sigmaMap': 'mapping', 'hz': 'hz', 'parallel': 'parallel', 'n_cpu': '(2)'}), '([], sigmaMap=mapping, hz=hz, parallel=parallel, n_cpu=2)\n', (1604, 1661), False, 'from simpegEM1D import GlobalEM1DProblemFD, GlobalEM1DSurveyFD, get_vertical_discretization_frequency\n'), ((1839, 1871), 'SimPEG.DataMisfit.l2_DataMisfit', 'DataMisfit.l2_DataMisfit', (['survey'], {}), '(survey)\n', (1863, 1871), False, 'from SimPEG import regularization, Inversion, InvProblem, 
DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((1886, 1915), 'SimPEG.regularization.Tikhonov', 'regularization.Tikhonov', (['mesh'], {}), '(mesh)\n', (1909, 1915), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((1930, 2041), 'SimPEG.Optimization.InexactGaussNewton', 'Optimization.InexactGaussNewton', ([], {'maxIterLS': '(20)', 'maxIter': '(10)', 'tolF': '(1e-06)', 'tolX': '(1e-06)', 'tolG': '(1e-06)', 'maxIterCG': '(6)'}), '(maxIterLS=20, maxIter=10, tolF=1e-06, tolX=\n 1e-06, tolG=1e-06, maxIterCG=6)\n', (1961, 2041), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((2087, 2138), 'SimPEG.InvProblem.BaseInvProblem', 'InvProblem.BaseInvProblem', (['dmis', 'reg', 'opt'], {'beta': '(0.0)'}), '(dmis, reg, opt, beta=0.0)\n', (2112, 2138), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((2152, 2184), 'SimPEG.Inversion.BaseInversion', 'Inversion.BaseInversion', (['invProb'], {}), '(invProb)\n', (2175, 2184), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((2790, 2818), 'numpy.random.rand', 'np.random.rand', (['self.mesh.nC'], {}), '(self.mesh.nC)\n', (2804, 2818), True, 'import numpy as np\n'), ((2831, 2872), 'numpy.random.rand', 'np.random.rand', (['self.survey.dobs.shape[0]'], {}), '(self.survey.dobs.shape[0])\n', (2845, 2872), True, 'import numpy as np\n'), ((3443, 3484), 'numpy.array', 'np.array', (['[900, 7200, 56000]'], {'dtype': 'float'}), '([900, 7200, 56000], dtype=float)\n', (3451, 3484), True, 'import numpy as np\n'), ((3599, 3618), 'numpy.ones', 'np.ones', (['n_sounding'], {}), '(n_sounding)\n', (3606, 3618), True, 'import numpy as np\n'), ((3680, 3701), 'numpy.arange', 'np.arange', (['n_sounding'], {}), '(n_sounding)\n', (3689, 3701), True, 'import numpy 
as np\n'), ((3714, 3730), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3727, 3730), True, 'import numpy as np\n'), ((3907, 3959), 'SimPEG.Maps.Wires', 'Maps.Wires', (["('sigma', n_sounding)", "('h', n_sounding)"], {}), "(('sigma', n_sounding), ('h', n_sounding))\n", (3917, 3959), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((3976, 4002), 'SimPEG.Maps.ExpMap', 'Maps.ExpMap', ([], {'nP': 'n_sounding'}), '(nP=n_sounding)\n', (3987, 4002), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((4415, 4511), 'simpegEM1D.GlobalEM1DProblemFD', 'GlobalEM1DProblemFD', (['[]'], {'sigmaMap': 'sigmaMap', 'hMap': 'wires.h', 'hz': 'hz', 'parallel': 'parallel', 'n_cpu': '(2)'}), '([], sigmaMap=sigmaMap, hMap=wires.h, hz=hz, parallel=\n parallel, n_cpu=2)\n', (4434, 4511), False, 'from simpegEM1D import GlobalEM1DProblemFD, GlobalEM1DSurveyFD, get_vertical_discretization_frequency\n'), ((4738, 4770), 'SimPEG.DataMisfit.l2_DataMisfit', 'DataMisfit.l2_DataMisfit', (['survey'], {}), '(survey)\n', (4762, 4770), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((4785, 4814), 'SimPEG.regularization.Tikhonov', 'regularization.Tikhonov', (['mesh'], {}), '(mesh)\n', (4808, 4814), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((4829, 4940), 'SimPEG.Optimization.InexactGaussNewton', 'Optimization.InexactGaussNewton', ([], {'maxIterLS': '(20)', 'maxIter': '(10)', 'tolF': '(1e-06)', 'tolX': '(1e-06)', 'tolG': '(1e-06)', 'maxIterCG': '(6)'}), '(maxIterLS=20, maxIter=10, tolF=1e-06, tolX=\n 1e-06, tolG=1e-06, maxIterCG=6)\n', (4860, 4940), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((4986, 5037), 
'SimPEG.InvProblem.BaseInvProblem', 'InvProblem.BaseInvProblem', (['dmis', 'reg', 'opt'], {'beta': '(0.0)'}), '(dmis, reg, opt, beta=0.0)\n', (5011, 5037), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((5051, 5083), 'SimPEG.Inversion.BaseInversion', 'Inversion.BaseInversion', (['invProb'], {}), '(invProb)\n', (5074, 5083), False, 'from SimPEG import regularization, Inversion, InvProblem, DataMisfit, Utils, Mesh, Maps, Optimization, Tests\n'), ((5695, 5723), 'numpy.random.rand', 'np.random.rand', (['self.mesh.nC'], {}), '(self.mesh.nC)\n', (5709, 5723), True, 'import numpy as np\n'), ((5736, 5777), 'numpy.random.rand', 'np.random.rand', (['self.survey.dobs.shape[0]'], {}), '(self.survey.dobs.shape[0])\n', (5750, 5777), True, 'import numpy as np\n'), ((635, 654), 'numpy.ones', 'np.ones', (['n_sounding'], {}), '(n_sounding)\n', (642, 654), True, 'import numpy as np\n'), ((1062, 1077), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (1074, 1077), True, 'import numpy as np\n'), ((2984, 3004), 'numpy.abs', 'np.abs', (['(wtJv - vtJtw)'], {}), '(wtJv - vtJtw)\n', (2990, 3004), True, 'import numpy as np\n'), ((3043, 3063), 'numpy.abs', 'np.abs', (['(wtJv - vtJtw)'], {}), '(wtJv - vtJtw)\n', (3049, 3063), True, 'import numpy as np\n'), ((3562, 3581), 'numpy.ones', 'np.ones', (['n_sounding'], {}), '(n_sounding)\n', (3569, 3581), True, 'import numpy as np\n'), ((3743, 3758), 'numpy.ones_like', 'np.ones_like', (['x'], {}), '(x)\n', (3755, 3758), True, 'import numpy as np\n'), ((5889, 5909), 'numpy.abs', 'np.abs', (['(wtJv - vtJtw)'], {}), '(wtJv - vtJtw)\n', (5895, 5909), True, 'import numpy as np\n'), ((5948, 5968), 'numpy.abs', 'np.abs', (['(wtJv - vtJtw)'], {}), '(wtJv - vtJtw)\n', (5954, 5968), True, 'import numpy as np\n'), ((804, 820), 'numpy.ones', 'np.ones', (['mesh.nC'], {}), '(mesh.nC)\n', (811, 820), True, 'import numpy as np\n'), ((1414, 1437), 'numpy.ones_like', 'np.ones_like', 
(['frequency'], {}), '(frequency)\n', (1426, 1437), True, 'import numpy as np\n'), ((3644, 3663), 'numpy.log', 'np.log', (['(1.0 / 100.0)'], {}), '(1.0 / 100.0)\n', (3650, 3663), True, 'import numpy as np\n'), ((4213, 4236), 'numpy.ones_like', 'np.ones_like', (['frequency'], {}), '(frequency)\n', (4225, 4236), True, 'import numpy as np\n')] |
from collections import deque
import numpy as np
from gym.spaces import Box
from gym import ObservationWrapper
class FrameStack(ObservationWrapper):
    """Gym wrapper that stacks the last ``num_frames`` 'observation' entries
    into a single array of shape (num_frames, *obs_shape), passing the
    'instruction' entry through unchanged."""
    def __init__(self, env, num_frames):
        super(FrameStack, self).__init__(env)
        self._env = env
        self.num_frames = num_frames
        # Ring buffer of the most recent frames; old frames drop off automatically.
        self.frames = deque(maxlen=num_frames)
        # Prepend a frame axis to the low/high bounds of the observation space.
        low = np.repeat(self.observation_space['observation'].low[np.newaxis, ...], num_frames, axis=0)
        high = np.repeat(self.observation_space['observation'].high[np.newaxis, ...], num_frames, axis=0)
        self.observation_space['observation'] = Box(low=low,
                                                    high=high,
                                                    dtype=self.observation_space['observation'].dtype)
    def observation(self):
        """Return the stacked frames as one (num_frames, ...) array.

        NOTE(review): this overrides ObservationWrapper.observation with a
        different signature (no ``observation`` argument); step/reset below
        call it directly, bypassing the base-class hook.
        """
        assert len(self.frames) == self.num_frames, (len(self.frames), self.num_frames)
        return np.stack(list(self.frames), axis=0)
    def step(self, action):
        """Step the wrapped env, push the new frame, return the stacked obs."""
        observation, reward, done, info = self.env.step(action)
        self.frames.append(observation['observation'])
        return {'observation': self.observation(), 'instruction': observation['instruction']}, reward, done, info
    def reset(self, **kwargs):
        """Reset the env and pre-fill the buffer with copies of the first frame."""
        observation = self.env.reset(**kwargs)
        [self.frames.append(observation['observation']) for _ in range(self.num_frames)]
        return {'observation': self.observation(), 'instruction': observation['instruction']}
    def __getattr__(self, name):
        # Delegate any unknown attribute lookup to the wrapped environment.
        return getattr(self._env, name)
class GrayScaleObservation(ObservationWrapper):
    r"""Convert the image observation from RGB to gray scale.

    Expects the wrapped env's 'observation' space to be an (H, W, 3) image.
    With ``keep_dim=True`` the output keeps a trailing channel axis
    (H, W, 1); otherwise it is a 2-D (H, W) array.
    """
    def __init__(self, env, keep_dim=False):
        super(GrayScaleObservation, self).__init__(env)
        self._env = env
        self.keep_dim = keep_dim
        # Only 3-channel RGB image observations are supported.
        assert (len(env.observation_space['observation'].shape) == 3
                and env.observation_space['observation'].shape[-1] == 3)
        obs_shape = self.observation_space['observation'].shape[:2]
        if self.keep_dim:
            self.observation_space['observation'] = Box(low=0,
                                                        high=255,
                                                        shape=(obs_shape[0], obs_shape[1], 1),
                                                        dtype=np.uint8)
        else:
            self.observation_space['observation'] = Box(low=0, high=255, shape=obs_shape, dtype=np.uint8)
    def observation(self, observation):
        """Replace the 'observation' entry with its gray-scale version in place."""
        import cv2
        observation['observation'] = cv2.cvtColor(observation['observation'], cv2.COLOR_RGB2GRAY)
        if self.keep_dim:
            # Restore the trailing channel axis dropped by cvtColor.
            observation['observation'] = np.expand_dims(observation['observation'], -1)
        return observation
    def __getattr__(self, name):
        # Delegate any unknown attribute lookup to the wrapped environment.
        return getattr(self._env, name)
| [
"collections.deque",
"numpy.repeat",
"gym.spaces.Box",
"cv2.cvtColor",
"numpy.expand_dims"
] | [((322, 346), 'collections.deque', 'deque', ([], {'maxlen': 'num_frames'}), '(maxlen=num_frames)\n', (327, 346), False, 'from collections import deque\n'), ((362, 455), 'numpy.repeat', 'np.repeat', (["self.observation_space['observation'].low[np.newaxis, ...]", 'num_frames'], {'axis': '(0)'}), "(self.observation_space['observation'].low[np.newaxis, ...],\n num_frames, axis=0)\n", (371, 455), True, 'import numpy as np\n'), ((467, 561), 'numpy.repeat', 'np.repeat', (["self.observation_space['observation'].high[np.newaxis, ...]", 'num_frames'], {'axis': '(0)'}), "(self.observation_space['observation'].high[np.newaxis, ...],\n num_frames, axis=0)\n", (476, 561), True, 'import numpy as np\n'), ((606, 680), 'gym.spaces.Box', 'Box', ([], {'low': 'low', 'high': 'high', 'dtype': "self.observation_space['observation'].dtype"}), "(low=low, high=high, dtype=self.observation_space['observation'].dtype)\n", (609, 680), False, 'from gym.spaces import Box\n'), ((2575, 2635), 'cv2.cvtColor', 'cv2.cvtColor', (["observation['observation']", 'cv2.COLOR_RGB2GRAY'], {}), "(observation['observation'], cv2.COLOR_RGB2GRAY)\n", (2587, 2635), False, 'import cv2\n'), ((2114, 2189), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': '(obs_shape[0], obs_shape[1], 1)', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=(obs_shape[0], obs_shape[1], 1), dtype=np.uint8)\n', (2117, 2189), False, 'from gym.spaces import Box\n'), ((2424, 2477), 'gym.spaces.Box', 'Box', ([], {'low': '(0)', 'high': '(255)', 'shape': 'obs_shape', 'dtype': 'np.uint8'}), '(low=0, high=255, shape=obs_shape, dtype=np.uint8)\n', (2427, 2477), False, 'from gym.spaces import Box\n'), ((2703, 2749), 'numpy.expand_dims', 'np.expand_dims', (["observation['observation']", '(-1)'], {}), "(observation['observation'], -1)\n", (2717, 2749), True, 'import numpy as np\n')] |
from torchvision import datasets, transforms
from customImageLoader import CustomImageFolder
from torch.utils.data import SubsetRandomSampler, DataLoader
import numpy as np
class Data:
    """
    Manages dataset loading and preparation for the training, validation
    and testing phases.
    """

    def __init__(self, train_path, test_path, batch_size):
        """
        :param train_path: string representing train set path
        :param test_path: string representing test set path
        :param batch_size: batch size to be used in training
        """
        self.train_path = train_path
        self.test_path = test_path
        self.batch_size = batch_size
        self.train_transform = None
        self.test_transform = None

    def get_train_valid(self, validation_size):
        """
        Split the training data into train and validation dataloaders.

        :param validation_size: float between 0 and 1, fraction of samples
            reserved for validation
        :return: (train_loader, valid_loader) tuple of DataLoader objects
        """
        # Augmentation + normalization pipeline for training images.
        self.train_transform = transforms.Compose([
            transforms.RandomResizedCrop((224, 224)),
            transforms.RandomRotation(30),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]
            )
        ])
        dataset = CustomImageFolder(self.train_path, transform=self.train_transform)
        # Shuffle all sample indices before splitting.
        indices = list(range(len(dataset)))
        np.random.shuffle(indices)
        # Indices before the split point go to validation, the rest to training.
        split_point = int(np.floor(len(dataset) * validation_size))
        train_indices = indices[split_point:]
        valid_indices = indices[:split_point]
        train_sampler = SubsetRandomSampler(train_indices)
        valid_sampler = SubsetRandomSampler(valid_indices)
        train_loader = DataLoader(dataset, batch_size=self.batch_size, sampler=train_sampler)
        valid_loader = DataLoader(dataset, batch_size=self.batch_size, sampler=valid_sampler)
        return train_loader, valid_loader

    def get_test(self):
        """
        Load the test set into a DataLoader.

        :return: DataLoader over the test set
        """
        # Deterministic resize/crop + normalization for evaluation images.
        self.test_transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]
            )
        ])
        test_set = CustomImageFolder(self.test_path, transform=self.test_transform)
        return DataLoader(test_set, batch_size=self.batch_size, shuffle=True)
| [
"torchvision.transforms.CenterCrop",
"torchvision.transforms.RandomRotation",
"numpy.floor",
"torch.utils.data.SubsetRandomSampler",
"torchvision.transforms.RandomHorizontalFlip",
"customImageLoader.CustomImageFolder",
"torchvision.transforms.Normalize",
"torch.utils.data.DataLoader",
"torchvision.t... | [((1515, 1581), 'customImageLoader.CustomImageFolder', 'CustomImageFolder', (['self.train_path'], {'transform': 'self.train_transform'}), '(self.train_path, transform=self.train_transform)\n', (1532, 1581), False, 'from customImageLoader import CustomImageFolder\n'), ((1757, 1786), 'numpy.random.shuffle', 'np.random.shuffle', (['index_list'], {}), '(index_list)\n', (1774, 1786), True, 'import numpy as np\n'), ((2217, 2284), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'self.batch_size', 'sampler': 'train_sampler'}), '(data, batch_size=self.batch_size, sampler=train_sampler)\n', (2227, 2284), False, 'from torch.utils.data import SubsetRandomSampler, DataLoader\n'), ((2308, 2375), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'self.batch_size', 'sampler': 'valid_sampler'}), '(data, batch_size=self.batch_size, sampler=valid_sampler)\n', (2318, 2375), False, 'from torch.utils.data import SubsetRandomSampler, DataLoader\n'), ((2959, 3023), 'customImageLoader.CustomImageFolder', 'CustomImageFolder', (['self.test_path'], {'transform': 'self.test_transform'}), '(self.test_path, transform=self.test_transform)\n', (2976, 3023), False, 'from customImageLoader import CustomImageFolder\n'), ((3077, 3135), 'torch.utils.data.DataLoader', 'DataLoader', (['data'], {'batch_size': 'self.batch_size', 'shuffle': '(True)'}), '(data, batch_size=self.batch_size, shuffle=True)\n', (3087, 3135), False, 'from torch.utils.data import SubsetRandomSampler, DataLoader\n'), ((1839, 1876), 'numpy.floor', 'np.floor', (['(data_size * validation_size)'], {}), '(data_size * validation_size)\n', (1847, 1876), True, 'import numpy as np\n'), ((2085, 2122), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', (['train_index_list'], {}), '(train_index_list)\n', (2104, 2122), False, 'from torch.utils.data import SubsetRandomSampler, DataLoader\n'), ((2124, 2161), 'torch.utils.data.SubsetRandomSampler', 'SubsetRandomSampler', 
(['valid_index_list'], {}), '(valid_index_list)\n', (2143, 2161), False, 'from torch.utils.data import SubsetRandomSampler, DataLoader\n'), ((1189, 1229), 'torchvision.transforms.RandomResizedCrop', 'transforms.RandomResizedCrop', (['(224, 224)'], {}), '((224, 224))\n', (1217, 1229), False, 'from torchvision import datasets, transforms\n'), ((1242, 1271), 'torchvision.transforms.RandomRotation', 'transforms.RandomRotation', (['(30)'], {}), '(30)\n', (1267, 1271), False, 'from torchvision import datasets, transforms\n'), ((1285, 1318), 'torchvision.transforms.RandomHorizontalFlip', 'transforms.RandomHorizontalFlip', ([], {}), '()\n', (1316, 1318), False, 'from torchvision import datasets, transforms\n'), ((1332, 1353), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1351, 1353), False, 'from torchvision import datasets, transforms\n'), ((1367, 1442), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (1387, 1442), False, 'from torchvision import datasets, transforms\n'), ((2669, 2698), 'torchvision.transforms.Resize', 'transforms.Resize', (['(224, 224)'], {}), '((224, 224))\n', (2686, 2698), False, 'from torchvision import datasets, transforms\n'), ((2711, 2737), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (2732, 2737), False, 'from torchvision import datasets, transforms\n'), ((2751, 2772), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (2770, 2772), False, 'from torchvision import datasets, transforms\n'), ((2786, 2861), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': '[0.485, 0.456, 0.406]', 'std': '[0.229, 0.224, 0.225]'}), '(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n', (2806, 2861), False, 'from torchvision import datasets, transforms\n')] |
import numpy as np
from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes
from typing import Tuple
from utils import *
from os import path
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from matplotlib import pyplot as plt
from math import atan2, pi
def load_dataset(filename: str) -> Tuple[np.ndarray, np.ndarray]:
    """
    Load a dataset for comparing the Gaussian Naive Bayes and LDA classifiers.

    The file is assumed to hold an ndarray of shape (n_samples, 3) whose
    first two columns are features and whose third column is the class.

    Parameters
    ----------
    filename: str
        Path to .npy data file

    Returns
    -------
    X: ndarray of shape (n_samples, 2)
        Design matrix to be used
    y: ndarray of shape (n_samples,)
        Class vector specifying for each sample its class
    """
    samples = np.load(filename)
    features = samples[:, :2]
    labels = samples[:, 2].astype(int)
    return features, labels
def run_perceptron(data_dir=r"C:\Users\t8864522\Documents\GitHub\IML.HUJI\datasets\\"):
    """
    Fit and plot fit progression of the Perceptron algorithm over both the
    linearly separable and inseparable datasets.

    Creates a line plot of the perceptron's training loss (y-axis) as a
    function of the training iteration (x-axis).

    :param data_dir: directory containing the .npy dataset files.  The
        default preserves the original hard-coded location; pass a different
        path to run against another checkout.
    """
    for n, f in [("Linearly Separable", "linearly_separable.npy"),
                 ("Linearly Inseparable", "linearly_inseparable.npy")]:
        # Load dataset
        X, y = load_dataset(path.join(data_dir, f))

        # Fit Perceptron and record loss in each fit iteration
        losses = []

        def in_callable(p1: Perceptron, x1: np.ndarray, y1: int) -> None:
            # Record the loss over the *entire* training set at each update.
            losses.append(p1._loss(X, y))

        p = Perceptron(callback=in_callable)
        p.fit(X, y)

        # Plot figure of loss as function of fitting iteration
        plt.plot(np.arange(1, len(losses) + 1, 1), losses)
        plt.title(f"the loss over iterations on the {n} dataset.\n with {len(losses)} iterations")
        plt.ylabel("Loss")
        plt.xlabel("number of iterations")
        plt.show()
def get_ellipse(mu: np.ndarray, cov: np.ndarray):
    """
    Build a plotly trace of the covariance ellipse centered at a given point.

    Parameters
    ----------
    mu : ndarray of shape (2,)
        Center of ellipse
    cov: ndarray of shape (2,2)
        Covariance of Gaussian

    Returns
    -------
    scatter: A plotly trace object of the ellipse
    """
    # Principal axis lengths: eigenvalues in descending order.
    l1, l2 = tuple(np.linalg.eigvalsh(cov)[::-1])
    # Rotation angle of the major axis.
    if cov[0, 1] != 0:
        theta = atan2(l1 - cov[0, 0], cov[0, 1])
    elif cov[0, 0] < cov[1, 1]:
        theta = np.pi / 2
    else:
        theta = 0
    t = np.linspace(0, 2 * pi, 100)
    cos_th, sin_th = np.cos(theta), np.sin(theta)
    xs = l1 * cos_th * np.cos(t) - l2 * sin_th * np.sin(t)
    ys = l1 * sin_th * np.cos(t) + l2 * cos_th * np.sin(t)
    return go.Scatter(x=mu[0] + xs, y=mu[1] + ys, mode="lines", marker_color="black")
def compare_gaussian_classifiers(data_dir=r"C:\Users\t8864522\Documents\GitHub\IML.HUJI\datasets\\"):
    """
    Fit both Gaussian Naive Bayes and LDA classifiers on the gaussians1 and
    gaussians2 datasets and visualize their predictions side by side
    (Gaussian Naive Bayes on the left subplot, LDA on the right).

    :param data_dir: directory containing the .npy dataset files.  The
        default preserves the original hard-coded location.
    """
    for f in ["gaussian1.npy", "gaussian2.npy"]:
        # Load dataset
        X, y = load_dataset(path.join(data_dir, f))

        # Fit models and predict over training set
        lda = LDA()
        bayes = GaussianNaiveBayes()
        lda.fit(X, y)
        bayes.fit(X, y)
        y_pred_lda = lda.predict(X)
        y_pred_b = bayes.predict(X)

        # Two subplots with titles specifying algorithm and its accuracy.
        from IMLearn.metrics import accuracy
        fig = make_subplots(
            1, 2,
            subplot_titles=(f"Bayes with accuracy of {accuracy(y, y_pred_b):.5f}",
                            f"LDA with accuracy of {accuracy(y, y_pred_lda):.5f}"))
        fig.update_layout(showlegend=False, title_text=f"analyzing the data from {f}")

        # Data points: marker color = prediction, marker symbol = true class.
        fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers',
                                 marker=dict(color=y_pred_lda, symbol=y)), 1, 2)
        fig.add_trace(go.Scatter(x=X[:, 0], y=X[:, 1], mode='markers',
                                 marker=dict(color=y_pred_b, symbol=y)), 1, 1)

        # Add `X` dots specifying fitted Gaussians' means.
        # FIX: the left (Bayes) subplot previously re-plotted the LDA means;
        # it now shows the Gaussian Naive Bayes means it belongs to, matching
        # the covariance-ellipse loop below.
        for col, mu in [(2, lda.mu_), (1, bayes.mu_)]:
            for center in range(len(mu)):
                fig.add_trace(go.Scatter(x=[mu[center][0]], y=[mu[center][1]],
                                         mode='markers', marker_color="black",
                                         marker_symbol=4, marker_size=10),
                              col=col, row=1)

        # Add ellipses depicting the covariances of the fitted Gaussians.
        for col, mu, cov in [(2, lda.mu_, lda.cov_), (1, bayes.mu_, bayes.vars_)]:
            for center in range(len(mu)):
                # LDA shares one full covariance matrix; GNB stores per-class
                # variances that must be expanded to a diagonal matrix.
                cov_mat = np.diag(cov[center]) if col == 1 else cov
                fig.add_trace(get_ellipse(mu[center], cov_mat), col=col, row=1)
        fig.show()
if __name__ == '__main__':
    # Fix the NumPy RNG seed so every run is reproducible.
    np.random.seed(0)
    run_perceptron()
    compare_gaussian_classifiers()
| [
"IMLearn.learners.classifiers.Perceptron",
"matplotlib.pyplot.ylabel",
"IMLearn.learners.classifiers.GaussianNaiveBayes",
"matplotlib.pyplot.xlabel",
"os.path.join",
"numpy.diag",
"numpy.linalg.eigvalsh",
"plotly.graph_objects.Scatter",
"numpy.linspace",
"numpy.random.seed",
"math.atan2",
"num... | [((885, 902), 'numpy.load', 'np.load', (['filename'], {}), '(filename)\n', (892, 902), True, 'import numpy as np\n'), ((2656, 2683), 'numpy.linspace', 'np.linspace', (['(0)', '(2 * pi)', '(100)'], {}), '(0, 2 * pi, 100)\n', (2667, 2683), True, 'import numpy as np\n'), ((2849, 2923), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '(mu[0] + xs)', 'y': '(mu[1] + ys)', 'mode': '"""lines"""', 'marker_color': '"""black"""'}), "(x=mu[0] + xs, y=mu[1] + ys, mode='lines', marker_color='black')\n", (2859, 2923), True, 'import plotly.graph_objects as go\n'), ((5739, 5756), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (5753, 5756), True, 'import numpy as np\n'), ((1732, 1764), 'IMLearn.learners.classifiers.Perceptron', 'Perceptron', ([], {'callback': 'in_callable'}), '(callback=in_callable)\n', (1742, 1764), False, 'from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes\n'), ((2014, 2032), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (2024, 2032), True, 'from matplotlib import pyplot as plt\n'), ((2041, 2075), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""number of iterations"""'], {}), "('number of iterations')\n", (2051, 2075), True, 'from matplotlib import pyplot as plt\n'), ((2084, 2094), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2092, 2094), True, 'from matplotlib import pyplot as plt\n'), ((2548, 2580), 'math.atan2', 'atan2', (['(l1 - cov[0, 0])', 'cov[0, 1]'], {}), '(l1 - cov[0, 0], cov[0, 1])\n', (2553, 2580), False, 'from math import atan2, pi\n'), ((3314, 3319), 'IMLearn.learners.classifiers.LDA', 'LDA', ([], {}), '()\n', (3317, 3319), False, 'from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes\n'), ((3336, 3356), 'IMLearn.learners.classifiers.GaussianNaiveBayes', 'GaussianNaiveBayes', ([], {}), '()\n', (3354, 3356), False, 'from IMLearn.learners.classifiers import Perceptron, LDA, GaussianNaiveBayes\n'), ((1446, 1524), 
'os.path.join', 'path.join', (['"""C:\\\\Users\\\\t8864522\\\\Documents\\\\GitHub\\\\IML.HUJI\\\\datasets\\\\\\\\"""', 'f'], {}), "('C:\\\\Users\\\\t8864522\\\\Documents\\\\GitHub\\\\IML.HUJI\\\\datasets\\\\\\\\', f)\n", (1455, 1524), False, 'from os import path\n'), ((2505, 2528), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', (['cov'], {}), '(cov)\n', (2523, 2528), True, 'import numpy as np\n'), ((2715, 2724), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2721, 2724), True, 'import numpy as np\n'), ((2750, 2759), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2756, 2759), True, 'import numpy as np\n'), ((2792, 2801), 'numpy.cos', 'np.cos', (['t'], {}), '(t)\n', (2798, 2801), True, 'import numpy as np\n'), ((2827, 2836), 'numpy.sin', 'np.sin', (['t'], {}), '(t)\n', (2833, 2836), True, 'import numpy as np\n'), ((3175, 3253), 'os.path.join', 'path.join', (['"""C:\\\\Users\\\\t8864522\\\\Documents\\\\GitHub\\\\IML.HUJI\\\\datasets\\\\\\\\"""', 'f'], {}), "('C:\\\\Users\\\\t8864522\\\\Documents\\\\GitHub\\\\IML.HUJI\\\\datasets\\\\\\\\', f)\n", (3184, 3253), False, 'from os import path\n'), ((2699, 2712), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2705, 2712), True, 'import numpy as np\n'), ((2734, 2747), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2740, 2747), True, 'import numpy as np\n'), ((2776, 2789), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2782, 2789), True, 'import numpy as np\n'), ((2811, 2824), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2817, 2824), True, 'import numpy as np\n'), ((5069, 5202), 'plotly.graph_objects.Scatter', 'go.Scatter', ([], {'x': '[lda.mu_[center][0]]', 'y': '[lda.mu_[center][1]]', 'mode': '"""markers"""', 'marker_color': '"""black"""', 'marker_symbol': '(4)', 'marker_size': '(10)'}), "(x=[lda.mu_[center][0]], y=[lda.mu_[center][1]], mode='markers',\n marker_color='black', marker_symbol=4, marker_size=10)\n", (5079, 5202), True, 'import plotly.graph_objects as go\n'), ((5590, 5610), 
'numpy.diag', 'np.diag', (['var[center]'], {}), '(var[center])\n', (5597, 5610), True, 'import numpy as np\n'), ((4229, 4250), 'IMLearn.metrics.accuracy', 'accuracy', (['y', 'y_pred_b'], {}), '(y, y_pred_b)\n', (4237, 4250), False, 'from IMLearn.metrics import accuracy\n'), ((4282, 4305), 'IMLearn.metrics.accuracy', 'accuracy', (['y', 'y_pred_lda'], {}), '(y, y_pred_lda)\n', (4290, 4305), False, 'from IMLearn.metrics import accuracy\n')] |
import FINE as fn
import pandas as pd
import numpy as np
"""
Here we are testing differnt inputs for time-invariant conversion factors that are
not covered in the minimal test system or other tests.
"""
def create_core_esm():
    """
    Build a minimal core energy system model: a single location holding an
    electricity source and a hydrogen demand sink.
    """
    n_time_steps = 4
    hours_per_step = 2190

    # Create an energy system model instance
    esM = fn.EnergySystemModel(
        locations={"ElectrolyzerLocation"},
        commodities={"electricity", "hydrogen"},
        numberOfTimeSteps=n_time_steps,
        commodityUnitsDict={
            "electricity": r"kW$_{el}$",
            "hydrogen": r"kW$_{H_{2},LHV}$",
        },
        hoursPerTimeStep=hours_per_step,
        costUnit="1 Euro",
        lengthUnit="km",
        verboseLogLevel=2,
    )

    # Electricity source without a capacity variable.
    esM.add(
        fn.Source(
            esM=esM,
            name="Electricity market",
            commodity="electricity",
            hasCapacityVariable=False,
        )
    )

    # Constant hydrogen demand over all four time steps.
    hydrogen_demand = pd.Series(np.ones(4)) * hours_per_step
    esM.add(
        fn.Sink(
            esM=esM,
            name="Industry site",
            commodity="hydrogen",
            hasCapacityVariable=False,
            operationRateFix=hydrogen_demand,
        )
    )
    return esM
def test_conversion_factors_as_series():
    """
    Time-invariant conversion factors supplied as a pandas.Series for a
    single location.
    """
    esM = create_core_esm()
    # Series of time-invariant conversion factors (hydrogen out, electricity in).
    conversion_factors = pd.Series([0.7, -1], index=["hydrogen", "electricity"])
    esM.add(
        fn.Conversion(
            esM=esM,
            name="Electrolyzers_VarConvFac",
            physicalUnit=r"kW$_{el}$",
            commodityConversionFactors=conversion_factors,
            hasCapacityVariable=True,
            investPerCapacity=1000,  # euro/kW
            opexPerCapacity=500 * 0.025,
            interestRate=0.08,
            capacityMax=1000,
            economicLifetime=10,
            locationalEligibility=pd.Series([1], ["ElectrolyzerLocation"]),
        )
    )
    # optimize
    esM.optimize(timeSeriesAggregation=False, solver="glpk")
| [
"pandas.Series",
"FINE.Sink",
"numpy.array",
"FINE.EnergySystemModel",
"FINE.Source"
] | [((437, 760), 'FINE.EnergySystemModel', 'fn.EnergySystemModel', ([], {'locations': "{'ElectrolyzerLocation'}", 'commodities': "{'electricity', 'hydrogen'}", 'numberOfTimeSteps': 'numberOfTimeSteps', 'commodityUnitsDict': "{'electricity': 'kW$_{el}$', 'hydrogen': 'kW$_{H_{2},LHV}$'}", 'hoursPerTimeStep': 'hoursPerTimeStep', 'costUnit': '"""1 Euro"""', 'lengthUnit': '"""km"""', 'verboseLogLevel': '(2)'}), "(locations={'ElectrolyzerLocation'}, commodities={\n 'electricity', 'hydrogen'}, numberOfTimeSteps=numberOfTimeSteps,\n commodityUnitsDict={'electricity': 'kW$_{el}$', 'hydrogen':\n 'kW$_{H_{2},LHV}$'}, hoursPerTimeStep=hoursPerTimeStep, costUnit=\n '1 Euro', lengthUnit='km', verboseLogLevel=2)\n", (457, 760), True, 'import FINE as fn\n'), ((885, 986), 'FINE.Source', 'fn.Source', ([], {'esM': 'esM', 'name': '"""Electricity market"""', 'commodity': '"""electricity"""', 'hasCapacityVariable': '(False)'}), "(esM=esM, name='Electricity market', commodity='electricity',\n hasCapacityVariable=False)\n", (894, 986), True, 'import FINE as fn\n'), ((1154, 1270), 'FINE.Sink', 'fn.Sink', ([], {'esM': 'esM', 'name': '"""Industry site"""', 'commodity': '"""hydrogen"""', 'hasCapacityVariable': '(False)', 'operationRateFix': 'demand'}), "(esM=esM, name='Industry site', commodity='hydrogen',\n hasCapacityVariable=False, operationRateFix=demand)\n", (1161, 1270), True, 'import FINE as fn\n'), ((1082, 1112), 'numpy.array', 'np.array', (['[1.0, 1.0, 1.0, 1.0]'], {}), '([1.0, 1.0, 1.0, 1.0])\n', (1090, 1112), True, 'import numpy as np\n'), ((1673, 1728), 'pandas.Series', 'pd.Series', (['[0.7, -1]'], {'index': "['hydrogen', 'electricity']"}), "([0.7, -1], index=['hydrogen', 'electricity'])\n", (1682, 1728), True, 'import pandas as pd\n'), ((2076, 2116), 'pandas.Series', 'pd.Series', (['[1]', "['ElectrolyzerLocation']"], {}), "([1], ['ElectrolyzerLocation'])\n", (2085, 2116), True, 'import pandas as pd\n')] |
import os
import shutil
import sys
MEDCOMMON_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir)
sys.path.append(MEDCOMMON_ROOT)
sys.path.append(os.path.join(MEDCOMMON_ROOT, 'external_lib'))
from utils.data_io_utils import DataIO
from utils.mask_bounding_utils import MaskBoundingUtils
from utils.detection_utils import DETECTION_UTILS
from utils.datasets_utils import DatasetsUtils
import SimpleITK as sitk
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import numpy as np
from tqdm import tqdm
import json
def extract_boundary_info(mask_root, out_file):
    """
    Compute the bounding box and image size of every mask file under
    ``mask_root`` and write them to ``out_file`` as a JSON mapping.

    Each entry maps a mask filename to a list of ints: the six bounding-box
    values returned by ``MaskBoundingUtils.extract_mask_file_bounding``
    followed by the image size as reported by SimpleITK ``GetSize()``.
    """
    os.makedirs(os.path.dirname(out_file), exist_ok=True)
    info_dict = {}
    for filename in tqdm(os.listdir(mask_root)):
        mask_file = os.path.join(mask_root, filename)
        boundary_info = MaskBoundingUtils.extract_mask_file_bounding(mask_file)
        image_shape = sitk.ReadImage(mask_file).GetSize()
        # Cast to plain ints so the values are JSON-serializable.
        info_dict[filename] = [int(i) for i in list(boundary_info) + list(image_shape)]
    with open(out_file, 'w') as f:
        # json.dump streams directly to the file handle (idiomatic form of
        # the original f.write(json.dumps(...))).
        json.dump(info_dict, f)
    print('====> extract_boundary_info finished!')
def generate_resampled_pairs_unsame_resolution(data_root, out_root, dst_size):
    """Resample every image/mask pair under ``data_root`` to ``dst_size``
    voxels and write the results under ``out_root`` (multiprocess)."""
    nii_postfix = '.nii.gz'
    DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess(
        os.path.join(data_root, 'images'),
        os.path.join(data_root, 'masks'),
        os.path.join(out_root, 'images'),
        os.path.join(out_root, 'masks'),
        dst_size,
        nii_postfix,
        nii_postfix,
    )
class PositionDetectionDS(Dataset):
    """Dataset of (image tensor, normalized bounding-box target, image path).

    For every image/mask pair under ``root`` the mask's bounding box is
    mapped into ``image_shape`` coordinates and normalized to [0, 1].
    """

    def __init__(self, root, image_shape=(128, 128, 128), boundary_info_file=None) -> None:
        """
        :param root: directory containing 'images' and 'masks' subfolders
        :param image_shape: target (x, y, z) grid the coordinates are
            resampled into.  FIX: the default was a mutable list; it is now a
            tuple so the default object cannot be shared/mutated across calls.
        :param boundary_info_file: optional JSON of precomputed bounding
            boxes (see extract_boundary_info); avoids re-reading every mask.
        """
        super().__init__()
        # Preserve the original list form for downstream helpers.
        image_shape = list(image_shape)
        self.root = root
        self.image_root = os.path.join(self.root, 'images')
        self.mask_root = os.path.join(self.root, 'masks')
        self.image_files = []
        self.targets = []
        boundary_infos = None
        if boundary_info_file:
            with open(boundary_info_file) as f:
                boundary_infos = json.loads(f.read())
        for filename in tqdm(os.listdir(self.image_root)):
            image_file = os.path.join(self.image_root, filename)
            mask_file = os.path.join(self.mask_root, filename)
            # Skip samples missing either half of the pair.
            if not os.path.exists(image_file):
                continue
            if not os.path.exists(mask_file):
                continue
            if boundary_info_file:
                z_min, y_min, x_min, z_max, y_max, x_max = boundary_infos[filename][:6]
                in_shape = boundary_infos[filename][6:]
            else:
                z_min, y_min, x_min, z_max, y_max, x_max = MaskBoundingUtils.extract_mask_file_bounding(mask_file)
                in_shape = sitk.ReadImage(image_file).GetSize()
            self.image_files.append(image_file)
            # Map both bounding-box corners into the resampled grid ...
            x_min, y_min, z_min = DETECTION_UTILS.point_coordinate_resampled(in_shape, image_shape, [x_min, y_min, z_min])
            x_max, y_max, z_max = DETECTION_UTILS.point_coordinate_resampled(in_shape, image_shape, [x_max, y_max, z_max])
            # ... then normalize each coordinate to [0, 1].
            x_min /= image_shape[0]
            x_max /= image_shape[0]
            y_min /= image_shape[1]
            y_max /= image_shape[1]
            z_min /= image_shape[2]
            z_max /= image_shape[2]
            self.targets.append(np.array([[z_min, y_min, x_min, z_max, y_max, x_max]]))

    def __len__(self):
        return len(self.image_files)

    def __getitem__(self, index):
        image_file = self.image_files[index]
        image = sitk.ReadImage(image_file)
        arr = sitk.GetArrayFromImage(image)
        # Add a leading channel dimension: (1, D, H, W).
        image_tensor = torch.from_numpy(arr).float().unsqueeze(0)
        target = self.targets[index]
        return image_tensor, target, image_file
def test_PositionDetectionDS(root='/data/medical/brain/cerebral_parenchyma/exp/cta',
                             boundary_info_file='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json'):
    """Smoke test: iterate the dataset once, printing batch shapes/targets.

    :param root: dataset root containing 'images'/'masks' subfolders
        (previously hard-coded; now a parameter with the same default)
    :param boundary_info_file: JSON of precomputed bounding boxes
    """
    ds = PositionDetectionDS(root, boundary_info_file=boundary_info_file)
    dataloader = DataLoader(ds, batch_size=1)
    for index, (images, targets, _) in enumerate(dataloader):
        print(images.shape)
        print(targets)
if __name__ == '__main__':
    # Precompute mask bounding-box info (original-resolution variants kept below).
    # extract_boundary_info(mask_root='/data/medical/brain/cerebral_parenchyma/exp/cta/masks', out_file='/data/medical/brain/cerebral_parenchyma/exp/cta/config/mask_boundary_info.json')
    # extract_boundary_info(mask_root='/data/medical/cardiac/seg/coronary/coronary_ori/masks', out_file='/data/medical/cardiac/seg/coronary/coronary_ori/config/mask_boundary_info.json')
    extract_boundary_info(mask_root='/data/medical/brain/cerebral_parenchyma/exp/cta_256/masks', out_file='/data/medical/brain/cerebral_parenchyma/exp/cta_256/config/mask_boundary_info.json')
    extract_boundary_info(mask_root='/data/medical/cardiac/seg/coronary/coronary_ori_256/masks', out_file='/data/medical/cardiac/seg/coronary/coronary_ori_256/config/mask_boundary_info.json')
    # test_PositionDetectionDS()
    # Resample all data to a uniform size so training can process it in parallel.
    # generate_resampled_pairs_unsame_resolution('/data/medical/brain/cerebral_parenchyma/exp/cta', '/data/medical/brain/cerebral_parenchyma/exp/cta_256', [256,256,256])
    # generate_resampled_pairs_unsame_resolution('/data/medical/cardiac/seg/coronary/coronary_ori', '/data/medical/cardiac/seg/coronary/coronary_ori_256', [256,256,256])
| [
"os.path.exists",
"os.listdir",
"utils.datasets_utils.DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess",
"json.dumps",
"os.path.join",
"SimpleITK.GetArrayFromImage",
"utils.detection_utils.DETECTION_UTILS.point_coordinate_resampled",
"torch.from_numpy",
"utils.mask_bounding_utils.Mas... | [((142, 173), 'sys.path.append', 'sys.path.append', (['MEDCOMMON_ROOT'], {}), '(MEDCOMMON_ROOT)\n', (157, 173), False, 'import sys\n'), ((190, 234), 'os.path.join', 'os.path.join', (['MEDCOMMON_ROOT', '"""external_lib"""'], {}), "(MEDCOMMON_ROOT, 'external_lib')\n", (202, 234), False, 'import os\n'), ((1317, 1350), 'os.path.join', 'os.path.join', (['data_root', '"""images"""'], {}), "(data_root, 'images')\n", (1329, 1350), False, 'import os\n'), ((1367, 1399), 'os.path.join', 'os.path.join', (['data_root', '"""masks"""'], {}), "(data_root, 'masks')\n", (1379, 1399), False, 'import os\n'), ((1421, 1453), 'os.path.join', 'os.path.join', (['out_root', '"""images"""'], {}), "(out_root, 'images')\n", (1433, 1453), False, 'import os\n'), ((1474, 1505), 'os.path.join', 'os.path.join', (['out_root', '"""masks"""'], {}), "(out_root, 'masks')\n", (1486, 1505), False, 'import os\n'), ((1566, 1731), 'utils.datasets_utils.DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess', 'DatasetsUtils.resample_image_mask_unsame_resolution_multiprocess', (['image_root', 'mask_root', 'out_image_root', 'out_mask_root', 'dst_size', 'image_postfix', 'mask_postfix'], {}), '(image_root,\n mask_root, out_image_root, out_mask_root, dst_size, image_postfix,\n mask_postfix)\n', (1630, 1731), False, 'from utils.datasets_utils import DatasetsUtils\n'), ((4409, 4437), 'torch.utils.data.DataLoader', 'DataLoader', (['ds'], {'batch_size': '(1)'}), '(ds, batch_size=1)\n', (4419, 4437), False, 'from torch.utils.data import Dataset, DataLoader\n'), ((82, 107), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (97, 107), False, 'import os\n'), ((659, 684), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (674, 684), False, 'import os\n'), ((745, 766), 'os.listdir', 'os.listdir', (['mask_root'], {}), '(mask_root)\n', (755, 766), False, 'import os\n'), ((789, 822), 'os.path.join', 'os.path.join', (['mask_root', 
'filename'], {}), '(mask_root, filename)\n', (801, 822), False, 'import os\n'), ((847, 902), 'utils.mask_bounding_utils.MaskBoundingUtils.extract_mask_file_bounding', 'MaskBoundingUtils.extract_mask_file_bounding', (['mask_file'], {}), '(mask_file)\n', (891, 902), False, 'from utils.mask_bounding_utils import MaskBoundingUtils\n'), ((919, 944), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['mask_file'], {}), '(mask_file)\n', (933, 944), True, 'import SimpleITK as sitk\n'), ((1962, 1995), 'os.path.join', 'os.path.join', (['self.root', '"""images"""'], {}), "(self.root, 'images')\n", (1974, 1995), False, 'import os\n'), ((2021, 2053), 'os.path.join', 'os.path.join', (['self.root', '"""masks"""'], {}), "(self.root, 'masks')\n", (2033, 2053), False, 'import os\n'), ((3861, 3887), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_file'], {}), '(image_file)\n', (3875, 3887), True, 'import SimpleITK as sitk\n'), ((3902, 3931), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['image'], {}), '(image)\n', (3924, 3931), True, 'import SimpleITK as sitk\n'), ((1142, 1163), 'json.dumps', 'json.dumps', (['info_dict'], {}), '(info_dict)\n', (1152, 1163), False, 'import json\n'), ((2302, 2329), 'os.listdir', 'os.listdir', (['self.image_root'], {}), '(self.image_root)\n', (2312, 2329), False, 'import os\n'), ((2357, 2396), 'os.path.join', 'os.path.join', (['self.image_root', 'filename'], {}), '(self.image_root, filename)\n', (2369, 2396), False, 'import os\n'), ((2421, 2459), 'os.path.join', 'os.path.join', (['self.mask_root', 'filename'], {}), '(self.mask_root, filename)\n', (2433, 2459), False, 'import os\n'), ((3098, 3190), 'utils.detection_utils.DETECTION_UTILS.point_coordinate_resampled', 'DETECTION_UTILS.point_coordinate_resampled', (['in_shape', 'image_shape', '[x_min, y_min, z_min]'], {}), '(in_shape, image_shape, [x_min,\n y_min, z_min])\n', (3140, 3190), False, 'from utils.detection_utils import DETECTION_UTILS\n'), ((3221, 3313), 
'utils.detection_utils.DETECTION_UTILS.point_coordinate_resampled', 'DETECTION_UTILS.point_coordinate_resampled', (['in_shape', 'image_shape', '[x_max, y_max, z_max]'], {}), '(in_shape, image_shape, [x_max,\n y_max, z_max])\n', (3263, 3313), False, 'from utils.detection_utils import DETECTION_UTILS\n'), ((2479, 2505), 'os.path.exists', 'os.path.exists', (['image_file'], {}), '(image_file)\n', (2493, 2505), False, 'import os\n'), ((2551, 2576), 'os.path.exists', 'os.path.exists', (['mask_file'], {}), '(mask_file)\n', (2565, 2576), False, 'import os\n'), ((2859, 2914), 'utils.mask_bounding_utils.MaskBoundingUtils.extract_mask_file_bounding', 'MaskBoundingUtils.extract_mask_file_bounding', (['mask_file'], {}), '(mask_file)\n', (2903, 2914), False, 'from utils.mask_bounding_utils import MaskBoundingUtils\n'), ((2942, 2968), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['image_file'], {}), '(image_file)\n', (2956, 2968), True, 'import SimpleITK as sitk\n'), ((3576, 3630), 'numpy.array', 'np.array', (['[[z_min, y_min, x_min, z_max, y_max, x_max]]'], {}), '([[z_min, y_min, x_min, z_max, y_max, x_max]])\n', (3584, 3630), True, 'import numpy as np\n'), ((3955, 3976), 'torch.from_numpy', 'torch.from_numpy', (['arr'], {}), '(arr)\n', (3971, 3976), False, 'import torch\n')] |
import functools
import types

import jax
import numpy as np
from flax import struct
from jax import numpy as jnp

import utils
# Integer dtype used for all grid coordinates.
DTYPE = jnp.int16
# Default board side length.
SIZE = 10

# One-hot encoder for the default board size.
# NOTE: `jax.partial` (a re-export of functools.partial) was removed from
# jax's public API; functools.partial is the supported equivalent.
one_hot_10 = functools.partial(utils.one_hot, k=SIZE)

# Row/column deltas for the four actions, indexed by action id.
ACTION_MAP = jnp.stack([
    jnp.array((1, 0), dtype=DTYPE),   # visually DOWN
    jnp.array((0, 1), dtype=DTYPE),   # visually RIGHT
    jnp.array((-1, 0), dtype=DTYPE),  # visually UP
    jnp.array((0, -1), dtype=DTYPE),  # visually LEFT
])
@struct.dataclass
class GridWorld:
    """Immutable gridworld state, registered as a flax/jax pytree."""
    size: int
    # Observation encoder; marked pytree_node=False so jit/vmap treat it as
    # static metadata rather than a traced leaf.
    render: types.FunctionType = struct.field(pytree_node=False)
    # Agent (row, col) position; starts at the origin.
    agent: jnp.ndarray = jnp.array((0, 0), dtype=DTYPE)
    # The four discrete action ids.
    actions: jnp.ndarray = jnp.arange(4)
def goal(s):
    """Goal cell of an s-by-s grid: the bottom-right corner (s-1, s-1)."""
    corner = (s - 1, s - 1)
    return jnp.array(corner, dtype=DTYPE)
def new(size, render_onehot=True):
    """Construct a GridWorld of the given side length.

    When *render_onehot* is true, observations are one-hot encoded
    coordinates; otherwise the raw (row, col) array is returned as-is.
    """
    if render_onehot:
        # jax.partial was removed from the public API; functools.partial is
        # the supported equivalent (jax.partial was just a re-export of it).
        _render = functools.partial(utils.one_hot, k=size)
    else:
        _render = lambda x: x
    return GridWorld(size, _render)
def new_batch(n, size):
    """Create *n* fresh GridWorlds stacked into a single batched pytree."""
    envs = [new(size) for _ in range(n)]
    return utils.tree_stack(envs)
def reset(env):
    """Return a copy of *env* with the agent moved back to the origin."""
    origin = jnp.array((0, 0), dtype=DTYPE)
    return env.replace(agent=origin)
reset_batch = jax.vmap(reset)  # noqa: E305  batched variant
def render(env):
    """Observation for the current agent position (env.render is the encoder)."""
    observation = env.render(env.agent)
    return observation
render_batch = jax.vmap(render)  # noqa: E305  batched variant
# @jax.profiler.trace_function
def step(env, action):
    """Apply *action*, clip to the board, and return (env, observation, reward).

    Reward is True only when the agent sits exactly on the goal corner.
    """
    new_agent = env.agent + ACTION_MAP[action]
    # Keep the agent inside the [0, size-1] board.
    new_agent = jnp.clip(new_agent, 0, env.size - 1)
    env = env.replace(agent=new_agent)
    reward = (env.agent == goal(env.size)).all()
    return env, render(env), reward
# NOTE(review): jit with static_argnums=(1,) treats `action` as a static
# (hashable) argument, yet step_batch below vmaps over it with a batched
# array — confirm this combination works on the pinned jax version.
step = jax.jit(step, static_argnums=(1,))  # noqa: E305
step_batch = jax.vmap(step)
def all_coords(size):
    """All (row, col) coordinates of a size-by-size grid, shape (size*size, 2)."""
    axis = jnp.linspace(0, size - 1, size)
    grid = jnp.stack(jnp.meshgrid(axis, axis))
    flat = grid.transpose().reshape(-1, 2)
    return flat.astype(DTYPE)
def render_function(fn, env, reduction=jnp.max):
    """Render a state-action function over every cell of the gridworld.

    Arguments:
    - fn: callable taking (batch of rendered states, batch of actions)
    - env: a GridWorld instance
    - reduction: maps the per-action value vector of one cell to a single
      scalar shown for that cell (default: max over actions).
    Returns a (size, size) numpy array of reduced values.
    """
    coords = all_coords(env.size)
    states = jax.vmap(env.render)(coords)
    n_actions = len(env.actions)
    # Pair every state with every action: states repeated per action,
    # actions tiled per state.
    stacked_states = states.repeat(n_actions, axis=0)
    stacked_actions = jnp.tile(env.actions, (len(states),)).reshape((-1, 1))
    flat_values = fn(stacked_states, stacked_actions)
    per_cell = flat_values.reshape((len(states), n_actions))
    out = np.zeros((env.size, env.size))
    for coord, action_values in zip(coords, per_cell):
        out[coord[0], coord[1]] = reduction(action_values)
    return out
if __name__ == "__main__":
import random
env = new(10)
for i in range(10):
env, obs, r = step(env, random.randint(0, 3))
print(obs)
envs = new_batch(3, 10)
print(envs)
envs, obss, rs = step_batch(envs, jnp.array(range(3)))
| [
"jax.partial",
"flax.struct.field",
"jax.numpy.arange",
"jax.numpy.array",
"numpy.zeros",
"jax.jit",
"jax.numpy.clip",
"jax.numpy.linspace",
"jax.vmap",
"random.randint"
] | [((155, 189), 'jax.partial', 'jax.partial', (['utils.one_hot'], {'k': 'SIZE'}), '(utils.one_hot, k=SIZE)\n', (166, 189), False, 'import jax\n'), ((1076, 1091), 'jax.vmap', 'jax.vmap', (['reset'], {}), '(reset)\n', (1084, 1091), False, 'import jax\n'), ((1204, 1220), 'jax.vmap', 'jax.vmap', (['render'], {}), '(render)\n', (1212, 1220), False, 'import jax\n'), ((1522, 1556), 'jax.jit', 'jax.jit', (['step'], {'static_argnums': '(1,)'}), '(step, static_argnums=(1,))\n', (1529, 1556), False, 'import jax\n'), ((1584, 1598), 'jax.vmap', 'jax.vmap', (['step'], {}), '(step)\n', (1592, 1598), False, 'import jax\n'), ((518, 549), 'flax.struct.field', 'struct.field', ([], {'pytree_node': '(False)'}), '(pytree_node=False)\n', (530, 549), False, 'from flax import struct\n'), ((575, 605), 'jax.numpy.array', 'jnp.array', (['(0, 0)'], {'dtype': 'DTYPE'}), '((0, 0), dtype=DTYPE)\n', (584, 605), True, 'from jax import numpy as jnp\n'), ((633, 646), 'jax.numpy.arange', 'jnp.arange', (['(4)'], {}), '(4)\n', (643, 646), True, 'from jax import numpy as jnp\n'), ((673, 711), 'jax.numpy.array', 'jnp.array', (['(s - 1, s - 1)'], {'dtype': 'DTYPE'}), '((s - 1, s - 1), dtype=DTYPE)\n', (682, 711), True, 'from jax import numpy as jnp\n'), ((1354, 1390), 'jax.numpy.clip', 'jnp.clip', (['new_agent', '(0)', '(env.size - 1)'], {}), '(new_agent, 0, env.size - 1)\n', (1362, 1390), True, 'from jax import numpy as jnp\n'), ((2339, 2359), 'jax.vmap', 'jax.vmap', (['env.render'], {}), '(env.render)\n', (2347, 2359), False, 'import jax\n'), ((2693, 2723), 'numpy.zeros', 'np.zeros', (['(env.size, env.size)'], {}), '((env.size, env.size))\n', (2701, 2723), True, 'import numpy as np\n'), ((220, 250), 'jax.numpy.array', 'jnp.array', (['(1, 0)'], {'dtype': 'DTYPE'}), '((1, 0), dtype=DTYPE)\n', (229, 250), True, 'from jax import numpy as jnp\n'), ((274, 304), 'jax.numpy.array', 'jnp.array', (['(0, 1)'], {'dtype': 'DTYPE'}), '((0, 1), dtype=DTYPE)\n', (283, 304), True, 'from jax import numpy as jnp\n'), 
((329, 360), 'jax.numpy.array', 'jnp.array', (['(-1, 0)'], {'dtype': 'DTYPE'}), '((-1, 0), dtype=DTYPE)\n', (338, 360), True, 'from jax import numpy as jnp\n'), ((381, 412), 'jax.numpy.array', 'jnp.array', (['(0, -1)'], {'dtype': 'DTYPE'}), '((0, -1), dtype=DTYPE)\n', (390, 412), True, 'from jax import numpy as jnp\n'), ((789, 823), 'jax.partial', 'jax.partial', (['utils.one_hot'], {'k': 'size'}), '(utils.one_hot, k=size)\n', (800, 823), False, 'import jax\n'), ((1030, 1060), 'jax.numpy.array', 'jnp.array', (['(0, 0)'], {'dtype': 'DTYPE'}), '((0, 0), dtype=DTYPE)\n', (1039, 1060), True, 'from jax import numpy as jnp\n'), ((1657, 1688), 'jax.numpy.linspace', 'jnp.linspace', (['(0)', '(size - 1)', 'size'], {}), '(0, size - 1, size)\n', (1669, 1688), True, 'from jax import numpy as jnp\n'), ((1724, 1755), 'jax.numpy.linspace', 'jnp.linspace', (['(0)', '(size - 1)', 'size'], {}), '(0, size - 1, size)\n', (1736, 1755), True, 'from jax import numpy as jnp\n'), ((3013, 3033), 'random.randint', 'random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (3027, 3033), False, 'import random\n')] |
from finetuna.ml_potentials.ocpd_calc import OCPDCalc
import torch
from torch import nn
import torch.nn.functional as F
import numpy as np
import copy
from multiprocessing import Pool
from ase.atoms import Atoms
class OCPDNNCalc(OCPDCalc):
    """Ensemble-of-MLPs force predictor on top of OCP descriptors.

    Forces are predicted per estimator from a flattened descriptor vector;
    the ensemble mean is the prediction and the ensemble spread the
    uncertainty. Energies are not modelled: their training-set mean/std is
    reported instead.
    """

    implemented_properties = ["energy", "forces", "stds"]

    def __init__(
        self,
        initial_structure,
        model_path: str,
        checkpoint_path: str,
        nn_params: dict = {},
    ):
        """Set up the ensemble hyperparameters and the parent OCPDCalc.

        nn_params keys read here: n_hidden, n_hidden2, dropout_prob,
        n_estimators, verbose; via self.mlp_params (set by the parent
        __init__): stopping_epoch, parallel.
        """
        self.initial_structure = initial_structure
        self.n_atoms = len(self.initial_structure)
        self.n_hidden = nn_params.get("n_hidden", 2000)
        self.n_hidden2 = nn_params.get("n_hidden2", 200)
        self.dropout_prob = nn_params.get("dropout_prob", 0)
        self.n_estimators = nn_params.get("n_estimators", 3)
        self.verbose = nn_params.get("verbose", True)
        # Parent __init__ stores nn_params as self.mlp_params and calls
        # init_model(), so the attributes above must be set first.
        super().__init__(model_path, checkpoint_path, mlp_params=nn_params)
        self.stopping_epoch = self.mlp_params.get("stopping_epoch", 100)
        self.parallel = self.mlp_params.get("parallel", False)
        if self.parallel:
            # `parallel` doubles as the worker count for the process pool.
            self.process_pool = Pool(self.parallel)
        self.loss_func = nn.MSELoss()

    def init_model(self):
        """Build one fresh Net (and optimizer) per ensemble member."""
        self.ml_model = True
        self.nn_ensemble = []
        self.optimizers = []
        self.schedulers = []
        for i in range(self.n_estimators):
            self.nn_ensemble.append(
                Net(
                    len(self.get_descriptor(self.initial_structure)),
                    self.n_hidden,
                    self.n_hidden2,
                    self.n_atoms * 3,
                    self.dropout_prob,
                )
            )
            # init_optimizer() always attaches to the most recent member.
            self.init_optimizer()
        self.mean_energy = 0
        self.std_energy = 0

    def init_optimizer(self):
        """Create the optimizer (and optional LR scheduler) for the newest Net.

        Supported optimizers: "AdamW" (default) and "SGD"; supported
        scheduler: "ReduceLROnPlateau". All settings come from mlp_params.
        """
        optimizer_class = self.mlp_params.get("optimizer", "AdamW")
        if optimizer_class == "AdamW":
            optimizer = torch.optim.AdamW(
                self.nn_ensemble[-1].parameters(),
                lr=self.mlp_params.get("lr", 1e-3),
                betas=self.mlp_params.get("betas", (0.9, 0.999)),
                eps=self.mlp_params.get("eps", 1e-6),
                weight_decay=self.mlp_params.get("weight_decay", 0),
                amsgrad=self.mlp_params.get("amsgrad", True),
            )
        elif optimizer_class == "SGD":
            optimizer = torch.optim.SGD(
                self.nn_ensemble[-1].parameters(),
                lr=self.mlp_params.get("lr", 1e-3),
                momentum=self.mlp_params.get("momentum", 0),
                dampening=self.mlp_params.get("dampening", 0),
                weight_decay=self.mlp_params.get("weight_decay", 0),
                nesterov=self.mlp_params.get("nesterov", False),
            )
        self.optimizers.append(optimizer)
        scheduler_class = self.mlp_params.get("scheduler", None)
        if scheduler_class == "ReduceLROnPlateau":
            scheduler_dict = self.mlp_params.get("scheduler_dict", {})
            self.schedulers.append(
                torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, **scheduler_dict)
            )

    def calculate_ml(self, ocp_descriptor) -> tuple:
        """Predict (e_mean, forces, e_std, force_std) for one descriptor.

        Forces on constrained atoms are zeroed before averaging; constrained
        atoms are also excluded from the force-uncertainty average.
        """
        # Energies are not learned; report the training-set statistics.
        e_mean = self.mean_energy
        e_std = self.std_energy
        if self.initial_structure.constraints:
            constraints_index = self.initial_structure.constraints[0].index
        else:
            constraints_index = []
        predictions = []
        for estimator in self.nn_ensemble:
            prediction = estimator(torch.tensor(ocp_descriptor)).detach().numpy()
            # Mask out force components of constrained (fixed) atoms.
            constraint_array = np.ones((self.n_atoms, 3))
            constraint_array[constraints_index] = np.zeros((3,))
            constraint_array = constraint_array.flatten()
            prediction = np.multiply(constraint_array, prediction)
            predictions.append(prediction)
        stds = np.std(predictions, axis=0)
        avgs = np.average(predictions, axis=0)
        f_mean = avgs.reshape(self.n_atoms, 3)
        f_std = np.average(
            np.delete(
                stds.reshape(self.n_atoms, 3),
                constraints_index,
                axis=0,
            )
        ).item()
        return e_mean, f_mean, e_std, f_std

    def fit(self, parent_energies, parent_forces, parent_h_descriptors):
        """(Re)train every ensemble member on the parent data.

        Training runs through the module-level sub_fit(), either serially or
        via the process pool when `parallel` is configured.
        """
        args_list = []
        for j in range(self.n_estimators):
            # Deep copies keep the (possibly multiprocess) workers from
            # sharing mutable training data.
            parent_energies_copy = copy.deepcopy(parent_energies)
            parent_forces_copy = copy.deepcopy(parent_forces)
            parent_h_descriptors_copy = copy.deepcopy(parent_h_descriptors)
            estimator = self.nn_ensemble[j]
            optimizer = self.optimizers[j]
            if self.initial_structure.constraints:
                constraints_index = self.initial_structure.constraints[0].index
            else:
                constraints_index = []
            if self.schedulers:
                scheduler = self.schedulers[j]
            else:
                scheduler = None
            args_list.append(
                (
                    j,
                    parent_energies_copy,
                    parent_forces_copy,
                    parent_h_descriptors_copy,
                    estimator,
                    optimizer,
                    scheduler,
                    self.verbose,
                    self.stopping_epoch,
                    self.n_atoms,
                    constraints_index,
                    self.loss_func,
                )
            )
        if self.parallel:
            results_iterator = self.process_pool.starmap(sub_fit, args_list)
            self.nn_ensemble = [model for model in results_iterator]
        else:
            for j in range(self.n_estimators):
                self.nn_ensemble[j] = sub_fit(*args_list[j])
        # energy predictions are irrelevant for active learning so just take the mean
        self.mean_energy = np.average(parent_energies)
        self.std_energy = np.std(parent_energies)

    def get_data_from_atoms(self, atoms_dataset: "list[Atoms]"):
        """Extract (energies, forces, descriptors) lists from an Atoms dataset."""
        energy_data = []
        forces_data = []
        h_data = []
        for atoms in atoms_dataset:
            energy_data.append(atoms.get_potential_energy())
            forces_data.append(atoms.get_forces())
            h_data.append(self.get_descriptor(atoms))
        return energy_data, forces_data, h_data

    def get_descriptor(self, atoms: "Atoms"):
        """Overridable hook: flattened OCP descriptor vector for one Atoms object."""
        ocp_descriptor = self.ocp_describer.get_h(atoms)
        h_desc = ocp_descriptor.flatten()
        return h_desc

    def partial_fit(
        self, new_energies, new_forces, new_e_descriptors, new_f_descriptors
    ):
        # Incremental training is intentionally unsupported; fit() retrains.
        raise NotImplementedError
def sub_fit(
    j,
    parent_energies,
    parent_forces,
    parent_h_descriptors,
    estimator,
    optimizer,
    scheduler,
    verbose,
    stopping_epoch,
    n_atoms,
    constraints_index,
    loss_function,
):
    """Train one ensemble member on force targets and return its best snapshot.

    Runs full-batch-of-one SGD epochs until `stopping_epoch`, masking the
    force components of constrained atoms out of the loss, and keeps a deep
    copy of the model at its lowest epoch loss.

    :param j: estimator index (only used in log messages)
    :param parent_energies: per-structure energies (only its length is used)
    :param parent_forces: per-structure (n_atoms, 3) force arrays
    :param parent_h_descriptors: per-structure descriptor vectors
    :param estimator: torch.nn.Module mapping descriptor -> flat forces
    :param scheduler: optional LR scheduler stepped with the epoch loss
    :return: deep copy of the model with the lowest epoch loss seen
    """
    n_data = len(parent_energies)
    epoch = 0
    best_loss = np.inf  # np.Inf alias was removed in NumPy 2.0
    best_model = copy.deepcopy(estimator)
    epoch_losses = []
    if verbose:
        print("*loss(" + str(j) + "," + str(epoch) + "): " + str("Inf"))
    # The force mask depends only on the fixed constraints, so build it once
    # instead of once per sample per epoch.
    constraint_array = np.ones((n_atoms, 3))
    constraint_array[constraints_index] = np.zeros((3,))
    constraint_tensor = torch.tensor(constraint_array.flatten()).to(torch.float32)
    while not epoch > stopping_epoch:
        epoch_losses.append(0)
        for i in range(n_data):
            prediction = estimator(torch.tensor(parent_h_descriptors[i]))
            loss = loss_function(
                prediction * constraint_tensor,
                torch.tensor(parent_forces[i].flatten()).to(torch.float32),
            )
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # .item() replaces the legacy .data.item() autograd access.
            epoch_losses[-1] += loss.item()
        if scheduler:
            scheduler.step(epoch_losses[-1])
        epoch += 1
        loss_str = " "
        if epoch_losses[-1] < best_loss:
            best_loss = epoch_losses[-1]
            best_model = copy.deepcopy(estimator)
            loss_str = "*"
        loss_str += (
            "loss("
            + str(j)
            + ","
            + str(epoch)
            + "): "
            + str(epoch_losses[-1])
            + ",\tlr: "
            + str(optimizer.param_groups[0]["lr"])
        )
        if verbose and epoch % 100 == 0:
            print(loss_str)
    return best_model
class Net(torch.nn.Module):
    """Two-hidden-layer MLP with SiLU activations and dropout on the output."""

    def __init__(self, n_feature, n_hidden1, n_hidden2, n_output, dropout_prob):
        super(Net, self).__init__()
        # Construction and init order is kept fixed so the global RNG stream
        # (and hence the initial weights) is reproducible.
        self.hidden1 = torch.nn.Linear(n_feature, n_hidden1)
        self.hidden2 = torch.nn.Linear(n_hidden1, n_hidden2)
        self.predict = torch.nn.Linear(n_hidden2, n_output)
        self.dropout = torch.nn.Dropout(dropout_prob)
        torch.nn.init.xavier_uniform_(self.hidden1.weight)
        torch.nn.init.xavier_uniform_(self.hidden2.weight)

    def forward(self, x):
        """Descriptor vector in, flat force prediction out."""
        hidden = F.silu(self.hidden1(x))
        hidden = F.silu(self.hidden2(hidden))
        return self.dropout(self.predict(hidden))
| [
"numpy.multiply",
"torch.nn.Dropout",
"copy.deepcopy",
"numpy.ones",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"numpy.average",
"torch.nn.init.xavier_uniform_",
"torch.nn.MSELoss",
"numpy.zeros",
"torch.tensor",
"multiprocessing.Pool",
"torch.nn.Linear",
"numpy.std"
] | [((7096, 7120), 'copy.deepcopy', 'copy.deepcopy', (['estimator'], {}), '(estimator)\n', (7109, 7120), False, 'import copy\n'), ((1162, 1174), 'torch.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1172, 1174), False, 'from torch import nn\n'), ((3884, 3911), 'numpy.std', 'np.std', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (3890, 3911), True, 'import numpy as np\n'), ((3927, 3958), 'numpy.average', 'np.average', (['predictions'], {'axis': '(0)'}), '(predictions, axis=0)\n', (3937, 3958), True, 'import numpy as np\n'), ((5907, 5934), 'numpy.average', 'np.average', (['parent_energies'], {}), '(parent_energies)\n', (5917, 5934), True, 'import numpy as np\n'), ((5961, 5984), 'numpy.std', 'np.std', (['parent_energies'], {}), '(parent_energies)\n', (5967, 5984), True, 'import numpy as np\n'), ((9177, 9214), 'torch.nn.Linear', 'torch.nn.Linear', (['n_feature', 'n_hidden1'], {}), '(n_feature, n_hidden1)\n', (9192, 9214), False, 'import torch\n'), ((9254, 9291), 'torch.nn.Linear', 'torch.nn.Linear', (['n_hidden1', 'n_hidden2'], {}), '(n_hidden1, n_hidden2)\n', (9269, 9291), False, 'import torch\n'), ((9331, 9367), 'torch.nn.Linear', 'torch.nn.Linear', (['n_hidden2', 'n_output'], {}), '(n_hidden2, n_output)\n', (9346, 9367), False, 'import torch\n'), ((9407, 9437), 'torch.nn.Dropout', 'torch.nn.Dropout', (['dropout_prob'], {}), '(dropout_prob)\n', (9423, 9437), False, 'import torch\n'), ((9447, 9497), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.hidden1.weight'], {}), '(self.hidden1.weight)\n', (9476, 9497), False, 'import torch\n'), ((9506, 9556), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['self.hidden2.weight'], {}), '(self.hidden2.weight)\n', (9535, 9556), False, 'import torch\n'), ((1116, 1135), 'multiprocessing.Pool', 'Pool', (['self.parallel'], {}), '(self.parallel)\n', (1120, 1135), False, 'from multiprocessing import Pool\n'), ((3608, 3634), 'numpy.ones', 'np.ones', (['(self.n_atoms, 3)'], 
{}), '((self.n_atoms, 3))\n', (3615, 3634), True, 'import numpy as np\n'), ((3685, 3699), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (3693, 3699), True, 'import numpy as np\n'), ((3783, 3824), 'numpy.multiply', 'np.multiply', (['constraint_array', 'prediction'], {}), '(constraint_array, prediction)\n', (3794, 3824), True, 'import numpy as np\n'), ((4415, 4445), 'copy.deepcopy', 'copy.deepcopy', (['parent_energies'], {}), '(parent_energies)\n', (4428, 4445), False, 'import copy\n'), ((4479, 4507), 'copy.deepcopy', 'copy.deepcopy', (['parent_forces'], {}), '(parent_forces)\n', (4492, 4507), False, 'import copy\n'), ((4548, 4583), 'copy.deepcopy', 'copy.deepcopy', (['parent_h_descriptors'], {}), '(parent_h_descriptors)\n', (4561, 4583), False, 'import copy\n'), ((7650, 7671), 'numpy.ones', 'np.ones', (['(n_atoms, 3)'], {}), '((n_atoms, 3))\n', (7657, 7671), True, 'import numpy as np\n'), ((7722, 7736), 'numpy.zeros', 'np.zeros', (['(3,)'], {}), '((3,))\n', (7730, 7736), True, 'import numpy as np\n'), ((8389, 8413), 'copy.deepcopy', 'copy.deepcopy', (['estimator'], {}), '(estimator)\n', (8402, 8413), False, 'import copy\n'), ((3047, 3118), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {}), '(optimizer, **scheduler_dict)\n', (3089, 3118), False, 'import torch\n'), ((7580, 7617), 'torch.tensor', 'torch.tensor', (['parent_h_descriptors[i]'], {}), '(parent_h_descriptors[i])\n', (7592, 7617), False, 'import torch\n'), ((3530, 3558), 'torch.tensor', 'torch.tensor', (['ocp_descriptor'], {}), '(ocp_descriptor)\n', (3542, 3558), False, 'import torch\n')] |
import click
from pathlib import Path
import datetime
from moviepy.editor import *
from PIL import Image
from PIL import ImageFont
from PIL import ImageDraw
import numpy as np
import xmltodict
def make_clips(filelist, timelist, AOI=None):
    """Build one short, time-stamped ImageClip per image.

    When *AOI* (dict with xmin/xmax/ymin/ymax) is given, each image is
    cropped to that box first; odd box sides are shrunk by one pixel
    (presumably to keep frame dimensions even for the video encoder), and
    the correction is written back into the AOI dict.
    """
    print('loopstart')
    clips = []
    for file, time in zip(filelist, timelist):
        print(f'processing image {file.name}')
        if AOI is None:
            img = Image.open(file)
        else:
            if ((int(AOI['xmax']) - int(AOI['xmin'])) % 2) == 1:
                print('make AOI even')
                AOI['xmax'] = int(AOI['xmax']) - 1
            if ((int(AOI['ymax']) - int(AOI['ymin'])) % 2) == 1:
                print('make AOI even')
                AOI['ymax'] = int(AOI['ymax']) - 1
            box = (int(AOI['xmin']), int(AOI['ymin']), int(AOI['xmax']), int(AOI['ymax']))
            img = Image.open(file).crop(box)
        # Stamp the elapsed time (without fractional seconds) onto the frame.
        font = ImageFont.truetype("arial.ttf", int(max(img.size) / 20))
        stamp = str(time).split('.')[0]
        ImageDraw.Draw(img).text((0, 0), stamp, font=font)
        clips.append(ImageClip(np.array(img)).set_duration(0.1))
    return clips
@click.command()
@click.option('--exp_folder', default='.', help='Path to experiment folder with images. Exp_folder will be also used to name the timelapse video')
@click.option('--xml_mask', default=None, help='Path to xml file with 1 or multiple masks generated by labelimg ')
def main(exp_folder, xml_mask=None):
    '''Build a time-stamped mp4 timelapse from the images in exp_folder.

    Images are sorted by their filesystem modification time, which is
    stamped onto each frame relative to the first image. Without xml_mask
    one video named after exp_folder is written to the current folder;
    with an xml_mask (labelimg format) one cropped video is written per
    annotated region.
    '''
    exp_folder=Path(exp_folder)
    print(f'Processing images in folder: {exp_folder.resolve()}')
    # Recursively collect image files with common extensions.
    p = Path(exp_folder).glob('**/*')
    filelist = [item for item in p if item.is_file() and item.suffix in ['.jpg','.bmp','.BMP','.JPG','.png','.PNG']]
    # Timestamps relative to the first file's mtime.
    timelist = [datetime.datetime.fromtimestamp(file.stat().st_mtime) for file in filelist]
    timelist = list(map(lambda x: x-timelist[0],timelist))
    if xml_mask is None:
        clips=make_clips(filelist,timelist)
        concat_clip = concatenate_videoclips(clips, method='compose')
        concat_clip.write_videofile(f"{exp_folder.name}.mp4", fps=24)
    else:
        with open(xml_mask, 'r') as file:
            AOI_from_XML=xmltodict.parse(file.read())
        AOI_dict={}
        # xmltodict yields a list of objects for multiple annotations, but a
        # single dict when there is only one; the except branch handles the
        # single-object case.
        # NOTE(review): the bare except is broad, and the fallback stores the
        # literal key 'name' rather than the annotation's actual name —
        # confirm this is the intended single-mask behavior.
        for AOI in AOI_from_XML['annotation']['object']:
            try:
                AOI_dict[AOI['name']]=AOI['bndbox']
            except:
                AOI_dict['name']=AOI_from_XML['annotation']['object']['bndbox']
        for AOI_name, AOI in AOI_dict.items():
            clips=make_clips(filelist,timelist,AOI=AOI)
            concat_clip = concatenate_videoclips(clips, method='compose')
            concat_clip.write_videofile(f"{AOI_name}_{exp_folder.name}.mp4", fps=24)
if __name__ == '__main__':
    # `sys` is never imported at the top of this script; it previously only
    # leaked into the namespace through the `moviepy.editor` star import.
    # Import it explicitly so the exit plumbing does not depend on moviepy
    # internals.
    import sys
    sys.exit(main())
| [
"PIL.Image.open",
"pathlib.Path",
"click.option",
"numpy.array",
"PIL.ImageDraw.Draw",
"click.command"
] | [((1118, 1133), 'click.command', 'click.command', ([], {}), '()\n', (1131, 1133), False, 'import click\n'), ((1135, 1290), 'click.option', 'click.option', (['"""--exp_folder"""'], {'default': '"""."""', 'help': '"""Path to experiment folder with images. Exp_folder will be also used to name the timelapse video"""'}), "('--exp_folder', default='.', help=\n 'Path to experiment folder with images. Exp_folder will be also used to name the timelapse video'\n )\n", (1147, 1290), False, 'import click\n'), ((1282, 1400), 'click.option', 'click.option', (['"""--xml_mask"""'], {'default': 'None', 'help': '"""Path to xml file with 1 or multiple masks generated by labelimg """'}), "('--xml_mask', default=None, help=\n 'Path to xml file with 1 or multiple masks generated by labelimg ')\n", (1294, 1400), False, 'import click\n'), ((1639, 1655), 'pathlib.Path', 'Path', (['exp_folder'], {}), '(exp_folder)\n', (1643, 1655), False, 'from pathlib import Path\n'), ((410, 426), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (420, 426), False, 'from PIL import Image\n'), ((1740, 1756), 'pathlib.Path', 'Path', (['exp_folder'], {}), '(exp_folder)\n', (1744, 1756), False, 'from pathlib import Path\n'), ((947, 966), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (961, 966), False, 'from PIL import ImageDraw\n'), ((774, 790), 'PIL.Image.open', 'Image.open', (['file'], {}), '(file)\n', (784, 790), False, 'from PIL import Image\n'), ((1038, 1051), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (1046, 1051), True, 'import numpy as np\n')] |
"""
Useful functions for loading files etc.
-AN
"""
import os
import pickle
import zipfile, lzma
import numpy as np
import matplotlib.pyplot as plt
from collections import OrderedDict
# saves dict to csv using keys as headers
# saves dict to csv using keys as headers
def saveToCSV(savedict, filename="", path=""):
    """Append the column lists in *savedict* to a semicolon-separated CSV.

    The dict keys become the header row (written only when the file does not
    exist yet); every row ends with ";;". When *filename* is empty an unused
    "data<N>.csv" name is picked (NOTE(review): that probe checks the cwd,
    not *path* — kept for compatibility). *path* is prepended verbatim, so
    it should end with a separator. Errors are printed, not raised; always
    returns 1 (legacy contract).
    """
    try:
        if not filename:
            datanum = 1
            filename = "data"
            while os.path.isfile("".join([filename, str(datanum), ".csv"])):
                datanum += 1
            filename = "".join([filename, str(datanum)])
        if filename[-4:] != ".csv":
            filename = "".join([filename, ".csv"])
        if not path:
            path = os.getcwd()
        if not os.path.isdir(path):
            os.makedirs(path)
        target = "".join([path, filename])
        if not os.path.isfile(target):
            # New file: write the header row first.
            with open(target, 'w') as file:
                file.write("".join([";".join(list(savedict.keys())), ";\n"]))
        valueslist = list(savedict.values())
        # `with` guarantees the handle is flushed and closed; the original
        # append handle was never closed.
        with open(target, 'a') as file:
            for rowcount in range(len(list(valueslist[0]))):
                for values in valueslist:
                    file.write("".join([str(values[rowcount]), ";"]))
                file.write(";\n")
    except PermissionError:
        print("Error saving data, is the file open?")
    except (AttributeError, TypeError):
        print("Error in dataformat, use a dict filled with lists.")
    except KeyError:
        print("Error in length of data")
    return 1
# takes a dict and gives a list with dicts. Kinda sorts by relKey
def splitIntoDicts(data, relKey="Lackh (m)"):
    """Partition the column-dict *data* into one dict per distinct relKey value.

    Rows (i.e. same index across all columns) are grouped by their value in
    data[relKey]; each group becomes an OrderedDict with the same keys as
    *data*. The output list order follows set(data[relKey]) iteration order,
    matching the original implementation.
    """
    groups = set(data[relKey])
    buckets = OrderedDict((g, OrderedDict()) for g in groups)
    # Single pass over the rows instead of one full pass per group
    # (was O(rows * groups)).
    for rownum, g in enumerate(data[relKey]):
        bucket = buckets[g]
        for key in data:
            bucket.setdefault(key, []).append(data[key][rownum])
    return list(buckets.values())
def parseSubDirs(filtlistlist, dirname="", dirstruct=[]):
    """
    :param filtlistlist: list of lists of strings
    :param dirname: dir to start parsing from (defaults to the cwd)
    :param dirstruct: pre-computed os.walk() result to avoid re-walking
    :return: list of all files whose full path contains at least one of the
        strings from every sub-list of filtlistlist
    """
    path = dirname if dirname else os.getcwd()
    # dirstruct is never mutated, so the shared default list is safe.
    walk = dirstruct if dirstruct else os.walk(path)
    files = []
    for root, _dirs, names in walk:
        for name in names:
            full = os.path.join(root, name)
            # Builtin any/all replaces the numpy call and the for/else dance:
            # every filter group must match somewhere in the full path.
            if all(any(f in full for f in group) for group in filtlistlist):
                files.append(full)
    return files
def multiReadData(paths, filtlist, splitString="Frequenz [Hz]"):
    """Read every file in *paths*, split it into sweeps and bucket it under
    each filter key from *filtlist* that occurs in its path.

    Result shape: {filtkey: {filename: {"data": ..., "metadata": ...}}}.
    """
    dataDict = OrderedDict()
    for path in paths:
        data, metadata = readData(path)
        filename = path.split("\\")[-1]
        if splitString not in data:
            # Fall back to the lowercase spelling; note this reassignment
            # deliberately sticks for the remaining files, as before.
            splitString = "frequenz (Hz)"
        data = splitData(data, splitString)
        for filtkey in filtlist:
            if filtkey not in path:
                continue
            entry = dataDict.setdefault(filtkey, OrderedDict()).setdefault(filename, OrderedDict())
            entry["data"] = data
            entry["metadata"] = metadata
            entry["metadata"]["path"] = path
    return dataDict
def splitData(data, relKey = "Frequenz [Hz]"):
    """
    Split the column-dict *data* into individual frequency sweeps.

    A new sweep starts whenever the relKey column reverses against its
    initial direction (detected via the sign of the first difference).

    :param data:
        dictionary with data
    :return:
        dict with the same keys, each value a list of per-sweep slices
    """
    # Fall back through the known spellings of the frequency column.
    # (The first branch is a no-op kept from the original.)
    if relKey in data:
        relKey = relKey
    elif "frequenz (Hz)" in data:
        relKey = "frequenz (Hz)"
    elif 'Frequenz [Hz]' in data:
        relKey = 'Frequenz [Hz]'
    elif ' Frequenz [Hz]' in data:
        relKey = ' Frequenz [Hz]'
    newdict = OrderedDict()
    sweepAm = 0
    # cor is +1.0 for ascending sweeps, -1.0 for descending ones.
    cor = -(data[relKey][0]-data[relKey][1])/np.abs(data[relKey][0]-data[relKey][1])
    # NOTE(review): `point == len(data[relKey])-cor` compares an int index
    # against len-1.0 / len+1.0 (float); for descending sweeps (cor=-1) it
    # can never be true, so the final sweep may be dropped — confirm intent.
    for point, el in enumerate(data[relKey]):
        if (point > 0 and (el*cor) < (data[relKey][point-1])*cor) or (point == len(data[relKey])-cor):
            # Direction reversal (or last point): close the current sweep.
            for key in data:
                if key in newdict:
                    newdict[key].append(data[key][sweepAm:point])
                else:
                    newdict[key] = [data[key][sweepAm:point]]
            sweepAm=point
    return newdict
def savePickle(savedict, filename="", path=""):
    """Pickle *savedict* to "<path><filename><N>.pkl" with the first unused N.

    *path* is prepended verbatim (include a trailing separator) and is
    created if missing.
    """
    datanum = 1
    # Probe the actual target name. The original probed path+"<N>.pkl"
    # (ignoring *filename*), so a second call silently overwrote the first
    # file instead of picking the next number.
    while os.path.isfile("".join([path, filename, str(datanum), ".pkl"])):
        datanum += 1
    filename = "".join([filename, str(datanum)])
    if filename[-4:] != ".pkl":
        filename = "".join([filename, ".pkl"])
    # Guard the empty-path case: os.makedirs("") raises FileNotFoundError.
    if path and not os.path.isdir(path):
        os.makedirs(path)
    with open("".join([path, filename]), 'wb+') as rawF:
        pickle.dump(savedict, rawF, pickle.HIGHEST_PROTOCOL)
def transDataML(dataDict, xKey, compFunc=lambda a: a):
    """Flatten the nested campaign -> file -> data structure into (X, y).

    One sample per sweep in data[xKey]; the label is "<Chip:> <Substanz:>"
    from the file's metadata.

    NOTE(review): *compFunc* is accepted but never applied; it is kept only
    for interface compatibility.
    """
    X = []
    y = []
    # Iterate values directly instead of keys + re-indexing; the unused
    # enumerate counters of the original are dropped.
    for campaign in dataDict.values():
        for messfile in campaign.values():
            label = messfile["metadata"]["Chip:"] + " " + messfile["metadata"]["Substanz:"]
            for sweep in messfile["data"][xKey]:
                X.append(sweep)
                y.append(label)
    return X, y
def winAvg(data, winWidth=0, winfunc=np.blackman, mode="same"):
    """Smooth *data* by convolving it with a normalized *winfunc* window.

    With winWidth=0 an odd width of ~10% of len(data) is chosen; inputs too
    short for that are returned unchanged.
    """
    if not winWidth:
        if int(len(data) * (5 / 100)) > 0:
            winWidth = int(len(data) * (10 / 100))
            if not winWidth % 2:
                winWidth += 1  # keep the window width odd
        else:
            return data
    window = winfunc(winWidth)
    kernel = window / np.sum(window)
    return np.convolve(data, kernel, mode=mode)
# zips raw data of all sub folders to path containing the provided string
def zipRaw(path=None, mark="_raw", outname="raw.zip"):
    """Zip the files of every direct sub-folder of *path* whose name contains
    *mark* into *outname* (LZMA-compressed). Returns 1.

    The default *path* now resolves to the cwd at call time; the original
    evaluated os.getcwd() once at import time, silently freezing it.
    """
    if path is None:
        path = os.getcwd()
    if outname[-4:] != ".zip":
        outname = "".join([outname, ".zip"])
    # `with` closes the archive even if a write fails.
    with zipfile.ZipFile(outname, mode="w") as zf:
        cTree = list(os.walk(path))
        for folder in cTree[0][1]:
            if mark in folder:
                for datei in list(os.walk(path + "//" + folder))[0][2]:
                    zf.write(folder + "//" + datei, compress_type=zipfile.ZIP_LZMA)
    return 1
def readData(filename, path=""):
"""
:param filename: trivial
:param path: trivial
:return: data, metadata as dicts
"""
data = dict()
metadata = dict()
with open("".join([path, filename]),'r') as file:
lines = [[part for part in line.rstrip('\n').split(";") if part] for line in file]
for count, line in enumerate(lines):
if len(line) > 3:
keyline = line
keypoint = count
break
if len(line) == 2 or not line[0]:
metadata[str(line[0])] = str(line[1])
for key in keyline:
data[key] = []
for line in lines:
if len(line) == len(data.keys()):
for pos in range(len(keyline)):
try:
data[keyline[pos]].append(float(line[pos]))
except: pass
metadata["dataKeys"] = data.keys()
return data, metadata
if __name__ == "__main__":
testdict = {"temperature": [1,2,3,4], "length":[4,5,6,7], "time":[8,9,10,11]}
data,metadata = readData("Daten_sim.csv", path="/home/an/01_XSensor/Chipvergleich/")
data = splitData(data)
signal = {"values": data["UBol (V), Spannung"]}
print(np.shape(signal["values"]))
plt.plot(signal["values"][0])
plt.show() | [
"numpy.abs",
"collections.OrderedDict",
"numpy.convolve",
"pickle.dump",
"zipfile.ZipFile",
"os.makedirs",
"matplotlib.pyplot.plot",
"os.path.join",
"os.getcwd",
"os.path.isdir",
"numpy.shape",
"os.walk",
"matplotlib.pyplot.show"
] | [((3085, 3098), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3096, 3098), False, 'from collections import OrderedDict\n'), ((4311, 4324), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4322, 4324), False, 'from collections import OrderedDict\n'), ((6392, 6428), 'numpy.convolve', 'np.convolve', (['data', 'kernel'], {'mode': 'mode'}), '(data, kernel, mode=mode)\n', (6403, 6428), True, 'import numpy as np\n'), ((6544, 6555), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (6553, 6555), False, 'import os\n'), ((6679, 6713), 'zipfile.ZipFile', 'zipfile.ZipFile', (['outname'], {'mode': '"""w"""'}), "(outname, mode='w')\n", (6694, 6713), False, 'import zipfile, lzma\n'), ((8299, 8328), 'matplotlib.pyplot.plot', 'plt.plot', (["signal['values'][0]"], {}), "(signal['values'][0])\n", (8307, 8328), True, 'import matplotlib.pyplot as plt\n'), ((8334, 8344), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8342, 8344), True, 'import matplotlib.pyplot as plt\n'), ((1751, 1764), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1762, 1764), False, 'from collections import OrderedDict\n'), ((2541, 2552), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2550, 2552), False, 'import os\n'), ((4684, 4725), 'numpy.abs', 'np.abs', (['(data[relKey][0] - data[relKey][1])'], {}), '(data[relKey][0] - data[relKey][1])\n', (4690, 4725), True, 'import numpy as np\n'), ((5428, 5447), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5441, 5447), False, 'import os\n'), ((5458, 5475), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (5469, 5475), False, 'import os\n'), ((5542, 5594), 'pickle.dump', 'pickle.dump', (['savedict', 'rawF', 'pickle.HIGHEST_PROTOCOL'], {}), '(savedict, rawF, pickle.HIGHEST_PROTOCOL)\n', (5553, 5594), False, 'import pickle\n'), ((6732, 6745), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (6739, 6745), False, 'import os\n'), ((8266, 8292), 'numpy.shape', 'np.shape', (["signal['values']"], {}), 
"(signal['values'])\n", (8274, 8292), True, 'import numpy as np\n'), ((702, 713), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (711, 713), False, 'import os\n'), ((730, 749), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (743, 749), False, 'import os\n'), ((764, 781), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (775, 781), False, 'import os\n'), ((2690, 2703), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2697, 2703), False, 'import os\n'), ((2960, 2981), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (2972, 2981), False, 'import os\n'), ((3515, 3528), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3526, 3528), False, 'from collections import OrderedDict\n'), ((3635, 3648), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3646, 3648), False, 'from collections import OrderedDict\n'), ((6838, 6867), 'os.walk', 'os.walk', (["(path + '//' + folder)"], {}), "(path + '//' + folder)\n", (6845, 6867), False, 'import os\n'), ((2838, 2859), 'os.path.join', 'os.path.join', (['r', 'file'], {}), '(r, file)\n', (2850, 2859), False, 'import os\n')] |
"""
-----------------------------------------------------------------------
Harmoni: a Novel Method for Eliminating Spurious Neuronal Interactions due to the Harmonic Components in Neuronal Data
<NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
https://doi.org/10.1101/2021.10.06.463319
-----------------------------------------------------------------------
script for:
** the Sawtooth signal and its the fundamental component and the harmonics **
-----------------------------------------------------------------------
(c) <NAME> (<EMAIL>) @ Neurolgy Dept, MPI CBS, 2021
https://github.com/minajamshidi
(c) please cite the above paper in case of using this code for your research
License: MIT License
-----------------------------------------------------------------------
last modified:
- 20210927 by Mina
"""
import numpy as np
from scipy.signal import butter, filtfilt, sawtooth
import matplotlib.pyplot as plt
from tools_signal import plot_fft, hilbert_
from tools_connectivity import compute_phase_connectivity
# --------------------------------------
# general settings
# --------------------------------------
sfreq = 512  # sampling rate [Hz]
t_len = 60 * 2  # total simulation time [s]
n_samp = sfreq * t_len  # total number of samples
# NOTE(review): the original also computed dt, an np.arange time vector and an
# unused `times` array here; all three were dead (the time vector was rebuilt
# with np.linspace below before first use), so they were removed.
# --------------------------------------
# generate signal
# --------------------------------------
# sawtooth generation
t = np.linspace(0, t_len, n_samp)  # time vector [s]
f0 = 6  # fundamental frequency [Hz]
sig_sawtooth = sawtooth(2 * np.pi * f0 * t, 0.1)
plot_fft(sig_sawtooth, sfreq)
# build the band-pass filters for the fundamental and the harmonic frequencies;
# Wn is normalized by the Nyquist frequency (sfreq / 2), hence the "/ sfreq * 2"
b1, a1 = butter(N=2, Wn=np.array([f0 - 1, f0 + 1]) / sfreq * 2, btype='bandpass')  # around f0
b7, a7 = butter(N=2, Wn=np.array([7 * f0 - 1, 7 * f0 + 1]) / sfreq * 2, btype='bandpass')  # narrow band around 7*f0
b72, a72 = butter(N=2, Wn=np.array([6 * f0 - 1, 8 * f0 + 1]) / sfreq * 2, btype='bandpass')  # wide band around 7*f0
# filter, zero-phase (filtfilt applies the filter forward and backward)
sig1 = filtfilt(b1, a1, sig_sawtooth)
sig7 = filtfilt(b7, a7, sig_sawtooth)
sig72 = filtfilt(b72, a72, sig_sawtooth)
# complex (analytic) signal for phase and amplitude extraction
sig1_h = hilbert_(sig1)
sig7_h = hilbert_(sig7)
sig72_h = hilbert_(sig72)
# compute PAC as the synchronization of the lower-frequency signal and the amplitude of the higher-frequency signal
pac17 = compute_phase_connectivity(sig1_h, np.abs(sig7_h), 1, 1, 'coh', type1='abs')
pac172 = compute_phase_connectivity(sig1_h, np.abs(sig72_h), 1, 1, 'coh', type1='abs')
# 1:7 cross-frequency phase coupling between fundamental and harmonic
coh17 = compute_phase_connectivity(sig1_h, sig7_h, 1, 7, 'coh', type1='abs')
coh172 = compute_phase_connectivity(sig1_h, sig72_h, 1, 7, 'coh', type1='abs')
print('narrow-band filter at the harmonic frequency: coh17=', str(coh17), ', pac17=', str(pac17))
print('wider filter at the harmonic frequency: coh172=', str(coh172), ', pac172=', str(pac172))
# plot the one-second segment between 40 s and 41 s
ind1 = np.argmin(np.abs(t - 40.045))
ind2 = np.argmin(np.abs(t - 41.045))
# -------------------------------------------
# plot the time series
# -------------------------------------------
plt.figure()
plt.plot(t[ind1:ind2], 0.25 * sig_sawtooth[ind1:ind2]+1.5, label='sawtooth signal')
plt.plot(t[ind1:ind2], 0.25 * sig1[ind1:ind2]+1, label='fundamental component')
plt.plot(t[ind1:ind2], 0.25 * sig1[ind1:ind2]+0.5, color='orange', alpha=0.3)
plt.plot(t[ind1:ind2], 2 * sig7[ind1:ind2]+0.5, label='harmonic component')
plt.plot(t[ind1:ind2], 0.25 * sig1[ind1:ind2], color='orange', alpha=0.3)
plt.plot(t[ind1:ind2], 2 * sig72[ind1:ind2], label='wide harmonic component')
plt.legend()
# -------------------------------------------
# plot the magnitude of FFT
# -------------------------------------------
plt.figure()
plt.subplot(221)
plot_fft(sig_sawtooth, sfreq)
plt.title('sawtooth')
plt.subplot(222)
plot_fft(sig1, sfreq)
plt.title('1st harmonic')
plt.subplot(223)
plot_fft(sig7, sfreq)
plt.title('7th harmonic-narrow')
plt.subplot(224)
plot_fft(sig72, sfreq)
plt.title('7th harmonic-wide')
| [
"numpy.abs",
"tools_signal.plot_fft",
"scipy.signal.filtfilt",
"tools_signal.hilbert_",
"matplotlib.pyplot.plot",
"tools_connectivity.compute_phase_connectivity",
"matplotlib.pyplot.legend",
"numpy.array",
"numpy.linspace",
"scipy.signal.sawtooth",
"matplotlib.pyplot.figure",
"matplotlib.pyplo... | [((1223, 1247), 'numpy.arange', 'np.arange', (['dt', 't_len', 'dt'], {}), '(dt, t_len, dt)\n', (1232, 1247), True, 'import numpy as np\n'), ((1451, 1480), 'numpy.linspace', 'np.linspace', (['(0)', 't_len', 'n_samp'], {}), '(0, t_len, n_samp)\n', (1462, 1480), True, 'import numpy as np\n'), ((1503, 1536), 'scipy.signal.sawtooth', 'sawtooth', (['(2 * np.pi * f0 * t)', '(0.1)'], {}), '(2 * np.pi * f0 * t, 0.1)\n', (1511, 1536), False, 'from scipy.signal import butter, filtfilt, sawtooth\n'), ((1536, 1565), 'tools_signal.plot_fft', 'plot_fft', (['sig_sawtooth', 'sfreq'], {}), '(sig_sawtooth, sfreq)\n', (1544, 1565), False, 'from tools_signal import plot_fft, hilbert_\n'), ((1898, 1928), 'scipy.signal.filtfilt', 'filtfilt', (['b1', 'a1', 'sig_sawtooth'], {}), '(b1, a1, sig_sawtooth)\n', (1906, 1928), False, 'from scipy.signal import butter, filtfilt, sawtooth\n'), ((1936, 1966), 'scipy.signal.filtfilt', 'filtfilt', (['b7', 'a7', 'sig_sawtooth'], {}), '(b7, a7, sig_sawtooth)\n', (1944, 1966), False, 'from scipy.signal import butter, filtfilt, sawtooth\n'), ((1975, 2007), 'scipy.signal.filtfilt', 'filtfilt', (['b72', 'a72', 'sig_sawtooth'], {}), '(b72, a72, sig_sawtooth)\n', (1983, 2007), False, 'from scipy.signal import butter, filtfilt, sawtooth\n'), ((2070, 2084), 'tools_signal.hilbert_', 'hilbert_', (['sig1'], {}), '(sig1)\n', (2078, 2084), False, 'from tools_signal import plot_fft, hilbert_\n'), ((2094, 2108), 'tools_signal.hilbert_', 'hilbert_', (['sig7'], {}), '(sig7)\n', (2102, 2108), False, 'from tools_signal import plot_fft, hilbert_\n'), ((2119, 2134), 'tools_signal.hilbert_', 'hilbert_', (['sig72'], {}), '(sig72)\n', (2127, 2134), False, 'from tools_signal import plot_fft, hilbert_\n'), ((2434, 2502), 'tools_connectivity.compute_phase_connectivity', 'compute_phase_connectivity', (['sig1_h', 'sig7_h', '(1)', '(7)', '"""coh"""'], {'type1': '"""abs"""'}), "(sig1_h, sig7_h, 1, 7, 'coh', type1='abs')\n", (2460, 2502), False, 'from 
tools_connectivity import compute_phase_connectivity\n'), ((2512, 2581), 'tools_connectivity.compute_phase_connectivity', 'compute_phase_connectivity', (['sig1_h', 'sig72_h', '(1)', '(7)', '"""coh"""'], {'type1': '"""abs"""'}), "(sig1_h, sig72_h, 1, 7, 'coh', type1='abs')\n", (2538, 2581), False, 'from tools_connectivity import compute_phase_connectivity\n'), ((3021, 3033), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3031, 3033), True, 'import matplotlib.pyplot as plt\n'), ((3034, 3124), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(0.25 * sig_sawtooth[ind1:ind2] + 1.5)'], {'label': '"""sawtooth signal"""'}), "(t[ind1:ind2], 0.25 * sig_sawtooth[ind1:ind2] + 1.5, label=\n 'sawtooth signal')\n", (3042, 3124), True, 'import matplotlib.pyplot as plt\n'), ((3118, 3204), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(0.25 * sig1[ind1:ind2] + 1)'], {'label': '"""fundamental component"""'}), "(t[ind1:ind2], 0.25 * sig1[ind1:ind2] + 1, label=\n 'fundamental component')\n", (3126, 3204), True, 'import matplotlib.pyplot as plt\n'), ((3198, 3277), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(0.25 * sig1[ind1:ind2] + 0.5)'], {'color': '"""orange"""', 'alpha': '(0.3)'}), "(t[ind1:ind2], 0.25 * sig1[ind1:ind2] + 0.5, color='orange', alpha=0.3)\n", (3206, 3277), True, 'import matplotlib.pyplot as plt\n'), ((3276, 3353), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(2 * sig7[ind1:ind2] + 0.5)'], {'label': '"""harmonic component"""'}), "(t[ind1:ind2], 2 * sig7[ind1:ind2] + 0.5, label='harmonic component')\n", (3284, 3353), True, 'import matplotlib.pyplot as plt\n'), ((3352, 3425), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(0.25 * sig1[ind1:ind2])'], {'color': '"""orange"""', 'alpha': '(0.3)'}), "(t[ind1:ind2], 0.25 * sig1[ind1:ind2], color='orange', alpha=0.3)\n", (3360, 3425), True, 'import matplotlib.pyplot as plt\n'), ((3426, 3503), 'matplotlib.pyplot.plot', 'plt.plot', (['t[ind1:ind2]', '(2 * 
sig72[ind1:ind2])'], {'label': '"""wide harmonic component"""'}), "(t[ind1:ind2], 2 * sig72[ind1:ind2], label='wide harmonic component')\n", (3434, 3503), True, 'import matplotlib.pyplot as plt\n'), ((3504, 3516), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (3514, 3516), True, 'import matplotlib.pyplot as plt\n'), ((3639, 3651), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3649, 3651), True, 'import matplotlib.pyplot as plt\n'), ((3652, 3668), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(221)'], {}), '(221)\n', (3663, 3668), True, 'import matplotlib.pyplot as plt\n'), ((3669, 3698), 'tools_signal.plot_fft', 'plot_fft', (['sig_sawtooth', 'sfreq'], {}), '(sig_sawtooth, sfreq)\n', (3677, 3698), False, 'from tools_signal import plot_fft, hilbert_\n'), ((3699, 3720), 'matplotlib.pyplot.title', 'plt.title', (['"""sawtooth"""'], {}), "('sawtooth')\n", (3708, 3720), True, 'import matplotlib.pyplot as plt\n'), ((3721, 3737), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(222)'], {}), '(222)\n', (3732, 3737), True, 'import matplotlib.pyplot as plt\n'), ((3738, 3759), 'tools_signal.plot_fft', 'plot_fft', (['sig1', 'sfreq'], {}), '(sig1, sfreq)\n', (3746, 3759), False, 'from tools_signal import plot_fft, hilbert_\n'), ((3760, 3785), 'matplotlib.pyplot.title', 'plt.title', (['"""1st harmonic"""'], {}), "('1st harmonic')\n", (3769, 3785), True, 'import matplotlib.pyplot as plt\n'), ((3786, 3802), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(223)'], {}), '(223)\n', (3797, 3802), True, 'import matplotlib.pyplot as plt\n'), ((3803, 3824), 'tools_signal.plot_fft', 'plot_fft', (['sig7', 'sfreq'], {}), '(sig7, sfreq)\n', (3811, 3824), False, 'from tools_signal import plot_fft, hilbert_\n'), ((3825, 3857), 'matplotlib.pyplot.title', 'plt.title', (['"""7th harmonic-narrow"""'], {}), "('7th harmonic-narrow')\n", (3834, 3857), True, 'import matplotlib.pyplot as plt\n'), ((3858, 3874), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(224)'], {}), 
'(224)\n', (3869, 3874), True, 'import matplotlib.pyplot as plt\n'), ((3875, 3897), 'tools_signal.plot_fft', 'plot_fft', (['sig72', 'sfreq'], {}), '(sig72, sfreq)\n', (3883, 3897), False, 'from tools_signal import plot_fft, hilbert_\n'), ((3898, 3928), 'matplotlib.pyplot.title', 'plt.title', (['"""7th harmonic-wide"""'], {}), "('7th harmonic-wide')\n", (3907, 3928), True, 'import matplotlib.pyplot as plt\n'), ((1293, 1313), 'numpy.arange', 'np.arange', (['(0)', 'n_samp'], {}), '(0, n_samp)\n', (1302, 1313), True, 'import numpy as np\n'), ((2296, 2310), 'numpy.abs', 'np.abs', (['sig7_h'], {}), '(sig7_h)\n', (2302, 2310), True, 'import numpy as np\n'), ((2382, 2397), 'numpy.abs', 'np.abs', (['sig72_h'], {}), '(sig72_h)\n', (2388, 2397), True, 'import numpy as np\n'), ((2847, 2865), 'numpy.abs', 'np.abs', (['(t - 40.045)'], {}), '(t - 40.045)\n', (2853, 2865), True, 'import numpy as np\n'), ((2884, 2902), 'numpy.abs', 'np.abs', (['(t - 41.045)'], {}), '(t - 41.045)\n', (2890, 2902), True, 'import numpy as np\n'), ((1653, 1679), 'numpy.array', 'np.array', (['[f0 - 1, f0 + 1]'], {}), '([f0 - 1, f0 + 1])\n', (1661, 1679), True, 'import numpy as np\n'), ((1727, 1761), 'numpy.array', 'np.array', (['[7 * f0 - 1, 7 * f0 + 1]'], {}), '([7 * f0 - 1, 7 * f0 + 1])\n', (1735, 1761), True, 'import numpy as np\n'), ((1811, 1845), 'numpy.array', 'np.array', (['[6 * f0 - 1, 8 * f0 + 1]'], {}), '([6 * f0 - 1, 8 * f0 + 1])\n', (1819, 1845), True, 'import numpy as np\n')] |
import copy
import torch
import numpy as np
from torch.utils.data import DataLoader
from src.cli import get_args
from src.utils import capitalize_first_letter, load
from src.data import get_data, get_glove_emotion_embs
from src.trainers.sentiment import SentiTrainer
from src.trainers.emotion import MoseiEmoTrainer, IemocapTrainer
from src.models import baselines # EF_LSTM, LF_LSTM, EF_LF_LSTM
from src.models.transformers import EF_Transformer
from src.models.mult import MULTModel
from src.models.eea import EmotionEmbAttnModel
from src.config import NUM_CLASSES, MULT_PARAMS, EMOTIONS
if __name__ == "__main__":
    # All hyper-parameters and run options come from the command line as a dict.
    args = get_args()
    # Fix seed for reproducibility
    seed = args['seed']
    torch.manual_seed(seed)
    np.random.seed(seed)
    # Set device
    # os.environ["CUDA_VISIBLE_DEVICES"] = args['cuda']
    device = torch.device(f"cuda:{args['cuda']}" if torch.cuda.is_available() else 'cpu')
    # -------------------------------------------------------------------------
    # Data: one DataLoader per split; only the training split is shuffled.
    print("Start loading the data....")
    train_data = get_data(args, 'train')
    valid_data = get_data(args, 'valid')
    test_data = get_data(args, 'test')
    train_loader = DataLoader(train_data, batch_size=args['batch_size'], shuffle=True)
    valid_loader = DataLoader(valid_data, batch_size=args['batch_size'], shuffle=False)
    test_loader = DataLoader(test_data, batch_size=args['batch_size'], shuffle=False)
    print(f'Train samples = {len(train_loader.dataset)}')
    print(f'Valid samples = {len(valid_loader.dataset)}')
    print(f'Test samples = {len(test_loader.dataset)}')
    dataloaders = {
        'train': train_loader,
        'valid': valid_loader,
        'test': test_loader
    }
    # Per-modality input feature dimensions, in the order returned by get_dim().
    modal_dims = list(train_data.get_dim())
    model_type = args['model'].lower()
    fusion_type = args['fusion'].lower()
    # -------------------------------------------------------------------------
    # Model construction.
    if model_type == 'mult':
        mult_params = MULT_PARAMS[args['dataset']]
        mult_params['orig_d_l'] = modal_dims[0]
        mult_params['orig_d_a'] = modal_dims[1]
        mult_params['orig_d_v'] = modal_dims[2]
        mult_params['hidden_dim'] = args['hidden_dim']
        if args['zsl'] != -1:
            # Zero-shot setting: one extra output unit for the held-out class.
            mult_params['output_dim'] = mult_params['output_dim'] + 1
        model = MULTModel(mult_params)
    elif model_type == 'rnn':
        if fusion_type == 'lf':
            MODEL = baselines.LF_RNN
        elif fusion_type == 'ef':
            MODEL = baselines.EF_RNN
        elif fusion_type == 'eflf':
            MODEL = baselines.EF_LF_RNN
        elif fusion_type == 'ts':
            MODEL = baselines.TextSelectiveRNN
        else:
            raise ValueError('Wrong fusion!')
        num_classes = NUM_CLASSES[args['dataset']]
        if args['zsl'] != -1:
            # iemocap gains an output for the held-out class; the other datasets
            # drop the held-out class from the outputs.
            if args['dataset'] == 'iemocap':
                num_classes += 1
            else:
                num_classes -= 1
        model = MODEL(
            num_classes=num_classes,
            input_sizes=modal_dims,
            hidden_size=args['hidden_size'],
            hidden_sizes=args['hidden_sizes'],
            num_layers=args['num_layers'],
            dropout=args['dropout'],
            bidirectional=args['bidirectional'],
            gru=args['gru']
        )
    elif model_type == 'transformer':
        if fusion_type == 'lf':
            MODEL = EF_Transformer
        elif fusion_type == 'ef':
            MODEL = EF_Transformer
        elif fusion_type == 'eflf':
            MODEL = EF_Transformer
        else:
            raise ValueError('Wrong fusion!')
        model = MODEL()
    elif model_type == 'eea':
        zsl = args['zsl']
        # Copy so the module-level EMOTIONS table is never mutated: the original
        # aliased it and appended in place in the iemocap zero-shot branch.
        emo_list = list(EMOTIONS[args['dataset']])
        if zsl != -1:
            if args['dataset'] == 'iemocap':
                emo_list.append(EMOTIONS['iemocap9'][zsl])
            else:
                emo_list = emo_list[:zsl] + emo_list[zsl + 1:]
        if args['cap']:
            emo_list = capitalize_first_letter(emo_list)
        # GloVe embeddings of the emotion words, collected in emo_list order.
        emo_weights = get_glove_emotion_embs(args['glove_emo_path'])
        emo_weight = []
        for emo in emo_list:
            emo_weight.append(emo_weights[emo])
        MODEL = EmotionEmbAttnModel
        model = MODEL(
            num_classes=len(emo_list),
            input_sizes=modal_dims,
            hidden_size=args['hidden_size'],
            hidden_sizes=args['hidden_sizes'],
            num_layers=args['num_layers'],
            dropout=args['dropout'],
            bidirectional=args['bidirectional'],
            modalities=args['modalities'],
            device=device,
            emo_weight=emo_weight,
            gru=args['gru']
        )
    else:
        raise ValueError('Wrong model!')
    model = model.to(device=device)
    # -------------------------------------------------------------------------
    # Load model checkpoint (optional). In zero-shot testing the checkpoint's
    # output layer has one row fewer than the current model, so its rows are
    # copied one by one, skipping the row of the held-out class, and the
    # original output-layer entries are popped from the state dict.
    if args['ckpt'] != '':
        state_dict = load(args['ckpt'])
        if args['model'] == 'eea':
            state_dict.pop('textEmoEmbs.weight')
            if state_dict['modality_weights.weight'].size(0) != len(args['modalities']):
                state_dict.pop('modality_weights.weight')
        if args['model'] == 'rnn':
            if args['zsl_test'] != -1:
                out_weight = copy.deepcopy(model.out.weight)
                out_bias = copy.deepcopy(model.out.bias)
                pretrained_out_weight = state_dict['out.weight']
                pretrained_out_bias = state_dict['out.bias']
                indicator = 0
                for i in range(len(model.out.weight)):
                    if i == args['zsl_test']:
                        indicator = 1
                        continue
                    out_weight[i] = pretrained_out_weight[i - indicator]
                    out_bias[i] = pretrained_out_bias[i - indicator]
                model.out.weight = torch.nn.Parameter(out_weight)
                model.out.bias = torch.nn.Parameter(out_bias)
                state_dict.pop('out.weight')
                state_dict.pop('out.bias')
        if args['model'] == 'mult':
            if args['zsl_test'] != -1:
                out_weight = copy.deepcopy(model.out_layer.weight)
                out_bias = copy.deepcopy(model.out_layer.bias)
                pretrained_out_weight = state_dict['out_layer.weight']
                pretrained_out_bias = state_dict['out_layer.bias']
                indicator = 0
                for i in range(len(model.out_layer.weight)):
                    if i == args['zsl_test']:
                        indicator = 1
                        continue
                    out_weight[i] = pretrained_out_weight[i - indicator]
                    out_bias[i] = pretrained_out_bias[i - indicator]
                model.out_layer.weight = torch.nn.Parameter(out_weight)
                model.out_layer.bias = torch.nn.Parameter(out_bias)
                state_dict.pop('out_layer.weight')
                state_dict.pop('out_layer.bias')
        model.load_state_dict(state_dict, strict=False)
    # -------------------------------------------------------------------------
    # Optimizer, LR scheduler and loss. Unknown choices raise immediately
    # (previously they surfaced later as a NameError).
    if args['optim'] == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    elif args['optim'] == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), lr=args['learning_rate'], weight_decay=args['weight_decay'])
    else:
        raise ValueError('Wrong optim!')
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='min', factor=0.1, patience=args['patience'], verbose=True)
    if args['loss'] == 'l1':
        criterion = torch.nn.L1Loss()
    elif args['loss'] == 'mse':
        criterion = torch.nn.MSELoss()
    elif args['loss'] == 'ce':
        criterion = torch.nn.CrossEntropyLoss()
    elif args['loss'] == 'bce':
        # Weight positive examples to counter label imbalance.
        pos_weight = train_data.get_pos_weight()
        pos_weight = pos_weight.to(device)
        criterion = torch.nn.BCEWithLogitsLoss(pos_weight=pos_weight)
        # criterion = torch.nn.BCEWithLogitsLoss()
    else:
        raise ValueError('Wrong loss!')
    # -------------------------------------------------------------------------
    # Pick the trainer matching the dataset and run the requested phase.
    if args['dataset'] == 'mosi' or args['dataset'] == 'mosei_senti':
        TRAINER = SentiTrainer
    elif args['dataset'] == 'mosei_emo':
        TRAINER = MoseiEmoTrainer
    elif args['dataset'] == 'iemocap':
        TRAINER = IemocapTrainer
    else:
        raise ValueError('Wrong dataset!')
    trainer = TRAINER(args, model, criterion, optimizer, scheduler, device, dataloaders)
    if args['test']:
        trainer.test()
    elif args['valid']:
        trainer.valid()
    else:
        trainer.train()
| [
"torch.manual_seed",
"torch.optim.lr_scheduler.ReduceLROnPlateau",
"src.models.mult.MULTModel",
"copy.deepcopy",
"torch.nn.CrossEntropyLoss",
"torch.nn.BCEWithLogitsLoss",
"torch.nn.L1Loss",
"src.data.get_data",
"torch.nn.MSELoss",
"torch.cuda.is_available",
"torch.nn.Parameter",
"numpy.random... | [((630, 640), 'src.cli.get_args', 'get_args', ([], {}), '()\n', (638, 640), False, 'from src.cli import get_args\n'), ((705, 728), 'torch.manual_seed', 'torch.manual_seed', (['seed'], {}), '(seed)\n', (722, 728), False, 'import torch\n'), ((733, 753), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (747, 753), True, 'import numpy as np\n'), ((977, 1000), 'src.data.get_data', 'get_data', (['args', '"""train"""'], {}), "(args, 'train')\n", (985, 1000), False, 'from src.data import get_data, get_glove_emotion_embs\n'), ((1018, 1041), 'src.data.get_data', 'get_data', (['args', '"""valid"""'], {}), "(args, 'valid')\n", (1026, 1041), False, 'from src.data import get_data, get_glove_emotion_embs\n'), ((1058, 1080), 'src.data.get_data', 'get_data', (['args', '"""test"""'], {}), "(args, 'test')\n", (1066, 1080), False, 'from src.data import get_data, get_glove_emotion_embs\n'), ((1101, 1168), 'torch.utils.data.DataLoader', 'DataLoader', (['train_data'], {'batch_size': "args['batch_size']", 'shuffle': '(True)'}), "(train_data, batch_size=args['batch_size'], shuffle=True)\n", (1111, 1168), False, 'from torch.utils.data import DataLoader\n'), ((1188, 1256), 'torch.utils.data.DataLoader', 'DataLoader', (['valid_data'], {'batch_size': "args['batch_size']", 'shuffle': '(False)'}), "(valid_data, batch_size=args['batch_size'], shuffle=False)\n", (1198, 1256), False, 'from torch.utils.data import DataLoader\n'), ((1275, 1342), 'torch.utils.data.DataLoader', 'DataLoader', (['test_data'], {'batch_size': "args['batch_size']", 'shuffle': '(False)'}), "(test_data, batch_size=args['batch_size'], shuffle=False)\n", (1285, 1342), False, 'from torch.utils.data import DataLoader\n'), ((7108, 7231), 'torch.optim.lr_scheduler.ReduceLROnPlateau', 'torch.optim.lr_scheduler.ReduceLROnPlateau', (['optimizer'], {'mode': '"""min"""', 'factor': '(0.1)', 'patience': "args['patience']", 'verbose': '(True)'}), "(optimizer, mode='min', factor=\n 0.1, 
patience=args['patience'], verbose=True)\n", (7150, 7231), False, 'import torch\n'), ((2155, 2177), 'src.models.mult.MULTModel', 'MULTModel', (['mult_params'], {}), '(mult_params)\n', (2164, 2177), False, 'from src.models.mult import MULTModel\n'), ((4688, 4706), 'src.utils.load', 'load', (["args['ckpt']"], {}), "(args['ckpt'])\n", (4692, 4706), False, 'from src.utils import capitalize_first_letter, load\n'), ((7277, 7294), 'torch.nn.L1Loss', 'torch.nn.L1Loss', ([], {}), '()\n', (7292, 7294), False, 'import torch\n'), ((880, 905), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (903, 905), False, 'import torch\n'), ((7347, 7365), 'torch.nn.MSELoss', 'torch.nn.MSELoss', ([], {}), '()\n', (7363, 7365), False, 'import torch\n'), ((5041, 5072), 'copy.deepcopy', 'copy.deepcopy', (['model.out.weight'], {}), '(model.out.weight)\n', (5054, 5072), False, 'import copy\n'), ((5100, 5129), 'copy.deepcopy', 'copy.deepcopy', (['model.out.bias'], {}), '(model.out.bias)\n', (5113, 5129), False, 'import copy\n'), ((5635, 5665), 'torch.nn.Parameter', 'torch.nn.Parameter', (['out_weight'], {}), '(out_weight)\n', (5653, 5665), False, 'import torch\n'), ((5699, 5727), 'torch.nn.Parameter', 'torch.nn.Parameter', (['out_bias'], {}), '(out_bias)\n', (5717, 5727), False, 'import torch\n'), ((5912, 5949), 'copy.deepcopy', 'copy.deepcopy', (['model.out_layer.weight'], {}), '(model.out_layer.weight)\n', (5925, 5949), False, 'import copy\n'), ((5977, 6012), 'copy.deepcopy', 'copy.deepcopy', (['model.out_layer.bias'], {}), '(model.out_layer.bias)\n', (5990, 6012), False, 'import copy\n'), ((6542, 6572), 'torch.nn.Parameter', 'torch.nn.Parameter', (['out_weight'], {}), '(out_weight)\n', (6560, 6572), False, 'import torch\n'), ((6612, 6640), 'torch.nn.Parameter', 'torch.nn.Parameter', (['out_bias'], {}), '(out_bias)\n', (6630, 6640), False, 'import torch\n'), ((7417, 7444), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (7442, 7444), False, 
'import torch\n'), ((3876, 3922), 'src.data.get_glove_emotion_embs', 'get_glove_emotion_embs', (["args['glove_emo_path']"], {}), "(args['glove_emo_path'])\n", (3898, 3922), False, 'from src.data import get_data, get_glove_emotion_embs\n'), ((7589, 7638), 'torch.nn.BCEWithLogitsLoss', 'torch.nn.BCEWithLogitsLoss', ([], {'pos_weight': 'pos_weight'}), '(pos_weight=pos_weight)\n', (7615, 7638), False, 'import torch\n'), ((3819, 3852), 'src.utils.capitalize_first_letter', 'capitalize_first_letter', (['emo_list'], {}), '(emo_list)\n', (3842, 3852), False, 'from src.utils import capitalize_first_letter, load\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@Time : 2019/5/15
@Author : AnNing
"""
from __future__ import print_function
import os
import sys
import numpy as np
from initialize import load_yaml_file
from load import ReadAhiL1
TEST = True
def ndsi(in_file_l1, in_file_geo, in_file_cloud):
# -------------------------------------------------------------------------
# SolarZenith_MAX : MAXIMUM SOLAR ZENITH ANGLE, *1.0 DEGREE
# solar_zenith_max = None
# -------------------------------------------------------------------------
# Date and Time
# i_year = None
# i_month = None
# i_day = None
# i_minute = None
# n_year = None
# n_month = None
# n_day = None
# n_hour = None
# n_minute = None
# n_second = None
# -------------------------------------------------------------------------
# out data
# r4_rt = np.array([])
# r4_info = np.array([])
# i2_cm = np.array([])
# r4_test = np.array([])
# -------------------------------------------------------------------------
# swath sum
# i_swath_valid = None
# i_sum_valid = None
# -------------------------------------------------------------------------
# dim_x = None
# dim_y = None
# dim_z = None
# -------------------------------------------------------------------------
# r_lats = None # LATITUDE
# r_lons = None # LONGITUDE
# a_satz = None # SATELLITE ZENITH ANGLE
# a_sata = None # SATELLITE AZIMUTH
# a_sunz = None # SOLAR ZENITH ANGLE
# a_suna = None # SOLAR AZIMUTH
# r_dems = None # DEM MASK
# i_mask = None # LANDCOVER MASK
# i_cm = None # Cloud MASK
# -------------------------------------------------------------------------
# cossl = None # SOLAR-ZENITH-ANGLE-COSINE
# glint = None # SUN GLINT
# lsm = None # Mask For Water & Land
# i_avalible = None # Mask For Data to be used
# -------------------------------------------------------------------------
# ref_01 = None # 0.645 um : Ref, NDVI
# ref_02 = None # 0.865 um : Ref, NDVI
# ref_03 = None # 0.470 um : Ref, NDVI
# ref_04 = None # 0.555 um : Ref, NDVI
# ref_05 = None # 1.640 um : Ref, NDVI
# ref_06 = None # 1.640 um : Ref, NDSI
# ref_07 = None # 2.130 um : Ref, NDSI
# ref_19 = None # 0.940 um : Ref, Vapour
# ref_26 = None # 1.375 um : Ref, Cirrus
# tbb_20 = None # 3.750 um : TBB, Temperature
# tbb_31 = None # 11.030 um : TBB, Temperature
# tbb_32 = None # 12.020 um : TBB, Temperature
# -------------------------------------------------------------------------
# ndvis = None # R2-R1/R2+R1: R0.86,R0.65
# ndsi_6 = None # R4-R6/R4+R6: R0.55,R1.64
# ndsi_7 = None # R4-R7/R4+R7: R0.55,R2.13
#
# dr_16 = None # R1-R6: R0.86,R1.64
# dr_17 = None # R1-0.5*R7: R0.86,R2.13
#
# dt_01 = None # T20-T31: T3.75-T11.0
# dt_02 = None # T20-T32: T3.75-T12.0
# dt_12 = None # T31-T32: T11.0-T12.0
#
# rr_21 = None # R2/R1: R0.86,R0.65
# rr_46 = None # R4/R6: R0.55,R1.64
# rr_47 = None # R4/R7: R0.55,R2.13
#
# dt_34 = None # T20-T23: T3.75-T4.05
# dt_81 = None # T29-T31: T8.55-T11.0
# dt_38 = None # T20-T29: T3.75-T8.55
# -------------------------------------------------------------------------
# Used for Masking Over-Estimation for snow by monthly snow pack lines.
# LookUpTable For Monthly CHN-SnowPackLine (ZhengZJ, 2006)
# Line: Longitude from 65.0 to 145.0 (Step is 0.1 deg.)
# Column: Month from Jan to Dec (Step is month)
# Value: Latitude (Unit is deg.)
# r_mon_snow_line = np.array([]) # Monthly CHN-SnowPackLine
# Used for judging low or water cloud by BT difference.
# LookUpTable For T11-T12 (Saunders and Kriebel, 1988)
# Line: T11 from 250.0K to 310.0K (Step is 1.0K)
# Column: Secant-SZA from 1.00 to 2.50 (Step is 0.01)
# Value: T11-T12 (Unit is K)
# delta_bt_lut = np.array([]) # LookUpTable for BT11-BT12
# Used for judging snow in forest by NDSI and NDVI.
# LookUpTable For Snow in Forest , by NDVI-NDSI (Klein et al., 1998)
# Line: NDVI from 0.010 to 1.000 (Step is 0.01)
# Column: NDSI from 0.01000 to 1.00000 (Step is 0.00001)
# Value: NDSI (Unit is null)
# y_ndsi_x_ndvi = np.array([]) # LookUpTable for NDSI-NDVI
# !!!!! Four Variables below should be USED TOGETHER.
# !! R138R164LUT,R164T11_LUT,R164R138LUT,T11mT12R164LUT
# !! LookUpTable For FreshSnow&WaterIceCloud (ZhengZJ, 2006)
# !! (1)Line-R164T11_LUT: T11 from 225.0 to 280.0 (Step is 0.1K)
# !! Column--R164T11_LUT: R164 from 0.00000 to 0.24000 (No Step)
# !! (2)Line-T11mT12R164LUT: R164 from 0.100 to 0.250 (Step is 0.001)
# !! Column-T11mT12R164LUT: T11mT12 from -40 to 130 (No Step)
# !! (3)Line-R138R164LUT: R164 from 0.010 to 0.260 (Step is 0.001)
# !! Column-R138R164LUT: R138 from 0.0020 to 0.3000 (No Step)
# !! (4)Line-R164R138LUT: R138 from 0.000 to 0.550 (Step is 0.001)
# !! Column-R164R138LUT: R164 from 0.1500 to 0.3000 (No Step)
# y_r164_x_t11 = np.array([]) # LookUpTable For R164T11
# y_t11_m_t12_x_r164 = np.array([]) # LookUpTable For T11mT12R164
# y_r138_x_r164 = np.array([]) # LookUpTable For R138R164
# y_r164_x_r138 = np.array([]) # LookUpTable For R164R138
# -------------------------------------------------------------------------
# Used for Reference of 11um Minimum Brightness Temperature.
# ref_bt11um = None
# ref_bt11um_slope_n = None
# ref_bt11um_slope_s = None
# ref_bt11um_offset_n = None
# ref_bt11um_offset_s = None
# a_low_t_lat = None # Referential Latitude for BT11 LowThreshold
# a_low_bt11 = None # Referential Temp for BT11 LowThreshold
# delta_t_low = None # Referential Temporal Delta-Temp for BT11_Low
# b_hai_t_lat = None # Referential Latitude for BT11 HaiThreshold
# b_hai_bt11 = None # Referential Temp for BT11 HaiThreshold
# delta_t_hai = None # Referential Temporal Delta-Temp for BT11_Hai
#
# a_low_bt11_n = None
# a_low_bt11_s = None
# b_hai_bt11_n = None
# b_hai_bt11_s = None
# -------------------------------------------------------------------------
# Used for Calculate and Store Xun number from 1 to 36 in a year.
# f_xun_n = None
# f_xun_s = None
# i2_xun_num = None
# -------------------------------------------------------------------------
# i_step = np.array([]) # TEST-STEP
# i_mark = np.array([]) # SNOW MAP
# !!!! VALUE = 255 : Fill Data--no Data expected For pixel
# !!!! VALUE = 254 : Saturated MODIS sensor detector
# !!!! VALUE = 240 : NATIONAL OR PROVINCIAL BOUNDARIES
# !!!! VALUE = 200 : Snow
# !!!! VALUE = 100 : Snow-Covered Lake Ice
# !!!! VALUE = 50 : Cloud Obscured
# !!!! VALUE = 39 : Ocean
# !!!! VALUE = 37 : Inland Water
# !!!! VALUE = 25 : Land--no snow detected
# !!!! VALUE = 11 : Darkness, terminator or polar
# !!!! VALUE = 1 : No Decision
# !!!! VALUE = 0 : Sensor Data Missing
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
# -------------------------------------------------------------------------
print('Program : Make SNC')
# -------------------------------------------------------------------------
path = os.path.abspath(os.path.dirname(__file__))
name_list_swath_snc = os.path.join(path, 'ndsi_cfg.yaml')
print('Config file : {}'.format(name_list_swath_snc))
a = load_yaml_file(name_list_swath_snc)
solar_zenith_max = float(a['SolarZenith_MAX'])
inn_put_para_path = a['InnPut_ParaPath']
inn_put_root_l01 = a['InnPut_Root_L01']
inn_put_root_l02 = a['InnPut_Root_L02']
inn_put_root_l03 = a['InnPut_Root_L03']
# inn_put_root_l11 = a['InnPut_Root_L11']
# inn_put_root_l12 = a['InnPut_Root_L12']
# inn_put_root_l13 = a['InnPut_Root_L13']
# inn_put_root_l14 = a['InnPut_Root_L14']
inn_put_file_l01 = os.path.join(path, inn_put_para_path, inn_put_root_l01)
inn_put_file_l02 = os.path.join(path, inn_put_para_path, inn_put_root_l02)
inn_put_file_l03 = os.path.join(path, inn_put_para_path, inn_put_root_l03)
# inn_put_file_l11 = os.path.join(path, inn_put_para_path, inn_put_root_l11)
# inn_put_file_l12 = os.path.join(path, inn_put_para_path, inn_put_root_l12)
# inn_put_file_l13 = os.path.join(path, inn_put_para_path, inn_put_root_l13)
# inn_put_file_l14 = os.path.join(path, inn_put_para_path, inn_put_root_l14)
delta_bt_lut = np.loadtxt(inn_put_file_l01, skiprows=1)[:, 1:]
r_mon_snow_line_temp = np.loadtxt(inn_put_file_l02, skiprows=1)[:, 1:]
r_mon_snow_line = np.zeros((3601, 12, 2))
r_mon_snow_line[:, :, 0] = r_mon_snow_line_temp[:, 0:24:2]
r_mon_snow_line[:, :, 1] = r_mon_snow_line_temp[:, 1:24:2]
y_ndsi_x_ndvi = np.loadtxt(inn_put_file_l03, skiprows=1)[:]
# y_r138_x_r164 = np.loadtxt(inn_put_file_l11, skiprows=1)[:]
# y_r164_x_t11 = np.loadtxt(inn_put_file_l12, skiprows=1)[:]
# y_r164_x_r138 = np.loadtxt(inn_put_file_l13, skiprows=1)[:]
# y_t11_m_t12_x_r164 = np.loadtxt(inn_put_file_l14, skiprows=1)[:]
# -------------------------------------------------------------------------
# Set Date Information
year_min = 2000
year_max = 2048
month_min = 1
month_max = 12
date_min = 1
data_max = 31
hour_min = 0
hour_max = 23
read_ahi = ReadAhiL1(in_file_l1, geo_file=in_file_geo, cloud_file=in_file_cloud)
ymd = read_ahi.ymd
hms = read_ahi.hms
j_year = int(ymd[0:4])
j_month = int(ymd[4:6])
j_date = int(ymd[6:8])
j_hour = int(hms[0:2])
if (not year_min <= j_year <= year_max) or (not month_min <= j_month <= month_max) or \
(not date_min <= j_date <= data_max) or (not hour_min <= j_hour <= hour_max):
raise ValueError('Wrongly Time Setting. Please Retry ......')
# -------------------------------------------------------------------------
# Calculating the Number of Xun (means ten day).
if j_date <= 10:
i2_xun_num = 3 * (j_month - 1) + 1
elif j_date > 20:
i2_xun_num = 3 * (j_month - 1) + 3
else:
i2_xun_num = 3 * (j_month - 1) + 2
if i2_xun_num == 21:
f_xun_n = 0.
elif i2_xun_num < 21:
f_xun_n = np.abs(np.sin(np.pi * (i2_xun_num - 21 + 36) / 36))
else:
f_xun_n = np.abs(np.sin(np.pi * (i2_xun_num - 21) / 36))
f_xun_s = np.sqrt(1.0 - f_xun_n ** 2)
if TEST:
print(' f_xun_n= ', f_xun_n, ' f_xun_s= ', f_xun_s)
# -------------------------------------------------------------------------
# Calculate Parameters (Slope & Offset) for Ref_BT11um
a_low_t_lat = 57.
a_low_bt11 = 243.
delta_t_low = 15.
b_hai_t_lat = 17.
b_hai_bt11 = 270.
delta_t_hai = 10.
a_low_bt11_n = a_low_bt11 - f_xun_n * delta_t_low
a_low_bt11_s = a_low_bt11 - f_xun_s * delta_t_low
b_hai_bt11_n = b_hai_bt11 - f_xun_n * delta_t_hai
b_hai_bt11_s = b_hai_bt11 - f_xun_s * delta_t_hai
if TEST:
print(' a_low_bt11= ', a_low_bt11, ' b_hai_bt11= ', b_hai_bt11)
print(' a_low_bt11_n= ', a_low_bt11_n, ' a_low_bt11_s= ', a_low_bt11_s)
print(' b_hai_bt11_n= ', b_hai_bt11_n, ' b_hai_bt11_s= ', b_hai_bt11_s)
ref_bt11um_slope_n = (b_hai_bt11_n - a_low_bt11_n) / (b_hai_t_lat - a_low_t_lat)
ref_bt11um_slope_s = (b_hai_bt11_s - a_low_bt11_s) / (b_hai_t_lat - a_low_t_lat)
ref_bt11um_offset_n = a_low_bt11_n - ref_bt11um_slope_n * a_low_t_lat
ref_bt11um_offset_s = a_low_bt11_s - ref_bt11um_slope_s * a_low_t_lat
if TEST:
print('ref_bt11um_slope_n', ref_bt11um_slope_n)
print('ref_bt11um_slope_s', ref_bt11um_slope_s)
print('ref_bt11um_offset_n', ref_bt11um_offset_n)
print('ref_bt11um_offset_s', ref_bt11um_offset_s)
# -------------------------------------------------------------------------
# load row and col
data_shape = read_ahi.data_shape
i_rows, i_cols = data_shape
# -------------------------------------------------------------------------
# Check Swath_Valid by Solar Zenith
d_solar_zenith = read_ahi.get_solar_zenith()
i_sum_valid = np.logical_and(d_solar_zenith > 0, d_solar_zenith < solar_zenith_max).sum()
if i_sum_valid < i_cols * 30:
raise ValueError('Valid data is not enough.{}<{}'.format(i_sum_valid, i_cols * 30))
# -------------------------------------------------------------------------
# Read FILE_GEO
# GET SensorZenith
d_sensor_zenith = read_ahi.get_sensor_zenith()
index_valid = np.logical_and(d_sensor_zenith > 0, d_sensor_zenith < 90)
d_sensor_zenith[index_valid] = d_sensor_zenith[index_valid] / 180 * np.pi
d_sensor_zenith[~index_valid] = np.nan
# GET SensorAzimuth
d_sensor_azimuth = read_ahi.get_sensor_azimuth()
index_valid = np.logical_and(d_sensor_azimuth > -180, d_sensor_azimuth < 180)
d_sensor_azimuth[index_valid] = d_sensor_azimuth[index_valid] / 180 * np.pi
d_sensor_azimuth[~index_valid] = np.nan
# GET SolarZenith
d_solar_zenith = read_ahi.get_solar_zenith()
index_valid = np.logical_and(d_solar_zenith > 0, d_solar_zenith < 180)
d_solar_zenith[index_valid] = d_solar_zenith[index_valid] / 180 * np.pi
d_solar_zenith[~index_valid] = np.nan
# GET SolarAzimuth
d_solar_azimuth = read_ahi.get_solar_azimuth()
index_valid = np.logical_and(d_solar_azimuth > -180, d_solar_azimuth < 180)
d_solar_azimuth[index_valid] = d_solar_azimuth[index_valid] / 180 * np.pi
d_solar_azimuth[~index_valid] = np.nan
# GET LATITUDE
r_lats = read_ahi.get_latitude()
# GET LONGITUDE
r_lons = read_ahi.get_longitude()
# GET Elevation
r_dems = read_ahi.get_height()
# GET LandSea
i_mask = read_ahi.get_land_sea_mask()
# -------------------------------------------------------------------------
# MAKE SEA-LAND MASK
# !!!! NATIONAL OR PROVINCIAL BOUNDARIES.
#
# !!!!!!!!!!!!!!!!!! LSM=0(1): Data IS ON WATER-BODY(LAND). !!!!!!!!!!!!!!!!!!
# c. ! iMASK = 0: SHALLOW_OCEAN !
# c. ! iMASK = 1: LAND !
# c. ! iMASK = 2: COASTLINE !
# c. ! iMASK = 3: SHALLOW_INLAND_WATER !
# c. ! iMASK = 4: EPHEMERAL_WATER !
# c. ! iMASK = 5: DEEP_INLAND_WATER !
# c. ! iMASK = 6: MODERATE_OCEAN !
# c. ! iMASK = 7: DEEP_OCEAN !
land_condition = np.logical_or.reduce((i_mask == 1, i_mask == 2, i_mask == 3))
sea_condition = np.logical_or(i_mask == 0, np.logical_and(i_mask > 3, i_mask < 8))
i_lsm = np.full(data_shape, np.nan)
i_lsm[land_condition] = 1
i_lsm[sea_condition] = 0
# -------------------------------------------------------------------------
# Read FILE_CM
# GET Cloud Mask
i_cm = read_ahi.get_cloudmask()
# -------------------------------------------------------------------------
# Read FILE_2KM
# COMPUTE The CORRECTED REFLECTANCE OF BANDs used
i_ref_01 = read_ahi.get_channel_data('VIS0064')
i_ref_02 = read_ahi.get_channel_data('VIS0086')
i_ref_03 = read_ahi.get_channel_data('VIS0046')
i_ref_04 = read_ahi.get_channel_data('VIS0051')
i_ref_06 = read_ahi.get_channel_data('VIS0160')
i_ref_07 = read_ahi.get_channel_data('VIS0230')
# i_ref_26 = read_ahi.get_channel_data('No') # 不能做卷积云的判断
# COMPUTE The CORRECTED REFLECTANCE OF BANDs used
i_tbb_20 = read_ahi.get_channel_data('IRX0390')
i_tbb_31 = read_ahi.get_channel_data('IRX1120')
i_tbb_32 = read_ahi.get_channel_data('IRX1230')
# -------------------------------------------------------------------------
# INITIALIZATION
i_mark = np.zeros(data_shape)
i_step = np.zeros(data_shape)
ref_lon = r_lons
ref_lat = r_lats
ref_dem = r_dems
a_satz = d_sensor_zenith
a_sata = d_sensor_azimuth
a_sunz = d_solar_zenith
a_suna = d_solar_azimuth
# -------------------------------------------------------------------------
# COMPUTE The SUN GLINT EAGLE
temp = np.sin(a_sunz) * np.sin(a_satz) * np.cos(a_suna - a_sata) + np.cos(a_sunz) * np.cos(a_satz)
temp[temp > 1] = 1
temp[temp < -1] = -1
glint = np.arccos(temp)
# -------------------------------------------------------------------------
lsm = i_lsm
i_avalible = np.ones(data_shape, dtype=np.int8)
index = np.isnan(a_sata)
i_mark[index] = 11
i_step[index] = 1
i_avalible[index] = 0
index = np.isnan(a_satz)
i_mark[index] = 11
i_step[index] = 2
i_avalible[index] = 0
index = np.isnan(a_sunz)
i_mark[index] = 11
i_step[index] = 3
i_avalible[index] = 0
index = np.isnan(a_suna)
i_mark[index] = 11
i_step[index] = 4
i_avalible[index] = 0
index = glint < 15 * np.pi / 180
i_mark[index] = 240
i_step[index] = 5
i_avalible[index] = 0
index = np.isnan(ref_lon)
i_mark[index] = 11
i_step[index] = 6
i_avalible[index] = 0
index = np.isnan(ref_lat)
i_mark[index] = 11
i_step[index] = 7
i_avalible[index] = 0
index = np.isnan(ref_dem)
i_mark[index] = 11
i_step[index] = 8
i_avalible[index] = 0
index = np.isnan(lsm)
i_mark[index] = 11
i_step[index] = 9
i_avalible[index] = 0
# -------------------------------------------------------------------------
# COMPUTE The SUN GLINT EAGLE
ref_bt11um = np.full(data_shape, np.nan)
ref_lat_abs = np.abs(ref_lat)
index = ref_lat >= 0
idx_ = np.logical_and(index, ref_lat_abs < b_hai_t_lat)
ref_bt11um[idx_] = ref_bt11um_slope_n * np.abs(b_hai_t_lat) + ref_bt11um_offset_n
idx_ = np.logical_and(index, ref_lat_abs > a_low_t_lat)
ref_bt11um[idx_] = ref_bt11um_slope_n * np.abs(a_low_t_lat) + ref_bt11um_offset_n
idx_ = np.logical_and.reduce((index, ref_lat_abs <= a_low_t_lat, ref_lat_abs >= b_hai_t_lat))
ref_bt11um[idx_] = ref_bt11um_slope_n * ref_lat_abs[idx_] + ref_bt11um_offset_n
index = ref_lat < 0
idx_ = np.logical_and(index, ref_lat_abs < b_hai_t_lat)
ref_bt11um[idx_] = ref_bt11um_slope_s * np.abs(b_hai_t_lat) + ref_bt11um_offset_s
idx_ = np.logical_and(index, ref_lat_abs > a_low_t_lat)
ref_bt11um[idx_] = ref_bt11um_slope_s * np.abs(a_low_t_lat) + ref_bt11um_offset_s
idx_ = np.logical_and.reduce((index, ref_lat_abs >= b_hai_t_lat, ref_lat_abs <= a_low_t_lat))
ref_bt11um[idx_] = ref_bt11um_slope_s * ref_lat_abs[idx_] + ref_bt11um_offset_s
# !!!!!!!!!!!!!!!!!!!!!!!!!! QUALITY CONTROLLING !!!!!!!!!!!!!!!!!!!!!!!!!!!
# iAvalible=1 !! iAvalible=0(1): Data IS(NOT) USABLE. !!
# !!!!!!!!!!!!!!!!!!!!!!!!!! QUALITY CONTROLLING !!!!!!!!!!!!!!!!!!!!!!!!!!!
ref_01 = i_ref_01
ref_02 = i_ref_02
ref_03 = i_ref_03
ref_04 = i_ref_04
ref_06 = i_ref_06
ref_07 = i_ref_07
# ref_26 = i_ref_26
tbb_20 = i_tbb_20
tbb_31 = i_tbb_31
tbb_32 = i_tbb_32
index = np.isnan(ref_01)
i_mark[index] = 255
i_step[index] = 11
i_avalible[index] = 0
index = np.isnan(ref_02)
i_mark[index] = 255
i_step[index] = 12
i_avalible[index] = 0
index = np.isnan(ref_03)
i_mark[index] = 255
i_step[index] = 13
i_avalible[index] = 0
index = np.isnan(ref_04)
i_mark[index] = 255
i_step[index] = 14
i_avalible[index] = 0
index = np.isnan(ref_06)
i_mark[index] = 255
i_step[index] = 15
i_avalible[index] = 0
index = np.isnan(ref_07)
i_mark[index] = 255
i_step[index] = 16
i_avalible[index] = 0
index = np.isnan(tbb_20)
i_mark[index] = 255
i_step[index] = 17
i_avalible[index] = 0
index = np.isnan(tbb_31)
i_mark[index] = 255
i_step[index] = 18
i_avalible[index] = 0
index = np.isnan(tbb_32)
i_mark[index] = 255
i_step[index] = 19
i_avalible[index] = 0
# CORRECT SATURATION VALUE AFTER SOLAR ZENITH ANGLE CORRECTING.
cossl = 1.0
ref_01 = ref_01 * cossl
ref_02 = ref_01 * cossl
ref_03 = ref_01 * cossl
ref_04 = ref_01 * cossl
ref_06 = ref_01 * cossl
ref_07 = ref_01 * cossl
# CHECK The Data QUALITY (ALL BANDS).
index = np.logical_or.reduce((ref_01 <= 0, ref_01 >= 100.0, ref_02 <= 0, ref_02 >= 100.0,
ref_03 <= 0, ref_03 >= 100.0, ref_04 <= 0, ref_04 >= 100.0,
ref_06 <= 0, ref_06 >= 100.0, ref_07 <= 0, ref_07 >= 100.0,
tbb_20 <= 170.0, tbb_20 >= 350.0, tbb_31 <= 170.0, tbb_31 >= 340.0,
tbb_32 <= 170.0, tbb_32 >= 340.0,))
i_mark[index] = 255
i_step[index] = 20
i_avalible[index] = 0
# -------------------------------------------------------------------------
# JUDGE & MARK SNOW
# !!! iTAG For marking The case of Data
# !!!!---- Notice ----!!!! 0: badData; 1: goodData unused; 2: goodData used.
i_tag = np.zeros(data_shape, dtype=np.int8)
idx_avalible = i_avalible == 1
ndvis = (ref_02 - ref_01) / (ref_02 + ref_01)
ndsi_6 = (ref_04 - ref_06) / (ref_04 + ref_06)
ndsi_7 = (ref_04 - ref_07) / (ref_04 + ref_07)
dr_16 = ref_01 - ref_06
dr_17 = ref_01 - 0.5 * ref_07
dt_01 = tbb_20 - tbb_31
dt_02 = tbb_20 - tbb_32
dt_12 = tbb_31 - tbb_32
rr_21 = ref_02 / ref_01
rr_21[rr_21 > 100] = 100
rr_46 = ref_04 / ref_06
rr_46[rr_46 > 100] = 100
# rr_47 = ref_04 / ref_07
# if rr_47 > 100.:
# rr_47 = 100
# dt_34 = tbb_20 - tbb_23
# dt_81 = tbb_29 - tbb_31
# dt_38 = tbb_20 - tbb_29
i_tag[idx_avalible] = 1
judge = np.full(data_shape, True, dtype=np.bool)
# !!! 20190614 暂时不用NDVI去判断水体和陆地
# !!! WHEN LAND-WATER MASK IS WRONG
# idx_lsm = np.logical_and(idx_avalible, ndvis > 0.9)
# lsm[idx_lsm] = 1
# idx_lsm = np.logical_and(idx_avalible, ndvis < -0.9)
# lsm[idx_lsm] = 0
# !!!========================================================================!!!
# !!!========================================================================!!!
# !!!! TESTING For WATER-BODY-PIXEL LSM = 0 !!!!
# !!!========================================================================!!!
# !!!========================================================================!!!
# !!!---!!!---!!! Notice : Test on Water Body ( LSM = 0 ) !!!---!!!---!!!
# !!!! TESTING For WATER-BODY ( INNER LAND, Except Glint Area )
# !!!!! TESTING For WATER-BODY ( OCEAN, Except Glint Area )
idx_ocean = np.logical_and(idx_avalible, lsm == 0)
# !!!! TESTING For WATER-BODY ( INNER LAND, Except Glint Area )
# !!!!! TESTING For WATER-BODY ( OCEAN, Except Glint Area )
idx_ = np.logical_or.reduce((rr_46 > 2., ndsi_6 > 0.38, tbb_31 > 274.5))
idx_ = np.logical_and(idx_ocean, judge, idx_)
i_mark[idx_] = 39
i_step[idx_] = 20
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and(idx_, ref_dem > 0)
i_mark[idx_] = 37
# !!!! TESTING For WATER-BODY ( INNER LAND, Except Glint Area )
# !!!!! TESTING For WATER-BODY ( OCEAN, Except Glint Area )
idx_ = np.logical_and.reduce((ref_02 < 11., ref_06 > 4., tbb_31 > 274.5))
idx_ = np.logical_or.reduce((ref_01 < 7.5, ref_02 < 6., idx_))
idx_ = np.logical_and(idx_ocean, judge, idx_)
i_mark[idx_] = 39
i_step[idx_] = 21
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and(idx_, ref_dem > 0)
i_mark[idx_] = 37
# !!!! CERTAIN CLOUD-1 (High Cloud ; Ice Cloud ; Cold Cloud)
# !!!! Temperature_Test by Referential BT11 Threshold
# !!!! Cirrus_Test by Referential R1.38 Threshold
# idx_ = np.logical_and.reduce((np.abs(ref_lat) > 42, ref_lat < 60, tbb_31 < np.min([ref_bt11um + 5., 245.15])))
# idx_ = np.logical_or(ref_26 > 7.5, idx_)
# idx_ = np.logical_and(idx_ocean, judge, idx_)
# i_mark[idx_] = 50
# i_step[idx_] = 22
# i_tag[idx_] = 2
# !!!! CERTAIN CLOUD-2 (Middle or Low Level Cloud, Except Glint Area)
idx1_ = np.logical_and(ref_06 > 8.5, tbb_20 > 278.5)
idx2_ = np.logical_and(dt_02 > 9.5, rr_46 < 8.)
idx_ = np.logical_or.reduce((idx1_, idx2_, ndsi_6 < 0.5))
idx_ = np.logical_and(idx_ocean, judge, idx_)
i_mark[idx_] = 50
i_step[idx_] = 23
i_tag[idx_] = 2
judge[idx_] = False
del idx1_, idx2_
idx_ = np.logical_and.reduce((ndsi_6 > 0.6, ndvis > -0.15, tbb_31 < 273.5, dr_16 > 20., ref_01 > 25.,
ref_06 > 4., ref_06 < 20.))
idx_ = np.logical_and(idx_ocean, judge, idx_)
i_mark[idx_] = 200
i_step[idx_] = 24
i_tag[idx_] = 2
idx_ = np.logical_and.reduce((ndsi_6 > 0.6, ndvis < -0.03, tbb_31 < 274.5, dr_16 > 9., dr_16 < 60.,
ref_01 > 10., ref_01 < 60., ref_06 < 10., rr_46 > 10.))
idx_ = np.logical_and(idx_ocean, judge, idx_)
i_mark[idx_] = 100
i_step[idx_] = 25
i_tag[idx_] = 2
# !!!------------------------------------------------------------------------!!!
# !!!! Monthly_SnowPackLine_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
# 监测雪线
# !
# ! Eliminate Snow by Monthly_SnowPackLine_LUT Cloud-Test for rehandled pixel
# !
_condition = np.logical_or(i_mark == 200, i_mark == 100)
i_nor_s = np.zeros(data_shape, dtype=np.int8)
i_nor_s[ref_lat > 0] = 1
_condition2 = np.abs(r_mon_snow_line[np.round((ref_lon + 180) * 10).astype(np.int16),
int(j_month), i_nor_s]) > abs(ref_lat)
idx_ = np.logical_and.reduce((idx_ocean, judge, _condition, _condition2))
i_mark[idx_] = 50
i_step[idx_] = 26
i_tag[idx_] = 2
judge[idx_] = False
del _condition2
idx_ = np.logical_and.reduce((idx_ocean, judge, np.abs(ref_lat) < 30, _condition))
i_mark[idx_] = 50
i_step[idx_] = 27
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_ocean, judge, _condition))
judge[idx_] = False
# !!!! TESTING For WATER-BODY FROM UNKOWN PIXELS( INNER LAND, Except Glint Area )
# !!!! TESTING For WATER-BODY FROM UNKOWN PIXELS ( OCEAN, Except Glint Area )
idx_ = np.logical_and.reduce((idx_ocean, judge, ref_06 < 6, dt_02 < 5, rr_46 > 3))
i_mark[idx_] = 39
i_step[idx_] = 28
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and(idx_ocean, judge)
i_mark[idx_] = 1
i_step[idx_] = 30
i_tag[idx_] = 2
judge[idx_] = False
del idx_ocean
# !!!========================================================================!!!
# !!!========================================================================!!!
# !!!! TESTING For LAND-PIXEL LSM = 1 !!!!
# !!!========================================================================!!!
# !!!========================================================================!!!
idx_land = np.logical_and(idx_avalible, lsm == 1)
# !!!! TESTING For Clear Land ( For Forest )
# !!!! CERTAIN
idx_ = np.logical_and.reduce((idx_land, judge, tbb_31 > 278, ndvis > 0.2))
i_mark[idx_] = 25
i_step[idx_] = 31
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For Clear Land ( Including some Dust Storm above Desert )
# !!!! CERTAIN
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 < -0.2))
i_mark[idx_] = 25
i_step[idx_] = 32
i_tag[idx_] = 2
judge[idx_] = False
# !!!---!!!---!!! Notice : Test on Land ( LSM = 1 ) !!!---!!!---!!!
idx_temp = np.logical_and(dt_12 < -0.1, ndsi_6 < 0.08)
# !!!! TESTING For Cloud ( Including some Dust Storm above Desert )
# idx_ = np.logical_and.reduce((idx_land, judge, idx_temp, ref_26 > 5))
# i_mark[idx_] = 50
# i_step[idx_] = 34
# i_tag[idx_] = 2
# judge[idx_] = False
# !!!! TESTING For Clear Land ( Including some Dust Storm above Desert )
# !!!! TESTING For Clear Land ( Including some Dust Storm above Desert )
idx_ = np.logical_and.reduce((idx_land, judge, idx_temp, dt_01 < 28))
i_mark[idx_] = 25
i_step[idx_] = 34
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For Cloud ( Including some Dust Storm above Desert )
idx_ = np.logical_and.reduce((idx_land, judge, idx_temp, dt_01 >= 28))
i_mark[idx_] = 25
i_step[idx_] = 35
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For Clear Land ( Including Desert and Non-High-LAT Vegetation )
# !!!! CERTAIN
idx_ = np.logical_and.reduce((idx_land, judge, dr_16 < -7.5))
i_mark[idx_] = 25
i_step[idx_] = 36
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For Snow on Land ( Certainly Snow by )
# !!!! CERTAIN
idx_ = np.logical_and.reduce((idx_land, judge, rr_46 > 5.5, ref_01 > 65, tbb_31 > 240.5, tbb_31 < 276.5))
i_mark[idx_] = 200
i_step[idx_] = 37
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For Cloud ( mid-lower Cloud AFTER Desert is marked )
idx_ = np.logical_and.reduce((idx_land, judge, ref_dem < 1800, ref_06 > 28, ref_01 > 34, ref_02 > 44))
i_mark[idx_] = 50
i_step[idx_] = 38
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, ref_dem < 1800, dt_01 > 20.5))
i_mark[idx_] = 50
i_step[idx_] = 39
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, ref_dem >= 1800, ref_06 > (28. + (ref_dem - 1800.) * 0.004),
ref_01 > 34, ref_02 > 44))
i_mark[idx_] = 50
i_step[idx_] = 40
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, ref_dem >= 1800, dt_01 > (20.5 + (ref_dem - 1800.) * 0.002)))
i_mark[idx_] = 50
i_step[idx_] = 41
i_tag[idx_] = 2
judge[idx_] = False
idx_temp = np.logical_or.reduce((tbb_31 < 170, tbb_31 > 335, tbb_32 < 170, tbb_32 > 335, a_satz > 8, ndsi_6 > 0.5))
test_dtb = tbb_31 - tbb_32
i_test_t11 = np.round(tbb_31).astype(np.int16)
i_test_t11[i_test_t11 <= 250] = 250
i_test_t11[i_test_t11 >= 310] = 310
sec_sza = 100. / np.cos(a_satz)
i_sec_sza = np.round(sec_sza).astype(np.int16)
i_sec_sza[i_sec_sza >= 250] = 250
idx_ = np.logical_and.reduce((idx_land, judge, ~idx_temp))
idx_ = np.logical_and(idx_, test_dtb > delta_bt_lut[np.round(i_test_t11 - 250).astype(np.int16), i_sec_sza-100])
i_mark[idx_] = 50
i_step[idx_] = 42
i_tag[idx_] = 2
judge[idx_] = False
del idx_temp
# !!!! CERTAIN CLOUD-1 (High Cloud ; Ice Cloud ; Cold Cloud)
# !!!! Temperature_Test by Referential BT11 Threshold
# !!!! Cirrus_Test by Referential R1.38 Threshold
compared_t11_hai_lat_a = ref_bt11um + 8. - ref_dem / 1000.
compared_t11_hai_lat_b = 250. - ref_dem / 1000.
compared_t11_hai_lat = np.minimum(compared_t11_hai_lat_a, compared_t11_hai_lat_b)
compared_t11_low_lat_a = ref_bt11um + 12. - ref_dem / 400.
compared_t11_low_lat_b = 260. - ref_dem / 400.
compared_t11_low_lat = np.maximum(compared_t11_low_lat_a, compared_t11_low_lat_b)
idx_1 = np.logical_and.reduce((ref_lat_abs >= 40, ref_lat_abs <= 57, tbb_31 < compared_t11_hai_lat))
idx_2 = np.logical_and.reduce((ref_lat_abs >= 17, ref_lat_abs <= 40, tbb_31 < compared_t11_low_lat))
idx_ = np.logical_or(idx_1, idx_2)
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 50
i_step[idx_] = 43
i_tag[idx_] = 2
judge[idx_] = False
del idx_1, idx_2
del compared_t11_hai_lat_a, compared_t11_hai_lat_b, compared_t11_hai_lat
del compared_t11_low_lat_a, compared_t11_low_lat_b, compared_t11_low_lat
# !!!! CLOUD-1 (High Cloud ; Ice Cloud ; Cold Cloud)
# if judge:
# compared_ref26 = 14.5 + ref_dem / 500.
# if (ref_26 > compared_ref26 and dt_01 > 21.) or \
# (ref_26 > compared_ref26 - 7. and tbb_31 < ref_bt11um + 8. and ndsi_6 > -0.11):
# i_mark[row, col] = 50
# i_step[row, col] = 44
# i_tag = 2
# judge = False
# !!!!! TESTING For LAND WITH CLEAR SKY
# !!!!! CERTAIN
idx_1 = np.logical_and(ndvis > 0.24, ndsi_6 < 0.14)
idx_2 = np.logical_and(rr_21 > 1.42, ndsi_6 < 0.145)
idx_3 = np.logical_and(dr_17 < 14, ndsi_6 < 0.135)
idx_ = np.logical_or.reduce((idx_1, idx_2, idx_3, ndsi_6 < -0.21, ndsi_7 < -0.08, dr_16 < -9.8))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 45
i_tag[idx_] = 2
del idx_3
# !!!! TESTING For Clear Land ( For Forest , small number )
# !!!! CERTAIN
idx_1 = np.logical_and(ndvis > 0.24, ndsi_6 < 0.15)
idx_2 = np.logical_and(rr_21 > 1.4, ndsi_6 < 0.15)
idx_ = np.logical_or.reduce((idx_1, idx_2, ndsi_6 < -0.21, dr_16 < -9.5))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 46
i_tag[idx_] = 2
judge[idx_] = False
del idx_1, idx_2
# !!!! TESTING For Snow in Forest by NDVI-NDSI6-T11
# !!!------------------------------------------------------------------------!!!
# !!!! NDVI_NDSI_LUT SNOW-TEST
# !!!------------------------------------------------------------------------!!!
idx_ = np.logical_and.reduce((ndvis > 0.1, tbb_31 < 277,
ndsi_6 > y_ndsi_x_ndvi[np.round(ndvis * 100).astype(np.int16), 1]))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 200
i_step[idx_] = 47
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For SNOW ON LAND ( For FOREST-SNOW )
# !!!! SNOW-0
idx_ = np.logical_and.reduce((ndsi_6 > 0.18, ref_lat_abs > 36, tbb_31 > 240.15, tbb_31 < 272.15,
ndvis > 0.16, ref_02 > 20, ref_06 < 17))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 200
i_step[idx_] = 48
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and(idx_land, judge, i_mark == 25)
judge[idx_] = False
# !!!! TESTING For SNOW ON LAND ( For Thawy Snow )
# !!!! SNOW-1
# if judge:
# if ref_dem > 2000. and ndsi_6 > 0.33 and \
# 266.15 < tbb_20 < 285.15 and \
# 264.15 < tbb_31 < 275.15 and \
# 6.5 < dt_01 < 21. and \
# 41. < ref_01 < 79. and \
# 12.5 < ref_06 < 24.5 and \
# 9.5 < ref_26 < 17.:
# i_mark[row, col] = 200
# i_step[row, col] = 49
# i_tag = 2
# judge = False
# !!!! TESTING For Thin-Snow by Using R01-R06-NDSI6
# !!!! SNOW-2
ref_bt11um_min = ref_bt11um.copy()
ref_bt11um_min[ref_bt11um_min > 265.15] = 265.15
idx_ = np.logical_and.reduce((ref_dem > 750, tbb_31 > ref_bt11um_min, tbb_31 < 282, ref_01 > 20,
ref_01 < 55, ref_06 > 10, ref_06 < 24, ndsi_6 > (0.68 - 0.0262 * ref_06),
ndsi_6 > (-0.33 + 0.0164 * ref_01)))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 200
i_step[idx_] = 50
i_tag[idx_] = 2
judge[idx_] = False
# !!!! TESTING For SNOW ON LAND
# !!!! SNOW-3
snow_ref_bt11um = np.full(data_shape, 268, dtype=np.float32)
snow_ref_bt11um[ref_lat > 40] = ref_bt11um[ref_lat > 40] + 5.
idx_ = np.logical_or(ref_lat_abs > 20, ref_lat_abs < 40)
snow_ref_bt11um[idx_] = ref_bt11um[idx_] + 18 - ref_dem[idx_] / 800
idx_1 = np.logical_and.reduce((idx_land, judge, rr_46 > 3.1, snow_ref_bt11um < tbb_31, tbb_31 < 278))
idx_ = np.logical_and(idx_1, ref_lat_abs > 20)
i_mark[idx_] = 200
i_step[idx_] = 51
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and(idx_1, ~(ref_lat_abs > 20))
i_mark[idx_] = 50
i_step[idx_] = 52
i_tag[idx_] = 2
judge[idx_] = False
del idx_1
# !!!! TESTING For SNOW ON LAND
# !!!! SNOW-4
idx_ = np.logical_and.reduce((idx_land, judge, dr_16 > 10, ref_06 < 19.5, tbb_31 < 276.15,
rr_46 > 1.5, 2.45 < dt_02, dt_02 < 15, ref_02 > 26,
tbb_31 > ref_bt11um + 5.0))
i_mark[idx_] = 200
i_step[idx_] = 53
i_tag[idx_] = 2
# !!!! TESTING For SNOW ON LAND
# !!!! SNOW-5
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 > 0.52, tbb_31 > ref_bt11um + 2, tbb_31 < 278))
i_mark[idx_] = 200
i_step[idx_] = 54
i_tag[idx_] = 2
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 > 0.12, ndsi_6 < 0.52, tbb_31 > ref_bt11um, tbb_31 < 276.15,
ndvis > 0.16, ref_02 > 26))
i_mark[idx_] = 200
i_step[idx_] = 55
i_tag[idx_] = 2
# !!!! TESTING For SNOW ON LAND
# !!!! Eliminate_Snow-1
# !!!------------------------------------------------------------------------!!!
# !!!! IceCloud_Overlay_WaterCloud_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
# if judge:
# if i_mark[row, col] == 200. and ref_dem < 3000 and \
# 0.38 < ndsi_6 < ref_06 < 25. and \
# 0.01 < ref_26 < 55. and \
# 235. < tbb_31 < 275.:
# ice_cloud_sums = 0
# if ref_26 * 100 > y_r138_x_r164[int(round(ref_06 * 10)), 1]:
# ice_cloud_sums += 1
# if ref_06 * 100. > y_r164_x_t11[int(round(tbb_31 * 10)), 1]:
# ice_cloud_sums += 1
# if ref_06 * 100. > y_r164_x_r138[int(round(ref_26 * 10)), 1]:
# ice_cloud_sums += 1
# if dt_12 * 100. > y_t11_m_t12_x_r164[int(round(ref_06 * 10.)), 1]:
# ice_cloud_sums += 1
# if ice_cloud_sums > 2:
# i_mark[row, col] = 50
# i_step[row, col] = 56
# i_tag = 2
# judge = False
# !!!! TESTING For SNOW ON LAND
# !!!! Eliminate_Snow-2
# !!!------------------------------------------------------------------------!!!
# !!!! Monthly_SnowPackLine_LUT CLOUD-TEST For The REHANDLED DOT
# !!!------------------------------------------------------------------------!!!
_condition = np.logical_or(i_mark == 200, i_mark == 100)
i_nor_s = np.zeros(data_shape, dtype=np.int8)
i_nor_s[ref_lat > 0] = 1
_condition2 = np.abs(r_mon_snow_line[np.round((ref_lon + 180) * 10).astype(np.int16),
int(j_month), i_nor_s]) > abs(ref_lat)
idx_ = np.logical_and.reduce((idx_land, judge, _condition, _condition2))
i_mark[idx_] = 50
i_step[idx_] = 57
i_tag[idx_] = 2
judge[idx_] = False
del _condition2
idx_ = np.logical_and.reduce((idx_land, judge, i_mark == 200))
judge[idx_] = False
del _condition
# !!!! TESTING For CLOUD
# if judge:
# if ref_06 > 29. and \
# (ref_26 > 13.5 or (ref_26 > 7.5 and tbb_31 < ref_bt11um + 8)) and \
# ref_01 > 24.:
# i_mark[row, col] = 50
# i_step[row, col] = 58
# i_tag = 2
# judge = False
# !!!! Mending TEST For Clear Land
idx_ = np.logical_and(ndvis > 0.11, tbb_31 < 280)
idx_ = np.logical_or.reduce((idx_, dr_16 < 0, ndsi_6 < -0.15))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 59
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, ndvis > 0.11, tbb_31 < 280))
i_mark[idx_] = 50
i_step[idx_] = 60
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, dr_16 < 0))
i_mark[idx_] = 25
i_step[idx_] = 61
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((idx_land, judge, ndsi_6 < -0.15))
i_mark[idx_] = 25
i_step[idx_] = 62
i_tag[idx_] = 2
judge[idx_] = False
# !!!! Mending TEST For Clear Land and Cloud by Hai-T11
idx_ = np.logical_and(tbb_31 > 280, rr_46 < 1.35)
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 66
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((tbb_31 < 280, ref_dem >= 3000, ref_01 >= 40, ref_06 < 20,
tbb_20 < 295, rr_46 > 1.3))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 200
i_step[idx_] = 67
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((tbb_31 < 280, rr_46 < 1.4, ref_02 < 28))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 25
i_step[idx_] = 68
i_tag[idx_] = 2
judge[idx_] = False
idx_ = np.logical_and.reduce((tbb_31 < 280, rr_46 < 1.4, ref_02 >= 28))
idx_ = np.logical_and.reduce((idx_land, judge, idx_))
i_mark[idx_] = 50
i_step[idx_] = 69
i_tag[idx_] = 2
judge[idx_] = False
# !!!! UNKNOWN TYPE
idx_ = np.logical_and.reduce((idx_land, judge, i_tag == 2))
i_mark[idx_] = 1
i_step[idx_] = 99
i_tag[idx_] = 2
judge[idx_] = False
# judge = True
#
# # !!!! Eliminate_Snow-3
#
# if judge:
# if i_avalible == 1:
# # !!!------------------------------------------------------------------------!!!
# # !!!! Monthly_SnowPackLine_LUT CLOUD-TEST For The REHANDLED DOT
# # !!!------------------------------------------------------------------------!!!
# if ref_lat > 0:
# i_nor_s = 0
# else:
# i_nor_s = 1
# if np.abs(r_mon_snow_line[int(round((ref_lon + 180) * 10)), j_month, i_nor_s]) > \
# ref_lat_abs and (i_mark[row, col] == 200. or i_mark[row, col] == 100):
# i_mark[row, col] = 50
# i_step[row, col] = 57
# i_tag = 2
# judge = False
# # !!!! Take Snow-on-Ice Pixel above Water-body as ICE
# if judge:
# if lsm == 0 and i_mark[row, col] == 200:
# i_mark[row, col] = 100
#
# if judge:
# if i_mark[row, col] == 1:
# if lsm == 0:
# if ref_02 < 18.:
# i_mark[row, col] = 39
# if ref_02 > 19.:
# i_mark[row, col] = 50
# # if ref_26 > 1.5:
# # i_mark[row, col] = 50
# i_step[row, col] = 72
# else:
# if ndsi_6 > 0.27 and tbb_31 < 273.15 and 2.45 < dt_01 < 14.10:
# i_mark[row, col] = 200
# i_step[row, col] = 74
# else:
# if 9.1 < ref_02 < 26.:
# i_mark[row, col] = 25
# if 1.1 < ref_02 < 8.:
# i_mark[row, col] = 25
# if ref_02 > 46.:
# i_mark[row, col] = 50
# # if ref_26 > 10.:
# # i_mark[row, col] = 50
# i_step[row, col] = 76
# !!!==========================================================================!!!
# !
# ! SE by Tree-Decision Algorithm after CM
# !
# !!!--------------------------------------------------------------------------!!!
# !!!! Value = 0 : cloudy
# !!!! Value = 1 : uncertain
# !!!! Value = 2 : probably clear
# !!!! Value = 3 : confident clear
# !!!--------------------------------------------------------------------------!!!
if i_cm is not None:
idx_ = np.logical_and.reduce((i_avalible == 1, i_cm == 1, i_mark == 1))
i_mark[idx_] = 50
i_step[idx_] = 80
idx_ = np.logical_or(i_mark == 50, i_mark == 1)
idx_ = np.logical_and.reduce((i_avalible, i_cm == 3, idx_))
i_mark[idx_] = 25
i_step[idx_] = 82
idx_ = np.logical_and.reduce((i_avalible, i_mark == 200, i_tag < 3))
i_mark[idx_] = 200
i_step[idx_] = 83
return i_mark, i_step
def main(in_file):
if in_file:
pass
in_file_l1 = '/DATA3/HZ_HMW8/H08_L1/ORIGINAL/H08_HDF/20190613/AHI8_OBI_2000M_NOM_20190613_1150.hdf'
in_file_geo = '/DATA3/HZ_HMW8/H08_L1/ORIGINAL/H08_GEO/H08_GEO_ORIGINAL_2000M.hdf5'
in_file_cloud = None
ndsi(in_file_l1, in_file_geo, in_file_cloud)
# ######################## 程序全局入口 ##############################
if __name__ == "__main__":
# 获取程序参数接口
ARGS = sys.argv[1:]
HELP_INFO = \
u"""
[arg1]:yaml_path
[example]: python app.py arg1
"""
if "-h" in ARGS:
print(HELP_INFO)
sys.exit(-1)
if len(ARGS) != 1:
print(HELP_INFO)
sys.exit(-1)
else:
ARG1 = ARGS[0]
main(ARG1)
| [
"numpy.sqrt",
"numpy.arccos",
"initialize.load_yaml_file",
"numpy.logical_and.reduce",
"sys.exit",
"numpy.sin",
"load.ReadAhiL1",
"numpy.maximum",
"numpy.round",
"numpy.abs",
"numpy.ones",
"os.path.dirname",
"numpy.isnan",
"numpy.cos",
"numpy.minimum",
"numpy.logical_and",
"os.path.j... | [((7744, 7779), 'os.path.join', 'os.path.join', (['path', '"""ndsi_cfg.yaml"""'], {}), "(path, 'ndsi_cfg.yaml')\n", (7756, 7779), False, 'import os\n'), ((7847, 7882), 'initialize.load_yaml_file', 'load_yaml_file', (['name_list_swath_snc'], {}), '(name_list_swath_snc)\n', (7861, 7882), False, 'from initialize import load_yaml_file\n'), ((8320, 8375), 'os.path.join', 'os.path.join', (['path', 'inn_put_para_path', 'inn_put_root_l01'], {}), '(path, inn_put_para_path, inn_put_root_l01)\n', (8332, 8375), False, 'import os\n'), ((8399, 8454), 'os.path.join', 'os.path.join', (['path', 'inn_put_para_path', 'inn_put_root_l02'], {}), '(path, inn_put_para_path, inn_put_root_l02)\n', (8411, 8454), False, 'import os\n'), ((8478, 8533), 'os.path.join', 'os.path.join', (['path', 'inn_put_para_path', 'inn_put_root_l03'], {}), '(path, inn_put_para_path, inn_put_root_l03)\n', (8490, 8533), False, 'import os\n'), ((9024, 9047), 'numpy.zeros', 'np.zeros', (['(3601, 12, 2)'], {}), '((3601, 12, 2))\n', (9032, 9047), True, 'import numpy as np\n'), ((9779, 9848), 'load.ReadAhiL1', 'ReadAhiL1', (['in_file_l1'], {'geo_file': 'in_file_geo', 'cloud_file': 'in_file_cloud'}), '(in_file_l1, geo_file=in_file_geo, cloud_file=in_file_cloud)\n', (9788, 9848), False, 'from load import ReadAhiL1\n'), ((10806, 10833), 'numpy.sqrt', 'np.sqrt', (['(1.0 - f_xun_n ** 2)'], {}), '(1.0 - f_xun_n ** 2)\n', (10813, 10833), True, 'import numpy as np\n'), ((12965, 13022), 'numpy.logical_and', 'np.logical_and', (['(d_sensor_zenith > 0)', '(d_sensor_zenith < 90)'], {}), '(d_sensor_zenith > 0, d_sensor_zenith < 90)\n', (12979, 13022), True, 'import numpy as np\n'), ((13241, 13304), 'numpy.logical_and', 'np.logical_and', (['(d_sensor_azimuth > -180)', '(d_sensor_azimuth < 180)'], {}), '(d_sensor_azimuth > -180, d_sensor_azimuth < 180)\n', (13255, 13304), True, 'import numpy as np\n'), ((13520, 13576), 'numpy.logical_and', 'np.logical_and', (['(d_solar_zenith > 0)', '(d_solar_zenith < 180)'], {}), 
'(d_solar_zenith > 0, d_solar_zenith < 180)\n', (13534, 13576), True, 'import numpy as np\n'), ((13789, 13850), 'numpy.logical_and', 'np.logical_and', (['(d_solar_azimuth > -180)', '(d_solar_azimuth < 180)'], {}), '(d_solar_azimuth > -180, d_solar_azimuth < 180)\n', (13803, 13850), True, 'import numpy as np\n'), ((14858, 14919), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(i_mask == 1, i_mask == 2, i_mask == 3)'], {}), '((i_mask == 1, i_mask == 2, i_mask == 3))\n', (14878, 14919), True, 'import numpy as np\n'), ((15020, 15047), 'numpy.full', 'np.full', (['data_shape', 'np.nan'], {}), '(data_shape, np.nan)\n', (15027, 15047), True, 'import numpy as np\n'), ((16124, 16144), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (16132, 16144), True, 'import numpy as np\n'), ((16158, 16178), 'numpy.zeros', 'np.zeros', (['data_shape'], {}), '(data_shape)\n', (16166, 16178), True, 'import numpy as np\n'), ((16638, 16653), 'numpy.arccos', 'np.arccos', (['temp'], {}), '(temp)\n', (16647, 16653), True, 'import numpy as np\n'), ((16768, 16802), 'numpy.ones', 'np.ones', (['data_shape'], {'dtype': 'np.int8'}), '(data_shape, dtype=np.int8)\n', (16775, 16802), True, 'import numpy as np\n'), ((16816, 16832), 'numpy.isnan', 'np.isnan', (['a_sata'], {}), '(a_sata)\n', (16824, 16832), True, 'import numpy as np\n'), ((16917, 16933), 'numpy.isnan', 'np.isnan', (['a_satz'], {}), '(a_satz)\n', (16925, 16933), True, 'import numpy as np\n'), ((17018, 17034), 'numpy.isnan', 'np.isnan', (['a_sunz'], {}), '(a_sunz)\n', (17026, 17034), True, 'import numpy as np\n'), ((17119, 17135), 'numpy.isnan', 'np.isnan', (['a_suna'], {}), '(a_suna)\n', (17127, 17135), True, 'import numpy as np\n'), ((17330, 17347), 'numpy.isnan', 'np.isnan', (['ref_lon'], {}), '(ref_lon)\n', (17338, 17347), True, 'import numpy as np\n'), ((17432, 17449), 'numpy.isnan', 'np.isnan', (['ref_lat'], {}), '(ref_lat)\n', (17440, 17449), True, 'import numpy as np\n'), ((17534, 17551), 'numpy.isnan', 
'np.isnan', (['ref_dem'], {}), '(ref_dem)\n', (17542, 17551), True, 'import numpy as np\n'), ((17636, 17649), 'numpy.isnan', 'np.isnan', (['lsm'], {}), '(lsm)\n', (17644, 17649), True, 'import numpy as np\n'), ((17853, 17880), 'numpy.full', 'np.full', (['data_shape', 'np.nan'], {}), '(data_shape, np.nan)\n', (17860, 17880), True, 'import numpy as np\n'), ((17900, 17915), 'numpy.abs', 'np.abs', (['ref_lat'], {}), '(ref_lat)\n', (17906, 17915), True, 'import numpy as np\n'), ((17953, 18001), 'numpy.logical_and', 'np.logical_and', (['index', '(ref_lat_abs < b_hai_t_lat)'], {}), '(index, ref_lat_abs < b_hai_t_lat)\n', (17967, 18001), True, 'import numpy as np\n'), ((18100, 18148), 'numpy.logical_and', 'np.logical_and', (['index', '(ref_lat_abs > a_low_t_lat)'], {}), '(index, ref_lat_abs > a_low_t_lat)\n', (18114, 18148), True, 'import numpy as np\n'), ((18247, 18337), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(index, ref_lat_abs <= a_low_t_lat, ref_lat_abs >= b_hai_t_lat)'], {}), '((index, ref_lat_abs <= a_low_t_lat, ref_lat_abs >=\n b_hai_t_lat))\n', (18268, 18337), True, 'import numpy as np\n'), ((18454, 18502), 'numpy.logical_and', 'np.logical_and', (['index', '(ref_lat_abs < b_hai_t_lat)'], {}), '(index, ref_lat_abs < b_hai_t_lat)\n', (18468, 18502), True, 'import numpy as np\n'), ((18601, 18649), 'numpy.logical_and', 'np.logical_and', (['index', '(ref_lat_abs > a_low_t_lat)'], {}), '(index, ref_lat_abs > a_low_t_lat)\n', (18615, 18649), True, 'import numpy as np\n'), ((18748, 18838), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(index, ref_lat_abs >= b_hai_t_lat, ref_lat_abs <= a_low_t_lat)'], {}), '((index, ref_lat_abs >= b_hai_t_lat, ref_lat_abs <=\n a_low_t_lat))\n', (18769, 18838), True, 'import numpy as np\n'), ((19403, 19419), 'numpy.isnan', 'np.isnan', (['ref_01'], {}), '(ref_01)\n', (19411, 19419), True, 'import numpy as np\n'), ((19506, 19522), 'numpy.isnan', 'np.isnan', (['ref_02'], {}), '(ref_02)\n', (19514, 19522), True, 
'import numpy as np\n'), ((19609, 19625), 'numpy.isnan', 'np.isnan', (['ref_03'], {}), '(ref_03)\n', (19617, 19625), True, 'import numpy as np\n'), ((19712, 19728), 'numpy.isnan', 'np.isnan', (['ref_04'], {}), '(ref_04)\n', (19720, 19728), True, 'import numpy as np\n'), ((19815, 19831), 'numpy.isnan', 'np.isnan', (['ref_06'], {}), '(ref_06)\n', (19823, 19831), True, 'import numpy as np\n'), ((19918, 19934), 'numpy.isnan', 'np.isnan', (['ref_07'], {}), '(ref_07)\n', (19926, 19934), True, 'import numpy as np\n'), ((20021, 20037), 'numpy.isnan', 'np.isnan', (['tbb_20'], {}), '(tbb_20)\n', (20029, 20037), True, 'import numpy as np\n'), ((20124, 20140), 'numpy.isnan', 'np.isnan', (['tbb_31'], {}), '(tbb_31)\n', (20132, 20140), True, 'import numpy as np\n'), ((20227, 20243), 'numpy.isnan', 'np.isnan', (['tbb_32'], {}), '(tbb_32)\n', (20235, 20243), True, 'import numpy as np\n'), ((20625, 20949), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(ref_01 <= 0, ref_01 >= 100.0, ref_02 <= 0, ref_02 >= 100.0, ref_03 <= 0, \n ref_03 >= 100.0, ref_04 <= 0, ref_04 >= 100.0, ref_06 <= 0, ref_06 >= \n 100.0, ref_07 <= 0, ref_07 >= 100.0, tbb_20 <= 170.0, tbb_20 >= 350.0, \n tbb_31 <= 170.0, tbb_31 >= 340.0, tbb_32 <= 170.0, tbb_32 >= 340.0)'], {}), '((ref_01 <= 0, ref_01 >= 100.0, ref_02 <= 0, ref_02 >= \n 100.0, ref_03 <= 0, ref_03 >= 100.0, ref_04 <= 0, ref_04 >= 100.0, \n ref_06 <= 0, ref_06 >= 100.0, ref_07 <= 0, ref_07 >= 100.0, tbb_20 <= \n 170.0, tbb_20 >= 350.0, tbb_31 <= 170.0, tbb_31 >= 340.0, tbb_32 <= \n 170.0, tbb_32 >= 340.0))\n', (20645, 20949), True, 'import numpy as np\n'), ((21389, 21424), 'numpy.zeros', 'np.zeros', (['data_shape'], {'dtype': 'np.int8'}), '(data_shape, dtype=np.int8)\n', (21397, 21424), True, 'import numpy as np\n'), ((22086, 22126), 'numpy.full', 'np.full', (['data_shape', '(True)'], {'dtype': 'np.bool'}), '(data_shape, True, dtype=np.bool)\n', (22093, 22126), True, 'import numpy as np\n'), ((23030, 23068), 'numpy.logical_and', 
'np.logical_and', (['idx_avalible', '(lsm == 0)'], {}), '(idx_avalible, lsm == 0)\n', (23044, 23068), True, 'import numpy as np\n'), ((23217, 23283), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(rr_46 > 2.0, ndsi_6 > 0.38, tbb_31 > 274.5)'], {}), '((rr_46 > 2.0, ndsi_6 > 0.38, tbb_31 > 274.5))\n', (23237, 23283), True, 'import numpy as np\n'), ((23294, 23332), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge', 'idx_'], {}), '(idx_ocean, judge, idx_)\n', (23308, 23332), True, 'import numpy as np\n'), ((23433, 23466), 'numpy.logical_and', 'np.logical_and', (['idx_', '(ref_dem > 0)'], {}), '(idx_, ref_dem > 0)\n', (23447, 23466), True, 'import numpy as np\n'), ((23637, 23705), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ref_02 < 11.0, ref_06 > 4.0, tbb_31 > 274.5)'], {}), '((ref_02 < 11.0, ref_06 > 4.0, tbb_31 > 274.5))\n', (23658, 23705), True, 'import numpy as np\n'), ((23715, 23771), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(ref_01 < 7.5, ref_02 < 6.0, idx_)'], {}), '((ref_01 < 7.5, ref_02 < 6.0, idx_))\n', (23735, 23771), True, 'import numpy as np\n'), ((23782, 23820), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge', 'idx_'], {}), '(idx_ocean, judge, idx_)\n', (23796, 23820), True, 'import numpy as np\n'), ((23921, 23954), 'numpy.logical_and', 'np.logical_and', (['idx_', '(ref_dem > 0)'], {}), '(idx_, ref_dem > 0)\n', (23935, 23954), True, 'import numpy as np\n'), ((24537, 24581), 'numpy.logical_and', 'np.logical_and', (['(ref_06 > 8.5)', '(tbb_20 > 278.5)'], {}), '(ref_06 > 8.5, tbb_20 > 278.5)\n', (24551, 24581), True, 'import numpy as np\n'), ((24594, 24634), 'numpy.logical_and', 'np.logical_and', (['(dt_02 > 9.5)', '(rr_46 < 8.0)'], {}), '(dt_02 > 9.5, rr_46 < 8.0)\n', (24608, 24634), True, 'import numpy as np\n'), ((24645, 24695), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(idx1_, idx2_, ndsi_6 < 0.5)'], {}), '((idx1_, idx2_, ndsi_6 < 0.5))\n', (24665, 24695), True, 'import numpy as 
np\n'), ((24707, 24745), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge', 'idx_'], {}), '(idx_ocean, judge, idx_)\n', (24721, 24745), True, 'import numpy as np\n'), ((24867, 24997), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ndsi_6 > 0.6, ndvis > -0.15, tbb_31 < 273.5, dr_16 > 20.0, ref_01 > 25.0, \n ref_06 > 4.0, ref_06 < 20.0)'], {}), '((ndsi_6 > 0.6, ndvis > -0.15, tbb_31 < 273.5, dr_16 >\n 20.0, ref_01 > 25.0, ref_06 > 4.0, ref_06 < 20.0))\n', (24888, 24997), True, 'import numpy as np\n'), ((25035, 25073), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge', 'idx_'], {}), '(idx_ocean, judge, idx_)\n', (25049, 25073), True, 'import numpy as np\n'), ((25151, 25313), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ndsi_6 > 0.6, ndvis < -0.03, tbb_31 < 274.5, dr_16 > 9.0, dr_16 < 60.0, \n ref_01 > 10.0, ref_01 < 60.0, ref_06 < 10.0, rr_46 > 10.0)'], {}), '((ndsi_6 > 0.6, ndvis < -0.03, tbb_31 < 274.5, dr_16 >\n 9.0, dr_16 < 60.0, ref_01 > 10.0, ref_01 < 60.0, ref_06 < 10.0, rr_46 >\n 10.0))\n', (25172, 25313), True, 'import numpy as np\n'), ((25345, 25383), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge', 'idx_'], {}), '(idx_ocean, judge, idx_)\n', (25359, 25383), True, 'import numpy as np\n'), ((25822, 25865), 'numpy.logical_or', 'np.logical_or', (['(i_mark == 200)', '(i_mark == 100)'], {}), '(i_mark == 200, i_mark == 100)\n', (25835, 25865), True, 'import numpy as np\n'), ((25881, 25916), 'numpy.zeros', 'np.zeros', (['data_shape'], {'dtype': 'np.int8'}), '(data_shape, dtype=np.int8)\n', (25889, 25916), True, 'import numpy as np\n'), ((26127, 26193), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_ocean, judge, _condition, _condition2)'], {}), '((idx_ocean, judge, _condition, _condition2))\n', (26148, 26193), True, 'import numpy as np\n'), ((26490, 26543), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_ocean, judge, _condition)'], {}), '((idx_ocean, judge, _condition))\n', 
(26511, 26543), True, 'import numpy as np\n'), ((26752, 26827), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_ocean, judge, ref_06 < 6, dt_02 < 5, rr_46 > 3)'], {}), '((idx_ocean, judge, ref_06 < 6, dt_02 < 5, rr_46 > 3))\n', (26773, 26827), True, 'import numpy as np\n'), ((26928, 26960), 'numpy.logical_and', 'np.logical_and', (['idx_ocean', 'judge'], {}), '(idx_ocean, judge)\n', (26942, 26960), True, 'import numpy as np\n'), ((27507, 27545), 'numpy.logical_and', 'np.logical_and', (['idx_avalible', '(lsm == 1)'], {}), '(idx_avalible, lsm == 1)\n', (27521, 27545), True, 'import numpy as np\n'), ((27629, 27696), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, tbb_31 > 278, ndvis > 0.2)'], {}), '((idx_land, judge, tbb_31 > 278, ndvis > 0.2))\n', (27650, 27696), True, 'import numpy as np\n'), ((27897, 27952), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ndsi_6 < -0.2)'], {}), '((idx_land, judge, ndsi_6 < -0.2))\n', (27918, 27952), True, 'import numpy as np\n'), ((28142, 28185), 'numpy.logical_and', 'np.logical_and', (['(dt_12 < -0.1)', '(ndsi_6 < 0.08)'], {}), '(dt_12 < -0.1, ndsi_6 < 0.08)\n', (28156, 28185), True, 'import numpy as np\n'), ((28602, 28664), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_temp, dt_01 < 28)'], {}), '((idx_land, judge, idx_temp, dt_01 < 28))\n', (28623, 28664), True, 'import numpy as np\n'), ((28838, 28901), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_temp, dt_01 >= 28)'], {}), '((idx_land, judge, idx_temp, dt_01 >= 28))\n', (28859, 28901), True, 'import numpy as np\n'), ((29108, 29162), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, dr_16 < -7.5)'], {}), '((idx_land, judge, dr_16 < -7.5))\n', (29129, 29162), True, 'import numpy as np\n'), ((29345, 29448), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, rr_46 > 5.5, ref_01 > 65, tbb_31 > 240.5, tbb_31 < 
276.5)'], {}), '((idx_land, judge, rr_46 > 5.5, ref_01 > 65, tbb_31 > \n 240.5, tbb_31 < 276.5))\n', (29366, 29448), True, 'import numpy as np\n'), ((29619, 29718), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ref_dem < 1800, ref_06 > 28, ref_01 > 34, ref_02 > 44)'], {}), '((idx_land, judge, ref_dem < 1800, ref_06 > 28, ref_01 >\n 34, ref_02 > 44))\n', (29640, 29718), True, 'import numpy as np\n'), ((29815, 29885), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ref_dem < 1800, dt_01 > 20.5)'], {}), '((idx_land, judge, ref_dem < 1800, dt_01 > 20.5))\n', (29836, 29885), True, 'import numpy as np\n'), ((29986, 30118), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ref_dem >= 1800, ref_06 > 28.0 + (ref_dem - 1800.0) * \n 0.004, ref_01 > 34, ref_02 > 44)'], {}), '((idx_land, judge, ref_dem >= 1800, ref_06 > 28.0 + (\n ref_dem - 1800.0) * 0.004, ref_01 > 34, ref_02 > 44))\n', (30007, 30118), True, 'import numpy as np\n'), ((30248, 30353), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ref_dem >= 1800, dt_01 > 20.5 + (ref_dem - 1800.0) * 0.002)'], {}), '((idx_land, judge, ref_dem >= 1800, dt_01 > 20.5 + (\n ref_dem - 1800.0) * 0.002))\n', (30269, 30353), True, 'import numpy as np\n'), ((30454, 30563), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(tbb_31 < 170, tbb_31 > 335, tbb_32 < 170, tbb_32 > 335, a_satz > 8, ndsi_6 >\n 0.5)'], {}), '((tbb_31 < 170, tbb_31 > 335, tbb_32 < 170, tbb_32 > \n 335, a_satz > 8, ndsi_6 > 0.5))\n', (30474, 30563), True, 'import numpy as np\n'), ((30858, 30909), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ~idx_temp)'], {}), '((idx_land, judge, ~idx_temp))\n', (30879, 30909), True, 'import numpy as np\n'), ((31459, 31517), 'numpy.minimum', 'np.minimum', (['compared_t11_hai_lat_a', 'compared_t11_hai_lat_b'], {}), '(compared_t11_hai_lat_a, compared_t11_hai_lat_b)\n', (31469, 31517), True, 'import 
numpy as np\n'), ((31660, 31718), 'numpy.maximum', 'np.maximum', (['compared_t11_low_lat_a', 'compared_t11_low_lat_b'], {}), '(compared_t11_low_lat_a, compared_t11_low_lat_b)\n', (31670, 31718), True, 'import numpy as np\n'), ((31732, 31828), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ref_lat_abs >= 40, ref_lat_abs <= 57, tbb_31 < compared_t11_hai_lat)'], {}), '((ref_lat_abs >= 40, ref_lat_abs <= 57, tbb_31 <\n compared_t11_hai_lat))\n', (31753, 31828), True, 'import numpy as np\n'), ((31837, 31933), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ref_lat_abs >= 17, ref_lat_abs <= 40, tbb_31 < compared_t11_low_lat)'], {}), '((ref_lat_abs >= 17, ref_lat_abs <= 40, tbb_31 <\n compared_t11_low_lat))\n', (31858, 31933), True, 'import numpy as np\n'), ((31941, 31968), 'numpy.logical_or', 'np.logical_or', (['idx_1', 'idx_2'], {}), '(idx_1, idx_2)\n', (31954, 31968), True, 'import numpy as np\n'), ((31980, 32026), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (32001, 32026), True, 'import numpy as np\n'), ((32778, 32821), 'numpy.logical_and', 'np.logical_and', (['(ndvis > 0.24)', '(ndsi_6 < 0.14)'], {}), '(ndvis > 0.24, ndsi_6 < 0.14)\n', (32792, 32821), True, 'import numpy as np\n'), ((32834, 32878), 'numpy.logical_and', 'np.logical_and', (['(rr_21 > 1.42)', '(ndsi_6 < 0.145)'], {}), '(rr_21 > 1.42, ndsi_6 < 0.145)\n', (32848, 32878), True, 'import numpy as np\n'), ((32891, 32933), 'numpy.logical_and', 'np.logical_and', (['(dr_17 < 14)', '(ndsi_6 < 0.135)'], {}), '(dr_17 < 14, ndsi_6 < 0.135)\n', (32905, 32933), True, 'import numpy as np\n'), ((32945, 33039), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(idx_1, idx_2, idx_3, ndsi_6 < -0.21, ndsi_7 < -0.08, dr_16 < -9.8)'], {}), '((idx_1, idx_2, idx_3, ndsi_6 < -0.21, ndsi_7 < -0.08, \n dr_16 < -9.8))\n', (32965, 33039), True, 'import numpy as np\n'), ((33046, 33092), 'numpy.logical_and.reduce', 
'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (33067, 33092), True, 'import numpy as np\n'), ((33272, 33315), 'numpy.logical_and', 'np.logical_and', (['(ndvis > 0.24)', '(ndsi_6 < 0.15)'], {}), '(ndvis > 0.24, ndsi_6 < 0.15)\n', (33286, 33315), True, 'import numpy as np\n'), ((33328, 33370), 'numpy.logical_and', 'np.logical_and', (['(rr_21 > 1.4)', '(ndsi_6 < 0.15)'], {}), '(rr_21 > 1.4, ndsi_6 < 0.15)\n', (33342, 33370), True, 'import numpy as np\n'), ((33382, 33448), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(idx_1, idx_2, ndsi_6 < -0.21, dr_16 < -9.5)'], {}), '((idx_1, idx_2, ndsi_6 < -0.21, dr_16 < -9.5))\n', (33402, 33448), True, 'import numpy as np\n'), ((33460, 33506), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (33481, 33506), True, 'import numpy as np\n'), ((34057, 34103), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (34078, 34103), True, 'import numpy as np\n'), ((34282, 34417), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ndsi_6 > 0.18, ref_lat_abs > 36, tbb_31 > 240.15, tbb_31 < 272.15, ndvis >\n 0.16, ref_02 > 20, ref_06 < 17)'], {}), '((ndsi_6 > 0.18, ref_lat_abs > 36, tbb_31 > 240.15, \n tbb_31 < 272.15, ndvis > 0.16, ref_02 > 20, ref_06 < 17))\n', (34303, 34417), True, 'import numpy as np\n'), ((34458, 34504), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (34479, 34504), True, 'import numpy as np\n'), ((34606, 34651), 'numpy.logical_and', 'np.logical_and', (['idx_land', 'judge', '(i_mark == 25)'], {}), '(idx_land, judge, i_mark == 25)\n', (34620, 34651), True, 'import numpy as np\n'), ((35397, 35602), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(ref_dem > 750, tbb_31 > ref_bt11um_min, tbb_31 < 282, ref_01 > 20, ref_01 <\n 55, ref_06 > 10, ref_06 < 24, 
ndsi_6 > 0.68 - 0.0262 * ref_06, ndsi_6 >\n -0.33 + 0.0164 * ref_01)'], {}), '((ref_dem > 750, tbb_31 > ref_bt11um_min, tbb_31 < 282,\n ref_01 > 20, ref_01 < 55, ref_06 > 10, ref_06 < 24, ndsi_6 > 0.68 - \n 0.0262 * ref_06, ndsi_6 > -0.33 + 0.0164 * ref_01))\n', (35418, 35602), True, 'import numpy as np\n'), ((35677, 35723), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (35698, 35723), True, 'import numpy as np\n'), ((35894, 35936), 'numpy.full', 'np.full', (['data_shape', '(268)'], {'dtype': 'np.float32'}), '(data_shape, 268, dtype=np.float32)\n', (35901, 35936), True, 'import numpy as np\n'), ((36014, 36063), 'numpy.logical_or', 'np.logical_or', (['(ref_lat_abs > 20)', '(ref_lat_abs < 40)'], {}), '(ref_lat_abs > 20, ref_lat_abs < 40)\n', (36027, 36063), True, 'import numpy as np\n'), ((36148, 36245), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, rr_46 > 3.1, snow_ref_bt11um < tbb_31, tbb_31 < 278)'], {}), '((idx_land, judge, rr_46 > 3.1, snow_ref_bt11um <\n tbb_31, tbb_31 < 278))\n', (36169, 36245), True, 'import numpy as np\n'), ((36253, 36292), 'numpy.logical_and', 'np.logical_and', (['idx_1', '(ref_lat_abs > 20)'], {}), '(idx_1, ref_lat_abs > 20)\n', (36267, 36292), True, 'import numpy as np\n'), ((36394, 36436), 'numpy.logical_and', 'np.logical_and', (['idx_1', '(~(ref_lat_abs > 20))'], {}), '(idx_1, ~(ref_lat_abs > 20))\n', (36408, 36436), True, 'import numpy as np\n'), ((36609, 36781), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, dr_16 > 10, ref_06 < 19.5, tbb_31 < 276.15, rr_46 > 1.5, \n 2.45 < dt_02, dt_02 < 15, ref_02 > 26, tbb_31 > ref_bt11um + 5.0)'], {}), '((idx_land, judge, dr_16 > 10, ref_06 < 19.5, tbb_31 <\n 276.15, rr_46 > 1.5, 2.45 < dt_02, dt_02 < 15, ref_02 > 26, tbb_31 > \n ref_bt11um + 5.0))\n', (36630, 36781), True, 'import numpy as np\n'), ((36976, 37074), 'numpy.logical_and.reduce', 'np.logical_and.reduce', 
(['(idx_land, judge, ndsi_6 > 0.52, tbb_31 > ref_bt11um + 2, tbb_31 < 278)'], {}), '((idx_land, judge, ndsi_6 > 0.52, tbb_31 > ref_bt11um +\n 2, tbb_31 < 278))\n', (36997, 37074), True, 'import numpy as np\n'), ((37148, 37288), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ndsi_6 > 0.12, ndsi_6 < 0.52, tbb_31 > ref_bt11um, tbb_31 <\n 276.15, ndvis > 0.16, ref_02 > 26)'], {}), '((idx_land, judge, ndsi_6 > 0.12, ndsi_6 < 0.52, \n tbb_31 > ref_bt11um, tbb_31 < 276.15, ndvis > 0.16, ref_02 > 26))\n', (37169, 37288), True, 'import numpy as np\n'), ((38914, 38957), 'numpy.logical_or', 'np.logical_or', (['(i_mark == 200)', '(i_mark == 100)'], {}), '(i_mark == 200, i_mark == 100)\n', (38927, 38957), True, 'import numpy as np\n'), ((38973, 39008), 'numpy.zeros', 'np.zeros', (['data_shape'], {'dtype': 'np.int8'}), '(data_shape, dtype=np.int8)\n', (38981, 39008), True, 'import numpy as np\n'), ((39219, 39284), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, _condition, _condition2)'], {}), '((idx_land, judge, _condition, _condition2))\n', (39240, 39284), True, 'import numpy as np\n'), ((39405, 39460), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, i_mark == 200)'], {}), '((idx_land, judge, i_mark == 200))\n', (39426, 39460), True, 'import numpy as np\n'), ((39879, 39921), 'numpy.logical_and', 'np.logical_and', (['(ndvis > 0.11)', '(tbb_31 < 280)'], {}), '(ndvis > 0.11, tbb_31 < 280)\n', (39893, 39921), True, 'import numpy as np\n'), ((39933, 39988), 'numpy.logical_or.reduce', 'np.logical_or.reduce', (['(idx_, dr_16 < 0, ndsi_6 < -0.15)'], {}), '((idx_, dr_16 < 0, ndsi_6 < -0.15))\n', (39953, 39988), True, 'import numpy as np\n'), ((40000, 40046), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (40021, 40046), True, 'import numpy as np\n'), ((40147, 40215), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, 
judge, ndvis > 0.11, tbb_31 < 280)'], {}), '((idx_land, judge, ndvis > 0.11, tbb_31 < 280))\n', (40168, 40215), True, 'import numpy as np\n'), ((40316, 40367), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, dr_16 < 0)'], {}), '((idx_land, judge, dr_16 < 0))\n', (40337, 40367), True, 'import numpy as np\n'), ((40468, 40524), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, ndsi_6 < -0.15)'], {}), '((idx_land, judge, ndsi_6 < -0.15))\n', (40489, 40524), True, 'import numpy as np\n'), ((40687, 40729), 'numpy.logical_and', 'np.logical_and', (['(tbb_31 > 280)', '(rr_46 < 1.35)'], {}), '(tbb_31 > 280, rr_46 < 1.35)\n', (40701, 40729), True, 'import numpy as np\n'), ((40741, 40787), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (40762, 40787), True, 'import numpy as np\n'), ((40888, 41000), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(tbb_31 < 280, ref_dem >= 3000, ref_01 >= 40, ref_06 < 20, tbb_20 < 295, \n rr_46 > 1.3)'], {}), '((tbb_31 < 280, ref_dem >= 3000, ref_01 >= 40, ref_06 <\n 20, tbb_20 < 295, rr_46 > 1.3))\n', (40909, 41000), True, 'import numpy as np\n'), ((41042, 41088), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (41063, 41088), True, 'import numpy as np\n'), ((41190, 41253), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(tbb_31 < 280, rr_46 < 1.4, ref_02 < 28)'], {}), '((tbb_31 < 280, rr_46 < 1.4, ref_02 < 28))\n', (41211, 41253), True, 'import numpy as np\n'), ((41265, 41311), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (41286, 41311), True, 'import numpy as np\n'), ((41412, 41476), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(tbb_31 < 280, rr_46 < 1.4, ref_02 >= 28)'], {}), '((tbb_31 < 280, rr_46 < 1.4, ref_02 >= 28))\n', (41433, 41476), True, 'import 
numpy as np\n'), ((41488, 41534), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, idx_)'], {}), '((idx_land, judge, idx_))\n', (41509, 41534), True, 'import numpy as np\n'), ((41661, 41713), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(idx_land, judge, i_tag == 2)'], {}), '((idx_land, judge, i_tag == 2))\n', (41682, 41713), True, 'import numpy as np\n'), ((7691, 7716), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7706, 7716), False, 'import os\n'), ((8878, 8918), 'numpy.loadtxt', 'np.loadtxt', (['inn_put_file_l01'], {'skiprows': '(1)'}), '(inn_put_file_l01, skiprows=1)\n', (8888, 8918), True, 'import numpy as np\n'), ((8954, 8994), 'numpy.loadtxt', 'np.loadtxt', (['inn_put_file_l02'], {'skiprows': '(1)'}), '(inn_put_file_l02, skiprows=1)\n', (8964, 8994), True, 'import numpy as np\n'), ((9195, 9235), 'numpy.loadtxt', 'np.loadtxt', (['inn_put_file_l03'], {'skiprows': '(1)'}), '(inn_put_file_l03, skiprows=1)\n', (9205, 9235), True, 'import numpy as np\n'), ((14967, 15005), 'numpy.logical_and', 'np.logical_and', (['(i_mask > 3)', '(i_mask < 8)'], {}), '(i_mask > 3, i_mask < 8)\n', (14981, 15005), True, 'import numpy as np\n'), ((30742, 30756), 'numpy.cos', 'np.cos', (['a_satz'], {}), '(a_satz)\n', (30748, 30756), True, 'import numpy as np\n'), ((44521, 44585), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(i_avalible == 1, i_cm == 1, i_mark == 1)'], {}), '((i_avalible == 1, i_cm == 1, i_mark == 1))\n', (44542, 44585), True, 'import numpy as np\n'), ((44654, 44694), 'numpy.logical_or', 'np.logical_or', (['(i_mark == 50)', '(i_mark == 1)'], {}), '(i_mark == 50, i_mark == 1)\n', (44667, 44694), True, 'import numpy as np\n'), ((44710, 44762), 'numpy.logical_and.reduce', 'np.logical_and.reduce', (['(i_avalible, i_cm == 3, idx_)'], {}), '((i_avalible, i_cm == 3, idx_))\n', (44731, 44762), True, 'import numpy as np\n'), ((44831, 44892), 'numpy.logical_and.reduce', 'np.logical_and.reduce', 
(['(i_avalible, i_mark == 200, i_tag < 3)'], {}), '((i_avalible, i_mark == 200, i_tag < 3))\n', (44852, 44892), True, 'import numpy as np\n'), ((45581, 45593), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (45589, 45593), False, 'import sys\n'), ((45651, 45663), 'sys.exit', 'sys.exit', (['(-1)'], {}), '(-1)\n', (45659, 45663), False, 'import sys\n'), ((12567, 12636), 'numpy.logical_and', 'np.logical_and', (['(d_solar_zenith > 0)', '(d_solar_zenith < solar_zenith_max)'], {}), '(d_solar_zenith > 0, d_solar_zenith < solar_zenith_max)\n', (12581, 12636), True, 'import numpy as np\n'), ((16520, 16543), 'numpy.cos', 'np.cos', (['(a_suna - a_sata)'], {}), '(a_suna - a_sata)\n', (16526, 16543), True, 'import numpy as np\n'), ((16546, 16560), 'numpy.cos', 'np.cos', (['a_sunz'], {}), '(a_sunz)\n', (16552, 16560), True, 'import numpy as np\n'), ((16563, 16577), 'numpy.cos', 'np.cos', (['a_satz'], {}), '(a_satz)\n', (16569, 16577), True, 'import numpy as np\n'), ((18046, 18065), 'numpy.abs', 'np.abs', (['b_hai_t_lat'], {}), '(b_hai_t_lat)\n', (18052, 18065), True, 'import numpy as np\n'), ((18193, 18212), 'numpy.abs', 'np.abs', (['a_low_t_lat'], {}), '(a_low_t_lat)\n', (18199, 18212), True, 'import numpy as np\n'), ((18547, 18566), 'numpy.abs', 'np.abs', (['b_hai_t_lat'], {}), '(b_hai_t_lat)\n', (18553, 18566), True, 'import numpy as np\n'), ((18694, 18713), 'numpy.abs', 'np.abs', (['a_low_t_lat'], {}), '(a_low_t_lat)\n', (18700, 18713), True, 'import numpy as np\n'), ((30607, 30623), 'numpy.round', 'np.round', (['tbb_31'], {}), '(tbb_31)\n', (30615, 30623), True, 'import numpy as np\n'), ((30773, 30790), 'numpy.round', 'np.round', (['sec_sza'], {}), '(sec_sza)\n', (30781, 30790), True, 'import numpy as np\n'), ((10671, 10714), 'numpy.sin', 'np.sin', (['(np.pi * (i2_xun_num - 21 + 36) / 36)'], {}), '(np.pi * (i2_xun_num - 21 + 36) / 36)\n', (10677, 10714), True, 'import numpy as np\n'), ((10751, 10789), 'numpy.sin', 'np.sin', (['(np.pi * (i2_xun_num - 21) / 36)'], {}), 
'(np.pi * (i2_xun_num - 21) / 36)\n', (10757, 10789), True, 'import numpy as np\n'), ((16486, 16500), 'numpy.sin', 'np.sin', (['a_sunz'], {}), '(a_sunz)\n', (16492, 16500), True, 'import numpy as np\n'), ((16503, 16517), 'numpy.sin', 'np.sin', (['a_satz'], {}), '(a_satz)\n', (16509, 16517), True, 'import numpy as np\n'), ((26355, 26370), 'numpy.abs', 'np.abs', (['ref_lat'], {}), '(ref_lat)\n', (26361, 26370), True, 'import numpy as np\n'), ((25987, 26017), 'numpy.round', 'np.round', (['((ref_lon + 180) * 10)'], {}), '((ref_lon + 180) * 10)\n', (25995, 26017), True, 'import numpy as np\n'), ((30966, 30992), 'numpy.round', 'np.round', (['(i_test_t11 - 250)'], {}), '(i_test_t11 - 250)\n', (30974, 30992), True, 'import numpy as np\n'), ((39079, 39109), 'numpy.round', 'np.round', (['((ref_lon + 180) * 10)'], {}), '((ref_lon + 180) * 10)\n', (39087, 39109), True, 'import numpy as np\n'), ((34001, 34022), 'numpy.round', 'np.round', (['(ndvis * 100)'], {}), '(ndvis * 100)\n', (34009, 34022), True, 'import numpy as np\n')] |
# to do:
# - read in tidal predictions (if file exists) to validate data
import socket
import numpy as np
def ADCP_read(stage_instance, udp_IP = "", udp_port = 61557, buff_size = 1024, timeout = 5):
    """
    Reads ADCP data continuously from the specified port until a receive
    timeout marks the end of a burst.

    Each UDP packet is decoded with decode_ADCP() and accumulated.  When the
    socket times out, the accumulated burst (if any) is handed to
    process_ADCP() and the loop exits so the socket can be closed
    (per the original editing note: "break added after timeout").

    Inputs:
        stage_instance: object exposing addDataToStage(); forwarded to
            process_ADCP() so the averaged burst can be stored.
        udp_IP: "" for local host, otherwise "XXX.XXX.XXX.XXX"
        udp_port: port for UDP communication (61557 = ADCP Default)
        buff_size: Each read iteration will read buff_size bytes of data,
            or until the end of a packet is reached.
        timeout: socket timeout in seconds; a timeout delimits a burst.
    """
    # create socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind((udp_IP, udp_port))
    sock.settimeout(timeout)
    currents = []
    headers = []
    while True:
        # read data. Process the burst and stop on timeout; attempt to
        # reconnect if there is a different socket error.
        try:
            data, addr = sock.recvfrom(buff_size)
        except socket.timeout:
            if currents and headers:
                # BUGFIX: the original called process_ADCP(currents, header),
                # omitting the required stage_instance argument.
                # headers[-1] is the last ping's header, i.e. the
                # end-of-burst timestamp process_ADCP() expects.
                process_ADCP(stage_instance, currents, headers[-1])
            break
        except socket.error:
            # Transient socket failure: rebuild the socket and keep reading.
            sock.close()
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.bind((udp_IP, udp_port))
            sock.settimeout(timeout)
            continue
        # decode data; malformed packets yield empty lists and are skipped
        # so they cannot corrupt the burst average downstream.
        current, header = decode_ADCP(data)
        if len(header):
            currents.append(current)
            headers.append(header)
    sock.close()
def decode_ADCP(data):
    """
    Decode one raw ADCP packet read over UDP.

    input: Raw data string from ADCP UDP stream
    Output:
        header:  [timestamp, nCells, nBeams, pressure]
            - timestamp in unix format
            - nBeams x nCells gives dimensions of current data
            - pressure is hydrostatic pressure in dBar
        current: nBeams x nCells current values in m/s
    Malformed packets (missing the AAAA/ZZZZ framing) yield two empty lists.
    """
    text = data.decode("utf-8")
    # A well-formed packet is framed by 'AAAA' ... 'ZZZZ'.
    if not (text.startswith('AAAA') and text.endswith('ZZZZ')):
        return [], []
    fields = text.split(' ')
    # Seconds plus milliseconds give the unix timestamp.
    timestamp = float(fields[1]) + float(fields[2]) / 1000
    nCells = int(fields[3])
    nBeams = int(fields[4])
    pressure = int(fields[5])
    # Velocities arrive in mm/s; convert to m/s and shape as beams x cells.
    velocities = np.array([float(v) for v in fields[6:-2]]) / 1000
    current = np.resize(velocities, (nBeams, nCells)).round(3)
    return current, [timestamp, nCells, nBeams, pressure]
def process_ADCP(stage_instance, currents, header):
    """
    Calculates velocity magnitude and direction after a burst has finished
    and pushes the result to the stage as ('adcp', [timestamp, speed, heading]).
    Inputs:
        stage_instance = object exposing addDataToStage(name, data)
        currents = raw data from ADCP [nPings x nBeams x nCells]
        header = header data from ADCP ([timestamp, nCells, nBeams, pressure])
    Outputs:
        heading = velocity direction (in radians from north)
        speed = magnitude of horizontal velocity (East and North)
        timestamp = end of burst in unix time format
    """
    timestamp = header[0]
    currents = np.array(currents)
    # Average over pings, then over cells 1-3 of each beam.
    bin_avg = np.mean(currents, axis=0)
    bins = bin_avg[:, 1:4]
    avg = np.mean(bins, axis=1).round(3)
    # BUGFIX: arctan2 resolves the correct quadrant and is defined when
    # avg[0] == 0, unlike the original plain arctan(avg[1]/avg[0]).
    heading = np.arctan2(avg[1], avg[0]).round(3)
    speed = (avg[1]**2 + avg[0]**2)**0.5
    pressure = header[3]/0.0001  # dBar to Pa (1 dBar = 1e4 Pa); unused until
    # depth = pressure/(g*rho)   # this density correction is fixed!
    adcp_data = [timestamp, speed, heading]
    stage_instance.addDataToStage('adcp', adcp_data)
| [
"numpy.mean",
"socket.socket",
"numpy.array",
"numpy.resize",
"numpy.arctan"
] | [((606, 654), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (619, 654), False, 'import socket\n'), ((3093, 3111), 'numpy.array', 'np.array', (['currents'], {}), '(currents)\n', (3101, 3111), True, 'import numpy as np\n'), ((3126, 3151), 'numpy.mean', 'np.mean', (['currents'], {'axis': '(0)'}), '(currents, axis=0)\n', (3133, 3151), True, 'import numpy as np\n'), ((3189, 3210), 'numpy.mean', 'np.mean', (['bins'], {'axis': '(1)'}), '(bins, axis=1)\n', (3196, 3210), True, 'import numpy as np\n'), ((3235, 3261), 'numpy.arctan', 'np.arctan', (['(avg[1] / avg[0])'], {}), '(avg[1] / avg[0])\n', (3244, 3261), True, 'import numpy as np\n'), ((1238, 1286), 'socket.socket', 'socket.socket', (['socket.AF_INET', 'socket.SOCK_DGRAM'], {}), '(socket.AF_INET, socket.SOCK_DGRAM)\n', (1251, 1286), False, 'import socket\n'), ((2433, 2469), 'numpy.resize', 'np.resize', (['current', '(nBeams, nCells)'], {}), '(current, (nBeams, nCells))\n', (2442, 2469), True, 'import numpy as np\n')] |
# python libraries
import numpy as np
# matplotlib libraries
import matplotlib.pyplot as plt
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score, make_scorer, accuracy_score, average_precision_score, confusion_matrix
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils import shuffle
from sklearn import metrics, preprocessing
from sklearn.dummy import DummyClassifier
# ours
import sys
sys.path.insert(0, '../data+wrangling')
import util
####################
# Cross Validation #
####################
def cv_performance(clf, X, y, kf, metric="accuracy"):
    """
    Run k-fold cross-validation and return the mean performance.

    For every split produced by `kf`, fits `clf` on the training fold,
    scores ``clf.decision_function`` output on the held-out fold with the
    requested metric, and averages the per-fold scores (NaN folds are
    dropped from the average).

    Parameters
    --------------------
        clf    -- classifier (instance of SVC)
        X      -- numpy array of shape (n,d), feature vectors
        y      -- numpy array of shape (n,), binary labels {1,-1}
        kf     -- model_selection.KFold or model_selection.StratifiedKFold
        metric -- string, option used to select performance measure

    Returns
    --------------------
        score  -- float, average cross-validation performance across k folds
    """
    fold_scores = []
    for train_idx, test_idx in kf.split(X, y):
        clf.fit(X[train_idx], y[train_idx])
        # Use decision_function for ``continuous-valued'' predictions
        # rather than hard class labels.
        y_pred = clf.decision_function(X[test_idx])
        fold_score = performance(y[test_idx], y_pred, metric)
        if not np.isnan(fold_score):
            fold_scores.append(fold_score)
    return np.array(fold_scores).mean()
def select_param_linear(X, y, kf, metric="accuracy", plot=True, class_weight = {1:1, -1:1}) :
    """
    Sweeps different settings for the hyperparameter of a linear-kernel SVM,
    calculating the k-fold CV performance for each setting, then selecting the
    hyperparameter that 'maximizes' the average k-fold CV performance.

    Parameters
    --------------------
        X            -- numpy array of shape (n,d), feature vectors
                          n = number of examples
                          d = number of features
        y            -- numpy array of shape (n,), binary labels {1,-1}
        kf           -- model_selection.KFold or model_selection.StratifiedKFold
        metric       -- string, option used to select performance measure
        plot         -- boolean, make a plot
        class_weight -- class weights if we want to do a weighted SVC.
                        Defaults to not weighting any class in particular.
                        (Note: the default dict is never mutated, so the shared
                        mutable-default is harmless here.)

    Returns
    --------------------
        C            -- float, optimal parameter value for linear-kernel SVM
    """
    print ('Linear SVM Hyperparameter Selection based on ' + str(metric) + ':')
    C_range = 10.0 ** np.arange(-3, 3)
    # BUGFIX: the original used Python-2-only `xrange`, a NameError on
    # Python 3. Compute one CV score per candidate C directly.
    scores = [cv_performance(SVC(kernel='linear', C=c, class_weight=class_weight),
                             X, y, kf, metric=metric)
              for c in C_range]
    # Pick the C with the best cross-validated score.
    max_ind = np.argmax(scores)
    if plot:
        lineplot(C_range, scores, metric)
    return C_range[max_ind]
def lineplot(x, y, label):
    """
    Make a line plot.

    Plots y against evenly spaced positions 0..len(x)-1 and labels the
    ticks with the values of x.

    Parameters
    --------------------
        x      -- list of doubles, x values
        y      -- list of doubles, y values
        label  -- string, label for legend
    """
    positions = list(range(len(x)))
    plt.plot(positions, y, linestyle='-', linewidth=2, label=label)
    plt.xticks(positions, x)
def check_overfit(clf, metric, *args):
    '''Plot train/test/baseline scores of `clf` as a function of the fraction
    of data used for training (10% .. 90%), to diagnose over/under-fitting.

    Data comes from util.code_truVrest / util.make_full_X; features are
    standardized on each training split before fitting. The baseline is a
    majority-vote DummyClassifier. `metric(y_true, y_pred, *args)` computes
    the plotted score. Shows the plot; returns nothing.
    '''
    y = util.code_truVrest()
    X, colnames = util.make_full_X()
    X, y = shuffle(X, y, random_state=42)
    n = len(y)
    # BUGFIX: the original used `step = n/10` (a float on Python 3, which
    # range() rejects) and a range() whose length could disagree with the
    # hard-coded 9-point x axis, crashing plt.plot. Iterate the fractions
    # directly so the three score lists always align with the axis.
    x_axis = [.1, .2, .3, .4, .5, .6, .7, .8, .9]
    train_scores = []
    test_scores = []
    dummy_scores = []
    dummy = DummyClassifier(strategy = "most_frequent")
    for frac in x_axis:
        i = int(n * frac)  # train on the first i examples, test on the rest
        print (i)
        X_train, X_test = X[:i], X[i:]
        y_train, y_test = y[:i], y[i:]
        # normalize on training set and then normalize test set
        scaler = preprocessing.StandardScaler().fit(X_train)
        X_train = scaler.transform(X_train)
        X_test = scaler.transform(X_test)
        clf.fit(X_train, y_train)
        train_preds = clf.predict(X_train)
        train_scores.append(metric(y_train, train_preds, *args))
        test_preds = clf.predict(X_test)
        test_scores.append(metric(y_test, test_preds, *args))
        dummy.fit(X_train, y_train)
        test_preds = dummy.predict(X_test)
        dummy_scores.append(metric(y_test, test_preds, *args))
    plt.plot(x_axis, train_scores, 'b', label = "training score")
    plt.plot(x_axis, test_scores, 'g', label = "test score")
    plt.plot(x_axis, dummy_scores, 'k--', label = "baseline (majority vote) score")
    plt.legend()
    plt.xlabel("fraction of data used to train model")
    plt.ylabel("accuracy")
    plt.ylim(0, 1)
    plt.show()
| [
"sys.path.insert",
"util.code_truVrest",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.ylabel",
"numpy.arange",
"sklearn.utils.shuffle",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.xlabel",
"numpy.argmax",
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"... | [((444, 483), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../data+wrangling"""'], {}), "(0, '../data+wrangling')\n", (459, 483), False, 'import sys\n'), ((3532, 3549), 'numpy.argmax', 'np.argmax', (['scores'], {}), '(scores)\n', (3541, 3549), True, 'import numpy as np\n'), ((3930, 3986), 'matplotlib.pyplot.plot', 'plt.plot', (['xx', 'y'], {'linestyle': '"""-"""', 'linewidth': '(2)', 'label': 'label'}), "(xx, y, linestyle='-', linewidth=2, label=label)\n", (3938, 3986), True, 'import matplotlib.pyplot as plt\n'), ((3991, 4008), 'matplotlib.pyplot.xticks', 'plt.xticks', (['xx', 'x'], {}), '(xx, x)\n', (4001, 4008), True, 'import matplotlib.pyplot as plt\n'), ((4102, 4122), 'util.code_truVrest', 'util.code_truVrest', ([], {}), '()\n', (4120, 4122), False, 'import util\n'), ((4141, 4159), 'util.make_full_X', 'util.make_full_X', ([], {}), '()\n', (4157, 4159), False, 'import util\n'), ((4171, 4201), 'sklearn.utils.shuffle', 'shuffle', (['X', 'y'], {'random_state': '(42)'}), '(X, y, random_state=42)\n', (4178, 4201), False, 'from sklearn.utils import shuffle\n'), ((4309, 4350), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""most_frequent"""'}), "(strategy='most_frequent')\n", (4324, 4350), False, 'from sklearn.dummy import DummyClassifier\n'), ((5142, 5201), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'train_scores', '"""b"""'], {'label': '"""training score"""'}), "(x_axis, train_scores, 'b', label='training score')\n", (5150, 5201), True, 'import matplotlib.pyplot as plt\n'), ((5208, 5262), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'test_scores', '"""g"""'], {'label': '"""test score"""'}), "(x_axis, test_scores, 'g', label='test score')\n", (5216, 5262), True, 'import matplotlib.pyplot as plt\n'), ((5269, 5346), 'matplotlib.pyplot.plot', 'plt.plot', (['x_axis', 'dummy_scores', '"""k--"""'], {'label': '"""baseline (majority vote) score"""'}), "(x_axis, dummy_scores, 'k--', label='baseline (majority vote) score')\n", 
(5277, 5346), True, 'import matplotlib.pyplot as plt\n'), ((5353, 5365), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5363, 5365), True, 'import matplotlib.pyplot as plt\n'), ((5370, 5420), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""fraction of data used to train model"""'], {}), "('fraction of data used to train model')\n", (5380, 5420), True, 'import matplotlib.pyplot as plt\n'), ((5425, 5447), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""accuracy"""'], {}), "('accuracy')\n", (5435, 5447), True, 'import matplotlib.pyplot as plt\n'), ((5452, 5466), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0)', '(1)'], {}), '(0, 1)\n', (5460, 5466), True, 'import matplotlib.pyplot as plt\n'), ((5471, 5481), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5479, 5481), True, 'import matplotlib.pyplot as plt\n'), ((3053, 3069), 'numpy.arange', 'np.arange', (['(-3)', '(3)'], {}), '(-3, 3)\n', (3062, 3069), True, 'import numpy as np\n'), ((1830, 1845), 'numpy.isnan', 'np.isnan', (['score'], {}), '(score)\n', (1838, 1845), True, 'import numpy as np\n'), ((1892, 1908), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (1900, 1908), True, 'import numpy as np\n'), ((4571, 4601), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (4599, 4601), False, 'from sklearn import metrics, preprocessing\n')] |
# coding=utf8
import json
import math
from numpy.ma import arange
from data_utils import build_data_loader
from model_utils import load_vocabulary, build_model, model_evaluate
# Load selector/training hyperparameters from the JSON config file.
with open('config.json') as config_file:
    config = json.load(config_file)

# Checkpoint-epoch sweep bounds and step (SELECTOR section).
MIN_EPOCH = config['SELECTOR']['MIN_EPOCH']
MAX_EPOCH = config['SELECTOR']['MAX_EPOCH']
STEP_SIZE = config['SELECTOR']['STEP_SIZE']
# Evaluation batch size (TRAIN section).
BATCH_SIZE = config['TRAIN']['BATCH_SIZE']
def select_proper_checkpoint():
    """Evaluate every saved checkpoint in the configured epoch range and
    print its test perplexity (exp of the evaluation loss), so a proper
    checkpoint can be chosen by inspection."""
    for epoch in arange(MIN_EPOCH, MAX_EPOCH + STEP_SIZE, STEP_SIZE):
        vocab = load_vocabulary()
        net = build_model(len(vocab.word2index), load_checkpoint=True,
                          checkpoint_epoch=epoch, print_module=False)
        loader = build_data_loader(batch_size=BATCH_SIZE)
        loss = model_evaluate(net, loader)
        # Perplexity is the exponential of the average loss.
        print('EPOCH %d Test PPL: %.4f' % (epoch, math.exp(loss)))
if __name__ == '__main__':
    # Allow a clean exit on Ctrl-C instead of a traceback.
    try:
        select_proper_checkpoint()
    except KeyboardInterrupt:
        print("You quit.")
| [
"model_utils.model_evaluate",
"data_utils.build_data_loader",
"numpy.ma.arange",
"model_utils.load_vocabulary",
"json.load",
"math.exp"
] | [((233, 255), 'json.load', 'json.load', (['config_file'], {}), '(config_file)\n', (242, 255), False, 'import json\n'), ((483, 534), 'numpy.ma.arange', 'arange', (['MIN_EPOCH', '(MAX_EPOCH + STEP_SIZE)', 'STEP_SIZE'], {}), '(MIN_EPOCH, MAX_EPOCH + STEP_SIZE, STEP_SIZE)\n', (489, 534), False, 'from numpy.ma import arange\n'), ((552, 569), 'model_utils.load_vocabulary', 'load_vocabulary', ([], {}), '()\n', (567, 569), False, 'from model_utils import load_vocabulary, build_model, model_evaluate\n'), ((706, 746), 'data_utils.build_data_loader', 'build_data_loader', ([], {'batch_size': 'BATCH_SIZE'}), '(batch_size=BATCH_SIZE)\n', (723, 746), False, 'from data_utils import build_data_loader\n'), ((767, 798), 'model_utils.model_evaluate', 'model_evaluate', (['model', 'data_set'], {}), '(model, data_set)\n', (781, 798), False, 'from model_utils import load_vocabulary, build_model, model_evaluate\n'), ((849, 868), 'math.exp', 'math.exp', (['test_loss'], {}), '(test_loss)\n', (857, 868), False, 'import math\n')] |
import os
import numpy as np
from scipy.stats import rankdata
from scipy.special import binom #faster than comb
import dynamicTreeCut.df_apply
from functools import partial
from dynamicTreeCut.R_func import *
chunkSize = 100
#Function to index flat matrix as squareform matrix
def dist_index(i, j, matrix, l, n):
    """Return entry (i, j) of a condensed (flat, upper-triangular) distance
    matrix as if it were square.

    i, j   -- 0-based point indices
    matrix -- condensed distance matrix of length l = n*(n-1)/2
    l, n   -- length of `matrix` and number of points, passed in so callers
              can precompute them once
    """
    if i == j:
        # Diagonal of a distance matrix is zero by definition.
        return 0.0
    lo, hi = min(i, j), max(i, j)
    # Standard condensed-matrix offset: skip the first `lo` rows of the
    # upper triangle, then step to column `hi`.
    flat = int(l - binom(n - lo, 2) + (hi - lo - 1))
    return matrix[flat]
#Function to index flat matrix as squareform matrix
def dist_multi_index(_array, matrix):
    """Return the square sub-distance-matrix for the points in `_array`.

    matrix may be a square (2-D) distance matrix, in which case ordinary
    fancy indexing is used, or a condensed (1-D) one, in which case entries
    are fetched via dist_index.
    """
    if len(matrix.shape) == 2:
        # Already square: plain row/column selection.
        return matrix[_array, :][:, _array]
    l = len(matrix)
    # Recover the number of points from the condensed length l = n*(n-1)/2.
    n = 0.5 * (np.sqrt(8 * l + 1) + 1)
    k = len(_array)
    out = np.zeros((k, k))
    # Fill the upper triangle and mirror it (distance matrices are symmetric).
    for a in range(k):
        for b in range(a, k):
            d = dist_index(_array[a], _array[b], matrix, l, n)
            out[a, b] = d
            out[b, a] = d
    return out
#Function to index rows of flat matrix as squareform matrix
def get_rows(_array, matrix):
    """Return the requested rows of a distance matrix as dense rows.

    _array -- integer index array, or boolean mask over the n points
    matrix -- condensed (1-D) or square (2-D) distance matrix

    For condensed input, each returned row r holds the distances from point
    _array[r] (or the r-th True position of the mask) to all n points, with
    zero on the self-distance.
    """
    if len(matrix.shape) == 2:
        # Already square: ordinary row selection.
        return matrix[_array, :]
    l = len(matrix)
    n = int(0.5 * (np.sqrt(8 * l + 1) + 1))
    # A boolean mask selects the rows where the mask is True; an int array
    # selects rows directly. Normalize both cases to explicit indices.
    rows = np.flatnonzero(_array) if _array.dtype == "bool" else _array
    out = np.zeros((len(rows), n))
    for r, i in enumerate(rows):
        for j in range(n):
            if i != j:
                # Condensed-matrix offset for the (i, j) pair.
                flat = int(l - binom(n - min(i, j), 2) + (max(i, j) - min(i, j) - 1))
                out[r, j] = matrix[flat]
    return out
#The following are supporting function for GetClusters.
def CoreSize(BranchSize, minClusterSize):
    """Number of points that form the "core" of a branch of the given size.

    The core is at least minClusterSize/2 + 1 points and grows with the
    square root of the branch size beyond that; it is capped at the branch
    size itself. Returns an int (truncated).
    """
    base = minClusterSize / 2 + 1
    if base >= BranchSize:
        # Branch is no bigger than the base core: the whole branch is the core.
        return int(BranchSize)
    return int(base + np.sqrt(BranchSize - base))
# This assumes the diagonal of the distance matrix
# is zero, BranchDist is a square matrix whose dimension is at least 2.
def CoreScatter(BranchDist, minClusterSize):
    """Mean within-core average distance ("scatter") of a branch.

    BranchDist      -- square (nPoints x nPoints) distance matrix with a
                       zero diagonal
    minClusterSize  -- minimum cluster size; sets the core size

    The core is the EffCoreSize points with the smallest average distance
    to the rest of the branch; returns the mean of their average
    within-core distances.
    """
    nPoints = BranchDist.shape[0]
    PointAverageDistances = np.sum(BranchDist, axis=1) / (nPoints - 1)
    CoreSize = minClusterSize / 2 + 1
    if CoreSize < nPoints:
        EffCoreSize = CoreSize + np.sqrt(nPoints - CoreSize)
        order = np.argsort(PointAverageDistances)
        # BUGFIX: was order[np.arange(EffCoreSize)] -- indexing with a float
        # array raises IndexError and over-selects vs the R original, which
        # truncates (1:EffCoreSize).
        Core = order[:int(EffCoreSize)]
    else:
        Core = np.arange(nPoints)
        EffCoreSize = nPoints
    # BUGFIX: BranchDist[Core, Core] fancy-indexes only the (zero) diagonal
    # pairs, not the Core x Core submatrix, so np.sum(..., axis=1) raised
    # AxisError. np.ix_ selects the actual submatrix.
    CoreDist = BranchDist[np.ix_(Core, Core)]
    # Note: denominator keeps the (possibly fractional) EffCoreSize, as in
    # the original R implementation.
    CoreAverageDistances = np.sum(CoreDist, axis=1) / (EffCoreSize - 1)
    return np.mean(CoreAverageDistances)
def interpolate(data, index):
    """Linearly interpolate `data` at a 1-based, possibly fractional index.

    Mirrors the R original's 1-based indexing: index 1 maps to data[0].
    Out-of-range indices clamp to the first/last element.

    data  -- indexable numeric sequence (e.g. numpy array)
    index -- 1-based position, may be fractional
    """
    # BUGFIX: cast to int -- np.round returns a float for float input,
    # and a float cannot index an array.
    i = int(np.round(index))
    n = len(data)
    # BUGFIX: was `if i < 0`, which let data[i-1] wrap around to data[-1]
    # for index 0 (the R original guards index < 1).
    if i < 1:
        return data[0]
    if i >= n:
        return data[-1]
    r = index - i
    return data[i - 1] * (1 - r) + data[i] * r
def cutreeHybrid(link, distM,
cutHeight = None, minClusterSize = 20, deepSplit = 1,
maxCoreScatter = None, minGap = None,
maxAbsCoreScatter = None, minAbsGap = None,
minSplitHeight = None, minAbsSplitHeight = None,
externalBranchSplitFnc = None, minExternalSplit = None,
externalSplitOptions = [],
externalSplitFncNeedsDistance = None,
assumeSimpleExternalSpecification = True,
pamStage = True, pamRespectsDendro = True,
useMedoids = False,
maxPamDist = None,
respectSmallClusters = True,
verbose = 2, indent = 0):
dendro_height = get_heights(link)
dendro_merge = get_merges(link)
if maxPamDist == None:
maxPamDist = cutHeight
nMerge = len(dendro_height)
refQuantile = 0.05
refMerge = np.round(nMerge * refQuantile)
if refMerge < 1: refMerge = 1
refHeight = dendro_height[int(refMerge) - 1]
if cutHeight == None:
cutHeight = 0.99 * (np.max(dendro_height) - refHeight) + refHeight
print("..cutHeight not given, setting it to", cutHeight,
" ===> 99% of the (truncated) height range in dendro.")
else:
if cutHeight > np.max(dendro_height): cutHeight = np.max(dendro_height)
if maxPamDist == None: maxPamDist = cutHeight
nMergeBelowCut = np.sum(dendro_height <= cutHeight)
if nMergeBelowCut < minClusterSize:
print("cutHeight set too low; no merges below the cut.")
return(np.zeros(nMerge+1))
# fill in this section once understood better
if externalBranchSplitFnc != None:
raise NotImplementedError("externalBranchSplitFnc is not supported yet")
nExternalSplits = len(externalBranchSplitFnc)
if len(minExternalSplit) < 1:
raise AttributeError("minExternalBranchSplit must be given.")
if assumeSimpleExternalSpecification and nExternalSplits == 1:
pass
else:
nExternalSplits = 0
MxBranches = nMergeBelowCut
branch_isBasic = np.repeat(True, MxBranches)
branch_isTopBasic = np.repeat(True, MxBranches)
branch_failSize = np.repeat(False, MxBranches)
branch_rootHeight = np.repeat(np.nan, MxBranches)
branch_size = np.repeat(2, MxBranches)
branch_nMerge = np.repeat(1, MxBranches)
branch_nSingletons = np.repeat(2, MxBranches)
branch_nBasicClusters = np.repeat(0, MxBranches)
branch_mergedInto = np.repeat(0, MxBranches)
branch_attachHeight = np.repeat(np.nan, MxBranches)
#branch_singletons = np.zeros(MxBranches)
branch_singletons = [np.nan] * MxBranches
#branch_basicClusters = pd.Series(np.zeros(MxBranches))
branch_basicClusters = [np.nan] * MxBranches
#branch_mergingHeights = pd.Series(np.zeros(MxBranches))
branch_mergingHeights = [np.nan] * MxBranches
#branch_singletonHeights = pd.Series(np.zeros(MxBranches))
branch_singletonHeights = [np.nan] * MxBranches
nBranches = 0
spyIndex = None
if os.path.isfile(".dynamicTreeCutSpyFile"):
spyIndex = pd.read_csv(".dynamicTreeCutSpyFile")
print("Found 'spy file' with indices of objects to watch for.")
spyIndex = spyIndex.iloc[:,1].values
defMCS = np.array([0.64, 0.73, 0.82, 0.91, 0.95])
defMG = (1 - defMCS) * 3 / 4.0
nSplitDefaults = len(defMCS)
if type(deepSplit) == bool: deepSplit = int(deepSplit) * (nSplitDefaults - 2)
deepSplit = deepSplit + 1
if deepSplit < 1 or deepSplit > nSplitDefaults:
raise IndexError("Parameter deepSplit (value", deepSplit,
") out of range: allowable range is 0 through",
nSplitDefaults - 1)
if maxCoreScatter == None: maxCoreScatter = interpolate(defMCS, deepSplit)
if minGap == None: minGap = interpolate(defMG, deepSplit)
if maxAbsCoreScatter == None:
maxAbsCoreScatter = refHeight + maxCoreScatter * (cutHeight - refHeight)
if minAbsGap == None:
minAbsGap = minGap * (cutHeight - refHeight)
if minSplitHeight == None: minSplitHeight = 0
if minAbsSplitHeight == None:
minAbsSplitHeight = refHeight + minSplitHeight * (cutHeight - refHeight)
nPoints = nMerge + 1
IndMergeToBranch = np.repeat(0, nMerge)
onBranch = np.repeat(0, nPoints)
RootBranch = 0
mergeDiagnostics = dict(smI = np.repeat(np.nan, nMerge), smSize = np.repeat(np.nan, nMerge),
smCrSc = np.repeat(np.nan, nMerge), smGap = np.repeat(np.nan, nMerge),
lgI = np.repeat(np.nan, nMerge), lgSize = np.repeat(np.nan, nMerge),
lgCrSc = np.repeat(np.nan, nMerge), lgGap = np.repeat(np.nan, nMerge),
merged = np.repeat(np.nan, nMerge))
if nExternalSplits > 0:
#externalMergeDiags = pd.DataFrame(np.repeat(np.nan, nMerge*nExternalSplits).reshape(nMerge, nExternalSplits))
#externalMergeDiags.columns = paste("externalBranchSplit", nExternalSplits, sep = ".")
pass
extender = np.zeros(chunkSize, dtype=int)
for merge in range(nMerge):
if dendro_height[merge] <= cutHeight:
# are both merged objects singletons?
if dendro_merge[merge, 0] < 0 and dendro_merge[merge, 1] < 0:
nBranches = nBranches + 1
branch_isBasic[nBranches - 1] = True
branch_isTopBasic[nBranches - 1] = True
branch_singletons[nBranches - 1] = np.append(-dendro_merge[merge,], extender)
branch_basicClusters[nBranches - 1] = extender
branch_mergingHeights[nBranches - 1] = np.append(np.repeat(dendro_height[merge], 2), extender)
branch_singletonHeights[nBranches - 1] = np.append(np.repeat(dendro_height[merge], 2), extender)
IndMergeToBranch[merge] = nBranches
RootBranch = nBranches
elif sign(dendro_merge[merge,0]) * sign(dendro_merge[merge,1]) < 0:
clust = IndMergeToBranch[int(np.max(dendro_merge[merge,])) - 1]
if clust == 0: raise ValueError("a previous merge has no associated cluster. Sorry!")
gene = -np.min(dendro_merge[merge,])
ns = branch_nSingletons[clust - 1] + 1
nm = branch_nMerge[clust - 1] + 1
if branch_isBasic[clust - 1]:
if ns > len(branch_singletons[clust - 1]):
branch_singletons[clust - 1] = np.append(branch_singletons[clust - 1], extender)
branch_singletonHeights[clust - 1] = np.append(branch_singletonHeights[clust - 1], extender)
branch_singletons[clust - 1][ns - 1] = gene
branch_singletonHeights[clust - 1][ns - 1] = dendro_height[merge]
else:
onBranch[int(gene) - 1] = clust
if nm >= len(branch_mergingHeights[clust - 1]):
branch_mergingHeights[clust - 1] = np.append(branch_mergingHeights[clust - 1], extender)
branch_mergingHeights[clust - 1][nm - 1] = dendro_height[merge]
branch_size[clust - 1] = branch_size[clust - 1] + 1
branch_nMerge[clust - 1] = nm
branch_nSingletons[clust - 1] = ns
IndMergeToBranch[merge] = clust
RootBranch = clust
else:
# attempt to merge two branches:
clusts = IndMergeToBranch[dendro_merge[merge,] - 1]
sizes = branch_size[clusts - 1]
# Note: for 2 elements, rank and order are the same.
rnk = rankdata(sizes, method = "ordinal")
small = clusts[rnk[0] - 1]
large = clusts[rnk[1] - 1]
sizes = sizes[rnk - 1]
branch1 = np.nan if np.any(np.isnan(branch_singletons[large - 1])) else branch_singletons[large - 1][np.arange(sizes[1])]
branch2 = np.nan if np.any(np.isnan(branch_singletons[small - 1])) else branch_singletons[small - 1][np.arange(sizes[0])]
spyMatch = False
if spyIndex != None:
n1 = len(set(branch1) & set(spyIndex))
if n1 / len(branch1) > 0.99 and n1 / len(spyIndex) > 0.99:
print("Found spy match for branch 1 on merge", merge)
spyMatch = True
n2 = len(set(branch2) & set(spyIndex))
if n2 / len(branch1) > 0.99 and n2 / len(spyIndex) > 0.99:
print("Found spy match for branch 2 on merge", merge)
spyMatch = True
if branch_isBasic[small - 1]:
coresize = CoreSize(branch_nSingletons[small - 1], minClusterSize)
Core = np.array(branch_singletons[small - 1][np.arange(int(coresize))], dtype=int)
# SmAveDist = mean(apply(distM[Core, Core], 2, sum)/(coresize-1))
SmAveDist = np.mean(np.sum(dist_multi_index(Core - 1, distM), axis=1) / (coresize - 1))
else:
SmAveDist = 0
if branch_isBasic[large - 1]:
coresize = CoreSize(branch_nSingletons[large - 1], minClusterSize)
Core = np.array(branch_singletons[large - 1][np.arange(int(coresize))], dtype=int)
LgAveDist = np.mean(np.sum(dist_multi_index(Core - 1, distM), axis=1) / (coresize -1 ))
else:
LgAveDist = 0
for key in mergeDiagnostics:
if key == "smI":
mergeDiagnostics[key][merge] = small
elif key == "smSize":
mergeDiagnostics[key][merge] = branch_size[small - 1]
elif key == "smCrSc":
mergeDiagnostics[key][merge] = SmAveDist
elif key == "smGap":
mergeDiagnostics[key][merge] = dendro_height[merge] - SmAveDist
elif key == "lgI":
mergeDiagnostics[key][merge] = large
elif key == "lgSize":
mergeDiagnostics[key][merge] = branch_size[large - 1]
elif key == "lgCrSc":
mergeDiagnostics[key][merge] = LgAveDist
elif key == "lgGap":
mergeDiagnostics[key][merge] = dendro_height[merge] - LgAveDist
elif key == "merged":
mergeDiagnostics[key][merge] = np.nan
# We first check each cluster separately for being too small, too diffuse, or too shallow:
SmallerScores = [branch_isBasic[small - 1],
branch_size[small - 1] < minClusterSize,
SmAveDist > maxAbsCoreScatter,
dendro_height[merge] - SmAveDist < minAbsGap,
dendro_height[merge] < minAbsSplitHeight]
if SmallerScores[0] * np.sum(SmallerScores[1:]) > 0:
DoMerge = True
SmallerFailSize = ~np.logical_or(SmallerScores[2], SmallerScores[3]) # Smaller fails only due to size
else:
LargerScores = [branch_isBasic[large - 1],
branch_size[large - 1] < minClusterSize,
LgAveDist > maxAbsCoreScatter,
dendro_height[merge] - LgAveDist < minAbsGap,
dendro_height[merge] < minAbsSplitHeight]
if LargerScores[0] * np.sum(LargerScores[1:]) > 0:
# Actually: the large one is the one to be merged
DoMerge = True
SmallerFailSize = ~np.logical_or(LargerScores[2], LargerScores[3]) # cluster fails only due to size
x = small
small = large
large = x
sizes = sizes[::-1]
else:
DoMerge = False # None of the two satisfies merging criteria
if DoMerge:
mergeDiagnostics["merged"][merge] = 1
if ~DoMerge and nExternalSplits > 0 and branch_isBasic[small - 1] and branch_isBasic[large - 1]:
if verbose > 4: print("Entering external split code on merge ", merge)
branch1 = branch_singletons[large - 1][np.arange(sizes[1])]
branch2 = branch_singletons[small - 1][np.arange(sizes[0])]
if verbose > 4 or spyMatch: print(" ..branch lengths: ", sizes[0], ", ", sizes[1])
#if (any(is.na(branch1)) || any(branch1==0)) browser();
#if (any(is.na(branch2)) || any(branch2==0)) browser();
##### fix after External Splits is understood better
es = 0
while es < nExternalSplits and ~DoMerge:
es = es + 1
args = externalSplitOptions[es - 1]
args = [args, list(branch1 = branch1, branch2 = branch2)]
#extSplit = do.call(externalBranchSplitFnc[es], args)
if spyMatch:
print(" .. external criterion ", es, ": ", extSplit)
DoMerge = extSplit < minExternalSplit[es - 1]
externalMergeDiags[merge, es - 1] = extSplit
if DoMerge:
mergeDiagnostics_merged[merge] = 2
else:
mergeDiagnostics_merged[merge] = 0
if DoMerge:
# merge the small into the large cluster and close it.
branch_failSize[small - 1] = SmallerFailSize
branch_mergedInto[small - 1] = large
branch_attachHeight[small - 1] = dendro_height[merge]
branch_isTopBasic[small - 1] = False
nss = branch_nSingletons[small - 1]
nsl = branch_nSingletons[large - 1]
ns = nss + nsl
if branch_isBasic[large - 1]:
nExt = np.ceil( (ns - len(branch_singletons[large - 1])) / chunkSize )
if nExt > 0:
if verbose > 5:
print("Extending singletons for branch", large, "by", nExt, " extenders.")
branch_singletons[large - 1] = np.append(branch_singletons[large - 1], np.repeat(extender, nExt))
branch_singletonHeights[large - 1] = np.append(branch_singletonHeights[large - 1], np.repeat(extender, nExt))
branch_singletons[large - 1][np.arange(nsl,ns)] = branch_singletons[small - 1][np.arange(nss)]
branch_singletonHeights[large - 1][np.arange(nsl,ns)] = branch_singletonHeights[small - 1][np.arange(nss)]
branch_nSingletons[large - 1] = ns
else:
if ~branch_isBasic[small - 1]:
raise ValueError("merging two composite clusters. Sorry!")
onBranch[ branch_singletons[small - 1][branch_singletons[small - 1] != 0] - 1 ] = large
nm = branch_nMerge[large - 1] + 1
if nm > len(branch_mergingHeights[large - 1]):
branch_mergingHeights[large - 1] = np.append(branch_mergingHeights[large - 1], extender)
branch_mergingHeights[large - 1][nm - 1] = dendro_height[merge]
branch_nMerge[large - 1] = nm
branch_size[large - 1] = branch_size[small - 1] + branch_size[large - 1]
IndMergeToBranch[merge] = large
RootBranch = large
else:
# start or continue a composite cluster.
# If large is basic and small is not basic, switch them.
if branch_isBasic[large - 1] and ~branch_isBasic[small - 1]:
x = large
large = small
small = x
sizes = sizes[::-1]
# Note: if pamRespectsDendro, need to start a new composite cluster every time two branches merge,
# otherwise will not have the necessary information.
# Otherwise, if the large cluster is already composite, I can simply merge both clusters into
# one of the non-composite clusters.
if branch_isBasic[large - 1] or pamStage and pamRespectsDendro:
nBranches = nBranches + 1
branch_attachHeight[[large - 1, small - 1]] = dendro_height[merge]
branch_mergedInto[[large - 1, small - 1]] = nBranches
if branch_isBasic[small - 1]:
addBasicClusters = small # add basic clusters
else:
addBasicClusters = branch_basicClusters[small - 1]
if branch_isBasic[large - 1]:
addBasicClusters = np.append(addBasicClusters, large)
else:
addBasicClusters = np.append(addBasicClusters, branch_basicClusters[large - 1])
# print(paste(" Starting a composite cluster with number", nBranches));
branch_isBasic[nBranches - 1] = False
branch_isTopBasic[nBranches - 1] = False
branch_basicClusters[nBranches - 1] = addBasicClusters
branch_mergingHeights[nBranches - 1] = np.append(np.repeat(dendro_height[merge], 2), extender)
branch_nMerge[nBranches - 1] = 2
branch_size[nBranches - 1] = np.sum(sizes)
branch_nBasicClusters[nBranches - 1] = len(addBasicClusters)
IndMergeToBranch[merge] = nBranches
RootBranch = nBranches
else:
# Add small branch to the large one
addBasicClusters = small if branch_isBasic[small - 1] else branch_basicClusters[small - 1]
nbl = branch_nBasicClusters[large - 1]
#small might be an int
try:
nb = branch_nBasicClusters[large - 1] + len(addBasicClusters)
except TypeError:
nb = branch_nBasicClusters[large - 1] + 1
if nb > len(branch_basicClusters[large - 1]):
nExt = np.ceil( ( nb - len(branch_basicClusters[large - 1])) / chunkSize)
branch_basicClusters[large - 1] = np.append(branch_basicClusters[large - 1], np.repeat(extender, nExt))
branch_basicClusters[large - 1][np.arange(nbl,nb)] = addBasicClusters
branch_nBasicClusters[large - 1] = nb
branch_size[large - 1] = branch_size[large - 1] + branch_size[small - 1]
nm = branch_nMerge[large - 1] + 1
if nm > len(branch_mergingHeights[large - 1]):
branch_mergingHeights[large - 1] = np.append(branch_mergingHeights[large - 1], extender)
branch_mergingHeights[large - 1][nm - 1] = dendro_height[merge]
branch_nMerge[large - 1] = nm
branch_attachHeight[small - 1] = dendro_height[merge]
branch_mergedInto[small - 1] = large
IndMergeToBranch[merge] = large
RootBranch = large
if verbose > 2: print("..Going through detected branches and marking clusters..")
isCluster = np.repeat(False, nBranches)
SmallLabels = np.repeat(0, nPoints)
for clust in range(nBranches):
if np.isnan(branch_attachHeight[clust]): branch_attachHeight[clust] = cutHeight
if branch_isTopBasic[clust]:
coresize = CoreSize(branch_nSingletons[clust], minClusterSize)
Core = branch_singletons[clust][np.arange(coresize)]
CoreScatter = np.mean(np.sum(dist_multi_index(Core - 1, distM), axis=1) / (coresize - 1))
isCluster[clust] = np.logical_and(np.logical_and(branch_isTopBasic[clust],
branch_size[clust] >= minClusterSize),
np.logical_and(CoreScatter < maxAbsCoreScatter,
branch_attachHeight[clust] - CoreScatter > minAbsGap))
else:
CoreScatter = 0
if branch_failSize[clust]: SmallLabels[branch_singletons[clust][branch_singletons[clust] != 0] - 1] = clust + 1
if not respectSmallClusters: SmallLabels = np.repeat(0, nPoints)
if verbose > 2: print(spaces, "..Assigning Tree Cut stage labels..")
Colors = np.repeat(0, nPoints)
coreLabels = np.repeat(0, nPoints)
clusterBranches = np.arange(nBranches)[isCluster]
branchLabels = np.repeat(0, nBranches)
color = 0
for clust in clusterBranches:
color = color + 1
Colors[branch_singletons[clust][branch_singletons[clust] != 0] - 1] = color
SmallLabels[branch_singletons[clust][branch_singletons[clust] != 0] - 1] = 0
coresize = CoreSize(branch_nSingletons[clust], minClusterSize)
Core = branch_singletons[clust][np.arange(coresize)]
coreLabels[Core - 1] = color
branchLabels[clust] = color
Labeled = np.arange(nPoints)[Colors != 0]
Unlabeled = np.arange(nPoints)[Colors == 0]
nUnlabeled = len(Unlabeled)
UnlabeledExist = nUnlabeled > 0
if len(Labeled) > 0:
LabelFac = factor(Colors[Labeled])
nProperLabels = nlevels(LabelFac)
else:
nProperLabels = 0
if pamStage and UnlabeledExist and nProperLabels > 0:
if verbose > 2: print(spaces, "..Assigning PAM stage labels..")
nPAMed = 0
# Assign some of the grey genes to the nearest module. Define nearest as the distance to the medoid,
# that is the point in the cluster that has the lowest average distance to all other points in the
# cluster. First get the medoids.
if useMedoids:
Medoids = np.repeat(0, nProperLabels)
ClusterRadii = np.repeat(0.0, nProperLabels)
for cluster in range(1, nProperLabels + 1):
InCluster = np.arange(1,nPoints+1)[Colors == cluster]
DistInCluster = dist_multi_index(InCluster - 1, distM)
#DistInCluster = distM[InCluster, InCluster]
DistSums = np.sum(DistInCluster, axis=1)
Medoids[cluster - 1] = InCluster[np.argmin(DistSums)]
ClusterRadii[cluster - 1] = np.max(DistInCluster[:, np.argmin(DistSums)])
# If small clusters are to be respected, assign those first based on medoid-medoid distances.
if respectSmallClusters:
FSmallLabels = factor(SmallLabels)
SmallLabLevs = levels(FSmallLabels)
nSmallClusters = nlevels(FSmallLabels) - (SmallLabLevs[0] == 0)
if nSmallClusters > 0 :
for sclust in SmallLabLevs[SmallLabLevs != 0]:
InCluster = np.arange(nPoints)[SmallLabels == sclust]
if pamRespectsDendro:
onBr = np.unique(onBranch[InCluster])
if len(onBr) > 1:
raise ValueError("Internal error: objects in a small cluster are marked to belong",
"\nto several large branches:")
if onBr > 0:
basicOnBranch = branch_basicClusters[onBr[0] - 1]
labelsOnBranch = branchLabels[basicOnBranch - 1]
else:
labelsOnBranch = None
else:
labelsOnBranch = np.arange(1, nProperLabels + 1)
# printFlush(paste("SmallCluster", sclust, "has", length(InCluster), "elements."));
DistInCluster = dist_multi_index(InCluster, distM)
#DistInCluster = distM[InCluster, InCluster]
if len(labelsOnBranch) > 0:
if len(InCluster) > 1:
DistSums = df_apply.apply(np.sum, DistInCluster, 1)
smed = InCluster[np.argmin(DistSums)]
DistToMeds = get_rows(Medoids[labelsOnBranch - 1][Medoids[labelsOnBranch - 1] != 0] - 1, distM)[:, smed]
closest = np.argmin(DistToMeds)
DistToClosest = DistToMeds[closest]
closestLabel = labelsOnBranch[closest]
if DistToClosest < ClusterRadii[closestLabel - 1] or DistToClosest < maxPamDist:
Colors[InCluster] = closestLabel
nPAMed = nPAMed + len(InCluster)
else: Colors[InCluster] = -1 # This prevents individual points from being assigned later
else:
Colors[InCluster] = -1
# Assign leftover unlabeled objects to clusters with nearest medoids
Unlabeled = np.arange(nPoints)[Colors == 0]
if len(Unlabeled > 0):
for obj in Unlabeled:
if pamRespectsDendro:
onBr = onBranch[obj]
if onBr > 0:
basicOnBranch = branch_basicClusters[onBr - 1]
labelsOnBranch = branchLabels[basicOnBranch - 1]
else:
labelsOnBranch = None
else:
labelsOnBranch = np.arange(nProperLabels)
if labelsOnBranch != None:
UnassdToMedoidDist = get_rows(Medoids[labelsOnBranch - 1] - 1, distM)[:,obj]
#UnassdToMedoidDist = distM[Medoids[labelsOnBranch], obj]
nearest= np.argmin(UnassdToMedoidDist)
NearestCenterDist = UnassdToMedoidDist[nearest]
nearestMed = labelsOnBranch[nearest]
if NearestCenterDist < ClusterRadii[nearestMed - 1] or NearestCenterDist < maxPamDist:
Colors[obj] = nearestMed
nPAMed = nPAMed + 1
UnlabeledExist = np.sum(Colors == 0) > 0
else: # Instead of medoids, use average distances
# This is the default method, so I will try to tune it for speed a bit.
ClusterDiam = np.repeat(0, nProperLabels)
for cluster in range(nProperLabels):
InCluster = np.arange(nPoints)[Colors == cluster]
nInCluster = len(InCluster)
DistInCluster = dist_multi_index(InCluster, distM)
#DistInCluster = distM[InCluster, InCluster]
if nInCluster > 1:
AveDistInClust = np.sum(DistInCluster, axis=1) / (nInCluster - 1)
ClusterDiam[cluster] = np.max(AveDistInClust)
else:
ClusterDiam[cluster] = 0
# If small clusters are respected, assign them first based on average cluster-cluster distances.
ColorsX = Colors.copy()
if respectSmallClusters:
FSmallLabels = factor(SmallLabels) #### think about
SmallLabLevs = levels(FSmallLabels) ##### think about
nSmallClusters = nlevels(FSmallLabels) - (SmallLabLevs[0] == 0)
if nSmallClusters > 0:
if pamRespectsDendro:
for sclust in SmallLabLevs[SmallLabLevs != 0]:
InCluster = np.arange(nPoints)[SmallLabels == sclust]
onBr = np.unique(onBranch[InCluster])
if len(onBr) > 1:
raise ValueError("objects in a small cluster are marked to belong",
"\nto several large branches:")
if onBr > 0:
basicOnBranch = branch_basicClusters[onBr[0] - 1]
labelsOnBranch = branchLabels[basicOnBranch - 1]
useObjects = np.in1d(ColorsX, np.unique(labelsOnBranch))
DistSClustClust = get_rows(InCluster, distM)[:,useObjects]
#DistSClustClust = distM[InCluster, useObjects]
MeanDist = np.mean(DistSClustClust, axis=0)
useColorsFac = factor(ColorsX[useObjects]) ### think about
MeanMeanDist = tapply(MeanDist, useColorsFac, np.mean) ## think about
nearest = np.argmin(MeanMeanDist)
NearestDist = MeanMeanDist[nearest]
nearestLabel = levels(useColorsFac)[nearest] ## think about
if NearestDist < ClusterDiam[nearestLabel - 1] or NearestDist < maxPamDist:
Colors[InCluster] = nearestLabel
nPAMed = nPAMed + len(InCluster)
else:
Colors[InCluster] = -1 # This prevents individual points from being assigned later
else:
labelsOnBranch = np.arange(nProperLabels)
useObjects = np.arange(nPoints)[ColorsX != 0]
for sclust in SmallLabLevs[SmallLabLevs != 0]:
InCluster = np.arange(nPoints)[SmallLabels == sclust]
DistSClustClust = get_rows(InCluster, distM)[:,useObjects]
#DistSClustClust = distM[InCluster, useObjects]
MeanDist = np.mean(DistSClustClust, axis=0)
useColorsFac = factor(ColorsX[useObjects]) ### think about
MeanMeanDist = tapply(MeanDist, useColorsFac, np.mean) ### think about
nearest = np.argmin(MeanMeanDist)
NearestDist = MeanMeanDist[nearest]
nearestLabel = levels(useColorsFac)[nearest] ## think about
if NearestDist < ClusterDiam[nearestLabel - 1] or NearestDist < maxPamDist:
Colors[InCluster] = nearestLabel
nPAMed = nPAMed + len(InCluster)
else:
Colors[InCluster] = -1 # This prevents individual points from being assigned later
# Assign leftover unlabeled objects to clusters with nearest medoids
Unlabeled = np.arange(nPoints)[Colors == 0]
#ColorsX = Colors;
if len(Unlabeled) > 0:
if pamRespectsDendro:
unlabOnBranch = Unlabeled[onBranch[Unlabeled] > 0]
for obj in unlabOnBranch:
onBr = onBranch[obj]
basicOnBranch = branch_basicClusters[onBr - 1]
labelsOnBranch = branchLabels[basicOnBranch - 1]
useObjects = np.in1d(ColorsX, np.unique(labelsOnBranch))
useColorsFac = factor(ColorsX[useObjects]) ### think about
#UnassdToClustDist = tapply(distM[useObjects, obj], useColorsFac, mean) ### think about
UnassdToClustDist = tapply(get_rows(useObjects, distM)[:,obj], useColorsFac, np.mean) ### think about
nearest = np.argmin(UnassdToClustDist)
NearestClusterDist = UnassdToClustDist[nearest]
nearestLabel = levels(useColorsFac)[nearest] ### think about
if NearestClusterDist < ClusterDiam[nearestLabel - 1] or NearestClusterDist < maxPamDist:
Colors[obj] = nearestLabel
nPAMed = nPAMed + 1
else:
useObjects = np.arange(nPoints)[ColorsX != 0]
useColorsFac = factor(ColorsX[useObjects]) ## think about
nUseColors = nlevels(useColorsFac) ### think about
UnassdToClustDist = tapply_df(get_rows(useObjects, distM)[:,Unlabeled], useColorsFac, np.mean, 1)
#UnassdToClustDist = df_apply.apply(distM[useObjects, Unlabeled], 1, tapply, useColorsFac, mean) ### think about
# Fix dimensions for the case when there's only one cluster
#dim(UnassdToClustDist) = np.append(nUseColors, len(Unlabeled)) ### think about
nearest = df_apply.apply(np.argmin, UnassdToClustDist, 1)
nearestDist = df_apply.apply(np.min, UnassdToClustDist, 1)
nearestLabel = levels(useColorsFac)[nearest - 1] ### think about
assign = np.logical_or(nearestDist < ClusterDiam[nearestLabel - 1], nearestDist < maxPamDist)
Colors[Unlabeled[assign]] = nearestLabel[assign]
nPAMed = nPAMed + np.sum(assign)
if verbose > 2: print("....assigned", nPAMed, "objects to existing clusters.")
# Relabel labels such that 1 corresponds to the largest cluster etc.
Colors[Colors < 0] = 0
UnlabeledExist = np.sum(Colors == 0) > 0
NumLabs = Colors + 1
Sizes = table(NumLabs) ### think about
if UnlabeledExist:
if len(Sizes) > 1:
SizeRank = np.append(1, rankdata(-Sizes[1:len(Sizes)], method="ordinal")+1)
else:
SizeRank = np.array([1])
OrdNumLabs = SizeRank[NumLabs - 1]
else:
SizeRank = rankdata(-Sizes[np.arange(len(Sizes))], method="ordinal")
OrdNumLabs = SizeRank[NumLabs - 2]
ordCoreLabels = OrdNumLabs - UnlabeledExist
ordCoreLabels[coreLabels == 0] = 0
if verbose > 0: print( "..done.")
results = dict(labels = OrdNumLabs-UnlabeledExist,
cores = ordCoreLabels,
smallLabels = SmallLabels,
onBranch = onBranch,
mergeDiagnostics = mergeDiagnostics if nExternalSplits==0 else pd.DataFrame({'x':mergeDiagnostics, 'y':externalMergeDiags}),
mergeCriteria = dict(maxCoreScatter = maxCoreScatter, minGap = minGap,
maxAbsCoreScatter = maxAbsCoreScatter, minAbsGap = minAbsGap,
minExternalSplit = minExternalSplit),
branches = dict(nBranches = nBranches, # Branches = Branches,
IndMergeToBranch = IndMergeToBranch,
RootBranch = RootBranch, isCluster = isCluster,
nPoints = nMerge+1))
return(results)
| [
"numpy.sqrt",
"numpy.argsort",
"numpy.array",
"numpy.arange",
"numpy.mean",
"numpy.repeat",
"numpy.max",
"numpy.min",
"numpy.argmin",
"numpy.round",
"os.path.isfile",
"numpy.isnan",
"numpy.unique",
"numpy.logical_and",
"scipy.stats.rankdata",
"numpy.logical_or",
"numpy.append",
"nu... | [((3209, 3238), 'numpy.mean', 'np.mean', (['CoreAverageDistances'], {}), '(CoreAverageDistances)\n', (3216, 3238), True, 'import numpy as np\n'), ((3289, 3304), 'numpy.round', 'np.round', (['index'], {}), '(index)\n', (3297, 3304), True, 'import numpy as np\n'), ((4419, 4449), 'numpy.round', 'np.round', (['(nMerge * refQuantile)'], {}), '(nMerge * refQuantile)\n', (4427, 4449), True, 'import numpy as np\n'), ((4941, 4975), 'numpy.sum', 'np.sum', (['(dendro_height <= cutHeight)'], {}), '(dendro_height <= cutHeight)\n', (4947, 4975), True, 'import numpy as np\n'), ((5639, 5666), 'numpy.repeat', 'np.repeat', (['(True)', 'MxBranches'], {}), '(True, MxBranches)\n', (5648, 5666), True, 'import numpy as np\n'), ((5691, 5718), 'numpy.repeat', 'np.repeat', (['(True)', 'MxBranches'], {}), '(True, MxBranches)\n', (5700, 5718), True, 'import numpy as np\n'), ((5741, 5769), 'numpy.repeat', 'np.repeat', (['(False)', 'MxBranches'], {}), '(False, MxBranches)\n', (5750, 5769), True, 'import numpy as np\n'), ((5794, 5823), 'numpy.repeat', 'np.repeat', (['np.nan', 'MxBranches'], {}), '(np.nan, MxBranches)\n', (5803, 5823), True, 'import numpy as np\n'), ((5842, 5866), 'numpy.repeat', 'np.repeat', (['(2)', 'MxBranches'], {}), '(2, MxBranches)\n', (5851, 5866), True, 'import numpy as np\n'), ((5887, 5911), 'numpy.repeat', 'np.repeat', (['(1)', 'MxBranches'], {}), '(1, MxBranches)\n', (5896, 5911), True, 'import numpy as np\n'), ((5937, 5961), 'numpy.repeat', 'np.repeat', (['(2)', 'MxBranches'], {}), '(2, MxBranches)\n', (5946, 5961), True, 'import numpy as np\n'), ((5990, 6014), 'numpy.repeat', 'np.repeat', (['(0)', 'MxBranches'], {}), '(0, MxBranches)\n', (5999, 6014), True, 'import numpy as np\n'), ((6039, 6063), 'numpy.repeat', 'np.repeat', (['(0)', 'MxBranches'], {}), '(0, MxBranches)\n', (6048, 6063), True, 'import numpy as np\n'), ((6090, 6119), 'numpy.repeat', 'np.repeat', (['np.nan', 'MxBranches'], {}), '(np.nan, MxBranches)\n', (6099, 6119), True, 'import numpy as 
np\n'), ((6595, 6635), 'os.path.isfile', 'os.path.isfile', (['""".dynamicTreeCutSpyFile"""'], {}), "('.dynamicTreeCutSpyFile')\n", (6609, 6635), False, 'import os\n'), ((6830, 6870), 'numpy.array', 'np.array', (['[0.64, 0.73, 0.82, 0.91, 0.95]'], {}), '([0.64, 0.73, 0.82, 0.91, 0.95])\n', (6838, 6870), True, 'import numpy as np\n'), ((7852, 7872), 'numpy.repeat', 'np.repeat', (['(0)', 'nMerge'], {}), '(0, nMerge)\n', (7861, 7872), True, 'import numpy as np\n'), ((7889, 7910), 'numpy.repeat', 'np.repeat', (['(0)', 'nPoints'], {}), '(0, nPoints)\n', (7898, 7910), True, 'import numpy as np\n'), ((8663, 8693), 'numpy.zeros', 'np.zeros', (['chunkSize'], {'dtype': 'int'}), '(chunkSize, dtype=int)\n', (8671, 8693), True, 'import numpy as np\n'), ((24348, 24375), 'numpy.repeat', 'np.repeat', (['(False)', 'nBranches'], {}), '(False, nBranches)\n', (24357, 24375), True, 'import numpy as np\n'), ((24394, 24415), 'numpy.repeat', 'np.repeat', (['(0)', 'nPoints'], {}), '(0, nPoints)\n', (24403, 24415), True, 'import numpy as np\n'), ((25554, 25575), 'numpy.repeat', 'np.repeat', (['(0)', 'nPoints'], {}), '(0, nPoints)\n', (25563, 25575), True, 'import numpy as np\n'), ((25593, 25614), 'numpy.repeat', 'np.repeat', (['(0)', 'nPoints'], {}), '(0, nPoints)\n', (25602, 25614), True, 'import numpy as np\n'), ((25688, 25711), 'numpy.repeat', 'np.repeat', (['(0)', 'nBranches'], {}), '(0, nBranches)\n', (25697, 25711), True, 'import numpy as np\n'), ((2767, 2793), 'numpy.sum', 'np.sum', (['BranchDist'], {'axis': '(1)'}), '(BranchDist, axis=1)\n', (2773, 2793), True, 'import numpy as np\n'), ((2953, 2986), 'numpy.argsort', 'np.argsort', (['PointAverageDistances'], {}), '(PointAverageDistances)\n', (2963, 2986), True, 'import numpy as np\n'), ((3057, 3075), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (3066, 3075), True, 'import numpy as np\n'), ((3134, 3172), 'numpy.sum', 'np.sum', (['BranchDist[Core, Core]'], {'axis': '(1)'}), '(BranchDist[Core, Core], axis=1)\n', (3140, 
3172), True, 'import numpy as np\n'), ((5097, 5117), 'numpy.zeros', 'np.zeros', (['(nMerge + 1)'], {}), '(nMerge + 1)\n', (5105, 5117), True, 'import numpy as np\n'), ((24468, 24504), 'numpy.isnan', 'np.isnan', (['branch_attachHeight[clust]'], {}), '(branch_attachHeight[clust])\n', (24476, 24504), True, 'import numpy as np\n'), ((25444, 25465), 'numpy.repeat', 'np.repeat', (['(0)', 'nPoints'], {}), '(0, nPoints)\n', (25453, 25465), True, 'import numpy as np\n'), ((25637, 25657), 'numpy.arange', 'np.arange', (['nBranches'], {}), '(nBranches)\n', (25646, 25657), True, 'import numpy as np\n'), ((26196, 26214), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (26205, 26214), True, 'import numpy as np\n'), ((26244, 26262), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (26253, 26262), True, 'import numpy as np\n'), ((38639, 38658), 'numpy.sum', 'np.sum', (['(Colors == 0)'], {}), '(Colors == 0)\n', (38645, 38658), True, 'import numpy as np\n'), ((701, 719), 'numpy.sqrt', 'np.sqrt', (['(8 * l + 1)'], {}), '(8 * l + 1)\n', (708, 719), True, 'import numpy as np\n'), ((2420, 2454), 'numpy.sqrt', 'np.sqrt', (['(BranchSize - BaseCoreSize)'], {}), '(BranchSize - BaseCoreSize)\n', (2427, 2454), True, 'import numpy as np\n'), ((2909, 2936), 'numpy.sqrt', 'np.sqrt', (['(nPoints - CoreSize)'], {}), '(nPoints - CoreSize)\n', (2916, 2936), True, 'import numpy as np\n'), ((3008, 3030), 'numpy.arange', 'np.arange', (['EffCoreSize'], {}), '(EffCoreSize)\n', (3017, 3030), True, 'import numpy as np\n'), ((4807, 4828), 'numpy.max', 'np.max', (['dendro_height'], {}), '(dendro_height)\n', (4813, 4828), True, 'import numpy as np\n'), ((4842, 4863), 'numpy.max', 'np.max', (['dendro_height'], {}), '(dendro_height)\n', (4848, 4863), True, 'import numpy as np\n'), ((7966, 7991), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (7975, 7991), True, 'import numpy as np\n'), ((8002, 8027), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], 
{}), '(np.nan, nMerge)\n', (8011, 8027), True, 'import numpy as np\n'), ((8067, 8092), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8076, 8092), True, 'import numpy as np\n'), ((8102, 8127), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8111, 8127), True, 'import numpy as np\n'), ((8164, 8189), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8173, 8189), True, 'import numpy as np\n'), ((8200, 8225), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8209, 8225), True, 'import numpy as np\n'), ((8265, 8290), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8274, 8290), True, 'import numpy as np\n'), ((8300, 8325), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8309, 8325), True, 'import numpy as np\n'), ((8364, 8389), 'numpy.repeat', 'np.repeat', (['np.nan', 'nMerge'], {}), '(np.nan, nMerge)\n', (8373, 8389), True, 'import numpy as np\n'), ((26087, 26106), 'numpy.arange', 'np.arange', (['coresize'], {}), '(coresize)\n', (26096, 26106), True, 'import numpy as np\n'), ((26945, 26972), 'numpy.repeat', 'np.repeat', (['(0)', 'nProperLabels'], {}), '(0, nProperLabels)\n', (26954, 26972), True, 'import numpy as np\n'), ((27000, 27029), 'numpy.repeat', 'np.repeat', (['(0.0)', 'nProperLabels'], {}), '(0.0, nProperLabels)\n', (27009, 27029), True, 'import numpy as np\n'), ((31691, 31718), 'numpy.repeat', 'np.repeat', (['(0)', 'nProperLabels'], {}), '(0, nProperLabels)\n', (31700, 31718), True, 'import numpy as np\n'), ((38906, 38919), 'numpy.array', 'np.array', (['[1]'], {}), '([1])\n', (38914, 38919), True, 'import numpy as np\n'), ((1236, 1254), 'numpy.sqrt', 'np.sqrt', (['(8 * l + 1)'], {}), '(8 * l + 1)\n', (1243, 1254), True, 'import numpy as np\n'), ((1724, 1738), 'numpy.sum', 'np.sum', (['_array'], {}), '(_array)\n', (1730, 1738), True, 'import numpy as np\n'), ((9103, 9145), 
'numpy.append', 'np.append', (['(-dendro_merge[merge,])', 'extender'], {}), '(-dendro_merge[merge,], extender)\n', (9112, 9145), True, 'import numpy as np\n'), ((24701, 24720), 'numpy.arange', 'np.arange', (['coresize'], {}), '(coresize)\n', (24710, 24720), True, 'import numpy as np\n'), ((24870, 24948), 'numpy.logical_and', 'np.logical_and', (['branch_isTopBasic[clust]', '(branch_size[clust] >= minClusterSize)'], {}), '(branch_isTopBasic[clust], branch_size[clust] >= minClusterSize)\n', (24884, 24948), True, 'import numpy as np\n'), ((25057, 25162), 'numpy.logical_and', 'np.logical_and', (['(CoreScatter < maxAbsCoreScatter)', '(branch_attachHeight[clust] - CoreScatter > minAbsGap)'], {}), '(CoreScatter < maxAbsCoreScatter, branch_attachHeight[clust] -\n CoreScatter > minAbsGap)\n', (25071, 25162), True, 'import numpy as np\n'), ((27315, 27344), 'numpy.sum', 'np.sum', (['DistInCluster'], {'axis': '(1)'}), '(DistInCluster, axis=1)\n', (27321, 27344), True, 'import numpy as np\n'), ((35996, 36014), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (36005, 36014), True, 'import numpy as np\n'), ((4590, 4611), 'numpy.max', 'np.max', (['dendro_height'], {}), '(dendro_height)\n', (4596, 4611), True, 'import numpy as np\n'), ((9274, 9308), 'numpy.repeat', 'np.repeat', (['dendro_height[merge]', '(2)'], {}), '(dendro_height[merge], 2)\n', (9283, 9308), True, 'import numpy as np\n'), ((9387, 9421), 'numpy.repeat', 'np.repeat', (['dendro_height[merge]', '(2)'], {}), '(dendro_height[merge], 2)\n', (9396, 9421), True, 'import numpy as np\n'), ((11291, 11324), 'scipy.stats.rankdata', 'rankdata', (['sizes'], {'method': '"""ordinal"""'}), "(sizes, method='ordinal')\n", (11299, 11324), False, 'from scipy.stats import rankdata\n'), ((27114, 27139), 'numpy.arange', 'np.arange', (['(1)', '(nPoints + 1)'], {}), '(1, nPoints + 1)\n', (27123, 27139), True, 'import numpy as np\n'), ((27394, 27413), 'numpy.argmin', 'np.argmin', (['DistSums'], {}), '(DistSums)\n', (27403, 
27413), True, 'import numpy as np\n'), ((30198, 30216), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (30207, 30216), True, 'import numpy as np\n'), ((31503, 31522), 'numpy.sum', 'np.sum', (['(Colors == 0)'], {}), '(Colors == 0)\n', (31509, 31522), True, 'import numpy as np\n'), ((31796, 31814), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (31805, 31814), True, 'import numpy as np\n'), ((32170, 32192), 'numpy.max', 'np.max', (['AveDistInClust'], {}), '(AveDistInClust)\n', (32176, 32192), True, 'import numpy as np\n'), ((38217, 38305), 'numpy.logical_or', 'np.logical_or', (['(nearestDist < ClusterDiam[nearestLabel - 1])', '(nearestDist < maxPamDist)'], {}), '(nearestDist < ClusterDiam[nearestLabel - 1], nearestDist <\n maxPamDist)\n', (38230, 38305), True, 'import numpy as np\n'), ((9810, 9838), 'numpy.min', 'np.min', (['dendro_merge[merge,]'], {}), '(dendro_merge[merge,])\n', (9816, 9838), True, 'import numpy as np\n'), ((10635, 10688), 'numpy.append', 'np.append', (['branch_mergingHeights[clust - 1]', 'extender'], {}), '(branch_mergingHeights[clust - 1], extender)\n', (10644, 10688), True, 'import numpy as np\n'), ((32078, 32107), 'numpy.sum', 'np.sum', (['DistInCluster'], {'axis': '(1)'}), '(DistInCluster, axis=1)\n', (32084, 32107), True, 'import numpy as np\n'), ((34622, 34646), 'numpy.arange', 'np.arange', (['nProperLabels'], {}), '(nProperLabels)\n', (34631, 34646), True, 'import numpy as np\n'), ((36874, 36902), 'numpy.argmin', 'np.argmin', (['UnassdToClustDist'], {}), '(UnassdToClustDist)\n', (36883, 36902), True, 'import numpy as np\n'), ((37333, 37351), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (37342, 37351), True, 'import numpy as np\n'), ((38409, 38423), 'numpy.sum', 'np.sum', (['assign'], {}), '(assign)\n', (38415, 38423), True, 'import numpy as np\n'), ((10108, 10157), 'numpy.append', 'np.append', (['branch_singletons[clust - 1]', 'extender'], {}), '(branch_singletons[clust - 1], extender)\n', 
(10117, 10157), True, 'import numpy as np\n'), ((10219, 10274), 'numpy.append', 'np.append', (['branch_singletonHeights[clust - 1]', 'extender'], {}), '(branch_singletonHeights[clust - 1], extender)\n', (10228, 10274), True, 'import numpy as np\n'), ((11495, 11533), 'numpy.isnan', 'np.isnan', (['branch_singletons[large - 1]'], {}), '(branch_singletons[large - 1])\n', (11503, 11533), True, 'import numpy as np\n'), ((11569, 11588), 'numpy.arange', 'np.arange', (['sizes[1]'], {}), '(sizes[1])\n', (11578, 11588), True, 'import numpy as np\n'), ((11633, 11671), 'numpy.isnan', 'np.isnan', (['branch_singletons[small - 1]'], {}), '(branch_singletons[small - 1])\n', (11641, 11671), True, 'import numpy as np\n'), ((11707, 11726), 'numpy.arange', 'np.arange', (['sizes[0]'], {}), '(sizes[0])\n', (11716, 11726), True, 'import numpy as np\n'), ((14853, 14878), 'numpy.sum', 'np.sum', (['SmallerScores[1:]'], {}), '(SmallerScores[1:])\n', (14859, 14878), True, 'import numpy as np\n'), ((14958, 15007), 'numpy.logical_or', 'np.logical_or', (['SmallerScores[2]', 'SmallerScores[3]'], {}), '(SmallerScores[2], SmallerScores[3])\n', (14971, 15007), True, 'import numpy as np\n'), ((16366, 16385), 'numpy.arange', 'np.arange', (['sizes[1]'], {}), '(sizes[1])\n', (16375, 16385), True, 'import numpy as np\n'), ((16446, 16465), 'numpy.arange', 'np.arange', (['sizes[0]'], {}), '(sizes[0])\n', (16455, 16465), True, 'import numpy as np\n'), ((19669, 19722), 'numpy.append', 'np.append', (['branch_mergingHeights[large - 1]', 'extender'], {}), '(branch_mergingHeights[large - 1], extender)\n', (19678, 19722), True, 'import numpy as np\n'), ((22178, 22191), 'numpy.sum', 'np.sum', (['sizes'], {}), '(sizes)\n', (22184, 22191), True, 'import numpy as np\n'), ((27483, 27502), 'numpy.argmin', 'np.argmin', (['DistSums'], {}), '(DistSums)\n', (27492, 27502), True, 'import numpy as np\n'), ((27974, 27992), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (27983, 27992), True, 'import numpy as 
np\n'), ((28097, 28127), 'numpy.unique', 'np.unique', (['onBranch[InCluster]'], {}), '(onBranch[InCluster])\n', (28106, 28127), True, 'import numpy as np\n'), ((28738, 28769), 'numpy.arange', 'np.arange', (['(1)', '(nProperLabels + 1)'], {}), '(1, nProperLabels + 1)\n', (28747, 28769), True, 'import numpy as np\n'), ((30770, 30794), 'numpy.arange', 'np.arange', (['nProperLabels'], {}), '(nProperLabels)\n', (30779, 30794), True, 'import numpy as np\n'), ((31074, 31103), 'numpy.argmin', 'np.argmin', (['UnassdToMedoidDist'], {}), '(UnassdToMedoidDist)\n', (31083, 31103), True, 'import numpy as np\n'), ((32930, 32960), 'numpy.unique', 'np.unique', (['onBranch[InCluster]'], {}), '(onBranch[InCluster])\n', (32939, 32960), True, 'import numpy as np\n'), ((34684, 34702), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (34693, 34702), True, 'import numpy as np\n'), ((35072, 35104), 'numpy.mean', 'np.mean', (['DistSClustClust'], {'axis': '(0)'}), '(DistSClustClust, axis=0)\n', (35079, 35104), True, 'import numpy as np\n'), ((35329, 35352), 'numpy.argmin', 'np.argmin', (['MeanMeanDist'], {}), '(MeanMeanDist)\n', (35338, 35352), True, 'import numpy as np\n'), ((36492, 36517), 'numpy.unique', 'np.unique', (['labelsOnBranch'], {}), '(labelsOnBranch)\n', (36501, 36517), True, 'import numpy as np\n'), ((9649, 9677), 'numpy.max', 'np.max', (['dendro_merge[merge,]'], {}), '(dendro_merge[merge,])\n', (9655, 9677), True, 'import numpy as np\n'), ((15474, 15498), 'numpy.sum', 'np.sum', (['LargerScores[1:]'], {}), '(LargerScores[1:])\n', (15480, 15498), True, 'import numpy as np\n'), ((15660, 15707), 'numpy.logical_or', 'np.logical_or', (['LargerScores[2]', 'LargerScores[3]'], {}), '(LargerScores[2], LargerScores[3])\n', (15673, 15707), True, 'import numpy as np\n'), ((18877, 18895), 'numpy.arange', 'np.arange', (['nsl', 'ns'], {}), '(nsl, ns)\n', (18886, 18895), True, 'import numpy as np\n'), ((18927, 18941), 'numpy.arange', 'np.arange', (['nss'], {}), '(nss)\n', (18936, 
18941), True, 'import numpy as np\n'), ((19002, 19020), 'numpy.arange', 'np.arange', (['nsl', 'ns'], {}), '(nsl, ns)\n', (19011, 19020), True, 'import numpy as np\n'), ((19058, 19072), 'numpy.arange', 'np.arange', (['nss'], {}), '(nss)\n', (19067, 19072), True, 'import numpy as np\n'), ((21473, 21507), 'numpy.append', 'np.append', (['addBasicClusters', 'large'], {}), '(addBasicClusters, large)\n', (21482, 21507), True, 'import numpy as np\n'), ((21585, 21645), 'numpy.append', 'np.append', (['addBasicClusters', 'branch_basicClusters[large - 1]'], {}), '(addBasicClusters, branch_basicClusters[large - 1])\n', (21594, 21645), True, 'import numpy as np\n'), ((22022, 22056), 'numpy.repeat', 'np.repeat', (['dendro_height[merge]', '(2)'], {}), '(dendro_height[merge], 2)\n', (22031, 22056), True, 'import numpy as np\n'), ((23358, 23376), 'numpy.arange', 'np.arange', (['nbl', 'nb'], {}), '(nbl, nb)\n', (23367, 23376), True, 'import numpy as np\n'), ((23768, 23821), 'numpy.append', 'np.append', (['branch_mergingHeights[large - 1]', 'extender'], {}), '(branch_mergingHeights[large - 1], extender)\n', (23777, 23821), True, 'import numpy as np\n'), ((29458, 29479), 'numpy.argmin', 'np.argmin', (['DistToMeds'], {}), '(DistToMeds)\n', (29467, 29479), True, 'import numpy as np\n'), ((32853, 32871), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (32862, 32871), True, 'import numpy as np\n'), ((33696, 33728), 'numpy.mean', 'np.mean', (['DistSClustClust'], {'axis': '(0)'}), '(DistSClustClust, axis=0)\n', (33703, 33728), True, 'import numpy as np\n'), ((33964, 33987), 'numpy.argmin', 'np.argmin', (['MeanMeanDist'], {}), '(MeanMeanDist)\n', (33973, 33987), True, 'import numpy as np\n'), ((34828, 34846), 'numpy.arange', 'np.arange', (['nPoints'], {}), '(nPoints)\n', (34837, 34846), True, 'import numpy as np\n'), ((18638, 18663), 'numpy.repeat', 'np.repeat', (['extender', 'nExt'], {}), '(extender, nExt)\n', (18647, 18663), True, 'import numpy as np\n'), ((18776, 18801), 
'numpy.repeat', 'np.repeat', (['extender', 'nExt'], {}), '(extender, nExt)\n', (18785, 18801), True, 'import numpy as np\n'), ((23254, 23279), 'numpy.repeat', 'np.repeat', (['extender', 'nExt'], {}), '(extender, nExt)\n', (23263, 23279), True, 'import numpy as np\n'), ((29258, 29277), 'numpy.argmin', 'np.argmin', (['DistSums'], {}), '(DistSums)\n', (29267, 29277), True, 'import numpy as np\n'), ((33455, 33480), 'numpy.unique', 'np.unique', (['labelsOnBranch'], {}), '(labelsOnBranch)\n', (33464, 33480), True, 'import numpy as np\n')] |
import pickle
import numpy as np
import pygame
from time import sleep
#Duplicate of the Arduino map() function (needed for processing autoencoder data)
def map(x, in_min, in_max, out_min, out_max):
    """Linearly rescale *x* from [in_min, in_max] into [out_min, out_max].

    Mirrors Arduino's integer map(): the scaling uses floor division, so
    integer inputs produce integer (truncated) outputs.
    NOTE: intentionally shadows the builtin map(), as in the original sketch.
    """
    in_span = in_max - in_min
    out_span = out_max - out_min
    return out_min + (x - in_min) * out_span // in_span
class number:
    """One MNIST digit: a 28x28 pixel matrix, its integer label, and a
    one-hot-style target vector for the 10 digit classes."""

    def __init__(self, imageData, number, isMatrix=True, isAutoEncoderOutput = False):
        """Wrap an MNIST image.

        imageData           -- pixel data: a 28x28 matrix (list of rows), or a
                               flat 784-element list when isMatrix is False.
        number              -- integer label (0-9) of the digit.
        isMatrix            -- True if imageData is already a list of rows.
        isAutoEncoderOutput -- if True, clamp/quantize raw autoencoder output
                               into [0, 1] before storing.
        """
        if isAutoEncoderOutput:
            # NOTE(review): this branch indexes imageData flat, so autoencoder
            # output is presumably passed with isMatrix=False -- confirm.
            for px in range(len(imageData)):
                # Clamp negative activations to zero.
                if imageData[px] < 0:
                    new_px = 0
                else:
                    new_px = imageData[px]
                # Quantize to percent steps with the module-level Arduino-style
                # map(), then clamp the top end to 1.0.
                new_px = map(new_px, 0, 1, 0, 100)/100.0
                if new_px > 1: new_px = 1
                imageData[px] = new_px
        if not isMatrix:
            #Assume the image is 28px x 28px: split the flat 784-vector
            #into 28 rows of 28 pixels each.
            matrix = [(imageData[28 * ind: 28 * (ind + 1)]) for ind in range(28)]
            imageData = matrix
        self.imageData = imageData  # 28x28 matrix of pixel intensities
        self.number = number        # integer class label
        # One-hot-style target vector for the 10 digit classes.
        # NOTE(review): the hot entry is 10, not 1 -- confirm downstream
        # training code expects this scale.
        self.activation = [0 for x in range(10)]
        self.activation[number] = 10
    #Display the number in a pygame window
    def display(self):
        """Open a 280x280 pygame window and draw the digit in greyscale,
        with the label rendered in the top-left corner.

        Returns early (after pygame.quit()) if the window close button
        was pressed.
        """
        #Set up the window
        pygame.init()
        screenSize = 280
        screen = pygame.display.set_mode((screenSize, screenSize))
        pygame.display.set_caption("MNIST Data Display")
        scalar = screenSize / 28 #Set the scalar (pixels per MNIST cell)
        #Zero out the position
        x = 0
        y = 0
        #Run through the data, printing one greyscale rectangle per pixel
        for row in self.imageData:
            for col in row:
                # col is assumed to be in [0, 1]; scale to 0-255 grey.
                pygame.draw.rect(screen, (int(255 * col), int(255 * col), int(255 * col)), (x, y, scalar, scalar))
                x += scalar #Increment the x value
            #Reset x and increment y
            x = 0
            y += scalar
        #Set up the label text
        numberFont = pygame.font.Font(None, 35)
        numberText = numberFont.render(str(self.number), 1, (255,255,255))
        screen.blit(numberText, (5, 5))
        #Make sure the exit button hasn't been pressed
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.quit()
                return None
        pygame.display.flip() #Show the image
    #Same as display above, but modified to show output strength
    def display_autoencoder(self):
        """Like display(), but color-codes signed activation strength:
        red channel for negative values, green for positive, solid blue
        for exactly zero.

        NOTE(review): expects raw (possibly negative) activations, so this
        is presumably called on a number built WITHOUT the
        isAutoEncoderOutput rescaling -- confirm.
        """
        #Set up the window
        pygame.init()
        screenSize = 280
        screen = pygame.display.set_mode((screenSize, screenSize))
        pygame.display.set_caption("MNIST Data Display")
        scalar = screenSize / 28 #Set the scalar (pixels per MNIST cell)
        #Zero out the position
        x = 0
        y = 0
        #Run through the data, printing one colored rectangle per pixel
        for row in self.imageData:
            for col in row:
                color = (255,255,255)
                #If the value is negative, edit the red channel
                if col < 0:
                    # Map [-0.1, 0] onto [255, 0] red, then clamp to 0-255.
                    col = abs(map(col, -0.1, 0, 255, 0))
                    if col > 255: col = 255
                    if col < 0: col = 0
                    color = (int(col), 0, 0)
                #If the value is positive, edit the green channel
                elif col > 0:
                    # Map [0, 1] onto [0, 255] green, then clamp to 0-255.
                    col = abs(map(col, 0, 1, 0, 255))
                    if col > 255: col = 255
                    if col < 0: col = 0
                    color = (0, int(col), 0)
                #If the value is exactly zero, edit the blue channel
                else:
                    color = (0,0,200)
                pygame.draw.rect(screen, color, (x, y, scalar, scalar))
                x += scalar #Increment the x value
            #Reset x and increment y
            x = 0
            y += scalar
        #Set up the label text
        numberFont = pygame.font.Font(None, 35)
        numberText = numberFont.render(str(self.number), 1, (255,255,255))
        screen.blit(numberText, (5, 5))
        #Make sure the exit button hasn't been pressed
        for e in pygame.event.get():
            if e.type == pygame.QUIT:
                pygame.quit()
                return None
        pygame.display.flip() #Show the image
def load_MNIST():
    """Load the pickled MNIST dataset from "mnist.pkl".

    Populates the module-level trainingSet / validationSet / testSet lists
    with `number` objects and also returns them.

    Returns:
        (trainingSet, validationSet, testSet) -- lists of `number` objects.
    """
    global trainingSet, validationSet, testSet
    # NOTE(review): mnist.pkl is assumed to be a Python-3-compatible pickle;
    # the classic python2 MNIST pickle needs encoding="latin1" -- confirm.
    # Context manager ensures the file handle is closed even if unpickling
    # raises (the original leaked it on error).
    with open("mnist.pkl", "rb") as f:
        training_set, validation_set, test_set = pickle.load(f)
    # Each raw set is (flattened_images, labels); pair every reshaped
    # 28x28 image with its integer label and wrap it in a `number`.
    trainingSet = [number(img, lbl)
                   for img, lbl in zip(get_images(training_set), training_set[1])]
    validationSet = [number(img, lbl)
                     for img, lbl in zip(get_images(validation_set), validation_set[1])]
    testSet = [number(img, lbl)
               for img, lbl in zip(get_images(test_set), test_set[1])]
    return trainingSet, validationSet, testSet
"""
The following function is based on <NAME>'s code for displaying
MNIST digits. The code can be found at the following URL:
https://github.com/colah/nnftd/blob/master/fig/chap3/mnist.py
"""
def get_images(training_set):
    """Return the images of an MNIST split as a list of 2-d numpy arrays.

    training_set -- tuple whose first element is a sequence of flattened
                    784-element image vectors.
    Each vector is reshaped row-major into 28 rows of 28 pixels.
    """
    return [np.reshape(vec, (-1, 28)) for vec in training_set[0]]
#Show a specific image from a dataset
def showDatasetSample(sample):
    """Render one (image, one-hot label) sample in a pygame window."""
    # Decode the one-hot label, e.g. [0, 0, 1, 0, ...] -> 2
    digit = list(sample[1]).index(1)
    number(sample[0], digit, False).display()
#Quickly run through the images in a dataset
def dramaticShowAll(ds):
    """Flash every image in dataset *ds*, holding each for half a second."""
    for sample in ds:
        sample.display()
        sleep(0.5)
# NOTE(review): calling load_MNIST() at module scope makes importing this
# file eagerly read mnist.pkl as a side effect; importers presumably rely
# on the populated globals -- confirm before moving this under
# `if __name__ == "__main__":`.
load_MNIST()
#dramaticShowAll(testSet)
| [
"numpy.reshape",
"pygame.init",
"pygame.quit",
"pygame.event.get",
"pygame.display.set_mode",
"pygame.display.flip",
"pickle.load",
"time.sleep",
"pygame.draw.rect",
"pygame.display.set_caption",
"pygame.font.Font"
] | [((4453, 4467), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4464, 4467), False, 'import pickle\n'), ((1205, 1218), 'pygame.init', 'pygame.init', ([], {}), '()\n', (1216, 1218), False, 'import pygame\n'), ((1261, 1310), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screenSize, screenSize)'], {}), '((screenSize, screenSize))\n', (1284, 1310), False, 'import pygame\n'), ((1319, 1367), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""MNIST Data Display"""'], {}), "('MNIST Data Display')\n", (1345, 1367), False, 'import pygame\n'), ((1886, 1912), 'pygame.font.Font', 'pygame.font.Font', (['None', '(35)'], {}), '(None, 35)\n', (1902, 1912), False, 'import pygame\n'), ((2101, 2119), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (2117, 2119), False, 'import pygame\n'), ((2234, 2255), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (2253, 2255), False, 'import pygame\n'), ((2408, 2421), 'pygame.init', 'pygame.init', ([], {}), '()\n', (2419, 2421), False, 'import pygame\n'), ((2464, 2513), 'pygame.display.set_mode', 'pygame.display.set_mode', (['(screenSize, screenSize)'], {}), '((screenSize, screenSize))\n', (2487, 2513), False, 'import pygame\n'), ((2522, 2570), 'pygame.display.set_caption', 'pygame.display.set_caption', (['"""MNIST Data Display"""'], {}), "('MNIST Data Display')\n", (2548, 2570), False, 'import pygame\n'), ((3829, 3855), 'pygame.font.Font', 'pygame.font.Font', (['None', '(35)'], {}), '(None, 35)\n', (3845, 3855), False, 'import pygame\n'), ((4044, 4062), 'pygame.event.get', 'pygame.event.get', ([], {}), '()\n', (4060, 4062), False, 'import pygame\n'), ((4177, 4198), 'pygame.display.flip', 'pygame.display.flip', ([], {}), '()\n', (4196, 4198), False, 'import pygame\n'), ((5648, 5671), 'numpy.reshape', 'np.reshape', (['f', '(-1, 28)'], {}), '(f, (-1, 28))\n', (5658, 5671), True, 'import numpy as np\n'), ((6174, 6184), 'time.sleep', 'sleep', (['(0.5)'], {}), '(0.5)\n', (6179, 6184), False, 
'from time import sleep\n'), ((2175, 2188), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (2186, 2188), False, 'import pygame\n'), ((3595, 3650), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'color', '(x, y, scalar, scalar)'], {}), '(screen, color, (x, y, scalar, scalar))\n', (3611, 3650), False, 'import pygame\n'), ((4118, 4131), 'pygame.quit', 'pygame.quit', ([], {}), '()\n', (4129, 4131), False, 'import pygame\n')] |
#!/usr/bin/env python
# encoding: utf-8
'''
@project : MSRGCN
@file : config.py
@author : Droliven
@contact : <EMAIL>
@ide : PyCharm
@time : 2021-07-27 16:56
'''
import os
import getpass
import torch
import numpy as np
class Config:
    """Experiment configuration for MSRGCN human-motion prediction.

    Stores training hyper-parameters, dataset-specific joint-index tables
    used to pool the skeleton across scales (32/22/12/7/4 joints for H3.6M,
    38/25/12/7/4 for CMU), and creates the checkpoint directory layout on
    construction.
    """

    def __init__(self, exp_name="h36m", input_n=10, output_n=10, dct_n=15, device="cuda:0", num_works=0, test_manner="all"):
        """Initialize hyper-parameters and dataset index tables.

        :param exp_name: dataset name; one of "h36m", "cmu", "3dpw"
        :param input_n: number of observed input frames
        :param output_n: number of frames to predict (10 = short term, 25 = long term)
        :param dct_n: number of DCT coefficients kept
        :param device: torch device string, e.g. "cuda:0"
        :param num_works: number of dataloader workers
        :param test_manner: evaluation manner (e.g. "all")
        """
        self.platform = getpass.getuser()
        # NOTE(review): "3dpw" passes this check but no index tables are built
        # for it below — confirm whether 3dpw support is intentionally partial.
        assert exp_name in ["h36m", "cmu", "3dpw"]
        self.exp_name = exp_name

        # Training hyper-parameters.
        self.p_dropout = 0.1
        self.train_batch_size = 16
        self.test_batch_size = 128
        self.lr = 2e-4
        self.lr_decay = 0.98
        self.n_epoch = 5000
        self.leaky_c = 0.2
        self.test_manner = test_manner

        # Sequence layout.
        self.input_n = input_n
        self.output_n = output_n
        self.seq_len = input_n + output_n
        self.dct_n = dct_n

        # Prediction-horizon frames at which errors are reported.
        if self.output_n == 25:
            self.frame_ids = [1, 3, 7, 9, 13, 24]
        elif self.output_n == 10:
            self.frame_ids = [1, 3, 7, 9]
        # NOTE(review): any other output_n leaves frame_ids unset — callers
        # would hit an AttributeError later; confirm intended values.

        if exp_name == "h36m":
            self.origin_noden = 32
            self.final_out_noden = 22
            # Indices of the 22 informative joints within the raw 32-joint skeleton.
            self.dim_used_3d = [2, 3, 4, 5, 7, 8, 9, 10, 12, 13, 14, 15, 17, 18, 19, 21, 22, 25, 26, 27, 29, 30]
            # Joints duplicated when expanding 22 joints back to 32.
            self.dim_repeat_22 = [9, 9, 14, 16, 19, 21]
            self.dim_repeat_32 = [16, 24, 20, 23, 28, 31]
            # Grouping tables for pooling across skeleton scales (22->12->7->4).
            self.Index2212 = [[0], [1, 2, 3], [4], [5, 6, 7], [8, 9], [10, 11], [12], [13], [14, 15, 16], [17], [18], [19, 20, 21]]
            self.Index127 = [[0, 1], [2, 3], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11]]
            self.Index74 = [[0, 2], [1, 2], [3, 4], [5, 6]]
            # Bone lists (parent I, child J) and left/right flags for plotting.
            self.I32_plot = np.array(
                [0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16, 17, 18, 19, 20, 19, 22, 12, 24, 25, 26, 27,
                 28,
                 27, 30])
            self.J32_plot = np.array(
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
                 29,
                 30, 31])
            self.LR32_plot = np.array(
                [0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
            self.I22_plot = np.array([8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 9, 12, 13, 14, 14, 9, 17, 18, 19, 19])
            self.J22_plot = np.array([0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21])
            self.LR22_plot = np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
            self.I12_plot = np.array([4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10])
            self.J12_plot = np.array([0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11])
            self.LR12_plot = np.array([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])
            self.I7_plot = np.array([2, 2, 2, 3, 2, 5])
            self.J7_plot = np.array([0, 1, 3, 4, 5, 6])
            self.LR7_plot = np.array([0, 1, 1, 1, 0, 0])
            self.I4_plot = np.array([0, 1])
            self.J4_plot = np.array([3, 2])
            self.LR4_plot = np.array([0, 1])
        elif exp_name == "cmu":
            self.origin_noden = 38
            self.final_out_noden = 25
            self.dim_used_3d = [3, 4, 5, 6, 9, 10, 11, 12, 14, 15, 17, 18, 19, 21, 22, 23, 25, 26, 28, 30, 31, 32, 34, 35, 37]
            self.dim_repeat_22 = [9, 9, 9, 15, 15, 21, 21]
            self.dim_repeat_32 = [16, 20, 29, 24, 27, 33, 36]
            self.Index2212 = [[0], [1, 2, 3], [4], [5, 6, 7], [8, 9], [10, 11, 12], [13], [14, 15], [16, 17, 18], [19], [20, 21], [22, 23, 24]]  # actually Index2512; the name is kept for uniformity with h36m
            self.Index127 = [[0, 1], [2, 3], [4, 5], [6, 7], [7, 8], [9, 10], [10, 11]]
            self.Index74 = [[0, 2], [1, 2], [3, 4], [5, 6]]
            self.Index2510 = [[0], [1, 2, 3], [4], [5, 6, 7], [8, 9], [10, 11, 12], [14, 15], [16, 17, 18], [20, 21],
                              [22, 23, 24]]
            self.Index105 = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
            self.Index53 = [[2], [0, 3], [1, 4]]
            self.I32_plot = np.array(
                [0, 1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 0, 13, 14, 15, 16, 17, 18, 16, 20, 21, 22, 23, 24, 25, 23, 27,
                 16, 29, 30, 31, 32, 33, 34, 32, 36])
            self.J32_plot = np.array(
                [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
                 29, 30, 31, 32, 33, 34, 35, 36, 37])
            self.LR32_plot = np.array(
                [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
                 1, 1, 1])
            self.I22_plot = np.array([8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 11, 9, 13, 14, 15, 16, 15, 9, 19, 20, 21, 22, 21])
            self.J22_plot = np.array([0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24])
            self.LR22_plot = np.array([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1])
            self.I12_plot = np.array([4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10])
            self.J12_plot = np.array([0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11])
            self.LR12_plot = np.array([0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1])
            self.I7_plot = np.array([2, 2, 2, 3, 2, 5])
            self.J7_plot = np.array([0, 1, 3, 4, 5, 6])
            self.LR7_plot = np.array([0, 1, 0, 0, 1, 1])
            self.I4_plot = np.array([0, 1])
            self.J4_plot = np.array([2, 3])
            self.LR4_plot = np.array([0, 1])

        self.device = device
        self.num_works = num_works
        self.ckpt_dir = os.path.join("./ckpt/", exp_name, "short_term" if self.output_n == 10 else "long_term")
        # exist_ok avoids the check-then-create race of the previous
        # os.path.exists + os.makedirs pattern.
        os.makedirs(os.path.join(self.ckpt_dir, "models"), exist_ok=True)
        os.makedirs(os.path.join(self.ckpt_dir, "images"), exist_ok=True)
        # Raw strings keep the Windows backslashes literal instead of relying on
        # invalid escape sequences (a DeprecationWarning since Python 3.6);
        # the resulting byte values are unchanged.
        if self.exp_name == "h36m":
            self.base_data_dir = os.path.join(r"F:\model_report_data\mocap_motion_prediction\data\human36mData3D\others", r"h3.6m\dataset")
        elif self.exp_name == "cmu":
            self.base_data_dir = os.path.join(r"F:\model_report_data\mocap_motion_prediction", r"data\cmu")
| [
"getpass.getuser",
"numpy.array",
"os.path.join"
] | [((397, 414), 'getpass.getuser', 'getpass.getuser', ([], {}), '()\n', (412, 414), False, 'import getpass\n'), ((5601, 5692), 'os.path.join', 'os.path.join', (['"""./ckpt/"""', 'exp_name', "('short_term' if self.output_n == 10 else 'long_term')"], {}), "('./ckpt/', exp_name, 'short_term' if self.output_n == 10 else\n 'long_term')\n", (5613, 5692), False, 'import os\n'), ((1681, 1809), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16, 17, 18, 19, 20, \n 19, 22, 12, 24, 25, 26, 27, 28, 27, 30]'], {}), '([0, 1, 2, 3, 4, 0, 6, 7, 8, 9, 0, 11, 12, 13, 14, 12, 16, 17, 18, \n 19, 20, 19, 22, 12, 24, 25, 26, 27, 28, 27, 30])\n', (1689, 1809), True, 'import numpy as np\n'), ((1884, 2013), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31])\n', (1892, 2013), True, 'import numpy as np\n'), ((2090, 2197), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,\n 0, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1,\n 1, 0, 0, 0, 0, 0, 0, 0, 0])\n', (2098, 2197), True, 'import numpy as np\n'), ((2240, 2327), 'numpy.array', 'np.array', (['[8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 9, 12, 13, 14, 14, 9, 17, 18, 19, 19]'], {}), '([8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 9, 12, 13, 14, 14, 9, 17, 18, \n 19, 19])\n', (2248, 2327), True, 'import numpy as np\n'), ((2351, 2440), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21])\n', (2359, 2440), True, 'import numpy as np\n'), ((2466, 2539), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0]'], {}), '([0, 0, 0, 0, 
1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])\n', (2474, 2539), True, 'import numpy as np\n'), ((2569, 2613), 'numpy.array', 'np.array', (['[4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10]'], {}), '([4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10])\n', (2577, 2613), True, 'import numpy as np\n'), ((2642, 2687), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11]'], {}), '([0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11])\n', (2650, 2687), True, 'import numpy as np\n'), ((2717, 2760), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0]'], {}), '([0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0])\n', (2725, 2760), True, 'import numpy as np\n'), ((2789, 2817), 'numpy.array', 'np.array', (['[2, 2, 2, 3, 2, 5]'], {}), '([2, 2, 2, 3, 2, 5])\n', (2797, 2817), True, 'import numpy as np\n'), ((2845, 2873), 'numpy.array', 'np.array', (['[0, 1, 3, 4, 5, 6]'], {}), '([0, 1, 3, 4, 5, 6])\n', (2853, 2873), True, 'import numpy as np\n'), ((2902, 2930), 'numpy.array', 'np.array', (['[0, 1, 1, 1, 0, 0]'], {}), '([0, 1, 1, 1, 0, 0])\n', (2910, 2930), True, 'import numpy as np\n'), ((2959, 2975), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (2967, 2975), True, 'import numpy as np\n'), ((3003, 3019), 'numpy.array', 'np.array', (['[3, 2]'], {}), '([3, 2])\n', (3011, 3019), True, 'import numpy as np\n'), ((3048, 3064), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (3056, 3064), True, 'import numpy as np\n'), ((6023, 6143), 'os.path.join', 'os.path.join', (['"""F:\\\\model_report_data\\\\mocap_motion_prediction\\\\data\\\\human36mData3D\\\\others"""', '"""h3.6m\\\\dataset"""'], {}), "(\n 'F:\\\\model_report_data\\\\mocap_motion_prediction\\\\data\\\\human36mData3D\\\\others'\n , 'h3.6m\\\\dataset')\n", (6035, 6143), False, 'import os\n'), ((4046, 4198), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 0, 13, 14, 15, 16, 17, 18, 16, 20, \n 21, 22, 23, 24, 25, 23, 27, 16, 29, 30, 31, 32, 33, 34, 32, 36]'], {}), '([0, 1, 2, 3, 4, 5, 0, 7, 8, 9, 10, 11, 0, 13, 14, 15, 
16, 17, 18, \n 16, 20, 21, 22, 23, 24, 25, 23, 27, 16, 29, 30, 31, 32, 33, 34, 32, 36])\n', (4054, 4198), True, 'import numpy as np\n'), ((4256, 4409), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37]'], {}), '([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37])\n', (4264, 4409), True, 'import numpy as np\n'), ((4469, 4594), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,\n 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])\n', (4477, 4594), True, 'import numpy as np\n'), ((4654, 4753), 'numpy.array', 'np.array', (['[8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 11, 9, 13, 14, 15, 16, 15, 9, 19, 20, 21,\n 22, 21]'], {}), '([8, 0, 1, 2, 8, 4, 5, 6, 8, 9, 10, 11, 9, 13, 14, 15, 16, 15, 9, \n 19, 20, 21, 22, 21])\n', (4662, 4753), True, 'import numpy as np\n'), ((4777, 4878), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21,\n 22, 23, 24]'], {}), '([0, 1, 2, 3, 4, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,\n 20, 21, 22, 23, 24])\n', (4785, 4878), True, 'import numpy as np\n'), ((4904, 4990), 'numpy.array', 'np.array', (['[0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]'], {}), '([0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1,\n 1, 1])\n', (4912, 4990), True, 'import numpy as np\n'), ((5016, 5060), 'numpy.array', 'np.array', (['[4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10]'], {}), '([4, 0, 4, 2, 4, 4, 6, 7, 4, 9, 10])\n', (5024, 5060), True, 'import numpy as np\n'), ((5089, 5134), 'numpy.array', 'np.array', (['[0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11]'], {}), '([0, 1, 2, 3, 5, 6, 7, 8, 9, 10, 11])\n', (5097, 5134), 
True, 'import numpy as np\n'), ((5164, 5207), 'numpy.array', 'np.array', (['[0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1]'], {}), '([0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1])\n', (5172, 5207), True, 'import numpy as np\n'), ((5236, 5264), 'numpy.array', 'np.array', (['[2, 2, 2, 3, 2, 5]'], {}), '([2, 2, 2, 3, 2, 5])\n', (5244, 5264), True, 'import numpy as np\n'), ((5292, 5320), 'numpy.array', 'np.array', (['[0, 1, 3, 4, 5, 6]'], {}), '([0, 1, 3, 4, 5, 6])\n', (5300, 5320), True, 'import numpy as np\n'), ((5349, 5377), 'numpy.array', 'np.array', (['[0, 1, 0, 0, 1, 1]'], {}), '([0, 1, 0, 0, 1, 1])\n', (5357, 5377), True, 'import numpy as np\n'), ((5406, 5422), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (5414, 5422), True, 'import numpy as np\n'), ((5450, 5466), 'numpy.array', 'np.array', (['[2, 3]'], {}), '([2, 3])\n', (5458, 5466), True, 'import numpy as np\n'), ((5495, 5511), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (5503, 5511), True, 'import numpy as np\n'), ((5717, 5754), 'os.path.join', 'os.path.join', (['self.ckpt_dir', '"""models"""'], {}), "(self.ckpt_dir, 'models')\n", (5729, 5754), False, 'import os\n'), ((5781, 5818), 'os.path.join', 'os.path.join', (['self.ckpt_dir', '"""models"""'], {}), "(self.ckpt_dir, 'models')\n", (5793, 5818), False, 'import os\n'), ((5850, 5887), 'os.path.join', 'os.path.join', (['self.ckpt_dir', '"""images"""'], {}), "(self.ckpt_dir, 'images')\n", (5862, 5887), False, 'import os\n'), ((5914, 5951), 'os.path.join', 'os.path.join', (['self.ckpt_dir', '"""images"""'], {}), "(self.ckpt_dir, 'images')\n", (5926, 5951), False, 'import os\n'), ((6198, 6273), 'os.path.join', 'os.path.join', (['"""F:\\\\model_report_data\\\\mocap_motion_prediction"""', '"""data\\\\cmu"""'], {}), "('F:\\\\model_report_data\\\\mocap_motion_prediction', 'data\\\\cmu')\n", (6210, 6273), False, 'import os\n')] |
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from numpy.testing import assert_array_almost_equal
from auxiliary.functions_daniel import (
rastrigin_instance,
griewank_instance,
levi_no_13_instance,
rosenbrock_instance,
)
def test_rastrigin():
    """Rastrigin values at the shared sample points match precomputed results."""
    points = create_inputs()
    func = rastrigin_instance(3)
    expected = np.array([15, 111, 139, 31, 2, 46, 1180, 3])
    actual = np.array([func.value(p) for p in points])
    assert_array_almost_equal(expected, actual)
def test_griewank():
    """Griewank values (rounded to 3 decimals) match precomputed results."""
    points = create_inputs()
    func = griewank_instance(3)
    expected = np.array([2.084, 2.470, 2.774, 2.113, 1.245, 2.608, 7.256, 1.599])
    actual = np.round([func.value(p) for p in points], 3)
    assert_array_almost_equal(expected, actual)
def test_levi_no_13():
    """Levi N.13 values at the shared sample points match precomputed results."""
    points = create_inputs()
    func = levi_no_13_instance(3)
    expected = np.array([6, 78, 134, 22, 3, 63, 1065, 6])
    actual = np.array([func.value(p) for p in points])
    assert_array_almost_equal(expected, actual)
def test_rosenbrock():
    """Rosenbrock values at the shared sample points match precomputed results."""
    points = create_inputs()
    func = rosenbrock_instance(3)
    expected = np.array([202, 120242, 380598, 55418, 202, 50527, 18909781, 505])
    actual = np.array([func.value(p) for p in points])
    assert_array_almost_equal(expected, actual)
def create_inputs():
    """Return the shared 8x3 grid of integer sample points used by all tests."""
    rows = [
        (1, 2, 3),
        (5, 6, 7),
        (-8, 5, 7),
        (5, 2, -1),
        (0, 1, 0),
        (2, -4, -5),
        (19, 17, 23),
        (1, -1, 0),
    ]
    return np.array(rows)
if __name__ == "__main__":
    # Run the full suite in the original order when executed as a script.
    for check in (test_rastrigin, test_rosenbrock, test_griewank, test_levi_no_13):
        check()
| [
"auxiliary.functions_daniel.rosenbrock_instance",
"auxiliary.functions_daniel.rastrigin_instance",
"numpy.testing.assert_array_almost_equal",
"auxiliary.functions_daniel.levi_no_13_instance",
"auxiliary.functions_daniel.griewank_instance",
"numpy.array"
] | [((347, 368), 'auxiliary.functions_daniel.rastrigin_instance', 'rastrigin_instance', (['(3)'], {}), '(3)\n', (365, 368), False, 'from auxiliary.functions_daniel import rastrigin_instance, griewank_instance, levi_no_13_instance, rosenbrock_instance\n'), ((389, 433), 'numpy.array', 'np.array', (['[15, 111, 139, 31, 2, 46, 1180, 3]'], {}), '([15, 111, 139, 31, 2, 46, 1180, 3])\n', (397, 433), True, 'import numpy as np\n'), ((504, 559), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected_outs', 'computed_outs'], {}), '(expected_outs, computed_outs)\n', (529, 559), False, 'from numpy.testing import assert_array_almost_equal\n'), ((627, 647), 'auxiliary.functions_daniel.griewank_instance', 'griewank_instance', (['(3)'], {}), '(3)\n', (644, 647), False, 'from auxiliary.functions_daniel import rastrigin_instance, griewank_instance, levi_no_13_instance, rosenbrock_instance\n'), ((668, 733), 'numpy.array', 'np.array', (['[2.084, 2.47, 2.774, 2.113, 1.245, 2.608, 7.256, 1.599]'], {}), '([2.084, 2.47, 2.774, 2.113, 1.245, 2.608, 7.256, 1.599])\n', (676, 733), True, 'import numpy as np\n'), ((808, 863), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected_outs', 'computed_outs'], {}), '(expected_outs, computed_outs)\n', (833, 863), False, 'from numpy.testing import assert_array_almost_equal\n'), ((933, 955), 'auxiliary.functions_daniel.levi_no_13_instance', 'levi_no_13_instance', (['(3)'], {}), '(3)\n', (952, 955), False, 'from auxiliary.functions_daniel import rastrigin_instance, griewank_instance, levi_no_13_instance, rosenbrock_instance\n'), ((976, 1018), 'numpy.array', 'np.array', (['[6, 78, 134, 22, 3, 63, 1065, 6]'], {}), '([6, 78, 134, 22, 3, 63, 1065, 6])\n', (984, 1018), True, 'import numpy as np\n'), ((1089, 1144), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected_outs', 'computed_outs'], {}), '(expected_outs, computed_outs)\n', (1114, 1144), False, 'from numpy.testing 
import assert_array_almost_equal\n'), ((1214, 1236), 'auxiliary.functions_daniel.rosenbrock_instance', 'rosenbrock_instance', (['(3)'], {}), '(3)\n', (1233, 1236), False, 'from auxiliary.functions_daniel import rastrigin_instance, griewank_instance, levi_no_13_instance, rosenbrock_instance\n'), ((1257, 1322), 'numpy.array', 'np.array', (['[202, 120242, 380598, 55418, 202, 50527, 18909781, 505]'], {}), '([202, 120242, 380598, 55418, 202, 50527, 18909781, 505])\n', (1265, 1322), True, 'import numpy as np\n'), ((1393, 1448), 'numpy.testing.assert_array_almost_equal', 'assert_array_almost_equal', (['expected_outs', 'computed_outs'], {}), '(expected_outs, computed_outs)\n', (1418, 1448), False, 'from numpy.testing import assert_array_almost_equal\n'), ((1482, 1593), 'numpy.array', 'np.array', (['[[1, 2, 3], [5, 6, 7], [-8, 5, 7], [5, 2, -1], [0, 1, 0], [2, -4, -5], [19,\n 17, 23], [1, -1, 0]]'], {}), '([[1, 2, 3], [5, 6, 7], [-8, 5, 7], [5, 2, -1], [0, 1, 0], [2, -4, \n -5], [19, 17, 23], [1, -1, 0]])\n', (1490, 1593), True, 'import numpy as np\n')] |
# Copyright (C) <NAME> 2020.
# Distributed under the MIT License (see the accompanying README.md and LICENSE files).
import numpy as np
import utils.clicks as clk
def oracle_doc_variance(
        expected_reward,
        doc_values,
        rel_prob,
        obs_prob,
        sampled_inv_rankings):
    """Oracle per-document variance of the IPS reward estimate and its gradient.

    :param expected_reward: scalar expected reward of the policy
    :param doc_values: per-document reward values
    :param rel_prob: per-document relevance (click) probabilities
    :param obs_prob: observation probability per rank position
    :param sampled_inv_rankings: (n_samples, n_docs) inverse rankings (doc -> rank)
    :return: (variance, gradient) where gradient is per-document
    """
    num_docs = rel_prob.shape[0]
    # Average observation probability of each document across the sampled rankings.
    mean_obs = np.mean(obs_prob[sampled_inv_rankings], axis=0)
    click_prob = mean_obs * rel_prob
    # Inverse-propensity-weighted score a click on each document contributes.
    score = num_docs * doc_values / mean_obs
    error = score - expected_reward
    per_doc_var = click_prob * error ** 2 + (1. - click_prob) * expected_reward ** 2
    grad = rel_prob * (error ** 2. - expected_reward ** 2. - 2. * score * error)
    return np.mean(per_doc_var), grad
def oracle_list_variance(
        expected_reward,
        doc_values,
        rel_prob,
        obs_prob,
        doc_prop_scores,
        policy_log_scores,
        sampled_rankings,
        sampled_inv_rankings,
        sampled_ranking_probs,
        cutoff=None,
        compute_gradient=True):
    """Estimate the list-level variance of the IPS reward estimator by
    simulating clicks on sampled rankings, and optionally its gradients.

    :param expected_reward: scalar expected reward of the policy
    :param doc_values: per-document reward values
    :param rel_prob: per-document relevance (click-given-observation) probabilities
    :param obs_prob: observation probability per rank position
    :param doc_prop_scores: per-document propensity scores
    :param policy_log_scores: unnormalized log-scores of the ranking policy
    :param sampled_rankings: (n_samples, n_docs) rankings, rank -> doc id
    :param sampled_inv_rankings: (n_samples, n_docs) inverse rankings, doc -> rank
    :param sampled_ranking_probs: probabilities of the sampled rankings (unused here)
    :param cutoff: optional maximum ranking length
    :param compute_gradient: if False only the variance is computed
    :return: (variance, score_grad, policy_grad); gradients are None when
        compute_gradient is False
    """
    n_docs = rel_prob.shape[0]
    # Click probability of every document in every sampled ranking.
    doc_click_prob = obs_prob[sampled_inv_rankings]*rel_prob[None, :]
    n_samples = sampled_rankings.shape[0]
    sampled_clicks = clk.bernoilli_sample_from_probs(doc_click_prob)
    # Inverse-propensity-weighted value of a click on each document.
    doc_score = doc_values/doc_prop_scores
    click_values = sampled_clicks*doc_score[None, :]
    click_seq_values = np.sum(click_values, axis=1)
    click_seq_diff = (expected_reward - click_seq_values)
    click_seq_error = click_seq_diff**2.
    variance = np.mean(click_seq_error)
    if not compute_gradient:
        return variance, None, None
    ind = np.arange(n_samples)
    score_grad = np.zeros(n_docs)
    temp_grad = np.zeros(n_docs)
    log_scores = np.tile(policy_log_scores[None,:], (n_samples, 1))
    if cutoff:
        ranking_len = min(n_docs, cutoff)
    else:
        ranking_len = n_docs
    # Plackett-Luce style pass over the rank positions.
    for i in range(ranking_len):
        # Shift so the maximum log-score is 18, keeping exp() below finite.
        log_scores += 18 - np.amax(log_scores, axis=1)[:, None]
        log_denom = np.log(np.sum(np.exp(log_scores), axis=1))
        probs = np.exp(log_scores - log_denom[:, None])
        temp_grad[:] = 0.
        np.add.at(temp_grad, sampled_rankings[:, i], click_seq_error)
        score_grad += temp_grad/float(n_samples)
        score_grad -= np.mean(probs*click_seq_error[:, None], axis=0)
        # Exclude already-placed documents from the remaining rank positions.
        # BUG FIX: np.NINF was removed in NumPy 2.0; -np.inf is the
        # forward-compatible equivalent.
        log_scores[ind, sampled_rankings[:, i]] = -np.inf
    # NOTE(review): the accumulators above were already divided by n_samples,
    # so this divides by n_samples twice overall — confirm intended scaling.
    score_grad /= n_samples
    policy_grad = np.mean(
        2.*click_seq_diff[:,None]*sampled_clicks*doc_score[None,:]/doc_prop_scores[None,:],
        axis=0)
    return variance, score_grad, policy_grad
def oracle_data_split_list_variance(
        data_split,
        sample_ranking_f,
        expected_reward,
        doc_values,
        rel_prob,
        obs_prob,
        policy_log_scores,
        cutoff=None
        ):
    """Average the oracle list variance over every query in a data split.

    :param data_split: dataset split exposing num_queries() and query_range()
    :param sample_ranking_f: callable qid -> (rankings, inverse rankings,
        ranking probabilities, per-rank probabilities)
    :param expected_reward: scalar expected reward of the policy
    :param doc_values: per-document reward values over the whole split
    :param rel_prob: per-document relevance probabilities over the whole split
    :param obs_prob: observation probability per rank position
    :param policy_log_scores: policy log-scores over the whole split
    :param cutoff: optional ranking-length cutoff
    :return: mean variance across all queries
    """
    total = 0.
    n_queries = data_split.num_queries()
    for qid in range(n_queries):
        (rankings,
         inv_rankings,
         ranking_prob,
         prob_per_rank) = sample_ranking_f(qid)
        # Expected observation probability per document under the sampled rankings.
        propensities = np.sum(prob_per_rank*obs_prob[:prob_per_rank.shape[0], None], axis=0)
        start, end = data_split.query_range(qid)
        q_variance, _, _ = oracle_list_variance(
            expected_reward,
            doc_values[start:end],
            rel_prob[start:end],
            obs_prob,
            propensities,
            policy_log_scores[start:end],
            rankings,
            inv_rankings,
            ranking_prob,
            cutoff=cutoff,
            compute_gradient=False,
        )
        total += q_variance
    return total/float(n_queries)
| [
"numpy.mean",
"numpy.tile",
"numpy.exp",
"numpy.sum",
"utils.clicks.bernoilli_sample_from_probs",
"numpy.zeros",
"numpy.add.at",
"numpy.amax",
"numpy.arange"
] | [((360, 407), 'numpy.mean', 'np.mean', (['obs_prob[sampled_inv_rankings]'], {'axis': '(0)'}), '(obs_prob[sampled_inv_rankings], axis=0)\n', (367, 407), True, 'import numpy as np\n'), ((643, 664), 'numpy.mean', 'np.mean', (['doc_variance'], {}), '(doc_variance)\n', (650, 664), True, 'import numpy as np\n'), ((1262, 1309), 'utils.clicks.bernoilli_sample_from_probs', 'clk.bernoilli_sample_from_probs', (['doc_click_prob'], {}), '(doc_click_prob)\n', (1293, 1309), True, 'import utils.clicks as clk\n'), ((1425, 1453), 'numpy.sum', 'np.sum', (['click_values'], {'axis': '(1)'}), '(click_values, axis=1)\n', (1431, 1453), True, 'import numpy as np\n'), ((1564, 1588), 'numpy.mean', 'np.mean', (['click_seq_error'], {}), '(click_seq_error)\n', (1571, 1588), True, 'import numpy as np\n'), ((1660, 1680), 'numpy.arange', 'np.arange', (['n_samples'], {}), '(n_samples)\n', (1669, 1680), True, 'import numpy as np\n'), ((1696, 1712), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (1704, 1712), True, 'import numpy as np\n'), ((1727, 1743), 'numpy.zeros', 'np.zeros', (['n_docs'], {}), '(n_docs)\n', (1735, 1743), True, 'import numpy as np\n'), ((1760, 1811), 'numpy.tile', 'np.tile', (['policy_log_scores[None, :]', '(n_samples, 1)'], {}), '(policy_log_scores[None, :], (n_samples, 1))\n', (1767, 1811), True, 'import numpy as np\n'), ((2397, 2512), 'numpy.mean', 'np.mean', (['(2.0 * click_seq_diff[:, None] * sampled_clicks * doc_score[None, :] /\n doc_prop_scores[None, :])'], {'axis': '(0)'}), '(2.0 * click_seq_diff[:, None] * sampled_clicks * doc_score[None, :] /\n doc_prop_scores[None, :], axis=0)\n', (2404, 2512), True, 'import numpy as np\n'), ((2058, 2097), 'numpy.exp', 'np.exp', (['(log_scores - log_denom[:, None])'], {}), '(log_scores - log_denom[:, None])\n', (2064, 2097), True, 'import numpy as np\n'), ((2125, 2186), 'numpy.add.at', 'np.add.at', (['temp_grad', 'sampled_rankings[:, i]', 'click_seq_error'], {}), '(temp_grad, sampled_rankings[:, i], 
click_seq_error)\n', (2134, 2186), True, 'import numpy as np\n'), ((2250, 2299), 'numpy.mean', 'np.mean', (['(probs * click_seq_error[:, None])'], {'axis': '(0)'}), '(probs * click_seq_error[:, None], axis=0)\n', (2257, 2299), True, 'import numpy as np\n'), ((3110, 3181), 'numpy.sum', 'np.sum', (['(prob_per_rank * obs_prob[:prob_per_rank.shape[0], None])'], {'axis': '(0)'}), '(prob_per_rank * obs_prob[:prob_per_rank.shape[0], None], axis=0)\n', (3116, 3181), True, 'import numpy as np\n'), ((1950, 1977), 'numpy.amax', 'np.amax', (['log_scores'], {'axis': '(1)'}), '(log_scores, axis=1)\n', (1957, 1977), True, 'import numpy as np\n'), ((2017, 2035), 'numpy.exp', 'np.exp', (['log_scores'], {}), '(log_scores)\n', (2023, 2035), True, 'import numpy as np\n')] |
import math
import numpy as np
from keras.datasets import mnist, cifar10
def combine_images(generated_images):
    """Tile a batch of single-channel images into one near-square 2-D grid.

    Only channel 0 of each image is used; the grid has int(sqrt(N)) columns.
    """
    count = generated_images.shape[0]
    cols = int(math.sqrt(count))
    rows = int(math.ceil(float(count) / cols))
    h, w = generated_images.shape[1:3]
    canvas = np.zeros((rows * h, cols * w),
                      dtype=generated_images.dtype)
    for idx, img in enumerate(generated_images):
        r, c = divmod(idx, cols)
        canvas[r * h:(r + 1) * h,
               c * w:(c + 1) * w] = img[:, :, 0]
    return canvas
def load_mnist():
    """Load MNIST, scale pixels to roughly [-1, 1] and append a channel axis."""
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_test = (x_test.astype(np.float32) - 127.5) / 127.5
    # MNIST images are (N, 28, 28); add the trailing channel dimension.
    x_train = np.expand_dims(x_train, axis=-1)
    x_test = np.expand_dims(x_test, axis=-1)
    return x_train, y_train, x_test, y_test
def load_cifar10():
    """Load CIFAR-10 and scale pixels to roughly [-1, 1].

    :return: (x_train, y_train, x_test, y_test) with images of shape
        (N, 32, 32, 3)
    """
    (x_train, y_train), (x_test, y_test) = cifar10.load_data()
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_test = (x_test.astype(np.float32) - 127.5) / 127.5
    # BUG FIX: cifar10.load_data() already returns (N, 32, 32, 3) arrays; the
    # old code appended another axis with np.newaxis, producing an invalid
    # 5-D (N, 32, 32, 3, 1) tensor.
    return x_train, y_train, x_test, y_test
| [
"keras.datasets.cifar10.load_data",
"numpy.zeros",
"math.sqrt",
"keras.datasets.mnist.load_data"
] | [((282, 359), 'numpy.zeros', 'np.zeros', (['(height * shape[0], width * shape[1])'], {'dtype': 'generated_images.dtype'}), '((height * shape[0], width * shape[1]), dtype=generated_images.dtype)\n', (290, 359), True, 'import numpy as np\n'), ((729, 746), 'keras.datasets.mnist.load_data', 'mnist.load_data', ([], {}), '()\n', (744, 746), False, 'from keras.datasets import mnist, cifar10\n'), ((1131, 1150), 'keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (1148, 1150), False, 'from keras.datasets import mnist, cifar10\n'), ((166, 180), 'math.sqrt', 'math.sqrt', (['num'], {}), '(num)\n', (175, 180), False, 'import math\n')] |
from __future__ import division
import numpy as np
from collections import namedtuple
import bilby
from bilby.gw import conversion
import os
import gwpopulation
# Result bundles returned by the generator functions below.
MassContainer = namedtuple(
    'MassContainer',
    ['primary_masses', 'secondary_masses', 'mass_ratios', 'total_masses',
     'chirp_masses'])
# Aligned spin magnitudes of the primary (s13) and secondary (s23) bodies.
SpinContainer = namedtuple('SpinContainer', ['s13', 's23'])
# NOTE(review): the runtime type name is misspelled ('ExtrinisicParamter...')
# but is preserved because it is part of the tuple's repr/pickle identity.
ExtrinsicParameterContainer = namedtuple(
    'ExtrinisicParamterContainer',
    ['inc', 'ra', 'dec', 'phase', 'psi', 'geocent_time',
     'luminosity_distance'])
AllParameterContainer = namedtuple(
    'AllParameterContainer',
    ['primary_masses', 'secondary_masses', 'mass_ratios', 'total_masses',
     'chirp_masses', 's13', 's23', 'inc', 'ra', 'dec',
     'phase', 'psi', 'geocent_time', 'luminosity_distance'])
def generate_mass_parameters(size=10000, clean=False, alpha=1.5, mmin=8, mmax=45, beta=3, plot=False):
    """Draw (m1, q) samples from a power-law population model and derive all
    other mass quantities.

    Samples are cached on disk; pass clean=True to force a fresh draw.

    :param size: grid resolution per axis for the rejection sampler
    :param clean: if True, ignore any cached draw and resample
    :param alpha: power-law index of the primary-mass distribution
    :param mmin: minimum primary mass
    :param mmax: maximum primary mass
    :param beta: power-law index of the mass-ratio distribution
    :param plot: if True, produce debug plots
    :return: MassContainer with primary/secondary/total/chirp masses and ratios
    """
    m1s = np.linspace(4, 45, size)
    qs = np.linspace(0.01, 1, size)
    q_mesh, m_mesh = np.meshgrid(qs, m1s)
    # BUG FIX: the cache file name used to depend on `size` only, so a cached
    # draw made with *different* population parameters (alpha/mmin/mmax/beta)
    # was silently reused. Key the cache on every parameter instead.
    outfile = 'pop_masses_{}_{}_{}_{}_{}.txt'.format(size, alpha, mmin, mmax, beta)
    if clean or not os.path.isfile(outfile):
        primary_masses, mass_ratios = _generate_masses(
            m_mesh, q_mesh, size, alpha=alpha, m_min=mmin, m_max=mmax, beta=beta)
        np.savetxt(outfile, np.array((primary_masses, mass_ratios)))
    else:
        primary_masses, mass_ratios = np.loadtxt(outfile)
    secondary_masses = primary_masses * mass_ratios
    total_masses = primary_masses + secondary_masses
    chirp_masses = conversion.component_masses_to_chirp_mass(primary_masses, secondary_masses)
    if plot:
        mass_debug_plots(mass_ratios, primary_masses, secondary_masses, total_masses, chirp_masses)
    return MassContainer(primary_masses=primary_masses, secondary_masses=secondary_masses,
                         mass_ratios=mass_ratios, total_masses=total_masses, chirp_masses=chirp_masses)
def _generate_masses(m_mesh, q_mesh, size, alpha, m_min, m_max, beta):
    """Rejection-sample (m1, q) pairs from the power-law primary-mass /
    mass-ratio population model on the given grid.

    :param m_mesh: (size, size) grid of primary masses
    :param q_mesh: (size, size) grid of mass ratios
    :param size: grid resolution per axis (matches the mesh shapes)
    :param alpha: power-law index of the primary-mass distribution
    :param m_min: minimum primary mass
    :param m_max: maximum primary mass
    :param beta: power-law index of the mass-ratio distribution
    :return: (primary_masses, mass_ratios) 1-D arrays of accepted samples
    """
    dataset = dict(mass_1=m_mesh, mass_ratio=q_mesh)
    weights = gwpopulation.models.mass.power_law_primary_mass_ratio(
        dataset=dataset, alpha=alpha, mmin=m_min, mmax=m_max, beta=beta)
    norm_weights = weights / np.max(weights)
    # Accept each grid point with probability proportional to its weight.
    accepted = np.random.random(size=(size, size)) < norm_weights
    # Boolean indexing walks the grid in row-major order, matching the
    # original O(size^2) Python double loop but vectorized.
    return m_mesh[accepted], q_mesh[accepted]
def mass_debug_plots(mass_ratios, primary_masses, secondary_masses, total_masses, chirp_masses):
    """Scatter q against m1 and histogram every mass quantity (debug only)."""
    import matplotlib.pyplot as plt
    plt.scatter(primary_masses, mass_ratios)
    plt.xlabel('Primary mass')
    plt.ylabel('Mass ratio')
    plt.show()
    plt.clf()
    for values, label in ((primary_masses, 'Primary mass'),
                          (secondary_masses, 'Secondary mass'),
                          (mass_ratios, 'Mass ratio'),
                          (total_masses, 'Total Mass'),
                          (chirp_masses, 'Chirp mass')):
        _debug_histogram(values, label)
def generate_spins(size=10000, plot=False):
    """Draw aligned-spin magnitudes s13 and s23 for `size` binaries.

    Both components use the same AlignedSpin prior with a uniform
    magnitude prior on [0, 0.5].
    """
    spin_prior = bilby.gw.prior.AlignedSpin(
        name='s13', a_prior=bilby.core.prior.Uniform(0.0, 0.5), latex_label='s13')
    first_spins = spin_prior.sample(size)
    second_spins = spin_prior.sample(size)
    if plot:
        spin_debug_plot(first_spins)
    return SpinContainer(s13=np.array(first_spins), s23=np.array(second_spins))
def spin_debug_plot(spins):
    # Debug helper: histogram of the sampled spin magnitudes, linear y-axis.
    _debug_histogram(spins, 'Spin', log=False)
def _debug_histogram(parameter, name, log=True):
    """Show a quick histogram of `parameter` with sqrt(N) bins (debug only).

    :param parameter: 1-D array-like of sampled values
    :param name: x-axis label
    :param log: if True, use a logarithmic y-axis
    """
    import matplotlib.pyplot as plt
    n_bins = int(np.sqrt(len(parameter)))
    plt.hist(parameter, bins=n_bins)
    if log:
        plt.semilogy()
    plt.xlabel(name)
    plt.show()
    plt.clf()
def generate_extrinsic_parameters(size=10000, plot=False):
    """Sample the extrinsic parameters of `size` binaries.

    Inclination, sky position, phase and polarization come from the priors in
    'aligned_spin.prior'; coalescence time and luminosity distance use the
    explicit priors constructed below.

    :param size: number of samples per parameter
    :param plot: if True, histogram every sampled parameter
    :return: ExtrinsicParameterContainer with all sampled arrays
    """
    priors = bilby.core.prior.PriorDict()
    priors.from_file(filename='aligned_spin.prior')
    priors_inc = priors['inc']
    priors_ra = priors['ra']
    priors_dec = priors['dec']
    priors_phase = priors['phase']
    priors_psi = priors['psi']
    priors_geocent_time = bilby.core.prior.Uniform(minimum=-0.1, maximum=0.2)
    priors_luminosity_distance = bilby.gw.prior.UniformComovingVolume(minimum=10, maximum=10000,
                                                                         name='luminosity_distance')
    inc = priors_inc.sample(size=size)
    ra = priors_ra.sample(size=size)
    dec = priors_dec.sample(size=size)
    phase = priors_phase.sample(size=size)
    psi = priors_psi.sample(size=size)
    geocent_time = priors_geocent_time.sample(size=size)
    luminosity_distance = priors_luminosity_distance.sample(size=size)
    if plot:
        extrinsic_parameters_debug_plots(inc=inc, ra=ra, dec=dec, phase=phase, psi=psi,
                                         geocent_time=geocent_time, luminosity_distance=luminosity_distance)
    # BUG FIX: geocent_time was re-sampled right before the return, so the
    # returned values never matched the ones that were just plotted.
    return ExtrinsicParameterContainer(inc=inc, ra=ra, dec=dec, phase=phase, psi=psi,
                                       geocent_time=geocent_time, luminosity_distance=luminosity_distance)
def extrinsic_parameters_debug_plots(inc, ra, dec, phase, psi, geocent_time, luminosity_distance):
    """Histogram every extrinsic parameter (debug only)."""
    for values, label, log_scale in (
            (inc, 'Inclination', False),
            (ra, 'RA', False),
            (dec, 'DEC', False),
            (phase, '$\phi$', False),
            (psi, '$\psi$', False),
            (geocent_time, 'Time of Coalescence', False),
            (luminosity_distance, 'Luminosity_distance', True)):
        _debug_histogram(values, label, log=log_scale)
def generate_all_parameters(size=10000, clean=False, plot=False, **mass_kwargs):
    """Sample masses, spins and extrinsic parameters and bundle them into a
    single AllParameterContainer.

    :param size: number of samples per parameter
    :param clean: if True, resample masses instead of using the disk cache
    :param plot: if True, produce debug plots for every quantity
    :param mass_kwargs: forwarded to generate_mass_parameters (alpha, mmin, ...)
    """
    masses = generate_mass_parameters(size=size, plot=plot, clean=clean, **mass_kwargs)
    spins = generate_spins(size=size, plot=plot)
    extrinsic = generate_extrinsic_parameters(size=size, plot=plot)
    return AllParameterContainer(
        primary_masses=masses.primary_masses,
        secondary_masses=masses.secondary_masses,
        total_masses=masses.total_masses,
        mass_ratios=masses.mass_ratios,
        chirp_masses=masses.chirp_masses,
        s13=spins.s13,
        s23=spins.s23,
        inc=extrinsic.inc,
        ra=extrinsic.ra,
        dec=extrinsic.dec,
        psi=extrinsic.psi,
        phase=extrinsic.phase,
        geocent_time=extrinsic.geocent_time,
        luminosity_distance=extrinsic.luminosity_distance)
| [
"matplotlib.pyplot.ylabel",
"numpy.array",
"matplotlib.pyplot.semilogy",
"numpy.random.random",
"matplotlib.pyplot.xlabel",
"numpy.max",
"numpy.linspace",
"gwpopulation.models.mass.power_law_primary_mass_ratio",
"matplotlib.pyplot.scatter",
"numpy.meshgrid",
"collections.namedtuple",
"os.path.... | [((180, 298), 'collections.namedtuple', 'namedtuple', (['"""MassContainer"""', "['primary_masses', 'secondary_masses', 'mass_ratios', 'total_masses',\n 'chirp_masses']"], {}), "('MassContainer', ['primary_masses', 'secondary_masses',\n 'mass_ratios', 'total_masses', 'chirp_masses'])\n", (190, 298), False, 'from collections import namedtuple\n'), ((356, 399), 'collections.namedtuple', 'namedtuple', (['"""SpinContainer"""', "['s13', 's23']"], {}), "('SpinContainer', ['s13', 's23'])\n", (366, 399), False, 'from collections import namedtuple\n'), ((430, 552), 'collections.namedtuple', 'namedtuple', (['"""ExtrinisicParamterContainer"""', "['inc', 'ra', 'dec', 'phase', 'psi', 'geocent_time', 'luminosity_distance']"], {}), "('ExtrinisicParamterContainer', ['inc', 'ra', 'dec', 'phase',\n 'psi', 'geocent_time', 'luminosity_distance'])\n", (440, 552), False, 'from collections import namedtuple\n'), ((719, 938), 'collections.namedtuple', 'namedtuple', (['"""AllParameterContainer"""', "['primary_masses', 'secondary_masses', 'mass_ratios', 'total_masses',\n 'chirp_masses', 's13', 's23', 'inc', 'ra', 'dec', 'phase', 'psi',\n 'geocent_time', 'luminosity_distance']"], {}), "('AllParameterContainer', ['primary_masses', 'secondary_masses',\n 'mass_ratios', 'total_masses', 'chirp_masses', 's13', 's23', 'inc',\n 'ra', 'dec', 'phase', 'psi', 'geocent_time', 'luminosity_distance'])\n", (729, 938), False, 'from collections import namedtuple\n'), ((1153, 1177), 'numpy.linspace', 'np.linspace', (['(4)', '(45)', 'size'], {}), '(4, 45, size)\n', (1164, 1177), True, 'import numpy as np\n'), ((1187, 1213), 'numpy.linspace', 'np.linspace', (['(0.01)', '(1)', 'size'], {}), '(0.01, 1, size)\n', (1198, 1213), True, 'import numpy as np\n'), ((1235, 1255), 'numpy.meshgrid', 'np.meshgrid', (['qs', 'm1s'], {}), '(qs, m1s)\n', (1246, 1255), True, 'import numpy as np\n'), ((1828, 1903), 'bilby.gw.conversion.component_masses_to_chirp_mass', 'conversion.component_masses_to_chirp_mass', 
(['primary_masses', 'secondary_masses'], {}), '(primary_masses, secondary_masses)\n', (1869, 1903), False, 'from bilby.gw import conversion\n'), ((2352, 2474), 'gwpopulation.models.mass.power_law_primary_mass_ratio', 'gwpopulation.models.mass.power_law_primary_mass_ratio', ([], {'dataset': 'dataset', 'alpha': 'alpha', 'mmin': 'm_min', 'mmax': 'm_max', 'beta': 'beta'}), '(dataset=dataset,\n alpha=alpha, mmin=m_min, mmax=m_max, beta=beta)\n', (2405, 2474), False, 'import gwpopulation\n'), ((2605, 2640), 'numpy.random.random', 'np.random.random', ([], {'size': '(size, size)'}), '(size=(size, size))\n', (2621, 2640), True, 'import numpy as np\n'), ((3020, 3053), 'numpy.array', 'np.array', (['primary_masses_filtered'], {}), '(primary_masses_filtered)\n', (3028, 3053), True, 'import numpy as np\n'), ((3081, 3111), 'numpy.array', 'np.array', (['mass_ratios_filtered'], {}), '(mass_ratios_filtered)\n', (3089, 3111), True, 'import numpy as np\n'), ((3328, 3368), 'matplotlib.pyplot.scatter', 'plt.scatter', (['primary_masses', 'mass_ratios'], {}), '(primary_masses, mass_ratios)\n', (3339, 3368), True, 'import matplotlib.pyplot as plt\n'), ((3373, 3399), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Primary mass"""'], {}), "('Primary mass')\n", (3383, 3399), True, 'import matplotlib.pyplot as plt\n'), ((3404, 3428), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mass ratio"""'], {}), "('Mass ratio')\n", (3414, 3428), True, 'import matplotlib.pyplot as plt\n'), ((3433, 3443), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3441, 3443), True, 'import matplotlib.pyplot as plt\n'), ((3448, 3457), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (3455, 3457), True, 'import matplotlib.pyplot as plt\n'), ((4300, 4316), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['name'], {}), '(name)\n', (4310, 4316), True, 'import matplotlib.pyplot as plt\n'), ((4321, 4331), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4329, 4331), True, 'import matplotlib.pyplot as 
plt\n'), ((4336, 4345), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (4343, 4345), True, 'import matplotlib.pyplot as plt\n'), ((4420, 4448), 'bilby.core.prior.PriorDict', 'bilby.core.prior.PriorDict', ([], {}), '()\n', (4446, 4448), False, 'import bilby\n'), ((4684, 4735), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', ([], {'minimum': '(-0.1)', 'maximum': '(0.2)'}), '(minimum=-0.1, maximum=0.2)\n', (4708, 4735), False, 'import bilby\n'), ((4769, 4865), 'bilby.gw.prior.UniformComovingVolume', 'bilby.gw.prior.UniformComovingVolume', ([], {'minimum': '(10)', 'maximum': '(10000)', 'name': '"""luminosity_distance"""'}), "(minimum=10, maximum=10000, name=\n 'luminosity_distance')\n", (4805, 4865), False, 'import bilby\n'), ((1504, 1543), 'numpy.array', 'np.array', (['(primary_masses, mass_ratios)'], {}), '((primary_masses, mass_ratios))\n', (1512, 1543), True, 'import numpy as np\n'), ((1552, 1577), 'numpy.savetxt', 'np.savetxt', (['outfile', 'save'], {}), '(outfile, save)\n', (1562, 1577), True, 'import numpy as np\n'), ((1609, 1628), 'numpy.loadtxt', 'np.loadtxt', (['outfile'], {}), '(outfile)\n', (1619, 1628), True, 'import numpy as np\n'), ((2568, 2583), 'numpy.max', 'np.max', (['weights'], {}), '(weights)\n', (2574, 2583), True, 'import numpy as np\n'), ((3123, 3156), 'numpy.array', 'np.array', (['primary_masses_filtered'], {}), '(primary_masses_filtered)\n', (3131, 3156), True, 'import numpy as np\n'), ((3158, 3188), 'numpy.array', 'np.array', (['mass_ratios_filtered'], {}), '(mass_ratios_filtered)\n', (3166, 3188), True, 'import numpy as np\n'), ((4281, 4295), 'matplotlib.pyplot.semilogy', 'plt.semilogy', ([], {}), '()\n', (4293, 4295), True, 'import matplotlib.pyplot as plt\n'), ((1325, 1348), 'os.path.isfile', 'os.path.isfile', (['outfile'], {}), '(outfile)\n', (1339, 1348), False, 'import os\n'), ((3820, 3854), 'bilby.core.prior.Uniform', 'bilby.core.prior.Uniform', (['(0.0)', '(0.5)'], {}), '(0.0, 0.5)\n', (3844, 3854), False, 'import 
bilby\n'), ((4004, 4017), 'numpy.array', 'np.array', (['s13'], {}), '(s13)\n', (4012, 4017), True, 'import numpy as np\n'), ((4023, 4036), 'numpy.array', 'np.array', (['s23'], {}), '(s23)\n', (4031, 4036), True, 'import numpy as np\n')] |
import os
import numpy as np
import time
import sys
import paddle
import paddle.fluid as fluid
from resnet import TSN_ResNet
import reader
import argparse
import functools
from paddle.fluid.framework import Parameter
from utility import add_arguments, print_arguments
# Command-line interface: each add_arg call registers one option as
# (name, type, default, help) on the shared parser via utility.add_arguments.
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('num_layers', int, 50, "How many layers for ResNet model.")
add_arg('with_mem_opt', bool, True, "Whether to use memory optimization or not.")
add_arg('class_dim', int, 101, "Number of class.")
add_arg('seg_num', int, 7, "Number of segments.")
add_arg('image_shape', str, "3,224,224", "Input image size.")
add_arg('test_model', str, None, "Test model path.")
# yapf: enable
def infer(args):
    """Run inference with a TSN-ResNet model and print top-1 predictions.

    Builds the network, loads trained parameters from ``args.test_model``,
    and prints the best-scoring class for every sample yielded by
    ``reader.infer``.

    Args:
        args: parsed CLI namespace with ``seg_num``, ``class_dim``,
            ``num_layers``, ``image_shape`` (comma-separated, e.g.
            "3,224,224"), ``test_model`` and ``with_mem_opt``.
    """
    # parameters from arguments
    seg_num = args.seg_num
    class_dim = args.class_dim
    num_layers = args.num_layers
    test_model = args.test_model
    if test_model is None:  # fixed: identity check instead of '== None'
        print('Please specify the test model ...')
        return
    # Input layout is [seg_num, C, H, W]: one stacked frame per segment.
    image_shape = [int(m) for m in args.image_shape.split(",")]
    image_shape = [seg_num] + image_shape
    # model definition
    model = TSN_ResNet(layers=num_layers, seg_num=seg_num)
    image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
    out = model.net(input=image, class_dim=class_dim)
    # Clone the program for test so train-only ops are stripped.
    inference_program = fluid.default_main_program().clone(for_test=True)
    if args.with_mem_opt:
        fluid.memory_optimize(fluid.default_main_program())
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    def is_parameter(var):
        # Keep only trainable Parameter variables when loading weights.
        # (Original wrapped this in a redundant 'if isinstance: return isinstance'.)
        return isinstance(var, Parameter)

    # test_model is guaranteed non-None here (early return above), so the
    # original redundant guard is dropped.  'param_vars' avoids shadowing
    # the builtin 'vars'.
    param_vars = filter(is_parameter, inference_program.list_vars())
    fluid.io.load_vars(exe, test_model, vars=param_vars)
    # reader
    test_reader = paddle.batch(reader.infer(seg_num), batch_size=1)
    feeder = fluid.DataFeeder(place=place, feed_list=[image])
    fetch_list = [out.name]
    # test
    TOPK = 1
    for batch_id, data in enumerate(test_reader()):
        data, vid = data[0]
        data = [[data]]
        result = exe.run(inference_program,
                         fetch_list=fetch_list,
                         feed=feeder.feed(data))
        result = result[0][0]
        # Indices of the TOPK highest scores, best first.
        pred_label = np.argsort(result)[::-1][:TOPK]
        print("Test sample: {0}, score: {1}, class {2}".format(vid, result[
            pred_label], pred_label))
        sys.stdout.flush()
def main():
    """CLI entry point: parse the arguments, echo them, then run inference."""
    cli_args = parser.parse_args()
    print_arguments(cli_args)
    infer(cli_args)


if __name__ == '__main__':
    main()
| [
"paddle.fluid.DataFeeder",
"reader.infer",
"argparse.ArgumentParser",
"paddle.fluid.default_startup_program",
"resnet.TSN_ResNet",
"paddle.fluid.layers.data",
"numpy.argsort",
"paddle.fluid.default_main_program",
"paddle.fluid.Executor",
"functools.partial",
"paddle.fluid.io.load_vars",
"paddl... | [((279, 323), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '__doc__'}), '(description=__doc__)\n', (302, 323), False, 'import argparse\n'), ((334, 384), 'functools.partial', 'functools.partial', (['add_arguments'], {'argparser': 'parser'}), '(add_arguments, argparser=parser)\n', (351, 384), False, 'import functools\n'), ((1307, 1353), 'resnet.TSN_ResNet', 'TSN_ResNet', ([], {'layers': 'num_layers', 'seg_num': 'seg_num'}), '(layers=num_layers, seg_num=seg_num)\n', (1317, 1353), False, 'from resnet import TSN_ResNet\n'), ((1366, 1433), 'paddle.fluid.layers.data', 'fluid.layers.data', ([], {'name': '"""image"""', 'shape': 'image_shape', 'dtype': '"""float32"""'}), "(name='image', shape=image_shape, dtype='float32')\n", (1383, 1433), True, 'import paddle.fluid as fluid\n'), ((1679, 1697), 'paddle.fluid.CUDAPlace', 'fluid.CUDAPlace', (['(0)'], {}), '(0)\n', (1694, 1697), True, 'import paddle.fluid as fluid\n'), ((1708, 1729), 'paddle.fluid.Executor', 'fluid.Executor', (['place'], {}), '(place)\n', (1722, 1729), True, 'import paddle.fluid as fluid\n'), ((2137, 2185), 'paddle.fluid.DataFeeder', 'fluid.DataFeeder', ([], {'place': 'place', 'feed_list': '[image]'}), '(place=place, feed_list=[image])\n', (2153, 2185), True, 'import paddle.fluid as fluid\n'), ((2758, 2779), 'utility.print_arguments', 'print_arguments', (['args'], {}), '(args)\n', (2773, 2779), False, 'from utility import add_arguments, print_arguments\n'), ((1742, 1773), 'paddle.fluid.default_startup_program', 'fluid.default_startup_program', ([], {}), '()\n', (1771, 1773), True, 'import paddle.fluid as fluid\n'), ((1995, 2041), 'paddle.fluid.io.load_vars', 'fluid.io.load_vars', (['exe', 'test_model'], {'vars': 'vars'}), '(exe, test_model, vars=vars)\n', (2013, 2041), True, 'import paddle.fluid as fluid\n'), ((2087, 2108), 'reader.infer', 'reader.infer', (['seg_num'], {}), '(seg_num)\n', (2099, 2108), False, 'import reader\n'), ((2690, 2708), 'sys.stdout.flush', 
'sys.stdout.flush', ([], {}), '()\n', (2706, 2708), False, 'import sys\n'), ((1529, 1557), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1555, 1557), True, 'import paddle.fluid as fluid\n'), ((1636, 1664), 'paddle.fluid.default_main_program', 'fluid.default_main_program', ([], {}), '()\n', (1662, 1664), True, 'import paddle.fluid as fluid\n'), ((2536, 2554), 'numpy.argsort', 'np.argsort', (['result'], {}), '(result)\n', (2546, 2554), True, 'import numpy as np\n')] |
import os
import numpy as np
import torch
import torch.utils.data as data
from .common import flatten_first_dim, load_dataset, load_datasets, data_dir, dataset_bounds
from .utils import *
# One sample per batch throughout.
batch_size = 1
# Feature columns requested from each dataset ('q'/'rq' presumably
# quaternion-related — confirm against the loaders in .common).
input_features = [
    'x', 'y', 'z',
    'q',
    'ax', 'ay', 'az',
    'rq'
]
# Dataset splits by acquisition session.
list_datasets_lab = [
    '3d/lab'
]
list_datasets_carm = [
    '3d/c-arm-9',
    '3d/c-arm-13',
    '3d/c-arm-16',
]
list_datasets_val = [
    '3d/c-arm-12',
    '3d/c-arm-15'
]
list_datasets_test = [
    '3d/c-arm-10',
    '3d/c-arm-11',
    '3d/c-arm-14'
]
test_datasets = {}
# Load each split and collapse the per-dataset leading dimension into a
# single sample axis.
x, y = load_datasets(list_datasets_lab, input_features)
xlab = flatten_first_dim(x)
ylab = flatten_first_dim(y)
x, y = load_datasets(list_datasets_carm, input_features)
xcarm = flatten_first_dim(x)
ycarm = flatten_first_dim(y)
x, y = load_datasets(list_datasets_val, input_features)
xval = flatten_first_dim(x)
yval = flatten_first_dim(y)
# The test split is loaded per C-arm so the raw (points, gt) pairs are
# also kept individually in test_datasets.
x = []
y = []
for carm in list_datasets_test:
    print(f'{carm}.....', end='')
    carm_path = os.path.join(data_dir, carm)
    dataset = load_dataset(carm_path)
    points, gt = dataset.displacements(input_features)
    test_datasets[carm] = (points, gt)
    x.append(points)
    y.append(gt)
    print('done')
xtest = flatten_first_dim(x)
ytest = flatten_first_dim(y)
# Per-feature bounds for every split, then global min/max across splits
# for min-max normalisation.
lab_min, lab_max, lab_ymin, lab_ymax = dataset_bounds(xlab, ylab, input_features)
carm_min, carm_max, carm_ymin, carm_ymax = dataset_bounds(xcarm, ycarm, input_features)
val_min, val_max, val_ymin, val_ymax = dataset_bounds(xval, yval, input_features)
test_min, test_max, test_ymin, test_ymax = dataset_bounds(xtest, ytest, input_features)
min_vec = np.min((lab_min, carm_min, test_min, val_min), axis=0)
max_vec = np.max((lab_max, carm_max, test_max, val_max), axis=0)
# Bounds tiled twice to match the doubled input width (inputs appear to
# concatenate two feature sets — confirm against displacements()).
min_vec2 = np.append(min_vec, min_vec)
max_vec2 = np.append(max_vec, max_vec)
min_y = np.min((lab_ymin, carm_ymin, test_ymin, val_ymin), axis=0)
max_y = np.max((lab_ymax, carm_ymax, test_ymax, val_ymax), axis=0)
# Min-max normalised inputs in [0, 1].
xlab_N = (xlab - min_vec2) / (max_vec2 - min_vec2)
xcarm_N = (xcarm - min_vec2) / (max_vec2 - min_vec2)
xtest_N = (xtest - min_vec2) / (max_vec2 - min_vec2)
xval_N = (xval - min_vec2) / (max_vec2 - min_vec2)
class PointDataset(data.Dataset):
    """Torch dataset yielding one displacement sample per index.

    Each item is a dict with float64 copies of the input features ('x')
    and the ground-truth displacements ('gt').
    """

    def __init__(self, x, gt):
        self.x = x
        self.gt = gt

    def __len__(self):
        return self.x.shape[0]

    def __getitem__(self, index):
        sample = self.x[index]
        target = self.gt[index]
        return {'x': sample.astype('float64'), 'gt': target.astype('float64')}
# Dataset objects; lab_2layers keeps only lab samples with z > -10
# (presumably the upper layers of the volume — confirm).
lab = PointDataset(xlab_N, ylab)
lab_2layers = PointDataset(xlab_N[xlab[:,2] > -10], ylab[xlab[:,2] > -10])
carm = PointDataset(xcarm_N, ycarm)
# Shuffled single-sample loaders.
lab_dataloader = torch.utils.data.DataLoader(lab, batch_size=batch_size, shuffle=True)
lab_2layers_dataloader = torch.utils.data.DataLoader(lab_2layers, batch_size=batch_size, shuffle=True)
carm_dataloader = torch.utils.data.DataLoader(carm, batch_size=batch_size, shuffle=True)
# Normalisation bounds as float tensors on device `cuda`
# (`cuda` presumably comes from the star-import of .utils — confirm).
T_max_vec = torch.from_numpy(max_vec).float().to(cuda)
T_min_vec = torch.from_numpy(min_vec).float().to(cuda)
def normalize(p):
    """Min-max scale the columns of *p* into [0, 1] using the module bounds."""
    n_cols = p.shape[1]
    span = (max_vec - min_vec)[:n_cols]
    return (p - min_vec[:n_cols]) / span
def unnormalize(p):
    """Invert :func:`normalize`: map [0, 1] columns back to physical units."""
    n_cols = p.shape[1]
    span = (max_vec - min_vec)[:n_cols]
    return p * span + min_vec[:n_cols]
def tensor_normalize(T, dim=0):
    """Min-max scale tensor columns to [0, 1] using the device bound tensors.

    Note: ``dim`` is accepted for interface compatibility but unused.
    """
    lo = T_min_vec[:T.size(1)]
    hi = T_max_vec[:T.size(1)]
    return torch.div(torch.sub(T, lo), hi - lo)
def tensor_unnormalize(T):
    """Invert :func:`tensor_normalize` for the first ``T.size(1)`` columns."""
    lo = T_min_vec[:T.size(1)]
    hi = T_max_vec[:T.size(1)]
    return torch.add(torch.mul(T, hi - lo), lo)
| [
"os.path.join",
"numpy.min",
"torch.from_numpy",
"numpy.max",
"numpy.append",
"torch.utils.data.DataLoader"
] | [((1640, 1694), 'numpy.min', 'np.min', (['(lab_min, carm_min, test_min, val_min)'], {'axis': '(0)'}), '((lab_min, carm_min, test_min, val_min), axis=0)\n', (1646, 1694), True, 'import numpy as np\n'), ((1705, 1759), 'numpy.max', 'np.max', (['(lab_max, carm_max, test_max, val_max)'], {'axis': '(0)'}), '((lab_max, carm_max, test_max, val_max), axis=0)\n', (1711, 1759), True, 'import numpy as np\n'), ((1771, 1798), 'numpy.append', 'np.append', (['min_vec', 'min_vec'], {}), '(min_vec, min_vec)\n', (1780, 1798), True, 'import numpy as np\n'), ((1810, 1837), 'numpy.append', 'np.append', (['max_vec', 'max_vec'], {}), '(max_vec, max_vec)\n', (1819, 1837), True, 'import numpy as np\n'), ((1847, 1905), 'numpy.min', 'np.min', (['(lab_ymin, carm_ymin, test_ymin, val_ymin)'], {'axis': '(0)'}), '((lab_ymin, carm_ymin, test_ymin, val_ymin), axis=0)\n', (1853, 1905), True, 'import numpy as np\n'), ((1914, 1972), 'numpy.max', 'np.max', (['(lab_ymax, carm_ymax, test_ymax, val_ymax)'], {'axis': '(0)'}), '((lab_ymax, carm_ymax, test_ymax, val_ymax), axis=0)\n', (1920, 1972), True, 'import numpy as np\n'), ((2720, 2789), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['lab'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(lab, batch_size=batch_size, shuffle=True)\n', (2747, 2789), False, 'import torch\n'), ((2815, 2892), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['lab_2layers'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(lab_2layers, batch_size=batch_size, shuffle=True)\n', (2842, 2892), False, 'import torch\n'), ((2911, 2981), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['carm'], {'batch_size': 'batch_size', 'shuffle': '(True)'}), '(carm, batch_size=batch_size, shuffle=True)\n', (2938, 2981), False, 'import torch\n'), ((1012, 1040), 'os.path.join', 'os.path.join', (['data_dir', 'carm'], {}), '(data_dir, carm)\n', (1024, 1040), False, 'import os\n'), ((2996, 3021), 'torch.from_numpy', 'torch.from_numpy', 
(['max_vec'], {}), '(max_vec)\n', (3012, 3021), False, 'import torch\n'), ((3051, 3076), 'torch.from_numpy', 'torch.from_numpy', (['min_vec'], {}), '(min_vec)\n', (3067, 3076), False, 'import torch\n')] |
import types
import logging
import time
import numpy as np
from jbopt.de import de
from jbopt.classic import classical
from starkit.fitkit.priors import PriorCollection
logger = logging.getLogger(__name__)
def fit_evaluate(self, model_param):
    """Log-likelihood of the data given values for the free parameters.

    Fixed parameters keep their current values; ``model_param`` fills the
    non-fixed slots in order.  Bound onto the likelihood as a method.
    """
    full_params = self.parameters.copy()
    free = ~self.fixed_mask()
    full_params[free] = model_param
    return float(self.evaluate(*full_params))
def fixed_mask(self):
    """Boolean array marking which named parameters are currently fixed."""
    flags = [getattr(self, name).fixed for name in self.param_names]
    return np.array(flags)
class JBOptPriorCollection(PriorCollection):
    """Adapter giving starkit's PriorCollection the single-argument
    ``prior_transform(cube)`` signature that jbopt expects."""
    def prior_transform(self, cube):
        # The parent transform appears to operate on ``cube`` in place
        # (extra args: None, len(cube)) — TODO confirm against starkit.
        cube = np.asarray(cube)
        super(JBOptPriorCollection, self).prior_transform(cube,
                                                          None, len(cube))
        return cube
class JBOpt(object):
    """Thin driver around the jbopt optimisers (differential evolution and
    classical minimisers) for fitting a starkit likelihood.

    The likelihood object is monkey-patched with ``fit_evaluate`` and
    ``fixed_mask`` bound methods; priors without a ``prior_transform`` are
    wrapped in :class:`JBOptPriorCollection`.
    """
    def __init__(self, likelihood, priors, output_basename='test_all'):
        # Bind the module-level helpers as methods of the likelihood so the
        # jbopt callbacks can evaluate it with only the free parameters.
        self.likelihood = likelihood
        self.likelihood.fit_evaluate = types.MethodType(
            fit_evaluate, self.likelihood)
        self.likelihood.fixed_mask = types.MethodType(fixed_mask,
                                                         self.likelihood)
        if not hasattr(priors, 'prior_transform'):
            self.priors = JBOptPriorCollection(priors)
        else:
            self.priors = priors
        # Names of the parameters that are actually optimised (not fixed).
        self.fit_parameter_names = [
            item for i, item in enumerate(self.likelihood.param_names)
            if not self.likelihood.fixed_mask()[i]]
        # Keyword arguments shared by every jbopt backend; flat prior (0).
        self.args = dict(loglikelihood=self.likelihood.fit_evaluate,
                         transform=self.priors.prior_transform,
                         prior=lambda x: 0,
                         parameter_names=self.fit_parameter_names,
                         )
    def run(self, output_basename, method='de', start=None, nsteps=2000, verbose=0):
        """Run the fit and write the best-fit values back into the likelihood.

        ``start`` is in the prior unit cube and defaults to its centre;
        ``method`` is 'de' or one of the classical solver names.  Returns
        the jbopt result dict augmented with ``best_values``.

        NOTE(review): an unrecognised ``method`` leaves ``self.result``
        unset and raises AttributeError below — confirm this is intended.
        """
        if start is None:
            start = [0.5] * len(self.fit_parameter_names)
        self.args['start'] = start
        self.args['nsteps'] = nsteps
        self.args['disp'] = verbose
        self.args['output_basename'] = output_basename
        start_time = time.time()
        if method == 'de':
            self.result = self._run_de()
        elif method in ('cobyla', 'ralg', 'mma', 'auglag', 'minuit',
                        'neldermead'):
            self.result = self._run_classical(method)
        logger.info('Fit took {0:.2f}s'.format(time.time() - start_time))
        # Map the best unit-cube point through the priors and store the
        # resulting values in the free slots of the likelihood parameters.
        self.result['best_values'] = self.priors.prior_transform(
            self.result['start'])
        self.likelihood.parameters[~self.likelihood.fixed_mask()] = (
            self.result['best_values'])
        return self.result
    def _run_de(self):
        # Differential-evolution backend.
        return de(**self.args)
    def _run_classical(self, method):
        # Classical optimiser backend from jbopt.classic.
        return classical(method=method, **self.args)
| [
"logging.getLogger",
"jbopt.de.de",
"numpy.asarray",
"jbopt.classic.classical",
"types.MethodType",
"time.time"
] | [((181, 208), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (198, 208), False, 'import logging\n'), ((732, 748), 'numpy.asarray', 'np.asarray', (['cube'], {}), '(cube)\n', (742, 748), True, 'import numpy as np\n'), ((1080, 1127), 'types.MethodType', 'types.MethodType', (['fit_evaluate', 'self.likelihood'], {}), '(fit_evaluate, self.likelihood)\n', (1096, 1127), False, 'import types\n'), ((1179, 1224), 'types.MethodType', 'types.MethodType', (['fixed_mask', 'self.likelihood'], {}), '(fixed_mask, self.likelihood)\n', (1195, 1224), False, 'import types\n'), ((2222, 2233), 'time.time', 'time.time', ([], {}), '()\n', (2231, 2233), False, 'import time\n'), ((2818, 2833), 'jbopt.de.de', 'de', ([], {}), '(**self.args)\n', (2820, 2833), False, 'from jbopt.de import de\n'), ((2888, 2925), 'jbopt.classic.classical', 'classical', ([], {'method': 'method'}), '(method=method, **self.args)\n', (2897, 2925), False, 'from jbopt.classic import classical\n'), ((2513, 2524), 'time.time', 'time.time', ([], {}), '()\n', (2522, 2524), False, 'import time\n')] |
import xbos_services_getter as xsg
import numpy as np
"""Thermostat class to model temperature change.
Note, set STANDARD fields to specify error for actions which do not have enough data for valid predictions. """
class Tstat:
    """Single-zone thermostat model simulating indoor temperature change.

    Per-action prediction-error statistics (mean/variance) are fetched from
    the indoor_temperature_prediction microservice; actions without enough
    data fall back to the STANDARD_* values below (or raise, unless
    ``suppress_not_enough_data_error`` is set).
    """
    # Fallback error statistics used when the microservice has no data.
    STANDARD_MEAN = 0
    STANDARD_VAR = 0
    STANDARD_UNIT = "F"
    def __init__(self, building, zone, temperature, last_temperature=None, suppress_not_enough_data_error=False):
        self.temperature = temperature
        self.last_temperature = last_temperature
        self.indoor_temperature_prediction_stub = xsg.get_indoor_temperature_prediction_stub()
        self.error = {}
        for action in [xsg.NO_ACTION, xsg.HEATING_ACTION, xsg.COOLING_ACTION]:
            try:
                # Deliberate short-circuit left in by the original author until
                # the thermal model is fixed; forces the fallback path below.
                raise Exception("ERROR: Hack. Whoever sees this, yell at Daniel to get back to fixing the thermal model.")
                mean, var, unit = xsg.get_indoor_temperature_prediction_error(self.indoor_temperature_prediction_stub,
                                                                             building,
                                                                             zone,
                                                                             action)
            except Exception:
                if not suppress_not_enough_data_error:
                    # BUG FIX: the message previously contained unfilled
                    # {0}/{1}/{2} placeholders (no .format call).
                    raise Exception("ERROR: Tstat for building: '{0}' and zone: '{1}' did not receive error data from "
                                    "indoor_temperature_prediction microservice for action: '{2}'.".format(building, zone, action))
                print("WARNING: Tstat for building: '{0}' and zone: '{1}' did not receive error data from "
                      "indoor_temperature_prediction microservice for action: '{2}' and is now using STANDARD error.".format(building, zone, action))
                mean, var, unit = Tstat.STANDARD_MEAN, Tstat.STANDARD_VAR, Tstat.STANDARD_UNIT
            self.error[action] = {"mean": mean, "var": var}
    def next_temperature(self, action):
        """Advance one step: +1 for heating (action == 1), -1 for cooling
        (action == 2), plus Gaussian prediction noise for that action.

        NOTE(review): the stored "var" is passed as np.random.normal's scale
        parameter (a standard deviation) — possible variance/stddev mix-up;
        behaviour preserved as-is.
        """
        self.last_temperature = self.temperature
        self.temperature += 1 * (action == 1) - 1 * (action == 2) + np.random.normal(self.error[action]["mean"],
                                                                                     self.error[action]["var"])
        return self.temperature
    def reset(self, temperature, last_temperature=None):
        """Re-initialise the simulated temperatures."""
        self.temperature = temperature
        self.last_temperature = last_temperature
class OutdoorThermostats:
    """Placeholder for a future outdoor-temperature model; not yet implemented."""
    pass
| [
"numpy.random.normal",
"xbos_services_getter.get_indoor_temperature_prediction_stub",
"xbos_services_getter.get_indoor_temperature_prediction_error"
] | [((549, 593), 'xbos_services_getter.get_indoor_temperature_prediction_stub', 'xsg.get_indoor_temperature_prediction_stub', ([], {}), '()\n', (591, 593), True, 'import xbos_services_getter as xsg\n'), ((2070, 2141), 'numpy.random.normal', 'np.random.normal', (["self.error[action]['mean']", "self.error[action]['var']"], {}), "(self.error[action]['mean'], self.error[action]['var'])\n", (2086, 2141), True, 'import numpy as np\n'), ((871, 984), 'xbos_services_getter.get_indoor_temperature_prediction_error', 'xsg.get_indoor_temperature_prediction_error', (['self.indoor_temperature_prediction_stub', 'building', 'zone', 'action'], {}), '(self.\n indoor_temperature_prediction_stub, building, zone, action)\n', (914, 984), True, 'import xbos_services_getter as xsg\n')] |
"""
Neural Network to implement AND gate using McCulloch-Pitts Neuron Model.
"""
import numpy as np
from .neurons import neuron
def main():
    """Interactively search for McCulloch-Pitts weights realising AND."""
    print("\n*** Neural Network for AND Operation ***")
    print("\ny = x1 . x2")
    x1 = np.array([0, 0, 1, 1])
    x2 = np.array([0, 1, 0, 1])
    y: np.array = np.logical_and(x1, x2).astype(int)
    solved = False
    while not solved:
        weight_one = int(input("\nEnter Value for Weight w1: "))
        weight_two = int(input("Enter Value for Weight w2: "))
        res = neuron(x1, x2, y, weight_one, weight_two)
        print("\n\tX1\tX2\tNet\tO")
        for row in res[2]:
            print(f"\t{row['x1']}\t{row['x2']}\t{row['net']}\t{row['output']}")
        if res[0]:
            print("\nYour Weights are Correct!")
            print(f"\nThreshold Value: {res[1]}")
            solved = True
        else:
            print(
                "\nYour Weights are Incorrect! No Net Value satisfies Threshold Constraints."
            )
            print("\nEnter weights again...")
if __name__ == "__main__":
main()
| [
"numpy.array",
"numpy.logical_and"
] | [((236, 258), 'numpy.array', 'np.array', (['[0, 0, 1, 1]'], {}), '([0, 0, 1, 1])\n', (244, 258), True, 'import numpy as np\n'), ((268, 290), 'numpy.array', 'np.array', (['[0, 1, 0, 1]'], {}), '([0, 1, 0, 1])\n', (276, 290), True, 'import numpy as np\n'), ((310, 332), 'numpy.logical_and', 'np.logical_and', (['x1', 'x2'], {}), '(x1, x2)\n', (324, 332), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import base64
import glob
from scipy.signal import medfilt
from scipy.integrate import trapz
import xml.etree.ElementTree as et
from datetime import date
# Session setup: today's date stamp, silence numpy warnings, seaborn theme.
today = date.today()
np.warnings.filterwarnings('ignore')
sns.set(style="darkgrid")
# Parse every ECG XML export found in the working directory.
roots = []
root_names = []
for n in glob.glob('*.xml'):
    roots.append(et.parse(n).getroot())
    root_names.append(n)
root_names.append(n)
def modified_z_score(intensity):
    """Median-based (robust) z-scores of *intensity*.

    Uses the median absolute deviation (MAD) with the usual 0.6745
    consistency factor; a zero MAD falls back to 1 so constant signals do
    not divide by zero.
    """
    center = np.median(intensity)
    mad = np.median([np.abs(intensity - center)])
    if mad == 0:
        mad = 1
    return 0.6745 * (intensity - center) / mad
def df_fixer(y,n):
    """Null out spike samples in lead signal *y* for record index *n*.

    A modified-z-score spike threshold is chosen adaptively: it is anchored
    to the largest score of the first-difference signal inside the QRS
    window (Qonset..Qoffset+30, read from the module-level ``data`` frame
    at 12 rows per record), so the QRS complex itself is never flagged.
    Samples exceeding the threshold are replaced with None.

    NOTE(review): relies on module-level ``data`` with Qonset/Qoffset
    columns, and on ``y`` being a pandas Series (uses ``y_out.index[0]``)
    — confirm against callers.
    """
    threshold = 0
    x = 0
    # Grow the window start offset (x) until the in-window maximum is
    # consistent with a global quantile bound; noisier signals (max score
    # quantile > 150) use the looser .98 quantile, cleaner ones .992.
    while threshold == 0:
        if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+55:
                x += 5
        elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(data.Qonset[n*12])+x:int(data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+55:
                x += 5
    # Mark every first-difference sample above the threshold and blank the
    # corresponding entries (index-aligned to the Series start).
    spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
    y_out = y.copy()
    for i in np.arange(len(spikes)):
        if spikes[i] != 0:
            y_out[i+y_out.index[0]] = None
    return y_out
def half_df_fixer(y,n):
    """Variant of :func:`df_fixer` for the half-rate data.

    Same adaptive spike-removal scheme, but the QRS window comes from the
    module-level ``half_data`` frame, the quantile margin is +60 (vs +55)
    and the window start grows in steps of 2 (vs 5).

    NOTE(review): relies on module-level ``half_data`` with Qonset/Qoffset
    columns, and on ``y`` being a pandas Series — confirm against callers.
    """
    threshold = 0
    x = 0
    # Grow the window start offset (x) until the in-window maximum is
    # consistent with a global quantile bound (.98 for noisy, .992 for clean).
    while threshold == 0:
        if np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) > 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .98)+60:
                x += 2
        elif np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), 1) <= 150:
            if abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() < np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
                threshold = abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() + 1
            elif abs(np.array(modified_z_score(np.diff(y))))[int(half_data.Qonset[n*12])+x:int(half_data.Qoffset[n*12])+30].max() > np.nanquantile(abs(np.array(modified_z_score(np.diff(y)))), .992)+60:
                x += 2
    # Blank every sample whose first-difference score exceeds the threshold.
    spikes = abs(np.array(modified_z_score(np.diff(y)))) > threshold
    y_out = y.copy()
    for i in np.arange(len(spikes)):
        if spikes[i] != 0:
            y_out[i+y_out.index[0]] = None
    return y_out
def hanging_line(point1, point2):
    """Sample a cosh (catenary-style) arc joining *point1* to *point2*.

    Both points are (x, y) pairs with integer x; returns a tuple
    ``(xs, ys)`` with one sample per unit of x, inclusive of both ends.
    The x values are taken modulo 600 before applying cosh (the caller
    works on 600/1200-sample beat windows).
    """
    x0, y0 = point1[0], point1[1]
    x1, y1 = point2[0], point2[1]
    c0 = np.cosh(x0 % 600)
    c1 = np.cosh(x1 % 600)
    # Solve y = scale*cosh(x % 600) + offset through the two endpoints.
    scale = (y1 - y0) / (c1 - c0)
    offset = y0 - scale * c0
    xs = np.linspace(x0, x1, (x1 - x0) + 1)
    ys = scale * np.cosh(xs % 600) + offset
    return (xs, ys)
# Parse each Philips XML ECG export in `roots` into one dict per lead.
# Recordings sampled at 1000/sec go into `tags`; everything else goes into
# `Tags` and is later treated as half-rate (500/sec) data.
# NOTE(review): the hard-coded child indexes (root[6][1][0][14], root[6][0][29],
# root[7][0][1][0], ...) are assumed to match the Philips SierraECG layout —
# confirm against a sample export before changing anything here.
Tags = {'tags':[]}
tags = {'tags':[]}
for root in roots:
    if len(root.find('{http://www3.medical.philips.com}waveforms').getchildren()) == 2:
        if int(root.find('{http://www3.medical.philips.com}waveforms')[1].attrib['samplespersec']) == 1000:
            for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
                tag = {}
                tag['Lead'] = elem.attrib['leadname']
                # P-wave timing: zero out when measurements are Invalid/Failed/missing.
                if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
                        tag['Ponset'] = 0
                        tag['Pdur'] = 0
                        tag['Print'] = 0
                        tag['Poffset'] = 0
                    else:
                        # Ponset = Qonset (global measurement 5) minus the PR interval.
                        tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                        tag['Pdur'] = 0
                        tag['Print'] = int(root[6][1][0][14].text)
                        tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
                elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Failed' or root[6][1][0][14].text == 'Failed' or (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid'):
                    tag['Ponset'] = 0
                    tag['Pdur'] = 0
                    tag['Print'] = 0
                    tag['Poffset'] = 0
                else:
                    tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                    tag['Pdur'] = int(elem[0].text)
                    tag['Print'] = int(root[6][1][0][14].text)
                    tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
                # QRS/T timing: NaN the whole group if any component is Invalid,
                # so whole recordings can be dropped later via isnull().
                if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
                    tag['Qonset'] = np.nan
                    tag['Qrsdur'] = np.nan
                    tag['Qoffset'] = np.nan
                    tag['Tonset'] = np.nan
                    tag['Qtint'] = np.nan
                    tag['Toffset'] = np.nan
                    tag['Tdur'] = np.nan
                else:
                    tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
                    tag['Qrsdur'] = int(root[6][0][29].text)
                    tag['Qoffset'] = tag['Qonset'] + tag['Qrsdur']
                    tag['Tonset'] = int(elem[4].text)
                    tag['Qtint'] = int(root[6][1][0][18].text)
                    tag['Toffset'] = tag['Qonset'] + tag['Qtint']
                    # NOTE(review): 'Tdur' is computed as Qoffset - Qonset, i.e. the
                    # QRS duration, not a T-wave duration — confirm intent.
                    tag['Tdur'] = tag['Qoffset'] - tag['Qonset']
                # Rates, axes and interpretation statements ('Target').
                if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): tag['HeartRate'] = int(root[7][0][1][0].text)
                    if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
                    if root[6][1][0][9].text != None: tag['AtrialRate'] = int(root[6][1][0][9].text)
                    if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': tag['QRSFrontAxis'] = int(root[6][0][15].text)
                    if root[6][0][31].text != None and root[6][0][31].text != 'Failed': tag['QTC'] = int(root[6][0][31].text)
                    tag['Target'] = []
                    for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
                        tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
                else:
                    tag['HeartRate'] = np.nan
                    tag['RRint'] = np.nan
                    tag['AtrialRate'] = np.nan
                    tag['QRSFrontAxis'] = np.nan
                    tag['QTC'] = np.nan
                    tag['Target'] = []
                # Patient demographics; age comes either as a birth date or in years.
                if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
                    time = root[3].attrib
                    tag['Date'] = time['date']
                    tag['Time'] = time['time']
                    tag['Sex'] = root[5][0][6].text
                    tag['ID'] = root[5][0][0].text
                    tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
                    if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
                        tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
                    if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
                        tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
                tag['Waveform'] = elem[6].text
                # tag['LongWaveform'] = root[8][0].text
                tags['tags'].append(tag)
        else:
            # Non-1000/sec recording: same extraction, collected into `Tags`.
            for elem in root.find('{http://www3.medical.philips.com}waveforms')[1]:
                Tag = {}
                Tag['Lead'] = elem.attrib['leadname']
                if (root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid') and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == '\n ' or root[6][1][0][14].text == 'Failed':
                        Tag['Ponset'] = 0
                        Tag['Pdur'] = 0
                        Tag['Print'] = 0
                        Tag['Poffset'] = 0
                    else:
                        # NOTE(review): this branch uses float() where the 1000/sec
                        # branch uses int() — confirm whether that is intentional.
                        Tag['Ponset'] = float(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                        Tag['Pdur'] = 0
                        Tag['Print'] = int(root[6][1][0][14].text)
                        Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + 0
                elif root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][14].text == None or root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == None or root[6][1][0][14].text == 'Invalid' or elem[0].text == 'Invalid' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    Tag['Ponset'] = 0
                    Tag['Pdur'] = 0
                    Tag['Print'] = 0
                    Tag['Poffset'] = 0
                else:
                    Tag['Ponset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)
                    Tag['Pdur'] = int(elem[0].text)
                    Tag['Print'] = int(root[6][1][0][14].text)
                    Tag['Poffset'] = (int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text) - int(root[6][1][0][14].text)) + int(elem[0].text)
                if (root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text == 'Invalid' or root[6][1][0][18].text == None or root[6][0][29].text == 'Invalid' or elem[4].text == 'Invalid' or root[6][1][0][18].text == 'Invalid'):
                    Tag['Qonset'] = np.nan
                    Tag['Qrsdur'] = np.nan
                    Tag['Qoffset'] = np.nan
                    Tag['Tonset'] = np.nan
                    Tag['Qtint'] = np.nan
                    Tag['Toffset'] = np.nan
                    Tag['Tdur'] = np.nan
                else:
                    Tag['Qonset'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[5].text)
                    Tag['Qrsdur'] = int(root[6][0][29].text)
                    Tag['Qoffset'] = Tag['Qonset'] + Tag['Qrsdur']
                    Tag['Tonset'] = int(elem[4].text)
                    Tag['Qtint'] = int(root[6][1][0][18].text)
                    Tag['Toffset'] = Tag['Qonset'] + Tag['Qtint']
                    Tag['Tdur'] = Tag['Qoffset'] - Tag['Qonset']
                if root[7].tag == '{http://www3.medical.philips.com}interpretations' and root[6].tag == '{http://www3.medical.philips.com}internalmeasurements':
                    if root[7][0][1][0].text != None and (root[7][0][1][0].text).isdigit(): Tag['HeartRate'] = int(root[7][0][1][0].text)
                    if root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text != None: Tag['RRint'] = int(root[7][0].find('{http://www3.medical.philips.com}globalmeasurements')[1].text)
                    if root[6][1][0][9].text != None: Tag['AtrialRate'] = int(root[6][1][0][9].text)
                    if root[6][0][15].text != None and root[6][0][15].text != 'Indeterminate': Tag['QRSFrontAxis'] = int(root[6][0][15].text)
                    if root[6][0][31].text != None: Tag['QTC'] = int(root[6][0][31].text)
                    Tag['Target'] = []
                    for n in range(len(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):])):
                        Tag['Target'].append(root[7][0][root[7][0].getchildren().index(root[7][0].find('{http://www3.medical.philips.com}statement')):][n][0].text)
                else:
                    Tag['HeartRate'] = np.nan
                    Tag['RRint'] = np.nan
                    Tag['AtrialRate'] = np.nan
                    Tag['QRSFrontAxis'] = np.nan
                    Tag['QTC'] = np.nan
                    Tag['Target'] = []
                if root[3].tag == '{http://www3.medical.philips.com}reportinfo' and root[5].tag == '{http://www3.medical.philips.com}patient':
                    time = root[3].attrib
                    Tag['Date'] = time['date']
                    Tag['Time'] = time['time']
                    Tag['Sex'] = root[5][0][6].text
                    Tag['ID'] = root[5][0][0].text
                    Tag['Name'] = root[5][0].find('{http://www3.medical.philips.com}name')[0].text + ', ' + root[5][0].find('{http://www3.medical.philips.com}name')[1].text
                    if len(root[5][0].find('{http://www3.medical.philips.com}age')) > 0:
                        if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}dateofbirth':
                            Tag['Age'] = int(today.strftime("%Y")) - int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text[0:4])
                        if root[5][0].find('{http://www3.medical.philips.com}age')[0].tag == '{http://www3.medical.philips.com}years':
                            Tag['Age'] = int(root[5][0].find('{http://www3.medical.philips.com}age')[0].text)
                Tag['Waveform'] = elem[6].text
                # Tag['LongWaveform'] = root[8][0].text
                Tags['tags'].append(Tag)
# Collect the parsed per-lead dicts into DataFrames: `data` holds the
# 1000 samples/sec recordings, `half_data` the rest (half-rate).
half_data = pd.DataFrame(Tags['tags'])
data = pd.DataFrame(tags['tags'])
del roots
del root
del elem
# Each recording contributes 12 rows (one per lead), so the recording
# counts are row counts divided by 12.
count1000 = int(len(data)/12)
count500 = int(len(half_data)/12)
count = count1000 + count500
# --- 1000 samples/sec recordings: decode, clean and annotate the 12 leads ---
if len(data) > 0:
    # Set aside whole recordings that contain any NaN measurement
    # (matched by ID/Date/Time) into `missing_data`.
    array = np.unique(data[data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
    missing_data = data.loc[data['ID'].isin(array) & data['Date'].isin(array) & data['Time'].isin(array)]
    data.drop(missing_data.index, axis=0,inplace=True)
    missing_data = missing_data.reset_index(drop=True)
    del tag
    del tags
    data = data.reset_index(drop=True)
    # Tonset/Pdur differ per lead; replace with the per-recording average.
    for n in range(count1000):
        data.Tonset[n*12:(n+1)*12] = np.repeat(int(data.Tonset[n*12:(n+1)*12].sum()/12), 12)
        data.Pdur[n*12:(n+1)*12] = np.repeat(int(data.Pdur[n*12:(n+1)*12].sum()/12), 12)
    # Base64-decode each lead's waveform payload into raw bytes.
    x = 0
    p = []
    for x in range(len(data.Waveform)):
        t = base64.b64decode(data.Waveform[x])
        p.append(np.asarray(t))
        x+=1
    p = np.asarray(p)
    a = []
    for i in p:
        o = []
        for x in i:
            o.append(x)
        a.append(o)
    df = pd.DataFrame(a)
    df.insert(0, 'Lead', data['Lead'])
    # Pivot each recording's 12 byte-rows into columns keyed by lead name.
    blank = []
    for n in range(count1000):
        blank.append(pd.pivot_table(df[(n*12):(n+1)*12], columns=df.Lead))
    test = pd.concat(blank)
    # Recombine (low, high) byte pairs into signed 16-bit samples.
    # NOTE(review): the list appended to `array` at the end of iteration n is
    # the one filled during iteration n+1 (and n runs 0..12 over 12 columns,
    # with n-1 starting at the last column) — the offsets cancel out, but
    # confirm the resulting lead order against a known recording.
    new = []
    array = []
    for n in range(13):
        for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
            if num > 128:
                new.append(index - (256 * (256 - num)))
            elif num < 128:
                new.append(index + (256 * num))
            elif num == 0:
                new.append(index)
            else:
                new.append(index)
        new = []
        array.append(new)
    array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
    df = pd.DataFrame(array)
    df = pd.pivot_table(df, columns=test.columns)
    df = df.fillna(0)
    del a
    del p
    del o
    del t
    del blank
    del new
    del array
    # Baseline (isoelectric) correction per recording and lead: subtract the
    # mean of flat segments outside the QRS-T complex; which segments exist
    # depends on where the RR interval places the neighbouring beats.
    for n in range(count1000):
        for x in range(12):
            if (data.Toffset[n*12]-data.RRint[n*12]) >= data.Ponset[n*12] or (data.Ponset[n*12] + data.RRint[n*12]) - data.Toffset[n*12] == 1:
                df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
            else:
                rrint = data.RRint[n*12]
                if (rrint + data.Ponset[n*12]) > 1200 and (data.Toffset[n*12]-rrint) < 0:
                    # temp = segment before P onset, test = segment after T offset;
                    # the middle third of each is used as the baseline estimate.
                    temp = df.iloc[:,x][int(n*1200):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                elif (rrint + data.Ponset[n*12]) > 1200 and (data.Toffset[n*12]-rrint) > 0:
                    temp = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)-rrint):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                elif rrint + data.Ponset[n*12] < 1200 and (data.Toffset[n*12]-rrint) < 0:
                    temp = df.iloc[:,x][int(n*1200):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
                else:
                    temp = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)-rrint):int(data.Ponset[n*12]+(n*1200))]
                    test = df.iloc[:,x][int(data.Toffset[n*12]+(n*1200)):int(rrint + data.Ponset[n*12]+(n*1200))]
                    if test.empty == False and temp.empty == False:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
                    elif temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
                    elif test.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
                    elif test.empty and temp.empty:
                        df.iloc[:,x][n*1200:1200*(n+1)] = df.iloc[:,x][n*1200:1200*(n+1)] - (df.iloc[:,x][n*1200:int(data.Qonset[n*12])+(n*1200)].mean() + df.iloc[:,x][int(data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
    unfiltered_leads = df.copy()
    # Spike removal: df_fixer NaNs out spike samples; gaps are then bridged
    # with cosh arcs from hanging_line. If the repair destroys more than 40%
    # of the QRS area, fall back to a median-filtered copy of the raw lead.
    for n in range(count1000):
        for inx in range(12):
            test = df_fixer(df.iloc[:,inx][n*1200:(n+1)*1200], n)
            gaps = []
            lstOfNs = []
            gap = []
            # Group consecutive NaN indexes (within 5 samples) into gap runs.
            for num in test[test.isna() == True].index:
                lstOfNs.append(num)
                if len(lstOfNs) == 1:
                    gap.append(lstOfNs[0])
                if len(lstOfNs) > 1:
                    if lstOfNs[-1] - lstOfNs[-2] < 5:
                        gap.append(num)
                    elif lstOfNs[-1] - lstOfNs[-2] > 5:
                        gaps.append(gap)
                        gap = []
                        gap.append(num)
            gaps.append(gap)
            if gaps != [[]]:
                x = []
                y = []
                for g in gaps:
                    if len(g) == 1:
                        x.append([g[-1]+1])
                        y.append(test[g[-1]+1])
                    if np.isnan(test.iloc[0]):
                        # Gap starts at the window edge: anchor both ends on the
                        # first valid sample after the gap.
                        point1 = [g[0], test[g[-1]+1]]
                        point2 = [g[-1]+1, test[g[-1]+1]]
                        x_temp,y_temp = hanging_line(point1, point2)
                        x.append(x_temp)
                        y.append(y_temp)
                    else:
                        point1 = [g[0]-1, test[g[0]-1]]
                        point2 = [g[-1]+1, test[g[-1]+1]]
                        x_temp,y_temp = hanging_line(point1, point2)
                        x.append(x_temp)
                        y.append(y_temp)
                for i in range(len(x)):
                    test[x[i]] = y[i]
            if (trapz(abs(test[int(data.Qonset[n*12]):int(data.Qoffset[n*12])]))/trapz(abs(df.iloc[:,inx][int(data.Qonset[12*n]+(1200*n)):int(data.Qoffset[12*n]+(1200*n))]))) < .60:
                test = df.iloc[:,inx][n*1200:(n+1)*1200]
                test = medfilt(test, kernel_size=9)
            df.iloc[:,inx][n*1200:(n+1)*1200] = test
            del gaps
            del lstOfNs
            del gap
            del test
    # Derive the XYZ vectorcardiogram from the 8 independent leads.
    VTI_leads = df[['III', 'aVF', 'aVL', 'aVR']]
    df = df[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
    Unfiltered_VTI_leads = unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']]
    unfiltered_leads = unfiltered_leads[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
    # NOTE(review): regression matrix mapping 8 leads -> X/Y/Z (Kors-style
    # coefficients, presumably) — confirm provenance of the values.
    matrix = [[.38, -.07, -.13, .05, -.01, .14, .06, .54],
              [-.07, .93, .06, -.02, -.05, .06, -.17, .13],
              [.11, -.23, -.43, -.06, -.14, -.20, -.11, .31]]
    x = matrix[0]
    y = matrix[1]
    z = matrix[2]
    n = 0
    xtemp = []
    ytemp = []
    ztemp = []
    for i in range(len(df)):
        xtemp.append((df.iloc[n].values * x).sum())
        ytemp.append((df.iloc[n].values * y).sum())
        ztemp.append((df.iloc[n].values * z).sum())
        n+=1
    df['x'] = xtemp
    df['y'] = ytemp
    df['z'] = ztemp
    n = 0
    xtemp = []
    ytemp = []
    ztemp = []
    for i in range(len(unfiltered_leads)):
        xtemp.append((unfiltered_leads.iloc[n].values * x).sum())
        ytemp.append((unfiltered_leads.iloc[n].values * y).sum())
        ztemp.append((unfiltered_leads.iloc[n].values * z).sum())
        n+=1
    df['Unfiltered_x'] = xtemp
    df['Unfiltered_y'] = ytemp
    df['Unfiltered_z'] = ztemp
    del xtemp
    del ytemp
    del ztemp
    # Attach per-recording metadata, then broadcast each recording's scalar
    # measurements across its 1200 sample rows.
    df['Date'] = data['Date']
    df['ID'] = data['ID']
    df['Time'] = data['Time']
    df['Print'] = data['Print']
    df['Ponset'] = data['Ponset']
    df['Pdur'] = data['Pdur']
    df['Poffset'] = data['Poffset']
    df['Qonset'] = data['Qonset']
    df['Qrsdur'] = data['Qrsdur']
    df['Qtint'] = data['Qtint']
    df['Qoffset'] = data['Qoffset']
    df['Tonset'] = data['Tonset']
    df['Tdur'] = data['Tdur']
    df['Toffset'] = data['Toffset']
    df['HeartRate'] = data['HeartRate']
    df['QRSFrontAxis'] = data['QRSFrontAxis']
    df['Sex'] = data['Sex']
    df['QTC'] = data['QTC']
    df['Age'] = data['Age']
    df['Name'] = data['Name']
    for n in range(count1000):
        df['Ponset'][(n*1200):(n+1)*1200] = data['Ponset'][n*12]
        df['Print'][(n*1200):(n+1)*1200] = data['Print'][n*12]
        df['Pdur'][(n*1200):(n+1)*1200] = data['Pdur'][n*12]
        df['Poffset'][(n*1200):(n+1)*1200] = data['Poffset'][n*12]
        df['Qonset'][(n*1200):(n+1)*1200] = data['Qonset'][n*12]
        df['Qrsdur'][(n*1200):(n+1)*1200] = data['Qrsdur'][n*12]
        df['Qtint'][(n*1200):(n+1)*1200] = data['Qtint'][n*12]
        df['Qoffset'][(n*1200):(n+1)*1200] = data['Qoffset'][n*12]
        df['Tonset'][(n*1200):(n+1)*1200] = data['Tonset'][n*12]
        df['Tdur'][(n*1200):(n+1)*1200] = data['Tdur'][n*12]
        df['Toffset'][(n*1200):(n+1)*1200] = data['Toffset'][n*12]
        df['HeartRate'][(n*1200):(n+1)*1200] = data['HeartRate'][n*12]
        df['QRSFrontAxis'][(n*1200):(n+1)*1200] = data['QRSFrontAxis'][n*12]
        df['Sex'][(n*1200):(n+1)*1200] = data['Sex'][n*12]
        df['QTC'][(n*1200):(n+1)*1200] = data['QTC'][n*12]
        df['Age'][(n*1200):(n+1)*1200] = data['Age'][n*12]
        df['Date'][(n*1200):(n+1)*1200] = data['Date'][12*n]
        df['Time'][(n*1200):(n+1)*1200] = data['Time'][12*n]
        df['ID'][(n*1200):(n+1)*1200] = data['ID'][12*n]
        df['Name'][(n*1200):(n+1)*1200] = data['Name'][12*n]
    # Re-attach the derived limb leads, plus unfiltered copies of all 12.
    df[['III', 'aVF', 'aVL', 'aVR']] = VTI_leads
    unfiltered_leads[['III', 'aVF', 'aVL', 'aVR']] = Unfiltered_VTI_leads
    df[['Unfiltered_I', 'Unfiltered_II', 'Unfiltered_III', 'Unfiltered_V1', 'Unfiltered_V2', 'Unfiltered_V3', 'Unfiltered_V4', 'Unfiltered_V5', 'Unfiltered_V6', 'Unfiltered_aVF', 'Unfiltered_aVL', 'Unfiltered_aVR']] = unfiltered_leads[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVF', 'aVL', 'aVR']]
    del unfiltered_leads
    del VTI_leads
if len(half_data) > 0:
array = np.unique(half_data[half_data.isnull().any(axis=1)][['ID', 'Date', 'Time']])
missing_half_data = half_data.loc[half_data['ID'].isin(array) & half_data['Date'].isin(array) & half_data['Time'].isin(array)]
half_data.drop(missing_half_data.index, axis=0,inplace=True)
missing_half_data = missing_half_data.reset_index(drop=True)
del Tag
del Tags
half_data = half_data.reset_index(drop=True)
for n in range(count500):
half_data.Tonset[n*12:(n+1)*12] = np.repeat(int(half_data.Tonset[n*12:(n+1)*12].sum()/12), 12)
half_data.Pdur[n*12:(n+1)*12] = np.repeat(int(half_data.Pdur[n*12:(n+1)*12].sum()/12), 12)
x = 0
p = []
for x in range(len(half_data.Waveform)):
t = base64.b64decode(half_data.Waveform[x])
p.append(np.asarray(t))
x+=1
p = np.asarray(p)
a = []
for i in p:
o = []
for x in i:
o.append(x)
a.append(o)
half_df = pd.DataFrame(a)
half_df.insert(0, 'Lead', half_data['Lead'])
blank = []
for n in range(count500):
blank.append(pd.pivot_table(half_df[(n*12):(n+1)*12], columns=half_df.Lead))
test = pd.concat(blank)
new = []
array = []
for n in range(13):
for index, num in zip(test.iloc[:, n-1][::2], test.iloc[:, n-1][1::2]):
if num > 128:
new.append(index - (256 * (256 - num)))
elif num < 128:
new.append(index + (256 * num))
elif num == 0:
new.append(index)
else:
new.append(index)
new = []
array.append(new)
array = np.asarray([array[0], array[1], array[2], array[3], array[4], array[5], array[6], array[7], array[8], array[9], array[10], array[11]])
half_df = pd.DataFrame(array)
half_df = pd.pivot_table(half_df, columns=test.columns)
half_df = half_df.fillna(0)
blank = []
for n in range(count500):
blank.append(half_df[(n*1200):((n+1)*1200)-600])
test = pd.concat(blank)
half_df = test
half_df = half_df.reset_index(drop=True)
half_df = pd.pivot_table(half_df, columns=half_df.index)
array = []
for i in range(count500):
for x in range(12):
temp = []
new = []
for n in half_df.iloc[x,i*600:(i+1)*600]:
temp.append(n)
if len(temp) > 1:
new.append(temp[-2])
if len(temp) < 601 and len(temp) > 1:
new.append((temp[-1]+temp[-2])/2)
if len(temp) == 600:
new.append(temp[-1])
new.append(temp[-1])
array.append(new)
I = (np.asarray(array[::12])).reshape(count500*1200)
II = (np.asarray(array[1::12])).reshape(count500*1200)
III = (np.asarray(array[2::12])).reshape(count500*1200)
V1 = (np.asarray(array[3::12])).reshape(count500*1200)
V2 = (np.asarray(array[4::12])).reshape(count500*1200)
V3 = (np.asarray(array[5::12])).reshape(count500*1200)
V4 = (np.asarray(array[6::12])).reshape(count500*1200)
V5 = (np.asarray(array[7::12])).reshape(count500*1200)
V6 = (np.asarray(array[8::12])).reshape(count500*1200)
aVF = (np.asarray(array[9::12])).reshape(count500*1200)
aVL = (np.asarray(array[10::12])).reshape(count500*1200)
aVR = (np.asarray(array[11::12])).reshape(count500*1200)
half_df = pd.pivot_table(pd.DataFrame([I, II, III, V1, V2, V3, V4, V5, V6, aVF, aVL, aVR]), columns=test.columns)
half_df = half_df.fillna(0)
del I
del II
del III
del V1
del V2
del V3
del V4
del V5
del V6
del aVF
del aVL
del aVR
del a
del p
del o
del t
del blank
del new
del array
del temp
for n in range(count500):
for x in range(12):
if ((half_data.Toffset[n*12]-half_data.RRint[n*12]) >= half_data.Ponset[n*12]) or ((half_data.Ponset[n*12] + half_data.RRint[n*12]) - half_data.Toffset[n*12] == 1):
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
rrint = half_data.RRint[n*12]
if (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif (rrint + half_data.Ponset[n*12]) > 1200 and (half_data.Toffset[n*12]-rrint) > 0:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int((n+1)*1200)]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
elif rrint + half_data.Ponset[n*12] < 1200 and (half_data.Toffset[n*12]-rrint) < 0:
temp = half_df.iloc[:,x][int(n*1200):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
else:
temp = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)-rrint):int(half_data.Ponset[n*12]+(n*1200))]
test = half_df.iloc[:,x][int(half_data.Toffset[n*12]+(n*1200)):int(rrint + half_data.Ponset[n*12]+(n*1200))]
if test.empty == False and temp.empty == False:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - ((temp[len(temp)//3:len(temp)*2//3].mean() + test[len(test)//3:len(test)*2//3].mean()) / 2)
elif temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - test[len(test)//3:len(test)*2//3].mean()
elif test.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - temp[len(temp)//3:len(temp)*2//3].mean()
elif test.empty and temp.empty:
half_df.iloc[:,x][n*1200:1200*(n+1)] = half_df.iloc[:,x][n*1200:1200*(n+1)] - (half_df.iloc[:,x][n*1200:int(half_data.Qonset[n*12])+(n*1200)].mean() + half_df.iloc[:,x][int(half_data.Qoffset[n*12])+(n*1200):(n+1)*1200].mean()) / 2
for x in range(12):
half_df.iloc[:,x] = half_df.iloc[:,x]*2.5
unfiltered_half_leads = half_df.copy()
for n in range(count500):
for inx in range(12):
test = half_df_fixer(half_df.iloc[:,inx][n*1200:(n+1)*1200], n)
gaps = []
lstOfNs = []
gap = []
for num in test[test.isna() == True].index:
lstOfNs.append(num)
if len(lstOfNs) == 1:
gap.append(lstOfNs[0])
if len(lstOfNs) > 1:
if lstOfNs[-1] - lstOfNs[-2] < 5:
gap.append(num)
elif lstOfNs[-1] - lstOfNs[-2] > 5:
gaps.append(gap)
gap = []
gap.append(num)
gaps.append(gap)
if gaps != [[]]:
x = []
y = []
for g in gaps:
if len(g) == 1:
x.append([g[-1]+1])
y.append(test[g[-1]+1])
if np.isnan(test.iloc[0]):
point1 = [g[0], test[g[-1]+1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
else:
point1 = [g[0]-1, test[g[0]-1]]
point2 = [g[-1]+1, test[g[-1]+1]]
x_temp,y_temp = hanging_line(point1, point2)
x.append(x_temp)
y.append(y_temp)
for i in range(len(x)):
test[x[i]] = y[i]
if (trapz(abs(test[int(half_data.Qonset[n*12]):int(half_data.Qoffset[n*12])]))/trapz(abs(half_df.iloc[:,inx][int(half_data.Qonset[12*n]+(1200*n)):int(half_data.Qoffset[12*n]+(1200*n))]))) < .60:
test = half_df.iloc[:,inx][n*1200:(n+1)*1200]
test = medfilt(test, kernel_size=9)
half_df.iloc[:,inx][n*1200:(n+1)*1200] = test
del gaps
del lstOfNs
del gap
del test
half_VTI_leads = half_df[['III', 'aVF', 'aVL', 'aVR']]
half_df = half_df[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
Unfiltered_half_VTI_leads = unfiltered_half_leads[['III', 'aVF', 'aVL', 'aVR']]
unfiltered_half_leads = unfiltered_half_leads[['I', 'II', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6']]
matrix = [[.38, -.07, -.13, .05, -.01, .14, .06, .54],
[-.07, .93, .06, -.02, -.05, .06, -.17, .13],
[.11, -.23, -.43, -.06, -.14, -.20, -.11, .31]]
x = matrix[0]
y = matrix[1]
z = matrix[2]
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(half_df)):
xtemp.append((half_df.iloc[n].values * x).sum())
ytemp.append((half_df.iloc[n].values * y).sum())
ztemp.append((half_df.iloc[n].values * z).sum())
n+=1
half_df['x'] = xtemp
half_df['y'] = ytemp
half_df['z'] = ztemp
x = matrix[0]
y = matrix[1]
z = matrix[2]
n = 0
xtemp = []
ytemp = []
ztemp = []
for i in range(len(unfiltered_half_leads)):
xtemp.append((unfiltered_half_leads.iloc[n].values * x).sum())
ytemp.append((unfiltered_half_leads.iloc[n].values * y).sum())
ztemp.append((unfiltered_half_leads.iloc[n].values * z).sum())
n+=1
half_df['Unfiltered_x'] = xtemp
half_df['Unfiltered_y'] = ytemp
half_df['Unfiltered_z'] = ztemp
del xtemp
del ytemp
del ztemp
half_df['Date'] = half_data['Date']
# Copy the per-record metadata columns from the half-rate source frame, then
# broadcast each record's scalar value across its 1200 resampled rows (the
# source repeats each record once per lead, i.e. every 12 rows).
_copy_cols = ['ID', 'Time', 'Ponset', 'Print', 'Pdur', 'Poffset', 'Qonset',
              'Qrsdur', 'Qtint', 'Qoffset', 'Tonset', 'Tdur', 'Toffset',
              'HeartRate', 'QRSFrontAxis', 'Sex', 'QTC', 'Age', 'Name']
for _col in _copy_cols:
    half_df[_col] = half_data[_col]
# NOTE(review): 'Date' is broadcast below but never copied above — presumably
# the column was created earlier in the script; confirm against prior code.
_broadcast_cols = ['Ponset', 'Print', 'Pdur', 'Poffset', 'Qonset', 'Qrsdur',
                   'Qtint', 'Qoffset', 'Tonset', 'Tdur', 'Toffset',
                   'HeartRate', 'QRSFrontAxis', 'Sex', 'QTC', 'Name', 'Age',
                   'ID', 'Date', 'Time']
for n in range(count500):
    row_lo, row_hi = n * 1200, (n + 1) * 1200
    for _col in _broadcast_cols:
        half_df[_col][row_lo:row_hi] = half_data[_col][n * 12]
# Derive the augmented/limb leads for the half-sampled frame, attach the
# unfiltered copies, then fold the half-rate frame into the main frame.
half_df[['III', 'aVF', 'aVL', 'aVR']] = half_VTI_leads
unfiltered_half_leads[['III', 'aVF', 'aVL', 'aVR']] = Unfiltered_half_VTI_leads
_leads = ['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVF', 'aVL', 'aVR']
half_df[['Unfiltered_' + lead for lead in _leads]] = unfiltered_half_leads[_leads]
del unfiltered_half_leads
del half_VTI_leads
# BUG FIX: the original used three independent `if` statements. The first
# branch deletes `half_data` (and `data`), so evaluating the NEXT condition's
# `len(half_data)` afterwards raised NameError whenever an earlier branch had
# run. An if/elif chain evaluates each condition at most once.
if (len(half_data) > 0) and (len(data) > 0):
    df = pd.concat([df, half_df])
    df = df.reset_index(drop=True)
    del half_data
    del data
    del half_df
elif (len(half_data) > 0) and (len(data) == 0):
    df = half_df
    del half_df
    del half_data
elif (len(half_data) == 0) and (len(data) > 0):
    # `df` already holds the full-rate data; just release the empty source.
    del data
# Spatial (vector) magnitude of the XYZ leads, sample by sample.
df['total_xyz'] = (df.x ** 2 + df.y ** 2 + df.z ** 2) ** 0.5

def _beat_vti(series, end_col, rectify):
    """Per-beat trapezoidal voltage-time integral of `series`, taken from
    Qonset to the annotation in column `end_col` ('Qoffset' or 'Toffset'),
    optionally rectified (absolute value), broadcast back to the 1200
    samples of each beat."""
    per_beat = []
    for beat in range(count):
        base = 1200 * beat
        start = int(df.Qonset[base] + base)
        stop = int(df[end_col][base] + base)
        segment = series[start:stop]
        per_beat.append(trapz(abs(segment) if rectify else segment))
    return np.repeat(per_beat, 1200)

# Magnitude VTIs over the QRS complex and over QRS+T.
df['QRSVTI'] = _beat_vti(df.total_xyz, 'Qoffset', rectify=False)
df['QRStVTI'] = _beat_vti(df.total_xyz, 'Toffset', rectify=False)
# Rectified per-axis VTIs over the QRS complex, combined into a 3D area.
df['XVTI'] = _beat_vti(df.x, 'Qoffset', rectify=True)
df['YVTI'] = _beat_vti(df.y, 'Qoffset', rectify=True)
df['ZVTI'] = _beat_vti(df.z, 'Qoffset', rectify=True)
df['QRS3DArea'] = (df.XVTI ** 2 + df.YVTI ** 2 + df.ZVTI ** 2) ** 0.5
# Same, but integrating through the end of the T wave.
df['XtVTI'] = _beat_vti(df.x, 'Toffset', rectify=True)
df['YtVTI'] = _beat_vti(df.y, 'Toffset', rectify=True)
df['ZtVTI'] = _beat_vti(df.z, 'Toffset', rectify=True)
df['QRSt3DArea'] = (df.XtVTI ** 2 + df.YtVTI ** 2 + df.ZtVTI ** 2) ** 0.5
# Signed (non-rectified) per-axis VTIs over the QRS complex.
df['XVector_VTI'] = _beat_vti(df.x, 'Qoffset', rectify=False)
df['YVector_VTI'] = _beat_vti(df.y, 'Qoffset', rectify=False)
df['ZVector_VTI'] = _beat_vti(df.z, 'Qoffset', rectify=False)
df['QRS3DVector_Area'] = (df.XVector_VTI ** 2 + df.YVector_VTI ** 2 + df.ZVector_VTI ** 2) ** 0.5
# Signed per-axis VTIs through the end of the T wave.
df['XtVector_VTI'] = _beat_vti(df.x, 'Toffset', rectify=False)
df['YtVector_VTI'] = _beat_vti(df.y, 'Toffset', rectify=False)
df['ZtVector_VTI'] = _beat_vti(df.z, 'Toffset', rectify=False)
df['QRSt3DVector_Area'] = (df.XtVector_VTI ** 2 + df.YtVector_VTI ** 2 + df.ZtVector_VTI ** 2) ** 0.5
# Per-beat T-wave peak amplitude and peak-to-end sample count (TpTe) for each
# axis and for the spatial magnitude, plus the analogous QRS peak-to-end
# count (QpQe).
Tamp, XTamp, YTamp, ZTamp = [], [], [], []
TpTe, XTpTe, YTpTe, ZTpTe = [], [], [], []
QpQe = []

def _peak_and_tail(segment):
    """Return (largest |value| in segment, number of samples from the first
    occurrence of that peak to the end of the segment, inclusive)."""
    mags = [abs(v) for v in segment]
    peak = max(mags)
    return peak, len(mags) - mags.index(peak)

for beat in range(count):
    base = 1200 * beat
    t_lo = int(df.Tonset[base] + base)
    t_hi = int(df.Toffset[base] + base)
    if t_lo > t_hi:
        # Inverted T annotation: no usable window, record NaN for every
        # T-wave metric of this beat.
        for amp_list, tail_list in ((XTamp, XTpTe), (YTamp, YTpTe),
                                    (ZTamp, ZTpTe), (Tamp, TpTe)):
            amp_list.append(np.nan)
            tail_list.append(np.nan)
    else:
        if df.Tonset[1200 * beat] == df.Toffset[1200 * beat]:
            # Zero-width annotation: widen the window by 10 samples each side.
            t_lo, t_hi = t_lo - 10, t_hi + 10
        for series, amp_list, tail_list in ((df.x, XTamp, XTpTe),
                                            (df.y, YTamp, YTpTe),
                                            (df.z, ZTamp, ZTpTe),
                                            (df.total_xyz, Tamp, TpTe)):
            peak, tail = _peak_and_tail(series[t_lo:t_hi])
            amp_list.append(peak)
            tail_list.append(tail)
    # QRS peak-to-end count is computed for every beat regardless of the T window.
    q_lo = int(df.Qonset[base] + base)
    q_hi = int(df.Qoffset[base] + base)
    QpQe.append(_peak_and_tail(df.total_xyz[q_lo:q_hi])[1])

# Broadcast each per-beat value over the beat's 1200 rows.
for _col, _per_beat in (('QpQe', QpQe), ('Tamp', Tamp), ('XTamp', XTamp),
                        ('YTamp', YTamp), ('ZTamp', ZTamp), ('XTpTe', XTpTe),
                        ('YTpTe', YTpTe), ('ZTpTe', ZTpTe), ('TpTe', TpTe)):
    df[_col] = np.repeat(_per_beat, 1200)
del Tamp, XTamp, YTamp, ZTamp, XTpTe, YTpTe, ZTpTe, TpTe, QpQe
# Q-wave amplitude per lead per beat: peak-to-peak when the window is
# biphasic, otherwise the signed extreme. Columns 12-15 of `temp` are
# x, y, z and the spatial magnitude; only those four are kept as columns.
temp = df[['I', 'II', 'III', 'V1', 'V2', 'V3', 'V4', 'V5', 'V6', 'aVR', 'aVL', 'aVF', 'x', 'y', 'z', 'total_xyz']]
Qamp = []
for x in range(count):
    base = 1200 * x
    q_lo = int(df.Qonset[base] + base)
    q_hi = int(df.Qoffset[base] + base)
    for i in range(16):
        window = temp.iloc[:, i][q_lo:q_hi]
        # Hoisted: min/max were recomputed for every condition. The original
        # also had a branch for `min > 0 and max < 0`, which is unreachable
        # (min <= max always) and has been removed; behavior is unchanged.
        lo, hi = min(window), max(window)
        if lo < 0 and hi > 0:
            Qamp.append(hi - lo)   # biphasic: peak-to-peak amplitude
        elif hi < 0:
            Qamp.append(lo)        # entirely negative deflection
        else:
            Qamp.append(hi)        # entirely non-negative deflection
del temp
# Every 16th entry starting at 12/13/14/15 is x / y / z / magnitude.
df['XQamp'] = np.repeat(Qamp[12::16], 1200)
df['YQamp'] = np.repeat(Qamp[13::16], 1200)
df['ZQamp'] = np.repeat(Qamp[14::16], 1200)
df['Qamp'] = np.repeat(Qamp[15::16], 1200)
del Qamp
# Summary table: one row per record (each record spans 1200 samples).
text_df = df[['ID', 'Name', 'Age', 'Sex', 'Date', 'Time', 'HeartRate', 'Pdur',
              'Print', 'Qrsdur', 'Qtint', 'QTC', 'TpTe', 'QRSFrontAxis',
              'QRSVTI', 'XVector_VTI', 'YVector_VTI', 'ZVector_VTI',
              'QRStVTI', 'XtVTI', 'YtVTI', 'ZtVTI',
              'Qamp', 'XQamp', 'YQamp', 'ZQamp', 'Tamp', 'XTamp', 'YTamp',
              'ZTamp']]
text_df = text_df[::1200]
text_df.to_csv('data.csv', index=False)
# One four-panel figure per record (X, Y, Z, spatial RMS), saved as <name>.png.
for rec in range(count):
    lo, hi = rec * 1200, (rec + 1) * 1200
    fig, axes = plt.subplots(2, 2, figsize=(15, 8))
    panels = zip(axes.ravel(),
                 ('Lead X', 'Lead Y', 'Lead Z', 'XYZ RMS'),
                 (df.x, df.y, df.z, df.total_xyz))
    for panel, title, series in panels:
        panel.plot(series[lo:hi])
        panel.set_title(title)
    fig.subplots_adjust(hspace=.3)
    fig.subplots_adjust(wspace=.1)
    fig.savefig('{}.png'.format(root_names[rec][:-4]), dpi=1800, format='png')
del df | [
"seaborn.set",
"pandas.pivot_table",
"numpy.repeat",
"xml.etree.ElementTree.parse",
"numpy.asarray",
"numpy.diff",
"numpy.linspace",
"pandas.DataFrame",
"glob.glob",
"numpy.abs",
"numpy.warnings.filterwarnings",
"numpy.isnan",
"datetime.date.today",
"numpy.median",
"base64.b64decode",
... | [((266, 278), 'datetime.date.today', 'date.today', ([], {}), '()\n', (276, 278), False, 'from datetime import date\n'), ((282, 318), 'numpy.warnings.filterwarnings', 'np.warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (308, 318), True, 'import numpy as np\n'), ((320, 345), 'seaborn.set', 'sns.set', ([], {'style': '"""darkgrid"""'}), "(style='darkgrid')\n", (327, 345), True, 'import seaborn as sns\n'), ((387, 405), 'glob.glob', 'glob.glob', (['"""*.xml"""'], {}), "('*.xml')\n", (396, 405), False, 'import glob\n'), ((16716, 16742), 'pandas.DataFrame', 'pd.DataFrame', (["Tags['tags']"], {}), "(Tags['tags'])\n", (16728, 16742), True, 'import pandas as pd\n'), ((16751, 16777), 'pandas.DataFrame', 'pd.DataFrame', (["tags['tags']"], {}), "(tags['tags'])\n", (16763, 16777), True, 'import pandas as pd\n'), ((46884, 46907), 'numpy.repeat', 'np.repeat', (['QRSVTI', '(1200)'], {}), '(QRSVTI, 1200)\n', (46893, 46907), True, 'import numpy as np\n'), ((47105, 47129), 'numpy.repeat', 'np.repeat', (['QRStVTI', '(1200)'], {}), '(QRStVTI, 1200)\n', (47114, 47129), True, 'import numpy as np\n'), ((47318, 47339), 'numpy.repeat', 'np.repeat', (['XVTI', '(1200)'], {}), '(XVTI, 1200)\n', (47327, 47339), True, 'import numpy as np\n'), ((47519, 47540), 'numpy.repeat', 'np.repeat', (['YVTI', '(1200)'], {}), '(YVTI, 1200)\n', (47528, 47540), True, 'import numpy as np\n'), ((47720, 47741), 'numpy.repeat', 'np.repeat', (['ZVTI', '(1200)'], {}), '(ZVTI, 1200)\n', (47729, 47741), True, 'import numpy as np\n'), ((47995, 48017), 'numpy.repeat', 'np.repeat', (['XtVTI', '(1200)'], {}), '(XtVTI, 1200)\n', (48004, 48017), True, 'import numpy as np\n'), ((48203, 48225), 'numpy.repeat', 'np.repeat', (['YtVTI', '(1200)'], {}), '(YtVTI, 1200)\n', (48212, 48225), True, 'import numpy as np\n'), ((48411, 48433), 'numpy.repeat', 'np.repeat', (['ZtVTI', '(1200)'], {}), '(ZtVTI, 1200)\n', (48420, 48433), True, 'import numpy as np\n'), ((48690, 48711), 'numpy.repeat', 'np.repeat', 
(['XVTI', '(1200)'], {}), '(XVTI, 1200)\n', (48699, 48711), True, 'import numpy as np\n'), ((48895, 48916), 'numpy.repeat', 'np.repeat', (['YVTI', '(1200)'], {}), '(YVTI, 1200)\n', (48904, 48916), True, 'import numpy as np\n'), ((49100, 49121), 'numpy.repeat', 'np.repeat', (['ZVTI', '(1200)'], {}), '(ZVTI, 1200)\n', (49109, 49121), True, 'import numpy as np\n'), ((49407, 49429), 'numpy.repeat', 'np.repeat', (['XtVTI', '(1200)'], {}), '(XtVTI, 1200)\n', (49416, 49429), True, 'import numpy as np\n'), ((49619, 49641), 'numpy.repeat', 'np.repeat', (['YtVTI', '(1200)'], {}), '(YtVTI, 1200)\n', (49628, 49641), True, 'import numpy as np\n'), ((49831, 49853), 'numpy.repeat', 'np.repeat', (['ZtVTI', '(1200)'], {}), '(ZtVTI, 1200)\n', (49840, 49853), True, 'import numpy as np\n'), ((55288, 55309), 'numpy.repeat', 'np.repeat', (['QpQe', '(1200)'], {}), '(QpQe, 1200)\n', (55297, 55309), True, 'import numpy as np\n'), ((55339, 55360), 'numpy.repeat', 'np.repeat', (['Tamp', '(1200)'], {}), '(Tamp, 1200)\n', (55348, 55360), True, 'import numpy as np\n'), ((55391, 55413), 'numpy.repeat', 'np.repeat', (['XTamp', '(1200)'], {}), '(XTamp, 1200)\n', (55400, 55413), True, 'import numpy as np\n'), ((55446, 55468), 'numpy.repeat', 'np.repeat', (['YTamp', '(1200)'], {}), '(YTamp, 1200)\n', (55455, 55468), True, 'import numpy as np\n'), ((55501, 55523), 'numpy.repeat', 'np.repeat', (['ZTamp', '(1200)'], {}), '(ZTamp, 1200)\n', (55510, 55523), True, 'import numpy as np\n'), ((55556, 55578), 'numpy.repeat', 'np.repeat', (['XTpTe', '(1200)'], {}), '(XTpTe, 1200)\n', (55565, 55578), True, 'import numpy as np\n'), ((55611, 55633), 'numpy.repeat', 'np.repeat', (['YTpTe', '(1200)'], {}), '(YTpTe, 1200)\n', (55620, 55633), True, 'import numpy as np\n'), ((55666, 55688), 'numpy.repeat', 'np.repeat', (['ZTpTe', '(1200)'], {}), '(ZTpTe, 1200)\n', (55675, 55688), True, 'import numpy as np\n'), ((55720, 55741), 'numpy.repeat', 'np.repeat', (['TpTe', '(1200)'], {}), '(TpTe, 1200)\n', (55729, 55741), 
True, 'import numpy as np\n'), ((57344, 57366), 'numpy.repeat', 'np.repeat', (['XQamp', '(1200)'], {}), '(XQamp, 1200)\n', (57353, 57366), True, 'import numpy as np\n'), ((57398, 57420), 'numpy.repeat', 'np.repeat', (['YQamp', '(1200)'], {}), '(YQamp, 1200)\n', (57407, 57420), True, 'import numpy as np\n'), ((57452, 57474), 'numpy.repeat', 'np.repeat', (['ZQamp', '(1200)'], {}), '(ZQamp, 1200)\n', (57461, 57474), True, 'import numpy as np\n'), ((57504, 57525), 'numpy.repeat', 'np.repeat', (['Qamp', '(1200)'], {}), '(Qamp, 1200)\n', (57513, 57525), True, 'import numpy as np\n'), ((530, 550), 'numpy.median', 'np.median', (['intensity'], {}), '(intensity)\n', (539, 550), True, 'import numpy as np\n'), ((4206, 4266), 'numpy.linspace', 'np.linspace', (['point1[0]', 'point2[0]', '(point2[0] - point1[0] + 1)'], {}), '(point1[0], point2[0], point2[0] - point1[0] + 1)\n', (4217, 4266), True, 'import numpy as np\n'), ((17704, 17717), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (17714, 17717), True, 'import numpy as np\n'), ((17844, 17859), 'pandas.DataFrame', 'pd.DataFrame', (['a'], {}), '(a)\n', (17856, 17859), True, 'import pandas as pd\n'), ((18550, 18688), 'numpy.asarray', 'np.asarray', (['[array[0], array[1], array[2], array[3], array[4], array[5], array[6],\n array[7], array[8], array[9], array[10], array[11]]'], {}), '([array[0], array[1], array[2], array[3], array[4], array[5],\n array[6], array[7], array[8], array[9], array[10], array[11]])\n', (18560, 18688), True, 'import numpy as np\n'), ((18695, 18714), 'pandas.DataFrame', 'pd.DataFrame', (['array'], {}), '(array)\n', (18707, 18714), True, 'import pandas as pd\n'), ((18725, 18765), 'pandas.pivot_table', 'pd.pivot_table', (['df'], {'columns': 'test.columns'}), '(df, columns=test.columns)\n', (18739, 18765), True, 'import pandas as pd\n'), ((30884, 30897), 'numpy.asarray', 'np.asarray', (['p'], {}), '(p)\n', (30894, 30897), True, 'import numpy as np\n'), ((31029, 31044), 'pandas.DataFrame', 
'pd.DataFrame', (['a'], {}), '(a)\n', (31041, 31044), True, 'import pandas as pd\n'), ((31756, 31894), 'numpy.asarray', 'np.asarray', (['[array[0], array[1], array[2], array[3], array[4], array[5], array[6],\n array[7], array[8], array[9], array[10], array[11]]'], {}), '([array[0], array[1], array[2], array[3], array[4], array[5],\n array[6], array[7], array[8], array[9], array[10], array[11]])\n', (31766, 31894), True, 'import numpy as np\n'), ((31906, 31925), 'pandas.DataFrame', 'pd.DataFrame', (['array'], {}), '(array)\n', (31918, 31925), True, 'import pandas as pd\n'), ((31943, 31988), 'pandas.pivot_table', 'pd.pivot_table', (['half_df'], {'columns': 'test.columns'}), '(half_df, columns=test.columns)\n', (31957, 31988), True, 'import pandas as pd\n'), ((32243, 32289), 'pandas.pivot_table', 'pd.pivot_table', (['half_df'], {'columns': 'half_df.index'}), '(half_df, columns=half_df.index)\n', (32257, 32289), True, 'import pandas as pd\n'), ((46365, 46389), 'pandas.concat', 'pd.concat', (['[df, half_df]'], {}), '([df, half_df])\n', (46374, 46389), True, 'import pandas as pd\n'), ((58469, 58504), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(2)'], {'figsize': '(15, 8)'}), '(2, 2, figsize=(15, 8))\n', (58481, 58504), True, 'import matplotlib.pyplot as plt\n'), ((17613, 17647), 'base64.b64decode', 'base64.b64decode', (['data.Waveform[x]'], {}), '(data.Waveform[x])\n', (17629, 17647), False, 'import base64\n'), ((18042, 18058), 'pandas.concat', 'pd.concat', (['blank'], {}), '(blank)\n', (18051, 18058), True, 'import pandas as pd\n'), ((30787, 30826), 'base64.b64decode', 'base64.b64decode', (['half_data.Waveform[x]'], {}), '(half_data.Waveform[x])\n', (30803, 30826), False, 'import base64\n'), ((31246, 31262), 'pandas.concat', 'pd.concat', (['blank'], {}), '(blank)\n', (31255, 31262), True, 'import pandas as pd\n'), ((32145, 32161), 'pandas.concat', 'pd.concat', (['blank'], {}), '(blank)\n', (32154, 32161), True, 'import pandas as pd\n'), ((33606, 33671), 
'pandas.DataFrame', 'pd.DataFrame', (['[I, II, III, V1, V2, V3, V4, V5, V6, aVF, aVL, aVR]'], {}), '([I, II, III, V1, V2, V3, V4, V5, V6, aVF, aVL, aVR])\n', (33618, 33671), True, 'import pandas as pd\n'), ((577, 607), 'numpy.abs', 'np.abs', (['(intensity - median_int)'], {}), '(intensity - median_int)\n', (583, 607), True, 'import numpy as np\n'), ((4096, 4120), 'numpy.cosh', 'np.cosh', (['(point2[0] % 600)'], {}), '(point2[0] % 600)\n', (4103, 4120), True, 'import numpy as np\n'), ((4123, 4147), 'numpy.cosh', 'np.cosh', (['(point1[0] % 600)'], {}), '(point1[0] % 600)\n', (4130, 4147), True, 'import numpy as np\n'), ((4172, 4196), 'numpy.cosh', 'np.cosh', (['(point1[0] % 600)'], {}), '(point1[0] % 600)\n', (4179, 4196), True, 'import numpy as np\n'), ((4278, 4294), 'numpy.cosh', 'np.cosh', (['(x % 600)'], {}), '(x % 600)\n', (4285, 4294), True, 'import numpy as np\n'), ((17666, 17679), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (17676, 17679), True, 'import numpy as np\n'), ((17972, 18028), 'pandas.pivot_table', 'pd.pivot_table', (['df[n * 12:(n + 1) * 12]'], {'columns': 'df.Lead'}), '(df[n * 12:(n + 1) * 12], columns=df.Lead)\n', (17986, 18028), True, 'import pandas as pd\n'), ((26003, 26031), 'scipy.signal.medfilt', 'medfilt', (['test'], {'kernel_size': '(9)'}), '(test, kernel_size=9)\n', (26010, 26031), False, 'from scipy.signal import medfilt\n'), ((30845, 30858), 'numpy.asarray', 'np.asarray', (['t'], {}), '(t)\n', (30855, 30858), True, 'import numpy as np\n'), ((31166, 31232), 'pandas.pivot_table', 'pd.pivot_table', (['half_df[n * 12:(n + 1) * 12]'], {'columns': 'half_df.Lead'}), '(half_df[n * 12:(n + 1) * 12], columns=half_df.Lead)\n', (31180, 31232), True, 'import pandas as pd\n'), ((32860, 32883), 'numpy.asarray', 'np.asarray', (['array[::12]'], {}), '(array[::12])\n', (32870, 32883), True, 'import numpy as np\n'), ((32919, 32943), 'numpy.asarray', 'np.asarray', (['array[1::12]'], {}), '(array[1::12])\n', (32929, 32943), True, 'import numpy as 
np\n'), ((32980, 33004), 'numpy.asarray', 'np.asarray', (['array[2::12]'], {}), '(array[2::12])\n', (32990, 33004), True, 'import numpy as np\n'), ((33040, 33064), 'numpy.asarray', 'np.asarray', (['array[3::12]'], {}), '(array[3::12])\n', (33050, 33064), True, 'import numpy as np\n'), ((33100, 33124), 'numpy.asarray', 'np.asarray', (['array[4::12]'], {}), '(array[4::12])\n', (33110, 33124), True, 'import numpy as np\n'), ((33160, 33184), 'numpy.asarray', 'np.asarray', (['array[5::12]'], {}), '(array[5::12])\n', (33170, 33184), True, 'import numpy as np\n'), ((33220, 33244), 'numpy.asarray', 'np.asarray', (['array[6::12]'], {}), '(array[6::12])\n', (33230, 33244), True, 'import numpy as np\n'), ((33280, 33304), 'numpy.asarray', 'np.asarray', (['array[7::12]'], {}), '(array[7::12])\n', (33290, 33304), True, 'import numpy as np\n'), ((33340, 33364), 'numpy.asarray', 'np.asarray', (['array[8::12]'], {}), '(array[8::12])\n', (33350, 33364), True, 'import numpy as np\n'), ((33401, 33425), 'numpy.asarray', 'np.asarray', (['array[9::12]'], {}), '(array[9::12])\n', (33411, 33425), True, 'import numpy as np\n'), ((33462, 33487), 'numpy.asarray', 'np.asarray', (['array[10::12]'], {}), '(array[10::12])\n', (33472, 33487), True, 'import numpy as np\n'), ((33524, 33549), 'numpy.asarray', 'np.asarray', (['array[11::12]'], {}), '(array[11::12])\n', (33534, 33549), True, 'import numpy as np\n'), ((41694, 41722), 'scipy.signal.medfilt', 'medfilt', (['test'], {'kernel_size': '(9)'}), '(test, kernel_size=9)\n', (41701, 41722), False, 'from scipy.signal import medfilt\n'), ((425, 436), 'xml.etree.ElementTree.parse', 'et.parse', (['n'], {}), '(n)\n', (433, 436), True, 'import xml.etree.ElementTree as et\n'), ((2184, 2194), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2191, 2194), True, 'import numpy as np\n'), ((3831, 3841), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3838, 3841), True, 'import numpy as np\n'), ((25056, 25078), 'numpy.isnan', 'np.isnan', (['test.iloc[0]'], {}), 
'(test.iloc[0])\n', (25064, 25078), True, 'import numpy as np\n'), ((40717, 40739), 'numpy.isnan', 'np.isnan', (['test.iloc[0]'], {}), '(test.iloc[0])\n', (40725, 40739), True, 'import numpy as np\n'), ((906, 916), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (913, 916), True, 'import numpy as np\n'), ((2521, 2531), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2528, 2531), True, 'import numpy as np\n'), ((1568, 1578), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1575, 1578), True, 'import numpy as np\n'), ((3183, 3193), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3190, 3193), True, 'import numpy as np\n'), ((1097, 1107), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1104, 1107), True, 'import numpy as np\n'), ((2722, 2732), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2729, 2732), True, 'import numpy as np\n'), ((977, 987), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (984, 987), True, 'import numpy as np\n'), ((1425, 1435), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1432, 1435), True, 'import numpy as np\n'), ((1760, 1770), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1767, 1770), True, 'import numpy as np\n'), ((2592, 2602), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2599, 2602), True, 'import numpy as np\n'), ((3070, 3080), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3077, 3080), True, 'import numpy as np\n'), ((3387, 3397), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3394, 3397), True, 'import numpy as np\n'), ((1180, 1190), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1187, 1190), True, 'import numpy as np\n'), ((1305, 1315), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1312, 1315), True, 'import numpy as np\n'), ((1640, 1650), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1647, 1650), True, 'import numpy as np\n'), ((2089, 2099), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2096, 2099), True, 'import numpy as np\n'), ((2805, 2815), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2812, 2815), True, 'import 
numpy as np\n'), ((2940, 2950), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (2947, 2950), True, 'import numpy as np\n'), ((3257, 3267), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3264, 3267), True, 'import numpy as np\n'), ((3736, 3746), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3743, 3746), True, 'import numpy as np\n'), ((1844, 1854), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1851, 1854), True, 'import numpy as np\n'), ((1969, 1979), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (1976, 1979), True, 'import numpy as np\n'), ((3471, 3481), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3478, 3481), True, 'import numpy as np\n'), ((3606, 3616), 'numpy.diff', 'np.diff', (['y'], {}), '(y)\n', (3613, 3616), True, 'import numpy as np\n')] |
import os
import numpy as np
import crepe
# this data contains a sine sweep
# Bundled test audio located next to this test module.
file = os.path.join(os.path.dirname(__file__), 'sweep.wav')
# f0 CSV expected next to the audio; verify_f0() reads it and then deletes it.
f0_file = os.path.join(os.path.dirname(__file__), 'sweep.f0.csv')
def verify_f0():
    """Validate the f0 CSV produced for the sweep file, then delete it.

    Checks that pitch is detected confidently in (almost) every frame and
    that the frequency track rises linearly over time.
    """
    result = np.loadtxt(f0_file, delimiter=',', skiprows=1)
    # it should be confident enough about the presence of pitch in every frame
    assert np.mean(result[:, 2] > 0.5) > 0.98
    # the frequencies should be linear in time, so correlate the frequency
    # column against the time column. BUG FIX: the original called
    # np.corrcoef(result[:, 1]) with a single 1-D array, which is the
    # correlation of the series with itself — always 1.0 — so the assertion
    # was vacuous.
    assert np.corrcoef(result[:, 0], result[:, 1])[0, 1] > 0.99
    os.remove(f0_file)
def test_sweep():
    """Smoke test: run the default backend on the sweep file and verify
    the f0 track it writes."""
    crepe.process_file(file)
    verify_f0()
def test_sweep_cli():
    """The `crepe` console entry point should exit 0 on the sweep file and
    produce a valid f0 track."""
    exit_code = os.system('crepe {}'.format(file))
    assert exit_code == 0
    verify_f0()
def test_sweep_torch():
    """Same end-to-end check as test_sweep, but through the PyTorch backend."""
    crepe.process_file(file, backend='torch')
    verify_f0()
# Test for frames slicing
# normalizing disabled due to numerical discrepancies between numpy and PyTorch
def test_get_frames_torch(normalize=False):
    """Frames sliced by the PyTorch DataHelper must match crepe.core.get_frames.

    `normalize` defaults to False because of numerical discrepancies between
    the numpy and PyTorch normalization paths (see module comment above).
    """
    import torch
    from crepe.torch_backend import DataHelper
    try:
        from scipy.io import wavfile
        sr, audio = wavfile.read(file)
    except ValueError:
        import sys
        print("CREPE: Could not read %s" % file, file=sys.stderr)
        raise
    # Reference framing from the numpy/TF code path.
    frames_tf = crepe.core.get_frames(audio, sr, normalize=normalize)
    # Add a leading batch dimension for the torch helper.
    audio_torch = torch.as_tensor(audio).unsqueeze(0)
    data_helper = DataHelper(frame_duration_n=1024, hop_length_s=10e-3,
                             center=True, normalize=normalize)
    assert sr == data_helper.fs_hz
    frames_torch = data_helper.get_frames(audio_torch)[0].numpy()
    assert np.allclose(frames_tf, frames_torch)
# test consistency of results between PyTorch and TF
# passes only if using very lax parameters for the np.allclose comparison,
# not sure if it's due to floating point numerical imprecisions
# or to an actual bug...
def test_activation_torch_tf():
    """TF and PyTorch backends should produce matching confidence and
    activation on the sweep file, up to a relaxed tolerance (see the
    module comment above about floating-point discrepancies)."""
    try:
        from scipy.io import wavfile
        sr, audio = wavfile.read(file)
    except ValueError:
        import sys
        print("CREPE: Could not read %s" % file, file=sys.stderr)
        raise
    # Reference prediction through the TensorFlow backend.
    *_, confidence_tf, activation_tf = crepe.predict(
        audio, sr, backend='tf')
    import torch
    # Batch the audio and move it to GPU when one is available.
    audio = torch.as_tensor(audio).unsqueeze(0)
    device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
    audio = audio.to(device)
    *_, confidence_torch, activation_torch = crepe.predict(
        audio, sr, backend='torch')
    from functools import partial
    # Lax tolerances; exact agreement is not expected between frameworks.
    relaxed_allclose = partial(np.allclose, rtol=1e-2, atol=1e-8)
    assert relaxed_allclose(confidence_tf,
                            confidence_torch[0].cpu().numpy())
    assert relaxed_allclose(activation_tf,
                            activation_torch[0].cpu().numpy())
| [
"numpy.mean",
"numpy.allclose",
"torch.as_tensor",
"numpy.corrcoef",
"crepe.predict",
"crepe.torch_backend.DataHelper",
"os.path.dirname",
"crepe.core.get_frames",
"torch.cuda.is_available",
"functools.partial",
"scipy.io.wavfile.read",
"crepe.process_file",
"numpy.loadtxt",
"os.remove"
] | [((97, 122), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (112, 122), False, 'import os\n'), ((160, 185), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (175, 185), False, 'import os\n'), ((235, 281), 'numpy.loadtxt', 'np.loadtxt', (['f0_file'], {'delimiter': '""","""', 'skiprows': '(1)'}), "(f0_file, delimiter=',', skiprows=1)\n", (245, 281), True, 'import numpy as np\n'), ((497, 515), 'os.remove', 'os.remove', (['f0_file'], {}), '(f0_file)\n', (506, 515), False, 'import os\n'), ((540, 564), 'crepe.process_file', 'crepe.process_file', (['file'], {}), '(file)\n', (558, 564), False, 'import crepe\n'), ((702, 743), 'crepe.process_file', 'crepe.process_file', (['file'], {'backend': '"""torch"""'}), "(file, backend='torch')\n", (720, 743), False, 'import crepe\n'), ((1200, 1253), 'crepe.core.get_frames', 'crepe.core.get_frames', (['audio', 'sr'], {'normalize': 'normalize'}), '(audio, sr, normalize=normalize)\n', (1221, 1253), False, 'import crepe\n'), ((1327, 1418), 'crepe.torch_backend.DataHelper', 'DataHelper', ([], {'frame_duration_n': '(1024)', 'hop_length_s': '(0.01)', 'center': '(True)', 'normalize': 'normalize'}), '(frame_duration_n=1024, hop_length_s=0.01, center=True, normalize\n =normalize)\n', (1337, 1418), False, 'from crepe.torch_backend import DataHelper\n'), ((1557, 1593), 'numpy.allclose', 'np.allclose', (['frames_tf', 'frames_torch'], {}), '(frames_tf, frames_torch)\n', (1568, 1593), True, 'import numpy as np\n'), ((2092, 2130), 'crepe.predict', 'crepe.predict', (['audio', 'sr'], {'backend': '"""tf"""'}), "(audio, sr, backend='tf')\n", (2105, 2130), False, 'import crepe\n'), ((2342, 2383), 'crepe.predict', 'crepe.predict', (['audio', 'sr'], {'backend': '"""torch"""'}), "(audio, sr, backend='torch')\n", (2355, 2383), False, 'import crepe\n'), ((2451, 2494), 'functools.partial', 'partial', (['np.allclose'], {'rtol': '(0.01)', 'atol': '(1e-08)'}), '(np.allclose, rtol=0.01, atol=1e-08)\n', (2458, 
2494), False, 'from functools import partial\n'), ((373, 400), 'numpy.mean', 'np.mean', (['(result[:, 2] > 0.5)'], {}), '(result[:, 2] > 0.5)\n', (380, 400), True, 'import numpy as np\n'), ((459, 484), 'numpy.corrcoef', 'np.corrcoef', (['result[:, 1]'], {}), '(result[:, 1])\n', (470, 484), True, 'import numpy as np\n'), ((1043, 1061), 'scipy.io.wavfile.read', 'wavfile.read', (['file'], {}), '(file)\n', (1055, 1061), False, 'from scipy.io import wavfile\n'), ((1911, 1929), 'scipy.io.wavfile.read', 'wavfile.read', (['file'], {}), '(file)\n', (1923, 1929), False, 'from scipy.io import wavfile\n'), ((2231, 2256), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2254, 2256), False, 'import torch\n'), ((1273, 1295), 'torch.as_tensor', 'torch.as_tensor', (['audio'], {}), '(audio)\n', (1288, 1295), False, 'import torch\n'), ((2170, 2192), 'torch.as_tensor', 'torch.as_tensor', (['audio'], {}), '(audio)\n', (2185, 2192), False, 'import torch\n')] |
import numpy as np
import collections
class Hopfield():
    """
    Basic Hopfield attractor network (simplest case in the Amit book on
    attractor neural networks).

    Units take values from np.sign of their local field; patterns are
    stored with the Hebbian outer-product rule.
    """
    def __init__(self, n_dim=3, T=0, prng=np.random):
        """
        :param n_dim: number of units in the network.
        :param T: pseudo-temperature controlling the noise level.
        :param prng: numpy random generator (injectable for reproducibility).
        """
        self.prng = prng
        self.n_dim = n_dim
        # Random +-1 initial state and zero local fields.
        self.s = np.sign(prng.normal(size=n_dim))
        self.h = np.zeros(n_dim)
        # Noise parameters
        self.T = T
        self.sigma = T / (2 * np.sqrt(2))  # Check page 67 of Amit to see where this comes from
        self.list_of_patterns = None
        self.w = None
        self.m = None
        self.state_distance = None
    def train(self, list_of_patterns, normalize=True):
        """
        Implements the Hebbian learning rule.

        :param list_of_patterns: list of +-1 patterns to store as attractors.
        :param normalize: if True, divide the weight matrix by n_dim.
        :return: None; the weight matrix is stored in self.w.
        """
        self.list_of_patterns = list_of_patterns
        self.w = np.zeros((self.n_dim, self.n_dim))
        for pattern in list_of_patterns:
            self.w += np.outer(pattern, pattern)
        if normalize:
            self.w *= (1.0 / self.n_dim)
        # No self-connections
        self.w[np.diag_indices_from(self.w)] = 0
    def generate_random_patterns(self, n_store):
        """Return n_store random +-1 patterns of size n_dim."""
        list_of_patterns = [np.sign(self.prng.normal(size=self.n_dim)) for i in range(n_store)]
        return list_of_patterns
    def update_sync(self):
        """
        Updates the network state of all the neurons at the same time.
        """
        if self.sigma < 0.001:
            noise = 0
        else:
            noise = self.prng.normal(0, scale=self.sigma, size=self.n_dim)
        # Linear part
        self.h = np.dot(self.w, self.s) + noise
        # Non-linear part
        self.s = np.sign(self.h)
    def update_async(self):
        """
        Updates the network state one (randomly chosen) neuron at a time.
        """
        i = self.prng.randint(self.n_dim, size=1)[0]
        if self.sigma < 0.001:
            noise = 0
        else:
            # BUGFIX: the noise must be zero-mean with std sigma (matching
            # update_sync); the original drew from N(sigma, 1) by passing
            # sigma as `loc` instead of `scale`.
            noise = self.prng.normal(0, self.sigma)
        self.h[i] = np.dot(self.w[i, ...], self.s) + noise
        # Non-linear
        self.s[i] = np.sign(self.h[i])
    def calculate_overlap(self):
        """Return the overlap m of the current state with every stored pattern."""
        self.m = np.mean(self.s * self.list_of_patterns, axis=1)
        return self.m
    def calculate_state_distance(self):
        """
        Calculates the distance between the state and all the patterns.

        :return: A state distance vector with the distance between
            the actual state of the system and all the stored patterns.
        """
        self.state_distance = np.ones(len(self.list_of_patterns))
        for index, pattern in enumerate(self.list_of_patterns):
            self.state_distance[index] = np.linalg.norm(self.s - pattern)
        return self.state_distance
class HopfieldSequence():
    """
    Hopfield network with an additional set of delayed, asymmetric
    connections (w_delay) that push the state from pattern i towards
    pattern i + 1, so that a stored sequence of patterns can be replayed.

    The delayed synapses are driven by the state the network had `tau`
    steps ago, kept in the buffer s_history (newest at index 0, oldest
    at index -1).
    """
    def __init__(self, n_dim=3, tau=10, g_delay=1.0, T=0, prng=np.random):
        """
        :param n_dim: number of units.
        :param tau: length of the state-history buffer (delay in steps).
        :param g_delay: gain of the delayed (sequence) connections.
        :param T: pseudo-temperature controlling the noise level.
        :param prng: numpy random generator (injectable for reproducibility).
        """
        self.prng = prng
        self.n_dim = n_dim
        self.tau = tau
        self.g_delay = g_delay
        self.s = np.sign(prng.normal(size=n_dim))
        self.h = np.zeros(n_dim)
        # Noise parameters
        self.T = T
        self.sigma = T / (2 * np.sqrt(2))  # Check page 67 of Amit to see where this comes from
        self.list_of_patterns = None
        self.w = None
        self.w_delay = None
        self.m = None
        self.state_distance = None
        # The history buffer starts filled with zero states.
        aux = [np.zeros(n_dim) for i in range(self.tau)]
        self.s_history = collections.deque(aux, maxlen=self.tau)
    def train(self, list_of_patterns, normalize=True):
        """
        Hebbian learning of the symmetric (attractor) weights.

        :param list_of_patterns: list of +-1 patterns to store.
        :param normalize: if True, divide the weight matrix by n_dim.
        """
        self.list_of_patterns = list_of_patterns
        self.w = np.zeros((self.n_dim, self.n_dim))
        for pattern in list_of_patterns:
            self.w += np.outer(pattern, pattern)
        if normalize:
            self.w *= (1.0 / self.n_dim)
        # No self-connections
        self.w[np.diag_indices_from(self.w)] = 0
    def train_delays(self, list_of_patterns, normalize=True):
        """Learn the asymmetric weights that map pattern i to pattern i + 1."""
        self.list_of_patterns_sequence = list_of_patterns
        self.w_delay = np.zeros((self.n_dim, self.n_dim))
        for index in range(len(list_of_patterns) - 1):
            pattern1 = list_of_patterns[index + 1]
            pattern2 = list_of_patterns[index]
            self.w_delay += np.outer(pattern1, pattern2)
        if normalize:
            self.w_delay *= (1.0 / self.n_dim)
        # No self-connections
        self.w_delay[np.diag_indices_from(self.w_delay)] = 0
    def generate_random_patterns(self, n_store):
        """Return n_store random +-1 patterns of size n_dim."""
        list_of_patterns = [np.sign(self.prng.normal(size=self.n_dim)) for i in range(n_store)]
        return list_of_patterns
    def update_sync(self):
        """Synchronously update all units from the current and delayed states."""
        if self.sigma < 0.001:
            noise = 0
        else:
            noise = self.prng.normal(0, scale=self.sigma, size=self.n_dim)
        # Local field: symmetric part plus the delayed (sequence) part,
        # driven by the oldest state popped from the history buffer.
        self.h = np.dot(self.w, self.s) + \
                 self.g_delay * np.dot(self.w_delay, self.s_history.pop()) + noise
        self.s = np.sign(self.h)
        self.s_history.appendleft(np.copy(self.s))
    def update_async_random_sequence(self):
        """Update every unit once, in a random order."""
        random_sequence = self.prng.choice(self.n_dim, size=self.n_dim, replace=False)
        for i in random_sequence:
            # BUGFIX: the original called the non-existent method
            # `update_async_one`, which raised AttributeError.
            self.update_async(i)
        self.s_history.appendleft(np.copy(self.s))
    def update_async(self, i=None):
        """
        Updates the network state one neuron at a time.

        :param i: unit index; a random unit is chosen when None.
        """
        if i is None:
            i = self.prng.randint(self.n_dim, size=1)[0]
        if self.sigma < 0.001:
            noise = 0
        else:
            # BUGFIX: zero-mean noise with std sigma (matching update_sync);
            # the original drew from N(sigma, 1) by passing sigma as `loc`.
            noise = self.prng.normal(0, self.sigma)
        self.h[i] = np.dot(self.w[i, ...], self.s) \
                    + self.g_delay * np.dot(self.w_delay[i, ...], self.s_history[-1]) + noise
        self.s[i] = np.sign(self.h[i])
        self.s_history.appendleft(np.copy(self.s))
    def calculate_overlap(self):
        """Return the overlap of the current state with every stored pattern."""
        self.m = np.mean(self.s * self.list_of_patterns, axis=1)
        return self.m
    def calculate_state_distance(self):
        """
        Calculates the distance between the state and all the patterns.

        :return: A state distance vector with the distance between
            the actual state of the system and all the stored patterns.
        """
        self.state_distance = np.ones(len(self.list_of_patterns))
        for index, pattern in enumerate(self.list_of_patterns):
            self.state_distance[index] = np.linalg.norm(self.s - pattern)
        return self.state_distance
class HopfieldDiff():
    """
    Continuous-time Hopfield network integrated with the Euler method.

    Instead of hard +-1 updates, the state relaxes towards sign(w @ s)
    with membrane time constant tau_m and integration step dt.
    """
    def __init__(self, n_dim=3, tau_m=20.0, dt=0.1, T=0, prng=np.random):
        self.prng = prng
        self.tau_m = tau_m
        self.dt = dt
        self.n_dim = n_dim
        # Random +-1 starting state and zero local fields.
        self.s = np.sign(prng.normal(size=n_dim))
        self.h = np.zeros(n_dim)
        # Noise level; see page 67 of Amit for the 2 * sqrt(2) factor.
        self.T = T
        self.sigma = T / (2 * np.sqrt(2))
        self.list_of_patterns = None
        self.w = None
        self.m = None
        self.state_distance = None
    def train(self, list_of_patterns, normalize=True):
        """Store the given +-1 patterns with the Hebbian outer-product rule."""
        self.list_of_patterns = list_of_patterns
        weights = np.zeros((self.n_dim, self.n_dim))
        for stored in list_of_patterns:
            weights += np.outer(stored, stored)
        if normalize:
            weights *= 1.0 / self.n_dim
        np.fill_diagonal(weights, 0)  # no self-connections
        self.w = weights
    def generate_random_patterns(self, n_store):
        """Return n_store random +-1 patterns of dimension n_dim."""
        return [np.sign(self.prng.normal(size=self.n_dim))
                for _ in range(n_store)]
    def update(self):
        """Advance the state by one Euler step of size dt."""
        if self.sigma < 0.001:
            noise = np.zeros(self.n_dim)
        else:
            noise = self.prng.normal(0, scale=self.sigma, size=self.n_dim)
        target = np.sign(np.dot(self.w, self.s))
        self.s += (self.dt / self.tau_m) * (target - self.s + noise)
    def calculate_state_distance(self):
        """
        Distance between the current state and every stored pattern.

        :return: array of shape (2, n_patterns); row 0 holds the distance
            to each pattern, row 1 the distance to its mirror (-pattern).
        """
        n_patterns = len(self.list_of_patterns)
        distances = np.ones((2, n_patterns))
        for column, stored in enumerate(self.list_of_patterns):
            distances[0, column] = np.linalg.norm(self.s - stored)
            distances[1, column] = np.linalg.norm(self.s + stored)
        self.state_distance = distances
        return self.state_distance
    def calculate_overlap(self):
        """Overlap of the current state with every stored pattern."""
        self.m = np.mean(self.s * self.list_of_patterns, axis=1)
        return self.m
"numpy.mean",
"numpy.diag_indices_from",
"numpy.copy",
"collections.deque",
"numpy.ones",
"numpy.sqrt",
"numpy.zeros",
"numpy.outer",
"numpy.dot",
"numpy.sign",
"numpy.linalg.norm"
] | [((337, 352), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (345, 352), True, 'import numpy as np\n'), ((1001, 1035), 'numpy.zeros', 'np.zeros', (['(self.n_dim, self.n_dim)'], {}), '((self.n_dim, self.n_dim))\n', (1009, 1035), True, 'import numpy as np\n'), ((1884, 1899), 'numpy.sign', 'np.sign', (['self.h'], {}), '(self.h)\n', (1891, 1899), True, 'import numpy as np\n'), ((2391, 2409), 'numpy.sign', 'np.sign', (['self.h[i]'], {}), '(self.h[i])\n', (2398, 2409), True, 'import numpy as np\n'), ((2461, 2508), 'numpy.mean', 'np.mean', (['(self.s * self.list_of_patterns)'], {'axis': '(1)'}), '(self.s * self.list_of_patterns, axis=1)\n', (2468, 2508), True, 'import numpy as np\n'), ((3379, 3394), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (3387, 3394), True, 'import numpy as np\n'), ((3771, 3810), 'collections.deque', 'collections.deque', (['aux'], {'maxlen': 'self.tau'}), '(aux, maxlen=self.tau)\n', (3788, 3810), False, 'import collections\n'), ((4194, 4228), 'numpy.zeros', 'np.zeros', (['(self.n_dim, self.n_dim)'], {}), '((self.n_dim, self.n_dim))\n', (4202, 4228), True, 'import numpy as np\n'), ((4611, 4645), 'numpy.zeros', 'np.zeros', (['(self.n_dim, self.n_dim)'], {}), '((self.n_dim, self.n_dim))\n', (4619, 4645), True, 'import numpy as np\n'), ((5712, 5727), 'numpy.sign', 'np.sign', (['self.h'], {}), '(self.h)\n', (5719, 5727), True, 'import numpy as np\n'), ((6573, 6591), 'numpy.sign', 'np.sign', (['self.h[i]'], {}), '(self.h[i])\n', (6580, 6591), True, 'import numpy as np\n'), ((6694, 6741), 'numpy.mean', 'np.mean', (['(self.s * self.list_of_patterns)'], {'axis': '(1)'}), '(self.s * self.list_of_patterns, axis=1)\n', (6701, 6741), True, 'import numpy as np\n'), ((7179, 7211), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.s - pattern)'], {}), '(self.s - pattern)\n', (7193, 7211), True, 'import numpy as np\n'), ((7513, 7528), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (7521, 7528), True, 'import numpy as np\n'), ((8177, 
8211), 'numpy.zeros', 'np.zeros', (['(self.n_dim, self.n_dim)'], {}), '((self.n_dim, self.n_dim))\n', (8185, 8211), True, 'import numpy as np\n'), ((9379, 9403), 'numpy.ones', 'np.ones', (['(2, n_patterns)'], {}), '((2, n_patterns))\n', (9386, 9403), True, 'import numpy as np\n'), ((9710, 9757), 'numpy.mean', 'np.mean', (['(self.s * self.list_of_patterns)'], {'axis': '(1)'}), '(self.s * self.list_of_patterns, axis=1)\n', (9717, 9757), True, 'import numpy as np\n'), ((1100, 1126), 'numpy.outer', 'np.outer', (['pattern', 'pattern'], {}), '(pattern, pattern)\n', (1108, 1126), True, 'import numpy as np\n'), ((1239, 1267), 'numpy.diag_indices_from', 'np.diag_indices_from', (['self.w'], {}), '(self.w)\n', (1259, 1267), True, 'import numpy as np\n'), ((1758, 1780), 'numpy.dot', 'np.dot', (['self.w', 'self.s'], {}), '(self.w, self.s)\n', (1764, 1780), True, 'import numpy as np\n'), ((2311, 2341), 'numpy.dot', 'np.dot', (['self.w[i, ...]', 'self.s'], {}), '(self.w[i, ...], self.s)\n', (2317, 2341), True, 'import numpy as np\n'), ((2985, 3017), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.s - pattern)'], {}), '(self.s - pattern)\n', (2999, 3017), True, 'import numpy as np\n'), ((3704, 3719), 'numpy.zeros', 'np.zeros', (['n_dim'], {}), '(n_dim)\n', (3712, 3719), True, 'import numpy as np\n'), ((4293, 4319), 'numpy.outer', 'np.outer', (['pattern', 'pattern'], {}), '(pattern, pattern)\n', (4301, 4319), True, 'import numpy as np\n'), ((4432, 4460), 'numpy.diag_indices_from', 'np.diag_indices_from', (['self.w'], {}), '(self.w)\n', (4452, 4460), True, 'import numpy as np\n'), ((4828, 4856), 'numpy.outer', 'np.outer', (['pattern1', 'pattern2'], {}), '(pattern1, pattern2)\n', (4836, 4856), True, 'import numpy as np\n'), ((4981, 5015), 'numpy.diag_indices_from', 'np.diag_indices_from', (['self.w_delay'], {}), '(self.w_delay)\n', (5001, 5015), True, 'import numpy as np\n'), ((5762, 5777), 'numpy.copy', 'np.copy', (['self.s'], {}), '(self.s)\n', (5769, 5777), True, 'import numpy as 
np\n'), ((6017, 6032), 'numpy.copy', 'np.copy', (['self.s'], {}), '(self.s)\n', (6024, 6032), True, 'import numpy as np\n'), ((6626, 6641), 'numpy.copy', 'np.copy', (['self.s'], {}), '(self.s)\n', (6633, 6641), True, 'import numpy as np\n'), ((8276, 8302), 'numpy.outer', 'np.outer', (['pattern', 'pattern'], {}), '(pattern, pattern)\n', (8284, 8302), True, 'import numpy as np\n'), ((8415, 8443), 'numpy.diag_indices_from', 'np.diag_indices_from', (['self.w'], {}), '(self.w)\n', (8435, 8443), True, 'import numpy as np\n'), ((8798, 8818), 'numpy.zeros', 'np.zeros', (['self.n_dim'], {}), '(self.n_dim)\n', (8806, 8818), True, 'import numpy as np\n'), ((8931, 8953), 'numpy.dot', 'np.dot', (['self.w', 'self.s'], {}), '(self.w, self.s)\n', (8937, 8953), True, 'import numpy as np\n'), ((9513, 9545), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.s - pattern)'], {}), '(self.s - pattern)\n', (9527, 9545), True, 'import numpy as np\n'), ((9590, 9622), 'numpy.linalg.norm', 'np.linalg.norm', (['(self.s + pattern)'], {}), '(self.s + pattern)\n', (9604, 9622), True, 'import numpy as np\n'), ((430, 440), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (437, 440), True, 'import numpy as np\n'), ((3472, 3482), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (3479, 3482), True, 'import numpy as np\n'), ((5506, 5528), 'numpy.dot', 'np.dot', (['self.w', 'self.s'], {}), '(self.w, self.s)\n', (5512, 5528), True, 'import numpy as np\n'), ((6405, 6435), 'numpy.dot', 'np.dot', (['self.w[i, ...]', 'self.s'], {}), '(self.w[i, ...], self.s)\n', (6411, 6435), True, 'import numpy as np\n'), ((7606, 7616), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (7613, 7616), True, 'import numpy as np\n'), ((6475, 6523), 'numpy.dot', 'np.dot', (['self.w_delay[i, ...]', 'self.s_history[-1]'], {}), '(self.w_delay[i, ...], self.s_history[-1])\n', (6481, 6523), True, 'import numpy as np\n')] |
from itertools import islice
from itertools import islice
import matplotlib.pyplot as plt
import numpy as np
import torch
from torch.autograd import Variable
from torch import utils
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import os
import pickle
from torchvision import datasets, utils
import torchvision.transforms as transforms
#import cv2
import numpy as np
from PIL import Image
import math
import torch
import os
from io import BytesIO
from torch import nn
import torch.nn.functional as F
from conv_utils import ReDCT,DCT
import torch
# Directory path of the ImageFolder-style training/test data.
traindataset='./data/'
# Hyper Parameters
num_epochs = 3
batch_size = 2
learning_rate = 0.001
beta = 1  # weight of the secret-reconstruction term in the loss
# Per-channel normalization statistics (the standard ImageNet values),
# used by transforms.Normalize and undone by denormalize().
std = [0.229, 0.224, 0.225]
mean = [0.485, 0.456, 0.406]
def customized_loss(S_prime, C_prime, S, C, B):
    '''
    Loss specified in the deep-steganography paper:
    ||C - C'||^2 + B * ||S - S'||^2.

    :param S_prime: revealed secret image batch.
    :param C_prime: container (modified cover) image batch.
    :param S: original secret image batch.
    :param C: original cover image batch.
    :param B: weight (beta) of the secret-reconstruction term.
    :return: (total loss, cover loss, secret loss)
    '''
    # BUGFIX: the cover loss must compare the container with the *cover*
    # and the secret loss the revealed image with the *secret*; the
    # original code paired them the other way around (C' vs S, S' vs C).
    loss_cover = torch.nn.functional.mse_loss (C_prime, C)
    loss_secret = torch.nn.functional.mse_loss (S_prime, S)
    loss_all = loss_cover + B * loss_secret
    return loss_all, loss_cover, loss_secret
def denormalize(image, std, mean):
    '''Reverse per-channel normalization of a 3-channel image, in place.

    Returns the same tensor with channel c mapped to c * std[c] + mean[c].
    '''
    for channel in range (3):
        image[channel, :, :] = image[channel, :, :] * std[channel] + mean[channel]
    return image
def imshow(img, idx, learning_rate, beta):
    '''Display a tensor image (denormalized with the module-level
    std/mean) with the run parameters in the plot title.'''
    pixels = denormalize (img, std, mean).numpy ()
    # Tensors are CHW; matplotlib wants HWC.
    plt.imshow (np.transpose (pixels, (1, 2, 0)))
    plt.title ('Example ' + str (idx) + ', lr=' + str (learning_rate) + ', B=' + str (beta))
    plt.show ()
    return
def gaussian(tensor, mean=0, stddev=0.1):
    '''Return a Variable containing `tensor` plus Gaussian noise.

    BUGFIX: the original ignored `mean` and `stddev` and always sampled
    N(0, 0.1) (via the long-removed torch.nn.init.normal); the noise now
    honors both arguments, with unchanged defaults.
    '''
    noise = torch.randn (tensor.size ()) * stddev + mean
    return Variable (tensor + noise)
class PrepNetwork(nn.Module):
    """
    Preparation network of the steganography model.

    Three parallel branches with 3x3, 4x4 and 5x5 kernels process the
    3-channel secret image into 50 channels each; the concatenated 150
    channels go through one more per-kernel-size stage and are fused
    again into a 150-channel feature map.
    """
    @staticmethod
    def _stack(specs, kernel_size):
        """Build alternating Conv2d/ReLU layers; specs is [(in, out, pad), ...]."""
        layers = []
        for in_channels, out_channels, pad in specs:
            layers.append(nn.Conv2d(in_channels, out_channels,
                                    kernel_size=kernel_size, padding=pad))
            layers.append(nn.ReLU())
        return nn.Sequential(*layers)

    def __init__(self):
        super(PrepNetwork, self).__init__()
        # Construction order matches the original layer-by-layer layout,
        # so state-dict keys and seeded initialization are unchanged.
        self.initialP3 = self._stack([(3, 50, 1), (50, 50, 1), (50, 50, 1), (50, 50, 1)], 3)
        self.initialP4 = self._stack([(3, 50, 1), (50, 50, 2), (50, 50, 1), (50, 50, 2)], 4)
        self.initialP5 = self._stack([(3, 50, 2), (50, 50, 2), (50, 50, 2), (50, 50, 2)], 5)
        self.finalP3 = self._stack([(150, 50, 1)], 3)
        self.finalP4 = self._stack([(150, 50, 1), (50, 50, 2)], 4)
        self.finalP5 = self._stack([(150, 50, 2)], 5)

    def forward(self, p):
        """Return the 150-channel feature map for secret image batch p."""
        features = [branch(p) for branch in (self.initialP3, self.initialP4, self.initialP5)]
        merged = torch.cat(features, 1)
        heads = [head(merged) for head in (self.finalP3, self.finalP4, self.finalP5)]
        return torch.cat(heads, 1)
class HidingNetwork (nn.Module):
    """
    Hiding network: embeds the prepared secret features into the cover.

    Input is the 153-channel concatenation of the 150 prep features and
    the 3-channel cover image. Three parallel branches (3x3, 4x4, 5x5
    kernels) are fused, refined per kernel size, fused again, and a final
    1x1 convolution emits the 3-channel container image. forward() also
    returns a noisy copy of the container (noise added to .data, i.e.
    outside the autograd graph).
    """
    def __init__(self):
        super (HidingNetwork, self).__init__ ()
        # 3x3 branch over the 153-channel input.
        self.initialH3 = nn.Sequential (
            nn.Conv2d (153, 50, kernel_size=3, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=3, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=3, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=3, padding=1),
            nn.ReLU ())
        # 4x4 branch; paddings alternate 1/2 so the spatial size is
        # restored every second layer (even kernels shift the size).
        self.initialH4 = nn.Sequential (
            nn.Conv2d (153, 50, kernel_size=4, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=4, padding=2),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=4, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=4, padding=2),
            nn.ReLU ())
        # 5x5 branch; padding 2 keeps the spatial size.
        self.initialH5 = nn.Sequential (
            nn.Conv2d (153, 50, kernel_size=5, padding=2),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=5, padding=2),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=5, padding=2),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=5, padding=2),
            nn.ReLU ())
        # Second-stage heads over the fused 150 channels.
        self.finalH3 = nn.Sequential (
            nn.Conv2d (150, 50, kernel_size=3, padding=1),
            nn.ReLU ())
        self.finalH4 = nn.Sequential (
            nn.Conv2d (150, 50, kernel_size=4, padding=1),
            nn.ReLU (),
            nn.Conv2d (50, 50, kernel_size=4, padding=2),
            nn.ReLU ())
        self.finalH5 = nn.Sequential (
            nn.Conv2d (150, 50, kernel_size=5, padding=2),
            nn.ReLU ())
        # 1x1 convolution back to a 3-channel image (no ReLU).
        self.finalH = nn.Sequential (
            nn.Conv2d (150, 3, kernel_size=1, padding=0))
    def forward(self, h):
        """Return (container, noisy container) for the 153-channel input h."""
        h1 = self.initialH3 (h)
        h2 = self.initialH4 (h)
        h3 = self.initialH5 (h)
        mid = torch.cat ((h1, h2, h3), 1)
        h4 = self.finalH3 (mid)
        h5 = self.finalH4 (mid)
        h6 = self.finalH5 (mid)
        mid2 = torch.cat ((h4, h5, h6), 1)
        out = self.finalH (mid2)
        # Noise is applied to .data, so it is not backpropagated through.
        out_noise = gaussian (out.data, 0, 0.1)
        return out, out_noise
# Reveal Network: recovers the secret image from the (noisy) container
class RevealNetwork(nn.Module):
    """
    Reveal network: recovers the 3-channel secret image from a container.

    Mirrors PrepNetwork's multi-kernel-size branch structure over a
    3-channel input and ends with a 1x1 convolution mapping the fused
    150 channels back to 3 (RGB).
    """
    @staticmethod
    def _stack(specs, kernel_size):
        """Build alternating Conv2d/ReLU layers; specs is [(in, out, pad), ...]."""
        layers = []
        for in_channels, out_channels, pad in specs:
            layers.append(nn.Conv2d(in_channels, out_channels,
                                    kernel_size=kernel_size, padding=pad))
            layers.append(nn.ReLU())
        return nn.Sequential(*layers)

    def __init__(self):
        super(RevealNetwork, self).__init__()
        # Construction order matches the original layer-by-layer layout,
        # so state-dict keys and seeded initialization are unchanged.
        self.initialR3 = self._stack([(3, 50, 1), (50, 50, 1), (50, 50, 1), (50, 50, 1)], 3)
        self.initialR4 = self._stack([(3, 50, 1), (50, 50, 2), (50, 50, 1), (50, 50, 2)], 4)
        self.initialR5 = self._stack([(3, 50, 2), (50, 50, 2), (50, 50, 2), (50, 50, 2)], 5)
        self.finalR3 = self._stack([(150, 50, 1)], 3)
        self.finalR4 = self._stack([(150, 50, 1), (50, 50, 2)], 4)
        self.finalR5 = self._stack([(150, 50, 2)], 5)
        # 1x1 convolution back to a 3-channel image (no ReLU).
        self.finalR = nn.Sequential(
            nn.Conv2d(150, 3, kernel_size=1, padding=0))

    def forward(self, r):
        """Return the revealed 3-channel secret for container batch r."""
        branch_maps = [branch(r) for branch in (self.initialR3, self.initialR4, self.initialR5)]
        fused = torch.cat(branch_maps, 1)
        head_maps = [head(fused) for head in (self.finalR3, self.finalR4, self.finalR5)]
        return self.finalR(torch.cat(head_maps, 1))
class Net(nn.Module):
    """
    Full steganography pipeline: prep -> hide -> reveal.

    forward(secret, cover) returns (container, revealed_secret); the
    reveal network is fed the *noisy* container produced by the hiding
    network.
    """
    def __init__(self):
        super(Net, self).__init__()
        self.m0=DCT()            # DCT transform (from conv_utils)
        self.m1=PrepNetwork()    # prepares the secret image
        self.m2 = HidingNetwork()   # embeds secret features into the cover
        self.m3 = RevealNetwork()   # extracts the secret from the container
        self.m4=ReDCT()          # inverse DCT (from conv_utils)
    def forward(self, secret, cover):
        # NOTE(review): dct_ and x_4 are computed but never used — the
        # line that consumed x_4 is commented out below. This is dead
        # work unless DCT/ReDCT have side effects; confirm before removing.
        dct_= self.m0(secret)
        x_4 = self.m4(dct_)
        # x=self.m1(x_4)
        y=self.m1(secret)
        # Concatenate prep features (150 ch) with the cover (3 ch).
        mid = torch.cat((y, cover), 1)
        x_2, x_2_noise = self.m2(mid)
        x_3 = self.m3(x_2_noise)
        return x_2, x_3
# Creates net object
net = Net()
# Creates training set. NOTE(review): transforms.Scale is the legacy
# (pre-0.2) name of transforms.Resize — this script targets an old
# torchvision, consistent with Variable(volatile=True) used below.
# Each batch is later split in half into covers and secrets.
train_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(
        traindataset,
        transforms.Compose([
            transforms.Scale(256),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean,
                                 std=std)
        ])), batch_size=batch_size, num_workers=1,
    pin_memory=True, shuffle=True, drop_last=True)
# Creates test set (same folder and transforms as the training set).
test_loader = torch.utils.data.DataLoader(
    datasets.ImageFolder(
        traindataset,
        transforms.Compose([
            transforms.Scale(256),
            transforms.RandomCrop (224),
            transforms.ToTensor(),
            transforms.Normalize(mean=mean,
                                 std=std)
        ])), batch_size=2, num_workers=1,
    pin_memory=True, shuffle=True, drop_last=True)
def train_model(train_loader, beta, learning_rate):
    '''Train the module-level `net` for `num_epochs` epochs.

    Every batch is split in half: the first half provides cover images,
    the second half secret images.

    :return: (net, loss_history) with one loss entry per mini-batch.
    '''
    optimizer = optim.Adam (net.parameters (), lr=learning_rate)
    loss_history = []
    for epoch in range (num_epochs):
        # Train mode
        net.train ()
        epoch_losses = []
        for idx, (images, _) in enumerate (train_loader):
            half = len (images) // 2
            # First half of the batch -> covers, second half -> secrets.
            covers = Variable (images[:half], requires_grad=False)
            secrets = Variable (images[half:], requires_grad=False)
            # Forward + Backward + Optimize
            optimizer.zero_grad ()
            hidden, revealed = net (secrets, covers)
            total_loss, cover_loss, secret_loss = customized_loss (revealed, hidden,
                                                                  secrets, covers, beta)
            total_loss.backward ()
            optimizer.step ()
            # Record the per-batch loss (legacy .data[0] tensor access).
            epoch_losses.append (total_loss.data[0])
            loss_history.append (total_loss.data[0])
    return net, loss_history
# Train the network and keep the per-batch loss curve.
net, loss_history = train_model(train_loader, beta, learning_rate)
# Plot loss through epochs
plt.plot(loss_history)
plt.title('Model loss')
plt.ylabel('Loss')
plt.xlabel('Batch')
plt.show()
# Switch to evaluate mode
net.eval()
test_losses = []
# Show images. NOTE(review): volatile=True is the legacy (pre-0.4)
# no-grad inference flag, consistent with .data[0] used below.
for idx, test_batch in enumerate(test_loader):
    # Saves images
    data, _ = test_batch
    # Saves secret images and secret covers (first half covers... actually
    # first half secrets here — the split order mirrors train_model's).
    test_secret = data[:len(data)//2]
    test_cover = data[len(data)//2:]
    # Creates variable from secret and cover images
    test_secret = Variable(test_secret, volatile=True)
    test_cover = Variable(test_cover, volatile=True)
    test_hidden, test_output = net (test_secret, test_cover)
    test_loss, loss_cover, loss_secret = customized_loss (test_output, test_hidden, test_secret, test_cover, beta)
    # Only report a handful of batches.
    if idx in [1,2,3,4]:
        print ('Total loss: {:.2f} \nLoss on secret: {:.2f} \nLoss on cover: {:.2f}'.format(test_loss.data[0], loss_secret.data[0], loss_cover.data[0]))
    # Creates img tensor: secrets, revealed secrets, covers, containers.
    imgs = [test_secret.data, test_output.data, test_cover.data, test_hidden.data]
    imgs_tsor = torch.cat(imgs, 0)
    # Prints Images
    imshow(utils.make_grid(imgs_tsor), idx+1, learning_rate=learning_rate, beta=beta)
| [
"numpy.transpose",
"torch.nn.functional.mse_loss",
"torch.nn.ReLU",
"torchvision.transforms.ToTensor",
"matplotlib.pyplot.ylabel",
"conv_utils.DCT",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"torchvision.transforms.Scale",
"torch.nn.Conv2d",
"torchvision.transforms.RandomCrop",
"to... | [((11196, 11218), 'matplotlib.pyplot.plot', 'plt.plot', (['loss_history'], {}), '(loss_history)\n', (11204, 11218), True, 'import matplotlib.pyplot as plt\n'), ((11219, 11242), 'matplotlib.pyplot.title', 'plt.title', (['"""Model loss"""'], {}), "('Model loss')\n", (11228, 11242), True, 'import matplotlib.pyplot as plt\n'), ((11243, 11261), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss"""'], {}), "('Loss')\n", (11253, 11261), True, 'import matplotlib.pyplot as plt\n'), ((11262, 11281), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch"""'], {}), "('Batch')\n", (11272, 11281), True, 'import matplotlib.pyplot as plt\n'), ((11282, 11292), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (11290, 11292), True, 'import matplotlib.pyplot as plt\n'), ((883, 923), 'torch.nn.functional.mse_loss', 'torch.nn.functional.mse_loss', (['C_prime', 'S'], {}), '(C_prime, S)\n', (911, 923), False, 'import torch\n'), ((943, 983), 'torch.nn.functional.mse_loss', 'torch.nn.functional.mse_loss', (['S_prime', 'C'], {}), '(S_prime, C)\n', (971, 983), False, 'import torch\n'), ((1568, 1578), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1576, 1578), True, 'import matplotlib.pyplot as plt\n'), ((1761, 1785), 'torch.autograd.Variable', 'Variable', (['(tensor + noise)'], {}), '(tensor + noise)\n', (1769, 1785), False, 'from torch.autograd import Variable\n'), ((11650, 11686), 'torch.autograd.Variable', 'Variable', (['test_secret'], {'volatile': '(True)'}), '(test_secret, volatile=True)\n', (11658, 11686), False, 'from torch.autograd import Variable\n'), ((11704, 11739), 'torch.autograd.Variable', 'Variable', (['test_cover'], {'volatile': '(True)'}), '(test_cover, volatile=True)\n', (11712, 11739), False, 'from torch.autograd import Variable\n'), ((1438, 1468), 'numpy.transpose', 'np.transpose', (['npimg', '(1, 2, 0)'], {}), '(npimg, (1, 2, 0))\n', (1450, 1468), True, 'import numpy as np\n'), ((3534, 3560), 'torch.cat', 'torch.cat', (['(p1, p2, p3)', '(1)'], 
{}), '((p1, p2, p3), 1)\n', (3543, 3560), False, 'import torch\n'), ((3668, 3694), 'torch.cat', 'torch.cat', (['(p4, p5, p6)', '(1)'], {}), '((p4, p5, p6), 1)\n', (3677, 3694), False, 'import torch\n'), ((5611, 5637), 'torch.cat', 'torch.cat', (['(h1, h2, h3)', '(1)'], {}), '((h1, h2, h3), 1)\n', (5620, 5637), False, 'import torch\n'), ((5750, 5776), 'torch.cat', 'torch.cat', (['(h4, h5, h6)', '(1)'], {}), '((h4, h5, h6), 1)\n', (5759, 5776), False, 'import torch\n'), ((7771, 7797), 'torch.cat', 'torch.cat', (['(r1, r2, r3)', '(1)'], {}), '((r1, r2, r3), 1)\n', (7780, 7797), False, 'import torch\n'), ((7910, 7936), 'torch.cat', 'torch.cat', (['(r4, r5, r6)', '(1)'], {}), '((r4, r5, r6), 1)\n', (7919, 7936), False, 'import torch\n'), ((8090, 8095), 'conv_utils.DCT', 'DCT', ([], {}), '()\n', (8093, 8095), False, 'from conv_utils import ReDCT, DCT\n'), ((8210, 8217), 'conv_utils.ReDCT', 'ReDCT', ([], {}), '()\n', (8215, 8217), False, 'from conv_utils import ReDCT, DCT\n'), ((8380, 8404), 'torch.cat', 'torch.cat', (['(y, cover)', '(1)'], {}), '((y, cover), 1)\n', (8389, 8404), False, 'import torch\n'), ((12234, 12252), 'torch.cat', 'torch.cat', (['imgs', '(0)'], {}), '(imgs, 0)\n', (12243, 12252), False, 'import torch\n'), ((1938, 1980), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(3, 50, kernel_size=3, padding=1)\n', (1947, 1980), False, 'from torch import nn\n'), ((1994, 2003), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2001, 2003), False, 'from torch import nn\n'), ((2017, 2060), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (2026, 2060), False, 'from torch import nn\n'), ((2074, 2083), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2081, 2083), False, 'from torch import nn\n'), ((2097, 2140), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', 
(2106, 2140), False, 'from torch import nn\n'), ((2154, 2163), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2161, 2163), False, 'from torch import nn\n'), ((2177, 2220), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (2186, 2220), False, 'from torch import nn\n'), ((2234, 2243), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2241, 2243), False, 'from torch import nn\n'), ((2297, 2339), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(3, 50, kernel_size=4, padding=1)\n', (2306, 2339), False, 'from torch import nn\n'), ((2353, 2362), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2360, 2362), False, 'from torch import nn\n'), ((2376, 2419), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (2385, 2419), False, 'from torch import nn\n'), ((2433, 2442), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2440, 2442), False, 'from torch import nn\n'), ((2456, 2499), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(50, 50, kernel_size=4, padding=1)\n', (2465, 2499), False, 'from torch import nn\n'), ((2513, 2522), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2520, 2522), False, 'from torch import nn\n'), ((2536, 2579), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (2545, 2579), False, 'from torch import nn\n'), ((2593, 2602), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2600, 2602), False, 'from torch import nn\n'), ((2656, 2698), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(3, 50, kernel_size=5, padding=2)\n', (2665, 2698), False, 'from torch import nn\n'), ((2712, 2721), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2719, 2721), False, 'from torch import nn\n'), ((2735, 
2778), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (2744, 2778), False, 'from torch import nn\n'), ((2792, 2801), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2799, 2801), False, 'from torch import nn\n'), ((2815, 2858), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (2824, 2858), False, 'from torch import nn\n'), ((2872, 2881), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2879, 2881), False, 'from torch import nn\n'), ((2895, 2938), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (2904, 2938), False, 'from torch import nn\n'), ((2952, 2961), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (2959, 2961), False, 'from torch import nn\n'), ((3013, 3057), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(150, 50, kernel_size=3, padding=1)\n', (3022, 3057), False, 'from torch import nn\n'), ((3071, 3080), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3078, 3080), False, 'from torch import nn\n'), ((3132, 3176), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(150, 50, kernel_size=4, padding=1)\n', (3141, 3176), False, 'from torch import nn\n'), ((3190, 3199), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3197, 3199), False, 'from torch import nn\n'), ((3213, 3256), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (3222, 3256), False, 'from torch import nn\n'), ((3270, 3279), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3277, 3279), False, 'from torch import nn\n'), ((3331, 3375), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(150, 50, kernel_size=5, padding=2)\n', (3340, 3375), 
False, 'from torch import nn\n'), ((3389, 3398), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3396, 3398), False, 'from torch import nn\n'), ((3873, 3917), 'torch.nn.Conv2d', 'nn.Conv2d', (['(153)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(153, 50, kernel_size=3, padding=1)\n', (3882, 3917), False, 'from torch import nn\n'), ((3932, 3941), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3939, 3941), False, 'from torch import nn\n'), ((3956, 3999), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (3965, 3999), False, 'from torch import nn\n'), ((4014, 4023), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4021, 4023), False, 'from torch import nn\n'), ((4038, 4081), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (4047, 4081), False, 'from torch import nn\n'), ((4096, 4105), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4103, 4105), False, 'from torch import nn\n'), ((4120, 4163), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (4129, 4163), False, 'from torch import nn\n'), ((4178, 4187), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4185, 4187), False, 'from torch import nn\n'), ((4243, 4287), 'torch.nn.Conv2d', 'nn.Conv2d', (['(153)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(153, 50, kernel_size=4, padding=1)\n', (4252, 4287), False, 'from torch import nn\n'), ((4302, 4311), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4309, 4311), False, 'from torch import nn\n'), ((4326, 4369), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (4335, 4369), False, 'from torch import nn\n'), ((4384, 4393), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4391, 4393), False, 'from torch import nn\n'), ((4408, 4451), 
'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(50, 50, kernel_size=4, padding=1)\n', (4417, 4451), False, 'from torch import nn\n'), ((4466, 4475), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4473, 4475), False, 'from torch import nn\n'), ((4490, 4533), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (4499, 4533), False, 'from torch import nn\n'), ((4548, 4557), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4555, 4557), False, 'from torch import nn\n'), ((4613, 4657), 'torch.nn.Conv2d', 'nn.Conv2d', (['(153)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(153, 50, kernel_size=5, padding=2)\n', (4622, 4657), False, 'from torch import nn\n'), ((4672, 4681), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4679, 4681), False, 'from torch import nn\n'), ((4696, 4739), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (4705, 4739), False, 'from torch import nn\n'), ((4754, 4763), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4761, 4763), False, 'from torch import nn\n'), ((4778, 4821), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (4787, 4821), False, 'from torch import nn\n'), ((4836, 4845), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4843, 4845), False, 'from torch import nn\n'), ((4860, 4903), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (4869, 4903), False, 'from torch import nn\n'), ((4918, 4927), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (4925, 4927), False, 'from torch import nn\n'), ((4981, 5025), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(150, 50, kernel_size=3, padding=1)\n', (4990, 5025), False, 
'from torch import nn\n'), ((5040, 5049), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5047, 5049), False, 'from torch import nn\n'), ((5103, 5147), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(150, 50, kernel_size=4, padding=1)\n', (5112, 5147), False, 'from torch import nn\n'), ((5162, 5171), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5169, 5171), False, 'from torch import nn\n'), ((5186, 5229), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (5195, 5229), False, 'from torch import nn\n'), ((5244, 5253), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5251, 5253), False, 'from torch import nn\n'), ((5307, 5351), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(150, 50, kernel_size=5, padding=2)\n', (5316, 5351), False, 'from torch import nn\n'), ((5366, 5375), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (5373, 5375), False, 'from torch import nn\n'), ((5428, 5471), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(3)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(150, 3, kernel_size=1, padding=0)\n', (5437, 5471), False, 'from torch import nn\n'), ((6077, 6119), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(3, 50, kernel_size=3, padding=1)\n', (6086, 6119), False, 'from torch import nn\n'), ((6133, 6142), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6140, 6142), False, 'from torch import nn\n'), ((6156, 6199), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (6165, 6199), False, 'from torch import nn\n'), ((6213, 6222), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6220, 6222), False, 'from torch import nn\n'), ((6236, 6279), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, 
padding=1)\n', (6245, 6279), False, 'from torch import nn\n'), ((6293, 6302), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6300, 6302), False, 'from torch import nn\n'), ((6316, 6359), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(50, 50, kernel_size=3, padding=1)\n', (6325, 6359), False, 'from torch import nn\n'), ((6373, 6382), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6380, 6382), False, 'from torch import nn\n'), ((6436, 6478), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(3, 50, kernel_size=4, padding=1)\n', (6445, 6478), False, 'from torch import nn\n'), ((6492, 6501), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6499, 6501), False, 'from torch import nn\n'), ((6515, 6558), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (6524, 6558), False, 'from torch import nn\n'), ((6572, 6581), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6579, 6581), False, 'from torch import nn\n'), ((6595, 6638), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(50, 50, kernel_size=4, padding=1)\n', (6604, 6638), False, 'from torch import nn\n'), ((6652, 6661), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6659, 6661), False, 'from torch import nn\n'), ((6675, 6718), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (6684, 6718), False, 'from torch import nn\n'), ((6732, 6741), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6739, 6741), False, 'from torch import nn\n'), ((6795, 6837), 'torch.nn.Conv2d', 'nn.Conv2d', (['(3)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(3, 50, kernel_size=5, padding=2)\n', (6804, 6837), False, 'from torch import nn\n'), ((6851, 6860), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6858, 6860), False, 'from torch import 
nn\n'), ((6874, 6917), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (6883, 6917), False, 'from torch import nn\n'), ((6931, 6940), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (6938, 6940), False, 'from torch import nn\n'), ((6954, 6997), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (6963, 6997), False, 'from torch import nn\n'), ((7011, 7020), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7018, 7020), False, 'from torch import nn\n'), ((7034, 7077), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(50, 50, kernel_size=5, padding=2)\n', (7043, 7077), False, 'from torch import nn\n'), ((7091, 7100), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7098, 7100), False, 'from torch import nn\n'), ((7152, 7196), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(150, 50, kernel_size=3, padding=1)\n', (7161, 7196), False, 'from torch import nn\n'), ((7210, 7219), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7217, 7219), False, 'from torch import nn\n'), ((7271, 7315), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(4)', 'padding': '(1)'}), '(150, 50, kernel_size=4, padding=1)\n', (7280, 7315), False, 'from torch import nn\n'), ((7329, 7338), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7336, 7338), False, 'from torch import nn\n'), ((7352, 7395), 'torch.nn.Conv2d', 'nn.Conv2d', (['(50)', '(50)'], {'kernel_size': '(4)', 'padding': '(2)'}), '(50, 50, kernel_size=4, padding=2)\n', (7361, 7395), False, 'from torch import nn\n'), ((7409, 7418), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7416, 7418), False, 'from torch import nn\n'), ((7470, 7514), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(50)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(150, 50, kernel_size=5, padding=2)\n', 
(7479, 7514), False, 'from torch import nn\n'), ((7528, 7537), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (7535, 7537), False, 'from torch import nn\n'), ((7588, 7631), 'torch.nn.Conv2d', 'nn.Conv2d', (['(150)', '(3)'], {'kernel_size': '(1)', 'padding': '(0)'}), '(150, 3, kernel_size=1, padding=0)\n', (7597, 7631), False, 'from torch import nn\n'), ((10039, 10083), 'torch.autograd.Variable', 'Variable', (['train_secrets'], {'requires_grad': '(False)'}), '(train_secrets, requires_grad=False)\n', (10047, 10083), False, 'from torch.autograd import Variable\n'), ((10112, 10155), 'torch.autograd.Variable', 'Variable', (['train_covers'], {'requires_grad': '(False)'}), '(train_covers, requires_grad=False)\n', (10120, 10155), False, 'from torch.autograd import Variable\n'), ((12293, 12319), 'torchvision.utils.make_grid', 'utils.make_grid', (['imgs_tsor'], {}), '(imgs_tsor)\n', (12308, 12319), False, 'from torchvision import datasets, utils\n'), ((8692, 8713), 'torchvision.transforms.Scale', 'transforms.Scale', (['(256)'], {}), '(256)\n', (8708, 8713), True, 'import torchvision.transforms as transforms\n'), ((8723, 8749), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (8744, 8749), True, 'import torchvision.transforms as transforms\n'), ((8759, 8780), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (8778, 8780), True, 'import torchvision.transforms as transforms\n'), ((8790, 8830), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (8810, 8830), True, 'import torchvision.transforms as transforms\n'), ((9097, 9118), 'torchvision.transforms.Scale', 'transforms.Scale', (['(256)'], {}), '(256)\n', (9113, 9118), True, 'import torchvision.transforms as transforms\n'), ((9128, 9154), 'torchvision.transforms.RandomCrop', 'transforms.RandomCrop', (['(224)'], {}), '(224)\n', (9149, 9154), True, 'import torchvision.transforms as 
transforms\n'), ((9165, 9186), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (9184, 9186), True, 'import torchvision.transforms as transforms\n'), ((9196, 9236), 'torchvision.transforms.Normalize', 'transforms.Normalize', ([], {'mean': 'mean', 'std': 'std'}), '(mean=mean, std=std)\n', (9216, 9236), True, 'import torchvision.transforms as transforms\n')] |
# coding: utf-8
import numpy as np
import argparse
import torch
import os
from shutil import rmtree
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Bernoulli
from copy import deepcopy
from envs import IPD
# from torch.utils.tensorboard import SummaryWriter
from tensorboardX import SummaryWriter
from models import PolicyEvaluationNetwork, SteerablePolicy
from plotting import plot
def get_args(argv=None):
    """Build and parse the experiment's command-line arguments.

    Args:
        argv: Optional list of argument strings. Defaults to ``None``, in
            which case ``sys.argv[1:]`` is parsed (the original behavior).
            Passing an explicit list makes this function testable without
            touching process-level state.

    Returns:
        argparse.Namespace holding all hyper-parameters.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-lr_out', default=0.2, type=float, help='')
    parser.add_argument('-lr_in', default=0.3, type=float, help='')
    parser.add_argument('-lr_pen', default=0.3, type=float, help='Adam lr for PEN')
    parser.add_argument('-gamma', default=0.96, type=float, help='')
    parser.add_argument('-n_iter', default=50, type=int, help='No of algo iteration')
    parser.add_argument('-len_rollout', default=200, type=int, help='Length of rollouts')
    parser.add_argument('-batch_size', default=64, type=int, help='')
    parser.add_argument('-seed', default=42, type=int, help='Random Seed')
    parser.add_argument('-lookaheads', default=1, type=int, help='No of lola lookaheads')
    parser.add_argument('-embedding_size', default=5, type=int, help='Size of the embedding z')
    parser.add_argument('-nbins', default=15, type=int, help='No of bins to discretize return space')
    parser.add_argument('-logdir', default='logdir/', type=str, help='Logging Directory')
    parser.add_argument('-pen_hidden', default=80, type=int, help='Hidden size of PEN')
    parser.add_argument('-n_policy', default=10, type=int, help='No of policy updates for each algo iteration')
    parser.add_argument('-n_pen', default=10, type=int, help='')
    parser.add_argument('-nsamples_bin', default=30, type=int, help='No of rollouts for given z1/z2 to compute histogram of returns. Usually 2*nbins')
    parser.add_argument('-plot', action='store_true', help='Enable Plotting')
    return parser.parse_args(argv)
def phi(x1, x2):
    """Joint outcome probabilities of two independent Bernoulli events.

    Given success probabilities ``x1`` and ``x2``, returns the four joint
    probabilities in the order [both, only-first, only-second, neither].
    """
    y1, y2 = 1 - x1, 1 - x2
    return [x1 * x2, x1 * y2, y1 * x2, y1 * y2]
def true_objective(theta1, theta2, ipd, gamma=None):
    """Exact negated discounted value of agent 1 in the iterated game.

    Computes the closed-form discounted return ``P0 (I - gamma*P)^-1 R``
    for the Markov chain induced by the two sigmoid policies, and negates
    it so it can be minimized.

    Args:
        theta1: 5-dim logits of agent 1 (start state + 4 joint states).
        theta2: 5-dim logits of agent 2.
        ipd: environment providing ``payout_mat`` (agent 1's payoff matrix).
        gamma: discount factor; defaults to the module-level ``args.gamma``
            (original behavior) when None.

    Returns:
        1x1 torch tensor: minus the discounted value of agent 1.
    """
    if gamma is None:
        gamma = args.gamma  # preserve original reliance on the global args
    p1 = torch.sigmoid(theta1)
    # re-index theta2 so states (DC, CD) are swapped into agent 2's view
    p2 = torch.sigmoid(theta2[[0, 1, 3, 2, 4]])
    p0 = (p1[0], p2[0])
    p = (p1[1:], p2[1:])
    # create initial laws, transition matrix and rewards:
    P0 = torch.stack(phi(*p0), dim=0).view(1, -1)
    P = torch.stack(phi(*p), dim=1)
    R = torch.from_numpy(ipd.payout_mat).view(-1, 1).float()
    # the true value to optimize:
    objective = (P0.mm(torch.inverse(torch.eye(4) - gamma * P))).mm(R)
    return -objective
def get_gradient(objective, z):
    """Differentiable gradient of *objective* w.r.t. *z*.

    ``create_graph=True`` keeps the gradient itself in the autograd graph
    so second-order terms can be back-propagated through it.
    """
    (grad,) = torch.autograd.grad(objective, z, create_graph=True)
    return grad
class Agent():
    """One LOLA agent, parameterized solely by its embedding vector ``z``.

    The shared policy parameters live elsewhere; each agent steers that
    policy through an additive embedding, optimized with Adam.
    """

    def __init__(self, args, z=None):
        # The embedding is the agent's only learnable quantity.
        # TODO: How to initialize z. 0 is bad
        self.z = nn.Parameter(torch.zeros(args.embedding_size, requires_grad=True))
        # self.z = nn.Parameter(torch.randn(args.embedding_size, requires_grad=True))
        self.z_optimizer = torch.optim.Adam(params=(self.z,), lr=args.lr_out)

    def z_update(self, objective):
        """Take one Adam step on ``z`` that minimizes *objective*."""
        self.z_optimizer.zero_grad()
        objective.backward(retain_graph=True)
        self.z_optimizer.step()

    def in_lookahead(self, theta, other_z):
        """Differentiable gradient of the opponent's objective w.r.t. its embedding."""
        other_objective = true_objective(theta + other_z, theta + self.z, ipd)
        return get_gradient(other_objective, other_z)

    def out_lookahead(self, theta, other_z):
        """Update our own embedding against the (looked-ahead) opponent embedding."""
        objective = true_objective(theta + self.z, theta + other_z, ipd)
        self.z_update(objective)
class Polen():
    """LOLA training loop with a shared steerable policy and a Policy
    Evaluation Network (PEN).

    Owns the two agents (embeddings z1/z2), the shared policy theta, the
    PEN with its optimizer, and the TensorBoard writer.
    """

    def __init__(self, args):
        """
        1. Initialize Agent embeddings z, policy theta & pen psi
        """
        self.args = args
        self.agent1 = Agent(self.args)
        self.agent2 = Agent(self.args)
        self.policy = SteerablePolicy(self.args)
        self.pen = PolicyEvaluationNetwork(self.args)
        self.pen_optimizer = torch.optim.Adam(params=self.pen.parameters(), lr=args.lr_pen)
        # Separate environment whose batch size equals the number of rollouts
        # used to build return histograms for PEN training.
        self.ipd2 = IPD(max_steps=args.len_rollout, batch_size=args.nsamples_bin)
        # Start from a clean logging directory.
        if os.path.exists(args.logdir):
            rmtree(args.logdir)
        # BUGFIX: removed a stray, unused `writer = SummaryWriter(args.logdir)`
        # here; it opened a duplicate event file in the same directory.
        self.writer = SummaryWriter(args.logdir)

    def rollout(self, nsteps):
        """Play `nsteps` in the shared env with the current embeddings.

        Returns (score1, score2): per-agent rewards averaged over the batch
        and accumulated over len_rollout steps (progress metric only).
        """
        (s1, s2), _ = ipd.reset()
        score1 = 0
        score2 = 0
        for t in range(nsteps):
            a1, lp1 = self.policy.act(s1, self.agent1.z)
            a2, lp2 = self.policy.act(s2, self.agent2.z)
            (s1, s2), (r1, r2), _, _ = ipd.step((a1, a2))
            # cumulate scores
            score1 += np.mean(r1) / float(self.args.len_rollout)
            score2 += np.mean(r2) / float(self.args.len_rollout)
        return (score1, score2)

    def rollout_binning(self, nsteps, z1, z2):
        """Roll out `nsteps` with embeddings (z1, z2) on the histogram env.

        Returns:
            (hist1, hist2): torch tensors of `nbins` counts each, binning
            the per-rollout average returns over the fixed range [-3, 0].
        """
        (s1, s2), _ = self.ipd2.reset()
        score1 = torch.zeros(self.args.nsamples_bin, dtype=torch.float)
        score2 = torch.zeros(self.args.nsamples_bin, dtype=torch.float)
        for t in range(nsteps):
            a1, lp1 = self.policy.act(s1, z1)
            a2, lp2 = self.policy.act(s2, z2)
            (s1, s2), (r1, r2), _, _ = self.ipd2.step((a1, a2))
            # cumulate scores
            score1 += r1
            score2 += r2
        score1 = score1 / nsteps
        score2 = score2 / nsteps
        hist1 = torch.histc(score1, bins=self.args.nbins, min=-3, max=0)
        hist2 = torch.histc(score2, bins=self.args.nbins, min=-3, max=0)
        return hist1, hist2

    def train(self):
        """Main loop: alternate policy updates, PEN updates and exact LOLA
        updates of the embeddings.

        Returns:
            list of the joint (averaged) scores, one entry per iteration.
        """
        print("start iterations with", self.args.lookaheads, "lookaheads:")
        joint_scores = []
        for update in range(self.args.n_iter):
            # 1a. For fixed z1 & z2, learn the steerable policy theta by
            # maximizing rollouts (the exact objective is available for IPD).
            for t in range(self.args.n_policy):
                policy_loss = self.policy_update_true()
                # self.policy_update_pg()
                self.writer.add_scalar('PolicyObjective V1 plus V2', -policy_loss, update*self.args.n_policy + t )
            # 1b. Train the PEN
            # TODO: Convert this to a parallel version so one call to PEN is required
            for t in range(self.args.n_pen):
                # randomly generate z1, z2. Maybe generation centered on z0, z1 would be better.
                z1 = torch.randn(self.args.embedding_size)
                z2 = torch.randn(self.args.embedding_size)
                # Experiment with smaller length of rollouts for estimation
                hist1, hist2 = self.rollout_binning(self.args.len_rollout, z1, z2)
                # Compute the KL Div
                w1, w2 = self.pen.forward(self.agent1.z.unsqueeze(0), self.agent2.z.unsqueeze(0))
                w1 = F.softmax(w1.squeeze(), dim=0)
                w2 = F.softmax(w2.squeeze(), dim=0)
                # F.kl_div(Q.log(), P, None, None, 'sum')
                self.pen_optimizer.zero_grad()
                # pen_loss = (hist1* (hist1 / w1).log()).sum() + (hist2* (hist2 / w2).log()).sum()
                # NOTE(review): F.kl_div expects its first argument to be
                # log-probabilities, but hist1/hist2 are unnormalized counts.
                # Left unchanged to preserve behavior -- verify intent.
                pen_loss = F.kl_div(hist1, w1) + F.kl_div(hist2, w2)
                pen_loss.backward()
                self.pen_optimizer.step()
                self.writer.add_scalar('PEN Loss: KL1 plus KL2', pen_loss, update*self.args.n_pen + t )
            # 2. Do on Lola Updates
            self.lola_update_exact()
            # evaluate:
            score = self.rollout(self.args.len_rollout)
            avg_score = 0.5*(score[0] + score[1])
            self.writer.add_scalar('Avg Score of Agent', avg_score, update)
            joint_scores.append(avg_score)
            # Logging
            if update%10==0 :
                print('After update', update, '------------')
                p0 = [p.item() for p in torch.sigmoid(self.policy.theta)]
                p1 = [p.item() for p in torch.sigmoid(self.policy.theta + self.agent1.z)]
                p2 = [p.item() for p in torch.sigmoid(self.policy.theta + self.agent2.z)]
                print('score (%.3f,%.3f)\n' % (score[0], score[1]) , 'Default = {S: %.3f, DD: %.3f, DC: %.3f, CD: %.3f, CC: %.3f}\n' % (p0[0], p0[1], p0[2], p0[3], p0[4]), '(agent1) = {S: %.3f, DD: %.3f, DC: %.3f, CD: %.3f, CC: %.3f}\n' % (p1[0], p1[1], p1[2], p1[3], p1[4]), '(agent2) = {S: %.3f, DD: %.3f, DC: %.3f, CD: %.3f, CC: %.3f}' % (p2[0], p2[1], p2[2], p2[3], p2[4]))
                # print('theta: ', self.policy.theta, '\n', 'z1: ', self.agent1.z, '\n', 'z2: ', self.agent2.z)
        return joint_scores

    def policy_update_true(self):
        """One exact gradient step on theta, maximizing the sum of both
        agents' true IPD values. Batching not needed here."""
        self.policy.theta_optimizer.zero_grad()
        theta1 = self.agent1.z + self.policy.theta
        theta2 = self.agent2.z + self.policy.theta
        objective = (true_objective(theta1, theta2, ipd) + true_objective(theta2, theta1, ipd))
        objective.backward()
        self.policy.theta_optimizer.step()
        return objective

    def policy_update_pg(self):
        """
        TODO:
        Policy-gradient variant of the policy update (will need batching).
        """
        pass

    def lola_update_exact(self):
        """
        Do LOLA updates: each agent simulates `lookaheads` opponent
        gradient steps, then updates its own embedding against the result.
        """
        # copy other's parameters:
        z1_ = self.agent1.z.clone().detach().requires_grad_(True)
        z2_ = self.agent2.z.clone().detach().requires_grad_(True)
        for k in range(self.args.lookaheads):
            # estimate other's gradients from in_lookahead:
            grad2 = self.agent1.in_lookahead(self.policy.theta, z2_)
            grad1 = self.agent2.in_lookahead(self.policy.theta, z1_)
            # update other's theta
            z2_ = z2_ - self.args.lr_in * grad2
            z1_ = z1_ - self.args.lr_in * grad1
        # update own parameters from out_lookahead:
        self.agent1.out_lookahead(self.policy.theta, z2_)
        self.agent2.out_lookahead(self.policy.theta, z1_)
if __name__=="__main__":
    args = get_args()
    # NOTE(review): 'global' at module scope is a no-op; ipd becomes a
    # module-level name either way (it is read by true_objective and the
    # Agent lookahead methods).
    global ipd
    ipd = IPD(max_steps=args.len_rollout, batch_size=args.batch_size)
    torch.manual_seed(args.seed)
    polen = Polen(args)
    scores = polen.train()
    # Flush and close the TensorBoard event file.
    polen.writer.close()
    if args.plot:
        plot(scores, args)
| [
"torch.histc",
"torch.from_numpy",
"envs.IPD",
"os.path.exists",
"numpy.mean",
"tensorboardX.SummaryWriter",
"argparse.ArgumentParser",
"torch.eye",
"torch.nn.functional.kl_div",
"models.SteerablePolicy",
"torch.randn",
"plotting.plot",
"torch.autograd.grad",
"models.PolicyEvaluationNetwor... | [((452, 477), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (475, 477), False, 'import argparse\n'), ((2116, 2137), 'torch.sigmoid', 'torch.sigmoid', (['theta1'], {}), '(theta1)\n', (2129, 2137), False, 'import torch\n'), ((2147, 2185), 'torch.sigmoid', 'torch.sigmoid', (['theta2[[0, 1, 3, 2, 4]]'], {}), '(theta2[[0, 1, 3, 2, 4]])\n', (2160, 2185), False, 'import torch\n'), ((10169, 10228), 'envs.IPD', 'IPD', ([], {'max_steps': 'args.len_rollout', 'batch_size': 'args.batch_size'}), '(max_steps=args.len_rollout, batch_size=args.batch_size)\n', (10172, 10228), False, 'from envs import IPD\n'), ((10233, 10261), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (10250, 10261), False, 'import torch\n'), ((2671, 2723), 'torch.autograd.grad', 'torch.autograd.grad', (['objective', 'z'], {'create_graph': '(True)'}), '(objective, z, create_graph=True)\n', (2690, 2723), False, 'import torch\n'), ((3108, 3158), 'torch.optim.Adam', 'torch.optim.Adam', ([], {'params': '(self.z,)', 'lr': 'args.lr_out'}), '(params=(self.z,), lr=args.lr_out)\n', (3124, 3158), False, 'import torch\n'), ((3946, 3972), 'models.SteerablePolicy', 'SteerablePolicy', (['self.args'], {}), '(self.args)\n', (3961, 3972), False, 'from models import PolicyEvaluationNetwork, SteerablePolicy\n'), ((3996, 4030), 'models.PolicyEvaluationNetwork', 'PolicyEvaluationNetwork', (['self.args'], {}), '(self.args)\n', (4019, 4030), False, 'from models import PolicyEvaluationNetwork, SteerablePolicy\n'), ((4151, 4212), 'envs.IPD', 'IPD', ([], {'max_steps': 'args.len_rollout', 'batch_size': 'args.nsamples_bin'}), '(max_steps=args.len_rollout, batch_size=args.nsamples_bin)\n', (4154, 4212), False, 'from envs import IPD\n'), ((4228, 4255), 'os.path.exists', 'os.path.exists', (['args.logdir'], {}), '(args.logdir)\n', (4242, 4255), False, 'import os\n'), ((4314, 4340), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.logdir'], {}), 
'(args.logdir)\n', (4327, 4340), False, 'from tensorboardX import SummaryWriter\n'), ((4367, 4393), 'tensorboardX.SummaryWriter', 'SummaryWriter', (['args.logdir'], {}), '(args.logdir)\n', (4380, 4393), False, 'from tensorboardX import SummaryWriter\n'), ((5067, 5121), 'torch.zeros', 'torch.zeros', (['self.args.nsamples_bin'], {'dtype': 'torch.float'}), '(self.args.nsamples_bin, dtype=torch.float)\n', (5078, 5121), False, 'import torch\n'), ((5139, 5193), 'torch.zeros', 'torch.zeros', (['self.args.nsamples_bin'], {'dtype': 'torch.float'}), '(self.args.nsamples_bin, dtype=torch.float)\n', (5150, 5193), False, 'import torch\n'), ((5538, 5594), 'torch.histc', 'torch.histc', (['score1'], {'bins': 'self.args.nbins', 'min': '(-3)', 'max': '(0)'}), '(score1, bins=self.args.nbins, min=-3, max=0)\n', (5549, 5594), False, 'import torch\n'), ((5611, 5667), 'torch.histc', 'torch.histc', (['score2'], {'bins': 'self.args.nbins', 'min': '(-3)', 'max': '(0)'}), '(score2, bins=self.args.nbins, min=-3, max=0)\n', (5622, 5667), False, 'import torch\n'), ((10364, 10382), 'plotting.plot', 'plot', (['scores', 'args'], {}), '(scores, args)\n', (10368, 10382), False, 'from plotting import plot\n'), ((2941, 2993), 'torch.zeros', 'torch.zeros', (['args.embedding_size'], {'requires_grad': '(True)'}), '(args.embedding_size, requires_grad=True)\n', (2952, 2993), False, 'import torch\n'), ((4273, 4292), 'shutil.rmtree', 'rmtree', (['args.logdir'], {}), '(args.logdir)\n', (4279, 4292), False, 'from shutil import rmtree\n'), ((4789, 4800), 'numpy.mean', 'np.mean', (['r1'], {}), '(r1)\n', (4796, 4800), True, 'import numpy as np\n'), ((4852, 4863), 'numpy.mean', 'np.mean', (['r2'], {}), '(r2)\n', (4859, 4863), True, 'import numpy as np\n'), ((6586, 6623), 'torch.randn', 'torch.randn', (['self.args.embedding_size'], {}), '(self.args.embedding_size)\n', (6597, 6623), False, 'import torch\n'), ((6645, 6682), 'torch.randn', 'torch.randn', (['self.args.embedding_size'], {}), 
'(self.args.embedding_size)\n', (6656, 6682), False, 'import torch\n'), ((2382, 2414), 'torch.from_numpy', 'torch.from_numpy', (['ipd.payout_mat'], {}), '(ipd.payout_mat)\n', (2398, 2414), False, 'import torch\n'), ((7313, 7332), 'torch.nn.functional.kl_div', 'F.kl_div', (['hist1', 'w1'], {}), '(hist1, w1)\n', (7321, 7332), True, 'import torch.nn.functional as F\n'), ((7335, 7354), 'torch.nn.functional.kl_div', 'F.kl_div', (['hist2', 'w2'], {}), '(hist2, w2)\n', (7343, 7354), True, 'import torch.nn.functional as F\n'), ((2505, 2517), 'torch.eye', 'torch.eye', (['(4)'], {}), '(4)\n', (2514, 2517), False, 'import torch\n'), ((8036, 8068), 'torch.sigmoid', 'torch.sigmoid', (['self.policy.theta'], {}), '(self.policy.theta)\n', (8049, 8068), False, 'import torch\n'), ((8110, 8158), 'torch.sigmoid', 'torch.sigmoid', (['(self.policy.theta + self.agent1.z)'], {}), '(self.policy.theta + self.agent1.z)\n', (8123, 8158), False, 'import torch\n'), ((8200, 8248), 'torch.sigmoid', 'torch.sigmoid', (['(self.policy.theta + self.agent2.z)'], {}), '(self.policy.theta + self.agent2.z)\n', (8213, 8248), False, 'import torch\n')] |
import json
import os
# Parse ISO date
import dateutil.parser as dapa
import pandas as pd
from matplotlib import pyplot as plt
import numpy as np
# Map from strategy name (as stored in the metrics JSON) to a stable
# integer index used for ordering and labelling.
strategy_indices = {
    'FIXED': 0,
    'OPTIMIZED': 1,
    'FINED': 2
}
# X-tick label templates per strategy index; the two '{}' placeholders
# are filled with the rounded (cost, sla) values in perform().
strategy_labels = {
    0: 'FIX({} ,{})',
    1: 'OPT({}, {})',
    2: 'FIN({}, {})'
}
# Legend entries for the two plotted series (bar = cost, line = sla).
legend_labels = ['COST', 'SLA']
ylabel = 'COST/SLA: Based on FIX'
xlabel = 'strategies'
# Per-plan [bar_color, line_color] pairs, keyed by plan index.
plan_colors = {
    0: ['#ff1a1a', '#1aff1a'],
    1: ['#1a1aff', '#ff1aff'],
    2: ['#1affff', '#0d0d0d']
}
# Human-readable plan names, keyed by plan index.
plan_keys = {
    0: 'WIGGLE',
    1: 'OVERLOADED',
    2: 'UNDERLOADED'
}
def perform(perform_map, output, x_tick_labels):
    """Render one bar+line subplot per plan and save the figure to *output*.

    Args:
        perform_map: dict mapping plan index -> DataFrame with columns
            ['cost', 'sla'] indexed by strategy index. Both series are
            normalized by the value of the first strategy (index 0).
        output: path of the figure file to write; its parent directory is
            created if missing.
        x_tick_labels: list of label templates containing '{}' placeholders.
            NOTE(review): this list is mutated in place; the templates are
            formatted with the values of whichever plan is processed first
            (later .format() calls on the already-filled strings are no-ops).
    """
    if len(perform_map) == 1:
        # plt.subplots(1, 1) returns a single Axes, not an array; wrap it
        # in a list so ax[idx] works uniformly below.
        ax = [None]
        fig, ax[0] = plt.subplots(1, 1, sharey=True)
    else:
        fig, ax = plt.subplots(1, len(perform_map), sharey=True)
    handles = []
    for idx, plan in enumerate(perform_map):
        df = perform_map[plan]
        df = df.sort_index()
        x = list(df.index)
        expand_coe = 1.0
        x_axis = list(expand_coe * np.asarray(range(1, len(x) + 1)))
        cols = list(df.columns)
        # Warning: hard coding for convenience drawing.
        # NOTE(review): assumes strategy indices 0 and 1 are both present.
        sla_scale = df.loc[1, 'sla'] / df.loc[0, 'sla']
        y_limit = max(sla_scale, 1.5)
        # ax[idx].axhline(y=1, ls='--', lw=2, c='black')
        # Fill the '{}' placeholders with the rounded (cost, sla) values.
        for i in range(len(x_tick_labels)):
            x_tick_labels[i] = x_tick_labels[i].format(np.round(df.loc[i, 'cost'], 2), np.round(df.loc[i, 'sla'], 2))
        for idx2, col in enumerate(cols):
            handle = None
            if idx2 == 0:
                # First column: bars, each normalized by the first row's value.
                handle = ax[idx].bar(x_axis, np.array(df.loc[:, col].values) / df.loc[:, col].values[0], width=0.618,
                                     color=plan_colors[plan][idx2], align='center', edgecolor='none')
            elif idx2 == 1:
                # Second column: a line with triangle markers, same normalization.
                handle, = ax[idx].plot(x_axis, np.array(df.loc[:, col].values) / df.loc[:, col].values[0],
                                       color=plan_colors[plan][idx2],
                                       linestyle='-', marker='^', markeredgecolor=plan_colors[plan][idx2],
                                       label=None)
            else:
                pass
            if handle is not None:
                handles.append(handle)
        ax[idx].set(xlabel=xlabel)
        ax[idx].set_xlim([0, expand_coe*(len(x) + 1)])
        ax[idx].set_ylim([0, y_limit])
        ax[idx].set_xticks(x_axis)
        ax[idx].set_xticklabels(x_tick_labels)
        ax[idx].text(0.04, 9, plan_keys[plan], size='large')
    fig.legend(handles, legend_labels, 'upper center', ncol=len(legend_labels), frameon=False)
    fig.text(0.04, 0.5, ylabel, va='center', rotation='vertical', size='large')
    # plt.show()
    # Ensure the output's parent directory exists before saving.
    if not os.path.exists(os.path.abspath(os.path.join(output, os.pardir))):
        os.mkdir(os.path.abspath(os.path.join(output, os.pardir)))
    plt.savefig(output)  # Competes with show(): call one or the other.
    plt.close()
def draw_stats(metrics_dir, output):
    """Aggregate per-strategy cost/SLA metrics and plot them via perform().

    Reads every JSON metrics file in *metrics_dir* (one list of samples per
    run), computes the time-weighted average machine cost and the fraction
    of intervals violating the SLA (input rate > throughput), groups the
    results by plan, and hands them to perform() for rendering.

    Args:
        metrics_dir: directory containing one JSON metrics file per run.
        output: path of the figure file perform() should write.
    """
    perform_map = {}
    strategy_list = []
    for metrics_file in os.listdir(metrics_dir):
        # BUGFIX: use a context manager so the file handle is closed
        # (the original leaked it via json.loads(open(...).read())).
        with open(metrics_dir + '/' + metrics_file, 'r') as f:
            metrics = json.loads(f.read())
        if len(metrics) <= 1:
            continue
        plan = int(metrics[0]['plan'])
        strategy = strategy_indices[metrics[0]['strategy']]
        if strategy not in strategy_list:
            strategy_list.append(strategy)
        cost = 0.0
        sla = 0.0
        total_t = 0.0
        # drop out the first trace.
        for idx in range(2, len(metrics)):
            interval = (dapa.parse(metrics[idx]['date']['$date'])
                        - dapa.parse(metrics[idx - 1]['date']['$date'])).total_seconds()
            total_t += interval
            # Time-weighted machine count.
            cost += metrics[idx - 1]['machinesRunning'] * interval
            input_rate = (int(metrics[idx]['messagesTotal']['$numberLong'])
                          - int(metrics[idx - 1]['messagesTotal']['$numberLong']))
            throughput = (int(metrics[idx]['messagesConsumed']['$numberLong'])
                          - int(metrics[idx - 1]['messagesConsumed']['$numberLong']))
            # SLA violation: more messages arrived than were consumed.
            if input_rate > throughput:
                sla += 1
        cost /= total_t
        # NOTE(review): only len(metrics)-2 intervals are counted above, but
        # the division uses len(metrics)-1 -- verify the intended denominator.
        sla /= len(metrics) - 1
        if plan not in perform_map:
            df = pd.DataFrame({'cost': cost, 'sla': sla}, index=[strategy])
            perform_map[plan] = df
        else:
            df = perform_map[plan]
            df.loc[strategy, 'cost'] = cost
            df.loc[strategy, 'sla'] = sla
    x_ticks_labels = []
    strategy_list.sort()
    for stra in strategy_list:
        x_ticks_labels.append(strategy_labels[stra])
    perform(perform_map, output, x_ticks_labels)
def fast_and_furious(metrics_dir, output):
    """Plot normalized input rate vs. machine count for each metrics file.

    For every JSON metrics file in *metrics_dir*, computes the per-interval
    input rate ("leader") and the running machine count ("follower"),
    normalizes each series by its own mean, and writes one figure per file
    to ``output + <driver> + '.pdf'``.

    Args:
        metrics_dir: directory with one JSON list of metric samples per file.
        output: filename prefix for the produced PDF figures.
    """
    for metrics_file in os.listdir(metrics_dir):
        # BUGFIX: use a context manager so the file handle is closed
        # (the original leaked it via json.loads(open(...).read())).
        with open(metrics_dir + '/' + metrics_file, 'r') as f:
            metrics = json.loads(f.read())
        if len(metrics) <= 1:
            continue
        # Part of the filename before the first dot, used to name the figure
        # (idiomatic equivalent of the original str.rsplit(...)[0]).
        driver = metrics_file.split('.')[0]
        leader = list()
        follower = list()
        # Skip the first trace, matching the convention in draw_stats().
        for idx in range(2, len(metrics)):
            interval = (dapa.parse(metrics[idx]['date']['$date'])
                        - dapa.parse(metrics[idx - 1]['date']['$date'])).total_seconds()
            leader.append(
                ((int(metrics[idx]['messagesTotal']['$numberLong'])
                  - int(metrics[idx - 1]['messagesTotal']['$numberLong']))
                 / interval)
            )
            follower.append(
                metrics[idx - 1]['machinesRunning']
            )
        handles = list()
        labels = list()
        avg_input_rate = int(np.round(np.mean(leader)))
        labels.append('average input rate: {} msg/s'.format(avg_input_rate))
        avg_mac_num = np.round(np.mean(follower), 2)
        labels.append('average cost: {} machines'.format(avg_mac_num))
        # Both series are normalized by their means so they share one axis.
        handles.append(plt.plot(list(range(len(leader))), list(np.array(leader) / avg_input_rate), ls='--', lw=2, c='red')[0])
        handles.append(plt.plot(list(range(len(leader))), list(np.array(follower) / avg_mac_num), ls='-', lw=2, c='green')[0])
        plt.figlegend(handles, labels, 'upper center', ncol=2, frameon=False)
        plt.figtext(0.04, 0.5, 'Auto-scaling Zac', va='center', rotation='vertical', size='large')
        plt.xlabel('time (interval 10 seconds)')
        # plt.show()
        plt.savefig(output + driver + '.pdf')
        plt.close()
def main():
    """Entry point: render the effectiveness comparison chart."""
    draw_stats(metrics_dir='zac-metrics/effectiveness', output='zac-viz/effectiveness.pdf')
    # draw_stats(metrics_dir='zac-metrics/robustness', output='zac-viz/robustness.pdf')
    # fast_and_furious(metrics_dir='zac-metrics/effectiveness', output='zac-viz/fast&furious_')
if __name__ == '__main__':
main() | [
"numpy.mean",
"dateutil.parser.parse",
"os.listdir",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.figtext",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.figlegend",
"os.path.join",
"matplotlib.pyplot.close",
"numpy.array",
"pandas.DataFrame",
"matplotlib.pyplot.subplots",
"numpy.round"
... | [((2860, 2879), 'matplotlib.pyplot.savefig', 'plt.savefig', (['output'], {}), '(output)\n', (2871, 2879), True, 'from matplotlib import pyplot as plt\n'), ((2911, 2922), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2920, 2922), True, 'from matplotlib import pyplot as plt\n'), ((3031, 3054), 'os.listdir', 'os.listdir', (['metrics_dir'], {}), '(metrics_dir)\n', (3041, 3054), False, 'import os\n'), ((4764, 4787), 'os.listdir', 'os.listdir', (['metrics_dir'], {}), '(metrics_dir)\n', (4774, 4787), False, 'import os\n'), ((708, 739), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'sharey': '(True)'}), '(1, 1, sharey=True)\n', (720, 739), True, 'from matplotlib import pyplot as plt\n'), ((6095, 6164), 'matplotlib.pyplot.figlegend', 'plt.figlegend', (['handles', 'labels', '"""upper center"""'], {'ncol': '(2)', 'frameon': '(False)'}), "(handles, labels, 'upper center', ncol=2, frameon=False)\n", (6108, 6164), True, 'from matplotlib import pyplot as plt\n'), ((6173, 6267), 'matplotlib.pyplot.figtext', 'plt.figtext', (['(0.04)', '(0.5)', '"""Auto-scaling Zac"""'], {'va': '"""center"""', 'rotation': '"""vertical"""', 'size': '"""large"""'}), "(0.04, 0.5, 'Auto-scaling Zac', va='center', rotation='vertical',\n size='large')\n", (6184, 6267), True, 'from matplotlib import pyplot as plt\n'), ((6272, 6312), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""time (interval 10 seconds)"""'], {}), "('time (interval 10 seconds)')\n", (6282, 6312), True, 'from matplotlib import pyplot as plt\n'), ((6342, 6379), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(output + driver + '.pdf')"], {}), "(output + driver + '.pdf')\n", (6353, 6379), True, 'from matplotlib import pyplot as plt\n'), ((6388, 6399), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (6397, 6399), True, 'from matplotlib import pyplot as plt\n'), ((4284, 4342), 'pandas.DataFrame', 'pd.DataFrame', (["{'cost': cost, 'sla': sla}"], {'index': '[strategy]'}), "({'cost': cost, 'sla': 
sla}, index=[strategy])\n", (4296, 4342), True, 'import pandas as pd\n'), ((5739, 5756), 'numpy.mean', 'np.mean', (['follower'], {}), '(follower)\n', (5746, 5756), True, 'import numpy as np\n'), ((1397, 1427), 'numpy.round', 'np.round', (["df.loc[i, 'cost']", '(2)'], {}), "(df.loc[i, 'cost'], 2)\n", (1405, 1427), True, 'import numpy as np\n'), ((1429, 1458), 'numpy.round', 'np.round', (["df.loc[i, 'sla']", '(2)'], {}), "(df.loc[i, 'sla'], 2)\n", (1437, 1458), True, 'import numpy as np\n'), ((2754, 2785), 'os.path.join', 'os.path.join', (['output', 'os.pardir'], {}), '(output, os.pardir)\n', (2766, 2785), False, 'import os\n'), ((2822, 2853), 'os.path.join', 'os.path.join', (['output', 'os.pardir'], {}), '(output, os.pardir)\n', (2834, 2853), False, 'import os\n'), ((5613, 5628), 'numpy.mean', 'np.mean', (['leader'], {}), '(leader)\n', (5620, 5628), True, 'import numpy as np\n'), ((1600, 1631), 'numpy.array', 'np.array', (['df.loc[:, col].values'], {}), '(df.loc[:, col].values)\n', (1608, 1631), True, 'import numpy as np\n'), ((3545, 3586), 'dateutil.parser.parse', 'dapa.parse', (["metrics[idx]['date']['$date']"], {}), "(metrics[idx]['date']['$date'])\n", (3555, 3586), True, 'import dateutil.parser as dapa\n'), ((3613, 3658), 'dateutil.parser.parse', 'dapa.parse', (["metrics[idx - 1]['date']['$date']"], {}), "(metrics[idx - 1]['date']['$date'])\n", (3623, 3658), True, 'import dateutil.parser as dapa\n'), ((5088, 5129), 'dateutil.parser.parse', 'dapa.parse', (["metrics[idx]['date']['$date']"], {}), "(metrics[idx]['date']['$date'])\n", (5098, 5129), True, 'import dateutil.parser as dapa\n'), ((5156, 5201), 'dateutil.parser.parse', 'dapa.parse', (["metrics[idx - 1]['date']['$date']"], {}), "(metrics[idx - 1]['date']['$date'])\n", (5166, 5201), True, 'import dateutil.parser as dapa\n'), ((1850, 1881), 'numpy.array', 'np.array', (['df.loc[:, col].values'], {}), '(df.loc[:, col].values)\n', (1858, 1881), True, 'import numpy as np\n'), ((5895, 5911), 'numpy.array', 
'np.array', (['leader'], {}), '(leader)\n', (5903, 5911), True, 'import numpy as np\n'), ((6022, 6040), 'numpy.array', 'np.array', (['follower'], {}), '(follower)\n', (6030, 6040), True, 'import numpy as np\n')] |
import logging
from typing import Tuple
import pandas as pd
import numpy as np
from cachetools import cached
from cachetools.keys import hashkey
from . import _get_common_columns
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)
# Distance types supported. NOTE(review): not referenced anywhere in this
# file's visible code — presumably consumed by callers; confirm before removal.
ACCEPTED_TYPES = ["linear"]
def distance(source: pd.Series, target: pd.Series, answers: pd.DataFrame,
             answer_scale=5, bias_min=0.2, bias_max=2.0) -> float:
    """ Calculate distance between targets.

    Uses less common answers to skew bias: answer pairs that are rare in the
    population are weighted more heavily.

    :param source: one respondent's answers
    :param target: the other respondent's answers
    :param answers: all respondents' answers, one column per question
    :param answer_scale: (optional) Scale on which questions are asked, starting from 1. Defaults to 5.
    :param bias_min: (optional) float Minimum allowed bias.
    :param bias_max: (optional) float Maximum allowed bias
    """
    # Collect columns that source and target have both answered.
    columns = _get_common_columns(source, target, answers)
    # Stores distances, and is used to calculate mean value.
    distances = np.zeros(len(columns))
    # Go through answers, and calculate answer distances from source to target
    for i, col in enumerate(columns):
        # Collect answers into unique set.
        # NOTE: np.int/np.float were deprecated in NumPy 1.20 and removed in
        # 1.24 — the builtins behave identically here.
        answers_set = tuple(set([
            int(source[col]),
            int(target[col])
        ]))
        # Calculate similar and different answers
        similar_count, different_count = _similar_counts(col, answers, answers_set)
        similar_ratio = similar_count / len(answers_set)
        different_ratio = different_count / (answer_scale - len(answers_set))
        # Calculate bias, clamped to [bias_min, bias_max].
        bias = float(min(bias_max, max(bias_min, different_ratio / similar_ratio)))
        # Calculate distance between answers with bias.
        distances[i] = abs(int(source[col]) - int(target[col])) * bias
    distance_mean = distances.mean() or 0
    # mean() of an empty array is NaN — report zero distance in that case.
    return distance_mean if not np.isnan(distance_mean) else float(0)
@cached(cache={}, key=lambda column, answers, answer_set: hashkey(column, answer_set))
def _similar_counts(column: str, answers: pd.DataFrame, answers_set: Tuple[int]) -> Tuple[np.int, np.int]:
    """
    Count how many respondents answered like `answers_set` and how many did not.

    Results are memoized per (column, answers_set) via cachetools.

    :return: Tuple of (similar_count, different_count)
    """
    answered = answers[column].dropna()
    # Boolean mask over the full column; pandas aligns it with `answered` by index.
    matches = answers[column].isin(answers_set)
    similar_count = answered[matches].count()
    different_count = answered[~matches].count()
    logger.debug("'%s': Similar/Different: %i / %i", column, similar_count, different_count)
    return (similar_count, different_count)
| [
"logging.getLogger",
"numpy.float",
"cachetools.keys.hashkey",
"numpy.isnan",
"numpy.int"
] | [((192, 219), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (209, 219), False, 'import logging\n'), ((1858, 1869), 'numpy.float', 'np.float', (['(0)'], {}), '(0)\n', (1866, 1869), True, 'import numpy as np\n'), ((1829, 1852), 'numpy.isnan', 'np.isnan', (['distance_mean'], {}), '(distance_mean)\n', (1837, 1852), True, 'import numpy as np\n'), ((1930, 1957), 'cachetools.keys.hashkey', 'hashkey', (['column', 'answer_set'], {}), '(column, answer_set)\n', (1937, 1957), False, 'from cachetools.keys import hashkey\n'), ((1140, 1159), 'numpy.int', 'np.int', (['source[col]'], {}), '(source[col])\n', (1146, 1159), True, 'import numpy as np\n'), ((1173, 1192), 'numpy.int', 'np.int', (['target[col]'], {}), '(target[col])\n', (1179, 1192), True, 'import numpy as np\n'), ((1672, 1691), 'numpy.int', 'np.int', (['source[col]'], {}), '(source[col])\n', (1678, 1691), True, 'import numpy as np\n'), ((1694, 1713), 'numpy.int', 'np.int', (['target[col]'], {}), '(target[col])\n', (1700, 1713), True, 'import numpy as np\n')] |
"""
Defining standard tensorflow layers as modules.
"""
import tensorflow as tf
from deeplearning import module
from deeplearning import tf_util as U
import numpy as np
class Input(module.Module):
    """Base class for graph inputs; consumes no upstream modules."""
    ninputs = 0
class Placeholder(Input):
    """Feeds an externally supplied tensor into the graph via tf.placeholder.

    If `default` is given it is tiled across the batch and used as the
    placeholder's default value.
    """

    def __init__(self, dtype, shape, name, default=None):
        super().__init__(name)
        assert isinstance(shape, (list, tuple))
        self.dtype = dtype
        self.shape = list(shape)
        self.phname = name
        self.default = default

    def _bsz(self):
        # Total rows fed: batch size times unrolled timesteps.
        return self.nbatch * self.nstep

    def _build(self, inputs):
        full_shape = [self._bsz()] + self.shape
        if self.default is None:
            self.ph = tf.placeholder(self.dtype, shape=full_shape, name=self.phname)
        else:
            assert list(self.default.shape) == self.shape
            tiled = np.repeat(self.default[None], self._bsz(), axis=0)
            self.ph = tf.placeholder_with_default(tiled, shape=full_shape, name=self.phname)
        self.placeholders.append(self.ph)
        return self.ph
class StatePlaceholder(Placeholder):
    """Placeholder for recurrent state: sized per batch, not per timestep."""

    def _bsz(self):
        return self.nbatch
class Dense(module.Module):
    """Fully connected layer wrapping tf.layers.dense."""
    ninputs = 1

    def __init__(self, name, *modules, units=1, activation=None, **kwargs):
        super().__init__(name, *modules)
        self.units = units
        self.activation = activation
        # Extra keyword args are forwarded to tf.layers.dense verbatim.
        self.layer_kwargs = kwargs

    def _build(self, inputs):
        return tf.layers.dense(
            inputs[0],
            self.units,
            kernel_initializer=tf.variance_scaling_initializer(),
            activation=self.activation,
            name='dense',
            **self.layer_kwargs)
class Conv2d(module.Module):
    """2-D convolution layer wrapping tf.layers.conv2d."""
    ninputs = 1

    def __init__(self, name, *modules, filters=1, size=(3, 3), strides=(1, 1), padding='valid', activation=None, **kwargs):
        super().__init__(name, *modules)
        self.filters = filters
        self.size = size
        self.strides = strides
        self.padding = padding
        self.activation = activation
        # Extra keyword args are forwarded to tf.layers.conv2d verbatim.
        self.layer_kwargs = kwargs

    def _build(self, inputs):
        return tf.layers.conv2d(
            inputs[0],
            filters=self.filters,
            kernel_size=self.size,
            strides=self.strides,
            padding=self.padding,
            activation=self.activation,
            kernel_initializer=tf.variance_scaling_initializer(),
            name='conv2d',
            **self.layer_kwargs)
class LSTM(module.RecurrentModule):
    """Multi-layer LSTM over a single input module, with optional step masking.

    One StatePlaceholder per layer carries the (cell, hidden) state, which is
    zero-initialized by default.
    """

    def __init__(self, name, *modules, nlstm=256, nlayers=1, masked=False):
        assert len(modules) == 1, "This LSTM is only designed to work with 1 input"
        modules = list(modules)
        if masked:
            modules.append(Placeholder(tf.float32, [], name=name + '_ph_mask'))
        zero_state = np.zeros((nlstm * 2,), dtype=np.float32)
        state_modules = [
            StatePlaceholder(tf.float32, (nlstm * 2,), name + '_ph%d' % i, zero_state)
            for i in range(nlayers)
        ]
        super().__init__(name, *modules, state_modules=state_modules)
        self.nlayers = nlayers
        self.nlstm = nlstm
        self.masked = masked

    def _build(self, inputs, state):
        X = inputs[0]
        if self.masked:
            M = inputs[1]
        else:
            M = tf.zeros([self.nbatch * self.nstep])
        ms = U.batch_to_seq(M, self.nbatch, self.nstep)
        hs = U.batch_to_seq(X, self.nbatch, self.nstep)
        state_out = []
        for i in range(self.nlayers):
            hs, layer_state = U.lstm(hs, ms, state[i], 'lstm{}'.format(i), nh=self.nlstm)
            state_out.append(layer_state)
        return U.seq_to_batch(hs), state_out
class Flatten(module.Module):
    """Wraps tf.layers.flatten over a single input."""
    ninputs = 1

    def _build(self, inputs):
        return tf.layers.flatten(inputs[0])
class StopGrad(module.Module):
    """Blocks gradient flow through every input tensor."""

    def _build(self, inputs):
        return [tf.stop_gradient(tensor) for tensor in inputs]
class Softmax(module.Module):
    """Softmax cross-entropy loss between logits and targets."""
    ninputs = 2  # logits, targets

    def _build(self, inputs):
        logits, target = inputs
        return tf.losses.softmax_cross_entropy(target, logits)
| [
"tensorflow.variance_scaling_initializer",
"deeplearning.tf_util.batch_to_seq",
"tensorflow.layers.flatten",
"numpy.repeat",
"tensorflow.losses.softmax_cross_entropy",
"tensorflow.placeholder",
"deeplearning.tf_util.seq_to_batch",
"tensorflow.placeholder_with_default",
"numpy.zeros",
"tensorflow.s... | [((3068, 3108), 'numpy.zeros', 'np.zeros', (['(nlstm * 2,)'], {'dtype': 'np.float32'}), '((nlstm * 2,), dtype=np.float32)\n', (3076, 3108), True, 'import numpy as np\n'), ((3550, 3592), 'deeplearning.tf_util.batch_to_seq', 'U.batch_to_seq', (['M', 'self.nbatch', 'self.nstep'], {}), '(M, self.nbatch, self.nstep)\n', (3564, 3592), True, 'from deeplearning import tf_util as U\n'), ((3606, 3648), 'deeplearning.tf_util.batch_to_seq', 'U.batch_to_seq', (['X', 'self.nbatch', 'self.nstep'], {}), '(X, self.nbatch, self.nstep)\n', (3620, 3648), True, 'from deeplearning import tf_util as U\n'), ((3838, 3856), 'deeplearning.tf_util.seq_to_batch', 'U.seq_to_batch', (['hs'], {}), '(hs)\n', (3852, 3856), True, 'from deeplearning import tf_util as U\n'), ((3977, 4005), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['inputs[0]'], {}), '(inputs[0])\n', (3994, 4005), True, 'import tensorflow as tf\n'), ((4263, 4310), 'tensorflow.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['target', 'logits'], {}), '(target, logits)\n', (4294, 4310), True, 'import tensorflow as tf\n'), ((765, 807), 'numpy.repeat', 'np.repeat', (['self.default[None]', 'bsz'], {'axis': '(0)'}), '(self.default[None], bsz, axis=0)\n', (774, 807), True, 'import numpy as np\n'), ((830, 897), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['default'], {'shape': 'shape', 'name': 'self.phname'}), '(default, shape=shape, name=self.phname)\n', (857, 897), True, 'import tensorflow as tf\n'), ((934, 991), 'tensorflow.placeholder', 'tf.placeholder', (['self.dtype'], {'shape': 'shape', 'name': 'self.phname'}), '(self.dtype, shape=shape, name=self.phname)\n', (948, 991), True, 'import tensorflow as tf\n'), ((3500, 3536), 'tensorflow.zeros', 'tf.zeros', (['[self.nbatch * self.nstep]'], {}), '([self.nbatch * self.nstep])\n', (3508, 3536), True, 'import tensorflow as tf\n'), ((4084, 4103), 'tensorflow.stop_gradient', 'tf.stop_gradient', (['i'], {}), '(i)\n', (4100, 
4103), True, 'import tensorflow as tf\n'), ((1569, 1602), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {}), '()\n', (1600, 1602), True, 'import tensorflow as tf\n'), ((2447, 2480), 'tensorflow.variance_scaling_initializer', 'tf.variance_scaling_initializer', ([], {}), '()\n', (2478, 2480), True, 'import tensorflow as tf\n')] |
import argparse
import string
from nltk.corpus import stopwords
from nltk.util import ngrams
from nltk.lm import NgramCounter
from sklearn.feature_extraction.text import CountVectorizer
import pandas as pd
import numpy as np
from tqdm import tqdm
import parmap
import os, pickle
from multiprocessing import Pool
from itertools import repeat
import time
def parse_args(argv=None):
    """Parse command-line options for the preprocessing pipeline.

    :param argv: optional list of argument strings; defaults to sys.argv[1:].
                 Added (backward-compatibly) so the parser can be unit-tested.
    :return: argparse.Namespace with all options
    """
    parser = argparse.ArgumentParser(description='Preprocess notes, extract ventilation duration, and prepare for running MixEHR')
    parser.add_argument('input_file_path', help='path to the original file')
    parser.add_argument('output_directory', help='directory to store outputs')
    parser.add_argument('--no_data', action='store_true', help='NOT outputing data')
    parser.add_argument('--use_vocab', help='specify vocabulary file to use. setting no_vocab to true')
    parser.add_argument('--no_vocab', action='store_true', help='NOT storing vocabulary')
    parser.add_argument('-m', '--meta_data', action='store_true', help='output meta data (default not)')
    parser.add_argument('-v', '--ventilation_duration', action='store_true', help='output ventilation duration (default not)')
    parser.add_argument('--binarized_mv', action='store_true', help='using Label in files that do not have continuous MV durations')
    parser.add_argument('-s', '--split_datasets', action='store_true', help='split the whole cohort into train/validation/test (default not)')
    parser.add_argument('--discard_ids', help='csv file containing the HADM_IDs that should be discarded')
    parser.add_argument('--use_all_types', action='store_true', help='NOT restricting note types')
    parser.add_argument('--no_phy', action='store_true', help='NOT using physicians notes')
    parser.add_argument('--no_nur', action='store_true', help='NOT using nurses notes')
    parser.add_argument('--res', action='store_true', help='using respiratory notes')
    parser.add_argument('--other', action='store_true', help='using nursing/other notes. WARNING: this category contain other types of notes e.g. discharge summary, use with caution')
    parser.add_argument('--max_procs', type=int, help='maximal number of processes (default 10)', default=10)
    return parser.parse_args(argv)
# print args for double check
def print_args(args):
    """Echo the parsed options so each run's configuration appears in the log."""
    print('Input file:', args.input_file_path)
    print('Output directory:', args.output_directory)
    print('Using', args.max_procs, 'cores')
    # (flag, message) pairs, printed when the flag is set, in a fixed order.
    notices = [
        (args.no_data, 'Not outputting data file for mixehr'),
        (args.use_vocab, 'Using vocabulary from {}'.format(args.use_vocab)),
        (args.no_vocab, 'Not outputting vocabulary'),
        (args.meta_data, 'Outputting meta data'),
        (args.ventilation_duration and not args.binarized_mv, 'Outputting continuous MV duration'),
        (args.ventilation_duration and args.binarized_mv, 'Outputting binarized MV duration'),
        (args.split_datasets, 'Splitting into train/valiation/test'),
        (args.discard_ids, 'Discarding HADM_IDs in {}'.format(args.discard_ids)),
        (args.use_all_types, 'Using all note types, ignoring no_phy, no_nur, res and other options.'),
        (args.no_phy, 'Not using physician notes'),
        (args.no_nur, 'Not using nursing notes'),
        (args.res, 'Using respiratory notes'),
        (args.other, 'Using nursing/other notes'),
    ]
    for flag, message in notices:
        if flag:
            print(message)
def merge_notes(hadm_id):
    """Concatenate all note texts for one admission (reads module-level `data`)."""
    admission_rows = data[data.HADM_ID == hadm_id]
    return "\n".join(admission_rows.TEXT)
def preprocess_text(text):
    """Normalize raw note text for vectorization.

    Lower-cases, strips punctuation and digits, collapses whitespace and
    removes English stopwords.

    :param text: raw note text
    :return: cleaned, space-separated token string
    """
    # convert to lower case
    lower = text.lower()
    # remove punc
    no_punc = lower.translate(str.maketrans("", "", string.punctuation))
    # remove white space
    no_white = " ".join(no_punc.split())
    # remove numbers
    no_num = no_white.translate(str.maketrans("", "", string.digits))
    # Keep the stopwords in a set: membership tests are O(1) per token,
    # whereas the original converted the set back to a list (O(n) per token).
    normal_sws = set(stopwords.words('english'))
    # NOTE: an extra MIMIC-specific stopword list was previously loaded here
    # from a pickle file; that path was machine-local and is left disabled.
    return " ".join(word for word in no_num.split() if word not in normal_sws)
def get_unigram_counts(text):
    """Count unigram occurrences across a list of sentences.

    :param text: iterable of whitespace-separated sentence strings
    :return: nltk NgramCounter over the unigrams
    """
    tokenized = [sentence.split() for sentence in text]
    return NgramCounter([ngrams(tokens, 1) for tokens in tokenized])
def get_word_index(data, max_df=0.15, min_df=5):
    """Build a word -> integer index from the PROCTEXT column.

    :param data: DataFrame with a 'PROCTEXT' column of preprocessed notes
    :param max_df: ignore terms appearing in more than this fraction of documents
    :param min_df: ignore terms appearing in fewer than this many documents
    :return: dict mapping each vocabulary word to its integer id
    """
    vectorizer = CountVectorizer(max_df=max_df, min_df=min_df)
    vectorizer.fit(data['PROCTEXT'])
    # scikit-learn renamed get_feature_names() to get_feature_names_out() in
    # 1.0 and removed the old name in 1.2 — prefer the new API when present.
    if hasattr(vectorizer, 'get_feature_names_out'):
        vocabulary = vectorizer.get_feature_names_out()
    else:
        vocabulary = vectorizer.get_feature_names()
    return {word: idx for idx, word in enumerate(vocabulary)}
def create_output_dataframe(data, word_index, args):
    """Build MixEHR-format count lines (and optional MV-duration lines) per admission.

    For each admission, one line per vocabulary word found in its first
    PROCTEXT row: "<doc_idx> 1 <word_id> 0 <count>".

    :param data: DataFrame with HADM_ID, PROCTEXT and (when requested)
                 DURATION/BI_DURATION columns
    :param word_index: dict mapping vocabulary word to integer id
    :param args: parsed options (uses no_data, ventilation_duration, binarized_mv)
    :return: (output lines, ventilation-duration lines), each a list of strings
    """
    ids = data["HADM_ID"].unique().tolist()  # HADM_IDs
    output = []
    ventilation_duration = []
    for idx, id in tqdm(enumerate(ids)):
        # Filter this admission's rows once instead of once per word.
        admission = data[data.HADM_ID == id]
        set_ventilation = False  # whether duration was already emitted for this HADM_ID
        if not args.no_data or args.ventilation_duration:
            unigram_counts = get_unigram_counts(admission["PROCTEXT"].tolist())
            for word in set(admission["PROCTEXT"].values[0].split()):
                if word not in word_index.keys():
                    continue
                if not args.no_data:
                    output.append(" ".join([str(idx), "1", str(word_index[word]), "0", str(unigram_counts[word])]))
                if args.ventilation_duration and not set_ventilation:
                    set_ventilation = True
                    if not args.binarized_mv:
                        ventilation_duration.append(" ".join([str(idx), str(admission["DURATION"].values[0])]))
                    else:
                        ventilation_duration.append(" ".join([str(idx), str(admission["BI_DURATION"].values[0])]))
    return output, ventilation_duration
if __name__ == '__main__':
    # --- configuration and sanity checks ---
    args = parse_args()
    if args.no_vocab and args.no_data and not args.meta_data and not args.ventilation_duration:
        raise Exception('no output specified')
    if args.no_phy and args.no_nur and not args.res:
        raise Exception('not using any type of note (physicians, nurses, respiratory, nursing/other)')
    if args.use_vocab:
        args.no_vocab = True
    # print arguments for double check
    print_args(args)
    # Load the raw notes table (uses HADM_ID, CATEGORY and TEXT columns below).
    data = pd.read_csv(args.input_file_path)
    print('[' + time.ctime() + ']', "data read")
    # --- select the note categories to keep ---
    if not args.use_all_types:
        categories = []
        if not args.no_phy:
            categories.append('Physician ')
        if not args.no_nur:
            categories.append('Nursing')
        if args.res:
            categories.append('Respiratory ')
        if args.other:
            categories.append('Nursing/other')
        args.categories = categories
    # merge notes of same HADM_ID of specified categories
    if args.use_all_types:
        merged_data = data[["HADM_ID"]].drop_duplicates()
    else:
        merged_data = data[data['CATEGORY'].isin(categories)][["HADM_ID"]].drop_duplicates()
    if args.discard_ids:
        discard_ids_df = pd.read_csv(args.discard_ids, header=None)
        discard_ids_df.columns = ['HADM_ID']
        discard_ids = discard_ids_df['HADM_ID'].tolist()
        merged_data = merged_data[~merged_data['HADM_ID'].isin(discard_ids)]
        print('[' + time.ctime() + ']', 'discarding', len(discard_ids), 'HADM_IDs')
    # merge_notes reads the module-level `data` frame in each worker process.
    with Pool(processes=args.max_procs) as pool:
        merged_data["ALLTEXT"] = pool.map(merge_notes, merged_data['HADM_ID'])
    # NOTE(review): dropna() returns a new frame that is discarded here —
    # likely intended `merged_data = merged_data.dropna()`; confirm.
    merged_data.dropna()
    print('[' + time.ctime() + ']', 'after merging notes of same HADM_ID, we have', merged_data.shape[0], 'unique HADM_IDs')
    # preprocess notes
    with Pool(processes=args.max_procs) as pool:
        merged_data["PROCTEXT"] = pool.map(preprocess_text, merged_data['ALLTEXT'])
    print('[' + time.ctime() + ']', "notes preprocessed")
    # get ventilation duration
    if args.ventilation_duration:
        if not args.binarized_mv:
            # NOTE(review): the bare `except` hides any error (not just a
            # missing STARTTIME column) before falling back to
            # FIRST_VENT_STARTTIME; consider narrowing it.
            try:
                merged_data['STARTTIME'] = pd.to_datetime(merged_data.HADM_ID.apply(lambda hadm_id: np.min(data[data.HADM_ID == hadm_id].STARTTIME)))
            except:
                # accommodating files that use FIRST_VENT_STARTTIME to represent STARTTIME
                merged_data['STARTTIME'] = pd.to_datetime(merged_data.HADM_ID.apply(lambda hadm_id: np.min(data[data.HADM_ID == hadm_id].FIRST_VENT_STARTTIME)))
            merged_data['ENDTIME'] = pd.to_datetime(merged_data.HADM_ID.apply(lambda hadm_id: np.max(data[data.HADM_ID == hadm_id].ENDTIME)))
            # Duration in hours (timedelta divided by one-hour unit).
            merged_data['DURATION'] = pd.to_timedelta(merged_data['ENDTIME'] - merged_data['STARTTIME'], unit='h') / np.timedelta64(1, 'h')
            # NOTE(review): drop() is not in-place and its result is discarded.
            merged_data.drop(columns=['STARTTIME', 'ENDTIME'])
            print('[' + time.ctime() + ']', "ventilation duration calculated")
        else:
            # accommodating files that have no information of MV sessions but only labels indicating prolonged or not
            print('[' + time.ctime() + ']', "WARNING: no continuous MV duration available, using Label")
            merged_data['BI_DURATION'] = merged_data.HADM_ID.apply(lambda hadm_id: data[data.HADM_ID == hadm_id].Label.values[0])
            print('[' + time.ctime() + ']', "binarized ventilation duration calculated")
    # split into train/valid/test sets (60/20/20, fixed seed for reproducibility)
    if args.split_datasets:
        train_data = merged_data.sample(frac=0.6, random_state=1)
        valid_data = merged_data.drop(train_data.index).sample(frac=0.5, random_state=1)
        test_data = merged_data.drop(train_data.index).drop(valid_data.index)
        held_out_ids = pd.concat([valid_data['HADM_ID'], test_data['HADM_ID']], ignore_index=True)
        train_data.to_csv(os.path.join(args.output_directory, 'train_notes.csv'), header=False, index=False)
        valid_data.to_csv(os.path.join(args.output_directory, 'validation_notes.csv'), header=False, index=False)
        test_data.to_csv(os.path.join(args.output_directory, 'test_notes.csv'), header=False, index=False)
        held_out_ids.to_csv(os.path.join(args.output_directory, 'held_out_ids.csv'), header=False, index=False)
        print('[' + time.ctime() + ']', 'train/valid/test sets written to', args.output_directory)
    # generate word indexes
    if not args.use_vocab:
        if args.split_datasets:
            word_index = get_word_index(train_data)
        else:
            word_index = get_word_index(merged_data)
        print('[' + time.ctime() + ']', 'word index generated')
    else:
        # NOTE(review): indexes read from file are strings, while generated
        # ones are ints; downstream str() conversion makes both work.
        with open(args.use_vocab, 'r') as file:
            vocab = [line.rstrip('\n') for line in file.readlines()]
            word_index = {line.split(',')[0]: line.split(',')[1] for line in vocab}
        print('[' + time.ctime() + ']', 'read word index from', args.use_vocab)
    # store vocabulary
    if not args.no_vocab:
        word_index_output = [','.join([word, str(idx)]) for word, idx in word_index.items()]
        with open(os.path.join(args.output_directory, 'vocab.txt'), "w") as file:
            file.writelines('\n'.join(word_index_output))
        print('[' + time.ctime() + ']', 'vocabulary written to', args.output_directory)
    # create output dataframe
    if not args.split_datasets:
        output, ventilation_duration = create_output_dataframe(merged_data, word_index, args)
    else:
        # The three splits are processed in parallel, one per worker.
        with Pool(processes=args.max_procs) as pool:
            (train_output, train_ventilation_duration), (valid_output, valid_ventilation_duration), (test_output, test_ventilation_duration) = \
                pool.starmap(create_output_dataframe, zip([train_data, valid_data, test_data], repeat(word_index), repeat(args)))
    print('[' + time.ctime() + ']', "data ready")
    # create meta data
    if args.meta_data:
        meta_data = [" ".join(["1", str(idx), "1"]) for idx in word_index.values()]
        print('[' + time.ctime() + ']', "meta data ready")
    # write to output
    if not args.no_data:
        if not args.split_datasets:
            with open(os.path.join(args.output_directory, 'data.txt'), "w") as file:
                file.writelines("\n".join(output))
            print('[' + time.ctime() + ']', "data written to", os.path.join(args.output_directory, 'data.txt'))
        else:
            with open(os.path.join(args.output_directory, 'train_data.txt'), "w") as file:
                file.writelines("\n".join(train_output))
            with open(os.path.join(args.output_directory, 'validation_data.txt'), "w") as file:
                file.writelines("\n".join(valid_output))
            with open(os.path.join(args.output_directory, 'test_data.txt'), "w") as file:
                file.writelines("\n".join(test_output))
            print('[' + time.ctime() + ']', "data written to", os.path.join(args.output_directory, 'train_data/validation_data/test_data.txt'))
    if args.meta_data:
        with open(os.path.join(args.output_directory, 'meta.txt'), "w") as file:
            file.writelines("\n".join(meta_data))
        print('[' + time.ctime() + ']', "meta data written to", os.path.join(args.output_directory, 'meta.txt'))
    # write ventilation durations (continuous or binarized, per split if requested)
    if args.ventilation_duration:
        if not args.split_datasets:
            if not args.binarized_mv:
                with open(os.path.join(args.output_directory, 'vent.txt'), "w") as file:
                    file.writelines("\n".join(ventilation_duration))
                print('[' + time.ctime() + ']', "ventilation duration written to", os.path.join(args.output_directory, 'vent.txt'))
            else:
                with open(os.path.join(args.output_directory, 'bi_vent.txt'), "w") as file:
                    file.writelines("\n".join(ventilation_duration))
                print('[' + time.ctime() + ']', "binarized ventilation duration written to", os.path.join(args.output_directory, 'bi_vent.txt'))
        else:
            if not args.binarized_mv:
                with open(os.path.join(args.output_directory, 'train_vent.txt'), "w") as file:
                    file.writelines("\n".join(train_ventilation_duration))
                with open(os.path.join(args.output_directory, 'validation_vent.txt'), "w") as file:
                    file.writelines("\n".join(valid_ventilation_duration))
                with open(os.path.join(args.output_directory, 'test_vent.txt'), "w") as file:
                    file.writelines("\n".join(test_ventilation_duration))
                print('[' + time.ctime() + ']', "ventilation duration written to", os.path.join(args.output_directory, 'train_vent/validation_vent/test_vent.txt'))
            else:
                with open(os.path.join(args.output_directory, 'train_bi_vent.txt'), "w") as file:
                    file.writelines("\n".join(train_ventilation_duration))
                with open(os.path.join(args.output_directory, 'validation_bi_vent.txt'), "w") as file:
                    file.writelines("\n".join(valid_ventilation_duration))
                with open(os.path.join(args.output_directory, 'test_bi_vent.txt'), "w") as file:
                    file.writelines("\n".join(test_ventilation_duration))
                print('[' + time.ctime() + ']', "binarized ventilation duration written to", os.path.join(args.output_directory, 'train_bi_vent/validation_bi_vent/test_bi_vent.txt'))
| [
"time.ctime",
"pandas.to_timedelta",
"nltk.corpus.stopwords.words",
"argparse.ArgumentParser",
"pandas.read_csv",
"sklearn.feature_extraction.text.CountVectorizer",
"os.path.join",
"nltk.lm.NgramCounter",
"numpy.max",
"nltk.util.ngrams",
"multiprocessing.Pool",
"numpy.min",
"numpy.timedelta6... | [((385, 512), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Preprocess notes, extract ventilation duration, and prepare for running MixEHR"""'}), "(description=\n 'Preprocess notes, extract ventilation duration, and prepare for running MixEHR'\n )\n", (408, 512), False, 'import argparse\n'), ((4359, 4385), 'nltk.lm.NgramCounter', 'NgramCounter', (['text_unigram'], {}), '(text_unigram)\n', (4371, 4385), False, 'from nltk.lm import NgramCounter\n'), ((4453, 4498), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'max_df': 'max_df', 'min_df': 'min_df'}), '(max_df=max_df, min_df=min_df)\n', (4468, 4498), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((7219, 7252), 'pandas.read_csv', 'pd.read_csv', (['args.input_file_path'], {}), '(args.input_file_path)\n', (7230, 7252), True, 'import pandas as pd\n'), ((4301, 4320), 'nltk.util.ngrams', 'ngrams', (['sentence', '(1)'], {}), '(sentence, 1)\n', (4307, 4320), False, 'from nltk.util import ngrams\n'), ((7978, 8020), 'pandas.read_csv', 'pd.read_csv', (['args.discard_ids'], {'header': 'None'}), '(args.discard_ids, header=None)\n', (7989, 8020), True, 'import pandas as pd\n'), ((8293, 8323), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'args.max_procs'}), '(processes=args.max_procs)\n', (8297, 8323), False, 'from multiprocessing import Pool\n'), ((8595, 8625), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'args.max_procs'}), '(processes=args.max_procs)\n', (8599, 8625), False, 'from multiprocessing import Pool\n'), ((10520, 10595), 'pandas.concat', 'pd.concat', (["[valid_data['HADM_ID'], test_data['HADM_ID']]"], {'ignore_index': '(True)'}), "([valid_data['HADM_ID'], test_data['HADM_ID']], ignore_index=True)\n", (10529, 10595), True, 'import pandas as pd\n'), ((3894, 3920), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (3909, 3920), False, 'from nltk.corpus import 
stopwords\n'), ((10622, 10676), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_notes.csv"""'], {}), "(args.output_directory, 'train_notes.csv')\n", (10634, 10676), False, 'import os, pickle\n'), ((10734, 10793), 'os.path.join', 'os.path.join', (['args.output_directory', '"""validation_notes.csv"""'], {}), "(args.output_directory, 'validation_notes.csv')\n", (10746, 10793), False, 'import os, pickle\n'), ((10848, 10901), 'os.path.join', 'os.path.join', (['args.output_directory', '"""test_notes.csv"""'], {}), "(args.output_directory, 'test_notes.csv')\n", (10860, 10901), False, 'import os, pickle\n'), ((10959, 11014), 'os.path.join', 'os.path.join', (['args.output_directory', '"""held_out_ids.csv"""'], {}), "(args.output_directory, 'held_out_ids.csv')\n", (10971, 11014), False, 'import os, pickle\n'), ((12271, 12301), 'multiprocessing.Pool', 'Pool', ([], {'processes': 'args.max_procs'}), '(processes=args.max_procs)\n', (12275, 12301), False, 'from multiprocessing import Pool\n'), ((13994, 14041), 'os.path.join', 'os.path.join', (['args.output_directory', '"""meta.txt"""'], {}), "(args.output_directory, 'meta.txt')\n", (14006, 14041), False, 'import os, pickle\n'), ((7269, 7281), 'time.ctime', 'time.ctime', ([], {}), '()\n', (7279, 7281), False, 'import time\n'), ((8453, 8465), 'time.ctime', 'time.ctime', ([], {}), '()\n', (8463, 8465), False, 'import time\n'), ((8735, 8747), 'time.ctime', 'time.ctime', ([], {}), '()\n', (8745, 8747), False, 'import time\n'), ((9496, 9572), 'pandas.to_timedelta', 'pd.to_timedelta', (["(merged_data['ENDTIME'] - merged_data['STARTTIME'])"], {'unit': '"""h"""'}), "(merged_data['ENDTIME'] - merged_data['STARTTIME'], unit='h')\n", (9511, 9572), True, 'import pandas as pd\n'), ((9575, 9597), 'numpy.timedelta64', 'np.timedelta64', (['(1)', '"""h"""'], {}), "(1, 'h')\n", (9589, 9597), True, 'import numpy as np\n'), ((11869, 11917), 'os.path.join', 'os.path.join', (['args.output_directory', '"""vocab.txt"""'], {}), 
"(args.output_directory, 'vocab.txt')\n", (11881, 11917), False, 'import os, pickle\n'), ((12607, 12619), 'time.ctime', 'time.ctime', ([], {}), '()\n', (12617, 12619), False, 'import time\n'), ((13122, 13169), 'os.path.join', 'os.path.join', (['args.output_directory', '"""data.txt"""'], {}), "(args.output_directory, 'data.txt')\n", (13134, 13169), False, 'import os, pickle\n'), ((13695, 13774), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_data/validation_data/test_data.txt"""'], {}), "(args.output_directory, 'train_data/validation_data/test_data.txt')\n", (13707, 13774), False, 'import os, pickle\n'), ((13817, 13864), 'os.path.join', 'os.path.join', (['args.output_directory', '"""meta.txt"""'], {}), "(args.output_directory, 'meta.txt')\n", (13829, 13864), False, 'import os, pickle\n'), ((8220, 8232), 'time.ctime', 'time.ctime', ([], {}), '()\n', (8230, 8232), False, 'import time\n'), ((11063, 11075), 'time.ctime', 'time.ctime', ([], {}), '()\n', (11073, 11075), False, 'import time\n'), ((11377, 11389), 'time.ctime', 'time.ctime', ([], {}), '()\n', (11387, 11389), False, 'import time\n'), ((11648, 11660), 'time.ctime', 'time.ctime', ([], {}), '()\n', (11658, 11660), False, 'import time\n'), ((12011, 12023), 'time.ctime', 'time.ctime', ([], {}), '()\n', (12021, 12023), False, 'import time\n'), ((12547, 12565), 'itertools.repeat', 'repeat', (['word_index'], {}), '(word_index)\n', (12553, 12565), False, 'from itertools import repeat\n'), ((12567, 12579), 'itertools.repeat', 'repeat', (['args'], {}), '(args)\n', (12573, 12579), False, 'from itertools import repeat\n'), ((12796, 12808), 'time.ctime', 'time.ctime', ([], {}), '()\n', (12806, 12808), False, 'import time\n'), ((12945, 12992), 'os.path.join', 'os.path.join', (['args.output_directory', '"""data.txt"""'], {}), "(args.output_directory, 'data.txt')\n", (12957, 12992), False, 'import os, pickle\n'), ((13207, 13260), 'os.path.join', 'os.path.join', (['args.output_directory', 
'"""train_data.txt"""'], {}), "(args.output_directory, 'train_data.txt')\n", (13219, 13260), False, 'import os, pickle\n'), ((13355, 13413), 'os.path.join', 'os.path.join', (['args.output_directory', '"""validation_data.txt"""'], {}), "(args.output_directory, 'validation_data.txt')\n", (13367, 13413), False, 'import os, pickle\n'), ((13508, 13560), 'os.path.join', 'os.path.join', (['args.output_directory', '"""test_data.txt"""'], {}), "(args.output_directory, 'test_data.txt')\n", (13520, 13560), False, 'import os, pickle\n'), ((13950, 13962), 'time.ctime', 'time.ctime', ([], {}), '()\n', (13960, 13962), False, 'import time\n'), ((14392, 14439), 'os.path.join', 'os.path.join', (['args.output_directory', '"""vent.txt"""'], {}), "(args.output_directory, 'vent.txt')\n", (14404, 14439), False, 'import os, pickle\n'), ((14713, 14763), 'os.path.join', 'os.path.join', (['args.output_directory', '"""bi_vent.txt"""'], {}), "(args.output_directory, 'bi_vent.txt')\n", (14725, 14763), False, 'import os, pickle\n'), ((15413, 15492), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_vent/validation_vent/test_vent.txt"""'], {}), "(args.output_directory, 'train_vent/validation_vent/test_vent.txt')\n", (15425, 15492), False, 'import os, pickle\n'), ((16127, 16219), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_bi_vent/validation_bi_vent/test_bi_vent.txt"""'], {}), "(args.output_directory,\n 'train_bi_vent/validation_bi_vent/test_bi_vent.txt')\n", (16139, 16219), False, 'import os, pickle\n'), ((9410, 9455), 'numpy.max', 'np.max', (['data[data.HADM_ID == hadm_id].ENDTIME'], {}), '(data[data.HADM_ID == hadm_id].ENDTIME)\n', (9416, 9455), True, 'import numpy as np\n'), ((9685, 9697), 'time.ctime', 'time.ctime', ([], {}), '()\n', (9695, 9697), False, 'import time\n'), ((9896, 9908), 'time.ctime', 'time.ctime', ([], {}), '()\n', (9906, 9908), False, 'import time\n'), ((10131, 10143), 'time.ctime', 'time.ctime', ([], {}), '()\n', (10141, 10143), 
False, 'import time\n'), ((13083, 13095), 'time.ctime', 'time.ctime', ([], {}), '()\n', (13093, 13095), False, 'import time\n'), ((13656, 13668), 'time.ctime', 'time.ctime', ([], {}), '()\n', (13666, 13668), False, 'import time\n'), ((14177, 14224), 'os.path.join', 'os.path.join', (['args.output_directory', '"""vent.txt"""'], {}), "(args.output_directory, 'vent.txt')\n", (14189, 14224), False, 'import os, pickle\n'), ((14485, 14535), 'os.path.join', 'os.path.join', (['args.output_directory', '"""bi_vent.txt"""'], {}), "(args.output_directory, 'bi_vent.txt')\n", (14497, 14535), False, 'import os, pickle\n'), ((14843, 14896), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_vent.txt"""'], {}), "(args.output_directory, 'train_vent.txt')\n", (14855, 14896), False, 'import os, pickle\n'), ((15013, 15071), 'os.path.join', 'os.path.join', (['args.output_directory', '"""validation_vent.txt"""'], {}), "(args.output_directory, 'validation_vent.txt')\n", (15025, 15071), False, 'import os, pickle\n'), ((15188, 15240), 'os.path.join', 'os.path.join', (['args.output_directory', '"""test_vent.txt"""'], {}), "(args.output_directory, 'test_vent.txt')\n", (15200, 15240), False, 'import os, pickle\n'), ((15538, 15594), 'os.path.join', 'os.path.join', (['args.output_directory', '"""train_bi_vent.txt"""'], {}), "(args.output_directory, 'train_bi_vent.txt')\n", (15550, 15594), False, 'import os, pickle\n'), ((15711, 15772), 'os.path.join', 'os.path.join', (['args.output_directory', '"""validation_bi_vent.txt"""'], {}), "(args.output_directory, 'validation_bi_vent.txt')\n", (15723, 15772), False, 'import os, pickle\n'), ((15889, 15944), 'os.path.join', 'os.path.join', (['args.output_directory', '"""test_bi_vent.txt"""'], {}), "(args.output_directory, 'test_bi_vent.txt')\n", (15901, 15944), False, 'import os, pickle\n'), ((8994, 9041), 'numpy.min', 'np.min', (['data[data.HADM_ID == hadm_id].STARTTIME'], {}), '(data[data.HADM_ID == hadm_id].STARTTIME)\n', (9000, 9041), 
True, 'import numpy as np\n'), ((14337, 14349), 'time.ctime', 'time.ctime', ([], {}), '()\n', (14347, 14349), False, 'import time\n'), ((14648, 14660), 'time.ctime', 'time.ctime', ([], {}), '()\n', (14658, 14660), False, 'import time\n'), ((15358, 15370), 'time.ctime', 'time.ctime', ([], {}), '()\n', (15368, 15370), False, 'import time\n'), ((16062, 16074), 'time.ctime', 'time.ctime', ([], {}), '()\n', (16072, 16074), False, 'import time\n'), ((9255, 9313), 'numpy.min', 'np.min', (['data[data.HADM_ID == hadm_id].FIRST_VENT_STARTTIME'], {}), '(data[data.HADM_ID == hadm_id].FIRST_VENT_STARTTIME)\n', (9261, 9313), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# ========================================================================
#
# Imports
#
# ========================================================================
import os
import shutil
import argparse
import subprocess as sp
import numpy as np
import time
from datetime import timedelta
# ========================================================================
#
# Main
#
# ========================================================================
if __name__ == "__main__":
    # Timer
    start = time.time()
    # Parse arguments
    parser = argparse.ArgumentParser(description="Run cases")
    parser.add_argument(
        "-np", "--num-procs", dest="np", help="Number of MPI ranks", type=int, default=1
    )
    args = parser.parse_args()
    # Setup: sweep over grid resolutions and CFL numbers
    ncells = np.arange(10, 66, 2)
    cfls = np.linspace(1e-2, 0.999, 50)
    # cfls = [0.056, 0.17, 0.28, 0.46, 0.86]
    workdir = os.getcwd()
    pelecbin = os.path.abspath("PeleC3d.gnu.MPI.ex")
    casedir = os.path.abspath("cases")
    iname = "inputs_3d"
    pname = "probin"
    # Start every sweep from a clean cases directory
    if os.path.exists(casedir):
        shutil.rmtree(casedir)
    os.makedirs(casedir)
    # Maximum velocity in domain, max(u+c)
    umax = 41662.30355
    # Domain length, used in the fixed time step: dt = CFL * dx / umax with dx = L / ncell
    L = 2
    # Loop over number of cells and CFL numbers
    for ncell in ncells:
        for cfl in cfls:
            # Prep run directory
            rundir = os.path.join(casedir, "{0:d}cells_{1:f}".format(ncell, cfl))
            os.makedirs(rundir)
            shutil.copy2(os.path.join(workdir, iname), rundir)
            shutil.copy2(os.path.join(workdir, pname), rundir)
            # Calculate fixed time step (was hard-coded "2." — use the L constant)
            dt = cfl * L / (ncell * umax)
            # Context manager closes the log every iteration; the original
            # opened one file per case and never closed any of them.
            with open(os.path.join(rundir, "out"), "w") as log:
                status = "Running {0:d} cells at CFL = {1:f} DT = {2:e}".format(
                    ncell, cfl, dt
                )
                print(status)
                log.write(status + "\n")
                log.flush()
                # Run Pele
                os.chdir(rundir)
                cmd = "mpirun -np {0:d} {1:s} {2:s} pelec.fixed_dt={3:e} amr.n_cell={4:d} {4:d} {4:d}".format(
                    args.np, pelecbin, iname, dt, ncell
                )
                # NOTE(review): stderr=sp.PIPE is never read; a very chatty
                # solver could fill the pipe and stall — confirm acceptable.
                proc = sp.Popen(cmd, shell=True, stdout=log, stderr=sp.PIPE)
                proc.wait()
                # Build the plotfile list consumed by VisIt
                proc = sp.Popen(
                    "ls -1v plt*/Header | tee movie.visit",
                    shell=True,
                    stdout=log,
                    stderr=sp.PIPE,
                )
                proc.wait()
                log.flush()
            os.chdir(workdir)
    # output timer
    end = time.time() - start
    print(
        "Elapsed time "
        + str(timedelta(seconds=end))
        + " (or {0:f} seconds)".format(end)
    )
| [
"os.path.exists",
"argparse.ArgumentParser",
"os.makedirs",
"subprocess.Popen",
"os.path.join",
"os.getcwd",
"os.chdir",
"numpy.linspace",
"shutil.rmtree",
"os.path.abspath",
"datetime.timedelta",
"time.time",
"numpy.arange"
] | [((528, 539), 'time.time', 'time.time', ([], {}), '()\n', (537, 539), False, 'import time\n'), ((576, 624), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Run cases"""'}), "(description='Run cases')\n", (599, 624), False, 'import argparse\n'), ((802, 822), 'numpy.arange', 'np.arange', (['(10)', '(66)', '(2)'], {}), '(10, 66, 2)\n', (811, 822), True, 'import numpy as np\n'), ((834, 862), 'numpy.linspace', 'np.linspace', (['(0.01)', '(0.999)', '(50)'], {}), '(0.01, 0.999, 50)\n', (845, 862), True, 'import numpy as np\n'), ((922, 933), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (931, 933), False, 'import os\n'), ((949, 986), 'os.path.abspath', 'os.path.abspath', (['"""PeleC3d.gnu.MPI.ex"""'], {}), "('PeleC3d.gnu.MPI.ex')\n", (964, 986), False, 'import os\n'), ((1001, 1025), 'os.path.abspath', 'os.path.abspath', (['"""cases"""'], {}), "('cases')\n", (1016, 1025), False, 'import os\n'), ((1078, 1101), 'os.path.exists', 'os.path.exists', (['casedir'], {}), '(casedir)\n', (1092, 1101), False, 'import os\n'), ((1138, 1158), 'os.makedirs', 'os.makedirs', (['casedir'], {}), '(casedir)\n', (1149, 1158), False, 'import os\n'), ((1111, 1133), 'shutil.rmtree', 'shutil.rmtree', (['casedir'], {}), '(casedir)\n', (1124, 1133), False, 'import shutil\n'), ((2610, 2621), 'time.time', 'time.time', ([], {}), '()\n', (2619, 2621), False, 'import time\n'), ((1475, 1494), 'os.makedirs', 'os.makedirs', (['rundir'], {}), '(rundir)\n', (1486, 1494), False, 'import os\n'), ((2007, 2023), 'os.chdir', 'os.chdir', (['rundir'], {}), '(rundir)\n', (2015, 2023), False, 'import os\n'), ((2216, 2269), 'subprocess.Popen', 'sp.Popen', (['cmd'], {'shell': '(True)', 'stdout': 'log', 'stderr': 'sp.PIPE'}), '(cmd, shell=True, stdout=log, stderr=sp.PIPE)\n', (2224, 2269), True, 'import subprocess as sp\n'), ((2323, 2415), 'subprocess.Popen', 'sp.Popen', (['"""ls -1v plt*/Header | tee movie.visit"""'], {'shell': '(True)', 'stdout': 'log', 'stderr': 'sp.PIPE'}), "('ls -1v 
plt*/Header | tee movie.visit', shell=True, stdout=log,\n stderr=sp.PIPE)\n", (2331, 2415), True, 'import subprocess as sp\n'), ((2562, 2579), 'os.chdir', 'os.chdir', (['workdir'], {}), '(workdir)\n', (2570, 2579), False, 'import os\n'), ((1520, 1548), 'os.path.join', 'os.path.join', (['workdir', 'iname'], {}), '(workdir, iname)\n', (1532, 1548), False, 'import os\n'), ((1583, 1611), 'os.path.join', 'os.path.join', (['workdir', 'pname'], {}), '(workdir, pname)\n', (1595, 1611), False, 'import os\n'), ((1644, 1671), 'os.path.join', 'os.path.join', (['rundir', '"""out"""'], {}), "(rundir, 'out')\n", (1656, 1671), False, 'import os\n'), ((2679, 2701), 'datetime.timedelta', 'timedelta', ([], {'seconds': 'end'}), '(seconds=end)\n', (2688, 2701), False, 'from datetime import timedelta\n')] |
"""
Автор: <NAME>
Группа: КБ-161
Вариант: 11
Дата создания: 19/04/2018
Python Version: 3.6
"""
import math
import sys
import warnings
import numpy as np
import matplotlib.pyplot as plt
# Constants
accuracy = 0.00001
START_X = 0.2
END_X = 0.8
START_Y = 1
END_Y = 3
x = [0.35, 0.41, 0.47, 0.51, 0.56, 0.64]
y = [2.73951, 2.30080, 1.96864, 1.78776, 1.59502, 1.34310]
point = 0.552
def build_points(x_array, y_array):
    """Scatter-plot each (x, y) data point on the current matplotlib figure.

    ``x_array`` and ``y_array`` are parallel sequences of point coordinates.
    """
    # Iterate coordinate pairs directly instead of indexing via range(len(...)).
    for px, py in zip(x_array, y_array):
        plt.scatter(px, py)
def knowledge_of_maya(x_array, y_array, point):
    """Evaluate the Lagrange interpolating polynomial at ``point``.

    For each node i the contribution is y_i * l_i(point), where
    l_i(point) = prod_{j != i} (point - x_j) / (x_i - x_j) is the Lagrange
    basis polynomial.  ``point`` may be a scalar or a numpy array (the
    arithmetic then broadcasts element-wise, as used by ``log_range``).
    """
    total = 0
    node_count = len(y_array)
    for i in range(node_count):
        basis = 1
        for j in range(node_count):
            if j == i:
                continue
            basis *= (point - x_array[j]) / (x_array[i] - x_array[j])
        total += y_array[i] * basis
    return total
def log_range(x_array, y_array):
    """Plot the data points together with their Lagrange interpolant.

    Draws the raw samples, evaluates the interpolating polynomial on a
    dense grid spanning the data range, and displays the curve in a fixed
    window with both coordinate axes highlighted.
    """
    print("метод мистера Лагранжа")
    build_points(x_array, y_array)
    # Dense evaluation grid (228 samples) across the data range.
    grid = np.linspace(x_array[0], x_array[-1], 228)
    curve = knowledge_of_maya(x_array, y_array, grid)
    plt.plot(grid, curve)
    plt.grid(True)
    plt.axis([START_X, END_X, START_Y, END_Y])
    plt.axhline(y=0, color='k')
    plt.axvline(x=0, color='k')
    plt.show()
def letting_kraken_out(point, start, end):
    """Evaluate the interpolant at ``point`` via Aitken's recursive scheme.

    Combines the interpolants built on x[start..end-1] and x[start+1..end]
    through a 2x2 determinant; the recursion bottoms out when ``start`` and
    ``end`` are adjacent indices, where the raw y samples are used directly.
    Reads the module-level data arrays ``x`` and ``y``.
    """
    global x
    global y
    if abs(start - end) == 1:
        # Base case: two neighbouring nodes.
        upper, lower = y[start], y[end]
    else:
        # Recursive case: interpolants on the two overlapping sub-ranges.
        upper = letting_kraken_out(point, start, end - 1)
        lower = letting_kraken_out(point, start + 1, end)
    matrix = np.array([[point - x[start], upper],
                       [point - x[end], lower]])
    return 1 / (x[end] - x[start]) * np.linalg.det(matrix)
def hay_taken(point):
    """Print P(point), the Aitken-scheme interpolant over all data nodes."""
    global x
    print("метод дяди Эйткена")
    value = letting_kraken_out(point, 0, len(x) - 1)
    print(f'P({point}) = {value}')
if __name__ == "__main__":
    # Run both demos in order; a failure in one must not stop the other.
    for task, task_args in ((log_range, (x, y)), (hay_taken, (point,))):
        try:
            task(*task_args)
        except Exception as err:
            print(err)
| [
"matplotlib.pyplot.grid",
"numpy.linalg.det",
"matplotlib.pyplot.axhline",
"numpy.array",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.axis",
"matplotlib.pyplot.axvline",
"matplotlib.pyplot.show"
] | [((1141, 1155), 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), '(True)\n', (1149, 1155), True, 'import matplotlib.pyplot as plt\n'), ((1160, 1202), 'matplotlib.pyplot.axis', 'plt.axis', (['[START_X, END_X, START_Y, END_Y]'], {}), '([START_X, END_X, START_Y, END_Y])\n', (1168, 1202), True, 'import matplotlib.pyplot as plt\n'), ((1207, 1234), 'matplotlib.pyplot.axhline', 'plt.axhline', ([], {'y': '(0)', 'color': '"""k"""'}), "(y=0, color='k')\n", (1218, 1234), True, 'import matplotlib.pyplot as plt\n'), ((1239, 1266), 'matplotlib.pyplot.axvline', 'plt.axvline', ([], {'x': '(0)', 'color': '"""k"""'}), "(x=0, color='k')\n", (1250, 1266), True, 'import matplotlib.pyplot as plt\n'), ((1271, 1281), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1279, 1281), True, 'import matplotlib.pyplot as plt\n'), ((483, 518), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x_array[i]', 'y_array[i]'], {}), '(x_array[i], y_array[i])\n', (494, 518), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1467), 'numpy.array', 'np.array', (['[[point - x[start], y[start]], [point - x[end], y[end]]]'], {}), '([[point - x[start], y[start]], [point - x[end], y[end]]])\n', (1409, 1467), True, 'import numpy as np\n'), ((1721, 1742), 'numpy.linalg.det', 'np.linalg.det', (['matrix'], {}), '(matrix)\n', (1734, 1742), True, 'import numpy as np\n')] |
import pandas as pd
import numpy as np
from scipy.stats import skew

df_test = pd.read_csv("../../test.csv")
df_train = pd.read_csv("../../train.csv")
TARGET = 'SalePrice'

# Drop features with many missing values.
# Rationale (from the original analysis): the fields with missing values all
# correlate weakly with the target, so high-missing-rate features are removed
# outright; the single remaining row with a missing 'Electrical' value is
# dropped instead of imputed.
total = df_train.isnull().sum().sort_values(ascending=False)
percent = (df_train.isnull().sum() / df_train.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
# Use the axis= keyword: positional axis for DataFrame.drop was deprecated
# and removed in pandas 2.0.
df_train = df_train.drop(missing_data[missing_data['Total'] > 1].index, axis=1)
df_train = df_train.drop(df_train.loc[df_train['Electrical'].isnull()].index)

# Apply the same missing-value treatment to the test data.
total = df_test.isnull().sum().sort_values(ascending=False)
percent = (df_test.isnull().sum() / df_test.isnull().count()).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])
df_test = df_test.drop(missing_data[missing_data['Total'] > 1].index, axis=1)

# Log-transform the target, log(1 + y), so it is closer to normal.
y_train = np.log(df_train[TARGET] + 1)
df_train.drop([TARGET], axis=1, inplace=True)

# Stack train and test features so all transformations apply consistently.
all_data = pd.concat((df_train.loc[:, 'MSSubClass':'SaleCondition'],
                      df_test.loc[:, 'MSSubClass':'SaleCondition']))

# Log-transform skewed numeric features towards normality.
numeric_feats = all_data.dtypes[all_data.dtypes != "object"].index
skewed_feats = df_train[numeric_feats].apply(lambda col: skew(col.dropna()))
skewed_feats = skewed_feats[skewed_feats > 0.75].index  # skewness threshold 0.75
all_data[skewed_feats] = np.log1p(all_data[skewed_feats])

# One-hot encode the categorical features.
all_data = pd.get_dummies(all_data)

x_train = np.array(all_data[:df_train.shape[0]])
x_test = np.array(all_data[df_train.shape[0]:])
train = all_data[:df_train.shape[0]]
# BUG FIX: the test split must take the rows AFTER the training rows
# (consistent with x_test above); the original sliced
# all_data[:df_test.shape[0]], which returns training rows.
test = all_data[df_train.shape[0]:]

# train/test are already DataFrames; no pd.DataFrame(...) wrapper needed.
train.to_csv('../../CleantrainingDataSet_Bang.csv', index=False)
test.to_csv('../../CleantestingDataSet_Bang.csv', index=False)
| [
"pandas.read_csv",
"pandas.DataFrame",
"numpy.log",
"numpy.array",
"pandas.get_dummies",
"numpy.log1p",
"pandas.concat"
] | [((79, 108), 'pandas.read_csv', 'pd.read_csv', (['"""../../test.csv"""'], {}), "('../../test.csv')\n", (90, 108), True, 'import pandas as pd\n'), ((120, 150), 'pandas.read_csv', 'pd.read_csv', (['"""../../train.csv"""'], {}), "('../../train.csv')\n", (131, 150), True, 'import pandas as pd\n'), ((485, 547), 'pandas.concat', 'pd.concat', (['[total, percent]'], {'axis': '(1)', 'keys': "['Total', 'Percent']"}), "([total, percent], axis=1, keys=['Total', 'Percent'])\n", (494, 547), True, 'import pandas as pd\n'), ((954, 1016), 'pandas.concat', 'pd.concat', (['[total, percent]'], {'axis': '(1)', 'keys': "['Total', 'Percent']"}), "([total, percent], axis=1, keys=['Total', 'Percent'])\n", (963, 1016), True, 'import pandas as pd\n'), ((1158, 1186), 'numpy.log', 'np.log', (['(df_train[TARGET] + 1)'], {}), '(df_train[TARGET] + 1)\n', (1164, 1186), True, 'import numpy as np\n'), ((1306, 1414), 'pandas.concat', 'pd.concat', (["(df_train.loc[:, 'MSSubClass':'SaleCondition'], df_test.loc[:, 'MSSubClass'\n :'SaleCondition'])"], {}), "((df_train.loc[:, 'MSSubClass':'SaleCondition'], df_test.loc[:,\n 'MSSubClass':'SaleCondition']))\n", (1315, 1414), True, 'import pandas as pd\n'), ((1760, 1792), 'numpy.log1p', 'np.log1p', (['all_data[skewed_feats]'], {}), '(all_data[skewed_feats])\n', (1768, 1792), True, 'import numpy as np\n'), ((1821, 1845), 'pandas.get_dummies', 'pd.get_dummies', (['all_data'], {}), '(all_data)\n', (1835, 1845), True, 'import pandas as pd\n'), ((1858, 1896), 'numpy.array', 'np.array', (['all_data[:df_train.shape[0]]'], {}), '(all_data[:df_train.shape[0]])\n', (1866, 1896), True, 'import numpy as np\n'), ((1906, 1944), 'numpy.array', 'np.array', (['all_data[df_train.shape[0]:]'], {}), '(all_data[df_train.shape[0]:])\n', (1914, 1944), True, 'import numpy as np\n'), ((2067, 2086), 'pandas.DataFrame', 'pd.DataFrame', (['train'], {}), '(train)\n', (2079, 2086), True, 'import pandas as pd\n'), ((2146, 2164), 'pandas.DataFrame', 'pd.DataFrame', (['test'], {}), 
'(test)\n', (2158, 2164), True, 'import pandas as pd\n')] |
from torch import nn, optim
import torch
from .fit import set_determenistic
import numpy as np
class mlp(nn.Module):
    """Fully connected autoencoder.

    Layer widths: in_features -> n_middle -> n_hidden -> n_middle -> in_features,
    where n_middle lies halfway between the input width and the bottleneck
    width n_hidden.  Sigmoid activations between layers; the last layer is
    linear.
    """
    def __init__(self, in_features, n_hidden, seed=None):
        # Fix the RNG state for reproducible weight init (project helper).
        set_determenistic(seed)
        super().__init__()
        self.in_features = in_features
        # Intermediate width: halfway between input size and bottleneck size.
        n_middle= int((in_features - n_hidden)/2) + n_hidden
        self.linear1 = nn.Linear(in_features=in_features, out_features=n_middle)
        self.linear2 = nn.Linear(in_features=n_middle, out_features=n_hidden)
        self.linear3 = nn.Linear(in_features=n_hidden, out_features=n_middle)
        self.linear4 = nn.Linear(in_features=n_middle, out_features=in_features)
        self.Sigmoid = nn.Sigmoid()
    def forward(self, x):
        """Encode and decode ``x``; the output layer has no activation."""
        x = self.Sigmoid(self.linear1(x))
        x = self.Sigmoid(self.linear2(x))
        x = self.Sigmoid(self.linear3(x))
        y_pred = self.linear4(x)
        return y_pred
    def run_epoch(self, iterator, optimizer, criterion, points_ahead=1, phase='train', device=torch.device('cuda:0')):
        """Run one pass over ``iterator``.

        phase='train'    -> gradients enabled, optimizer stepped, mean loss returned.
        phase='forecast' -> no loss computed (criterion may be None); the
                            concatenated predictions are returned as a numpy array.
        other phases     -> evaluation: loss computed, no gradient updates.

        NOTE(review): ``points_ahead`` is accepted but never used here.
        """
        self.to(device)
        is_train = (phase == 'train')
        if is_train:
            self.train()
        else:
            self.eval()
        epoch_loss = 0
        all_y_preds = []
        with torch.set_grad_enabled(is_train):
            for i, (x,y) in enumerate(iterator):
                # Batch to float tensors on the target device; x carries
                # requires_grad so gradients flow from the input path.
                x,y = np.array(x),np.array(y)
                x = torch.tensor(x).float().to(device).requires_grad_()
                y_true = torch.tensor(y).float().to(device)
                y_pred = self.forward(x)
                if phase == 'forecast':
                    all_y_preds.append(y_pred)
                    continue # when phase == 'forecast' criterion is None, so skip the loss
                loss = criterion(y_pred,y_true)
                if is_train:
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                epoch_loss += loss.item()
        if phase != 'forecast':
            return epoch_loss / len(iterator)#, n_true_predicted / n_predicted
        else:
            return torch.cat(all_y_preds).detach().cpu().numpy()
class lstm(nn.Module):
    """Stacked-LSTM autoencoder over sequences.

    Hidden widths mirror the mlp class: in_features -> n_middle -> n_hidden
    -> n_middle -> in_features, followed by a linear head applied to the
    last time step of the final LSTM's output.  Hidden/cell states are kept
    on the instance and must be (re)set via ``initHidden`` before each batch.
    """
    def __init__(self, in_features, n_hidden, seed=None):
        super().__init__()
        # Fix the RNG state for reproducible weight init (project helper).
        set_determenistic(seed)
        # Intermediate width: halfway between input size and bottleneck size.
        n_middle= int((in_features - n_hidden)/2) + n_hidden
        self.in_features = in_features
        self.n_hidden = n_hidden
        self.n_middle = n_middle
        self.lstm1 = nn.LSTM(input_size=in_features,
                         hidden_size=n_middle,
                         batch_first =True)
        self.lstm2 = nn.LSTM(input_size=n_middle,
                         hidden_size=n_hidden,
                         batch_first =True)
        self.lstm3 = nn.LSTM(input_size=n_hidden,
                         hidden_size=n_middle,
                         batch_first =True)
        self.lstm4 = nn.LSTM(input_size=n_middle,
                         hidden_size=in_features,
                         batch_first =True)
        self.linear = nn.Linear(in_features=in_features, out_features=in_features)
    def initHidden(self,batch_size,device):
        """Reset all four (h, c) state pairs to zeros for a new batch."""
        self.hidden_lstm1 = (
            torch.zeros(1, batch_size, self.n_middle).to(device),
            torch.zeros(1, batch_size, self.n_middle).to(device)
        )
        self.hidden_lstm2 = (
            torch.zeros(1, batch_size, self.n_hidden).to(device),
            torch.zeros(1, batch_size, self.n_hidden).to(device)
        )
        self.hidden_lstm3 = (
            torch.zeros(1, batch_size, self.n_middle).to(device),
            torch.zeros(1, batch_size, self.n_middle).to(device)
        )
        self.hidden_lstm4 = (
            torch.zeros(1, batch_size, self.in_features).to(device),
            torch.zeros(1, batch_size, self.in_features).to(device)
        )
    def forward(self, sequences):
        """Run the 4-layer LSTM stack and map the last time step through the head."""
        # NOTE(review): batch_size is computed but unused below.
        batch_size = len(sequences)
        lstm_out1, self.hidden_lstm1 = self.lstm1(sequences, self.hidden_lstm1)
        lstm_out2, self.hidden_lstm2 = self.lstm2(lstm_out1, self.hidden_lstm2)
        lstm_out3, self.hidden_lstm3 = self.lstm3(lstm_out2, self.hidden_lstm3)
        lstm_out4, self.hidden_lstm4 = self.lstm4(lstm_out3, self.hidden_lstm4)
        # last_time_step = lstm_out4.reshape(-1, batch_size, self.in_features)[-1] # -1 is len_seq
        last_time_step = lstm_out4[:,-1,:]
        y_pred = self.linear(last_time_step)
        return y_pred
    def run_epoch(self, iterator, optimizer, criterion, phase='train', device=torch.device('cuda:0'), encod_decode_model=False, points_ahead=None):
        """Run one pass over ``iterator`` (see mlp.run_epoch for the phase contract).

        NOTE(review): ``encod_decode_model`` and ``points_ahead`` are accepted
        but never used in this method.
        """
        self.to(device)
        is_train = (phase == 'train')
        if is_train:
            self.train()
        else:
            self.eval()
        epoch_loss = 0
        all_y_preds = []
        with torch.set_grad_enabled(is_train):
            for i, (x,y) in enumerate(iterator):
                # Key detail: the target is the LAST time step of the input
                # window itself (self-supervised reconstruction of x[:, -1, :]).
                x,y = np.array(x),np.array(x)[:,-1,:]
                self.initHidden(x.shape[0],device=device)
                x = torch.tensor(x).float().to(device).requires_grad_()
                y_true = torch.tensor(y).float().to(device)
                y_pred = self.forward(x)
                if phase == 'forecast':
                    all_y_preds.append(y_pred)
                    continue # when phase == 'forecast' criterion is None, so skip the loss
                loss = criterion(y_pred,y_true)
                if is_train:
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
                epoch_loss += loss.item()
        if phase != 'forecast':
            return epoch_loss / len(iterator)#, n_true_predicted / n_predicted
        else:
            return torch.cat(all_y_preds).detach().cpu().numpy()
"torch.nn.Sigmoid",
"torch.nn.LSTM",
"numpy.array",
"torch.tensor",
"torch.nn.Linear",
"torch.set_grad_enabled",
"torch.zeros",
"torch.cat",
"torch.device"
] | [((385, 442), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'in_features', 'out_features': 'n_middle'}), '(in_features=in_features, out_features=n_middle)\n', (394, 442), False, 'from torch import nn, optim\n'), ((466, 520), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_middle', 'out_features': 'n_hidden'}), '(in_features=n_middle, out_features=n_hidden)\n', (475, 520), False, 'from torch import nn, optim\n'), ((545, 599), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_hidden', 'out_features': 'n_middle'}), '(in_features=n_hidden, out_features=n_middle)\n', (554, 599), False, 'from torch import nn, optim\n'), ((624, 681), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'n_middle', 'out_features': 'in_features'}), '(in_features=n_middle, out_features=in_features)\n', (633, 681), False, 'from torch import nn, optim\n'), ((706, 718), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (716, 718), False, 'from torch import nn, optim\n'), ((1032, 1054), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1044, 1054), False, 'import torch\n'), ((2716, 2787), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'in_features', 'hidden_size': 'n_middle', 'batch_first': '(True)'}), '(input_size=in_features, hidden_size=n_middle, batch_first=True)\n', (2723, 2787), False, 'from torch import nn, optim\n'), ((2866, 2934), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'n_middle', 'hidden_size': 'n_hidden', 'batch_first': '(True)'}), '(input_size=n_middle, hidden_size=n_hidden, batch_first=True)\n', (2873, 2934), False, 'from torch import nn, optim\n'), ((3013, 3081), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'n_hidden', 'hidden_size': 'n_middle', 'batch_first': '(True)'}), '(input_size=n_hidden, hidden_size=n_middle, batch_first=True)\n', (3020, 3081), False, 'from torch import nn, optim\n'), ((3160, 3231), 'torch.nn.LSTM', 'nn.LSTM', ([], {'input_size': 'n_middle', 'hidden_size': 'in_features', 'batch_first': 
'(True)'}), '(input_size=n_middle, hidden_size=in_features, batch_first=True)\n', (3167, 3231), False, 'from torch import nn, optim\n'), ((3313, 3373), 'torch.nn.Linear', 'nn.Linear', ([], {'in_features': 'in_features', 'out_features': 'in_features'}), '(in_features=in_features, out_features=in_features)\n', (3322, 3373), False, 'from torch import nn, optim\n'), ((4894, 4916), 'torch.device', 'torch.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (4906, 4916), False, 'import torch\n'), ((1284, 1316), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['is_train'], {}), '(is_train)\n', (1306, 1316), False, 'import torch\n'), ((5180, 5212), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['is_train'], {}), '(is_train)\n', (5202, 5212), False, 'import torch\n'), ((1389, 1400), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1397, 1400), True, 'import numpy as np\n'), ((1401, 1412), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1409, 1412), True, 'import numpy as np\n'), ((3478, 3519), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_middle'], {}), '(1, batch_size, self.n_middle)\n', (3489, 3519), False, 'import torch\n'), ((3544, 3585), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_middle'], {}), '(1, batch_size, self.n_middle)\n', (3555, 3585), False, 'import torch\n'), ((3669, 3710), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_hidden'], {}), '(1, batch_size, self.n_hidden)\n', (3680, 3710), False, 'import torch\n'), ((3735, 3776), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_hidden'], {}), '(1, batch_size, self.n_hidden)\n', (3746, 3776), False, 'import torch\n'), ((3860, 3901), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_middle'], {}), '(1, batch_size, self.n_middle)\n', (3871, 3901), False, 'import torch\n'), ((3926, 3967), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.n_middle'], {}), '(1, batch_size, self.n_middle)\n', (3937, 3967), False, 'import torch\n'), 
((4051, 4095), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.in_features'], {}), '(1, batch_size, self.in_features)\n', (4062, 4095), False, 'import torch\n'), ((4120, 4164), 'torch.zeros', 'torch.zeros', (['(1)', 'batch_size', 'self.in_features'], {}), '(1, batch_size, self.in_features)\n', (4131, 4164), False, 'import torch\n'), ((5285, 5296), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5293, 5296), True, 'import numpy as np\n'), ((5297, 5308), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (5305, 5308), True, 'import numpy as np\n'), ((1544, 1559), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (1556, 1559), False, 'import torch\n'), ((5509, 5524), 'torch.tensor', 'torch.tensor', (['y'], {}), '(y)\n', (5521, 5524), False, 'import torch\n'), ((2225, 2247), 'torch.cat', 'torch.cat', (['all_y_preds'], {}), '(all_y_preds)\n', (2234, 2247), False, 'import torch\n'), ((6189, 6211), 'torch.cat', 'torch.cat', (['all_y_preds'], {}), '(all_y_preds)\n', (6198, 6211), False, 'import torch\n'), ((1467, 1482), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (1479, 1482), False, 'import torch\n'), ((5432, 5447), 'torch.tensor', 'torch.tensor', (['x'], {}), '(x)\n', (5444, 5447), False, 'import torch\n')] |
# %%
import pandas as pd
import numpy as np
from datetime import datetime
import os
import pickle
import matplotlib.pyplot as plt
import scipy.special as sc
from scipy.stats import norm
from scipy.stats import lognorm
import copy

exec(open('../env_vars.py').read())
dir_data = os.environ['dir_data']
dir_picklejar = os.environ['dir_picklejar']
dir_code_methods = os.environ['dir_code_methods']

# Pull in the model classes (latent, measurement_model, model) and the test
# data (latent_data, clean_data) defined by the unit-test script.
exec(open(os.path.join(os.path.realpath(dir_code_methods), 'unit-test-00.py')).read())

# %%
# Build the latent Poisson-process model, the self-report measurement model,
# and the combined model object on deep copies of the test data.
tmp_latent_data = copy.deepcopy(latent_data)
tmp_clean_data = copy.deepcopy(clean_data)

lat_pp = latent(data=tmp_latent_data, model=latent_poisson_process_ex2, params = {'lambda_prequit': 1, 'lambda_postquit': 1})
sr_mem = measurement_model(data=tmp_clean_data, model=selfreport_mem_total, latent = tmp_latent_data, model_params={'p':0.9})
test_model = model(init = clean_data, latent = lat_pp , model = sr_mem)

num_iters = 105000
use_cutpoint = 5000

np.random.seed(seed = 412983)

# %%
###############################################################################
# Adaptive updates
###############################################################################
dict_store_params = {}
# Initial proposal covariance (scaled by the usual 2.38^2/d adaptive factor)
# and initial running mean of the chain.
cov_init = ((.000001*2)/(2.38**2))*np.eye(2)
barX_init = np.array([0., 0.])

# Running proposal state.  Seeding it with the initial values lets the first
# iteration go through the same call as every later one, collapsing the two
# near-duplicate branches of the original loop.
cov_new = cov_init
barX_new = barX_init
sigma_new = 5

# %%
for it in range(1, num_iters):  # 'it' avoids shadowing the builtin 'iter'
    current_out_dict = test_model.adapMH_params(adaptive = True,
                                                covariance = cov_new,
                                                barX = barX_new,
                                                covariance_init = cov_init,
                                                barX_init = barX_init,
                                                iteration = it,
                                                cutpoint = use_cutpoint,
                                                sigma = sigma_new)
    # Push the proposed parameters into the latent model and store the draw.
    lat_pp.update_params(new_params = {'lambda_prequit':current_out_dict['new_params']['lambda_prequit'],
                                      'lambda_postquit':current_out_dict['new_params']['lambda_postquit']})
    dict_store_params.update({it:current_out_dict})
    # Carry the adaptive proposal state into the next iteration.
    cov_new = current_out_dict['covariance_new']
    sigma_new = current_out_dict['sigma_new']
    barX_new = current_out_dict['barX_new']

print(current_out_dict['new_params'])

# %%
# Acceptance probability over the chain.
cnt = 0
for it in range(1, num_iters):
    cnt = cnt + dict_store_params[it]['rejected']
# BUG FIX: the loop performs num_iters - 1 iterations (range starts at 1),
# so the rejection count must be divided by num_iters - 1, not num_iters.
accept_prob = 1 - cnt/(num_iters - 1)
print(accept_prob)

# %%
# Trace arrays: proposed parameters, adaptive sigma, acceptance probability.
# Row 0 is never written (the chain is 1-indexed) but lies before the burn-in
# cutpoint and is excluded from all plots below.
temp = np.zeros(shape = (num_iters, 2+len(lat_pp.params.keys())))
for it in range(1, num_iters):
    temp[it,0] = dict_store_params[it]['new_params']['lambda_prequit']
    temp[it,1] = dict_store_params[it]['new_params']['lambda_postquit']
    temp[it,2] = dict_store_params[it]['sigma_new']
    temp[it,3] = dict_store_params[it]['acceptprob']

plot_cutpoint = use_cutpoint

fig, axs = plt.subplots(3,2)
fig.suptitle('Adaptive MH Parameter Updates\n' + 'Acceptance Probability is '+ str(round(accept_prob*100, 1)) + str('%'), fontsize=12)
axs[0,0].hist(temp[plot_cutpoint:,0], bins = 30)
axs[0,1].plot(np.arange(temp[plot_cutpoint:,0].size),temp[plot_cutpoint:,0])
axs[1,0].hist(temp[plot_cutpoint:,1], bins = 30)
axs[1,1].plot(np.arange(temp[plot_cutpoint:,1].size),temp[plot_cutpoint:,1])
axs[2,0].plot(np.arange(temp[plot_cutpoint:,2].size),temp[plot_cutpoint:,2])
axs[2,1].plot(np.arange(temp[plot_cutpoint:,3].size),temp[plot_cutpoint:,3])
plt.show()
| [
"numpy.eye",
"os.path.realpath",
"numpy.array",
"numpy.random.seed",
"copy.deepcopy",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((558, 584), 'copy.deepcopy', 'copy.deepcopy', (['latent_data'], {}), '(latent_data)\n', (571, 584), False, 'import copy\n'), ((602, 627), 'copy.deepcopy', 'copy.deepcopy', (['clean_data'], {}), '(clean_data)\n', (615, 627), False, 'import copy\n'), ((1257, 1284), 'numpy.random.seed', 'np.random.seed', ([], {'seed': '(412983)'}), '(seed=412983)\n', (1271, 1284), True, 'import numpy as np\n'), ((1613, 1633), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1621, 1633), True, 'import numpy as np\n'), ((4497, 4515), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(2)'], {}), '(3, 2)\n', (4509, 4515), True, 'import matplotlib.pyplot as plt\n'), ((5057, 5067), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5065, 5067), True, 'import matplotlib.pyplot as plt\n'), ((1591, 1600), 'numpy.eye', 'np.eye', (['(2)'], {}), '(2)\n', (1597, 1600), True, 'import numpy as np\n'), ((4713, 4752), 'numpy.arange', 'np.arange', (['temp[plot_cutpoint:, 0].size'], {}), '(temp[plot_cutpoint:, 0].size)\n', (4722, 4752), True, 'import numpy as np\n'), ((4839, 4878), 'numpy.arange', 'np.arange', (['temp[plot_cutpoint:, 1].size'], {}), '(temp[plot_cutpoint:, 1].size)\n', (4848, 4878), True, 'import numpy as np\n'), ((4917, 4956), 'numpy.arange', 'np.arange', (['temp[plot_cutpoint:, 2].size'], {}), '(temp[plot_cutpoint:, 2].size)\n', (4926, 4956), True, 'import numpy as np\n'), ((4994, 5033), 'numpy.arange', 'np.arange', (['temp[plot_cutpoint:, 3].size'], {}), '(temp[plot_cutpoint:, 3].size)\n', (5003, 5033), True, 'import numpy as np\n'), ((453, 487), 'os.path.realpath', 'os.path.realpath', (['dir_code_methods'], {}), '(dir_code_methods)\n', (469, 487), False, 'import os\n')] |
import glob
import os
from typing import Tuple
import numpy as np
from PIL import Image
import tensorflow as tf
from models import resnet50
from tensorflow import lite as tf_lite
CHECKPOINT_DIR = './checkpoints/resnet50'
TF_LITE_MODEL = './tflite-models/resnet50.tflite'
def run_tflite(interpreter: tf_lite.Interpreter, inputs: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray]:
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
interpreter.set_tensor(input_details[0]['index'], inputs)
interpreter.invoke()
softmax_result = interpreter.get_tensor(output_details[0]['index'])
intermediate_result = interpreter.get_tensor(output_details[1]['index'])
return softmax_result, intermediate_result
def run_keras(model: tf.keras.Model, inputs: np.ndarray) \
-> Tuple[np.ndarray, np.ndarray]:
softmax_result, intermediate_result = model(inputs, training=False)
return softmax_result.numpy(), intermediate_result.numpy()
def main():
# load tensorflow keras model
image_files = glob.glob('./assets/imagenet-val-samples/*')
checkpoint = tf.train.latest_checkpoint(CHECKPOINT_DIR)
model = resnet50(1001)
model.load_weights(checkpoint)
# load tensorflow lite model
interpreter = tf_lite.Interpreter(model_path=TF_LITE_MODEL)
interpreter.allocate_tensors()
for image_file in image_files:
image = Image.open(image_file)
image = image.resize((224, 224))
image_data = np.asarray(image, dtype=np.float32)
image_data = np.expand_dims(image_data, axis=0)
tf_outputs = run_keras(model, image_data)
tflite_outputs = run_tflite(interpreter, image_data)
print('Diff 1: %.6f, Diff 2: %.6f' % \
(np.mean(tf_outputs[0] - tflite_outputs[0]),
np.mean(tf_outputs[1] - tflite_outputs[1])))
if __name__ == "__main__":
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
main()
| [
"tensorflow.lite.Interpreter",
"numpy.mean",
"PIL.Image.open",
"numpy.asarray",
"models.resnet50",
"numpy.expand_dims",
"tensorflow.train.latest_checkpoint",
"glob.glob"
] | [((1054, 1098), 'glob.glob', 'glob.glob', (['"""./assets/imagenet-val-samples/*"""'], {}), "('./assets/imagenet-val-samples/*')\n", (1063, 1098), False, 'import glob\n'), ((1114, 1156), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['CHECKPOINT_DIR'], {}), '(CHECKPOINT_DIR)\n', (1140, 1156), True, 'import tensorflow as tf\n'), ((1168, 1182), 'models.resnet50', 'resnet50', (['(1001)'], {}), '(1001)\n', (1176, 1182), False, 'from models import resnet50\n'), ((1264, 1309), 'tensorflow.lite.Interpreter', 'tf_lite.Interpreter', ([], {'model_path': 'TF_LITE_MODEL'}), '(model_path=TF_LITE_MODEL)\n', (1283, 1309), True, 'from tensorflow import lite as tf_lite\n'), ((1389, 1411), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (1399, 1411), False, 'from PIL import Image\n'), ((1466, 1501), 'numpy.asarray', 'np.asarray', (['image'], {'dtype': 'np.float32'}), '(image, dtype=np.float32)\n', (1476, 1501), True, 'import numpy as np\n'), ((1519, 1553), 'numpy.expand_dims', 'np.expand_dims', (['image_data'], {'axis': '(0)'}), '(image_data, axis=0)\n', (1533, 1553), True, 'import numpy as np\n'), ((1713, 1755), 'numpy.mean', 'np.mean', (['(tf_outputs[0] - tflite_outputs[0])'], {}), '(tf_outputs[0] - tflite_outputs[0])\n', (1720, 1755), True, 'import numpy as np\n'), ((1768, 1810), 'numpy.mean', 'np.mean', (['(tf_outputs[1] - tflite_outputs[1])'], {}), '(tf_outputs[1] - tflite_outputs[1])\n', (1775, 1810), True, 'import numpy as np\n')] |
import click
import os
import pandas as pd
import torch
import logging
import random
import numpy as np
import logging
import ray
from itertools import tee
import pickle
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc
from ray import tune
from ray.tune import track
from ray.tune.suggest.ax import AxSearch
from ax.service.ax_client import AxClient
from sklearn.model_selection import TimeSeriesSplit, KFold, train_test_split
from datasets.cicflow import CICFlowADDataset
from networks.mlp import MLP
from models.deepSVDD import DeepSVDD
from datasets.main import load_dataset
from ray.tune.suggest import Repeater
class DriftCICFlowExp(tune.Trainable):
def _setup(self, params):
# self.training_iteration = 0
self.test_labels = None
self.val_labels = None
self.val_scores = None
self.test_scores = None
self.params = params
self.cfg = params['cfg']
self.incremental = params['incremental']
self.dates = self._get_train_test(params['dates'])
self.dataset = CICFlowADDataset(root=os.path.abspath(
self.params['data_path']),
n_known_outlier_classes=1,
train_dates=[params['dates'][0]],
val_dates=[params['dates'][0]],
test_dates=[params['dates'][0]],
shuffle=True)
self.model = DeepSVDD(self.cfg['objective'], self.cfg['nu'])
self.model.set_trainer(optimizer_name=self.cfg['optimizer_name'],
lr=self.cfg['lr'],
n_epochs=self.cfg['n_epochs'],
lr_milestones=self.cfg['lr_milestone'],
batch_size=self.cfg['batch_size'],
weight_decay=self.cfg['weight_decay'],
device=self.params['device'],
n_jobs_dataloader=self.cfg["n_jobs_dataloader"])
self.model.setup(self.dataset, self.cfg['net_name'])
self.model.load_model(params['model_path'])
self.model.test(self.dataset)
def _get_train_test(self, dates):
train, test = tee(dates)
next(test, None)
return zip(train, test)
def _train(self):
try:
train, test = next(self.dates)
except StopIteration:
return {'done': True}
self.dataset = CICFlowADDataset(root=os.path.abspath(
self.params['data_path']),
n_known_outlier_classes=1,
train_dates=[train],
val_dates=[train],
test_dates=[test],
shuffle=True)
if self.incremental:
self.model.train(dataset=self.dataset,
optimizer_name=self.cfg['optimizer_name'],
lr=self.cfg['lr'],
n_epochs=1,
lr_milestones=self.cfg['lr_milestone'],
batch_size=self.cfg['batch_size'],
weight_decay=self.cfg['weight_decay'],
device=self.params['device'],
n_jobs_dataloader=self.cfg["n_jobs_dataloader"])
self.model.test(self.dataset, set_split="test")
self.model.test(self.dataset, set_split="train")
test_labels, test_scores, _ = self.model.trainer.get_results("test")
results = locals().copy()
del results["self"]
self.results = results
rocs = {
phase + '_auc_roc': roc_auc_score(labels, scores)
for phase in ["test"]
for labels, scores, _ in [self.model.trainer.get_results(phase)]
}
prs = {
phase + '_auc_pr': auc(recall, precision)
for phase in ["test"]
for labels, scores, _ in [self.model.trainer.get_results(phase)]
for precision, recall, _ in
[precision_recall_curve(labels, scores)]
}
return {**rocs, **prs}
def _save(self, checkpoint_dir):
checkpoint_path = os.path.join(checkpoint_dir,
str(self.trial_id) + "_model.pth")
self.model.save_model(checkpoint_path)
pickle.dump(self.results,
open(os.path.join(checkpoint_dir, 'results.pkl'), "wb"))
return checkpoint_path
def _restore(self, checkpoint_path):
self.model.load_model(checkpoint_path)
################################################################################
# Settings
################################################################################
@click.command()
@click.argument('data_path', type=click.Path(exists=True))
@click.option('--model_path',
type=click.Path(exists=True),
default=None,
help='Model file path (default: None).')
@click.option('--params_path',
type=click.Path(exists=True),
default=None,
help='Model file path (default: None).')
@click.option('--experiment_path',
type=click.Path(exists=True),
default='~/ray_results',
help='Model file path (default: None).')
@click.option('--seed',
type=int,
default=0,
help='Set seed. If -1, use randomization.')
def main(data_path, experiment_path, model_path, params_path, seed):
ray.init(address='auto')
data_path = os.path.abspath(data_path)
params_path = os.path.abspath(params_path)
experiment_path = os.path.abspath(experiment_path)
model_path = os.path.abspath(model_path)
n_splits = 4
dates = np.array(['2019-11-09', '2019-11-11'])
# period = np.array([
# '2019-11-08', '2019-11-09', '2019-11-11', '2019-11-12', '2019-11-13',
# '2019-11-14', '2019-11-15'
# ])
# dates = period[:7]
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
cfg = pickle.load(open(params_path, "rb"))
exp_config = {
**locals().copy(),
"incremental": tune.grid_search([True, False])
}
if exp_config['seed'] != -1:
random.seed(exp_config['seed'])
np.random.seed(exp_config['seed'])
torch.manual_seed(exp_config['seed'])
torch.cuda.manual_seed(exp_config['seed'])
torch.backends.cudnn.deterministic = True
analysis = tune.run(DriftCICFlowExp,
name="DriftCICFlowExp",
checkpoint_at_end=True,
checkpoint_freq=1,
stop={
"training_iteration": len(dates),
},
resources_per_trial={"gpu": 0},
num_samples=1,
local_dir=experiment_path,
config=exp_config)
if __name__ == '__main__':
main() | [
"torch.manual_seed",
"ray.init",
"models.deepSVDD.DeepSVDD",
"click.option",
"sklearn.metrics.auc",
"os.path.join",
"sklearn.metrics.precision_recall_curve",
"random.seed",
"sklearn.metrics.roc_auc_score",
"ray.tune.grid_search",
"numpy.array",
"torch.cuda.is_available",
"click.Path",
"num... | [((4872, 4887), 'click.command', 'click.command', ([], {}), '()\n', (4885, 4887), False, 'import click\n'), ((5436, 5528), 'click.option', 'click.option', (['"""--seed"""'], {'type': 'int', 'default': '(0)', 'help': '"""Set seed. If -1, use randomization."""'}), "('--seed', type=int, default=0, help=\n 'Set seed. If -1, use randomization.')\n", (5448, 5528), False, 'import click\n'), ((5640, 5664), 'ray.init', 'ray.init', ([], {'address': '"""auto"""'}), "(address='auto')\n", (5648, 5664), False, 'import ray\n'), ((5682, 5708), 'os.path.abspath', 'os.path.abspath', (['data_path'], {}), '(data_path)\n', (5697, 5708), False, 'import os\n'), ((5727, 5755), 'os.path.abspath', 'os.path.abspath', (['params_path'], {}), '(params_path)\n', (5742, 5755), False, 'import os\n'), ((5778, 5810), 'os.path.abspath', 'os.path.abspath', (['experiment_path'], {}), '(experiment_path)\n', (5793, 5810), False, 'import os\n'), ((5828, 5855), 'os.path.abspath', 'os.path.abspath', (['model_path'], {}), '(model_path)\n', (5843, 5855), False, 'import os\n'), ((5886, 5924), 'numpy.array', 'np.array', (["['2019-11-09', '2019-11-11']"], {}), "(['2019-11-09', '2019-11-11'])\n", (5894, 5924), True, 'import numpy as np\n'), ((1507, 1554), 'models.deepSVDD.DeepSVDD', 'DeepSVDD', (["self.cfg['objective']", "self.cfg['nu']"], {}), "(self.cfg['objective'], self.cfg['nu'])\n", (1515, 1554), False, 'from models.deepSVDD import DeepSVDD\n'), ((2301, 2311), 'itertools.tee', 'tee', (['dates'], {}), '(dates)\n', (2304, 2311), False, 'from itertools import tee\n'), ((6296, 6327), 'ray.tune.grid_search', 'tune.grid_search', (['[True, False]'], {}), '([True, False])\n', (6312, 6327), False, 'from ray import tune\n'), ((6376, 6407), 'random.seed', 'random.seed', (["exp_config['seed']"], {}), "(exp_config['seed'])\n", (6387, 6407), False, 'import random\n'), ((6416, 6450), 'numpy.random.seed', 'np.random.seed', (["exp_config['seed']"], {}), "(exp_config['seed'])\n", (6430, 6450), True, 'import numpy 
as np\n'), ((6459, 6496), 'torch.manual_seed', 'torch.manual_seed', (["exp_config['seed']"], {}), "(exp_config['seed'])\n", (6476, 6496), False, 'import torch\n'), ((6505, 6547), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (["exp_config['seed']"], {}), "(exp_config['seed'])\n", (6527, 6547), False, 'import torch\n'), ((4922, 4945), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (4932, 4945), False, 'import click\n'), ((4996, 5019), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (5006, 5019), False, 'import click\n'), ((5154, 5177), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (5164, 5177), False, 'import click\n'), ((5316, 5339), 'click.Path', 'click.Path', ([], {'exists': '(True)'}), '(exists=True)\n', (5326, 5339), False, 'import click\n'), ((3783, 3812), 'sklearn.metrics.roc_auc_score', 'roc_auc_score', (['labels', 'scores'], {}), '(labels, scores)\n', (3796, 3812), False, 'from sklearn.metrics import roc_auc_score, precision_recall_curve, auc\n'), ((3982, 4004), 'sklearn.metrics.auc', 'auc', (['recall', 'precision'], {}), '(recall, precision)\n', (3985, 4004), False, 'from sklearn.metrics import roc_auc_score, precision_recall_curve, auc\n'), ((6140, 6165), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6163, 6165), False, 'import torch\n'), ((1089, 1130), 'os.path.abspath', 'os.path.abspath', (["self.params['data_path']"], {}), "(self.params['data_path'])\n", (1104, 1130), False, 'import os\n'), ((2558, 2599), 'os.path.abspath', 'os.path.abspath', (["self.params['data_path']"], {}), "(self.params['data_path'])\n", (2573, 2599), False, 'import os\n'), ((4524, 4567), 'os.path.join', 'os.path.join', (['checkpoint_dir', '"""results.pkl"""'], {}), "(checkpoint_dir, 'results.pkl')\n", (4536, 4567), False, 'import os\n'), ((4169, 4207), 'sklearn.metrics.precision_recall_curve', 'precision_recall_curve', (['labels', 'scores'], {}), '(labels, 
scores)\n', (4191, 4207), False, 'from sklearn.metrics import roc_auc_score, precision_recall_curve, auc\n')] |
# Licensed under the BSD 3-Clause License
# Copyright (C) 2021 GeospaceLab (geospacelab)
# Author: <NAME>, Space Physics and Astronomy, University of Oulu
__author__ = "<NAME>"
__copyright__ = "Copyright 2021, GeospaceLab"
__license__ = "BSD-3-Clause License"
__email__ = "<EMAIL>"
__docformat__ = "reStructureText"
import datetime
import numpy as np
import requests
import bs4
import pathlib
import re
import netCDF4 as nc
import cftime
import ftplib
from contextlib import closing
import geospacelab.toolbox.utilities.pydatetime as dttool
import geospacelab.toolbox.utilities.pylogging as mylog
import geospacelab.datahub.sources.wdc as wdc
from geospacelab import preferences as prf
class Downloader(object):
def __init__(self, dt_fr, dt_to, data_res=None, pole='N', data_file_root_dir=None):
self.dt_fr = dt_fr
self.dt_to = dt_to
self.done = False
if data_res is None:
data_res = 2 # in minutes
self.data_res = data_res
self.pole = pole
if data_file_root_dir is None:
self.data_file_root_dir = prf.datahub_data_root_dir / 'SuperDARN' / 'PotentialMap'
else:
self.data_file_root_dir = pathlib.Path(data_file_root_dir)
self.download()
def download(self):
diff_days = dttool.get_diff_days(self.dt_fr, self.dt_to)
for i in range(diff_days + 1):
dt1 = self.dt_fr + datetime.timedelta(days=i)
fn = '_'.join(['SuperDARN', 'POTMAP', str(self.data_res) + 'min', dt1.strftime('%Y%m%d'), self.pole]) + '.dat'
file_path = self.data_file_root_dir / dt1.strftime('%Y') / fn
self.save_to_netcdf(file_path)
def save_to_netcdf(self, file_path):
with open(file_path, 'r') as f:
text = f.read()
results = re.findall(
r'^\s*(\d+)\s*\[(\d+),(\d+)]\s*([-\d.]+)\s*' +
r'([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*([-\d.]+)\s*' +
r'([\S]+)',
text,
re.M
)
results = list(zip(*results))
nlat = 40
nlon = 180
ntime = len(results[0]) / nlon / nlat
if ntime != int(ntime):
raise ValueError
ntime = int(ntime)
mlat_arr = np.array(results[3]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
mlon_arr = np.array(results[4]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
EF_N_arr = np.array(results[5]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
EF_E_arr = np.array(results[6]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
v_N_arr = np.array(results[7]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
v_E_arr = np.array(results[8]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
phi_arr = np.array(results[9]).reshape([ntime, nlat, nlon], order='C').transpose((0, 2, 1)).astype(np.float32)
dts = np.array(results[10])[::nlon * nlat]
dts = [datetime.datetime.strptime(dtstr, "%Y-%m-%d/%H:%M:%S") for dtstr in dts]
time_array = np.array(cftime.date2num(dts, units='seconds since 1970-01-01 00:00:00.0'))
import aacgmv2
mlt_arr = np.empty_like(mlat_arr)
for i in range(ntime):
mlt1 = aacgmv2.convert_mlt(mlon_arr[i].flatten(), dts[i]).reshape((nlon, nlat))
mlt_arr[i, ::] = mlt1[::]
fp = pathlib.Path(file_path.with_suffix('.nc'))
fp.parent.resolve().mkdir(parents=True, exist_ok=True)
fnc = nc.Dataset(fp, 'w')
fnc.createDimension('UNIX_TIME', ntime)
fnc.createDimension('MLAT', nlat)
fnc.createDimension('MLON', nlon)
fnc.title = "SuperDARN Potential maps"
time = fnc.createVariable('UNIX_TIME', np.float64, ('UNIX_TIME',))
time.units = 'seconds since 1970-01-01 00:00:00.0'
time[::] = time_array[::]
mlat = fnc.createVariable('MLAT', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlat[::] = mlat_arr[::]
mlon = fnc.createVariable('MLON', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlon[::] = mlon_arr[::]
mlt = fnc.createVariable('MLT', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
mlt[::] = mlt_arr[::]
EF_N = fnc.createVariable('E_N', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
EF_N[::] = EF_N_arr[::]
EF_E = fnc.createVariable('E_E', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
EF_E[::] = EF_E_arr[::]
v_N = fnc.createVariable('v_i_N', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
v_N[::] = v_N_arr[::]
v_E = fnc.createVariable('v_i_E', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
v_E[::] = v_E_arr[::]
phi = fnc.createVariable('phi', np.float32, ('UNIX_TIME', 'MLON', 'MLAT'))
phi[::] = phi_arr[::]
print('From {} to {}.'.format(
datetime.datetime.utcfromtimestamp(time_array[0]),
datetime.datetime.utcfromtimestamp(time_array[-1]))
)
mylog.StreamLogger.info(
"The requested SuperDARN map potential data has been saved in the file {}.".format(fp))
fnc.close()
self.done = True
if __name__ == "__main__":
dt_fr1 = datetime.datetime(2016, 3, 15)
dt_to1 = datetime.datetime(2016, 3, 15)
Downloader(dt_fr1, dt_to1)
| [
"datetime.datetime",
"geospacelab.toolbox.utilities.pydatetime.get_diff_days",
"datetime.datetime.utcfromtimestamp",
"cftime.date2num",
"pathlib.Path",
"datetime.datetime.strptime",
"netCDF4.Dataset",
"datetime.timedelta",
"numpy.array",
"numpy.empty_like",
"re.findall"
] | [((5637, 5667), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(3)', '(15)'], {}), '(2016, 3, 15)\n', (5654, 5667), False, 'import datetime\n'), ((5681, 5711), 'datetime.datetime', 'datetime.datetime', (['(2016)', '(3)', '(15)'], {}), '(2016, 3, 15)\n', (5698, 5711), False, 'import datetime\n'), ((1307, 1351), 'geospacelab.toolbox.utilities.pydatetime.get_diff_days', 'dttool.get_diff_days', (['self.dt_fr', 'self.dt_to'], {}), '(self.dt_fr, self.dt_to)\n', (1327, 1351), True, 'import geospacelab.toolbox.utilities.pydatetime as dttool\n'), ((1204, 1236), 'pathlib.Path', 'pathlib.Path', (['data_file_root_dir'], {}), '(data_file_root_dir)\n', (1216, 1236), False, 'import pathlib\n'), ((1825, 2012), 're.findall', 're.findall', (["('^\\\\s*(\\\\d+)\\\\s*\\\\[(\\\\d+),(\\\\d+)]\\\\s*([-\\\\d.]+)\\\\s*' +\n '([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*'\n + '([\\\\S]+)')", 'text', 're.M'], {}), "('^\\\\s*(\\\\d+)\\\\s*\\\\[(\\\\d+),(\\\\d+)]\\\\s*([-\\\\d.]+)\\\\s*' +\n '([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*([-\\\\d.]+)\\\\s*'\n + '([\\\\S]+)', text, re.M)\n", (1835, 2012), False, 'import re\n'), ((3480, 3503), 'numpy.empty_like', 'np.empty_like', (['mlat_arr'], {}), '(mlat_arr)\n', (3493, 3503), True, 'import numpy as np\n'), ((3823, 3842), 'netCDF4.Dataset', 'nc.Dataset', (['fp', '"""w"""'], {}), "(fp, 'w')\n", (3833, 3842), True, 'import netCDF4 as nc\n'), ((1423, 1449), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': 'i'}), '(days=i)\n', (1441, 1449), False, 'import datetime\n'), ((3200, 3221), 'numpy.array', 'np.array', (['results[10]'], {}), '(results[10])\n', (3208, 3221), True, 'import numpy as np\n'), ((3256, 3310), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['dtstr', '"""%Y-%m-%d/%H:%M:%S"""'], {}), "(dtstr, '%Y-%m-%d/%H:%M:%S')\n", (3282, 3310), False, 'import datetime\n'), ((3363, 3428), 'cftime.date2num', 
'cftime.date2num', (['dts'], {'units': '"""seconds since 1970-01-01 00:00:00.0"""'}), "(dts, units='seconds since 1970-01-01 00:00:00.0')\n", (3378, 3428), False, 'import cftime\n'), ((5267, 5316), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['time_array[0]'], {}), '(time_array[0])\n', (5301, 5316), False, 'import datetime\n'), ((5334, 5384), 'datetime.datetime.utcfromtimestamp', 'datetime.datetime.utcfromtimestamp', (['time_array[-1]'], {}), '(time_array[-1])\n', (5368, 5384), False, 'import datetime\n'), ((2339, 2359), 'numpy.array', 'np.array', (['results[3]'], {}), '(results[3])\n', (2347, 2359), True, 'import numpy as np\n'), ((2463, 2483), 'numpy.array', 'np.array', (['results[4]'], {}), '(results[4])\n', (2471, 2483), True, 'import numpy as np\n'), ((2587, 2607), 'numpy.array', 'np.array', (['results[5]'], {}), '(results[5])\n', (2595, 2607), True, 'import numpy as np\n'), ((2711, 2731), 'numpy.array', 'np.array', (['results[6]'], {}), '(results[6])\n', (2719, 2731), True, 'import numpy as np\n'), ((2834, 2854), 'numpy.array', 'np.array', (['results[7]'], {}), '(results[7])\n', (2842, 2854), True, 'import numpy as np\n'), ((2957, 2977), 'numpy.array', 'np.array', (['results[8]'], {}), '(results[8])\n', (2965, 2977), True, 'import numpy as np\n'), ((3080, 3100), 'numpy.array', 'np.array', (['results[9]'], {}), '(results[9])\n', (3088, 3100), True, 'import numpy as np\n')] |
'''
Tools for generating fractals.
<NAME>, 2019
'''
import numpy;
import os;
import numba;
MAX_ITERATIONS=1000
NEXT_PLOT_NUM=0
# Wether or not to output information to the terminal when running.
PRINT_MESSAGES=True
# Have constantly updating filename
def NEXT_PLOT(suffix=''):
global NEXT_PLOT_NUM
NEXT_PLOT_NUM += 1
dot_suffix = ''
if not suffix == '':
dot_suffix = '.' + suffix
ret_val = 'output' + os.sep + 'output_' + str(NEXT_PLOT_NUM) + dot_suffix
if os.path.isfile(ret_val):
return NEXT_PLOT(suffix)
return ret_val
# Function for Mandelbrot sets.
@numba.jit(nopython=True)
def mandel(c, max_iter=MAX_ITERATIONS):
iterations = 0
z = 0 + 0j
while (((numpy.absolute(z)) < 2) and (iterations < max_iter)):
z = (z**2) + c
iterations = iterations + 1
return iterations
# Generic function template for Julia sets.
@numba.jit(nopython=True)
def julia(z, c, n, max_iter=MAX_ITERATIONS):
iterations = 0
while (((numpy.absolute(z)) < 2) and (iterations < max_iter)):
z = (z**n) + c
iterations = iterations + 1
return iterations
# Generator function for Julia set functions from given c, n.
def generate_julia(c, n):
return lambda z,m : julia(z, c, n, max_iter=m)
# Generate an image (numpy array) of iterations for a given size, function, range, and maximum iterations.
def generate_fractal(width, height, func, xmin=-2, xmax=1, ymin=-1, ymax=1, max_iter=MAX_ITERATIONS):
if PRINT_MESSAGES:
print('Generating...', end='', flush=True)
image = numpy.zeros((width, height), dtype=numpy.int64)
progress_mask = numpy.ones((11), dtype=bool)
ret_val = {}
xvals = numpy.linspace(xmin, xmax, width)
yvals = numpy.linspace(ymin, ymax, height)
for py in range(height):
for px in range(width):
c = xvals[px] + (1j*yvals[py])
image[px,height - py - 1] = func(c,max_iter)
if PRINT_MESSAGES:
prog = int(100 * (py/height))
if (prog % 10 == 0) and (progress_mask[int(prog/10)]):
print(str(prog) + '%...', end='', flush=True)
progress_mask[int(prog/10)] = False
ret_val['image'] = image
ret_val['depth'] = max_iter + 1
if PRINT_MESSAGES:
print('done.')
return ret_val
# generate a greyscale palette of colours for a given number of levels.
def generate_greyscale_palette(levels):
palette = []
for i in numpy.linspace(0,255,levels,dtype=int):
shade = hex(i)[2:]
if len(shade) == 1:
shade='0' + shade
colour = '#' + shade + shade + shade
palette.append(colour)
return palette
# Show a tkinter window containing a given image.
def show_image(image_data, palette=None):
import tkinter
image = image_data['image']
width = image.shape[0]
height = image.shape[1]
if (palette == None):
palette = generate_greyscale_palette(image_data['depth'])
window = tkinter.Tk()
window.title("Fractal Image")
canvas = tkinter.Canvas(window, width=width, height=height)
canvas.pack(expand=tkinter.YES, fill=tkinter.BOTH)
for py in range(height):
for px in range(width):
canvas.create_oval(px,py,px+1,(py+1),fill=palette[image[px,py]], outline=palette[image[px,py]])
window.mainloop()
# Plot our image with matplotlib
def show_image_matplotlib(image_data, palette=None):
import matplotlib.pyplot
image = numpy.flipud(numpy.rot90(image_data['image']))
matplotlib.pyplot.axis('off')
if palette == None:
matplotlib.pyplot.imshow(image)
else:
matplotlib.pyplot.imshow(image, cmap=palette)
matplotlib.pyplot.show()
# Plot our image with matplotlib to a file
def write_image_matplotlib(image_data, palette=None, filename=None):
import matplotlib.pyplot
image = numpy.flipud(numpy.rot90(image_data['image']))
if filename == None:
filename = NEXT_PLOT('png')
if PRINT_MESSAGES:
print('Writing ' + filename + '...', end='', flush=True)
matplotlib.pyplot.axis('off')
if palette == None:
matplotlib.pyplot.imshow(image)
else:
matplotlib.pyplot.imshow(image, cmap=palette)
matplotlib.pyplot.savefig(filename, bbox_inches='tight')
if PRINT_MESSAGES:
print('done.')
# Dump image to PGM file
def write_image(image_data, palette=None, filename=None):
image = image_data['image']
if filename == None:
filename = NEXT_PLOT('pgm')
if PRINT_MESSAGES:
print('Writing ' + filename + '...', end='', flush=True)
width = image.shape[0]
height = image.shape[1]
depth = image_data['depth']
f = open(filename, 'w')
# Write PGM header
f.write('P2\n')
f.write('# Generated by https://github.com/owainkenwayucl/Fractals\n')
f.write(str(width) + ' ' + str(height) + '\n')
f.write(str(depth) + '\n')
# Write PGM
for j in range(height):
for i in range(width):
f.write(str(image[i,j]) + ' ')
f.write('\n')
f.close()
if PRINT_MESSAGES:
print('done.') | [
"numpy.ones",
"numpy.absolute",
"os.path.isfile",
"tkinter.Canvas",
"numpy.zeros",
"numba.jit",
"numpy.linspace",
"tkinter.Tk",
"numpy.rot90"
] | [((610, 634), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (619, 634), False, 'import numba\n'), ((903, 927), 'numba.jit', 'numba.jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (912, 927), False, 'import numba\n'), ((499, 522), 'os.path.isfile', 'os.path.isfile', (['ret_val'], {}), '(ret_val)\n', (513, 522), False, 'import os\n'), ((1576, 1623), 'numpy.zeros', 'numpy.zeros', (['(width, height)'], {'dtype': 'numpy.int64'}), '((width, height), dtype=numpy.int64)\n', (1587, 1623), False, 'import numpy\n'), ((1644, 1670), 'numpy.ones', 'numpy.ones', (['(11)'], {'dtype': 'bool'}), '(11, dtype=bool)\n', (1654, 1670), False, 'import numpy\n'), ((1702, 1735), 'numpy.linspace', 'numpy.linspace', (['xmin', 'xmax', 'width'], {}), '(xmin, xmax, width)\n', (1716, 1735), False, 'import numpy\n'), ((1748, 1782), 'numpy.linspace', 'numpy.linspace', (['ymin', 'ymax', 'height'], {}), '(ymin, ymax, height)\n', (1762, 1782), False, 'import numpy\n'), ((2468, 2509), 'numpy.linspace', 'numpy.linspace', (['(0)', '(255)', 'levels'], {'dtype': 'int'}), '(0, 255, levels, dtype=int)\n', (2482, 2509), False, 'import numpy\n'), ((2999, 3011), 'tkinter.Tk', 'tkinter.Tk', ([], {}), '()\n', (3009, 3011), False, 'import tkinter\n'), ((3059, 3109), 'tkinter.Canvas', 'tkinter.Canvas', (['window'], {'width': 'width', 'height': 'height'}), '(window, width=width, height=height)\n', (3073, 3109), False, 'import tkinter\n'), ((3511, 3543), 'numpy.rot90', 'numpy.rot90', (["image_data['image']"], {}), "(image_data['image'])\n", (3522, 3543), False, 'import numpy\n'), ((3905, 3937), 'numpy.rot90', 'numpy.rot90', (["image_data['image']"], {}), "(image_data['image'])\n", (3916, 3937), False, 'import numpy\n'), ((722, 739), 'numpy.absolute', 'numpy.absolute', (['z'], {}), '(z)\n', (736, 739), False, 'import numpy\n'), ((1005, 1022), 'numpy.absolute', 'numpy.absolute', (['z'], {}), '(z)\n', (1019, 1022), False, 'import numpy\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.