code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
#%% Import the libraries for SER; the librosa is the main one for audio analysis
import pandas as pd
import numpy as np
import librosa
import librosa.display
import matplotlib.pyplot as plt
import json
from PyEMD import EMD
import opensmile
import warnings
warnings.filterwarnings(action="ignore")
#%% Prepare the downloaded IEMOCAP dataset for SER
#Deal with the IEMOCAP metadata to gain the file paths of the improvised speeches in the four desired emotion classes
#Read the metadata about the dataset
# NOTE: hard-coded local path -- adjust per machine before running
df_descri=pd.read_csv("/Users/talen/Documents/Datasets/IEMOCAP/iemocap_metadata.csv")
#Only select the improvised samples to create a description file
df_impro=df_descri[df_descri["method"]=="impro"]
#Replace the default file-path with the local file-path after downloaded in the author's computer
new_paths = []
#Gain the old paths from "path" column of the description file
old_paths = df_impro["path"].map(str)
for old_path in old_paths:
    #Extract the file names (last component of the original path)
    path_list = str(old_path).split("/")
    file_name = path_list[-1]
    #Concatenate the filename with the local folder path and saved in new_paths variable
    new_path = "/Users/talen/Documents/Datasets/IEMOCAP/Data/" + file_name
    new_paths.append(new_path)
#Replace the old paths with the new paths in the description file
#(new_paths was built in row order of df_impro, so positional assignment is safe)
df_impro.loc[:,["path"]]=new_paths
#Select the data about the angry, happy, sad, neutral emotions from the description file
df_ang = df_impro[df_impro["emotion"]=="ang"]
df_hap = df_impro[df_impro["emotion"]=="hap"]
df_sad = df_impro[df_impro["emotion"]=="sad"]
df_neu = df_impro[df_impro["emotion"]=="neu"]
#Concatenate the data of the four emotions
df_final = pd.concat([df_ang, df_hap, df_sad, df_neu])
df_final.shape
#%% Define the functions for preprocessing and feature extraction
#create a variable for restoring the LLDs, smfcc, their corresponding emotion classes,
#and the number of segments per signal
#emotion classes: ang -> 0, hap -> 1, sad -> 2, neu -> 3
# Feature store keyed by IEMOCAP session number ("1".."5"); each sub-dict
# accumulates one entry per sample: segment-level LLDs, log-mel-spectrograms,
# MFCCs, and the numeric emotion label (ang->0, hap->1, sad->2, neu->3).
Audio_features_final = {
    "1": {
        "LLDs":[], "Log-Mel-spectram":[], "smfcc":[], "class":[] #n_segments_per_signal
    },
    "2": {
        "LLDs":[], "Log-Mel-spectram":[], "smfcc":[], "class":[]
    },
    "3": {
        "LLDs":[], "Log-Mel-spectram":[], "smfcc":[], "class":[]
    },
    "4": {
        "LLDs":[], "Log-Mel-spectram":[], "smfcc":[], "class":[]
    },
    "5": {
        "LLDs":[], "Log-Mel-spectram":[], "smfcc":[], "class":[]
    }
}
#Sampling and quantising the raw audio file into the digital signal
def Sampling_and_quantising(file_path):
    """Digitise an audio file at a fixed 16 kHz sampling rate.

    Returns the (signal, sampling_rate) pair produced by librosa.load.
    """
    digital_signal, sample_rate = librosa.load(file_path, sr=16000)
    return digital_signal, sample_rate
#Framing and windowing the audio signal with specified window-width and sliding
def Framing_signal(signal, window_width, sliding, sr):
    """Split *signal* into overlapping frames and apply a Hamming window.

    window_width and sliding are in milliseconds; both are converted to
    sample counts using the sampling rate *sr*.

    Returns (windowed_frames, framesize): a list of windowed 1-D frames
    and the frame length in samples.
    """
    frame_len = int((window_width / 1000) * sr)
    hop_len = int((sliding / 1000) * sr)
    raw_frames = librosa.util.frame(signal, frame_length=frame_len, hop_length=hop_len, axis=0)
    # Hamming window matching the frame length
    hamming = librosa.filters.get_window(window="ham", Nx=frame_len, fftbins=True)
    # Taper every frame with the same window
    tapered_frames = [frame * hamming for frame in raw_frames]
    return tapered_frames, frame_len
#Create signal segments for computing the segment-level features
def Frames_to_segments(frames, n_frames_per_segment, slide):
    """Group consecutive frames into fixed-size, flattened segments.

    Parameters
    ----------
    frames : sequence of 1-D arrays
        Frame-level coefficients (all frames the same length).
    n_frames_per_segment : int
        Number of consecutive frames per segment.
    slide : int
        Hop (in frames) between consecutive segment starts.

    Returns
    -------
    np.ndarray of shape (n_segments, n_frames_per_segment * frame_len)
        Each row is the concatenation of one segment's frames.
    """
    n_segments = int((len(frames) - n_frames_per_segment) / slide + 1)
    segments_list = []
    for s in range(n_segments):
        segment_start = s * slide
        seg = frames[segment_start : segment_start + n_frames_per_segment]
        # Concatenate the whole slice in one call: the original pairwise
        # concatenation loop re-copied the accumulator each iteration
        # (quadratic in segment length); np.concatenate is linear.
        segments_list.append(np.concatenate(seg))
    return np.array(segments_list)
# Extract the LLDs of ComParE_2016 by openSMILE, except for the mfcc-related data
def Gain_LLDs(signal, sr, session, emotion):
    """Extract ComParE_2016 functionals with openSMILE, excluding MFCC columns.

    session and emotion are accepted for interface symmetry with the other
    feature extractors but are not used here.

    Returns the remaining feature values as a plain Python list.
    """
    extractor = opensmile.Smile(
        feature_set=opensmile.FeatureSet.ComParE_2016,
        feature_level=opensmile.FeatureLevel.Functionals,
    )
    feature_table = extractor.process_signal(signal, sr)
    # Drop every column whose name mentions "mfcc"
    mfcc_columns = [name for name in feature_table.columns if "mfcc" in name]
    feature_table.drop(labels=mfcc_columns, axis=1, inplace=True)
    # Single-row result: serialise the values for JSON storage
    return feature_table.values[0].tolist()
# Compute the Log-Mel-spectrogram
def Gain_Log_Mel_spectrogram(signal, sr, n_fft, n_mels, window, win_length, hop_length,
                             save_folder, session, emotion, count):
    """Compute a log-mel-spectrogram (rows = time) and return it as nested lists.

    save_folder, session, emotion and count are kept for interface
    compatibility with an earlier version that archived spectrogram images;
    they are unused in the current implementation.
    """
    # 1. Magnitude spectrogram from the short-time Fourier transform
    magnitudes = np.abs(librosa.core.stft(y=signal, n_fft=n_fft, window=window,
                                         win_length=win_length, hop_length=hop_length))
    # 2. Apply the mel filter bank to the magnitude spectrogram
    mel_power = librosa.feature.melspectrogram(sr=sr, S=magnitudes, n_mels=n_mels)
    # 3. Convert power to decibels (logarithmic scale)
    log_mel = librosa.power_to_db(mel_power)
    # 4. Transpose so rows are time steps, then serialise for JSON storage
    return log_mel.T.tolist()
# Compute the MFCCs
def Gain_MFCCs(signal, sr, n_fft, n_mels, n_mfcc, session, emotion):
    """Compute MFCCs (rows = time) and return them as nested lists.

    session and emotion are unused; kept for interface symmetry with the
    other feature extractors.
    """
    coefficients = librosa.feature.mfcc(y=signal, sr=sr, n_mfcc=n_mfcc, dct_type=2,
                                        norm="ortho", n_mels=n_mels, n_fft=n_fft)
    # Transpose so each row is a time step, then serialise for JSON storage
    return coefficients.T.tolist()
#%% Calculate segment-level features from raw audio signals
def Extract_segment_level_features(current_row):
    """Compute and store all segment-level features for one cohort row.

    Channel 1 extracts openSMILE LLDs from the raw signal; channel 2
    removes an EMD-derived trend, then extracts log-mel-spectrograms and
    MFCCs per segment.  Results (plus the numeric emotion label) are
    appended into the module-level Audio_features_final dict under the
    row's session key.  Uses module-level counters ``h`` (sample index)
    and ``count1`` (segment index).
    """
    # NOTE(review): n_segments_per_signal is declared global but never
    # assigned or read in this function -- appears to be a leftover.
    global h, count1, n_segments_per_signal
    # Gain the audio file path, together with its emotion classes, and session path
    audiofile = str(current_row["path"])
    emotion = str(current_row["emotion"])
    session = str(current_row["session"])
    # Sampling and quantising the raw audio into the digital signal
    signal, sr = Sampling_and_quantising(audiofile)
    # Channel 1: extracting features from original signal
    # Framing and windowing, with Hamming-window of 25ms and slide of 10ms
    frames1, _ = Framing_signal(signal=signal, window_width=25, sliding=10, sr=sr)
    # Create segments with the size of 30 frames and 10 frames overlapping (or 20 frames sliding)
    segments1 = Frames_to_segments(frames=frames1, n_frames_per_segment=30, slide=20)
    #Check the length of the audio signal
    audio_length = librosa.get_duration(signal, sr=sr)
    No_samples_in_3s = sr * 3
    signals_aligned = []
    if audio_length < 3:
        #Padding the signal if it is less than 3s
        # NOTE(review): the padded signal is appended to signals_aligned but
        # that list is never read afterwards, so the padding has no effect on
        # the extracted features -- confirm whether it should replace `signal`.
        signal_padded = librosa.util.fix_length(data=signal, size=No_samples_in_3s)
        signals_aligned.append(signal_padded)
    # Extract the segment-level LLDs with their functionals from the original signal by openSMILE
    LLDs = []
    for segment1 in segments1:
        llds = Gain_LLDs(signal=segment1, sr=sr, session=session, emotion=emotion)
        LLDs.append(llds)
    Audio_features_final[session]["LLDs"].append(LLDs)
    # Map the emotion string to its numeric class: ang->0, hap->1, sad->2, neu->3
    if emotion == "ang":
        Audio_features_final[session]["class"].append(0)
    elif emotion == "hap":
        Audio_features_final[session]["class"].append(1)
    elif emotion == "sad":
        Audio_features_final[session]["class"].append(2)
    else:
        Audio_features_final[session]["class"].append(3)
    # Channel 2: extracting features from the signal with trend removed
    # Remove signal trend by Zero-crossing detection method
    # 1.Use Empirical Mode Decomposition (EMD) method to decompose the signal into IMFs
    emd = EMD()
    IMFs = emd.emd(signal, max_imf=9)
    # 2. Select the IMFs that satisfy particular criterion
    # 2.1 Criterion analysis: ZCR_IMF_i / ZCR_IMF_1 < 0.01 => N_ZC_IMF_i / N_ZC_IMF_1 < 0.01, when the IMFs has the same time length
    IMFs_selected_index = []
    # 2.2 The zero crossing of the first IMF
    R_imf_1 = librosa.core.zero_crossings(IMFs[0], pad=False, zero_pos=True)
    n_R_imf_1 = sum(R_imf_1)
    # NOTE(review): if the first IMF has no zero crossings, n_R_imf_1 is 0
    # and the ratio below raises ZeroDivisionError.
    for i in range(1, len(IMFs)):
        R_imf_i = librosa.core.zero_crossings(IMFs[i], pad=False, zero_pos=True)
        n_R_imf_i = sum(R_imf_i)
        # 2.3 Check the criterion
        if n_R_imf_i / n_R_imf_1 < 0.01:
            IMFs_selected_index.append(i)
    # 3. Derive the signal trend based on the selected IMFs
    T = IMFs[0]
    # NOTE(review): this loop adds IMFs[1]..IMFs[len(IMFs_selected_index)-1]
    # sequentially rather than the IMFs at the *selected* indices
    # (IMFs[IMFs_selected_index[...]]), and T always starts from IMFs[0]
    # even when index 0 was not selected -- confirm the intended trend definition.
    for index in range(1, len(IMFs_selected_index)):
        T = T + IMFs[index]
    # 4. Subtract the signal trend from the original signal
    signal_trend_removed = signal - T
    # Framing and windowing, with Hamming-window of 25ms and slide of 10ms
    frames2, framesize = Framing_signal(signal=signal_trend_removed, window_width=25, sliding=10, sr=sr)
    # print("There are {0} frames, each frame contains {1} samples".format(len(frames),framesize))
    # Create segments with the size of 30 frames and 10 frames overlapping (or 20 frames sliding)
    segments2 = Frames_to_segments(frames=frames2, n_frames_per_segment=30, slide=20)
    # Record the length of segments for time-step definition of RNN
    # Audio_features_final[session]["n_segments_per_signal"].append(len(segments2))
    # Extract segment-level spectrograms and SMFCCs from the signal with trend removed
    SPECTRO = []
    SMFCC = []
    for segment2 in segments2:
        # Calculate the segment-level log-mel-spectrograms by 1024 point STFT and 40 mel-filter banks
        spectro = Gain_Log_Mel_spectrogram(signal=segment2, sr=sr, n_fft=1024, n_mels=40, window=False, win_length=None,
                                           hop_length=None,
                                           save_folder="Spectrograms_segment", session=session, emotion=emotion,
                                           count=count1)
        count1 += 1
        SPECTRO.append(spectro)
        # Calculate the smfcc
        smfcc = Gain_MFCCs(signal=segment2, sr=sr, n_fft=1024, n_mels=40, n_mfcc=14, session=session, emotion=emotion)
        SMFCC.append(smfcc)
    Audio_features_final[session]["Log-Mel-spectram"].append(SPECTRO)
    Audio_features_final[session]["smfcc"].append(SMFCC)
    print("Sample {} is done!".format(h))
    h += 1
# Iterate all the speech samples
h = 1       # running sample counter, incremented inside Extract_segment_level_features
count1 = 1  # running segment counter (was used to name archived spectrogram images)
for r in range(len(df_final)):
    row = df_final.iloc[r, :]
    Extract_segment_level_features(row)
# Store the Audio_features file locally (hard-coded output path)
data_path = "/Users/talen/Desktop/Audio_features_final.json"
with open(data_path, "w") as fp:
    json.dump(Audio_features_final, fp, indent=4)
#%%
# with open(data_path, "w") as fp:
# json.dump(Audio_features, fp, indent=4)
#%% Calculate utterance-level log-mel-spectrograms from the audio signals with trend removed
# count2 = 1
#
# for r in range(len(df_final)):
# row = df_final.iloc[r,:]
#
# #Gain the audio file path, together with its emotion classes, and session path
# audio = str(row["path"])
# emotion_class = str(row["emotion"])
# session_num = str(row["session"])
#
# #Sampling and quantising the raw audio into the digital signal
# signal_ori, sampling_rate = Sampling_and_quantising(audio)
#
# # Visualise the original signal
# librosa.display.waveplot(signal_ori, sr=sampling_rate)
# plt.xlabel("Time")
# plt.ylabel("Amplitude")
# plt.title("Raw signal wave")
#
# #Remove signal trend by Zero-crossing detection method
# #1.Use Empirical Mode Decomposition (EMD) method to decompose the signal into IMFs
# emd = EMD()
# IMFs = emd.emd(signal_ori)
#
# #2. Select the IMFs that satisfy particular criterion
# #2.1 Criterion analysis: ZCR_IMF_i / ZCR_IMF_1 < 0.01 => N_ZC_IMF_i / N_ZC_IMF_1 < 0.01, when the IMFs has the same time length
#
# IMFs_selected_index = []
#
# #2.2 The zero crossing of the first IMF
# R_imf_1 = librosa.core.zero_crossings(IMFs[0], pad=False, zero_pos=True)
# n_R_imf_1 = sum(R_imf_1)
#
# for i in range(1, len(IMFs)):
# R_imf_i = librosa.core.zero_crossings(IMFs[i], pad=False, zero_pos=True)
# n_R_imf_i = sum(R_imf_i)
#
# #2.3 Check the criterion
# if n_R_imf_i / n_R_imf_1 < 0.01:
# IMFs_selected_index.append(i)
#
# #3. Derive the signal trend based on the selected IMFs
# T = IMFs[0]
#
# for index in range(1, len(IMFs_selected_index)):
# T = T + IMFs[index]
#
# #4. Subtract the signal trend from the original signal
# signal_final = signal_ori - T
#
# librosa.display.waveplot(signal_final, sr=sampling_rate)
# plt.xlabel("Time")
# plt.ylabel("Amplitude")
# plt.title("Signal wave with trend removed")
# plt.show()
#
# #Calculate the utterance-level log-mel-spectrograms by 512 point FFT and 40 mel-filter banks
# #The Hamming window, with the length of 25ms and shift of 10ms, is also applied
# #The frequency range in filter banks is set as 3000~8000
# window_length = int((window_width / 1000) * sampling_rate)
# window_shift = int((sliding / 1000) * sampling_rate)
#
# Gain_Log_Mel_spectrogram(signal=signal_final, sr=sampling_rate, n_fft=512, n_mels=40,
# window="ham", win_length=window_length, hop_length=window_shift,
# save_folder="Spectrograms_utterance", session=session_num, emotion=emotion_class,
# count=count2)
| [
"numpy.abs",
"pandas.read_csv",
"librosa.filters.get_window",
"librosa.power_to_db",
"librosa.feature.mfcc",
"librosa.feature.melspectrogram",
"librosa.get_duration",
"PyEMD.EMD",
"librosa.util.fix_length",
"pandas.concat",
"json.dump",
"librosa.util.frame",
"opensmile.Smile",
"librosa.loa... | [((258, 298), 'warnings.filterwarnings', 'warnings.filterwarnings', ([], {'action': '"""ignore"""'}), "(action='ignore')\n", (281, 298), False, 'import warnings\n'), ((517, 592), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/talen/Documents/Datasets/IEMOCAP/iemocap_metadata.csv"""'], {}), "('/Users/talen/Documents/Datasets/IEMOCAP/iemocap_metadata.csv')\n", (528, 592), True, 'import pandas as pd\n'), ((1681, 1724), 'pandas.concat', 'pd.concat', (['[df_ang, df_hap, df_sad, df_neu]'], {}), '([df_ang, df_hap, df_sad, df_neu])\n', (1690, 1724), True, 'import pandas as pd\n'), ((2709, 2742), 'librosa.load', 'librosa.load', (['audiofile'], {'sr': '(16000)'}), '(audiofile, sr=16000)\n', (2721, 2742), False, 'import librosa\n'), ((3072, 3148), 'librosa.util.frame', 'librosa.util.frame', (['signal'], {'frame_length': 'framesize', 'hop_length': 'slide', 'axis': '(0)'}), '(signal, frame_length=framesize, hop_length=slide, axis=0)\n', (3090, 3148), False, 'import librosa\n'), ((3190, 3258), 'librosa.filters.get_window', 'librosa.filters.get_window', ([], {'window': '"""ham"""', 'Nx': 'framesize', 'fftbins': '(True)'}), "(window='ham', Nx=framesize, fftbins=True)\n", (3216, 3258), False, 'import librosa\n'), ((4132, 4155), 'numpy.array', 'np.array', (['segments_list'], {}), '(segments_list)\n', (4140, 4155), True, 'import numpy as np\n'), ((4318, 4434), 'opensmile.Smile', 'opensmile.Smile', ([], {'feature_set': 'opensmile.FeatureSet.ComParE_2016', 'feature_level': 'opensmile.FeatureLevel.Functionals'}), '(feature_set=opensmile.FeatureSet.ComParE_2016,\n feature_level=opensmile.FeatureLevel.Functionals)\n', (4333, 4434), False, 'import opensmile\n'), ((5236, 5342), 'librosa.core.stft', 'librosa.core.stft', ([], {'y': 'signal', 'n_fft': 'n_fft', 'window': 'window', 'win_length': 'win_length', 'hop_length': 'hop_length'}), '(y=signal, n_fft=n_fft, window=window, win_length=\n win_length, hop_length=hop_length)\n', (5253, 5342), False, 'import librosa\n'), 
((5356, 5368), 'numpy.abs', 'np.abs', (['stft'], {}), '(stft)\n', (5362, 5368), True, 'import numpy as np\n'), ((5495, 5562), 'librosa.feature.melspectrogram', 'librosa.feature.melspectrogram', ([], {'sr': 'sr', 'S': 'spectrogram', 'n_mels': 'n_mels'}), '(sr=sr, S=spectrogram, n_mels=n_mels)\n', (5525, 5562), False, 'import librosa\n'), ((5655, 5691), 'librosa.power_to_db', 'librosa.power_to_db', (['mel_spectrogram'], {}), '(mel_spectrogram)\n', (5674, 5691), False, 'import librosa\n'), ((6699, 6810), 'librosa.feature.mfcc', 'librosa.feature.mfcc', ([], {'y': 'signal', 'sr': 'sr', 'n_mfcc': 'n_mfcc', 'dct_type': '(2)', 'norm': '"""ortho"""', 'n_mels': 'n_mels', 'n_fft': 'n_fft'}), "(y=signal, sr=sr, n_mfcc=n_mfcc, dct_type=2, norm=\n 'ortho', n_mels=n_mels, n_fft=n_fft)\n", (6719, 6810), False, 'import librosa\n'), ((8085, 8120), 'librosa.get_duration', 'librosa.get_duration', (['signal'], {'sr': 'sr'}), '(signal, sr=sr)\n', (8105, 8120), False, 'import librosa\n'), ((9240, 9245), 'PyEMD.EMD', 'EMD', ([], {}), '()\n', (9243, 9245), False, 'from PyEMD import EMD\n'), ((9568, 9630), 'librosa.core.zero_crossings', 'librosa.core.zero_crossings', (['IMFs[0]'], {'pad': '(False)', 'zero_pos': '(True)'}), '(IMFs[0], pad=False, zero_pos=True)\n', (9595, 9630), False, 'import librosa\n'), ((12091, 12136), 'json.dump', 'json.dump', (['Audio_features_final', 'fp'], {'indent': '(4)'}), '(Audio_features_final, fp, indent=4)\n', (12100, 12136), False, 'import json\n'), ((8276, 8335), 'librosa.util.fix_length', 'librosa.util.fix_length', ([], {'data': 'signal', 'size': 'No_samples_in_3s'}), '(data=signal, size=No_samples_in_3s)\n', (8299, 8335), False, 'import librosa\n'), ((9713, 9775), 'librosa.core.zero_crossings', 'librosa.core.zero_crossings', (['IMFs[i]'], {'pad': '(False)', 'zero_pos': '(True)'}), '(IMFs[i], pad=False, zero_pos=True)\n', (9740, 9775), False, 'import librosa\n'), ((4031, 4070), 'numpy.concatenate', 'np.concatenate', (['(segment_tempo, seg[l])'], {}), 
'((segment_tempo, seg[l]))\n', (4045, 4070), True, 'import numpy as np\n')] |
import os
import numpy as np
import SimpleITK as sitk
from torch.utils.data.dataset import Dataset
# Root of the segmentation dataset on the cluster (hard-coded path)
ROOT_DIR = "/pylon5/ac5616p/Data/HeartSegmentationProject/CAP_challenge/CAP_challenge_training_set/test2/"
#ROOT_DIR = "/Users/yukeyi/Desktop/"
trainFileName = "trainfiles.txt"  # manifest: one training sample directory per line
testFileName = "testfiles.txt"    # manifest: one test sample directory per line
# Work relative to the data root so the relative registration paths below resolve
os.chdir(ROOT_DIR)
class Image:
    """Wrap one elastix registration pair and its transformix filter.

    *reg_dir* is a directory name of the form "<fixed>-<moving>"; the
    moving image and the stored transform parameter files are read from
    paths relative to ROOT_DIR (the cwd after the os.chdir above).
    """
    def __init__(self, reg_dir):
        # self.image_list = []
        # self.aseg_list = []
        self.reg_dir = reg_dir
        self.parse_images()
        self.parse_registration()
        # self.make_xarray()
    def parse_images(self):
        # Directory name encodes "<fixed>-<moving>"; only the moving image is loaded here
        images = self.reg_dir.split("-")
        assert (len(images) == 2)
        self.moving_image = sitk.ReadImage(ROOT_DIR + "Brain2NIFI/" + images[1] + "/norm.nii")
    def parse_registration(self):
        # Chain the two stored transform parameter maps into one transformix filter
        param0 = sitk.ReadParameterFile("BrainParameterMapsTuned/" + self.reg_dir + "/TransformParameters.0.txt")
        param1 = sitk.ReadParameterFile("BrainParameterMapsTuned/" + self.reg_dir + "/TransformParameters.1.txt")
        #param2 = sitk.ReadParameterFile("BrainParameterMapsTuned/" + self.reg_dir + "/TransformParameters.2.txt")
        transformixImageFilter = sitk.TransformixImageFilter()
        transformixImageFilter.LogToConsoleOff()
        transformixImageFilter.AddTransformParameterMap(param0)
        transformixImageFilter.AddTransformParameterMap(param1)
        #transformixImageFilter.AddTransformParameterMap(param2)
        self.transformixImageFilter = transformixImageFilter
    def register_points(self, test_file='test.pts'):
        # Transform the fixed-space points in *test_file*; transformix writes its
        # result to outputpoints.txt in the cwd, so any stale output is removed first
        if os.path.exists('outputpoints.txt'):
            os.remove('outputpoints.txt')
        self.transformixImageFilter.SetFixedPointSetFileName(test_file)
        self.transformixImageFilter.SetMovingImage(self.moving_image)
        # prints 1/2 are progress markers around the (slow) Execute call
        print(1)
        self.transformixImageFilter.Execute()
        print(2)
class BrainImageDataset(Dataset):
    """Torch dataset yielding (fixed_array, moving_array, fixed_name, moving_name).

    When KNN filtering is enabled (KNN != 0), samples whose name is absent
    from name_list_KNN are returned as empty placeholders instead of being
    loaded from disk.
    """
    def __init__(self, dirList, register_pairs, KNN, name_list_KNN):
        self.data = dirList
        self.register_pairs = register_pairs
        self.name_list_KNN = name_list_KNN
        self.KNN = KNN

    def __getitem__(self, index):
        fixed_name = self.data[index]
        # Guard clause: skip samples excluded by the KNN name filter
        if self.KNN != 0 and fixed_name not in self.name_list_KNN:
            return (np.array([]), np.array([]), fixed_name, np.array([]))
        fixed_array = get_data(ROOT_DIR + "Brain2NIFI/" + fixed_name + "/norm.nii")
        moving_name = self.register_pairs[fixed_name]
        moving_array = get_data(ROOT_DIR + "Brain2NIFI/" + moving_name + "/norm.nii")
        return (fixed_array, moving_array, fixed_name, moving_name)

    def __len__(self):
        return len(self.data)
def get_data(path):
    """Load a NIfTI volume and return its voxel data divided by 256,
    wrapped in an extra leading (channel) axis."""
    volume = sitk.ReadImage(path)
    voxels = sitk.GetArrayFromImage(volume)
    return np.array([voxels]) / 256
def load_Directory(is_train, register_pairs):
    """Return the sample directories listed in the train/test manifest that
    also have a registration pair.

    Parameters
    ----------
    is_train : bool
        Read the training manifest when true, otherwise the test manifest.
    register_pairs : dict
        Mapping of fixed-image names to moving-image names; only manifest
        entries present as keys are kept.

    Fixes a resource leak in the original: the manifest file handle was
    opened but never closed; a ``with`` block now guarantees closure.
    """
    # Pick the manifest file with the train/test data paths
    manifest = trainFileName if is_train else testFileName
    with open(ROOT_DIR + manifest, "r") as fh:
        data_directory = [line.strip() for line in fh.readlines()]
    # Keep only directories that have a registered pair
    return [name for name in data_directory if name in register_pairs]
def load_pairs():
    """Scan the tuned parameter-map tree and map fixed -> moving image names.

    Each sub-directory is named "<fixed>-<moving>"; later pairs with the
    same fixed image overwrite earlier ones.
    """
    register_pairs = {}
    for _root, subdirectories, _files in os.walk(ROOT_DIR + "BrainParameterMapsTuned"):
        for pair_name in subdirectories:
            parts = pair_name.split("-")
            assert (len(parts) == 2)
            register_pairs[parts[0]] = parts[1]
    return register_pairs
return register_pairs | [
"os.remove",
"SimpleITK.ReadImage",
"os.walk",
"os.path.exists",
"SimpleITK.GetArrayFromImage",
"SimpleITK.ReadParameterFile",
"numpy.array",
"SimpleITK.TransformixImageFilter",
"os.chdir"
] | [((308, 326), 'os.chdir', 'os.chdir', (['ROOT_DIR'], {}), '(ROOT_DIR)\n', (316, 326), False, 'import os\n'), ((2898, 2918), 'SimpleITK.ReadImage', 'sitk.ReadImage', (['path'], {}), '(path)\n', (2912, 2918), True, 'import SimpleITK as sitk\n'), ((3547, 3592), 'os.walk', 'os.walk', (["(ROOT_DIR + 'BrainParameterMapsTuned')"], {}), "(ROOT_DIR + 'BrainParameterMapsTuned')\n", (3554, 3592), False, 'import os\n'), ((689, 755), 'SimpleITK.ReadImage', 'sitk.ReadImage', (["(ROOT_DIR + 'Brain2NIFI/' + images[1] + '/norm.nii')"], {}), "(ROOT_DIR + 'Brain2NIFI/' + images[1] + '/norm.nii')\n", (703, 755), True, 'import SimpleITK as sitk\n'), ((808, 908), 'SimpleITK.ReadParameterFile', 'sitk.ReadParameterFile', (["('BrainParameterMapsTuned/' + self.reg_dir + '/TransformParameters.0.txt')"], {}), "('BrainParameterMapsTuned/' + self.reg_dir +\n '/TransformParameters.0.txt')\n", (830, 908), True, 'import SimpleITK as sitk\n'), ((922, 1022), 'SimpleITK.ReadParameterFile', 'sitk.ReadParameterFile', (["('BrainParameterMapsTuned/' + self.reg_dir + '/TransformParameters.1.txt')"], {}), "('BrainParameterMapsTuned/' + self.reg_dir +\n '/TransformParameters.1.txt')\n", (944, 1022), True, 'import SimpleITK as sitk\n'), ((1167, 1196), 'SimpleITK.TransformixImageFilter', 'sitk.TransformixImageFilter', ([], {}), '()\n', (1194, 1196), True, 'import SimpleITK as sitk\n'), ((1565, 1599), 'os.path.exists', 'os.path.exists', (['"""outputpoints.txt"""'], {}), "('outputpoints.txt')\n", (1579, 1599), False, 'import os\n'), ((1613, 1642), 'os.remove', 'os.remove', (['"""outputpoints.txt"""'], {}), "('outputpoints.txt')\n", (1622, 1642), False, 'import os\n'), ((2946, 2975), 'SimpleITK.GetArrayFromImage', 'sitk.GetArrayFromImage', (['heart'], {}), '(heart)\n', (2968, 2975), True, 'import SimpleITK as sitk\n'), ((2484, 2496), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2492, 2496), True, 'import numpy as np\n'), ((2498, 2510), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2506, 2510), 
True, 'import numpy as np\n'), ((2517, 2529), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2525, 2529), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
# @author: Elie
#%% ==========================================================
# Import libraries set library params
# ============================================================
import pandas as pd
import numpy as np
import os
pd.options.mode.chained_assignment = None #Pandas warnings off
#plotting
import seaborn as sns
from matplotlib import pyplot as plt
from matplotlib.ticker import Locator
import matplotlib as mpl
# stats
from scipy import stats
#set matplotlib rcparams
# NOTE: values are given as strings ("False", "white", "5"); matplotlib's
# rcParam validators coerce them to the proper bool/str/float types.
# Both the mpl.rcParams and plt.rcParams views are set for safety, though
# they refer to the same configuration object.
mpl.rcParams['savefig.transparent'] = "False"
mpl.rcParams['axes.facecolor'] = "white"
mpl.rcParams['figure.facecolor'] = "white"
mpl.rcParams['font.size'] = "5"
plt.rcParams['savefig.transparent'] = "False"
plt.rcParams['axes.facecolor'] = "white"
plt.rcParams['figure.facecolor'] = "white"
plt.rcParams['font.size'] = "5"
#%% ==========================================================
# define these feature/headers here in case the headers
# are out of order in input files (often the case)
# ============================================================
# Canonical column orders used to re-order each input table before merging
# (input files may have these headers shuffled).
# 96 trinucleotide SNV context counts plus the "sample" key column
snv_categories = ["sample",
                  "A[C>A]A", "A[C>A]C", "A[C>A]G", "A[C>A]T",
                  "C[C>A]A", "C[C>A]C", "C[C>A]G", "C[C>A]T",
                  "G[C>A]A", "G[C>A]C", "G[C>A]G", "G[C>A]T",
                  "T[C>A]A", "T[C>A]C", "T[C>A]G", "T[C>A]T",
                  "A[C>G]A", "A[C>G]C", "A[C>G]G", "A[C>G]T",
                  "C[C>G]A", "C[C>G]C", "C[C>G]G", "C[C>G]T",
                  "G[C>G]A", "G[C>G]C", "G[C>G]G", "G[C>G]T",
                  "T[C>G]A", "T[C>G]C", "T[C>G]G", "T[C>G]T",
                  "A[C>T]A", "A[C>T]C", "A[C>T]G", "A[C>T]T",
                  "C[C>T]A", "C[C>T]C", "C[C>T]G", "C[C>T]T",
                  "G[C>T]A", "G[C>T]C", "G[C>T]G", "G[C>T]T",
                  "T[C>T]A", "T[C>T]C", "T[C>T]G", "T[C>T]T",
                  "A[T>A]A", "A[T>A]C", "A[T>A]G", "A[T>A]T",
                  "C[T>A]A", "C[T>A]C", "C[T>A]G", "C[T>A]T",
                  "G[T>A]A", "G[T>A]C", "G[T>A]G", "G[T>A]T",
                  "T[T>A]A", "T[T>A]C", "T[T>A]G", "T[T>A]T",
                  "A[T>C]A", "A[T>C]C", "A[T>C]G", "A[T>C]T",
                  "C[T>C]A", "C[T>C]C", "C[T>C]G", "C[T>C]T",
                  "G[T>C]A", "G[T>C]C", "G[T>C]G", "G[T>C]T",
                  "T[T>C]A", "T[T>C]C", "T[T>C]G", "T[T>C]T",
                  "A[T>G]A", "A[T>G]C", "A[T>G]G", "A[T>G]T",
                  "C[T>G]A", "C[T>G]C", "C[T>G]G", "C[T>G]T",
                  "G[T>G]A", "G[T>G]C", "G[T>G]G", "G[T>G]T",
                  "T[T>G]A", "T[T>G]C", "T[T>G]G", "T[T>G]T"]
# 83 indel context counts plus the "sample" key column
indel_categories = ["sample",
                    "1:Del:C:0", "1:Del:C:1", "1:Del:C:2", "1:Del:C:3", "1:Del:C:4", "1:Del:C:5",
                    "1:Del:T:0", "1:Del:T:1", "1:Del:T:2", "1:Del:T:3", "1:Del:T:4", "1:Del:T:5",
                    "1:Ins:C:0", "1:Ins:C:1", "1:Ins:C:2", "1:Ins:C:3", "1:Ins:C:4", "1:Ins:C:5",
                    "1:Ins:T:0", "1:Ins:T:1", "1:Ins:T:2", "1:Ins:T:3", "1:Ins:T:4", "1:Ins:T:5",
                    "2:Del:R:0", "2:Del:R:1", "2:Del:R:2", "2:Del:R:3", "2:Del:R:4", "2:Del:R:5",
                    "3:Del:R:0", "3:Del:R:1", "3:Del:R:2", "3:Del:R:3", "3:Del:R:4", "3:Del:R:5",
                    "4:Del:R:0", "4:Del:R:1", "4:Del:R:2", "4:Del:R:3", "4:Del:R:4", "4:Del:R:5",
                    "5:Del:R:0", "5:Del:R:1", "5:Del:R:2", "5:Del:R:3", "5:Del:R:4", "5:Del:R:5",
                    "2:Ins:R:0", "2:Ins:R:1", "2:Ins:R:2", "2:Ins:R:3", "2:Ins:R:4", "2:Ins:R:5",
                    "3:Ins:R:0", "3:Ins:R:1", "3:Ins:R:2", "3:Ins:R:3", "3:Ins:R:4", "3:Ins:R:5",
                    "4:Ins:R:0", "4:Ins:R:1", "4:Ins:R:2", "4:Ins:R:3", "4:Ins:R:4", "4:Ins:R:5",
                    "5:Ins:R:0", "5:Ins:R:1", "5:Ins:R:2", "5:Ins:R:3", "5:Ins:R:4", "5:Ins:R:5",
                    "2:Del:M:1", "3:Del:M:1", "3:Del:M:2", "4:Del:M:1", "4:Del:M:2", "4:Del:M:3",
                    "5:Del:M:1", "5:Del:M:2", "5:Del:M:3", "5:Del:M:4", "5:Del:M:5"]
# Binned copy-number feature counts plus the "sample" key column
cnv_categories = ["sample",
                  "BCper10mb_0", "BCper10mb_1", "BCper10mb_2", "BCper10mb_3",
                  "CN_0", "CN_1", "CN_2", "CN_3", "CN_4", "CN_5", "CN_6", "CN_7", "CN_8",
                  "CNCP_0", "CNCP_1", "CNCP_2", "CNCP_3", "CNCP_4", "CNCP_5", "CNCP_6", "CNCP_7",
                  "BCperCA_0", "BCperCA_1", "BCperCA_2", "BCperCA_3", "BCperCA_4", "BCperCA_5",
                  "SegSize_0", "SegSize_1", "SegSize_2", "SegSize_3", "SegSize_4", "SegSize_5",
                  "SegSize_6", "SegSize_7", "SegSize_8", "SegSize_9", "SegSize_10",
                  "CopyFraction_0", "CopyFraction_1", "CopyFraction_2", "CopyFraction_3", "CopyFraction_4",
                  "CopyFraction_5", "CopyFraction_6"]
#%% ==========================================================
# make concat sig dataframe
# ============================================================
def load_data(snv_counts_path, indel_counts_path, cnv_counts_path):
    """Read the SNV, indel and CNV feature tables and merge them into one
    frame keyed by "sample" (left-joined on the SNV table, NaNs filled with 0)."""
    def _read_counts(path, ordered_columns):
        # Re-order columns to the canonical order and force sample IDs to strings
        counts = pd.read_csv(path, sep='\t', low_memory=False)
        counts = counts[ordered_columns]
        counts["sample"] = counts["sample"].astype(str)
        return counts

    snv = _read_counts(snv_counts_path, snv_categories)
    indel = _read_counts(indel_counts_path, indel_categories)
    cnv = _read_counts(cnv_counts_path, cnv_categories)
    combined = pd.merge(snv, indel, on="sample", how='left').fillna(0)
    return pd.merge(combined, cnv, on="sample", how='left').reset_index(drop=True)
#%% ==========================================================
# need to seperately handle minor ticks on sym log axis. Taken from:
# https://stackoverflow.com/questions/20470892/how-to-place-minor-ticks-on-symlog-scale
# ============================================================
class MinorSymLogLocator(Locator):
    """Place minor ticks between the major ticks of a symlog-scaled axis.

    Matplotlib does not position minor ticks on symlog axes by itself, so
    this locator derives them dynamically from the current major ticks:
    10 subdivisions in the linear region around zero, 9 in the log regions.
    """
    def __init__(self, linthresh):
        """Store the linear-threshold of the symlog scale; ticks are placed
        linearly for |x| < linthresh and logarithmically otherwise."""
        self.linthresh = linthresh

    def __call__(self):
        """Return the locations of the minor ticks."""
        majorlocs = self.axis.get_majorticklocs()
        minor_positions = []
        for prev, curr in zip(majorlocs[:-1], majorlocs[1:]):
            span = curr - prev
            # Interval midpoint inside the linear region -> 10 subdivisions,
            # otherwise treat it as a decade and use 9.
            divisions = 10 if abs(prev + span / 2) < self.linthresh else 9
            step = span / divisions
            # Skip the first point: it coincides with the major tick itself
            minor_positions.extend(np.arange(prev, curr, step)[1:])
        return self.raise_if_exceeds(np.array(minor_positions))

    def tick_values(self, vmin, vmax):
        raise NotImplementedError('Cannot get tick locations for a '
                                  '%s type.' % type(self))
#%% ==========================================================
# some other fig settings that are same for all 4 figs.
# ============================================================
def common_settings(fig, ax):
    """Apply the sizing, tick, spine and x-label settings shared by all
    four panels, and return the modified (fig, ax) pair."""
    fig.set_size_inches(3.25, 1.1)
    ax.set_xlabel("")
    # Small labels, short ticks and tight padding keep the 3.25-inch panel readable
    ax.tick_params(axis='y', which="major", length=2, labelsize=6, pad=1, reset=False)
    ax.tick_params(axis='x', which="major", length=2, labelsize=7, pad=0, reset=False)
    sns.despine(ax=ax, top=True, right=True, left=False, bottom=False)
    ax.yaxis.set_label_coords(-0.08, 0.5)
    # Fixed category order: DRwt, ATMd, CDK12d, BRCA2d, MMRd, Bladder
    ax.set_xticklabels(["DRwt", "ATMd", "CDK12d", "BRCA2d", "MMRd", "Bladder"])
    fig.subplots_adjust(left=0.11, right=0.995, top=0.91, bottom=0.1)
    return fig, ax
#%% ==========================================================
# get paths, load data and make df with each file merged
# ============================================================
#file from paths relative to this script
# Repo root is resolved as two directories above this script's location
rootdir = os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
figdir = os.path.join(rootdir, "figures", "fig1")
datadir = os.path.join(rootdir, "data")
cohort_data = os.path.join(datadir, "cohort.tsv")
snv_features = os.path.join(datadir, "tns_features.tsv")
ndl_features = os.path.join(datadir, "ndl_features.tsv")
cnv_features = os.path.join(datadir, "cnv_features.tsv")
sigs = load_data(snv_features, ndl_features, cnv_features)
sample_labels = pd.read_csv(cohort_data, sep='\t', low_memory=False)
# sample_labels = sample_labels[sample_labels['manual check for usefullness (0=Fail)'] != 0]
# Left-merge keeps every labelled sample even if its signature features are missing
df = pd.merge(sample_labels, sigs, how='left', on='sample')
# df.loc[df["cancer"] == "BC", 'primary_label'] = "Bladder"
#%% ==========================================================
# some calculation/manipulations needed for the graphs
# ============================================================
def make_df_for_graph(original_df):
    """Derive per-sample mutation rates and CNV fractions, ordered for plotting.

    Returns a copy of *original_df* with snv_rate, indel_rate and
    not_max_cn_fraction columns added, raw feature columns dropped, and rows
    ordered DRwt, ATMd, CDK12d, BRCA2d, MMRd, Bladder (the x-axis order).
    """
    rates = original_df.copy(deep=True)
    rates.loc[rates["cancer"] == "BC", 'label'] = "Bladder"
    # SNV/indel counts are normalized by 45 Mb, the size of the MedExome panel.
    rates["total_snv"] = rates[snv_categories[1:]].sum(axis=1)
    rates["snv_rate"] = rates["total_snv"] / 45
    rates["total_indel"] = rates[indel_categories[1:]].sum(axis=1)
    rates["indel_rate"] = rates["total_indel"] / 45
    # Fraction of the genome NOT at the modal copy number = fraction with CNV.
    copy_fraction_cols = ["CopyFraction_%d" % i for i in range(7)]
    rates['max_cn_fraction'] = rates[copy_fraction_cols].max(axis=1)
    rates['not_max_cn_fraction'] = 1 - rates['max_cn_fraction']
    # Drop raw feature columns; only the derived quantities are graphed.
    rates = (rates.drop(columns=snv_categories[1:])
                  .drop(columns=indel_categories[1:])
                  .drop(columns=cnv_categories[1:]))
    # Concatenate the groups in fixed display order (also convenient for stats).
    group_order = ("DRwt", "ATMd", "CDK12d", "BRCA2d", "MMRd", "Bladder")
    pieces = [rates.query('(label == "%s")' % group) for group in group_order]
    return pd.concat(pieces).reset_index(drop=True)
# Dataframe consumed by all four comparison figures below.
graphdata = make_df_for_graph(df)
#%% ==========================================================
# setup graph values
# ============================================================
#%% values for annot lines for snv and indel
# Geometry (in axes-fraction coordinates) for the p-value brackets drawn
# above each boxplot, plus shared plot styling constants.
h = 0.015          # height of the vertical "nubs" on each bracket
text_pos = 0.013   # gap between a bracket and its p-value text
space = 0.09       # vertical spacing between stacked brackets
bladder_line = 0.99
top_line = bladder_line            # y of the topmost bracket
# One bracket per comparison group, stacked downward from the top.
# (The original repeated these four assignments twice; the duplicate
# copy-paste block has been removed — values are unchanged.)
mmrd_line = top_line - (space * 1)
brca2_line = top_line - (space * 2)
cdk12_line = top_line - (space * 3)
atm_line = top_line - (space * 4)
psize = 5          # font size for p-value annotations
fliersize = 1.9
swarmsize = 1.8
total_values = 6   # number of groups on the x-axis
position = 1 / (total_values * 2)  # half-width of one group, axes fraction
label_fs = 7
ytickfs = 6
ticklength = 2
lineweight = 0.7
#%% ==========================================================
# SNV comparisons
# ============================================================
# ---- SNV mutation-rate comparison (box + swarm, symlog y-axis) ----
ax = sns.boxplot(x="label", y="snv_rate", data=graphdata, linewidth=0.7, fliersize=fliersize)
ax = sns.swarmplot(x="label", y="snv_rate", data=graphdata, color=".3", size=swarmsize)
ax.set_yscale('symlog', linthresh=1, base=10)
xtickslocs = ax.get_xticks()
yaxis = plt.gca().yaxis
yaxis.set_minor_locator(MinorSymLogLocator(10e0))
ax.set_ylim(0, 300)
ax.set_ylabel("SNVs/Mb", labelpad=1, fontsize=label_fs)
common_settings(plt.gcf(), ax)
# Mann-Whitney U of each group against the DRwt reference, annotated as a
# bracket + p-value.  The five copy-pasted stanzas of the original are folded
# into one loop; (y, bracket-end multiplier, group label) per comparison.
a = graphdata.query('(label == "DRwt")')["snv_rate"]
for y, xmult, group in [(atm_line, 3, "ATMd"),
                        (cdk12_line, 5, "CDK12d"),
                        (brca2_line, 7, "BRCA2d"),
                        (mmrd_line, 9, "MMRd"),
                        (bladder_line, 11, "Bladder")]:
    xstart = position
    xend = xmult * position
    b = graphdata.query(f'(label == "{group}")')["snv_rate"]
    u, p = stats.mannwhitneyu(a, b)
    annot = "{:.1e}".format(p)
    ax.plot([xstart, xstart, xend, xend], [y, y + h, y + h, y], lw=lineweight, c=".3", transform=ax.transAxes, clip_on=False)
    ax.text((xstart + xend) * .5, y + h + text_pos, f"p = {annot}", ha='center', va='baseline', color="k", fontsize=psize, linespacing=0, transform=ax.transAxes)
plt.savefig(os.path.join(figdir, "snv_rate_comparison.png"), dpi=500)
plt.savefig(os.path.join(figdir, "snv_rate_comparison.pdf"))
plt.close()
#%% ==========================================================
# Indel comparisons
# ============================================================
# ---- Indel mutation-rate comparison (box + swarm, symlog y-axis) ----
ax = sns.boxplot(x="label", y="indel_rate", data=graphdata, linewidth=0.7, fliersize=fliersize)
ax = sns.swarmplot(x="label", y="indel_rate", data=graphdata, color=".3", size=swarmsize)
ax.set_yscale('symlog', linthresh=1, base=10)
xtickslocs = ax.get_xticks()
yaxis = plt.gca().yaxis
yaxis.set_minor_locator(MinorSymLogLocator(10e0))
ax.set_ylim(0, 60)
ax.set_ylabel("INDELs/Mb", labelpad=1, fontsize=label_fs)
common_settings(plt.gcf(), ax)
# Mann-Whitney U of each group against the DRwt reference, annotated as a
# bracket + p-value (copy-pasted stanzas folded into one loop).
a = graphdata.query('(label == "DRwt")')["indel_rate"]
for y, xmult, group in [(atm_line, 3, "ATMd"),
                        (cdk12_line, 5, "CDK12d"),
                        (brca2_line, 7, "BRCA2d"),
                        (mmrd_line, 9, "MMRd"),
                        (bladder_line, 11, "Bladder")]:
    xstart = position
    xend = xmult * position
    b = graphdata.query(f'(label == "{group}")')["indel_rate"]
    u, p = stats.mannwhitneyu(a, b)
    annot = "{:.1e}".format(p)
    ax.plot([xstart, xstart, xend, xend], [y, y + h, y + h, y], lw=lineweight, c=".3", transform=ax.transAxes, clip_on=False)
    ax.text((xstart + xend) * .5, y + h + text_pos, f"p = {annot}", ha='center', va='baseline', color="k", fontsize=psize, linespacing=0, transform=ax.transAxes)
plt.savefig(os.path.join(figdir, "indel_rate_comparison.png"), dpi=500)
plt.savefig(os.path.join(figdir, "indel_rate_comparison.pdf"))
plt.close()
#%% ==========================================================
# Ploidy comparisons
# ============================================================
# ---- Estimated-ploidy comparison (box + swarm, linear y-axis) ----
ax = sns.boxplot(x="label", y="ploidy_estimate_sequenza", data=graphdata, linewidth=0.7, fliersize=fliersize)
ax = sns.swarmplot(x="label", y="ploidy_estimate_sequenza", data=graphdata, color=".2", size=swarmsize)
ax.set_ylim(1.0, 9)
ax.set_yticks([x.round(2) for x in np.arange(1.0, 8.1, 1.0)])
ax.set_yticklabels([x.round(2) for x in np.arange(1.0, 8.1, 1.0)])
ax.set_ylabel("Estimated Ploidy", labelpad=1, fontsize=label_fs)
common_settings(plt.gcf(), ax)
# Mann-Whitney U of each group against the DRwt reference, annotated as a
# bracket + p-value (copy-pasted stanzas folded into one loop).
a = graphdata.query('(label == "DRwt")')["ploidy_estimate_sequenza"]
for y, xmult, group in [(atm_line, 3, "ATMd"),
                        (cdk12_line, 5, "CDK12d"),
                        (brca2_line, 7, "BRCA2d"),
                        (mmrd_line, 9, "MMRd"),
                        (bladder_line, 11, "Bladder")]:
    xstart = position
    xend = xmult * position
    b = graphdata.query(f'(label == "{group}")')["ploidy_estimate_sequenza"]
    u, p = stats.mannwhitneyu(a, b)
    annot = "{:.1e}".format(p)
    ax.plot([xstart, xstart, xend, xend], [y, y + h, y + h, y], lw=lineweight, c=".3", transform=ax.transAxes, clip_on=False)
    ax.text((xstart + xend) * .5, y + h + text_pos, f"p = {annot}", ha='center', va='baseline', color="k", fontsize=psize, linespacing=0, transform=ax.transAxes)
plt.savefig(os.path.join(figdir, "ploidy_estimate_comparison.png"), dpi=500)
plt.savefig(os.path.join(figdir, "ploidy_estimate_comparison.pdf"))
plt.close()
#%% ==========================================================
# Allelic imbalance comparisons
# ============================================================
# ---- Allelic-imbalance (fraction of genome with CNV) comparison ----
ax = sns.boxplot(x="label", y="not_max_cn_fraction", data=graphdata, linewidth=0.7, fliersize=fliersize)
ax = sns.swarmplot(x="label", y="not_max_cn_fraction", data=graphdata, color=".3", size=swarmsize)
ax.set_ylim(0, 1.2)
ax.set_yticks([x.round(2) for x in np.arange(0.0, 1.1, 0.2)])
ax.set_yticklabels([x.round(2) for x in np.arange(0.0, 1.1, 0.2)])
ax.set_ylabel("Fraction with CNV", labelpad=1, fontsize=label_fs)
common_settings(plt.gcf(), ax)
# Mann-Whitney U of each group against the DRwt reference, annotated as a
# bracket + p-value (copy-pasted stanzas folded into one loop).
a = graphdata.query('(label == "DRwt")')["not_max_cn_fraction"]
for y, xmult, group in [(atm_line, 3, "ATMd"),
                        (cdk12_line, 5, "CDK12d"),
                        (brca2_line, 7, "BRCA2d"),
                        (mmrd_line, 9, "MMRd"),
                        (bladder_line, 11, "Bladder")]:
    xstart = position
    xend = xmult * position
    b = graphdata.query(f'(label == "{group}")')["not_max_cn_fraction"]
    u, p = stats.mannwhitneyu(a, b)
    annot = "{:.1e}".format(p)
    ax.plot([xstart, xstart, xend, xend], [y, y + h, y + h, y], lw=lineweight, c=".3", transform=ax.transAxes, clip_on=False)
    ax.text((xstart + xend) * .5, y + h + text_pos, f"p = {annot}", ha='center', va='baseline', color="k", fontsize=psize, linespacing=0, transform=ax.transAxes)
plt.savefig(os.path.join(figdir, "allelic_imbalance_comparison.png"), dpi=500)
plt.savefig(os.path.join(figdir, "allelic_imbalance_comparison.pdf"))
plt.close()
| [
"pandas.read_csv",
"scipy.stats.mannwhitneyu",
"pandas.merge",
"matplotlib.pyplot.close",
"os.path.dirname",
"seaborn.swarmplot",
"seaborn.despine",
"seaborn.boxplot",
"numpy.array",
"numpy.arange",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.gcf",
"os.path.join",
"pandas.concat"
] | [((7272, 7312), 'os.path.join', 'os.path.join', (['rootdir', '"""figures"""', '"""fig1"""'], {}), "(rootdir, 'figures', 'fig1')\n", (7284, 7312), False, 'import os\n'), ((7323, 7352), 'os.path.join', 'os.path.join', (['rootdir', '"""data"""'], {}), "(rootdir, 'data')\n", (7335, 7352), False, 'import os\n'), ((7367, 7402), 'os.path.join', 'os.path.join', (['datadir', '"""cohort.tsv"""'], {}), "(datadir, 'cohort.tsv')\n", (7379, 7402), False, 'import os\n'), ((7418, 7459), 'os.path.join', 'os.path.join', (['datadir', '"""tns_features.tsv"""'], {}), "(datadir, 'tns_features.tsv')\n", (7430, 7459), False, 'import os\n'), ((7475, 7516), 'os.path.join', 'os.path.join', (['datadir', '"""ndl_features.tsv"""'], {}), "(datadir, 'ndl_features.tsv')\n", (7487, 7516), False, 'import os\n'), ((7532, 7573), 'os.path.join', 'os.path.join', (['datadir', '"""cnv_features.tsv"""'], {}), "(datadir, 'cnv_features.tsv')\n", (7544, 7573), False, 'import os\n'), ((7650, 7702), 'pandas.read_csv', 'pd.read_csv', (['cohort_data'], {'sep': '"""\t"""', 'low_memory': '(False)'}), "(cohort_data, sep='\\t', low_memory=False)\n", (7661, 7702), True, 'import pandas as pd\n'), ((7801, 7855), 'pandas.merge', 'pd.merge', (['sample_labels', 'sigs'], {'how': '"""left"""', 'on': '"""sample"""'}), "(sample_labels, sigs, how='left', on='sample')\n", (7809, 7855), True, 'import pandas as pd\n'), ((10569, 10661), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""label"""', 'y': '"""snv_rate"""', 'data': 'graphdata', 'linewidth': '(0.7)', 'fliersize': 'fliersize'}), "(x='label', y='snv_rate', data=graphdata, linewidth=0.7,\n fliersize=fliersize)\n", (10580, 10661), True, 'import seaborn as sns\n'), ((10663, 10750), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""label"""', 'y': '"""snv_rate"""', 'data': 'graphdata', 'color': '""".3"""', 'size': 'swarmsize'}), "(x='label', y='snv_rate', data=graphdata, color='.3', size=\n swarmsize)\n", (10676, 10750), True, 'import seaborn as sns\n'), ((11166, 11190), 
'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (11184, 11190), False, 'from scipy import stats\n'), ((11599, 11623), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (11617, 11623), False, 'from scipy import stats\n'), ((12032, 12056), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (12050, 12056), False, 'from scipy import stats\n'), ((12462, 12486), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (12480, 12486), False, 'from scipy import stats\n'), ((12899, 12923), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (12917, 12923), False, 'from scipy import stats\n'), ((13352, 13363), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (13361, 13363), True, 'from matplotlib import pyplot as plt\n'), ((13516, 13610), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""label"""', 'y': '"""indel_rate"""', 'data': 'graphdata', 'linewidth': '(0.7)', 'fliersize': 'fliersize'}), "(x='label', y='indel_rate', data=graphdata, linewidth=0.7,\n fliersize=fliersize)\n", (13527, 13610), True, 'import seaborn as sns\n'), ((13612, 13701), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""label"""', 'y': '"""indel_rate"""', 'data': 'graphdata', 'color': '""".3"""', 'size': 'swarmsize'}), "(x='label', y='indel_rate', data=graphdata, color='.3', size=\n swarmsize)\n", (13625, 13701), True, 'import seaborn as sns\n'), ((14121, 14145), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (14139, 14145), False, 'from scipy import stats\n'), ((14556, 14580), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (14574, 14580), False, 'from scipy import stats\n'), ((14991, 15015), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (15009, 15015), False, 'from scipy import stats\n'), ((15423, 15447), 'scipy.stats.mannwhitneyu', 
'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (15441, 15447), False, 'from scipy import stats\n'), ((15862, 15886), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (15880, 15886), False, 'from scipy import stats\n'), ((16319, 16330), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (16328, 16330), True, 'from matplotlib import pyplot as plt\n'), ((16484, 16592), 'seaborn.boxplot', 'sns.boxplot', ([], {'x': '"""label"""', 'y': '"""ploidy_estimate_sequenza"""', 'data': 'graphdata', 'linewidth': '(0.7)', 'fliersize': 'fliersize'}), "(x='label', y='ploidy_estimate_sequenza', data=graphdata,\n linewidth=0.7, fliersize=fliersize)\n", (16495, 16592), True, 'import seaborn as sns\n'), ((16594, 16696), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""label"""', 'y': '"""ploidy_estimate_sequenza"""', 'data': 'graphdata', 'color': '""".2"""', 'size': 'swarmsize'}), "(x='label', y='ploidy_estimate_sequenza', data=graphdata,\n color='.2', size=swarmsize)\n", (16607, 16696), True, 'import seaborn as sns\n'), ((17135, 17159), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (17153, 17159), False, 'from scipy import stats\n'), ((17584, 17608), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (17602, 17608), False, 'from scipy import stats\n'), ((18033, 18057), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (18051, 18057), False, 'from scipy import stats\n'), ((18479, 18503), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (18497, 18503), False, 'from scipy import stats\n'), ((18932, 18956), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (18950, 18956), False, 'from scipy import stats\n'), ((19399, 19410), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (19408, 19410), True, 'from matplotlib import pyplot as plt\n'), ((19575, 19679), 'seaborn.boxplot', 
'sns.boxplot', ([], {'x': '"""label"""', 'y': '"""not_max_cn_fraction"""', 'data': 'graphdata', 'linewidth': '(0.7)', 'fliersize': 'fliersize'}), "(x='label', y='not_max_cn_fraction', data=graphdata, linewidth=\n 0.7, fliersize=fliersize)\n", (19586, 19679), True, 'import seaborn as sns\n'), ((19680, 19778), 'seaborn.swarmplot', 'sns.swarmplot', ([], {'x': '"""label"""', 'y': '"""not_max_cn_fraction"""', 'data': 'graphdata', 'color': '""".3"""', 'size': 'swarmsize'}), "(x='label', y='not_max_cn_fraction', data=graphdata, color=\n '.3', size=swarmsize)\n", (19693, 19778), True, 'import seaborn as sns\n'), ((20206, 20230), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (20224, 20230), False, 'from scipy import stats\n'), ((20650, 20674), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (20668, 20674), False, 'from scipy import stats\n'), ((21094, 21118), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (21112, 21118), False, 'from scipy import stats\n'), ((21535, 21559), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (21553, 21559), False, 'from scipy import stats\n'), ((21983, 22007), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['a', 'b'], {}), '(a, b)\n', (22001, 22007), False, 'from scipy import stats\n'), ((22454, 22465), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (22463, 22465), True, 'from matplotlib import pyplot as plt\n'), ((4323, 4379), 'pandas.read_csv', 'pd.read_csv', (['snv_counts_path'], {'sep': '"""\t"""', 'low_memory': '(False)'}), "(snv_counts_path, sep='\\t', low_memory=False)\n", (4334, 4379), True, 'import pandas as pd\n'), ((4476, 4534), 'pandas.read_csv', 'pd.read_csv', (['indel_counts_path'], {'sep': '"""\t"""', 'low_memory': '(False)'}), "(indel_counts_path, sep='\\t', low_memory=False)\n", (4487, 4534), True, 'import pandas as pd\n'), ((4639, 4695), 'pandas.read_csv', 'pd.read_csv', 
(['cnv_counts_path'], {'sep': '"""\t"""', 'low_memory': '(False)'}), "(cnv_counts_path, sep='\\t', low_memory=False)\n", (4650, 4695), True, 'import pandas as pd\n'), ((6701, 6767), 'seaborn.despine', 'sns.despine', ([], {'ax': 'ax', 'top': '(True)', 'right': '(True)', 'left': '(False)', 'bottom': '(False)'}), '(ax=ax, top=True, right=True, left=False, bottom=False)\n', (6712, 6767), True, 'import seaborn as sns\n'), ((10830, 10839), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (10837, 10839), True, 'from matplotlib import pyplot as plt\n'), ((10990, 10999), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (10997, 10999), True, 'from matplotlib import pyplot as plt\n'), ((13233, 13280), 'os.path.join', 'os.path.join', (['figdir', '"""snv_rate_comparison.png"""'], {}), "(figdir, 'snv_rate_comparison.png')\n", (13245, 13280), False, 'import os\n'), ((13303, 13350), 'os.path.join', 'os.path.join', (['figdir', '"""snv_rate_comparison.pdf"""'], {}), "(figdir, 'snv_rate_comparison.pdf')\n", (13315, 13350), False, 'import os\n'), ((13781, 13790), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (13788, 13790), True, 'from matplotlib import pyplot as plt\n'), ((13941, 13950), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (13948, 13950), True, 'from matplotlib import pyplot as plt\n'), ((16196, 16245), 'os.path.join', 'os.path.join', (['figdir', '"""indel_rate_comparison.png"""'], {}), "(figdir, 'indel_rate_comparison.png')\n", (16208, 16245), False, 'import os\n'), ((16268, 16317), 'os.path.join', 'os.path.join', (['figdir', '"""indel_rate_comparison.pdf"""'], {}), "(figdir, 'indel_rate_comparison.pdf')\n", (16280, 16317), False, 'import os\n'), ((16927, 16936), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (16934, 16936), True, 'from matplotlib import pyplot as plt\n'), ((19266, 19320), 'os.path.join', 'os.path.join', (['figdir', '"""ploidy_estimate_comparison.png"""'], {}), "(figdir, 'ploidy_estimate_comparison.png')\n", (19278, 19320), 
False, 'import os\n'), ((19343, 19397), 'os.path.join', 'os.path.join', (['figdir', '"""ploidy_estimate_comparison.pdf"""'], {}), "(figdir, 'ploidy_estimate_comparison.pdf')\n", (19355, 19397), False, 'import os\n'), ((20008, 20017), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (20015, 20017), True, 'from matplotlib import pyplot as plt\n'), ((22317, 22373), 'os.path.join', 'os.path.join', (['figdir', '"""allelic_imbalance_comparison.png"""'], {}), "(figdir, 'allelic_imbalance_comparison.png')\n", (22329, 22373), False, 'import os\n'), ((22396, 22452), 'os.path.join', 'os.path.join', (['figdir', '"""allelic_imbalance_comparison.pdf"""'], {}), "(figdir, 'allelic_imbalance_comparison.pdf')\n", (22408, 22452), False, 'import os\n'), ((7235, 7260), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (7250, 7260), False, 'import os\n'), ((4791, 4842), 'pandas.merge', 'pd.merge', (['df_snv', 'df_indel'], {'on': '"""sample"""', 'how': '"""left"""'}), "(df_snv, df_indel, on='sample', how='left')\n", (4799, 4842), True, 'import pandas as pd\n'), ((4864, 4914), 'pandas.merge', 'pd.merge', (['df_sigs', 'df_cnv'], {'on': '"""sample"""', 'how': '"""left"""'}), "(df_sigs, df_cnv, on='sample', how='left')\n", (4872, 4914), True, 'import pandas as pd\n'), ((6113, 6132), 'numpy.array', 'np.array', (['minorlocs'], {}), '(minorlocs)\n', (6121, 6132), True, 'import numpy as np\n'), ((9450, 9558), 'pandas.concat', 'pd.concat', (['[drp_mut_rate, atm_mut_rate, cdk12_mut_rate, brca2_mut_rate, mmrd_mut_rate,\n bladder_mut_rate]'], {}), '([drp_mut_rate, atm_mut_rate, cdk12_mut_rate, brca2_mut_rate,\n mmrd_mut_rate, bladder_mut_rate])\n', (9459, 9558), True, 'import pandas as pd\n'), ((16749, 16773), 'numpy.arange', 'np.arange', (['(1.0)', '(8.1)', '(1.0)'], {}), '(1.0, 8.1, 1.0)\n', (16758, 16773), True, 'import numpy as np\n'), ((16816, 16840), 'numpy.arange', 'np.arange', (['(1.0)', '(8.1)', '(1.0)'], {}), '(1.0, 8.1, 1.0)\n', (16825, 16840), True, 'import 
numpy as np\n'), ((19830, 19854), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.2)'], {}), '(0.0, 1.1, 0.2)\n', (19839, 19854), True, 'import numpy as np\n'), ((19897, 19921), 'numpy.arange', 'np.arange', (['(0.0)', '(1.1)', '(0.2)'], {}), '(0.0, 1.1, 0.2)\n', (19906, 19921), True, 'import numpy as np\n'), ((6001, 6053), 'numpy.arange', 'np.arange', (['majorlocs[i - 1]', 'majorlocs[i]', 'minorstep'], {}), '(majorlocs[i - 1], majorlocs[i], minorstep)\n', (6010, 6053), True, 'import numpy as np\n')] |
from blackmagic import app
from blackmagic.blueprints import prediction
from blackmagic.data import ceph
from cytoolz import count
from cytoolz import first
from cytoolz import get
from cytoolz import merge
from cytoolz import reduce
from datetime import date
import json
import numpy
import os
import pytest
import random
import test
# Module-level Ceph client shared by every test in this file.
# NOTE(review): starting the connection at import time is a side effect —
# importing this module requires the backend configured in app.cfg to be
# reachable.
_ceph = ceph.Ceph(app.cfg)
_ceph.start()
def delete_predictions(cx, cy):
    """Delete all stored predictions for the chip at (cx, cy) via the shared Ceph client."""
    return _ceph.delete_predictions(cx, cy)
@pytest.fixture
def client():
    """Yield a Flask test client for the blackmagic app with TESTING enabled."""
    app.app.config['TESTING'] = True
    yield app.app.test_client()
def prediction_test_data(segment):
    """Return a copy of *segment* augmented with randomized model-input fields.

    Adds fixed sday/eday dates plus, for each of the seven spectral bands
    (bl, gr, ni, re, s1, s2, th): a 7-element coefficient list, an intercept
    in [0, 90], a magnitude in [0, 10] and an rmse in [0, 1).  Keys already
    present in *segment* with the same names are overwritten.

    The original spelled out all seven bands by hand; the loop below
    produces the identical fields (and the identical sequence of random
    calls, so behavior under a fixed seed is unchanged).
    """
    fields = {'sday': date.fromordinal(2).isoformat(),
              'eday': date.fromordinal(1000).isoformat()}
    for band in ('bl', 'gr', 'ni', 're', 's1', 's2', 'th'):
        fields[band + 'coef'] = [random.uniform(0, 1) for i in range(7)]
        fields[band + 'int'] = random.randint(0, 90)
        fields[band + 'mag'] = random.randint(0, 10)
        fields[band + 'rmse'] = random.random()
    # Plain dict unpacking matches cytoolz merge semantics (later keys win)
    # without requiring cytoolz.
    return {**segment, **fields}
def create_prediction_test_data(client):
    """Populate the test chip with randomized segments and train a tile model.

    Posts /segment to generate segments for the test chip, replaces them with
    randomized values from prediction_test_data, then posts /tile so a trained
    model exists for the prediction tests.  Asserts on any non-200 response;
    returns True on success.
    """
    # prepopulate a chip of segments
    assert client.post('/segment',
                       json={'cx': test.cx,
                             'cy': test.cy,
                             'acquired': test.acquired}).status == '200 OK'
    # pull the segments
    segments = _ceph.select_segments(cx=test.cx, cy=test.cy)
    # remove old and busted test data
    _ceph.delete_segments(cx=test.cx, cy=test.cy)
    # add new and better test data
    _ceph.insert_segments(list(map(prediction_test_data, segments)))
    # train tile against new segment values
    assert client.post('/tile',
                       json={'tx': test.tx,
                             'ty': test.ty,
                             'acquired': test.acquired,
                             'chips': test.chips,
                             'date': '0001-01-02'}).status == '200 OK'
    return True
def test_prediction_runs_as_expected(client):
    '''
    As a blackmagic user, when I send tx, ty, acquired, month, day and chip list
    via HTTP POST, predictions are generated and saved
    so that they can be retrieved later.
    '''
    create_prediction_test_data(client)
    # request predictions for the test chip
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'month': test.prediction_month,
              'day': test.prediction_day,
              'acquired': test.acquired}
    response = client.post('/prediction', json=params)
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    assert response.status == '200 OK'
    # the response must echo every request parameter back
    body = response.get_json()
    for key in ('tx', 'ty', 'cx', 'cy', 'acquired', 'month', 'day'):
        assert get(key, body) == params[key]
    assert get('exception', body, None) == None
    # The number of predictions is dictated by the NLCDTRN dataset for the chip,
    # and the number of non-zero classifications available.
    assert len([p for p in predictions]) == 30000
def test_prediction_bad_parameters(client):
    '''
    As a blackmagic user, when I don't send tx, ty, acquired, date and chips list
    via HTTP POST the HTTP status is 400 and the response body tells
    me the required parameters so that I can send a good request.
    '''
    # tx deliberately None to trigger parameter validation
    params = {'tx': None,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    response = client.post('/prediction', json=params)
    delete_predictions(test.cx, test.cy)
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    assert response.status == '400 BAD REQUEST'
    # the response must echo every request parameter back
    body = response.get_json()
    for key in ('tx', 'ty', 'cx', 'cy', 'acquired', 'month', 'day'):
        assert get(key, body) == params[key]
    # and carry a non-empty exception message
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # no predictions may have been written
    assert len(list(map(lambda x: x, predictions))) == 0
def test_prediction_missing_model(client):
    """As a blackmagic user, when I send tx, ty, acquired, month, day, and
    chips via HTTP POST and no trained xgboost model is found for the given
    tx/ty, an exception is raised with HTTP 500 so that prediction does not
    occur and the problem may be resolved."""
    # tx/ty point at a tile for which no model was trained
    params = {'tx': test.missing_tx,
              'ty': test.missing_ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    response = client.post('/prediction', json=params)
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_load_model_exception(client):
    """As a blackmagic user, when an exception occurs loading a model,
    an HTTP 500 is issued with a descriptive message so that the issue
    may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise at the model-loading step
    response = client.post('/prediction',
                           json={**params, 'test_load_model_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_group_data_exception(client):
    """As a blackmagic user, when an exception occurs grouping prediction
    data, an HTTP 500 is issued with a descriptive message so that the
    issue may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise at the data-grouping step
    response = client.post('/prediction',
                           json={**params, 'test_group_data_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_matrix_exception(client):
    """As a blackmagic user, when an exception occurs constructing a
    prediction matrix, an HTTP 500 is issued with a descriptive message
    so that the issue may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise while building the matrix
    response = client.post('/prediction',
                           json={**params, 'test_matrix_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_default_predictions_exception(client):
    """As a blackmagic user, when an exception occurs creating default
    predictions, an HTTP 500 is issued with a descriptive message so
    that the issue may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise in default-prediction creation
    response = client.post('/prediction',
                           json={**params, 'test_default_predictions_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_load_data_exception(client):
    """As a blackmagic user, when an exception occurs loading data,
    an HTTP 500 is issued with a descriptive message so that the issue
    may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise at the data-loading step
    response = client.post('/prediction',
                           json={**params, 'test_load_data_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_prediction_exception(client):
    """As a blackmagic user, when an exception occurs predicting
    probabilities, an HTTP 500 is issued with a descriptive message
    so that the issue may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise while predicting
    response = client.post('/prediction',
                           json={**params, 'test_prediction_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_delete_exception(client):
    """As a blackmagic user, when an exception occurs deleting
    predictions, an HTTP 500 is issued with a descriptive message
    so that the issue may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise at the delete step
    response = client.post('/prediction',
                           json={**params, 'test_delete_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_save_exception(client):
    """As a blackmagic user, when an exception occurs saving predictions,
    an HTTP 500 is issued with a descriptive message so that the issue
    may be investigated, corrected & retried."""
    params = {'tx': test.tx,
              'ty': test.ty,
              'cx': test.cx,
              'cy': test.cy,
              'acquired': test.acquired,
              'month': test.prediction_month,
              'day': test.prediction_day}
    delete_predictions(test.cx, test.cy)
    # the test_* flag makes the server raise at the save step
    response = client.post('/prediction',
                           json={**params, 'test_save_exception': True})
    predictions = _ceph.select_predictions(cx=test.cx, cy=test.cy)
    body = response.get_json()
    assert response.status == '500 INTERNAL SERVER ERROR'
    # the request parameters are echoed back
    for key, value in params.items():
        assert get(key, body) == value
    # a descriptive exception message is present
    assert type(get('exception', body)) is str
    assert len(get('exception', body)) > 0
    # nothing was persisted
    assert len(list(predictions)) == 0
def test_prediction_group_data():
    """group_data partitions incoming segments into real data versus
    defaults; default segments carry the sentinel date '0001-01-01'."""
    def seg(day):
        # fresh dict per call so scenarios cannot share state
        return {'sday': day, 'eday': day}
    sentinel = '0001-01-01'
    # mixed: one default segment and one real segment
    assert prediction.group_data({'data': [seg(sentinel), seg('0001-01-02')]}) == \
        {'data': [seg('0001-01-02')], 'defaults': [seg(sentinel)]}
    # real data only
    assert prediction.group_data({'data': [seg('0001-01-03'), seg('0001-01-02')]}) == \
        {'data': [seg('0001-01-03'), seg('0001-01-02')], 'defaults': []}
    # defaults only
    assert prediction.group_data({'data': [seg(sentinel), seg(sentinel)]}) == \
        {'data': [], 'defaults': [seg(sentinel), seg(sentinel)]}
def test_prediction_matrix():
    """matrix stacks the 'independent' vectors into a float32 2d array.

    Background: normal segments carry 68 independent values, but default
    segments (sday/eday == '0001-01-01') carry only 19 because no
    coefficients were saved — only aux data and other values. numpy cannot
    stack rows of differing length into a 2d array:

        >>> numpy.array([numpy.array([1,2,3]), numpy.array([1,1,1,1,1,1])],
        ...             dtype='float32')
        ValueError: setting an array element with a sequence.

    Of the possible designs (pad/trim default rows and wipe them out
    post-prediction, versus split them out beforehand), splitting was
    chosen as least complex: detect default segments, group them, run
    prediction on the remainder, then merge the defaults back into the
    resultset with the desired default probability ([]). Hence matrix
    only ever sees uniform-length rows, as exercised here.
    """
    inputs = {'data': [{'independent': [1., 2., 3., 4.]},
                       {'independent': [5., 6., 7., 8.]}]}
    expected_data = [{'independent': [1., 2., 3., 4.]},
                     {'independent': [5., 6., 7., 8.]}]
    expected_ndata = numpy.array([[1., 2., 3., 4.],
                                  [5., 6., 7., 8.]], dtype='float32')
    outputs = prediction.matrix(inputs)
    assert numpy.array_equal(outputs['ndata'], expected_ndata)
    assert outputs['data'] == expected_data
def test_prediction_default_predictions():
    """default_predictions prepends each default segment, augmented with an
    empty probability list, to the prediction list."""
    # both defaults and predictions
    outputs = prediction.default_predictions({'defaults': [{"a": 1}, {"a": 2}],
                                              'predictions': [{"b": 3}, {"b": 4}]})
    assert outputs == {'defaults': [{"a": 1}, {"a": 2}],
                       'predictions': [{"a": 1, "prob": []},
                                       {"a": 2, "prob": []},
                                       {"b": 3},
                                       {"b": 4}]}
    # defaults only
    outputs = prediction.default_predictions({'defaults': [{"a": 1}, {"a": 2}],
                                              'predictions': []})
    assert outputs == {'defaults': [{"a": 1}, {"a": 2}],
                       'predictions': [{"a": 1, "prob": []},
                                       {"a": 2, "prob": []}]}
    # predictions only
    outputs = prediction.default_predictions({'defaults': [],
                                              'predictions': [{"b": 3}, {"b": 4}]})
    assert outputs == {'defaults': [],
                       'predictions': [{"b": 3}, {"b": 4}]}
    # neither
    outputs = prediction.default_predictions({'defaults': [], 'predictions': []})
    assert outputs == {'defaults': [], 'predictions': []}
| [
"blackmagic.blueprints.prediction.group_data",
"random.randint",
"random.uniform",
"random.random",
"numpy.array",
"blackmagic.data.ceph.Ceph",
"blackmagic.app.app.test_client",
"numpy.array_equal",
"datetime.date.fromordinal",
"blackmagic.blueprints.prediction.default_predictions",
"blackmagic.... | [((345, 363), 'blackmagic.data.ceph.Ceph', 'ceph.Ceph', (['app.cfg'], {}), '(app.cfg)\n', (354, 363), False, 'from blackmagic.data import ceph\n'), ((20823, 20852), 'blackmagic.blueprints.prediction.group_data', 'prediction.group_data', (['inputs'], {}), '(inputs)\n', (20844, 20852), False, 'from blackmagic.blueprints import prediction\n'), ((21241, 21270), 'blackmagic.blueprints.prediction.group_data', 'prediction.group_data', (['inputs'], {}), '(inputs)\n', (21262, 21270), False, 'from blackmagic.blueprints import prediction\n'), ((21667, 21696), 'blackmagic.blueprints.prediction.group_data', 'prediction.group_data', (['inputs'], {}), '(inputs)\n', (21688, 21696), False, 'from blackmagic.blueprints import prediction\n'), ((23729, 23754), 'blackmagic.blueprints.prediction.matrix', 'prediction.matrix', (['inputs'], {}), '(inputs)\n', (23746, 23754), False, 'from blackmagic.blueprints import prediction\n'), ((23767, 23821), 'numpy.array_equal', 'numpy.array_equal', (["outputs['ndata']", "expected['ndata']"], {}), "(outputs['ndata'], expected['ndata'])\n", (23784, 23821), False, 'import numpy\n'), ((24312, 24350), 'blackmagic.blueprints.prediction.default_predictions', 'prediction.default_predictions', (['inputs'], {}), '(inputs)\n', (24342, 24350), False, 'from blackmagic.blueprints import prediction\n'), ((24697, 24735), 'blackmagic.blueprints.prediction.default_predictions', 'prediction.default_predictions', (['inputs'], {}), '(inputs)\n', (24727, 24735), False, 'from blackmagic.blueprints import prediction\n'), ((24981, 25019), 'blackmagic.blueprints.prediction.default_predictions', 'prediction.default_predictions', (['inputs'], {}), '(inputs)\n', (25011, 25019), False, 'from blackmagic.blueprints import prediction\n'), ((25213, 25251), 'blackmagic.blueprints.prediction.default_predictions', 'prediction.default_predictions', (['inputs'], {}), '(inputs)\n', (25243, 25251), False, 'from blackmagic.blueprints import prediction\n'), ((535, 556), 
'blackmagic.app.app.test_client', 'app.app.test_client', ([], {}), '()\n', (554, 556), False, 'from blackmagic import app\n'), ((23614, 23688), 'numpy.array', 'numpy.array', (['[[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]]'], {'dtype': '"""float32"""'}), "([[1.0, 2.0, 3.0, 4.0], [5.0, 6.0, 7.0, 8.0]], dtype='float32')\n", (23625, 23688), False, 'import numpy\n'), ((843, 864), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (857, 864), False, 'import random\n'), ((894, 915), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (908, 915), False, 'import random\n'), ((945, 960), 'random.random', 'random.random', ([], {}), '()\n', (958, 960), False, 'import random\n'), ((1060, 1081), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (1074, 1081), False, 'import random\n'), ((1111, 1132), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1125, 1132), False, 'import random\n'), ((1162, 1177), 'random.random', 'random.random', ([], {}), '()\n', (1175, 1177), False, 'import random\n'), ((1277, 1298), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (1291, 1298), False, 'import random\n'), ((1328, 1349), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1342, 1349), False, 'import random\n'), ((1379, 1394), 'random.random', 'random.random', ([], {}), '()\n', (1392, 1394), False, 'import random\n'), ((1494, 1515), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (1508, 1515), False, 'import random\n'), ((1545, 1566), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (1559, 1566), False, 'import random\n'), ((1596, 1611), 'random.random', 'random.random', ([], {}), '()\n', (1609, 1611), False, 'import random\n'), ((1732, 1753), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (1746, 1753), False, 'import random\n'), ((1783, 1804), 'random.randint', 'random.randint', (['(0)', 
'(10)'], {}), '(0, 10)\n', (1797, 1804), False, 'import random\n'), ((1834, 1849), 'random.random', 'random.random', ([], {}), '()\n', (1847, 1849), False, 'import random\n'), ((1949, 1970), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (1963, 1970), False, 'import random\n'), ((2000, 2021), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2014, 2021), False, 'import random\n'), ((2051, 2066), 'random.random', 'random.random', ([], {}), '()\n', (2064, 2066), False, 'import random\n'), ((2166, 2187), 'random.randint', 'random.randint', (['(0)', '(90)'], {}), '(0, 90)\n', (2180, 2187), False, 'import random\n'), ((2217, 2238), 'random.randint', 'random.randint', (['(0)', '(10)'], {}), '(0, 10)\n', (2231, 2238), False, 'import random\n'), ((2268, 2283), 'random.random', 'random.random', ([], {}), '()\n', (2281, 2283), False, 'import random\n'), ((774, 794), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (788, 794), False, 'import random\n'), ((991, 1011), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1005, 1011), False, 'import random\n'), ((1208, 1228), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1222, 1228), False, 'import random\n'), ((1425, 1445), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1439, 1445), False, 'import random\n'), ((1663, 1683), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1677, 1683), False, 'import random\n'), ((1880, 1900), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1894, 1900), False, 'import random\n'), ((2097, 2117), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (2111, 2117), False, 'import random\n'), ((648, 667), 'datetime.date.fromordinal', 'date.fromordinal', (['(2)'], {}), '(2)\n', (664, 667), False, 'from datetime import date\n'), ((709, 731), 'datetime.date.fromordinal', 'date.fromordinal', (['(1000)'], {}), 
'(1000)\n', (725, 731), False, 'from datetime import date\n')] |
#!/usr/bin/env python3
## 16 Oct 2019 | EHU modifying BM's original code
import sys,os
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.colors import LightSource, Normalize
#from cpt_tools import cpt2python
def main(datadic, skafta):
    """Drive the Skafta analysis: load the data, crop to the region of
    interest, compute elastic stresses, and render the figure set."""
    data = Data(datadic, skafta)
    data.read_data()
    # crop to the cauldron region (row/col indices into the full raster)
    data.region_of_interest(ul_row=948, ul_col=2791, lr_row=2851, lr_col=5126)
    data.calc_elastic_stress()
    data.calc_maxprinc_stress()
    # mask out negative elevations before shading
    data.dem[data.dem < 0] = np.nan
    # one light source shared by all hillshaded figures
    ls = LightSource(azdeg=315, altdeg=45)
    plot_filled_dem(data, ls)
    plot_elastic_stress(data, ls)
# def plot_strain_energy_density(data,ls):
# # Choose colormap and data range normalization
# cmap = plt.get_cmap('magma_r')
#
# fig, ax = plt.subplots()
#
# # data.gridx,data.gridy,
# hatch = np.isnan(data.dem).astype(np.float32)
# hatch[hatch < 0.5] = np.nan
#
# ax.imshow(ls.hillshade(data.dem,vert_exag=2,dx=data.hdr['spx'],dy=data.hdr['spy']),cmap='gray')
# cf00 = ax.contourf(hatch,hatches=['xxx'],cmap=None,colors='0.4')
# cf0 = ax.contourf(data.filled_strainenergy,cmap=cmap,extend='both',levels=np.linspace(1.e3,1.e6,30),alpha=0.8)
# cbar = fig.colorbar(cf0,ax=ax,ticks=[1e3,100e3,250e3,500e3,750e3,1000e3])
# cbar.ax.set_ylabel(r'Surface strain energy density [kJ/m$^{3}$]',fontsize=12)
# cbar.ax.set_yticklabels([1,100,250,500,750,1000])
#
# ax.set_xlabel('Relative $x$ position [km]',fontsize=12)
# ax.set_ylabel('Relative $y$ position [km]',fontsize=12)
#
# ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
# ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
# plt.savefig('figs/strain_energy_shaded.pdf',bbox_inches='tight')
def plot_elastic_stress(data, ls, saturation_stress=10, axlabels=False):
    """Plot the maximum principal stress attainable in a purely elastic deformation.

    Arguments:
        data: Data instance with dem, hdr and filled_maxprinc_stress (Pa) set
        ls: matplotlib LightSource used for hillshading
        saturation_stress: colour-scale saturation in MPa (default 10); can be
            increased or decreased to highlight qualitative features
        axlabels: if True, label the axes with relative positions in km
    """
    cmap = plt.get_cmap('Spectral')
    fig, ax = plt.subplots()
    # hatch overlay marks cells with no DEM coverage
    hatch = np.isnan(data.dem).astype(np.float32)
    hatch[hatch < 0.5] = np.nan
    # stress field in MPa (stored in Pa), clipped symmetrically at the saturation value
    cf0 = ax.contourf(1.e-6*data.filled_maxprinc_stress, cmap=cmap, extend='both',
                      levels=np.linspace(-1*saturation_stress, saturation_stress, 30), alpha=0.8)
    ax.imshow(ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy']), cmap='gray')
    ax.contourf(hatch, hatches=['xxx'], cmap=None, colors='0.4', alpha=0.5)
    cbar = fig.colorbar(cf0, ax=ax, ticks=[-1*saturation_stress, -0.5*saturation_stress, 0,
                                           0.5*saturation_stress, saturation_stress])
    cbar.ax.set_ylabel('Surface max. princ. stress [MPa]', fontsize=12)
    ax.set_xlim((0, 2000))
    if axlabels:
        ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
        ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
        ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
        ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    else:
        ax.set_xticklabels(())
        ax.set_yticklabels(())
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/maxprinc_stress_shaded-{}MPa.pdf'.format(saturation_stress),bbox_inches='tight')
def plot_strain(data, ls):
    """Plot the filled surface-strain field over a hillshaded DEM."""
    fig, ax = plt.subplots()
    # cells with no DEM coverage get a hatched overlay
    nodata = np.isnan(data.dem).astype(np.float32)
    nodata[nodata < 0.5] = np.nan
    shaded = ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy'])
    ax.imshow(shaded, cmap='gray')
    ax.contourf(nodata, hatches=['xxx'], cmap=None, colors='0.4')
    strain_levels = np.linspace(-2.e-2, 2.e-2, 30)
    filled = ax.contourf(data.filled_strain, cmap=plt.get_cmap('Spectral'),
                         extend='both', levels=strain_levels, alpha=0.8)
    cbar = fig.colorbar(filled, ax=ax, ticks=[-2.e-2, -1e-2, 0, 1e-2, 2.e-2])
    cbar.ax.set_ylabel(r'Surface strain [$\times10^{-3}$]', fontsize=12)
    cbar.ax.set_yticklabels(['20', '10', '0', '10', '20'])
    ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
    ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
    ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/strain_shaded.pdf',bbox_inches='tight')
def plot_curvature(data, ls):
    """Plot the mean surface curvature (1/m) over a hillshaded DEM.

    The colour scale saturates symmetrically at +/- 1e-4 m^-1.
    """
    # NOTE: removed an unused `norm = Normalize(vmin=0, vmax=1600)` local
    # that was never passed to any plotting call.
    cmap = plt.get_cmap('Spectral')
    fig, ax = plt.subplots()
    # hatch overlay marks cells with no DEM coverage
    hatch = np.isnan(data.dem).astype(np.float32)
    hatch[hatch < 0.5] = np.nan
    ax.imshow(ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy']), cmap='gray')
    ax.contourf(hatch, hatches=['xxx'], cmap=None, colors='0.4')
    cf0 = ax.contourf(data.filled_curvature, cmap=cmap, extend='both',
                      levels=np.linspace(-1.e-4, 1.e-4, 30), alpha=0.8)
    cbar = fig.colorbar(cf0, ax=ax, ticks=[-1.e-4, -0.5e-4, 0, 0.5e-4, 1.e-4])
    cbar.ax.set_ylabel('Mean surface curvature [m$^{-1}$]', fontsize=12)
    cbar.ax.set_yticklabels(['-10$^{-4}$', '-10$^{-4}$/2', '0', '10$^{-4}$/2', '10$^{-4}$'])
    ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
    ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
    ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/curvature_shaded.pdf',bbox_inches='tight')
def plot_slope(data, ls):
    """Plot the dimensionless surface slope over a hillshaded DEM.

    The colour scale runs 0-0.15 and extends above the maximum.
    """
    # NOTE: removed an unused `norm = Normalize(vmin=0, vmax=1600)` local
    # that was never passed to any plotting call.
    cmap = plt.get_cmap('magma_r')
    fig, ax = plt.subplots()
    # hatch overlay marks cells with no DEM coverage
    hatch = np.isnan(data.dem).astype(np.float32)
    hatch[hatch < 0.5] = np.nan
    ax.imshow(ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy']), cmap='gray')
    ax.contourf(hatch, hatches=['xxx'], cmap=None, colors='0.4')
    cf0 = ax.contourf(data.filled_slope, cmap=cmap, extend='max',
                      levels=np.linspace(0, 0.15, 30), alpha=0.8)
    cbar = fig.colorbar(cf0, ax=ax, ticks=[0, 0.03, 0.06, 0.09, 0.12, 0.15])
    cbar.ax.set_ylabel('Surface slope [-]', fontsize=12)
    ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
    ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
    ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/slope_shaded.pdf',bbox_inches='tight')
def plot_filled_dem(data, ls, axlabels=False):
    """Plot the gap-filled DEM as filled contours over a hillshade.

    Arguments:
        data: Data instance with dem, hdr and filled_dem set
        ls: matplotlib LightSource used for hillshading
        axlabels: if True, label the axes with relative positions in km
    """
    fig, ax = plt.subplots()
    # cells with no DEM coverage get a hatched overlay
    nodata = np.isnan(data.dem).astype(np.float32)
    nodata[nodata < 0.5] = np.nan
    elevation_levels = np.linspace(1550, 1750, 30)
    filled = ax.contourf(data.filled_dem, cmap=plt.get_cmap('viridis_r'),
                         extend='both', levels=elevation_levels, alpha=0.8)
    ax.imshow(ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy']), cmap='gray')
    ax.contourf(nodata, hatches=['xxx'], cmap=None, colors='0.4', alpha=0.5)
    cbar = fig.colorbar(filled, ax=ax, ticks=[1550, 1600, 1650, 1700, 1750])
    cbar.ax.set_ylabel('Surface elevation [m a.s.l.]', fontsize=12)
    ax.set_xlim((0, 2000))
    if axlabels:
        ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
        ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
        ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
        ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    else:
        ax.set_xticklabels(())
        ax.set_yticklabels(())
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/dem_filled_shaded.pdf',bbox_inches='tight')
def plot_dem_only(data, ls):
    """Render the raw DEM as a shaded-relief image with an elevation colorbar."""
    cmap = plt.get_cmap('cividis_r')
    norm = Normalize(1550, 1700)
    shaded_rgb = ls.shade_rgb(cmap(norm(data.dem)), data.dem,
                              blend_mode='overlay', fraction=0.6)
    fig, ax = plt.subplots()
    ax.imshow(shaded_rgb)
    # proxy artist exists solely to drive the colorbar; removed from the axes
    proxy = ax.imshow(data.dem, cmap=cmap)
    proxy.remove()
    fig.colorbar(proxy)
    plt.savefig('figs/dem_only_shaded.pdf',bbox_inches='tight')
def plot_mask(data, ls, axlabels=False):
    """Plot the crevasse mask in greyscale over a hillshaded DEM.

    Arguments:
        data: Data instance with dem, hdr and mask set
        ls: matplotlib LightSource used for hillshading
        axlabels: if True, label the axes with relative positions in km
    """
    # NOTE: removed a dead `crevasses = np.isnan(data.mask)...` computation
    # whose only consumer (a second contourf) was commented out.
    cmap = plt.get_cmap('Greys')
    fig, ax = plt.subplots()
    # hatch overlay marks cells with no DEM coverage
    hatch = np.isnan(data.dem).astype(np.float32)
    hatch[hatch < 0.5] = np.nan
    ax.imshow(ls.hillshade(data.dem, vert_exag=2, dx=data.hdr['spx'], dy=data.hdr['spy']), cmap='gray')
    ax.contourf(hatch, hatches=['xxx'], cmap=None, colors='0.4')
    ax.contourf(data.mask, cmap=cmap, alpha=0.8)
    ax.set_xlim((0, 2000))
    if axlabels:
        ax.set_xticklabels(['%1.1f' % x for x in data.hdr['spx']*1.e-3*ax.get_xticks()])
        ax.set_yticklabels(['%1.1f' % (4-x) for x in data.hdr['spy']*1.e-3*ax.get_yticks()])
        ax.set_xlabel('Relative $x$ position [km]', fontsize=12)
        ax.set_ylabel('Relative $y$ position [km]', fontsize=12)
    else:
        ax.set_xticklabels(())
        ax.set_yticklabels(())
    plt.savefig('/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/mask.pdf',bbox_inches='tight')
class Data():
    """Container for a DEM of the eastern Skafta cauldron and derived rasters.

    Loads flat-binary float32 rasters (DEM, crevasse-filled DEM, slope,
    curvature, second derivatives, mask) described by an ENVI-style header,
    and computes surface elastic stresses from thin-plate bending theory.

    Note: the original author names the ``self`` parameter ``data``
    throughout; that convention is preserved here.
    """

    ### Mapping of raster attribute name -> label used in error messages.
    ### Its insertion order also fixes the read/crop order of the bands.
    _BAND_LABELS = {
        'dem': 'dem',
        'filled_dem': 'filled dem',
        'filled_diff': 'diff',
        'filled_slope': 'slope',
        'filled_curvature': 'curvature',
        'filled_ddx2': 'ddx2',
        'filled_ddy2': 'ddy2',
        'filled_ddxdy': 'ddxdy',
        'mask': 'mask',
    }

    def __init__(data, datafiles, skafta, youngmod=1.e9, poisson_nu=0.3, mean_thickness=300., sign_compression=1.):
        """Store file paths, region corners and material constants.

        Args:
            datafiles: dict mapping band names (keys of _BAND_LABELS) plus
                'hdr' to file paths.
            skafta: dict of region corners in polar stereographic coords
                ('ul_polstr', 'lr_polstr').
            youngmod: Youngs modulus in Pa.
            poisson_nu: Poissons ratio (dimensionless).
            mean_thickness: ice thickness in m.
            sign_compression: sign convention applied for compressive
                stress/strain (-1 for positive tension).
        """
        data.files = datafiles
        data.skafta = skafta
        data.hdr = data.get_header('hdr')
        data.youngs = youngmod
        data.poissons = poisson_nu
        data.thickness_m = mean_thickness
        data.sign_compression = sign_compression

    def calc_elastic_stress(data):
        """Compute the surface elastic stress field from the mean curvature.

        Order-of-magnitude estimate assuming cylindrical symmetry; use
        calc_maxprinc_stress() for the general maximum principal stress.
        Sets filled_strain, filled_stress and filled_strainenergy.
        """
        # Thin-plate surface strain: eps = sign * (h/2) * curvature.
        data.filled_strain = data.sign_compression * 0.5 * data.thickness_m * data.filled_curvature
        # Plane-stress constitutive relation.
        data.filled_stress = data.youngs * data.filled_strain / (1. - data.poissons**2)
        data.filled_strainenergy = data.filled_strain * data.filled_stress

    def calc_maxprinc_stress(data):
        """Compute sigma_x, sigma_y, tau_xy and store the max principal stress.

        Follows Ugural (2017) for arbitrary thin-plate bending geometry.
        Sets filled_maxprinc_stress = sigma_1 (the maximum principal stress).
        """
        prefactor = data.sign_compression * 0.5 * data.thickness_m * data.youngs / (1. - data.poissons**2)
        sigma_x = prefactor * (data.filled_ddx2 + (data.poissons * data.filled_ddy2))
        sigma_y = prefactor * (data.filled_ddy2 + (data.poissons * data.filled_ddx2))
        tau_xy = (data.sign_compression * 0.5 * data.thickness_m / (1. + data.poissons)) * data.filled_ddxdy
        # Principal stresses: sigma_{max,min} = A +/- B.
        A = (sigma_x + sigma_y) / 2
        B = np.sqrt(((sigma_x - sigma_y) / 2)**2 + tau_xy**2)
        data.filled_maxprinc_stress = A + B

    def region_of_interest(data, ul_row=None, ul_col=None, lr_row=None, lr_col=None):
        """Crop all rasters to the Skafta region (or to explicit bounds).

        Fix: uses builtin int()/abs() — the np.int/np.float aliases used
        previously were removed in NumPy 1.20+/1.24.
        """
        if ul_row is None:
            ### derive row/column bounds for Skafta from the header geometry
            ### NOTE(review): rows are derived from x-coordinates and columns
            ### from y-coordinates here; looks transposed but is preserved
            ### as-is — confirm against the data orientation.
            ul_row = int(abs(data.hdr['ulx'] - data.skafta['ul_polstr'][0]) / data.hdr['spx'])
            ul_col = int(abs(data.hdr['uly'] - data.skafta['ul_polstr'][1]) / data.hdr['spy'])
            lr_row = int(abs(data.hdr['ulx'] - data.skafta['lr_polstr'][0]) / data.hdr['spx'])
            lr_col = int(abs(data.hdr['uly'] - data.skafta['lr_polstr'][1]) / data.hdr['spy'])
        for name in data._BAND_LABELS:
            cropped = getattr(data, name)[ul_row:lr_row, ul_col:lr_col]
            setattr(data, name, cropped)
        data.cols = data.dem.shape[1]
        data.rows = data.dem.shape[0]

    def read_data(data):
        """Read every raster listed in _BAND_LABELS into an attribute.

        Raises:
            ValueError: if a raster's row count disagrees with the header.
                (Also fixes the original copy-paste bug that validated
                filled_ddy2 after reading filled_ddxdy.)
        """
        for name in data._BAND_LABELS:
            setattr(data, name, data._read_band(name))

    def _read_band(data, name):
        """Read one flat-binary float32 raster and validate its shape."""
        # 'rb' (not 'r'): np.fromfile expects a binary-mode file handle.
        with open(data.files[name], 'rb') as fid:
            band = np.fromfile(fid, dtype=np.float32).reshape(-1, data.hdr['cols'])
        if band.shape[0] != data.hdr['rows']:
            # Preserve the original per-band wording of the error messages.
            raise ValueError('%s not the right size according to header' % data._BAND_LABELS[name])
        return band

    def get_header(data, hdrstr='hdr'):
        """Parse an ENVI-style header; return dict of rows/cols and geometry.

        Keys: 'cols', 'rows', upper-left corner 'ulx'/'uly', pixel spacings
        'spx'/'spy' (absolute values), and derived lower-right 'lrx'/'lry'.
        """
        hdr = {}
        with open(data.files[hdrstr], 'r') as fid:
            for line in fid.readlines():
                if line.startswith('samples'):
                    hdr['cols'] = int(line.split('=')[1])
                elif line.startswith('lines'):
                    hdr['rows'] = int(line.split('=')[1])
                elif line.startswith('map info') or line.startswith('map_info'):
                    r = line.split('=')[1].split(',')
                    hdr['ulx'] = float(r[3])
                    hdr['uly'] = float(r[4])
                    hdr['spx'] = abs(float(r[5]))
                    hdr['spy'] = abs(float(r[6]))
        ### lower-right corner from upper-left corner + spacing
        hdr['lrx'] = hdr['ulx'] + (hdr['cols'] - 1) * hdr['spx']
        hdr['lry'] = hdr['uly'] - (hdr['rows'] - 1) * abs(hdr['spy'])
        return hdr
if __name__=='__main__':
    fpath = '/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/'
    ### paths to input rasters: cropped DEMs plus smooth_crevasses.py products
    datadic = {
        'hdr': fpath + 'diff/skaftar_east/SETSM_WV02_20151010_skaftar_east_medflt.hdr',
        'dem': fpath + 'diff/skaftar_east/SETSM_WV02_20151010_skaftar_east_medflt.bin',
        'mask': fpath + 'diff/skaftar_east/SETSM_WV02_20151010_skafar_east_dem_highpass_mask_smooth.bin',
        'filled_dem': fpath + 'SETSM_WV02_20151010_skaftar_east_dem_filled.bin',
        'filled_diff': fpath + 'SETSM_WV02_20151010_skaftar_east_dem_filled_diff.bin',
        'filled_slope': fpath + 'SETSM_WV02_20151010_skaftar_east_dem_filled_slope.bin',
        'filled_curvature': fpath + 'SETSM_WV02_20151010_skaftar_east_dem_filled_curvature.bin',
        'filled_ddx2': '../SETSM_WV02_20151010_skaftar_east_dem_filled_ddx2.bin',
        'filled_ddy2': '../SETSM_WV02_20151010_skaftar_east_dem_filled_ddy2.bin',
        'filled_ddxdy': '../SETSM_WV02_20151010_skaftar_east_dem_filled_ddxdy.bin',
    }
    skafta = {}
    # skafta['ul_polstr'] = [1294500.,-2489500.]
    # skafta['lr_polstr'] = [1298500.,-2493500.]
    #
    # main(datadic,skafta)
| [
"numpy.abs",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize",
"numpy.fromfile",
"numpy.float",
"numpy.isnan",
"numpy.linspace",
"matplotlib.colors.LightSource",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig",
"numpy.sqrt"
] | [((526, 559), 'matplotlib.colors.LightSource', 'LightSource', ([], {'azdeg': '(315)', 'altdeg': '(45)'}), '(azdeg=315, altdeg=45)\n', (537, 559), False, 'from matplotlib.colors import LightSource, Normalize\n'), ((2298, 2322), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (2310, 2322), True, 'import matplotlib.pyplot as plt\n'), ((2336, 2350), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2348, 2350), True, 'import matplotlib.pyplot as plt\n'), ((3852, 3876), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (3864, 3876), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3906), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (3904, 3906), True, 'import matplotlib.pyplot as plt\n'), ((4969, 5100), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/strain_shaded.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n '/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/strain_shaded.pdf'\n , bbox_inches='tight')\n", (4980, 5100), True, 'import matplotlib.pyplot as plt\n'), ((5182, 5206), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Spectral"""'], {}), "('Spectral')\n", (5194, 5206), True, 'import matplotlib.pyplot as plt\n'), ((5218, 5246), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(1600)'}), '(vmin=0, vmax=1600)\n', (5227, 5246), False, 'from matplotlib.colors import LightSource, Normalize\n'), ((5261, 5275), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (5273, 5275), True, 'import matplotlib.pyplot as plt\n'), ((6379, 6513), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/curvature_shaded.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n '/Users/ehultee/Documents/6. 
MIT/Skaftar collapse/Crevasse_mask/figs/curvature_shaded.pdf'\n , bbox_inches='tight')\n", (6390, 6513), True, 'import matplotlib.pyplot as plt\n'), ((6591, 6614), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""magma_r"""'], {}), "('magma_r')\n", (6603, 6614), True, 'import matplotlib.pyplot as plt\n'), ((6626, 6654), 'matplotlib.colors.Normalize', 'Normalize', ([], {'vmin': '(0)', 'vmax': '(1600)'}), '(vmin=0, vmax=1600)\n', (6635, 6654), False, 'from matplotlib.colors import LightSource, Normalize\n'), ((6669, 6683), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (6681, 6683), True, 'import matplotlib.pyplot as plt\n'), ((7668, 7798), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/slope_shaded.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n '/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/slope_shaded.pdf'\n , bbox_inches='tight')\n", (7679, 7798), True, 'import matplotlib.pyplot as plt\n'), ((8071, 8096), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""viridis_r"""'], {}), "('viridis_r')\n", (8083, 8096), True, 'import matplotlib.pyplot as plt\n'), ((8109, 8123), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (8121, 8123), True, 'import matplotlib.pyplot as plt\n'), ((9187, 9322), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/dem_filled_shaded.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n '/Users/ehultee/Documents/6. 
MIT/Skaftar collapse/Crevasse_mask/figs/dem_filled_shaded.pdf'\n , bbox_inches='tight')\n", (9198, 9322), True, 'import matplotlib.pyplot as plt\n'), ((9407, 9432), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""cividis_r"""'], {}), "('cividis_r')\n", (9419, 9432), True, 'import matplotlib.pyplot as plt\n'), ((9444, 9465), 'matplotlib.colors.Normalize', 'Normalize', (['(1550)', '(1700)'], {}), '(1550, 1700)\n', (9453, 9465), False, 'from matplotlib.colors import LightSource, Normalize\n'), ((9573, 9587), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (9585, 9587), True, 'import matplotlib.pyplot as plt\n'), ((9735, 9795), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""figs/dem_only_shaded.pdf"""'], {'bbox_inches': '"""tight"""'}), "('figs/dem_only_shaded.pdf', bbox_inches='tight')\n", (9746, 9795), True, 'import matplotlib.pyplot as plt\n'), ((10072, 10093), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['"""Greys"""'], {}), "('Greys')\n", (10084, 10093), True, 'import matplotlib.pyplot as plt\n'), ((10106, 10120), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (10118, 10120), True, 'import matplotlib.pyplot as plt\n'), ((11149, 11271), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""/Users/ehultee/Documents/6. MIT/Skaftar collapse/Crevasse_mask/figs/mask.pdf"""'], {'bbox_inches': '"""tight"""'}), "(\n '/Users/ehultee/Documents/6. 
MIT/Skaftar collapse/Crevasse_mask/figs/mask.pdf'\n , bbox_inches='tight')\n", (11160, 11271), True, 'import matplotlib.pyplot as plt\n'), ((13250, 13303), 'numpy.sqrt', 'np.sqrt', (['(((sigma_x - sigma_y) / 2) ** 2 + tau_xy ** 2)'], {}), '(((sigma_x - sigma_y) / 2) ** 2 + tau_xy ** 2)\n', (13257, 13303), True, 'import numpy as np\n'), ((2361, 2379), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (2369, 2379), True, 'import numpy as np\n'), ((2513, 2571), 'numpy.linspace', 'np.linspace', (['(-1 * saturation_stress)', 'saturation_stress', '(30)'], {}), '(-1 * saturation_stress, saturation_stress, 30)\n', (2524, 2571), True, 'import numpy as np\n'), ((3949, 3967), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (3957, 3967), True, 'import numpy as np\n'), ((4261, 4289), 'numpy.linspace', 'np.linspace', (['(-0.02)', '(0.02)', '(30)'], {}), '(-0.02, 0.02, 30)\n', (4272, 4289), True, 'import numpy as np\n'), ((5318, 5336), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (5326, 5336), True, 'import numpy as np\n'), ((5633, 5665), 'numpy.linspace', 'np.linspace', (['(-0.0001)', '(0.0001)', '(30)'], {}), '(-0.0001, 0.0001, 30)\n', (5644, 5665), True, 'import numpy as np\n'), ((6726, 6744), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (6734, 6744), True, 'import numpy as np\n'), ((7036, 7060), 'numpy.linspace', 'np.linspace', (['(0)', '(0.15)', '(30)'], {}), '(0, 0.15, 30)\n', (7047, 7060), True, 'import numpy as np\n'), ((8134, 8152), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (8142, 8152), True, 'import numpy as np\n'), ((8268, 8295), 'numpy.linspace', 'np.linspace', (['(1550)', '(1750)', '(30)'], {}), '(1550, 1750, 30)\n', (8279, 8295), True, 'import numpy as np\n'), ((10131, 10149), 'numpy.isnan', 'np.isnan', (['data.dem'], {}), '(data.dem)\n', (10139, 10149), True, 'import numpy as np\n'), ((10213, 10232), 'numpy.isnan', 'np.isnan', (['data.mask'], {}), '(data.mask)\n', (10221, 10232), 
True, 'import numpy as np\n'), ((13864, 13917), 'numpy.abs', 'np.abs', (["(data.hdr['ulx'] - data.skafta['ul_polstr'][0])"], {}), "(data.hdr['ulx'] - data.skafta['ul_polstr'][0])\n", (13870, 13917), True, 'import numpy as np\n'), ((13965, 14018), 'numpy.abs', 'np.abs', (["(data.hdr['uly'] - data.skafta['ul_polstr'][1])"], {}), "(data.hdr['uly'] - data.skafta['ul_polstr'][1])\n", (13971, 14018), True, 'import numpy as np\n'), ((14066, 14119), 'numpy.abs', 'np.abs', (["(data.hdr['ulx'] - data.skafta['lr_polstr'][0])"], {}), "(data.hdr['ulx'] - data.skafta['lr_polstr'][0])\n", (14072, 14119), True, 'import numpy as np\n'), ((14167, 14220), 'numpy.abs', 'np.abs', (["(data.hdr['uly'] - data.skafta['lr_polstr'][1])"], {}), "(data.hdr['uly'] - data.skafta['lr_polstr'][1])\n", (14173, 14220), True, 'import numpy as np\n'), ((15249, 15283), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (15260, 15283), True, 'import numpy as np\n'), ((15531, 15565), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (15542, 15565), True, 'import numpy as np\n'), ((15829, 15863), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (15840, 15863), True, 'import numpy as np\n'), ((16124, 16158), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (16135, 16158), True, 'import numpy as np\n'), ((16429, 16463), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (16440, 16463), True, 'import numpy as np\n'), ((16732, 16766), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (16743, 16766), True, 'import numpy as np\n'), ((17025, 17059), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (17036, 17059), True, 'import numpy as np\n'), ((17320, 17354), 'numpy.fromfile', 
'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (17331, 17354), True, 'import numpy as np\n'), ((17601, 17635), 'numpy.fromfile', 'np.fromfile', (['fid'], {'dtype': 'np.float32'}), '(fid, dtype=np.float32)\n', (17612, 17635), True, 'import numpy as np\n'), ((18321, 18335), 'numpy.float', 'np.float', (['r[3]'], {}), '(r[3])\n', (18329, 18335), True, 'import numpy as np\n'), ((18365, 18379), 'numpy.float', 'np.float', (['r[4]'], {}), '(r[4])\n', (18373, 18379), True, 'import numpy as np\n'), ((18416, 18430), 'numpy.float', 'np.float', (['r[5]'], {}), '(r[5])\n', (18424, 18430), True, 'import numpy as np\n'), ((18468, 18482), 'numpy.float', 'np.float', (['r[6]'], {}), '(r[6])\n', (18476, 18482), True, 'import numpy as np\n'), ((18611, 18629), 'numpy.abs', 'np.abs', (["hdr['spy']"], {}), "(hdr['spy'])\n", (18617, 18629), True, 'import numpy as np\n')] |
"""Noise generators to simulate noise in sensor / actuator classes."""
import abc
import gin
import numpy as np
class NoiseGenerator(abc.ABC):
  """Abstract interface for noise applied to sensor/actuator values."""

  @abc.abstractmethod
  def _get_noise(self, shape, dtype=None):
    """Return a noise sample as a numpy array of the given shape and dtype.

    Tensorflow requires the shape and dtype of noise to be correctly
    specified, so implementations must honor both arguments.

    Args:
      shape: Shape of the returned array.
      dtype: Datatype of the returned array (None for default).
    """

  @abc.abstractmethod
  def add_noise(self, data):
    """Return the given data combined with noise from _get_noise, clipped.

    Args:
      data: Numpy array of data to be modified with noise.
    """
@gin.configurable
class BiasNoise(NoiseGenerator):
  """Noise generator that applies a constant offset, clipped to a range."""

  def __init__(self,
               bias=0.0,
               clipping_lower_bound=-np.inf,
               clipping_upper_bound=np.inf):
    """Create a bias noise generator.

    Args:
      bias: Constant offset applied to the input.
      clipping_lower_bound: Lower clip applied in add_noise (-np.inf disables).
      clipping_upper_bound: Upper clip applied in add_noise (np.inf disables).
    """
    self._bias = bias
    self._clipping_lower_bound = clipping_lower_bound
    self._clipping_upper_bound = clipping_upper_bound

  def _get_noise(self, shape, dtype=None):
    """Return an array of the requested shape/dtype filled with the bias."""
    return np.full(shape, self._bias, dtype)

  def add_noise(self, data):
    """Return data plus the bias, clipped to the configured bounds."""
    noisy = data + self._get_noise(data.shape, data.dtype)
    return np.clip(noisy, self._clipping_lower_bound,
                   self._clipping_upper_bound)
@gin.configurable
class NormalNoise(BiasNoise):
  """Gaussian noise centered on the configured bias, with optional clipping."""

  def __init__(self, scale, **kwargs):
    """Create a normal noise generator.

    Args:
      scale: Standard deviation of the Gaussian noise. Note numpy raises
        an error if scale < 0.
      **kwargs: Arguments forwarded to BiasNoise (e.g. bias and clipping).
    """
    super().__init__(**kwargs)
    self._scale = scale

  def _get_noise(self, shape, dtype=None):
    """Draw Gaussian noise with mean self._bias and stdev self._scale."""
    sample = np.random.normal(self._bias, self._scale, shape)
    return sample.astype(dtype)
@gin.configurable
class UniformNoise(NoiseGenerator):
  """Noise drawn uniformly from [low, high), with optional output clipping."""

  def __init__(self,
               low,
               high,
               clipping_lower_bound=-np.inf,
               clipping_upper_bound=np.inf):
    """Creates a uniform noise generator.

    Args:
      low: Lower bound of the uniform noise distribution.
      high: Upper bound of the uniform noise distribution.
      clipping_lower_bound: Lower clip applied in add_noise (-np.inf disables).
      clipping_upper_bound: Upper clip applied in add_noise (np.inf disables).
    """
    super().__init__()
    self._low = low
    self._high = high
    self._clipping_lower_bound = clipping_lower_bound
    self._clipping_upper_bound = clipping_upper_bound

  def _get_noise(self, shape, dtype=None):
    """Draw uniform noise of the given shape and cast it to dtype."""
    sample = np.random.uniform(self._low, self._high, shape)
    return sample.astype(dtype)

  def add_noise(self, data):
    """Return data plus uniform noise, clipped to the configured bounds."""
    noisy = data + self._get_noise(data.shape, data.dtype)
    return np.clip(noisy, self._clipping_lower_bound,
                   self._clipping_upper_bound)
@gin.configurable
class RangeNoise(NormalNoise):
  """Gaussian range noise specified in meters, applied to hit fractions.

  The noise magnitude is configured in meters between a minimum and maximum
  range, but add_noise operates on hit-fraction values in (0, 1) as required
  by the SimLidarSensor API. The *_m methods work directly in meters.
  """

  def __init__(self, range_noise_m, max_range_m, min_range_m=0.0, **kwargs):
    """Create a normal noise generator suitable for use in a range scanner.

    Args:
      range_noise_m: Standard deviation of the Gaussian noise applied to
        range readings, in meters.
      max_range_m: Maximum range in meters, used for scaling and clipping.
      min_range_m: Minimum range in meters, used for scaling and clipping.
      **kwargs: Other arguments passed to NormalNoise (principally bias).

    Raises:
      ValueError: If range_noise_m is negative or min_range_m >= max_range_m.
    """
    if range_noise_m < 0.0:
      raise ValueError("Range noise should not be negative: %r" % range_noise_m)
    if min_range_m >= max_range_m:
      raise ValueError("min_range_m %s must be less than max_range_m %s" %
                       (min_range_m, max_range_m))
    self._range_noise_m = range_noise_m
    self._max_range_m = max_range_m
    self._min_range_m = min_range_m
    self._total_range = max_range_m - min_range_m
    # Express the stdev in hit-fraction units; clip results to valid (0, 1).
    super().__init__(
        scale=range_noise_m / self._total_range,
        clipping_lower_bound=0.0,
        clipping_upper_bound=1.0,
        **kwargs)

  def _get_noise_m(self, shape, dtype=None):
    """Create normal noise of the given shape and dtype, scaled to meters."""
    return self.range_to_m(self._get_noise(shape=shape, dtype=dtype))

  def add_noise_m(self, data):
    """Add normal noise to data expressed in meters."""
    return self.range_to_m(self.add_noise(self.m_to_range(data)))

  def m_to_range(self, data):
    """Map data in meters onto the (0, 1) hit-fraction range."""
    return (data - self._min_range_m) / self._total_range

  def range_to_m(self, data):
    """Map data in the (0, 1) hit-fraction range back to meters."""
    return data * self._total_range + self._min_range_m
@gin.configurable
class TwistNoise(object):
  """Adds normally distributed noise to (linear, angular) twist actions.

  Simplified action-space noise model kept in parity with DriveWorld's
  r/s/e/drive_models/twist_drive.py;rcl=307540784;l=161.

  Assumes the noisy twist is clipped downstream, as wheeled_robot_base.py
  currently does:
  robotics/reinforcement_learning/minitaur/robots/wheeled_robot_base.py;l=533
  # We assume that the velocity clipping would be absorbed in this API.
  if self._action_filter:
    action = self._action_filter.filter(action)
  where action is a (linear_velocity, angular_velocity) pair later clipped
  to limits by the _compute_kinematic_base_velocity method.
  """

  def __init__(self,
               linear_velocity_noise_stdev_mps: float,
               linear_velocity_noise_max_stdevs: float,
               angular_velocity_noise_stdev_rps: float,
               angular_velocity_noise_max_stdevs: float,
               noise_scaling_cutoff_mps: float = 0.0):
    """Create a twist-action noise model (DriveWorld TwistDrive API).

    Supports the API specified in the DriveWorld TwistDrive class:
    robotics/simulation/environments/drive_models/twist_drive.py;l=54

    Args:
      linear_velocity_noise_stdev_mps: One standard deviation of normal noise
        for linear velocity, in meters per second.
      linear_velocity_noise_max_stdevs: Maximum number of stdevs the linear
        noise may reach, so samples cannot spike arbitrarily.
      angular_velocity_noise_stdev_rps: One standard deviation of normal noise
        for angular velocity, in radians per second.
      angular_velocity_noise_max_stdevs: Maximum number of stdevs the angular
        noise may reach.
      noise_scaling_cutoff_mps: Below this linear speed, both noise terms are
        scaled down proportionally so that zero velocity yields zero noise
        (a robot at rest stays at rest). Angular velocity is deliberately
        excluded from this computation to keep the model simple.

    Raises:
      ValueError: If any of the magnitudes is negative.
    """
    # Validate range values.
    if linear_velocity_noise_stdev_mps < 0.0:
      raise ValueError("Linear action noise should not be negative: %r" %
                       linear_velocity_noise_stdev_mps)
    if linear_velocity_noise_max_stdevs < 0.0:
      raise ValueError("Maximum linear noise should not be negative: %r" %
                       linear_velocity_noise_max_stdevs)
    if angular_velocity_noise_stdev_rps < 0.0:
      raise ValueError("Angular action noise should not be negative: %r" %
                       angular_velocity_noise_stdev_rps)
    if angular_velocity_noise_max_stdevs < 0.0:
      raise ValueError("Maximum action noise should not be negative: %r" %
                       angular_velocity_noise_max_stdevs)
    if noise_scaling_cutoff_mps < 0.0:
      raise ValueError("Noise scaling cutoff should not be negative: %r" %
                       noise_scaling_cutoff_mps)
    # Per-component stdevs, passed directly as the scale of np.random.normal.
    self._noise_shape = [
        linear_velocity_noise_stdev_mps, angular_velocity_noise_stdev_rps
    ]
    # Clipping bounds in absolute units: max_stdevs * one-stdev magnitude.
    self._noise_lower_bound = np.array([
        -linear_velocity_noise_max_stdevs * linear_velocity_noise_stdev_mps,
        -angular_velocity_noise_max_stdevs * angular_velocity_noise_stdev_rps
    ])
    self._noise_upper_bound = -self._noise_lower_bound
    self._noise_scaling_cutoff_mps = noise_scaling_cutoff_mps

  def filter(self, action):
    """Return the (linear, angular) action with clipped Gaussian noise added."""
    linear_velocity, angular_velocity = action
    raw_noise = np.random.normal(0, self._noise_shape, 2)
    linear_noise, angular_noise = np.clip(raw_noise, self._noise_lower_bound,
                                           self._noise_upper_bound)
    if self._noise_scaling_cutoff_mps:
      # Fade noise toward zero as |linear velocity| drops below the cutoff.
      clipped_velocity = min(abs(linear_velocity),
                             self._noise_scaling_cutoff_mps)
      scaling_factor = clipped_velocity / self._noise_scaling_cutoff_mps
      linear_noise *= scaling_factor
      angular_noise *= scaling_factor
    return (linear_velocity + linear_noise, angular_velocity + angular_noise)
| [
"numpy.full",
"numpy.random.uniform",
"numpy.clip",
"numpy.array",
"numpy.random.normal"
] | [((1584, 1617), 'numpy.full', 'np.full', (['shape', 'self._bias', 'dtype'], {}), '(shape, self._bias, dtype)\n', (1591, 1617), True, 'import numpy as np\n'), ((1784, 1861), 'numpy.clip', 'np.clip', (['(data + noise)', 'self._clipping_lower_bound', 'self._clipping_upper_bound'], {}), '(data + noise, self._clipping_lower_bound, self._clipping_upper_bound)\n', (1791, 1861), True, 'import numpy as np\n'), ((3628, 3705), 'numpy.clip', 'np.clip', (['(data + noise)', 'self._clipping_lower_bound', 'self._clipping_upper_bound'], {}), '(data + noise, self._clipping_lower_bound, self._clipping_upper_bound)\n', (3635, 3705), True, 'import numpy as np\n'), ((9359, 9517), 'numpy.array', 'np.array', (['[-linear_velocity_noise_max_stdevs * linear_velocity_noise_stdev_mps, -\n angular_velocity_noise_max_stdevs * angular_velocity_noise_stdev_rps]'], {}), '([-linear_velocity_noise_max_stdevs *\n linear_velocity_noise_stdev_mps, -angular_velocity_noise_max_stdevs *\n angular_velocity_noise_stdev_rps])\n', (9367, 9517), True, 'import numpy as np\n'), ((9842, 9883), 'numpy.random.normal', 'np.random.normal', (['(0)', 'self._noise_shape', '(2)'], {}), '(0, self._noise_shape, 2)\n', (9858, 9883), True, 'import numpy as np\n'), ((2487, 2535), 'numpy.random.normal', 'np.random.normal', (['self._bias', 'self._scale', 'shape'], {}), '(self._bias, self._scale, shape)\n', (2503, 2535), True, 'import numpy as np\n'), ((3404, 3451), 'numpy.random.uniform', 'np.random.uniform', (['self._low', 'self._high', 'shape'], {}), '(self._low, self._high, shape)\n', (3421, 3451), True, 'import numpy as np\n')] |
#===============================================================================
# Copyright (c) 2016, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of topslam nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#===============================================================================
import GPy
import numpy as np
def distance_matrix(X):
    """
    Return the array of pairwise differences X[i] - X[j].

    Despite the name, this is not a distance matrix: for X of shape (n, d)
    the result has shape (n, n, d) with entry [i, j] equal to X[i] - X[j].
    Callers (e.g. mean_embedding_dist below) contract these difference
    vectors against a metric tensor to obtain actual distances.
    """
    return (X[:,None]-X[None, :])
# def _distance_G(dM, G):
# tmp = GPy.util.linalg.tdot(G)
# sdist = np.einsum('ijp,ijq,ipqj->ij', dM, dM, tmp)
# return sdist#, 1./2.)
#
# def _distance(dM):
# return np.einsum('ijp,ijp->ij', dM, dM)#, 1./2.)
#
# def _multi_chol(G):
# chols = np.empty(G.shape)
# for i in range(G.shape[0]):
# chols[i] = GPy.util.linalg.jitchol(G[i])
# return chols
#
# def cholesky_dist(X, G):
# """
# first product cholesky on vector, then take distance
# """
# chols = _multi_chol(G)
# distM = np.einsum('iq,iqp->ip',X,chols)
# return np.power(_distance(distance_matrix(distM)), 1./2.)
#
# def cholesky_dist_product(X, G):
# """first take distance, then product onto cholesky"""
# chols = _multi_chol(G)
# return np.power(np.abs(_distance_G((distance_matrix(X)), chols)), 1./2.)
def mean_embedding_dist(X, G):
    """
    Pairwise distances in X under the mean manifold metric.

    For each pair (i, j), the metric tensors G[i] and G[j] are averaged and
    the difference vector X[i] - X[j] is measured under that mean metric:
    d_ij = sqrt((X[i]-X[j])^T * (G[i]+G[j])/2 * (X[i]-X[j])).
    """
    diffs = distance_matrix(X)
    mean_metric = 0.5 * (G[:, None, :, :] + G[None, :, :, :])
    squared = np.einsum('ijp,ijpq,ijq->ij', diffs, mean_metric, diffs)
    return np.sqrt(squared)
"numpy.einsum"
] | [((2977, 3029), 'numpy.einsum', 'np.einsum', (['"""ijp,ijpq,ijq->ij"""', 'dM', 'mean_geometry', 'dM'], {}), "('ijp,ijpq,ijq->ij', dM, mean_geometry, dM)\n", (2986, 3029), True, 'import numpy as np\n')] |
"""
Author: <NAME>
Date: 2021
Description: Class that contains different loss functions and their derivatives.
"""
import numpy as np
class Loss:
    """Dispatches loss-derivative computations used for backpropagation."""

    def __init__(self, loss_function) -> None:
        # Name of the configured loss: 'cross-entropy' selects the
        # cross-entropy derivative; anything else falls back to mean square.
        self.loss = loss_function

    def derivativeLoss(self, batch_labels, y, derivative_matrix):
        """Return the gradient signal (delta) for the configured loss.

        derivative_matrix is only consulted for the mean-square branch.
        """
        if self.loss == 'cross-entropy':
            return self.derivativeCrossEntropy(batch_labels, y)
        return self.derivativeMeanSquare(batch_labels, y, derivative_matrix)

    def derivativeMeanSquare(self, batch_labels, y, derivative_matrix):
        """Derivative of mean square error: (labels - y) * activation'."""
        error = np.subtract(batch_labels, y)
        return np.multiply(error, derivative_matrix)

    def derivativeCrossEntropy(self, batch_labels, y):
        """Derivative of cross-entropy loss: labels - y."""
        return np.subtract(batch_labels, y)
| [
"numpy.multiply",
"numpy.subtract"
] | [((744, 772), 'numpy.subtract', 'np.subtract', (['batch_labels', 'y'], {}), '(batch_labels, y)\n', (755, 772), True, 'import numpy as np\n'), ((789, 824), 'numpy.multiply', 'np.multiply', (['e_n', 'derivative_matrix'], {}), '(e_n, derivative_matrix)\n', (800, 824), True, 'import numpy as np\n'), ((957, 985), 'numpy.subtract', 'np.subtract', (['batch_labels', 'y'], {}), '(batch_labels, y)\n', (968, 985), True, 'import numpy as np\n')] |
# neuronal network connection functions
import numpy as np
#
def gid2pos (numc, startgid, gid):
  # Map a global cell id onto (x, y) coordinates of a square grid of numc
  # cells whose ids start at startgid (row-major layout).
  side = int(np.sqrt(numc))
  offset = gid - startgid
  return (offset % side, int(offset / side))
def prob2conv (prob, npre):
  # Convert a connection probability into a convergence count: the rounded
  # number of the npre presynaptic neurons that should connect (+0.5 trick
  # rounds to nearest integer).
  return int(prob * npre + 0.5)
def connectOnePreNtoOneMNeuron (NBNeurons,offset_pre,offset_post):
  # One-to-one wiring: the i-th presynaptic neuron projects to the i-th
  # motor neuron, with global ids shifted by the two offsets.
  # Returns a list of [presynaptic_gid, postsynaptic_gid] pairs.
  return [[i + offset_pre, i + offset_post] for i in range(NBNeurons)]
def connectLayerswithOverlap (NBpreN, NBpostN, overlap_xdir):
  """Convergent topographic wiring between two square layers.

  Each postsynaptic neuron receives input from a square window of
  presynaptic neurons (overlap_xdir rows x overlap_xdir columns, clipped
  at the grid edges) centred on its topographically corresponding
  presynaptic cell.

  Args:
    NBpreN: number of presynaptic neurons (assumed a perfect square).
    NBpostN: number of postsynaptic neurons (assumed a perfect square).
    overlap_xdir: receptive-field window size in both directions.
  Returns:
    list of [presynaptic_neuron, postsynaptic_neuron] index pairs.
  """
  NBpreN_x = int(np.sqrt(NBpreN))
  NBpreN_y = int(np.sqrt(NBpreN))
  NBpostN_x = int(np.sqrt(NBpostN))
  NBpostN_y = int(np.sqrt(NBpostN))
  convergence_factor = NBpreN/NBpostN
  convergence_factor_x = np.ceil(np.sqrt(convergence_factor))
  convergence_factor_y = np.ceil(np.sqrt(convergence_factor))
  overlap_ydir = overlap_xdir  # square window: same extent in both directions
  # Row-major maps from (row, col) grid position to linear neuron index.
  preNIndices = np.arange(NBpreN_x * NBpreN_y).reshape(NBpreN_x, NBpreN_y)
  postNIndices = np.arange(NBpostN_x * NBpostN_y).reshape(NBpostN_x, NBpostN_y)
  blist = []
  for i in range(NBpostN_x):
    for j in range(NBpostN_y):
      postN = int(postNIndices[i,j])
      if convergence_factor_x>1:
        # Presynaptic grid is larger: sub-sample it by the convergence factor.
        preN = preNIndices[int(i*convergence_factor_y),int(j*convergence_factor_x)]
      else:
        preN = int(postN)
      preN_ind = np.where(preNIndices==preN)
      # Window bounds, clipped to the presynaptic grid (boundary conditions).
      x0 = max(int(preN_ind[0][0]) - int(overlap_xdir/2), 0)
      y0 = max(int(preN_ind[1][0]) - int(overlap_ydir/2), 0)
      xlast = min(int(preN_ind[0][0]) + int(overlap_xdir/2), NBpreN_x-1)
      ylast = min(int(preN_ind[1][0]) + int(overlap_ydir/2), NBpreN_y-1)
      for xi in range(x0, xlast+1):
        for yi in range(y0, ylast+1):
          blist.append([int(preNIndices[xi,yi]), postN])  # [pre, post]
  return blist
def connectLayerswithOverlapDiv(NBpreN, NBpostN, overlap_xdir):
  """Divergent topographic wiring between two square layers.

  Mirror image of connectLayerswithOverlap: each presynaptic neuron
  projects onto a square window of postsynaptic neurons
  (overlap_xdir x overlap_xdir, clipped at the grid edges) centred on its
  topographically corresponding postsynaptic cell.

  Args:
    NBpreN: number of presynaptic neurons (assumed a perfect square).
    NBpostN: number of postsynaptic neurons (assumed a perfect square).
    overlap_xdir: projection window size in both directions.
  Returns:
    list of [presynaptic_neuron, postsynaptic_neuron] index pairs.
  """
  NBpreN_x = int(np.sqrt(NBpreN))
  NBpreN_y = int(np.sqrt(NBpreN))
  NBpostN_x = int(np.sqrt(NBpostN))
  NBpostN_y = int(np.sqrt(NBpostN))
  divergence_factor = NBpostN/NBpreN
  divergence_factor_x = np.ceil(np.sqrt(divergence_factor))
  divergence_factor_y = np.ceil(np.sqrt(divergence_factor))
  overlap_ydir = overlap_xdir  # square window: same extent in both directions
  # Row-major maps from (row, col) grid position to linear neuron index.
  preNIndices = np.arange(NBpreN_x * NBpreN_y).reshape(NBpreN_x, NBpreN_y)
  postNIndices = np.arange(NBpostN_x * NBpostN_y).reshape(NBpostN_x, NBpostN_y)
  blist = []
  for i in range(NBpreN_x):
    for j in range(NBpreN_y):
      preN = int(preNIndices[i,j])
      if divergence_factor_x>1:
        # Postsynaptic grid is larger: stretch by the divergence factor.
        postN = postNIndices[int(i*divergence_factor_y),int(j*divergence_factor_x)]
      else:
        postN = int(preN)
      postN_ind = np.where(postNIndices==postN)
      # Window bounds, clipped to the postsynaptic grid (boundary conditions).
      x0 = max(int(postN_ind[0][0]) - int(overlap_xdir/2), 0)
      y0 = max(int(postN_ind[1][0]) - int(overlap_ydir/2), 0)
      xlast = min(int(postN_ind[0][0]) + int(overlap_xdir/2), NBpostN_x-1)
      ylast = min(int(postN_ind[1][0]) + int(overlap_ydir/2), NBpostN_y-1)
      for xi in range(x0, xlast+1):
        for yi in range(y0, ylast+1):
          blist.append([preN, int(postNIndices[xi,yi])])  # [pre, post]
  return blist
| [
"numpy.where",
"numpy.zeros",
"numpy.sqrt"
] | [((1372, 1402), 'numpy.zeros', 'np.zeros', (['(NBpreN_x, NBpreN_y)'], {}), '((NBpreN_x, NBpreN_y))\n', (1380, 1402), True, 'import numpy as np\n'), ((1421, 1453), 'numpy.zeros', 'np.zeros', (['(NBpostN_x, NBpostN_y)'], {}), '((NBpostN_x, NBpostN_y))\n', (1429, 1453), True, 'import numpy as np\n'), ((3657, 3687), 'numpy.zeros', 'np.zeros', (['(NBpreN_x, NBpreN_y)'], {}), '((NBpreN_x, NBpreN_y))\n', (3665, 3687), True, 'import numpy as np\n'), ((3706, 3738), 'numpy.zeros', 'np.zeros', (['(NBpostN_x, NBpostN_y)'], {}), '((NBpostN_x, NBpostN_y))\n', (3714, 3738), True, 'import numpy as np\n'), ((117, 130), 'numpy.sqrt', 'np.sqrt', (['numc'], {}), '(numc)\n', (124, 130), True, 'import numpy as np\n'), ((812, 827), 'numpy.sqrt', 'np.sqrt', (['NBpreN'], {}), '(NBpreN)\n', (819, 827), True, 'import numpy as np\n'), ((848, 863), 'numpy.sqrt', 'np.sqrt', (['NBpreN'], {}), '(NBpreN)\n', (855, 863), True, 'import numpy as np\n'), ((937, 953), 'numpy.sqrt', 'np.sqrt', (['NBpostN'], {}), '(NBpostN)\n', (944, 953), True, 'import numpy as np\n'), ((975, 991), 'numpy.sqrt', 'np.sqrt', (['NBpostN'], {}), '(NBpostN)\n', (982, 991), True, 'import numpy as np\n'), ((1068, 1095), 'numpy.sqrt', 'np.sqrt', (['convergence_factor'], {}), '(convergence_factor)\n', (1075, 1095), True, 'import numpy as np\n'), ((1132, 1159), 'numpy.sqrt', 'np.sqrt', (['convergence_factor'], {}), '(convergence_factor)\n', (1139, 1159), True, 'import numpy as np\n'), ((3315, 3330), 'numpy.sqrt', 'np.sqrt', (['NBpreN'], {}), '(NBpreN)\n', (3322, 3330), True, 'import numpy as np\n'), ((3351, 3366), 'numpy.sqrt', 'np.sqrt', (['NBpreN'], {}), '(NBpreN)\n', (3358, 3366), True, 'import numpy as np\n'), ((3388, 3404), 'numpy.sqrt', 'np.sqrt', (['NBpostN'], {}), '(NBpostN)\n', (3395, 3404), True, 'import numpy as np\n'), ((3426, 3442), 'numpy.sqrt', 'np.sqrt', (['NBpostN'], {}), '(NBpostN)\n', (3433, 3442), True, 'import numpy as np\n'), ((3517, 3543), 'numpy.sqrt', 'np.sqrt', (['divergence_factor'], {}), 
'(divergence_factor)\n', (3524, 3543), True, 'import numpy as np\n'), ((3579, 3605), 'numpy.sqrt', 'np.sqrt', (['divergence_factor'], {}), '(divergence_factor)\n', (3586, 3605), True, 'import numpy as np\n'), ((2248, 2277), 'numpy.where', 'np.where', (['(preNIndices == preN)'], {}), '(preNIndices == preN)\n', (2256, 2277), True, 'import numpy as np\n'), ((4416, 4447), 'numpy.where', 'np.where', (['(postNIndices == postN)'], {}), '(postNIndices == postN)\n', (4424, 4447), True, 'import numpy as np\n')] |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.naive_bayes import BernoulliNB
from hmmlearn import hmm
def nb_train(train_data):
    """Estimate naive-Bayes parameters from the training frame.

    Returns (prior, con_pro): per-letter class priors P(letter), and the
    per-class pixel means P(pixel = 1 | letter) taken over the pixel
    columns (positions 5:134 of the grouped frame).
    """
    grouped = train_data.groupby(["letter"])
    counts = grouped.count()
    prior = counts["id"] / len(train_data)
    # Mean of the binary pixel columns per letter = conditional probability.
    con_pro = grouped.sum().iloc[:, 5:134] / counts.iloc[:, 5:134]
    return prior, con_pro
def nb_accuracy(test_data, train_parameter):
    """Score naive-Bayes predictions on test_data.

    Returns (accuracy, predictions) where predictions[i] is the numeric
    class (0-25) predicted for the i-th test row and accuracy is the
    fraction of rows whose prediction matches the 'letter' column.
    """
    letter_to_idx = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
    predict_result = np.zeros(len(test_data))
    correct = 0
    for pos, (_, row) in enumerate(test_data.iterrows()):
        predict_result[pos] = nb_predict(row, train_parameter)
        if predict_result[pos] == letter_to_idx.get(row["letter"]):
            correct += 1
    accuracy = correct / len(test_data)
    return accuracy, predict_result
def nb_predict(data, train_parameter):
    """Return the argmax class (0-25) for one sample under naive Bayes.

    Sums the log-likelihoods of the 128 binary pixels (positions 6..133 of
    the row) plus the log prior for each of the 26 letter classes.
    NOTE(review): `prior[i]` is indexed positionally here, while nb_train
    returns a letter-indexed Series — confirm callers pass an array-like.
    """
    prior, con_pro = train_parameter
    result_pro = np.zeros(26)
    for i in range(26):
        terms = []
        for j in range(128):
            p = con_pro.iloc[i, j]
            terms.append(np.log(p) if data.iloc[6 + j] == 1 else np.log(1 - p))
        result_pro[i] = np.sum(terms) + np.log(prior[i])
    return np.argmax(result_pro)
def hmm_train(hmm_train_data):
    """Estimate HMM parameters (transition, emission, start) by counting.

    Hidden states are the true letters a-z; observations are the
    naive-Bayes predictions in the 'bayes' column.  A row with
    next_id == -1 ends a word and contributes no transition count.
    Rows are normalized to probabilities; letters never observed yield
    nan rows (0/0).
    """
    letter_to_idx = {c: i for i, c in enumerate("abcdefghijklmnopqrstuvwxyz")}
    transitions = np.zeros((26, 26))
    starts = np.zeros(26)
    emissions = np.zeros((26, 26))
    for _, row in hmm_train_data.iterrows():
        state = letter_to_idx.get(row["letter"])
        starts[state] += 1
        emissions[state, letter_to_idx.get(row["bayes"])] += 1
        if row["next_id"] == -1:
            continue
        # NOTE(review): the successor row is looked up by original frame
        # label (next_id - 1); assumes those labels survive any fold split.
        next_letter = hmm_train_data.loc[row["next_id"] - 1, "letter"]
        transitions[state, letter_to_idx.get(next_letter)] += 1
    transitions = transitions / transitions.sum(axis=1).reshape(-1, 1)
    emissions = emissions / emissions.sum(axis=1).reshape(-1, 1)
    starts = starts / starts.sum()
    return transitions, emissions, starts
def hmm_accuracy(hmm_test_data, hmm_train_result):
    """Decode the test rows with a MultinomialHMM built from counted
    parameters and return the fraction of letters predicted correctly.

    hmm_train_result is the (transmat, emissionprob, startprob) triple
    produced by hmm_train; rows with next_id == -1 mark word boundaries.
    """
    # Letter -> state index mapping (a=0 ... z=25).
    table = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7, "i": 8, "j": 9, "k": 10, "l": 11, "m": 12,
             "n": 13, "o": 14, "p": 15, "q": 16, "r": 17, "s": 18, "t": 19, "u": 20, "v": 21, "w": 22, "x": 23, "y": 24,
             "z": 25}
    hmm_model = hmm.MultinomialHMM(n_components=26)
    # Inject the count-based parameters directly instead of fitting with EM.
    hmm_model.transmat_, hmm_model.emissionprob_, hmm_model.startprob_ = hmm_train_result
    querySequence = []
    resultSet = []
    for index, row in hmm_test_data.iterrows():
        # NOTE(review): observations fed to the HMM here are the true
        # letters, not the 'bayes' column used during training — confirm
        # this is intended.
        querySequence.append(table.get(hmm_test_data.loc[index, "letter"]))
        if (hmm_test_data.loc[index, "next_id"] == -1):
            # End of a word: Viterbi-decode the accumulated sequence.
            resultSet.extend(hmm_model.predict(np.array(querySequence).reshape(-1, 1)))
            querySequence = []
    # print(resultSet)
    accuracy = 0
    # Inverse mapping: state index -> letter.
    table2 = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t",
              "u",
              "v", 'w', "x", "y", "z"]
    expect_array = []
    # Walk the test rows again, in parallel with the decoded state list.
    hmm_test_data_ite = hmm_test_data.iterrows()
    for j in range(len(resultSet)):
        if table2[resultSet[j]] == hmm_test_data.loc[next(hmm_test_data_ite)[0], "letter"]:
            accuracy += 1
        expect_array.append(table2[resultSet[j]])
    accuracy /= len(resultSet)
    return accuracy
def main():
    """Run 10-fold cross-validation for a Bernoulli naive-Bayes letter
    classifier, then for an HMM smoothing stage on top of its predictions,
    and print both accuracy arrays."""
    # Column names come from a separate file; first column of the data is
    # the letter label, columns 6:134 are the 128 binary pixels.
    header = pd.read_csv("letter.names", header=None)
    # print(header.values.reshape(1, -1)[0])
    data = pd.read_csv("letter.data", sep="\s+", names=header.values.reshape(1, -1)[0])
    '''
    10 cross fold for naive bayes
    '''
    bayes_result = np.zeros(10)
    for i in range(10):
        # Fold i is the held-out test set; everything else is training data.
        train_data = data[data["fold"] != i]
        test_data = data[data["fold"] == i]
        train_data.index = range(len(train_data))
        test_data.index = range(len(test_data))
        clf = BernoulliNB()
        clf.fit(train_data.iloc[:, 6:134], train_data.iloc[:, 1])
        bayes_result[i] = clf.score(test_data.iloc[:, 6:134], test_data.iloc[:, 1])
        # accuracy, predic_result = nb_accuracy(test_data, nb_train(train_data))
        # print(accuracy)
        # print(predic_result)
    '''
    combine the naive bayes predictions to dataset
    '''
    # NOTE(review): clf here is the model trained on the last fold only.
    beyess_predicts = clf.predict(data.iloc[:, 6:134])
    bayes = pd.DataFrame(beyess_predicts.reshape(-1, 1), columns=["bayes"])
    # print(bayes.values.shape)
    hmm_data = pd.concat([data, bayes], axis=1)
    # NOTE(review): this table appears unused inside main.
    table = {"a": 0, "b": 1, "c": 2, "d": 3, "e": 4, "f": 5, "g": 6, "h": 7, "i": 8, "j": 9, "k": 10, "l": 11, "m": 12,
             "n": 13, "o": 14, "p": 15, "q": 16, "r": 17, "s": 18, "t": 19, "u": 20, "v": 21, "w": 22, "x": 23, "y": 24,
             "z": 25}
    '''
    10 cross fold for HMM
    '''
    hmm_result = np.zeros(10)
    for i in range(10):
        hmm_train_data = hmm_data[hmm_data["fold"] != i]
        hmm_test_data = hmm_data[hmm_data["fold"] == i]
        hmm_result[i] = hmm_accuracy(hmm_test_data, hmm_train(hmm_train_data))
    print("The accuracy for bayes without adding HMM")
    print(bayes_result)
    print("The average accuracy: ")
    print(np.average(bayes_result))
    print("The accuracy for bayes after adding HMM")
    print(hmm_result)
    print("The average accuracy: ")
    print(np.average(hmm_result))
if __name__ == '__main__':
    main()
| [
"numpy.sum",
"numpy.average",
"numpy.log",
"numpy.argmax",
"pandas.read_csv",
"numpy.zeros",
"numpy.array",
"hmmlearn.hmm.MultinomialHMM",
"sklearn.naive_bayes.BernoulliNB",
"pandas.concat"
] | [((1351, 1363), 'numpy.zeros', 'np.zeros', (['(26)'], {}), '(26)\n', (1359, 1363), True, 'import numpy as np\n'), ((1560, 1581), 'numpy.argmax', 'np.argmax', (['result_pro'], {}), '(result_pro)\n', (1569, 1581), True, 'import numpy as np\n'), ((1896, 1914), 'numpy.zeros', 'np.zeros', (['(26, 26)'], {}), '((26, 26))\n', (1904, 1914), True, 'import numpy as np\n'), ((1931, 1943), 'numpy.zeros', 'np.zeros', (['(26)'], {}), '(26)\n', (1939, 1943), True, 'import numpy as np\n'), ((1963, 1981), 'numpy.zeros', 'np.zeros', (['(26, 26)'], {}), '((26, 26))\n', (1971, 1981), True, 'import numpy as np\n'), ((2937, 2972), 'hmmlearn.hmm.MultinomialHMM', 'hmm.MultinomialHMM', ([], {'n_components': '(26)'}), '(n_components=26)\n', (2955, 2972), False, 'from hmmlearn import hmm\n'), ((3970, 4010), 'pandas.read_csv', 'pd.read_csv', (['"""letter.names"""'], {'header': 'None'}), "('letter.names', header=None)\n", (3981, 4010), True, 'import pandas as pd\n'), ((4215, 4227), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (4223, 4227), True, 'import numpy as np\n'), ((5002, 5034), 'pandas.concat', 'pd.concat', (['[data, bayes]'], {'axis': '(1)'}), '([data, bayes], axis=1)\n', (5011, 5034), True, 'import pandas as pd\n'), ((5358, 5370), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (5366, 5370), True, 'import numpy as np\n'), ((2539, 2556), 'numpy.sum', 'np.sum', (['startprob'], {}), '(startprob)\n', (2545, 2556), True, 'import numpy as np\n'), ((4453, 4466), 'sklearn.naive_bayes.BernoulliNB', 'BernoulliNB', ([], {}), '()\n', (4464, 4466), False, 'from sklearn.naive_bayes import BernoulliNB\n'), ((5713, 5737), 'numpy.average', 'np.average', (['bayes_result'], {}), '(bayes_result)\n', (5723, 5737), True, 'import numpy as np\n'), ((5860, 5882), 'numpy.average', 'np.average', (['hmm_result'], {}), '(hmm_result)\n', (5870, 5882), True, 'import numpy as np\n'), ((1532, 1548), 'numpy.log', 'np.log', (['prior[i]'], {}), '(prior[i])\n', (1538, 1548), True, 'import numpy as 
np\n'), ((2390, 2417), 'numpy.sum', 'np.sum', (['trasfer_mat'], {'axis': '(1)'}), '(trasfer_mat, axis=1)\n', (2396, 2417), True, 'import numpy as np\n'), ((2467, 2495), 'numpy.sum', 'np.sum', (['emissionprob'], {'axis': '(1)'}), '(emissionprob, axis=1)\n', (2473, 2495), True, 'import numpy as np\n'), ((1420, 1446), 'numpy.log', 'np.log', (['con_pro.iloc[i, j]'], {}), '(con_pro.iloc[i, j])\n', (1426, 1446), True, 'import numpy as np\n'), ((1477, 1507), 'numpy.log', 'np.log', (['(1 - con_pro.iloc[i, j])'], {}), '(1 - con_pro.iloc[i, j])\n', (1483, 1507), True, 'import numpy as np\n'), ((3333, 3356), 'numpy.array', 'np.array', (['querySequence'], {}), '(querySequence)\n', (3341, 3356), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# In[26]:
from scipy.io import loadmat
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.axes_style('whitegrid');
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
# Load the Pavia University hyperspectral cube and its ground-truth labels.
# x: (rows, cols, bands) reflectance cube; y: (rows, cols) class map.
x = loadmat('PaviaU.mat')['paviaU']
y = loadmat('PaviaU_gt.mat')['paviaU_gt']
print(x.shape)
# In[27]:
# Display a randomly chosen spectral band.
fig = plt.figure(figsize = (12, 6))
q = np.random.randint(x.shape[2])
plt.imshow(x[:,:,q], cmap='nipy_spectral')
plt.axis('off')
plt.title(f'Band - {q}')
plt.savefig('IP_Bands.png')
# In[28]:
# Display the ground-truth class map (0 = unlabeled background).
plt.figure(figsize=(12,6))
plt.imshow(y,cmap='jet')
plt.colorbar()
plt.axis('off')
plt.show()
# In[29]:
def extract_pixels(x, y):
    """Flatten the (rows, cols, bands) cube into a per-pixel table.

    One row per pixel, columns 'band1'..'bandN' plus the ground-truth
    'class' column from y.  The table is also written to hsi.csv as a
    side effect, then returned.
    """
    bands = x.shape[2]
    pixels = pd.DataFrame(data = x.reshape(-1, bands))
    labels = pd.DataFrame(data = y.ravel())
    df = pd.concat([pixels, labels], axis=1)
    df.columns = [f'band{i}' for i in range(1, 1 + bands)] + ['class']
    df.to_csv('hsi.csv')
    return df
# Build the per-pixel table and round-trip it through hsi.csv.
df = extract_pixels(x, y)
# In[30]:
df2 = pd.read_csv('hsi.csv')
# Drop the index column that to_csv/read_csv added.
del df2['Unnamed: 0']
df2['class']
# In[31]:
df2.iloc[:,:-1].describe()
# In[32]:
# Reduce the 103 spectral bands to 40 principal components.
from sklearn.decomposition import PCA
pca = PCA(n_components = 40)
dt = pca.fit_transform(df2.iloc[:, :-1].values)
r = pd.concat([pd.DataFrame(data = dt), pd.DataFrame(data = y.ravel())], axis = 1)
r.columns = [f'PC-{i}' for i in range(1,41)]+['class']
r.head()
r.to_csv('hsi_after_pca.csv')
# In[33]:
df4 = pd.read_csv('hsi_after_pca.csv')
del df4['Unnamed: 0']
# In[34]:
# Display another randomly chosen band (same plot as above).
fig = plt.figure(figsize = (12, 6))
q = np.random.randint(x.shape[2])
plt.imshow(x[:,:,q], cmap='nipy_spectral')
plt.axis('off')
plt.title(f'Band - {q}')
plt.savefig('IP_Bands.png')
# In[35]:
# --- SVM classifier on the PCA-reduced pixels ---
# Drop the unlabeled background (class 0) before training.
f = r[r['class'] != 0]
X = f.iloc[:, :-1].values
y = f.loc[:, 'class'].values
names = ['Asphalt','Meadows','Gravel','Trees','Painted metal sheets','Bare Soil','Bitumen','Self-Blocking Bricks','Shadows']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=11, stratify=y)
svm = SVC(C = 100, kernel = 'rbf', cache_size = 10*1024)
svm.fit(X_train, y_train)
ypred = svm.predict(X_test)
# In[36]:
# Confusion-matrix heatmap for the SVM.
data = confusion_matrix(y_test, ypred)
df_cm = pd.DataFrame(data, columns=np.unique(names), index = np.unique(names))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (12,6))
sns.set(font_scale=1.4)
sns.heatmap(df_cm, cmap="Reds", annot=True,annot_kws={"size": 16}, fmt='d')
plt.savefig('cmap.png', dpi=300)
# In[37]:
print(classification_report(y_test, ypred, target_names = names))
# In[38]:
# Per-pixel classification map over the whole scene (0 = unlabeled).
l=[]
for i in range(r.shape[0]):
    if r.iloc[i, -1] == 0:
        l.append(0)
    else:
        l.append(svm.predict(r.iloc[i, :-1].values.reshape(1, -1)))
# In[39]:
# Hard-coded PaviaU scene shape (610 x 340).
clmap = np.array(l).reshape(610, 340,).astype('float')
plt.figure(figsize=(12, 6))
plt.imshow(clmap, cmap='nipy_spectral')
plt.colorbar()
plt.axis('off')
plt.savefig('IP_cmap.png')
plt.show()
# In[ ]:
# In[59]:
# --- Decision-tree classifier on the PCA-reduced pixels ---
from sklearn.tree import DecisionTreeClassifier
# Drop the unlabeled background (class 0) before training.
f = r[r['class'] != 0]
X = f.iloc[:, :-1].values
y = f.loc[:, 'class'].values
names = ['Asphalt','Meadows','Gravel','Trees','Painted metal sheets','Bare Soil','Bitumen','Self-Blocking Bricks','Shadows']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=11, stratify=y)
model = DecisionTreeClassifier()
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
# In[41]:
# Confusion-matrix heatmap for the decision tree.
data = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(data, columns=np.unique(names), index = np.unique(names))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (12,6))
sns.set(font_scale=1.4)
sns.heatmap(df_cm, cmap="Reds", annot=True,annot_kws={"size": 16}, fmt='d')
plt.savefig('cmap2.png', dpi=300)
# In[42]:
from sklearn import metrics
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# In[43]:
# Per-pixel classification map over the whole scene (0 = unlabeled).
l2=[]
for i in range(r.shape[0]):
    if r.iloc[i, -1] == 0:
        l2.append(0)
    else:
        l2.append(model.predict(r.iloc[i, :-1].values.reshape(1, -1)))
cl2map = np.array(l2).reshape(610, 340,).astype('float')
plt.figure(figsize=(12, 6))
plt.imshow(cl2map, cmap='nipy_spectral')
plt.colorbar()
plt.axis('off')
plt.savefig('IP_cmap2.png')
plt.show()
# In[44]:
print(classification_report(y_test, y_pred, target_names = names))
# In[45]:
# --- K-nearest-neighbours classifier on the PCA-reduced pixels ---
from sklearn.neighbors import KNeighborsClassifier
# Drop the unlabeled background (class 0) before training.
f = r[r['class'] != 0]
X = f.iloc[:, :-1].values
y = f.loc[:, 'class'].values
names = ['Asphalt','Meadows','Gravel','Trees','Painted metal sheets','Bare Soil','Bitumen','Self-Blocking Bricks','Shadows']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=11, stratify=y)
model = KNeighborsClassifier(n_neighbors=5)
model = model.fit(X_train,y_train)
y_pred = model.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# In[46]:
# Confusion-matrix heatmap for the KNN model.
data = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(data, columns=np.unique(names), index = np.unique(names))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (12,6))
sns.set(font_scale=1.4)
sns.heatmap(df_cm, cmap="Reds", annot=True,annot_kws={"size": 16}, fmt='d')
plt.savefig('cmap5.png', dpi=300)
# In[47]:
# Per-pixel classification map over the whole scene (0 = unlabeled).
l3=[]
for i in range(r.shape[0]):
    if r.iloc[i, -1] == 0:
        l3.append(0)
    else:
        l3.append(model.predict(r.iloc[i, :-1].values.reshape(1, -1)))
# BUG FIX: the map must be built from l3 (the KNN predictions); the
# original used l2, the decision-tree predictions from the section above.
cl3map = np.array(l3).reshape(610, 340,).astype('float')
plt.figure(figsize=(12, 6))
plt.imshow(cl3map, cmap='nipy_spectral')
plt.colorbar()
plt.axis('off')
plt.savefig('IP_cmap3.png')
plt.show()
# In[61]:
print(classification_report(y_test, y_pred, target_names = names))
# In[48]:
# --- Random-forest classifier on the PCA-reduced pixels ---
from sklearn.ensemble import RandomForestClassifier
# Drop the unlabeled background (class 0) before training.
f = r[r['class'] != 0]
X = f.iloc[:, :-1].values
y = f.loc[:, 'class'].values
names = ['Asphalt','Meadows','Gravel','Trees','Painted metal sheets','Bare Soil','Bitumen','Self-Blocking Bricks','Shadows']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=11, stratify=y)
model3=RandomForestClassifier(n_estimators=100)
model3.fit(X_train,y_train)
y_pred=model3.predict(X_test)
print("Accuracy:",metrics.accuracy_score(y_test, y_pred))
# In[49]:
# Confusion-matrix heatmap for the random forest.
data = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(data, columns=np.unique(names), index = np.unique(names))
df_cm.index.name = 'Actual'
df_cm.columns.name = 'Predicted'
plt.figure(figsize = (12,6))
sns.set(font_scale=1.4)
sns.heatmap(df_cm, cmap="Reds", annot=True,annot_kws={"size": 16}, fmt='d')
plt.savefig('cmap6.png', dpi=300)
# In[50]:
# Per-pixel classification map over the whole scene (0 = unlabeled).
l4=[]
for i in range(r.shape[0]):
    if r.iloc[i, -1] == 0:
        l4.append(0)
    else:
        l4.append(model3.predict(r.iloc[i, :-1].values.reshape(1, -1)))
# BUG FIX: the map must be built from l4 (the random-forest predictions);
# the original used l2, the decision-tree predictions from an earlier section.
cl4map = np.array(l4).reshape(610, 340).astype('float')
plt.figure(figsize=(12, 6))
plt.imshow(cl4map, cmap='nipy_spectral')
plt.colorbar()
plt.axis('off')
plt.savefig('IP_cmap4.png')
plt.show()
# In[62]:
print(classification_report(y_test, y_pred, target_names = names))
# In[ ]:
| [
"matplotlib.pyplot.title",
"seaborn.heatmap",
"scipy.io.loadmat",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.figure",
"numpy.random.randint",
"... | [((175, 202), 'seaborn.axes_style', 'sns.axes_style', (['"""whitegrid"""'], {}), "('whitegrid')\n", (189, 202), True, 'import seaborn as sns\n'), ((533, 560), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (543, 560), True, 'import matplotlib.pyplot as plt\n'), ((569, 598), 'numpy.random.randint', 'np.random.randint', (['x.shape[2]'], {}), '(x.shape[2])\n', (586, 598), True, 'import numpy as np\n'), ((599, 643), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x[:, :, q]'], {'cmap': '"""nipy_spectral"""'}), "(x[:, :, q], cmap='nipy_spectral')\n", (609, 643), True, 'import matplotlib.pyplot as plt\n'), ((642, 657), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (650, 657), True, 'import matplotlib.pyplot as plt\n'), ((658, 682), 'matplotlib.pyplot.title', 'plt.title', (['f"""Band - {q}"""'], {}), "(f'Band - {q}')\n", (667, 682), True, 'import matplotlib.pyplot as plt\n'), ((683, 710), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_Bands.png"""'], {}), "('IP_Bands.png')\n", (694, 710), True, 'import matplotlib.pyplot as plt\n'), ((725, 752), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (735, 752), True, 'import matplotlib.pyplot as plt\n'), ((752, 777), 'matplotlib.pyplot.imshow', 'plt.imshow', (['y'], {'cmap': '"""jet"""'}), "(y, cmap='jet')\n", (762, 777), True, 'import matplotlib.pyplot as plt\n'), ((777, 791), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (789, 791), True, 'import matplotlib.pyplot as plt\n'), ((792, 807), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (800, 807), True, 'import matplotlib.pyplot as plt\n'), ((808, 818), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (816, 818), True, 'import matplotlib.pyplot as plt\n'), ((1139, 1161), 'pandas.read_csv', 'pd.read_csv', (['"""hsi.csv"""'], {}), "('hsi.csv')\n", (1150, 1161), True, 'import pandas as pd\n'), ((1296, 
1316), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(40)'}), '(n_components=40)\n', (1299, 1316), False, 'from sklearn.decomposition import PCA\n'), ((1564, 1596), 'pandas.read_csv', 'pd.read_csv', (['"""hsi_after_pca.csv"""'], {}), "('hsi_after_pca.csv')\n", (1575, 1596), True, 'import pandas as pd\n'), ((1640, 1667), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (1650, 1667), True, 'import matplotlib.pyplot as plt\n'), ((1676, 1705), 'numpy.random.randint', 'np.random.randint', (['x.shape[2]'], {}), '(x.shape[2])\n', (1693, 1705), True, 'import numpy as np\n'), ((1706, 1750), 'matplotlib.pyplot.imshow', 'plt.imshow', (['x[:, :, q]'], {'cmap': '"""nipy_spectral"""'}), "(x[:, :, q], cmap='nipy_spectral')\n", (1716, 1750), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1764), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (1757, 1764), True, 'import matplotlib.pyplot as plt\n'), ((1765, 1789), 'matplotlib.pyplot.title', 'plt.title', (['f"""Band - {q}"""'], {}), "(f'Band - {q}')\n", (1774, 1789), True, 'import matplotlib.pyplot as plt\n'), ((1790, 1817), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_Bands.png"""'], {}), "('IP_Bands.png')\n", (1801, 1817), True, 'import matplotlib.pyplot as plt\n'), ((2075, 2141), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(11)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=11, stratify=y)\n', (2091, 2141), False, 'from sklearn.model_selection import train_test_split\n'), ((2151, 2197), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(100)', 'kernel': '"""rbf"""', 'cache_size': '(10 * 1024)'}), "(C=100, kernel='rbf', cache_size=10 * 1024)\n", (2154, 2197), False, 'from sklearn.svm import SVC\n'), ((2279, 2310), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'ypred'], {}), '(y_test, ypred)\n', (2295, 2310), False, 'from sklearn.metrics import 
accuracy_score, classification_report, confusion_matrix\n'), ((2451, 2478), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2461, 2478), True, 'import matplotlib.pyplot as plt\n'), ((2480, 2503), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.4)'}), '(font_scale=1.4)\n', (2487, 2503), True, 'import seaborn as sns\n'), ((2504, 2580), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'cmap': '"""Reds"""', 'annot': '(True)', 'annot_kws': "{'size': 16}", 'fmt': '"""d"""'}), "(df_cm, cmap='Reds', annot=True, annot_kws={'size': 16}, fmt='d')\n", (2515, 2580), True, 'import seaborn as sns\n'), ((2580, 2612), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cmap.png"""'], {'dpi': '(300)'}), "('cmap.png', dpi=300)\n", (2591, 2612), True, 'import matplotlib.pyplot as plt\n'), ((2922, 2949), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (2932, 2949), True, 'import matplotlib.pyplot as plt\n'), ((2950, 2989), 'matplotlib.pyplot.imshow', 'plt.imshow', (['clmap'], {'cmap': '"""nipy_spectral"""'}), "(clmap, cmap='nipy_spectral')\n", (2960, 2989), True, 'import matplotlib.pyplot as plt\n'), ((2990, 3004), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (3002, 3004), True, 'import matplotlib.pyplot as plt\n'), ((3005, 3020), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3013, 3020), True, 'import matplotlib.pyplot as plt\n'), ((3021, 3047), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_cmap.png"""'], {}), "('IP_cmap.png')\n", (3032, 3047), True, 'import matplotlib.pyplot as plt\n'), ((3048, 3058), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3056, 3058), True, 'import matplotlib.pyplot as plt\n'), ((3376, 3442), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(11)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=11, stratify=y)\n', (3392, 3442), 
False, 'from sklearn.model_selection import train_test_split\n'), ((3453, 3477), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {}), '()\n', (3475, 3477), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((3570, 3602), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3586, 3602), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((3743, 3770), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (3753, 3770), True, 'import matplotlib.pyplot as plt\n'), ((3772, 3795), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.4)'}), '(font_scale=1.4)\n', (3779, 3795), True, 'import seaborn as sns\n'), ((3796, 3872), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'cmap': '"""Reds"""', 'annot': '(True)', 'annot_kws': "{'size': 16}", 'fmt': '"""d"""'}), "(df_cm, cmap='Reds', annot=True, annot_kws={'size': 16}, fmt='d')\n", (3807, 3872), True, 'import seaborn as sns\n'), ((3872, 3905), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cmap2.png"""'], {'dpi': '(300)'}), "('cmap2.png', dpi=300)\n", (3883, 3905), True, 'import matplotlib.pyplot as plt\n'), ((4228, 4255), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (4238, 4255), True, 'import matplotlib.pyplot as plt\n'), ((4256, 4296), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cl2map'], {'cmap': '"""nipy_spectral"""'}), "(cl2map, cmap='nipy_spectral')\n", (4266, 4296), True, 'import matplotlib.pyplot as plt\n'), ((4297, 4311), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (4309, 4311), True, 'import matplotlib.pyplot as plt\n'), ((4312, 4327), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4320, 4327), True, 'import matplotlib.pyplot as plt\n'), ((4328, 4355), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_cmap2.png"""'], {}), "('IP_cmap2.png')\n", 
(4339, 4355), True, 'import matplotlib.pyplot as plt\n'), ((4356, 4366), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4364, 4366), True, 'import matplotlib.pyplot as plt\n'), ((4754, 4820), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(11)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=11, stratify=y)\n', (4770, 4820), False, 'from sklearn.model_selection import train_test_split\n'), ((4832, 4867), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': '(5)'}), '(n_neighbors=5)\n', (4852, 4867), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((5015, 5047), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5031, 5047), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((5188, 5215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (5198, 5215), True, 'import matplotlib.pyplot as plt\n'), ((5217, 5240), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.4)'}), '(font_scale=1.4)\n', (5224, 5240), True, 'import seaborn as sns\n'), ((5241, 5317), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'cmap': '"""Reds"""', 'annot': '(True)', 'annot_kws': "{'size': 16}", 'fmt': '"""d"""'}), "(df_cm, cmap='Reds', annot=True, annot_kws={'size': 16}, fmt='d')\n", (5252, 5317), True, 'import seaborn as sns\n'), ((5317, 5350), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cmap5.png"""'], {'dpi': '(300)'}), "('cmap5.png', dpi=300)\n", (5328, 5350), True, 'import matplotlib.pyplot as plt\n'), ((5573, 5600), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (5583, 5600), True, 'import matplotlib.pyplot as plt\n'), ((5601, 5641), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cl3map'], {'cmap': '"""nipy_spectral"""'}), "(cl3map, cmap='nipy_spectral')\n", 
(5611, 5641), True, 'import matplotlib.pyplot as plt\n'), ((5642, 5656), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (5654, 5656), True, 'import matplotlib.pyplot as plt\n'), ((5657, 5672), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (5665, 5672), True, 'import matplotlib.pyplot as plt\n'), ((5673, 5700), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_cmap3.png"""'], {}), "('IP_cmap3.png')\n", (5684, 5700), True, 'import matplotlib.pyplot as plt\n'), ((5701, 5711), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5709, 5711), True, 'import matplotlib.pyplot as plt\n'), ((6100, 6166), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)', 'random_state': '(11)', 'stratify': 'y'}), '(X, y, test_size=0.2, random_state=11, stratify=y)\n', (6116, 6166), False, 'from sklearn.model_selection import train_test_split\n'), ((6178, 6218), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'n_estimators': '(100)'}), '(n_estimators=100)\n', (6200, 6218), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((6358, 6390), 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6374, 6390), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((6531, 6558), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (6541, 6558), True, 'import matplotlib.pyplot as plt\n'), ((6560, 6583), 'seaborn.set', 'sns.set', ([], {'font_scale': '(1.4)'}), '(font_scale=1.4)\n', (6567, 6583), True, 'import seaborn as sns\n'), ((6584, 6660), 'seaborn.heatmap', 'sns.heatmap', (['df_cm'], {'cmap': '"""Reds"""', 'annot': '(True)', 'annot_kws': "{'size': 16}", 'fmt': '"""d"""'}), "(df_cm, cmap='Reds', annot=True, annot_kws={'size': 16}, fmt='d')\n", (6595, 6660), True, 'import seaborn as sns\n'), ((6660, 6693), 
'matplotlib.pyplot.savefig', 'plt.savefig', (['"""cmap6.png"""'], {'dpi': '(300)'}), "('cmap6.png', dpi=300)\n", (6671, 6693), True, 'import matplotlib.pyplot as plt\n'), ((6916, 6943), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(12, 6)'}), '(figsize=(12, 6))\n', (6926, 6943), True, 'import matplotlib.pyplot as plt\n'), ((6944, 6984), 'matplotlib.pyplot.imshow', 'plt.imshow', (['cl4map'], {'cmap': '"""nipy_spectral"""'}), "(cl4map, cmap='nipy_spectral')\n", (6954, 6984), True, 'import matplotlib.pyplot as plt\n'), ((6985, 6999), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (6997, 6999), True, 'import matplotlib.pyplot as plt\n'), ((7000, 7015), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (7008, 7015), True, 'import matplotlib.pyplot as plt\n'), ((7016, 7043), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""IP_cmap4.png"""'], {}), "('IP_cmap4.png')\n", (7027, 7043), True, 'import matplotlib.pyplot as plt\n'), ((7044, 7054), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7052, 7054), True, 'import matplotlib.pyplot as plt\n'), ((424, 445), 'scipy.io.loadmat', 'loadmat', (['"""PaviaU.mat"""'], {}), "('PaviaU.mat')\n", (431, 445), False, 'from scipy.io import loadmat\n'), ((460, 484), 'scipy.io.loadmat', 'loadmat', (['"""PaviaU_gt.mat"""'], {}), "('PaviaU_gt.mat')\n", (467, 484), False, 'from scipy.io import loadmat\n'), ((900, 920), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'q'}), '(data=q)\n', (912, 920), True, 'import pandas as pd\n'), ((2633, 2689), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'ypred'], {'target_names': 'names'}), '(y_test, ypred, target_names=names)\n', (2654, 2689), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((3966, 4004), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (3988, 4004), False, 'from sklearn import 
metrics\n'), ((4387, 4444), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'names'}), '(y_test, y_pred, target_names=names)\n', (4408, 4444), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((4954, 4992), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4976, 4992), False, 'from sklearn import metrics\n'), ((5732, 5789), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'names'}), '(y_test, y_pred, target_names=names)\n', (5753, 5789), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((6297, 6335), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (6319, 6335), False, 'from sklearn import metrics\n'), ((7075, 7132), 'sklearn.metrics.classification_report', 'classification_report', (['y_test', 'y_pred'], {'target_names': 'names'}), '(y_test, y_pred, target_names=names)\n', (7096, 7132), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), ((1382, 1403), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'dt'}), '(data=dt)\n', (1394, 1403), True, 'import pandas as pd\n'), ((2346, 2362), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (2355, 2362), True, 'import numpy as np\n'), ((2372, 2388), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (2381, 2388), True, 'import numpy as np\n'), ((3638, 3654), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (3647, 3654), True, 'import numpy as np\n'), ((3664, 3680), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (3673, 3680), True, 'import numpy as np\n'), ((5083, 5099), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (5092, 5099), True, 'import numpy as np\n'), ((5109, 5125), 'numpy.unique', 
'np.unique', (['names'], {}), '(names)\n', (5118, 5125), True, 'import numpy as np\n'), ((6426, 6442), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (6435, 6442), True, 'import numpy as np\n'), ((6452, 6468), 'numpy.unique', 'np.unique', (['names'], {}), '(names)\n', (6461, 6468), True, 'import numpy as np\n'), ((2875, 2886), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (2883, 2886), True, 'import numpy as np\n'), ((4180, 4192), 'numpy.array', 'np.array', (['l2'], {}), '(l2)\n', (4188, 4192), True, 'import numpy as np\n'), ((5525, 5537), 'numpy.array', 'np.array', (['l2'], {}), '(l2)\n', (5533, 5537), True, 'import numpy as np\n'), ((6869, 6881), 'numpy.array', 'np.array', (['l2'], {}), '(l2)\n', (6877, 6881), True, 'import numpy as np\n')] |
"""Datasets for CosmoFlow."""
import pickle
import os.path
import functools
import operator
import glob
import torch
import numpy as np
try:
    import h5py
except ImportError:
    # h5py is optional: only catch a missing package, not arbitrary
    # errors (the original bare `except:` hid real import failures).
    # CosmoFlowDataset checks for None and raises a clear error itself.
    h5py = None
class CosmoFlowDataset(torch.utils.data.Dataset):
    """Cosmoflow data.

    Serves (universe sub-cube, target parameters) pairs from HDF5 files
    that were pre-split from full universes, with an on-disk `idx` file
    caching the sample names and directory layout.
    """
    # Pattern for numbered shard subdirectories (e.g. '007') used when
    # the dataset was written with samples grouped into subdirs.
    SUBDIR_FORMAT = '{:03d}'
    def __init__(self, data_dir, dataset_size=None,
                 transform=None, transform_y=None, base_universe_size=512):
        """Set up the CosmoFlow HDF5 dataset.
        This expects pre-split universes per split_hdf5_cosmoflow.py.
        You may need to transpose the universes to make the channel
        dimension be first. It is up to you to do this in the
        transforms or preprocessing.
        The sample will be provided to transforms in int16 format.
        The target will be provided to transforms in float format.

        data_dir: directory containing the HDF5 samples and `idx` file.
        dataset_size: optional cap on the number of samples served.
        transform / transform_y: optional callables applied to x / y.
        base_universe_size: edge length of the original full universe.
        """
        super().__init__()
        self.data_dir = data_dir
        self.transform = transform
        self.transform_y = transform_y
        if h5py is None:
            raise ImportError('HDF5 dataset requires h5py')
        # Load info from cached index.
        idx_filename = os.path.join(data_dir, 'idx')
        if not os.path.exists(idx_filename):
            # Only rank 0 builds a missing index; all ranks then meet at
            # the barrier so nobody reads the file before it exists.
            # NOTE(review): assumes torch.distributed is initialized here.
            if torch.distributed.get_rank() == 0:
                CosmoFlowDataset._make_index(
                    data_dir, base_universe_size=base_universe_size)
            # Wait for file to be created.
            torch.distributed.barrier()
        with open(idx_filename, 'rb') as f:
            idx_data = pickle.load(f)
        self.sample_base_filenames = idx_data['filenames']
        self.num_subdirs = idx_data['num_subdirs']
        # Sub-cubes per universe: the cube is cut into
        # (base/split) pieces along each of the three axes.
        self.num_splits = (base_universe_size // idx_data['split_size'])**3
        self.num_samples = len(self.sample_base_filenames) * self.num_splits
        if dataset_size is not None:
            # Optionally truncate the dataset (e.g. for debugging runs).
            self.num_samples = min(dataset_size, self.num_samples)
    def __len__(self):
        return self.num_samples
    def __getitem__(self, index):
        # Handle flat vs nested directory structure.
        base_index = index // self.num_splits # Base filename.
        split_index = index % self.num_splits # Split within the universe.
        if self.num_subdirs:
            # Nested layout: samples are grouped into numbered subdirs
            # and the HDF5 key 'split' holds the sub-cube.
            subdir = CosmoFlowDataset.SUBDIR_FORMAT.format(
                base_index // self.num_subdirs)
            filename = os.path.join(
                self.data_dir,
                subdir,
                self.sample_base_filenames[base_index]
                + f'_{split_index:03d}.hdf5')
            x_idx = 'split'
        else:
            # Flat layout: all files in data_dir, HDF5 key 'full'.
            filename = os.path.join(
                self.data_dir,
                self.sample_base_filenames[base_index]
                + f'_{split_index:03d}.hdf5')
            x_idx = 'full'
        with h5py.File(filename, 'r') as f:
            # [:] materializes the datasets as numpy arrays.
            x, y = f[x_idx][:], f['unitPar'][:]
        # Convert to Tensors.
        x = torch.from_numpy(x)
        y = torch.from_numpy(y)
        if self.transform is not None:
            x = self.transform(x)
        if self.transform_y is not None:
            y = self.transform_y(y)
        return x, y
    @staticmethod
    def _make_index(data_dir, split_size=128, base_universe_size=512):
        """Generate an index file if a dataset does not have one.

        Only supports the flat directory layout; `base_universe_size`
        is accepted for call symmetry but only `split_size` is recorded.
        """
        print(f'Generating index file for {data_dir}', flush=True)
        subdirs = glob.glob(os.path.join(data_dir, '*', ''))
        if subdirs:
            raise RuntimeError(
                'Will not reconstruct index for subdir-based data')
        files = glob.glob(os.path.join(data_dir, '*.hdf5'))
        # Flat layout: record zero subdirs so loading takes the
        # flat-path branch of __getitem__.
        univs_per_subdir = 0
        # Identify the base universe names.
        # [:-4] strips the trailing '_NNN' split suffix from each stem.
        univ_names = set(map(
            lambda x: os.path.splitext(os.path.basename(x))[0][:-4], files))
        data = {
            'split_size': split_size,
            'num_subdirs': univs_per_subdir,
            'filenames': list(univ_names)
        }
        with open(os.path.join(data_dir, 'idx'), 'wb') as f:
            pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
class CosmoFlowDatasetBinary(torch.utils.data.Dataset):
    """CosmoFlow data stored as raw binary files.

    Each sample `<name>.bin` holds four float32 target parameters
    followed by the int16 universe voxels, reshaped to `input_shape`.
    """
    def __init__(self, data_dir, input_shape, dataset_size=None,
                 transform=None, transform_y=None, base_universe_size=512):
        """Load the pickled `idx` file listing the sample base names.

        base_universe_size is accepted for interface parity with the
        HDF5 dataset but is not used here.
        """
        super().__init__()
        self.data_dir = data_dir
        self.input_shape = input_shape
        self.transform = transform
        self.transform_y = transform_y
        # The cached index is required for the binary layout.
        index_path = os.path.join(data_dir, 'idx')
        if not os.path.exists(index_path):
            raise RuntimeError(f'Index file {index_path} not found')
        with open(index_path, 'rb') as index_file:
            self.filenames = pickle.load(index_file)
        num_available = len(self.filenames)
        # Optionally cap the dataset size (e.g. for debugging).
        self.num_samples = (
            num_available if dataset_size is None
            else min(dataset_size, num_available))
    def __len__(self):
        return self.num_samples
    def __getitem__(self, index):
        sample_path = os.path.join(
            self.data_dir, self.filenames[index]) + '.bin'
        with open(sample_path, 'rb') as sample_file:
            # Targets first: four float32 values; the rest is int16 data.
            y = np.fromfile(sample_file, dtype=np.float32, count=4)
            x = np.fromfile(sample_file, dtype=np.int16)
        x = torch.from_numpy(x.reshape(self.input_shape))
        y = torch.from_numpy(y)
        if self.transform is not None:
            x = self.transform(x)
        if self.transform_y is not None:
            y = self.transform_y(y)
        return x, y
class CosmoFlowTransform:
    """Standard per-sample preprocessing for a CosmoFlow universe."""
    def __init__(self, apply_log):
        """apply_log: log1p-transform the data when True; otherwise
        rescale by the mean divided by the element count."""
        self.apply_log = apply_log
    def __call__(self, x):
        # Work in float32; a float input is modified in place.
        x = x.float()
        if self.apply_log:
            x.log1p_()
            return x
        num_elements = functools.reduce(operator.__mul__, x.size())
        x /= x.mean() / num_elements
        return x
    def __repr__(self):
        return f'{type(self).__name__}()'
# Adapted from:
# https://github.com/NVIDIA/DeepLearningExamples/blob/master/PyTorch/Classification/ConvNets/image_classification/dataloaders.py#L319
class PrefetchWrapper:
    """Prefetch ahead and perform preprocessing on GPU."""
    def __init__(self, data_loader, transform):
        """Wrap `data_loader`, staging copies/transform on a side stream.

        data_loader: iterable yielding (sample, target) batches.
        transform: optional callable applied to each sample on GPU.
        """
        self.data_loader = data_loader
        self.transform = transform
        # Will perform transforms on a separate CUDA stream.
        self.stream = torch.cuda.Stream()
        # Simplifies set_epoch.
        # Expose the underlying sampler (if any) so callers can use
        # wrapper.sampler just like a plain DataLoader's.
        if hasattr(data_loader, 'sampler'):
            self.sampler = data_loader.sampler
    @staticmethod
    def prefetch_loader(data_loader, transform, stream):
        """Actual iterator for loading.

        Queues batch i+1's host-to-device copy (and transform) on
        `stream` while batch i is handed to the consumer, then makes the
        current stream wait on `stream` before the staged batch is used.
        """
        first = True
        sample, target = None, None
        for next_sample, next_target in data_loader:
            # Stage the async copy and preprocessing on the side stream.
            with torch.cuda.stream(stream):
                next_sample = next_sample.cuda(non_blocking=True)
                next_target = next_target.cuda(non_blocking=True)
                if transform is not None:
                    next_sample = transform(next_sample)
            if not first:
                # Yield the previously staged batch while the new one
                # is still in flight.
                yield sample, target
            else:
                first = False
            # Synchronize: consumer's stream waits for the staged work.
            torch.cuda.current_stream().wait_stream(stream)
            sample = next_sample
            target = next_target
        yield sample, target # Last sample.
    def __iter__(self):
        return PrefetchWrapper.prefetch_loader(
            self.data_loader, self.transform, self.stream)
    def __len__(self):
        return len(self.data_loader)
class RandomDataset(torch.utils.data.Dataset):
    """Debugging dataset: serves one fixed random (sample, target) pair.

    The same tensors are returned for every index, so epochs are cheap
    and deterministic once constructed.
    """
    def __init__(self, sample_shape, target_shape, dataset_size,
                 transform=None):
        """Create `dataset_size` virtual copies of a single random pair."""
        super().__init__()
        self.sample_shape = sample_shape
        self.target_shape = target_shape
        self.dataset_size = dataset_size
        self.transform = transform
        # Draw the data once: int16 sample (like real CosmoFlow input)
        # and a uniform float target.
        self.sample = torch.randint(0, 1000, sample_shape, dtype=torch.int16)
        self.target = torch.rand(target_shape)
    def __len__(self):
        return self.dataset_size
    def __getitem__(self, index):
        y = self.target
        x = (self.sample if self.transform is None
             else self.transform(self.sample))
        return x, y
| [
"h5py.File",
"torch.randint",
"pickle.dump",
"numpy.fromfile",
"torch.distributed.get_rank",
"torch.distributed.barrier",
"pickle.load",
"torch.rand",
"torch.cuda.current_stream",
"torch.cuda.Stream",
"torch.cuda.stream",
"torch.from_numpy"
] | [((2896, 2915), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (2912, 2915), False, 'import torch\n'), ((2928, 2947), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (2944, 2947), False, 'import torch\n'), ((5318, 5337), 'torch.from_numpy', 'torch.from_numpy', (['x'], {}), '(x)\n', (5334, 5337), False, 'import torch\n'), ((5350, 5369), 'torch.from_numpy', 'torch.from_numpy', (['y'], {}), '(y)\n', (5366, 5369), False, 'import torch\n'), ((6561, 6580), 'torch.cuda.Stream', 'torch.cuda.Stream', ([], {}), '()\n', (6578, 6580), False, 'import torch\n'), ((8108, 8163), 'torch.randint', 'torch.randint', (['(0)', '(1000)', 'sample_shape'], {'dtype': 'torch.int16'}), '(0, 1000, sample_shape, dtype=torch.int16)\n', (8121, 8163), False, 'import torch\n'), ((8186, 8210), 'torch.rand', 'torch.rand', (['target_shape'], {}), '(target_shape)\n', (8196, 8210), False, 'import torch\n'), ((1432, 1459), 'torch.distributed.barrier', 'torch.distributed.barrier', ([], {}), '()\n', (1457, 1459), False, 'import torch\n'), ((1527, 1541), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1538, 1541), False, 'import pickle\n'), ((2775, 2799), 'h5py.File', 'h5py.File', (['filename', '"""r"""'], {}), "(filename, 'r')\n", (2784, 2799), False, 'import h5py\n'), ((3990, 4035), 'pickle.dump', 'pickle.dump', (['data', 'f', 'pickle.HIGHEST_PROTOCOL'], {}), '(data, f, pickle.HIGHEST_PROTOCOL)\n', (4001, 4035), False, 'import pickle\n'), ((4742, 4756), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4753, 4756), False, 'import pickle\n'), ((5147, 5188), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.float32', 'count': '(4)'}), '(f, dtype=np.float32, count=4)\n', (5158, 5188), True, 'import numpy as np\n'), ((5205, 5235), 'numpy.fromfile', 'np.fromfile', (['f'], {'dtype': 'np.int16'}), '(f, dtype=np.int16)\n', (5216, 5235), True, 'import numpy as np\n'), ((1227, 1255), 'torch.distributed.get_rank', 'torch.distributed.get_rank', ([], {}), '()\n', (1253, 
1255), False, 'import torch\n'), ((6950, 6975), 'torch.cuda.stream', 'torch.cuda.stream', (['stream'], {}), '(stream)\n', (6967, 6975), False, 'import torch\n'), ((7333, 7360), 'torch.cuda.current_stream', 'torch.cuda.current_stream', ([], {}), '()\n', (7358, 7360), False, 'import torch\n')] |
# Copyright 2019-2021, the MIDOSS project contributors, The University of British Columbia,
# and Dalhousie University.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy
import xarray
class weighting_matrix:
    """Interpolation weights mapping a source grid onto the model grid.

    Loads, for each output grid point, the four neighbour (y, x) index
    grids and their weights from a netCDF weights file.
    """
    def __init__(self, path):
        """Read the four neighbour index/weight grids from `path`."""
        weights_file = xarray.open_dataset(path)
        y, x, weights = weights_file.y, weights_file.x, weights_file.weights
        # One (y, x, weight) grid per neighbour; indices cast to int so
        # they can be used directly for array indexing.
        self.y_indices = tuple(
            y.isel(index=k).values.astype(int) for k in range(4))
        self.x_indices = tuple(
            x.isel(index=k).values.astype(int) for k in range(4))
        self.weights = tuple(
            weights.isel(index=k).values for k in range(4))
def hrdps(windarr, weighting_matrix_obj):
    """Interpolate an HRDPS field onto the model grid.

    windarr: 2-D (y, x) field or 3-D (time, y, x) stack.
    weighting_matrix_obj: object with `y_indices`, `x_indices` and
        `weights` attributes, each a 4-tuple of arrays shaped like the
        output grid (898 x 398 for SalishSeaCast).

    Returns the 4-neighbour weighted sum on the output grid,
    time-first when the input is 3-D.

    Fixes vs original: the final transpose is now applied only to 3-D
    results (it raised ValueError on 2-D input), and the output grid
    size is taken from the weighting arrays instead of being
    hard-coded to 898 x 398.
    """
    shape = windarr.shape
    ndims = len(shape)
    wind_y = weighting_matrix_obj.y_indices
    wind_x = weighting_matrix_obj.x_indices
    wind_w = weighting_matrix_obj.weights
    # Output grid dimensions come from the weighting matrix itself.
    n_y, n_x = wind_y[0].shape
    if ndims == 3:
        # (time, y, x) -> (y, x, time) so [y][x] yields a time series.
        windarr = numpy.transpose(windarr, [1, 2, 0])
        new_grid = numpy.zeros([n_y, n_x, shape[0]])
    elif ndims == 2:
        new_grid = numpy.zeros([n_y, n_x])
    for i in range(n_y):
        for j in range(n_x):
            # Weighted sum of the four source neighbours.
            total = 0.0
            for y_k, x_k, w_k in zip(wind_y, wind_x, wind_w):
                total = total + windarr[y_k[i][j]][x_k[i][j]] * w_k[i][j]
            new_grid[i][j] = total
    if ndims == 3:
        # Back to (time, y, x); 2-D results need no transpose.
        new_grid = numpy.transpose(new_grid, [2, 0, 1])
    return new_grid
def wavewatch(wavewatcharr, weighting_matrix_obj):
    """Interpolate a WaveWatch III field onto the model grid.

    wavewatcharr: 2-D (y, x) field or 3-D (time, y, x) stack.
    weighting_matrix_obj: object with `y_indices`, `x_indices` and
        `weights` attributes, each a 4-tuple of arrays shaped like the
        output grid (898 x 398 for SalishSeaCast).

    Returns the weighted sum of valid neighbours on the output grid,
    time-first when the input is 3-D.  Output points with no valid
    source neighbour are left as NaN.  Weights of missing neighbours
    are dropped, not renormalized (matching the original behaviour).

    Improvement vs original: the output grid size is taken from the
    weighting arrays instead of being hard-coded to 898 x 398.
    """
    # Neighbours outside the WaveWatch domain are flagged in the
    # weighting file by int64 min (np.nan cast to int).
    missing = -9223372036854775808
    shape = wavewatcharr.shape
    ndims = len(shape)
    wave_y = weighting_matrix_obj.y_indices
    wave_x = weighting_matrix_obj.x_indices
    wave_w = weighting_matrix_obj.weights
    # Output grid dimensions come from the weighting matrix itself.
    n_y, n_x = wave_y[0].shape
    if ndims == 3:
        # (time, y, x) -> (y, x, time) so [y][x] yields a time series.
        wavewatcharr = numpy.transpose(wavewatcharr, [1, 2, 0])
        new_grid = numpy.zeros([n_y, n_x, shape[0]])
    elif ndims == 2:
        new_grid = numpy.zeros([n_y, n_x])
    # Start from NaN so points with no valid neighbour stay NaN.
    new_grid[:] = numpy.nan
    # do the interpolation
    for i in range(n_y):
        for j in range(n_x):
            contributions = []
            for y_k, x_k, w_k in zip(wave_y, wave_x, wave_w):
                y = y_k[i][j]
                if y == missing:
                    # Neighbour is outside the WaveWatch domain.
                    continue
                contributions.append(wavewatcharr[y][x_k[i][j]] * w_k[i][j])
            if not contributions:
                continue
            new_grid[i][j] = numpy.nansum(
                numpy.asarray(contributions), axis=0)
    if ndims == 3:
        # Back to (time, y, x).
        new_grid = numpy.transpose(new_grid, [2, 0, 1])
    return new_grid
| [
"numpy.nansum",
"numpy.asarray",
"xarray.open_dataset",
"numpy.zeros",
"numpy.transpose"
] | [((2973, 3009), 'numpy.transpose', 'numpy.transpose', (['new_grid', '[2, 0, 1]'], {}), '(new_grid, [2, 0, 1])\n', (2988, 3009), False, 'import numpy\n'), ((784, 809), 'xarray.open_dataset', 'xarray.open_dataset', (['path'], {}), '(path)\n', (803, 809), False, 'import xarray\n'), ((1786, 1821), 'numpy.transpose', 'numpy.transpose', (['windarr', '[1, 2, 0]'], {}), '(windarr, [1, 2, 0])\n', (1801, 1821), False, 'import numpy\n'), ((1841, 1874), 'numpy.zeros', 'numpy.zeros', (['[898, 398, shape[0]]'], {}), '([898, 398, shape[0]])\n', (1852, 1874), False, 'import numpy\n'), ((3179, 3219), 'numpy.transpose', 'numpy.transpose', (['wavewatcharr', '[1, 2, 0]'], {}), '(wavewatcharr, [1, 2, 0])\n', (3194, 3219), False, 'import numpy\n'), ((3239, 3272), 'numpy.zeros', 'numpy.zeros', (['[898, 398, shape[0]]'], {}), '([898, 398, shape[0]])\n', (3250, 3272), False, 'import numpy\n'), ((5197, 5233), 'numpy.transpose', 'numpy.transpose', (['new_grid', '[2, 0, 1]'], {}), '(new_grid, [2, 0, 1])\n', (5212, 5233), False, 'import numpy\n'), ((1920, 1943), 'numpy.zeros', 'numpy.zeros', (['[898, 398]'], {}), '([898, 398])\n', (1931, 1943), False, 'import numpy\n'), ((3313, 3336), 'numpy.zeros', 'numpy.zeros', (['[898, 398]'], {}), '([898, 398])\n', (3324, 3336), False, 'import numpy\n'), ((5045, 5065), 'numpy.asarray', 'numpy.asarray', (['array'], {}), '(array)\n', (5058, 5065), False, 'import numpy\n'), ((5091, 5118), 'numpy.nansum', 'numpy.nansum', (['array'], {'axis': '(0)'}), '(array, axis=0)\n', (5103, 5118), False, 'import numpy\n')] |
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.datasets import SequenceDataset, default_data_path
class TimeFeature:
    """Base class for time-feature encoders.

    Subclasses map a ``pd.DatetimeIndex`` to values in [-0.5, 0.5].
    """
    def __init__(self):
        pass
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        pass
    def __repr__(self):
        return f"{type(self).__name__}()"
class SecondOfMinute(TimeFeature):
    """Second of minute encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
    """Minute of hour encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
    """Hour of day encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
    """Day of week encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
    """Day of month encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
    """Day of year encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
    """Month of year encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
    """Week of year encoded as value between [-0.5, 0.5]"""
    def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
        return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
    """
    Returns a list of time features that will be appropriate for the given frequency string.

    Parameters
    ----------
    freq_str
        Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
    """
    # Coarser frequencies need fewer (finer-grained) encoders.
    feature_map = {
        offsets.YearEnd: [],
        offsets.QuarterEnd: [MonthOfYear],
        offsets.MonthEnd: [MonthOfYear],
        offsets.Week: [DayOfMonth, WeekOfYear],
        offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Minute: [
            MinuteOfHour, HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
        offsets.Second: [
            SecondOfMinute, MinuteOfHour, HourOfDay, DayOfWeek,
            DayOfMonth, DayOfYear],
    }
    parsed = to_offset(freq_str)
    for offset_type, encoder_classes in feature_map.items():
        if isinstance(parsed, offset_type):
            return [encoder() for encoder in encoder_classes]
    supported_freq_msg = f"""
    Unsupported frequency {freq_str}
    The following frequencies are supported:
    Y - yearly
    alias: A
    M - monthly
    W - weekly
    D - daily
    B - business days
    H - hourly
    T - minutely
    alias: min
    S - secondly
    """
    raise RuntimeError(supported_freq_msg)
def time_features(dates, timeenc=1, freq="h"):
    """
    > `time_features` takes in a `dates` dataframe with a 'dates' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0:
    > * m - [month]
    > * w - [month]
    > * d - [month, day, weekday]
    > * b - [month, day, weekday]
    > * h - [month, day, weekday, hour]
    > * t - [month, day, weekday, hour, *minute]
    >
    > If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
    > * Q - [month]
    > * M - [month]
    > * W - [Day of month, week of year]
    > * D - [Day of week, day of month, day of year]
    > * B - [Day of week, day of month, day of year]
    > * H - [Hour of day, day of week, day of month, day of year]
    > * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
    > * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
    *minute returns a number from 0-3 corresponding to the 15 minute period it falls into.

    Note: mutates `dates` in place when `timeenc == 0` (adds the
    extracted calendar columns).
    """
    if timeenc == 0:
        # Fix: the original passed `1` as a second positional argument
        # to Series.apply, where it bound to `convert_dtype` (deprecated
        # and removed in modern pandas) -- apparently confused with
        # DataFrame.apply's `axis`.  Plain apply is equivalent.
        dates["month"] = dates.date.apply(lambda row: row.month)
        dates["day"] = dates.date.apply(lambda row: row.day)
        dates["weekday"] = dates.date.apply(lambda row: row.weekday())
        dates["hour"] = dates.date.apply(lambda row: row.hour)
        dates["minute"] = dates.date.apply(lambda row: row.minute)
        # Bucket minutes into 15-minute periods (0-3).
        dates["minute"] = dates.minute.map(lambda x: x // 15)
        freq_map = {
            "y": [],
            "m": ["month"],
            "w": ["month"],
            "d": ["month", "day", "weekday"],
            "b": ["month", "day", "weekday"],
            "h": ["month", "day", "weekday", "hour"],
            "t": ["month", "day", "weekday", "hour", "minute"],
        }
        return dates[freq_map[freq.lower()]].values
    if timeenc == 1:
        # Continuous [-0.5, 0.5] encodings, one row per timestamp.
        dates = pd.to_datetime(dates.date.values)
        return np.vstack(
            [feat(dates) for feat in time_features_from_frequency_str(freq)]
        ).transpose(1, 0)
class StandardScaler:
    """Feature-wise standardization fitted on numpy data.

    Works transparently on both numpy arrays and torch tensors: the
    fitted statistics are converted to the input's dtype/device when a
    tensor is passed.
    """
    def __init__(self):
        self.mean = 0.0
        self.std = 1.0
    def fit(self, data):
        """Record per-feature mean and std of `data` (numpy array)."""
        self.mean = data.mean(0)
        self.std = data.std(0)
    def _stats_for(self, data):
        """Return (mean, std) matched to `data`'s container type/device."""
        if torch.is_tensor(data):
            mean = torch.from_numpy(self.mean).type_as(data).to(data.device)
            std = torch.from_numpy(self.std).type_as(data).to(data.device)
            return mean, std
        return self.mean, self.std
    def transform(self, data):
        mean, std = self._stats_for(data)
        return (data - mean) / std
    def inverse_transform(self, data):
        mean, std = self._stats_for(data)
        return (data * std) + mean
class InformerDataset(Dataset):
    """Informer-style forecasting dataset backed by a CSV file.

    __getitem__ yields (seq_x, seq_y, mark, mask): the context window
    zero-padded over the horizon, the prediction-horizon targets, the
    (optionally zero-padded) time-stamp encodings, and an int mask
    flagging horizon positions.

    The only code change vs the original is `size is None` in place of
    the non-idiomatic `size == None` (PEP 8 identity comparison).
    """
    def __init__(
        self,
        root_path,
        flag="train",
        size=None,
        features="S",
        data_path="ETTh1.csv",
        target="OT",
        scale=True,
        inverse=False,
        timeenc=0,
        freq="h",
        cols=None,
        eval_stamp=False,
        eval_mask=False,
    ):
        # size [seq_len, label_len, pred_len]
        # info
        if size is None:
            # Default windows: 16-day context, 4-day label/horizon (hourly).
            self.seq_len = 24 * 4 * 4
            self.label_len = 24 * 4
            self.pred_len = 24 * 4
        else:
            self.seq_len = size[0]
            self.label_len = size[1]
            self.pred_len = size[2]
        # init
        assert flag in ["train", "test", "val"]
        type_map = {"train": 0, "val": 1, "test": 2}
        self.set_type = type_map[flag]
        self.features = features
        self.target = target
        self.scale = scale
        self.inverse = inverse
        self.timeenc = timeenc
        self.freq = freq
        self.cols = cols
        self.eval_stamp = eval_stamp
        self.eval_mask = eval_mask
        self.root_path = root_path
        self.data_path = data_path
        self.__read_data__()
    def _borders(self, df_raw):
        """Return (border1s, border2s): split start/end row indices for
        train/val/test as a 70/10/20 split of the CSV, with each eval
        split extended back by seq_len for context."""
        num_train = int(len(df_raw) * 0.7)
        num_test = int(len(df_raw) * 0.2)
        num_vali = len(df_raw) - num_train - num_test
        border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
        border2s = [num_train, num_train + num_vali, len(df_raw)]
        return border1s, border2s
    def _process_columns(self, df_raw):
        """Reorder columns to [date, features..., target]."""
        if self.cols:
            cols = self.cols.copy()
            cols.remove(self.target)
        else:
            cols = list(df_raw.columns)
            cols.remove(self.target)
            cols.remove("date")
        return df_raw[["date"] + cols + [self.target]]
    def __read_data__(self):
        """Load the CSV, scale on train statistics, and encode stamps."""
        self.scaler = StandardScaler()
        df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
        df_raw = self._process_columns(df_raw)
        border1s, border2s = self._borders(df_raw)
        border1 = border1s[self.set_type]
        border2 = border2s[self.set_type]
        # M/MS: all feature columns; S: target column only.
        if self.features == "M" or self.features == "MS":
            cols_data = df_raw.columns[1:]
            df_data = df_raw[cols_data]
        elif self.features == "S":
            df_data = df_raw[[self.target]]
        if self.scale:
            # Fit the scaler on the training split only (no leakage).
            train_data = df_data[border1s[0] : border2s[0]]
            self.scaler.fit(train_data.values)
            data = self.scaler.transform(df_data.values)
        else:
            data = df_data.values
        df_stamp = df_raw[["date"]][border1:border2]
        df_stamp["date"] = pd.to_datetime(df_stamp.date)
        data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
        self.data_x = data[border1:border2]
        if self.inverse:
            # Keep unscaled targets (inverse mode is not fully supported;
            # __getitem__ raises NotImplementedError for it).
            self.data_y = df_data.values[border1:border2]
        else:
            self.data_y = data[border1:border2]
        self.data_stamp = data_stamp
    def __getitem__(self, index):
        s_begin = index
        s_end = s_begin + self.seq_len
        r_begin = s_end - self.label_len
        r_end = r_begin + self.label_len + self.pred_len
        # Context window, zero-padded over the prediction horizon.
        seq_x = self.data_x[s_begin:s_end]
        seq_x = np.concatenate(
            [seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
        )
        if self.inverse:
            seq_y = np.concatenate(
                [
                    self.data_x[r_begin : r_begin + self.label_len],
                    self.data_y[r_begin + self.label_len : r_end],
                ],
                0,
            )
            raise NotImplementedError
        else:
            # seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
            seq_y = self.data_y[s_end:r_end]
        # OLD in Informer codebase
        # seq_x_mark = self.data_stamp[s_begin:s_end]
        # seq_y_mark = self.data_stamp[r_begin:r_end]
        if self.eval_stamp:
            # Real time stamps over the full window incl. horizon.
            mark = self.data_stamp[s_begin:r_end]
        else:
            # Context stamps only; the horizon is zero-padded.
            mark = self.data_stamp[s_begin:s_end]
            mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
        if self.eval_mask:
            # 1 on horizon positions, 0 on context.
            mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
        else:
            mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
        mask = mask[:, None]
        # Add the mask to the timestamps: # 480, 5
        # mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
        seq_x = seq_x.astype(np.float32)
        seq_y = seq_y.astype(np.float32)
        # Integer stamps feed embedding lookups; continuous ones stay float.
        if self.timeenc == 0:
            mark = mark.astype(np.int64)
        else:
            mark = mark.astype(np.float32)
        mask = mask.astype(np.int64)
        return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
    def __len__(self):
        return len(self.data_x) - self.seq_len - self.pred_len + 1
    def inverse_transform(self, data):
        """Map scaled values back to the original units."""
        return self.scaler.inverse_transform(data)
    @property
    def d_input(self):
        """Number of input feature channels."""
        return self.data_x.shape[-1]
    @property
    def d_output(self):
        """Number of predicted channels (1 for multivariate->scalar MS)."""
        if self.features in ["M", "S"]:
            return self.data_x.shape[-1]
        elif self.features == "MS":
            return 1
        else:
            raise NotImplementedError
    @property
    def n_tokens_time(self):
        """Cardinality of each integer time-stamp field (timeenc=0)."""
        if self.freq == 'h':
            return [13, 32, 7, 24]
        elif self.freq == 't':
            return [13, 32, 7, 24, 4]
        else:
            raise NotImplementedError
class _Dataset_ETT_hour(InformerDataset):
    """ETT hourly variant: fixed 12/4/4 (30-day) month splits."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def _borders(self, df_raw):
        # Rows per 30-day month of hourly samples.
        month = 30 * 24
        border1s = [
            0,
            12 * month - self.seq_len,
            16 * month - self.seq_len,
        ]
        border2s = [
            12 * month,
            16 * month,
            20 * month,
        ]
        return border1s, border2s
    def _process_columns(self, df_raw):
        # ETT CSVs already have [date, features..., target] ordering.
        return df_raw
    @property
    def n_tokens_time(self):
        assert self.freq == "h"
        return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
    """ETT 15-minute dataset: hourly borders scaled by 4 rows per hour."""

    def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
        super().__init__(data_path=data_path, freq=freq, **kwargs)

    def _borders(self, df_raw):
        """Start/end row indices of the train/val/test splits (15-minute rows)."""
        month = 30 * 24 * 4  # rows per month at 15-minute resolution
        starts = [0, 12 * month - self.seq_len, 16 * month - self.seq_len]
        ends = [12 * month, 16 * month, 20 * month]
        return starts, ends

    @property
    def n_tokens_time(self):
        assert self.freq == "t"
        return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
    """Weather (WTH) dataset; the default prediction target is the wet-bulb temperature."""

    def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
        super().__init__(
            data_path=data_path,
            target=target,
            **kwargs,
        )
class _Dataset_ECL(InformerDataset):
    """Electricity consuming load (ECL) dataset; default target is client MT_320."""

    def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
        super().__init__(
            data_path=data_path,
            target=target,
            **kwargs,
        )
class InformerSequenceDataset(SequenceDataset):
    """SequenceDataset wrapper around the Informer-style dataset classes.

    Subclasses set ``_dataset_cls`` and ``variants``; this class builds the
    train/val/test splits with identical construction arguments.
    """

    @property
    def n_tokens_time(self):
        # Shape of the dates: depends on `timeenc` and `freq`
        return self.dataset_train.n_tokens_time  # data_stamp.shape[-1]

    @property
    def d_input(self):
        return self.dataset_train.d_input

    @property
    def d_output(self):
        return self.dataset_train.d_output

    @property
    def l_output(self):
        return self.dataset_train.pred_len

    def _get_data_filename(self, variant):
        return self.variants[variant]

    @staticmethod
    def collate_fn(batch, resolution):
        """Stack a batch; subsample inputs and extra tensors along time by `resolution`."""
        x, y, *z = zip(*batch)
        x = torch.stack(x, dim=0)[:, ::resolution]
        y = torch.stack(y, dim=0)
        z = [torch.stack(e, dim=0)[:, ::resolution] for e in z]
        return x, y, *z

    def _make_split(self, flag):
        """Build one dataset split; all splits share the same arguments except `flag`."""
        return self._dataset_cls(
            root_path=self.data_dir,
            flag=flag,
            size=self.size,
            features=self.features,
            data_path=self._get_data_filename(self.variant),
            target=self.target,
            scale=self.scale,
            inverse=self.inverse,
            timeenc=self.timeenc,
            freq=self.freq,
            cols=self.cols,
            eval_stamp=self.eval_stamp,
            eval_mask=self.eval_mask,
        )

    def setup(self):
        """Resolve the data directory and construct the train/val/test splits.

        The original implementation repeated the identical 13-argument
        constructor call three times; the shared call now lives in _make_split.
        """
        self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
        self.dataset_train = self._make_split("train")
        self.dataset_val = self._make_split("val")
        self.dataset_test = self._make_split("test")
class ETTHour(InformerSequenceDataset):
    """Configuration for the hourly ETT benchmark (variants ETTh1/ETTh2)."""
    _name_ = "etth"
    _dataset_cls = _Dataset_ETT_hour
    # Default option values for this dataset.
    init_defaults = {
        "size": None,
        "features": "S",
        "target": "OT",
        "variant": 0,
        "scale": True,
        "inverse": False,
        "timeenc": 0,
        "freq": "h",
        "cols": None,
    }
    # Mapping from `variant` index to CSV filename.
    variants = {
        0: "ETTh1.csv",
        1: "ETTh2.csv",
    }
class ETTMinute(InformerSequenceDataset):
    """Configuration for the 15-minute ETT benchmark (variants ETTm1/ETTm2)."""
    _name_ = "ettm"
    _dataset_cls = _Dataset_ETT_minute
    # Default option values for this dataset.
    init_defaults = {
        "size": None,
        "features": "S",
        "target": "OT",
        "variant": 0,
        "scale": True,
        "inverse": False,
        "timeenc": 0,
        "freq": "t",
        "cols": None,
    }
    # Mapping from `variant` index to CSV filename.
    variants = {
        0: "ETTm1.csv",
        1: "ETTm2.csv",
    }
class Weather(InformerSequenceDataset):
    """Configuration for the WTH weather dataset (single variant)."""
    _name_ = "weather"
    _dataset_cls = _Dataset_Weather
    # Default option values for this dataset.
    init_defaults = {
        "size": None,
        "features": "S",
        "target": "WetBulbCelsius",
        "variant": 0,
        "scale": True,
        "inverse": False,
        "timeenc": 0,
        "freq": "h",
        "cols": None,
    }
    # Mapping from `variant` index to CSV filename.
    variants = {
        0: "WTH.csv",
    }
class ECL(InformerSequenceDataset):
    """Configuration for the ECL electricity-load dataset (single variant)."""
    _name_ = "ecl"
    _dataset_cls = _Dataset_ECL
    # Default option values for this dataset.
    init_defaults = {
        "size": None,
        "features": "S",
        "target": "MT_320",
        "variant": 0,
        "scale": True,
        "inverse": False,
        "timeenc": 0,
        "freq": "h",
        "cols": None,
    }
    # Mapping from `variant` index to CSV filename.
    variants = {
        0: "ECL.csv",
    }
| [
"torch.stack",
"pandas.tseries.frequencies.to_offset",
"warnings.filterwarnings",
"os.path.join",
"numpy.concatenate",
"numpy.zeros",
"numpy.ones",
"pandas.to_datetime",
"torch.tensor",
"torch.is_tensor",
"torch.utils.data.std",
"torch.utils.data.mean",
"torch.from_numpy"
] | [((408, 441), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (431, 441), False, 'import warnings\n'), ((3410, 3429), 'pandas.tseries.frequencies.to_offset', 'to_offset', (['freq_str'], {}), '(freq_str)\n', (3419, 3429), False, 'from pandas.tseries.frequencies import to_offset\n'), ((5904, 5937), 'pandas.to_datetime', 'pd.to_datetime', (['dates.date.values'], {}), '(dates.date.values)\n', (5918, 5937), True, 'import pandas as pd\n'), ((6208, 6220), 'torch.utils.data.mean', 'data.mean', (['(0)'], {}), '(0)\n', (6217, 6220), False, 'from torch.utils import data\n'), ((6240, 6251), 'torch.utils.data.std', 'data.std', (['(0)'], {}), '(0)\n', (6248, 6251), False, 'from torch.utils import data\n'), ((9775, 9804), 'pandas.to_datetime', 'pd.to_datetime', (['df_stamp.date'], {}), '(df_stamp.date)\n', (9789, 9804), True, 'import pandas as pd\n'), ((15113, 15134), 'torch.stack', 'torch.stack', (['y'], {'dim': '(0)'}), '(y, dim=0)\n', (15124, 15134), False, 'import torch\n'), ((6386, 6407), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (6401, 6407), False, 'import torch\n'), ((6545, 6566), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (6560, 6566), False, 'import torch\n'), ((6780, 6801), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (6795, 6801), False, 'import torch\n'), ((6939, 6960), 'torch.is_tensor', 'torch.is_tensor', (['data'], {}), '(data)\n', (6954, 6960), False, 'import torch\n'), ((9007, 9051), 'os.path.join', 'os.path.join', (['self.root_path', 'self.data_path'], {}), '(self.root_path, self.data_path)\n', (9019, 9051), False, 'import os\n'), ((10522, 10638), 'numpy.concatenate', 'np.concatenate', (['[self.data_x[r_begin:r_begin + self.label_len], self.data_y[r_begin + self.\n label_len:r_end]]', '(0)'], {}), '([self.data_x[r_begin:r_begin + self.label_len], self.data_y[\n r_begin + self.label_len:r_end]], 0)\n', (10536, 10638), True, 'import numpy as 
np\n'), ((11938, 11957), 'torch.tensor', 'torch.tensor', (['seq_x'], {}), '(seq_x)\n', (11950, 11957), False, 'import torch\n'), ((11959, 11978), 'torch.tensor', 'torch.tensor', (['seq_y'], {}), '(seq_y)\n', (11971, 11978), False, 'import torch\n'), ((11980, 11998), 'torch.tensor', 'torch.tensor', (['mark'], {}), '(mark)\n', (11992, 11998), False, 'import torch\n'), ((12000, 12018), 'torch.tensor', 'torch.tensor', (['mask'], {}), '(mask)\n', (12012, 12018), False, 'import torch\n'), ((15062, 15083), 'torch.stack', 'torch.stack', (['x'], {'dim': '(0)'}), '(x, dim=0)\n', (15073, 15083), False, 'import torch\n'), ((10408, 10456), 'numpy.zeros', 'np.zeros', (['(self.pred_len, self.data_x.shape[-1])'], {}), '((self.pred_len, self.data_x.shape[-1]))\n', (10416, 10456), True, 'import numpy as np\n'), ((15148, 15169), 'torch.stack', 'torch.stack', (['e'], {'dim': '(0)'}), '(e, dim=0)\n', (15159, 15169), False, 'import torch\n'), ((11245, 11286), 'numpy.zeros', 'np.zeros', (['(self.pred_len, mark.shape[-1])'], {}), '((self.pred_len, mark.shape[-1]))\n', (11253, 11286), True, 'import numpy as np\n'), ((11360, 11382), 'numpy.zeros', 'np.zeros', (['self.seq_len'], {}), '(self.seq_len)\n', (11368, 11382), True, 'import numpy as np\n'), ((11384, 11406), 'numpy.ones', 'np.ones', (['self.pred_len'], {}), '(self.pred_len)\n', (11391, 11406), True, 'import numpy as np\n'), ((11466, 11488), 'numpy.zeros', 'np.zeros', (['self.seq_len'], {}), '(self.seq_len)\n', (11474, 11488), True, 'import numpy as np\n'), ((11490, 11513), 'numpy.zeros', 'np.zeros', (['self.pred_len'], {}), '(self.pred_len)\n', (11498, 11513), True, 'import numpy as np\n'), ((6313, 6340), 'torch.from_numpy', 'torch.from_numpy', (['self.mean'], {}), '(self.mean)\n', (6329, 6340), False, 'import torch\n'), ((6473, 6499), 'torch.from_numpy', 'torch.from_numpy', (['self.std'], {}), '(self.std)\n', (6489, 6499), False, 'import torch\n'), ((6707, 6734), 'torch.from_numpy', 'torch.from_numpy', (['self.mean'], {}), 
'(self.mean)\n', (6723, 6734), False, 'import torch\n'), ((6867, 6893), 'torch.from_numpy', 'torch.from_numpy', (['self.std'], {}), '(self.std)\n', (6883, 6893), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sklearn
def split_data(X, y, train_frac=0.8, seed=1):
    """Shuffle and split (X, y) into train/test parts.

    Returns (X_train, y_train, X_test, y_test, train_idxs).
    Note: reseeds numpy's *global* RNG so splits are reproducible.
    """
    np.random.seed(seed)
    n_total = y.shape[0]
    n_train = int(train_frac * n_total)
    assert n_train > 0
    order = np.random.permutation(n_total)
    train_part = order[:n_train]
    test_part = order[n_train:]
    return X[train_part], y[train_part], X[test_part], y[test_part], train_part
def logistic(x):
    """Elementwise sigmoid: 1 / (1 + e^-x)."""
    denom = 1.0 + np.exp(-x)
    return 1.0 / denom
def log_likelihood(y_true, y_pred):
    """Bernoulli log-likelihood of probabilities `y_pred` under labels `y_true`."""
    positive_term = y_true * np.log(y_pred)
    negative_term = (1 - y_true) * np.log(1.0 - y_pred)
    return positive_term + negative_term
def avg_log_likelihood(y_true, y_pred):
    """Mean Bernoulli log-likelihood over all samples."""
    ll = log_likelihood(y_true, y_pred)
    return ll.mean()
def expand_inputs(l, X, Z):
    """Gaussian RBF kernel features: K[i, j] = exp(-||X_i - Z_j||^2 / (2 l^2))."""
    sq_x = np.sum(X**2, 1)
    sq_z = np.sum(Z**2, 1)
    col_ones = np.ones(Z.shape[0])
    row_ones = np.ones(X.shape[0])
    # Squared pairwise distances via ||x||^2 - 2 x.z + ||z||^2.
    dist2 = np.outer(sq_x, col_ones) - 2 * np.dot(X, Z.T) + np.outer(row_ones, sq_z)
    return np.exp(-(0.5 / l**2) * dist2)
def transform_to_rbf(data, radial_basis, width=0.01, add_bias_term=True):
    """Project `data` onto RBF features centred at `radial_basis`.

    When `add_bias_term` is True a leading column of ones is prepended.
    """
    features = expand_inputs(width, data, radial_basis)
    if not add_bias_term:
        return features
    bias = np.ones([data.shape[0], 1])
    return np.hstack((bias, features))
def log_determinant(mat):
    """Log-determinant of a symmetric positive-definite matrix via its Cholesky factor."""
    chol = np.linalg.cholesky(mat)
    diag_logs = np.log(np.diag(chol))
    return 2 * np.sum(diag_logs)
def hard_prediction(probs):
    """Threshold probabilities at 0.5 into {0, 1} class labels (0.5 itself maps to 0)."""
    labels = np.where(probs > 0.5, 1, 0)
    return labels
def confusion_matrix_norm(y_true, y_pred):
    """Confusion matrix transposed to (predicted x true), each row divided by
    the number of predictions of that class.

    NOTE(review): a class that is never predicted yields a 0/0 row (NaNs),
    matching the original behavior.
    """
    counts = sklearn.metrics.confusion_matrix(y_true, y_pred).astype(np.float64).T
    for row in range(counts.shape[0]):
        counts[row, :] /= np.sum(y_pred == row)
    return counts
| [
"sklearn.metrics.confusion_matrix",
"numpy.outer",
"numpy.random.seed",
"numpy.sum",
"numpy.log",
"numpy.ones",
"numpy.where",
"numpy.exp",
"numpy.random.permutation",
"numpy.dot",
"numpy.diag",
"numpy.linalg.cholesky"
] | [((170, 190), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (184, 190), True, 'import numpy as np\n'), ((378, 413), 'numpy.random.permutation', 'np.random.permutation', (['num_examples'], {}), '(num_examples)\n', (399, 413), True, 'import numpy as np\n'), ((969, 986), 'numpy.sum', 'np.sum', (['(X ** 2)', '(1)'], {}), '(X ** 2, 1)\n', (975, 986), True, 'import numpy as np\n'), ((994, 1011), 'numpy.sum', 'np.sum', (['(Z ** 2)', '(1)'], {}), '(Z ** 2, 1)\n', (1000, 1011), True, 'import numpy as np\n'), ((1023, 1042), 'numpy.ones', 'np.ones', (['Z.shape[0]'], {}), '(Z.shape[0])\n', (1030, 1042), True, 'import numpy as np\n'), ((1056, 1075), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (1063, 1075), True, 'import numpy as np\n'), ((1161, 1189), 'numpy.exp', 'np.exp', (['(-(0.5 / l ** 2) * r2)'], {}), '(-(0.5 / l ** 2) * r2)\n', (1167, 1189), True, 'import numpy as np\n'), ((1477, 1500), 'numpy.linalg.cholesky', 'np.linalg.cholesky', (['mat'], {}), '(mat)\n', (1495, 1500), True, 'import numpy as np\n'), ((1615, 1642), 'numpy.where', 'np.where', (['(probs > 0.5)', '(1)', '(0)'], {}), '(probs > 0.5, 1, 0)\n', (1623, 1642), True, 'import numpy as np\n'), ((1129, 1149), 'numpy.outer', 'np.outer', (['ones_X', 'Z2'], {}), '(ones_X, Z2)\n', (1137, 1149), True, 'import numpy as np\n'), ((716, 726), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (722, 726), True, 'import numpy as np\n'), ((786, 800), 'numpy.log', 'np.log', (['y_pred'], {}), '(y_pred)\n', (792, 800), True, 'import numpy as np\n'), ((818, 838), 'numpy.log', 'np.log', (['(1.0 - y_pred)'], {}), '(1.0 - y_pred)\n', (824, 838), True, 'import numpy as np\n'), ((1085, 1105), 'numpy.outer', 'np.outer', (['X2', 'ones_Z'], {}), '(X2, ones_Z)\n', (1093, 1105), True, 'import numpy as np\n'), ((1853, 1872), 'numpy.sum', 'np.sum', (['(y_pred == i)'], {}), '(y_pred == i)\n', (1859, 1872), True, 'import numpy as np\n'), ((1112, 1126), 'numpy.dot', 'np.dot', (['X', 'Z.T'], {}), '(X, 
Z.T)\n', (1118, 1126), True, 'import numpy as np\n'), ((1372, 1399), 'numpy.ones', 'np.ones', (['[data.shape[0], 1]'], {}), '([data.shape[0], 1])\n', (1379, 1399), True, 'import numpy as np\n'), ((1552, 1571), 'numpy.diag', 'np.diag', (['cholesky_l'], {}), '(cholesky_l)\n', (1559, 1571), True, 'import numpy as np\n'), ((1702, 1750), 'sklearn.metrics.confusion_matrix', 'sklearn.metrics.confusion_matrix', (['y_true', 'y_pred'], {}), '(y_true, y_pred)\n', (1734, 1750), False, 'import sklearn\n')] |
__author__ = 'jrx'
import argparse
import PIL.Image as Image
import numpy as np
from encoder.algorithms.xor_encoding import XorEncoding
def instantiate_algorithm(args):
    """Build the encoding-algorithm object selected by `args.algorithm`."""
    if args.algorithm != 'xor_encoding':
        raise RuntimeError('Algorithm type not detected')
    return XorEncoding(block_size=args.block_size, intensity=args.intensity)
def info_action(args):
    """Print how much payload the input image can hold (bits)."""
    if not args.infile:
        raise RuntimeError('You must specify infile for this action')
    algorithm = instantiate_algorithm(args)
    image = Image.open(args.infile)
    assert image.mode == 'RGB', 'Only RGB mode images are supported!'
    capacity = algorithm.data_capacity(np.array(image).shape)
    if args.verbose:
        print('Data capacity {:.2f} b/{:.2f} kb/{:.2f} mb'.format(capacity, capacity / 1024, capacity / 1024 / 1024))
    else:
        print(capacity)
def encode_action(args):
    """Embed the payload from `datafile` into `infile` and write the result to `outfile`."""
    for attr in ('infile', 'outfile', 'datafile'):
        if not getattr(args, attr):
            raise RuntimeError('You must specify {} for this action'.format(attr))
    algorithm = instantiate_algorithm(args)
    image = Image.open(args.infile)
    assert image.mode == 'RGB', 'Only RGB mode images are supported!'
    payload = np.fromfile(args.datafile, dtype=np.uint8)
    encoded = algorithm.encode(np.array(image), payload)
    Image.fromarray(encoded, 'RGB').save(args.outfile)
def decode_action(args):
    """Extract the embedded payload from `infile` and write it to `outfile`."""
    for attr in ('infile', 'outfile'):
        if not getattr(args, attr):
            raise RuntimeError('You must specify {} for this action'.format(attr))
    algorithm = instantiate_algorithm(args)
    image = Image.open(args.infile)
    assert image.mode == 'RGB', 'Only RGB mode images are supported!'
    payload = algorithm.decode(np.array(image))
    payload.tofile(args.outfile)
def main():
    """Parse command-line arguments and dispatch to the selected action."""
    parser = argparse.ArgumentParser(description='Encode data in your images')
    parser.add_argument('action', choices=['info', 'encode', 'decode'],
                        help="Specify which action you want to choose")
    parser.add_argument('-i', '--infile', help='Specify input file', default='')
    parser.add_argument('-o', '--outfile', help='Specify output file', default='')
    parser.add_argument('-a', '--algorithm', help='Choose your algorithm', default='xor_encoding')
    parser.add_argument('-d', '--datafile', help='Specify data file', default='')
    parser.add_argument('-v', '--verbose', help='Make program more verbose', default='')
    group = parser.add_argument_group('Algorithm parameters')
    group.add_argument('--block_size', type=int, help='Algorithm block size', default=64*64)
    group.add_argument('--intensity', type=int, choices=[x + 1 for x in range(8)],
                       help='Algorithm intensity', default=8)
    args = parser.parse_args()
    # `action` is constrained by `choices`, so exactly one handler runs.
    handlers = {'info': info_action, 'encode': encode_action, 'decode': decode_action}
    handlers[args.action](args)
if __name__ == '__main__':
    main()
| [
"argparse.ArgumentParser",
"numpy.fromfile",
"PIL.Image.open",
"numpy.array",
"PIL.Image.fromarray",
"encoder.algorithms.xor_encoding.XorEncoding"
] | [((610, 633), 'PIL.Image.open', 'Image.open', (['args.infile'], {}), '(args.infile)\n', (620, 633), True, 'import PIL.Image as Image\n'), ((716, 728), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (724, 728), True, 'import numpy as np\n'), ((1361, 1384), 'PIL.Image.open', 'Image.open', (['args.infile'], {}), '(args.infile)\n', (1371, 1384), True, 'import PIL.Image as Image\n'), ((1472, 1514), 'numpy.fromfile', 'np.fromfile', (['args.datafile'], {'dtype': 'np.uint8'}), '(args.datafile, dtype=np.uint8)\n', (1483, 1514), True, 'import numpy as np\n'), ((1537, 1549), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (1545, 1549), True, 'import numpy as np\n'), ((1637, 1676), 'PIL.Image.fromarray', 'Image.fromarray', (['output_rgb_data', '"""RGB"""'], {}), "(output_rgb_data, 'RGB')\n", (1652, 1676), True, 'import PIL.Image as Image\n'), ((2014, 2037), 'PIL.Image.open', 'Image.open', (['args.infile'], {}), '(args.infile)\n', (2024, 2037), True, 'import PIL.Image as Image\n'), ((2127, 2139), 'numpy.array', 'np.array', (['im'], {}), '(im)\n', (2135, 2139), True, 'import numpy as np\n'), ((2261, 2326), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Encode data in your images"""'}), "(description='Encode data in your images')\n", (2284, 2326), False, 'import argparse\n'), ((287, 352), 'encoder.algorithms.xor_encoding.XorEncoding', 'XorEncoding', ([], {'block_size': 'args.block_size', 'intensity': 'args.intensity'}), '(block_size=args.block_size, intensity=args.intensity)\n', (298, 352), False, 'from encoder.algorithms.xor_encoding import XorEncoding\n')] |
import sys
import numpy as np
from itertools import combinations
def fwa(n, roads):
    """All-pairs shortest path lengths (Floyd-Warshall) on an undirected
    unit-weight graph with `n` vertices and edge list `roads`."""
    delta = np.ones(shape=(n, n)) * np.inf
    pairs = list(combinations(range(n), 2))
    np.fill_diagonal(delta, 0)
    for a, b in roads:
        delta[a][b] = 1
        delta[b][a] = 1
    for k in range(n):
        for a, b in pairs:
            # Relax both directions of the unordered pair through k.
            via = delta[a][k] + delta[k][b]
            if via < delta[a][b]:
                delta[a][b] = via
            via = delta[b][k] + delta[k][a]
            if via < delta[b][a]:
                delta[b][a] = via
    return delta
def dash(n, s, delta):
    """For each vertex i, the distance to the nearest marked vertex in `s`
    (checking both directions of the distance matrix)."""
    nearest = []
    for i in range(n):
        best = np.inf
        for j in s:
            best = min(best, delta[j][i], delta[i][j])
        nearest.append(best)
    return nearest
def buildCastle(n, m, s, roads, scenarios):
    """For each scenario (a list of already-marked vertices), pick the vertex
    whose addition minimises the maximum distance from any vertex to its
    nearest marked vertex; ties break toward the largest vertex index.

    n: number of vertices; m: number of roads (unused here); s: number of
    scenarios; roads: undirected edge list; scenarios: list of vertex lists.
    """
    ret = [-1 for _ in range(0, s)]
    delta = fwa(n, roads)  # all-pairs shortest distances
    count = 0
    vertex = list(range(n))
    for sn in scenarios:
        # Candidate vertices: those not already marked in this scenario.
        unmarked = [ele for ele in vertex if ele not in sn]
        d_dash = dash(n, sn, delta)  # current distance to nearest marked vertex
        maxDist = [np.inf for _ in range(n)]
        d = [0 for _ in range(n)]
        for j in unmarked:
            # Eccentricity of the marked set if vertex j were added.
            for kn in range(n):
                d[kn] = min(d_dash[kn], delta[j][kn])
            maxDist[j] = max(d)
        maxDist = np.array(maxDist)
        # Largest index among the vertices achieving the minimal eccentricity.
        ret[count] = max(list(np.where(maxDist == min(maxDist))[0]))
        count = count + 1
    return ret
# Read the header line: n vertices, m roads, s scenarios
astr = sys.stdin.readline().split()
n = int(astr[0])
m = int(astr[1])
s = int(astr[2])
sys.stdin.readline()  # consume the separator line
# Read the m undirected roads, one "u v" pair per line
roads = [(-1, -1) for _ in range(m)]
for i in range(m):
    astr = sys.stdin.readline().split()
    roads[i] = (int(astr[0]), int(astr[1]))
sys.stdin.readline()  # consume the separator line
# Read the s scenarios: each line is "k v1 ... vk"
scenarios = [(-1, []) for _ in range(s)]
for i in range(s):
    astr = sys.stdin.readline().split()
    k = int(astr[0])
    scenarios[i] = [int(v) for v in astr[1:k + 1]]
# Calls your function
ret = buildCastle(n, m, s, roads, scenarios)
# Writes the output, one result per scenario
for i in range(0, s):
    print(ret[i])
| [
"numpy.ones",
"numpy.array",
"sys.stdin.readline"
] | [((1532, 1552), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1550, 1552), False, 'import sys\n'), ((1712, 1732), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1730, 1732), False, 'import sys\n'), ((104, 125), 'numpy.ones', 'np.ones', ([], {'shape': '(n, n)'}), '(shape=(n, n))\n', (111, 125), True, 'import numpy as np\n'), ((1286, 1303), 'numpy.array', 'np.array', (['maxDist'], {}), '(maxDist)\n', (1294, 1303), True, 'import numpy as np\n'), ((1448, 1468), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1466, 1468), False, 'import sys\n'), ((1637, 1657), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1655, 1657), False, 'import sys\n'), ((1825, 1845), 'sys.stdin.readline', 'sys.stdin.readline', ([], {}), '()\n', (1843, 1845), False, 'import sys\n')] |
# -*- coding:utf8 -*-
import threading
import numpy as np
# Each exp is run by its own separate thread.
def exp_run_thread(exp, pos, res_list):
    """Thread target: run a single experiment and store its result at res_list[pos]."""
    print("%s begin" % exp.exp_name)
    res_list[pos]=exp.run()
    # Reseed numpy's global RNG from fresh entropy after the run.
    np.random.seed()
    print("%s end" % exp.exp_name)
def exp_multi_thread_run(exp_list):
    """Run every experiment in its own thread; results are returned in input order."""
    res_list = [None] * len(exp_list)
    workers = []
    for idx, exp in enumerate(exp_list):
        # One thread per experiment block.
        worker = threading.Thread(name="task %s" % exp.exp_name, target=exp_run_thread, args=[exp, idx, res_list])
        workers.append(worker)
        worker.start()
    # Block the main thread until every worker finishes.
    for worker in workers:
        worker.join()
    print("All exp stops")
    return res_list
def exp_single_thread_run(exp_list):
    """Run the experiments sequentially on the calling thread, in input order."""
    res_list = [exp.run() for exp in exp_list]
    print("All exp stops")
    return res_list
| [
"threading.Thread",
"numpy.random.seed"
] | [((190, 206), 'numpy.random.seed', 'np.random.seed', ([], {}), '()\n', (204, 206), True, 'import numpy as np\n'), ((419, 535), 'threading.Thread', 'threading.Thread', ([], {'name': "('task %s' % exp_list[i].exp_name)", 'target': 'exp_run_thread', 'args': '[exp_list[i], i, res_list]'}), "(name='task %s' % exp_list[i].exp_name, target=\n exp_run_thread, args=[exp_list[i], i, res_list])\n", (435, 535), False, 'import threading\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from typing import Dict
import logging
import numpy as np
from .filter import Filter
from ..data_objects import TrajectoryData
###############################################################################
log = logging.getLogger(__name__)  # module-level logger, named after this module
###############################################################################
class EveryNthSubpointFilter(Filter):
    n_per_type: Dict[str, int]
    default_n: int

    def __init__(self, n_per_type: Dict[str, int], default_n: int = 1):
        """
        Keep only every nth subpoint of each agent in each frame
        of simularium data.

        Parameters
        ----------
        n_per_type : Dict[str, int]
            For each agent type name, keep every nth subpoint
            (when the agent has subpoints) and drop the rest
        default_n : int (optional)
            n for agent types missing from n_per_type
            Default: 1
        """
        self.n_per_type = n_per_type
        self.default_n = default_n

    def apply(self, data: TrajectoryData) -> TrajectoryData:
        """
        Thin out the subpoints of every agent in every frame,
        keeping only every nth subpoint per the configured n values.
        """
        print("Filtering: every Nth subpoint -------------")
        agent_data = data.agent_data
        total_steps = agent_data.times.size
        max_agents = int(np.amax(agent_data.n_agents))
        max_subpoints = int(np.amax(agent_data.n_subpoints))
        new_counts = np.zeros((total_steps, max_agents))
        new_subpoints = np.zeros((total_steps, max_agents, max_subpoints, 3))
        for t in range(total_steps):
            for a in range(int(agent_data.n_agents[t])):
                kept = 0
                count = int(agent_data.n_subpoints[t][a])
                if count > 0:
                    # Stride for this agent's type (fall back to the default).
                    step = self.n_per_type.get(agent_data.types[t][a], self.default_n)
                    # Indices 0, step, 2*step, ... < count.
                    selected = agent_data.subpoints[t][a][0:count:step]
                    kept = selected.shape[0]
                    new_subpoints[t][a][:kept] = selected
                new_counts[t][a] = kept
        agent_data.n_subpoints = new_counts
        agent_data.subpoints = new_subpoints
        print(
            f"filtered dims = {total_steps} timesteps X "
            f"{max_agents} agents X {int(np.amax(new_counts))} subpoints"
        )
        return data
| [
"numpy.amax",
"numpy.zeros",
"logging.getLogger"
] | [((264, 291), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (281, 291), False, 'import logging\n'), ((1597, 1632), 'numpy.zeros', 'np.zeros', (['(total_steps, max_agents)'], {}), '((total_steps, max_agents))\n', (1605, 1632), True, 'import numpy as np\n'), ((1653, 1706), 'numpy.zeros', 'np.zeros', (['(total_steps, max_agents, max_subpoints, 3)'], {}), '((total_steps, max_agents, max_subpoints, 3))\n', (1661, 1706), True, 'import numpy as np\n'), ((1446, 1479), 'numpy.amax', 'np.amax', (['data.agent_data.n_agents'], {}), '(data.agent_data.n_agents)\n', (1453, 1479), True, 'import numpy as np\n'), ((1509, 1545), 'numpy.amax', 'np.amax', (['data.agent_data.n_subpoints'], {}), '(data.agent_data.n_subpoints)\n', (1516, 1545), True, 'import numpy as np\n'), ((3043, 3063), 'numpy.amax', 'np.amax', (['n_subpoints'], {}), '(n_subpoints)\n', (3050, 3063), True, 'import numpy as np\n')] |
import os.path
from tqdm import tqdm
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
def plot_change_region_2d(regional_change_stream, change_idx: int, binary_thresh: float, save: bool, path=None):
    """Visualise one detected change region as a pair of heatmaps.

    Left panel: min-max normalised mean difference; right panel: the same
    region thresholded at (1 - binary_thresh) of its maximum. The flat region
    vector is reshaped to a square grid, so its length must be a perfect square.

    :param regional_change_stream: object exposing approximate_change_regions()
    :param change_idx: index of the change region to plot
    :param binary_thresh: fraction controlling the binarisation cut-off
    :param save: if True, save the figure to `path` instead of showing it
    :param path: output file path, required when save is True
    """
    region = regional_change_stream.approximate_change_regions()[change_idx]
    # Min-max normalise to [0, 1].
    region = (region - np.min(region)) / (np.max(region) - np.min(region))
    # Reshape the flat vector into a square image.
    region = region.reshape(int(np.sqrt(len(region))), int(np.sqrt(len(region))))
    fig, axes = plt.subplots(1, 2, figsize=(4, 2))
    for ax in axes:
        ax.set_aspect(1.0, adjustable='box')
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
    sns.heatmap(region, ax=axes[0], cmap="Greys", cbar_kws={"shrink": 0.5})
    sns.heatmap(region > (1-binary_thresh) * np.max(region), ax=axes[1], cmap="Greys_r", cbar_kws={"shrink": 0.5})
    axes[0].set_title("Mean difference")
    axes[1].set_title("Thresholded")
    plt.suptitle("Change for index {}".format(change_idx))
    plt.tight_layout()
    if save:
        assert isinstance(path, str)
        plt.savefig(path)
    else:
        plt.show()
def rgb2gray(rgb):
    """Convert an (H, W, 3+) RGB image to grayscale with Rec.601 luma weights."""
    weights = [0.299, 0.587, 0.114]
    return np.dot(rgb[:, :, :3], weights)
def preprocess_hipe() -> pd.DataFrame:
    """Load and merge the HIPE CSV files into one time-indexed DataFrame.

    Every 5th row of each machine's CSV is kept, files are outer-merged on
    SensorDateTime (rounded to seconds), all-zero columns are dropped, and the
    merged frame is cached to disk. NaNs are forward- then backward-filled and
    "PhaseCount" columns are removed before returning.
    """
    this_dir, _ = os.path.split(__file__)
    folder = os.path.join(this_dir, "..", "data", "hipe")
    cache_dir = os.path.join(folder, "cache")
    cache_df_path = os.path.join(cache_dir, "df.csv")
    if os.path.exists(cache_df_path):
        # Fast path: reuse the cached merged frame.
        data = pd.read_csv(cache_df_path)
        data["SensorDateTime"] = pd.to_datetime(data["SensorDateTime"], utc=True)
    else:
        data = None
        all_files = os.listdir(folder)
        for i, file in tqdm(enumerate(all_files)):
            if file.endswith(".csv"):
                path = os.path.join(folder, file)
                # Downsample: keep every 5th row.
                df = pd.read_csv(path).loc[::5]
                df["SensorDateTime"] = pd.to_datetime(df["SensorDateTime"], utc=True).dt.round("s")
                df.drop("Machine", axis=1, inplace=True)
                if data is None:
                    data = df
                else:
                    data = pd.merge(data, df, how="outer", on="SensorDateTime")
        # Drop columns that are all zero.
        data = data.loc[:, (data != 0).any(axis=0)]
        data = data.sort_values("SensorDateTime").groupby("SensorDateTime").mean()
        data.to_csv(cache_df_path)
    data.ffill(inplace=True)  # forward fill if possible
    data.bfill(inplace=True)  # backward fill the rest
    phase_count_cols = [col for col in data.columns if "PhaseCount" in col]
    return data.drop(phase_count_cols, axis=1)
| [
"matplotlib.pyplot.tight_layout",
"seaborn.heatmap",
"matplotlib.pyplot.show",
"pandas.read_csv",
"pandas.merge",
"numpy.min",
"numpy.max",
"pandas.to_datetime",
"numpy.dot",
"matplotlib.pyplot.subplots",
"matplotlib.pyplot.savefig"
] | [((530, 564), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(4, 2)'}), '(1, 2, figsize=(4, 2))\n', (542, 564), True, 'from matplotlib import pyplot as plt\n'), ((743, 814), 'seaborn.heatmap', 'sns.heatmap', (['region'], {'ax': 'axes[0]', 'cmap': '"""Greys"""', 'cbar_kws': "{'shrink': 0.5}"}), "(region, ax=axes[0], cmap='Greys', cbar_kws={'shrink': 0.5})\n", (754, 814), True, 'import seaborn as sns\n'), ((1096, 1114), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1112, 1114), True, 'from matplotlib import pyplot as plt\n'), ((1281, 1325), 'numpy.dot', 'np.dot', (['rgb[:, :, :3]', '[0.299, 0.587, 0.114]'], {}), '(rgb[:, :, :3], [0.299, 0.587, 0.114])\n', (1287, 1325), True, 'import numpy as np\n'), ((1188, 1205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (1199, 1205), True, 'from matplotlib import pyplot as plt\n'), ((1234, 1244), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1242, 1244), True, 'from matplotlib import pyplot as plt\n'), ((1629, 1655), 'pandas.read_csv', 'pd.read_csv', (['cache_df_path'], {}), '(cache_df_path)\n', (1640, 1655), True, 'import pandas as pd\n'), ((1690, 1738), 'pandas.to_datetime', 'pd.to_datetime', (["data['SensorDateTime']"], {'utc': '(True)'}), "(data['SensorDateTime'], utc=True)\n", (1704, 1738), True, 'import pandas as pd\n'), ((370, 384), 'numpy.min', 'np.min', (['region'], {}), '(region)\n', (376, 384), True, 'import numpy as np\n'), ((389, 403), 'numpy.max', 'np.max', (['region'], {}), '(region)\n', (395, 403), True, 'import numpy as np\n'), ((406, 420), 'numpy.min', 'np.min', (['region'], {}), '(region)\n', (412, 420), True, 'import numpy as np\n'), ((865, 879), 'numpy.max', 'np.max', (['region'], {}), '(region)\n', (871, 879), True, 'import numpy as np\n'), ((2277, 2329), 'pandas.merge', 'pd.merge', (['data', 'df'], {'how': '"""outer"""', 'on': '"""SensorDateTime"""'}), "(data, df, how='outer', on='SensorDateTime')\n", (2285, 
2329), True, 'import pandas as pd\n'), ((1975, 1992), 'pandas.read_csv', 'pd.read_csv', (['path'], {}), '(path)\n', (1986, 1992), True, 'import pandas as pd\n'), ((2042, 2088), 'pandas.to_datetime', 'pd.to_datetime', (["df['SensorDateTime']"], {'utc': '(True)'}), "(df['SensorDateTime'], utc=True)\n", (2056, 2088), True, 'import pandas as pd\n')] |
#%%
import os
import mne
import re
import sys
import glob
import pickle
import logging
mpl_logger = logging.getLogger('matplotlib')  # suppress matplotlib's DEBUG/INFO chatter
mpl_logger.setLevel(logging.WARNING)
sys.path.append('/research/FGB-ETP-DVM/DvM')
import numpy as np
import seaborn as sns
from scipy.interpolate import interp1d
from IPython import embed
from beh_analyses.PreProcessing import *
from eeg_analyses.EEG import *
from eeg_analyses.CTF import *
from eeg_analyses.BDM import *
from support.FolderStructure import *
from support.support import *
# Subject-specific info: per subject, electrodes to swap (empty dict = no swaps).
sj_info = {'1': {'replace':{}}, # example replace: replace = {'15': {'session_1': {'B1': 'EXG7'}}}
           '2': {'replace':{}},
           '3': {'replace':{}},
           '4': {'replace':{}},
           '5': {'replace':{}},
           '6': {'replace':{}},
           '7': {'replace':{}},
           '8': {'replace':{}},
           '9': {'replace':{}},
           '10': {'replace':{}},
           '11': {'replace':{}},
           '12': {'replace':{}},
           '13': {'replace':{}},
           '14': {'replace':{}},
           '15': {'replace':{}},
           '16': {'replace':{}},
           '17': {'replace':{}},
           '18': {'replace':{}},
           '19': {'replace':{}},
           '20': {'replace':{}},
           '21': {'replace':{}},
           '22': {'replace':{}},
           '23': {'replace':{}},
           '24': {'replace':{}},
           '25': {'replace':{}}, # run as subject 1
           }
# Project-specific info
project = 'Topdown_vs_implicit'
factors = []
labels = []
to_filter = ['RT']  # behavioral columns to outlier-filter
nr_sessions = 1
# Behavioral log columns to keep when matching behavior to EEG triggers.
project_param = ['nr_trials','trigger','RT', 'subject_nr', 'block_cnt', 'practice',
                'block_type', 'correct','dist_loc','dist_shape',
                'dist_color','target_loc','target_shape',
                'target_color','memory_orient']
# EEG info (event_id specified below)
ping = 'Topdown_vs_implicit'
part = 'beh'
eog = ['V_up','V_do','H_r','H_l']  # vertical and horizontal EOG channels
ref = ['Ref_r','Ref_l']  # reference electrodes
eeg_runs = [1]
# Epoch parameters (tmin/tmax in seconds, mne convention)
t_min = -1.4
t_max = 1.3
event_id = [100,101,102,103,104,105,106,107,110]  # trigger codes used for epoching
flt_pad = 0.5  # extra padding around epochs for filtering edge artifacts
binary = 0
# Eye tracker info
tracker_ext = 'asc'
eye_freq = 1000  # sampling rate — presumably Hz; confirm against tracker setup
start_event = 'Onset ping' # start_event = 'Onset search'
tracker_shift = 0
viewing_dist = 60  # presumably cm — confirm against lab setup
screen_res = (1680, 1050)
screen_h = 29  # presumably cm — confirm against lab setup
# NOTE(review): indentation appears to have been stripped from this dump;
# the comments below annotate intent without altering any code token.
class TopImplicit(FolderStructure):
def __init__(self): pass
def prepareBEH(self, project, part, factors, labels, project_param, to_filter):
'''
Standard behavior processing: create the folder structure, combine
single-subject files, select the project parameters, RT-filter
correct trials, exclude outliers and save the combined data file.
'''
PP = PreProcessing(project = project, part = part, factor_headers = factors, factor_labels = labels)
PP.create_folder_structure()
PP.combine_single_subject_files(save = False)
PP.select_data(project_parameters = project_param, save = False)
PP.filter_data(to_filter = to_filter, filter_crit = ' and correct == 1', cnd_sel = False, save = True)
PP.exclude_outliers(criteria = dict(RT = 'RT_filter == True', correct = ''))
PP.save_data_file()
def prepareEEG(self, sj, session, eog, ref, eeg_runs, t_min, t_max, flt_pad, sj_info, event_id, project_param, project_folder, binary, channel_plots, inspect):
'''
EEG preprocessing as preregistred @
NOTE(review): the channel_plots and inspect parameters are accepted but
never used — the selectBadChannels/artifactDetection calls below pass
hard-coded values instead. Confirm whether that is intentional.
NOTE(review): the eye-tracker settings (tracker_shift, start_event,
tracker_ext, eye_freq, screen_res, viewing_dist, screen_h) are read from
module-level globals, not from parameters.
'''
# set subject specific parameters
file = 'subject_{}_session_{}_'.format(sj, session)
replace = sj_info[str(sj)]['replace']
log_file = self.FolderTracker(extension=['processed', 'info'],
filename='preprocessing_param.csv')
# start logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M',
filename= self.FolderTracker(extension=['processed', 'info'],
filename='preprocess_sj{}_ses{}.log'.format(
sj, session), overwrite = False),
filemode='w')
# READ IN RAW DATA, APPLY REREFERENCING AND CHANGE NAMING SCHEME
EEG = mne.concatenate_raws([RawBDF(os.path.join(project_folder, 'raw', file + '{}.bdf'.format(run)),
preload=True, eog=eog) for run in eeg_runs])
#EEG.replaceChannel(sj, session, replace)
EEG.reReference(ref_channels=ref, vEOG=eog[
:2], hEOG=eog[2:], changevoltage=True, to_remove = ['EXG7','EXG8'])
EEG.setMontage(montage='biosemi64')
#FILTER DATA TWICE: ONCE FOR ICA AND ONCE FOR EPOCHING
EEGica = EEG.copy()
EEGica.filter(h_freq=None, l_freq=1.5,
fir_design='firwin', skip_by_annotation='edge')
EEG.filter(h_freq=None, l_freq=0.01, fir_design='firwin',
skip_by_annotation='edge')
# MATCH BEHAVIOR FILE
events = EEG.eventSelection(event_id, binary=binary, min_duration=0)
# dirty fix to deal with double triggers: drop triggers > 90 and merge
# consecutive trigger pairs (t, t+10) into a single t+100 event
if sj == 1:
events = np.delete(events, np.where(events[:,2] > 90)[0], axis = 0)
for i, trigger in enumerate(events[:-1,2]):
if trigger+10 == events[i+1,2]:
events[i,2] += 100
beh, missing = EEG.matchBeh(sj, session, events, event_id,
headers = project_param)
# EPOCH DATA
epochs = Epochs(sj, session, EEG, events, event_id=event_id,
tmin=t_min, tmax=t_max, baseline=None, flt_pad = flt_pad, reject_by_annotation = True)
epochs_ica = Epochs(sj, session, EEGica, events, event_id=event_id,
tmin=t_min, tmax=t_max, baseline=None, flt_pad = flt_pad, reject_by_annotation = True)
# AUTMATED ARTIFACT DETECTION
epochs.selectBadChannels(run_ransac = True, channel_plots = False, inspect = True, RT = None)
z = epochs.artifactDetection(z_thresh=4, band_pass=[110, 140], plot=True, inspect=True)
# ICA
epochs.applyICA(EEG, epochs_ica, method='picard', fit_params = dict(ortho=False, extended=True), inspect = True)
del EEGica
# EYE MOVEMENTS
epochs.detectEye(missing, events, beh.shape[0], time_window=(t_min*1000, t_max*1000),
tracker_shift = tracker_shift, start_event = start_event,
extension = tracker_ext, eye_freq = eye_freq,
screen_res = screen_res, viewing_dist = viewing_dist,
screen_h = screen_h)
# INTERPOLATE BADS
bads = epochs.info['bads']
epochs.interpolate_bads(reset_bads=True, mode='accurate')
# LINK BEHAVIOR
epochs.linkBeh(beh, events, event_id)
logPreproc((sj, session), log_file, nr_sj = len(sj_info.keys()), nr_sessions = nr_sessions,
to_update = dict(nr_clean = len(epochs), z_value = z, nr_bads = len(bads), bad_el = bads))
if __name__ == '__main__':
# limit BLAS/OpenMP thread pools
# NOTE(review): 'NUMEXP_NUM_THREADS' is likely a typo for
# 'NUMEXPR_NUM_THREADS' — as written it sets an unused variable
os.environ['MKL_NUM_THREADS'] = '5'
os.environ['NUMEXP_NUM_THREADS'] = '5'
os.environ['OMP_NUM_THREADS'] = '5'
# Specify project parameters
#project_folder = '/Users/dockyduncan/Documents/EEG/ping'
project_folder = '/research/FGB-ETP-DVM/Topdown_vs_implicit'
os.chdir(project_folder)
# initiate current project
PO = TopImplicit()
#Run preprocessing
#PO.prepareBEH(project, part, factors, labels, project_param, to_filter)
#Run preprocessing EEG
sj = 4
PO.prepareEEG(sj = sj, session = 1, eog = eog, ref = ref, eeg_runs = eeg_runs,
t_min = t_min, t_max = t_max, flt_pad = flt_pad, sj_info = sj_info,
event_id = event_id, project_param = project_param,
project_folder = project_folder, binary = binary,
channel_plots = True, inspect = True)
# beh, eeg = PO.loadData(sj, name = 'ses-1', eyefilter=False, eye_window=None)
# beh['condition'] = 'all'
# eeg.baseline = None # temp_fix
# beh['trigger'] %= 10
# ctf = CTF(beh, eeg, 'all', 'trigger', nr_iter = 10, nr_blocks = 3, nr_bins = 8, nr_chans = 8, delta = False, power = 'filtered')
# # step 1: search broad band of frequencies collapsed across all conditions
# ctf.spatialCTF(sj, [-0.2, 2], method = 'Foster', freqs = dict(all = [4,30]), downsample = 4, nr_perm = 0)
# # step 2: compare conditions within the alpha band
# ctf.spatialCTF(sj, [-0.2, 2], method = 'Foster', freqs = dict(alpha = [8,12]), downsample = 4, nr_perm = 0)
# temp code to show slopes
# NOTE(review): 'pickle' is used below but no import is visible in this
# chunk — presumably imported earlier in the full file; verify
times = np.linspace(-0.2, 2, 282)
data = pickle.load(open(PO.FolderTracker(['ctf','all','trigger','filtered'],'all_1_slopes-Foster_alpha.pickle'), 'rb'))
"sys.path.append",
"numpy.where",
"numpy.linspace",
"os.chdir",
"logging.getLogger"
] | [((100, 131), 'logging.getLogger', 'logging.getLogger', (['"""matplotlib"""'], {}), "('matplotlib')\n", (117, 131), False, 'import logging\n'), ((169, 213), 'sys.path.append', 'sys.path.append', (['"""/research/FGB-ETP-DVM/DvM"""'], {}), "('/research/FGB-ETP-DVM/DvM')\n", (184, 213), False, 'import sys\n'), ((7034, 7058), 'os.chdir', 'os.chdir', (['project_folder'], {}), '(project_folder)\n', (7042, 7058), False, 'import os\n'), ((8328, 8353), 'numpy.linspace', 'np.linspace', (['(-0.2)', '(2)', '(282)'], {}), '(-0.2, 2, 282)\n', (8339, 8353), True, 'import numpy as np\n'), ((4814, 4841), 'numpy.where', 'np.where', (['(events[:, 2] > 90)'], {}), '(events[:, 2] > 90)\n', (4822, 4841), True, 'import numpy as np\n')] |
# Copyright (2017-2020)
# The Wormnet project
# <NAME> (<EMAIL>)
import numpy as np
import tensorflow as tf
import kerasncp as kncp
from kerasncp.tf import LTCCell
# Synthetic training data: 100 sequences of length 16 with 10 features,
# regressing onto a 1-d target per timestep.
# NOTE(review): default_rng() is unseeded, so data differ between runs.
data_x = np.random.default_rng().normal(size=(100, 16, 10))
data_y = np.random.default_rng().normal(size=(100, 16, 1))
print("data_y.shape: ", str(data_y.shape))
wiring = kncp.wirings.FullyConnected(16, 8) # 16 units, 8 motor neurons
rnn_cell = LTCCell(wiring)
# Shared layers reused by both the sequence model and the single-step model.
dense1 = tf.keras.layers.Dense(16, activation="tanh")
dense2 = tf.keras.layers.Dense(1)
# Full-sequence model: Dense -> LTC RNN (all timesteps) -> Dense readout.
inputs = tf.keras.Input(shape=(None, 10))
x = dense1(inputs)
x = tf.keras.layers.RNN(rnn_cell, return_sequences=True)(x)
x = dense2(x)
trainable_model = tf.keras.Model(inputs, x)
trainable_model.compile(
optimizer=tf.keras.optimizers.Adam(0.01), loss=tf.keras.losses.MeanSquaredError()
)
trainable_model.fit(x=data_x, y=data_y, batch_size=25, epochs=10)
trainable_model.evaluate(x=data_x, y=data_y)
# Now we need to construct a single-step model that accepts an initial hidden state as additional input
inputs_single = tf.keras.Input(shape=(10,))
inputs_state = tf.keras.Input(shape=(rnn_cell.state_size,))
x = dense1(inputs_single)
_, output_states = rnn_cell(x, inputs_state)
single_step_model = tf.keras.Model([inputs_single, inputs_state], output_states)
def infer_hidden_states(single_step_model, state_size, data_x):
    """
    Infers the hidden states of a single-step RNN model.

    Args:
        single_step_model: RNN model taking a pair (inputs, old_hidden_state)
            as input and outputting new_hidden_state
        state_size: Size of the RNN model (=number of units)
        data_x: Input data of shape (batch_size, sequence_length, features)
            for which the hidden states should be inferred

    Returns:
        Tensor of shape (batch_size, sequence_length+1, state_size). The
        sequence starts with the initial hidden state (all zeros) and is
        therefore one time-step longer than the input sequence.
    """
    batch_size = data_x.shape[0]
    seq_len = data_x.shape[1]
    # Initial hidden state: all zeros.
    hidden = tf.zeros((batch_size, state_size))
    hidden_states = [hidden]
    for t in range(seq_len):
        # Compute new hidden state from old hidden state + input at time t.
        # (Debug print statements removed from this loop.)
        hidden = single_step_model([data_x[:, t], hidden])
        hidden_states.append(hidden)
    # Stack along a new time axis -> (batch, seq_len + 1, state_size).
    return tf.stack(hidden_states, axis=1)
# Now we can infer the hidden state
states = infer_hidden_states(single_step_model, rnn_cell.state_size, data_x)
print("Hidden states of first example ", states[0])
# Report the wiring role (e.g. motor vs inter neuron) of every unit.
for i in range(wiring.units):
print("Neuron {:0d} is a {:} neuron".format(i, wiring.get_type_of_neuron(i)))
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Input",
"tensorflow.keras.layers.RNN",
"tensorflow.keras.Model",
"tensorflow.stack",
"numpy.random.default_rng",
"tensorflow.zeros",
"tensorflow.keras.optimizers.Adam",
"kerasncp.wirings.FullyConnected",... | [((338, 372), 'kerasncp.wirings.FullyConnected', 'kncp.wirings.FullyConnected', (['(16)', '(8)'], {}), '(16, 8)\n', (365, 372), True, 'import kerasncp as kncp\n'), ((413, 428), 'kerasncp.tf.LTCCell', 'LTCCell', (['wiring'], {}), '(wiring)\n', (420, 428), False, 'from kerasncp.tf import LTCCell\n'), ((439, 483), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(16)'], {'activation': '"""tanh"""'}), "(16, activation='tanh')\n", (460, 483), True, 'import tensorflow as tf\n'), ((493, 517), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), '(1)\n', (514, 517), True, 'import tensorflow as tf\n'), ((528, 560), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(None, 10)'}), '(shape=(None, 10))\n', (542, 560), True, 'import tensorflow as tf\n'), ((672, 697), 'tensorflow.keras.Model', 'tf.keras.Model', (['inputs', 'x'], {}), '(inputs, x)\n', (686, 697), True, 'import tensorflow as tf\n'), ((1043, 1070), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(10,)'}), '(shape=(10,))\n', (1057, 1070), True, 'import tensorflow as tf\n'), ((1086, 1130), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(rnn_cell.state_size,)'}), '(shape=(rnn_cell.state_size,))\n', (1100, 1130), True, 'import tensorflow as tf\n'), ((1222, 1282), 'tensorflow.keras.Model', 'tf.keras.Model', (['[inputs_single, inputs_state]', 'output_states'], {}), '([inputs_single, inputs_state], output_states)\n', (1236, 1282), True, 'import tensorflow as tf\n'), ((584, 636), 'tensorflow.keras.layers.RNN', 'tf.keras.layers.RNN', (['rnn_cell'], {'return_sequences': '(True)'}), '(rnn_cell, return_sequences=True)\n', (603, 636), True, 'import tensorflow as tf\n'), ((1977, 2011), 'tensorflow.zeros', 'tf.zeros', (['(batch_size, state_size)'], {}), '((batch_size, state_size))\n', (1985, 2011), True, 'import tensorflow as tf\n'), ((2353, 2384), 'tensorflow.stack', 'tf.stack', (['hidden_states'], {'axis': '(1)'}), 
'(hidden_states, axis=1)\n', (2361, 2384), True, 'import tensorflow as tf\n'), ((175, 198), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (196, 198), True, 'import numpy as np\n'), ((235, 258), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (256, 258), True, 'import numpy as np\n'), ((737, 767), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(0.01)'], {}), '(0.01)\n', (761, 767), True, 'import tensorflow as tf\n'), ((774, 808), 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), '()\n', (806, 808), True, 'import tensorflow as tf\n')] |
# coding: utf-8
# In[1]:
import sympy as sp
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.setrecursionlimit(25000)
import mpmath as mp
import scipy.fftpack
from sympy import lambdify
# NOTE(review): mpmath is imported twice (as mp above and mpf here);
# one of the two aliases is redundant
import mpmath as mpf
# In[2]:
# analytic expression for the symbol P(f) of B-L wavelet filter
# (B-L presumably refers to the Battle-Lemarie wavelet family — confirm)
f=sp.symbols('f')
J=sp.I
w = 2*sp.pi*f
N1 = 5+30*sp.cos(w/2)**2+30*sp.sin(w/2)**2*sp.cos(w/2)**2;
N2 = 2*sp.sin(w/2)**4*sp.cos(w/2)**2+70*sp.cos(w/2)**4+(2/3)*sp.sin(w/2)**6;
S = (N1+N2)/(105*sp.sin(w/2)**8);
# Fourier transform of the scaling function and the two-scale symbol P(f)
phi_hat=16/(w**4*sp.sqrt(S))
Pf=phi_hat.subs(f,2*f)/phi_hat
# In[3]:
# plot the |P(f)| of B-L wavelet filter
sp.plotting.plot(abs(Pf),(f, -0.5, 0.5), xlabel='f', ylabel='P(f)',
adaptive=False, num_of_points=200, title = "Symbol P(f)")
# In[4]:
def f_coeff(x, F, N):
    """Centred Fourier coefficients of the F-periodic symbolic function x(f).

    Samples x at N equally spaced points over one period, takes the DFT,
    normalises by N, and shifts so the zero-frequency coefficient sits in
    the middle of the returned array (h[-k], ..., h[0], ..., h[k]).

    x: sympy expression in the module-level symbol ``f``
    F: period of x
    N: number of Fourier coefficients (odd integer)
    """
    step = F / N
    samples = np.zeros(N, dtype=complex)
    # Evaluate the f=0 sample as a limit so removable singularities are handled.
    samples[0] = sp.limit(x, f, 0)
    for k in range(1, N):
        samples[k] = complex(x.subs(f, k * step))
    return np.fft.fftshift(np.fft.fft(samples) / N)
# In[5]:
def f_trans(x,F,M,N):
# x: symbolic input function of "f"
# F: frequency window [-F/2,F/2]
# N: number of sample values
# M: number of aliases
# Xs: Fourier transform sample values
# fs: frequency sample values
dt = 1/F # delta t
df = F/N # delta f
T =N/F
# Periodize x by adding M aliases shifted by +/- k*T
for k in range(1,M+1):
x = x+x.subs(f,f-k*T)+x.subs(f,f+k*T)
xs=np.zeros(N,dtype=float)
fs=np.zeros(N,dtype=float)
# Value at f=0 via a limit (handles removable singularities)
dc=sp.limit(x,f,0)
xs[0]=dc
fs[0]=-F/2
# NOTE(review): fs[1] = (1-1)*df - F/2 = -F/2 duplicates fs[0] — this
# looks like an off-by-one ((n-1) vs n). Also the samples are taken at
# steps of dt = 1/F while the frequency axis uses df = F/N; confirm
# this mismatch is intended before relying on the returned axis.
for n in range(1,N):
xs[n] = float(x.subs(f,n*dt))
fs[n] = (n-1)*df-F/2
Xs = np.fft.fft(xs)*dt
Xs = np.fft.fftshift(Xs)
return fs, Xs
# In[6]:
# Rebuild the symbolic quantities (duplicates the In[2] cell above) and
# evaluate the transform of the scaling function numerically.
f=sp.symbols('f')
J=sp.I
w = 2*sp.pi*f
N1w = 5+30*((sp.cos(w/2))**2)+30*((sp.sin(w/2))**2)*((sp.cos(w/2))**2)
N2w = 2*((sp.sin(w/2))**4)*((sp.cos(w/2))**2)+70*((sp.cos(w/2))**4)+(2/3)*((sp.sin(w/2))**6)
Sw = (N1w+N2w)/(105*((sp.sin(w/2))**8))
phi_hat=16/((w**4)*sp.sqrt(Sw))
x = phi_hat
F = 6
M=0
N=256
fs, Xs= f_trans(x,F,M,N)
# plt.figure()
# plt.title('B-L scaling function')
# plt.plot(fs,np.real(Xs),'bo')
# plt.savefig('B-L scaling')
# In[7]:
# analytic expression for the Fourier Transform of B-L wavelet
psi_hat=-sp.exp(-J*sp.pi*f)*Pf.subs(f,(f+1)/2)*phi_hat.subs(f,f/2)
# In[8]:
#plot Fourier Tranform of B-L wavelet
sp.plotting.plot(abs(psi_hat), (f,-5,5), xlabel='f', ylabel='psi_hat',
num_of_points=200, title = "Fourier Transform of B-L wavelet")
| [
"sympy.symbols",
"sympy.limit",
"numpy.fft.fft",
"sympy.cos",
"numpy.zeros",
"sympy.sqrt",
"numpy.fft.fftshift",
"sympy.exp",
"sympy.sin",
"sys.setrecursionlimit"
] | [((110, 138), 'sys.setrecursionlimit', 'sys.setrecursionlimit', (['(25000)'], {}), '(25000)\n', (131, 138), False, 'import sys\n'), ((307, 322), 'sympy.symbols', 'sp.symbols', (['"""f"""'], {}), "('f')\n", (317, 322), True, 'import sympy as sp\n'), ((1875, 1890), 'sympy.symbols', 'sp.symbols', (['"""f"""'], {}), "('f')\n", (1885, 1890), True, 'import sympy as sp\n'), ((1000, 1026), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'complex'}), '(N, dtype=complex)\n', (1008, 1026), True, 'import numpy as np\n'), ((1036, 1053), 'sympy.limit', 'sp.limit', (['x', 'f', '(0)'], {}), '(x, f, 0)\n', (1044, 1053), True, 'import sympy as sp\n'), ((1150, 1168), 'numpy.fft.fftshift', 'np.fft.fftshift', (['h'], {}), '(h)\n', (1165, 1168), True, 'import numpy as np\n'), ((1579, 1603), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (1587, 1603), True, 'import numpy as np\n'), ((1610, 1634), 'numpy.zeros', 'np.zeros', (['N'], {'dtype': 'float'}), '(N, dtype=float)\n', (1618, 1634), True, 'import numpy as np\n'), ((1641, 1658), 'sympy.limit', 'sp.limit', (['x', 'f', '(0)'], {}), '(x, f, 0)\n', (1649, 1658), True, 'import sympy as sp\n'), ((1822, 1841), 'numpy.fft.fftshift', 'np.fft.fftshift', (['Xs'], {}), '(Xs)\n', (1837, 1841), True, 'import numpy as np\n'), ((531, 541), 'sympy.sqrt', 'sp.sqrt', (['S'], {}), '(S)\n', (538, 541), True, 'import sympy as sp\n'), ((1125, 1139), 'numpy.fft.fft', 'np.fft.fft', (['xs'], {}), '(xs)\n', (1135, 1139), True, 'import numpy as np\n'), ((1795, 1809), 'numpy.fft.fft', 'np.fft.fft', (['xs'], {}), '(xs)\n', (1805, 1809), True, 'import numpy as np\n'), ((2136, 2147), 'sympy.sqrt', 'sp.sqrt', (['Sw'], {}), '(Sw)\n', (2143, 2147), True, 'import sympy as sp\n'), ((387, 400), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (393, 400), True, 'import sympy as sp\n'), ((464, 477), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (470, 477), True, 'import sympy as sp\n'), ((497, 510), 'sympy.sin', 'sp.sin', (['(w 
/ 2)'], {}), '(w / 2)\n', (503, 510), True, 'import sympy as sp\n'), ((1967, 1980), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (1973, 1980), True, 'import sympy as sp\n'), ((2060, 2073), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (2066, 2073), True, 'import sympy as sp\n'), ((2099, 2112), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (2105, 2112), True, 'import sympy as sp\n'), ((2401, 2423), 'sympy.exp', 'sp.exp', (['(-J * sp.pi * f)'], {}), '(-J * sp.pi * f)\n', (2407, 2423), True, 'import sympy as sp\n'), ((354, 367), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (360, 367), True, 'import sympy as sp\n'), ((372, 385), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (378, 385), True, 'import sympy as sp\n'), ((425, 438), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (431, 438), True, 'import sympy as sp\n'), ((443, 456), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (449, 456), True, 'import sympy as sp\n'), ((1926, 1939), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (1932, 1939), True, 'import sympy as sp\n'), ((1948, 1961), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (1954, 1961), True, 'import sympy as sp\n'), ((2013, 2026), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (2019, 2026), True, 'import sympy as sp\n'), ((2035, 2048), 'sympy.cos', 'sp.cos', (['(w / 2)'], {}), '(w / 2)\n', (2041, 2048), True, 'import sympy as sp\n'), ((410, 423), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (416, 423), True, 'import sympy as sp\n'), ((1994, 2007), 'sympy.sin', 'sp.sin', (['(w / 2)'], {}), '(w / 2)\n', (2000, 2007), True, 'import sympy as sp\n')] |
"""Deconvolution using CVXPY"""
import warnings
import numpy as np
from scipy.sparse import issparse
import cvxpy as cvx
from .utils import convolution_output_size, convolution_matrix
def least_squares(A, y, gamma_L2=0, gamma_L1=0):
    """Solve a regularized least squares problem with CVXPY.

    Returns argmin_x ||A*x - y||^2 + gamma_L2 ||x||^2 + gamma_L1 ||x||_1

    Parameters
    ----------
    A : array_like or sparse
        [N x M] projection matrix or operator
    y : array_like
        length-M vector
    gamma_L2 : float (optional)
        L2 regularization strength. Default=0
    gamma_L1 : float (optional)
        L1 regularization strength. Default=0

    Returns
    -------
    x : ndarray
        length-N vector that minimizes the cost function.
    """
    if not issparse(A):
        A = np.asarray(A)
    y = np.asarray(y)
    N, M = A.shape
    cplx = np.issubdtype(A.dtype, complex) or np.issubdtype(y.dtype, complex)
    if cplx:
        # CVXPY has no complex support, so solve for the real and
        # imaginary parts as two coupled real variables.
        Ar, Ai = A.real, A.imag
        xr = cvx.Variable(M)
        xi = cvx.Variable(M)
        residual = (cvx.sum_squares(Ar * xr - Ai * xi - y.real)
                    + cvx.sum_squares(Ai * xr + Ar * xi - y.imag))
        # Regularize the element-wise complex magnitudes.
        u = cvx.norm(cvx.hstack(xr, xi), 2, axis=1)
    else:
        x = cvx.Variable(A.shape[1])
        residual = cvx.sum_squares(A * x - y)
        u = x
    # Total objective: residual + gamma_L1 |u|_1 + gamma_L2 ||u||^2
    cost = residual
    if gamma_L1 != 0:
        cost = cost + cvx.Parameter(value=gamma_L1, sign="positive") * cvx.norm(u, 1)
    if gamma_L2 != 0:
        cost = cost + cvx.Parameter(value=gamma_L2, sign='positive') * cvx.sum_squares(u)
    prob = cvx.Problem(cvx.Minimize(cost))
    prob.solve()
    print("Problem Status: {0}".format(prob.status))
    if cplx:
        return np.array(xr.value).ravel() + 1j * np.array(xi.value).ravel()
    return np.asarray(x.value).ravel()
def _direct_deconvolution(w, y, Nx, gamma_L2=0, gamma_L1=0):
    """Deconvolution via CVXPY's conv atom (mode='full' only).

    Returns argmin_x ||conv(w, x) - y||^2 + gamma_L2 ||x||^2 + gamma_L1 ||x||_1
    where x has Nx elements. Complex inputs are handled by splitting into
    real and imaginary parts, mirroring ``least_squares``.
    """
    w = np.asarray(w)
    y = np.asarray(y)
    cplx = np.issubdtype(w.dtype, complex) or np.issubdtype(y.dtype, complex)
    if cplx:
        wr, wi = w.real, w.imag
        yr, yi = y.real, y.imag
        xr = cvx.Variable(Nx)
        xi = cvx.Variable(Nx)
        error = (cvx.sum_squares(cvx.conv(wr, xr) - cvx.conv(wi, xi) - yr) +
                 cvx.sum_squares(cvx.conv(wi, xr) + cvx.conv(wr, xi) - yi))
        # element-wise complex magnitudes, regularized below
        u = cvx.norm(cvx.hstack(xr, xi), 2, axis=1)
    else:
        x = cvx.Variable(Nx)
        error = cvx.sum_squares(cvx.conv(w, x) - y)
        u = x
    cost = error
    # BUG FIX: regularize u (defined in both branches) rather than x, which
    # is undefined in the complex branch and raised a NameError there. This
    # also matches the behavior of least_squares().
    if gamma_L1 != 0:
        gamma_L1 = cvx.Parameter(value=gamma_L1, sign="positive")
        cost = cost + gamma_L1 * cvx.norm(u, 1)
    if gamma_L2 != 0:
        gamma_L2 = cvx.Parameter(value=gamma_L2, sign="positive")
        cost = cost + gamma_L2 * cvx.sum_squares(u)
    objective = cvx.Minimize(cost)
    prob = cvx.Problem(objective)
    prob.solve()
    print("Problem Status: {0}".format(prob.status))
    if cplx:
        result = np.array(xr.value).ravel() + 1j * np.array(xi.value).ravel()
    else:
        result = np.asarray(x.value).ravel()
    return result
def deconvolution(w, y, gamma_L2=0, gamma_L1=0, Nx=None,
                  conv_method='direct', mode='full'):
    """Iterative deconvolution using least squares via CVXPY

    Returns argmin_x ||conv(w, x) - y||^2 + gamma_L2 ||x||^2 + gamma_L1 ||x||_1

    Parameters
    ----------
    w : array_like
        length-N array representing the convolution
    y : array_like
        length-M array or [M x K] matrix. Note that M must match the
        output of np.convolve(w, x, mode).
    gamma_L2 : float, optional
        L2 regularization strength. Default=0
    gamma_L1 : float (optional)
        L1 regularization strength. Default=0
    Nx : int, optional
        The number of elements in the x array. Default = N
    conv_method : {'direct', 'matrix'}, optional
        Method to use for convolution. Default='direct'
    mode : {'full', 'valid', 'same'}, optional
        The convolution mode (see ``np.convolve`` docstring for details)

    Returns
    -------
    x : ndarray
        the length-Nx or [Nx x K] matrix representing the deconvolution
    """
    w = np.asarray(w)
    Nx = len(w) if Nx is None else Nx
    Ny = convolution_output_size(len(w), Nx, mode=mode)
    if len(y) != Ny:
        raise ValueError("Array sizes do not match convolution mode")
    # With no regularization an under-determined system has no unique answer.
    if Ny < Nx and gamma_L2 == 0 and gamma_L1 == 0:
        warnings.warn("Ill-posed deconvolution: len(y)={0}, len(x)={1}. "
                      "Try adding regularization or using a different "
                      "mode of convolution".format(Ny, Nx))
    if conv_method == 'matrix':
        # Build an explicit convolution matrix and solve as linear least squares.
        C = convolution_matrix(w, Nx, mode=mode)
        return least_squares(C, y, gamma_L2=gamma_L2, gamma_L1=gamma_L1)
    elif conv_method == 'direct':
        if mode != 'full':
            raise ValueError("Only mode='full' supported for direct method "
                             "of CVX deconvolution.")
        return _direct_deconvolution(w, y, Nx,
                                     gamma_L2=gamma_L2,
                                     gamma_L1=gamma_L1)
    else:
        # BUG FIX: the message previously advertised an unimplemented 'fft'
        # method; only 'matrix' and 'direct' are supported.
        raise ValueError("conv_method must be in {'matrix', 'direct'}")
| [
"cvxpy.Parameter",
"scipy.sparse.issparse",
"numpy.asarray",
"cvxpy.hstack",
"numpy.array",
"cvxpy.Problem",
"cvxpy.norm",
"cvxpy.Variable",
"cvxpy.conv",
"cvxpy.sum_squares",
"numpy.issubdtype",
"cvxpy.Minimize"
] | [((846, 859), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (856, 859), True, 'import numpy as np\n'), ((1806, 1824), 'cvxpy.Minimize', 'cvx.Minimize', (['cost'], {}), '(cost)\n', (1818, 1824), True, 'import cvxpy as cvx\n'), ((1836, 1858), 'cvxpy.Problem', 'cvx.Problem', (['objective'], {}), '(objective)\n', (1847, 1858), True, 'import cvxpy as cvx\n'), ((2166, 2179), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (2176, 2179), True, 'import numpy as np\n'), ((2188, 2201), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (2198, 2201), True, 'import numpy as np\n'), ((3042, 3060), 'cvxpy.Minimize', 'cvx.Minimize', (['cost'], {}), '(cost)\n', (3054, 3060), True, 'import cvxpy as cvx\n'), ((3072, 3094), 'cvxpy.Problem', 'cvx.Problem', (['objective'], {}), '(objective)\n', (3083, 3094), True, 'import cvxpy as cvx\n'), ((4409, 4422), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (4419, 4422), True, 'import numpy as np\n'), ((799, 810), 'scipy.sparse.issparse', 'issparse', (['A'], {}), '(A)\n', (807, 810), False, 'from scipy.sparse import issparse\n'), ((824, 837), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (834, 837), True, 'import numpy as np\n'), ((892, 923), 'numpy.issubdtype', 'np.issubdtype', (['A.dtype', 'complex'], {}), '(A.dtype, complex)\n', (905, 923), True, 'import numpy as np\n'), ((927, 958), 'numpy.issubdtype', 'np.issubdtype', (['y.dtype', 'complex'], {}), '(y.dtype, complex)\n', (940, 958), True, 'import numpy as np\n'), ((1185, 1200), 'cvxpy.Variable', 'cvx.Variable', (['M'], {}), '(M)\n', (1197, 1200), True, 'import cvxpy as cvx\n'), ((1214, 1229), 'cvxpy.Variable', 'cvx.Variable', (['M'], {}), '(M)\n', (1226, 1229), True, 'import cvxpy as cvx\n'), ((1414, 1438), 'cvxpy.Variable', 'cvx.Variable', (['A.shape[1]'], {}), '(A.shape[1])\n', (1426, 1438), True, 'import cvxpy as cvx\n'), ((1455, 1481), 'cvxpy.sum_squares', 'cvx.sum_squares', (['(A * x - y)'], {}), '(A * x - y)\n', (1470, 1481), True, 'import cvxpy as 
cvx\n'), ((1554, 1600), 'cvxpy.Parameter', 'cvx.Parameter', ([], {'value': 'gamma_L1', 'sign': '"""positive"""'}), "(value=gamma_L1, sign='positive')\n", (1567, 1600), True, 'import cvxpy as cvx\n'), ((1690, 1736), 'cvxpy.Parameter', 'cvx.Parameter', ([], {'value': 'gamma_L2', 'sign': '"""positive"""'}), "(value=gamma_L2, sign='positive')\n", (1703, 1736), True, 'import cvxpy as cvx\n'), ((2213, 2244), 'numpy.issubdtype', 'np.issubdtype', (['w.dtype', 'complex'], {}), '(w.dtype, complex)\n', (2226, 2244), True, 'import numpy as np\n'), ((2248, 2279), 'numpy.issubdtype', 'np.issubdtype', (['y.dtype', 'complex'], {}), '(y.dtype, complex)\n', (2261, 2279), True, 'import numpy as np\n'), ((2372, 2388), 'cvxpy.Variable', 'cvx.Variable', (['Nx'], {}), '(Nx)\n', (2384, 2388), True, 'import cvxpy as cvx\n'), ((2402, 2418), 'cvxpy.Variable', 'cvx.Variable', (['Nx'], {}), '(Nx)\n', (2414, 2418), True, 'import cvxpy as cvx\n'), ((2647, 2663), 'cvxpy.Variable', 'cvx.Variable', (['Nx'], {}), '(Nx)\n', (2659, 2663), True, 'import cvxpy as cvx\n'), ((2790, 2836), 'cvxpy.Parameter', 'cvx.Parameter', ([], {'value': 'gamma_L1', 'sign': '"""positive"""'}), "(value=gamma_L1, sign='positive')\n", (2803, 2836), True, 'import cvxpy as cvx\n'), ((2926, 2972), 'cvxpy.Parameter', 'cvx.Parameter', ([], {'value': 'gamma_L2', 'sign': '"""positive"""'}), "(value=gamma_L2, sign='positive')\n", (2939, 2972), True, 'import cvxpy as cvx\n'), ((1248, 1287), 'cvxpy.sum_squares', 'cvx.sum_squares', (['(Ar * xr - Ai * xi - yr)'], {}), '(Ar * xr - Ai * xi - yr)\n', (1263, 1287), True, 'import cvxpy as cvx\n'), ((1303, 1342), 'cvxpy.sum_squares', 'cvx.sum_squares', (['(Ai * xr + Ar * xi - yi)'], {}), '(Ai * xr + Ar * xi - yi)\n', (1318, 1342), True, 'import cvxpy as cvx\n'), ((1361, 1379), 'cvxpy.hstack', 'cvx.hstack', (['xr', 'xi'], {}), '(xr, xi)\n', (1371, 1379), True, 'import cvxpy as cvx\n'), ((2594, 2612), 'cvxpy.hstack', 'cvx.hstack', (['xr', 'xi'], {}), '(xr, xi)\n', (2604, 2612), True, 'import 
cvxpy as cvx\n'), ((1634, 1648), 'cvxpy.norm', 'cvx.norm', (['u', '(1)'], {}), '(u, 1)\n', (1642, 1648), True, 'import cvxpy as cvx\n'), ((1770, 1788), 'cvxpy.sum_squares', 'cvx.sum_squares', (['u'], {}), '(u)\n', (1785, 1788), True, 'import cvxpy as cvx\n'), ((2048, 2067), 'numpy.asarray', 'np.asarray', (['x.value'], {}), '(x.value)\n', (2058, 2067), True, 'import numpy as np\n'), ((2696, 2710), 'cvxpy.conv', 'cvx.conv', (['w', 'x'], {}), '(w, x)\n', (2704, 2710), True, 'import cvxpy as cvx\n'), ((2870, 2884), 'cvxpy.norm', 'cvx.norm', (['x', '(1)'], {}), '(x, 1)\n', (2878, 2884), True, 'import cvxpy as cvx\n'), ((3006, 3024), 'cvxpy.sum_squares', 'cvx.sum_squares', (['x'], {}), '(x)\n', (3021, 3024), True, 'import cvxpy as cvx\n'), ((3284, 3303), 'numpy.asarray', 'np.asarray', (['x.value'], {}), '(x.value)\n', (3294, 3303), True, 'import numpy as np\n'), ((1960, 1978), 'numpy.array', 'np.array', (['xr.value'], {}), '(xr.value)\n', (1968, 1978), True, 'import numpy as np\n'), ((3196, 3214), 'numpy.array', 'np.array', (['xr.value'], {}), '(xr.value)\n', (3204, 3214), True, 'import numpy as np\n'), ((1994, 2012), 'numpy.array', 'np.array', (['xi.value'], {}), '(xi.value)\n', (2002, 2012), True, 'import numpy as np\n'), ((2453, 2469), 'cvxpy.conv', 'cvx.conv', (['wr', 'xr'], {}), '(wr, xr)\n', (2461, 2469), True, 'import cvxpy as cvx\n'), ((2472, 2488), 'cvxpy.conv', 'cvx.conv', (['wi', 'xi'], {}), '(wi, xi)\n', (2480, 2488), True, 'import cvxpy as cvx\n'), ((2530, 2546), 'cvxpy.conv', 'cvx.conv', (['wi', 'xr'], {}), '(wi, xr)\n', (2538, 2546), True, 'import cvxpy as cvx\n'), ((2549, 2565), 'cvxpy.conv', 'cvx.conv', (['wr', 'xi'], {}), '(wr, xi)\n', (2557, 2565), True, 'import cvxpy as cvx\n'), ((3230, 3248), 'numpy.array', 'np.array', (['xi.value'], {}), '(xi.value)\n', (3238, 3248), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Mon Dec 20 01:10:22 2021

@author: mohit

Exploratory analysis of a Stack Overflow developer-survey CSV: cleaning,
group-by summaries, two-sample hypothesis tests, and plotting.
"""
#%%
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.decomposition import PCA
import prince
#%%
# Load and clean: forward-fill missing values, then fill MilitaryUS with 'No'.
# NOTE(review): the global ffill runs first, so the MilitaryUS-specific fill
# only affects any NaNs the ffill left (e.g. leading rows) — confirm intended.
df=pd.read_csv('stackoverflow_data.csv')
col=df.columns
df.isnull().sum()
df.info()
df.describe()
df.fillna(method='ffill', inplace=True)
df.isnull().sum()
df['MilitaryUS'].fillna('No', inplace=True)
df.isnull().sum()
#Respodents group by country
#%% Hypothesis US and Indian Developers are equally satisfied by their job.
df_country= (df.groupby(['Country'])).count()
df_country.sort_values(by=['Respondent'], ascending=False, inplace=True)
df_country.iloc[0:10, 0].plot.bar()
USPeople_df= df[df['Country']== 'United States']
IndianPeople_df= df[df['Country']== 'India']
USPeople_df=USPeople_df[['Country','JobSatisfaction']]
IndianPeople_df=IndianPeople_df[['Country','JobSatisfaction']]
#decide hypothesis Akash
#%% Hypothesis for US male and female developers equally paid
#Code is designed in such way can compare for any country.
df_allgender=df.groupby(['Gender']).count()
df_allgender.sort_values(by=['Respondent'], ascending=False, inplace=True)
df_allgender.iloc[0:3, 0].plot.bar()
df_allgender=df_allgender.T
df_gender= df_allgender[['Female', 'Male']]
df_gender=df_gender.iloc[0, :]
df_female= df[df['Gender']== 'Female']
df_male= df[df['Gender']== 'Male']
femaleSalaries_df= df_female[['Country','Currency','CurrencySymbol','Salary']]
maleSalaries_df= df_male[['Country','Currency','CurrencySymbol','Salary']]
USFemaleSalaries=pd.DataFrame(femaleSalaries_df.groupby('Currency')['Salary'])
USMaleSalaries=pd.DataFrame(maleSalaries_df.groupby('Currency')['Salary'])
# NOTE(review): iloc[18, 1] is a magic index — presumably the US-dollar
# currency group; verify the row ordering before trusting these tests.
M1=USFemaleSalaries.iloc[18, 1]
M2=USMaleSalaries.iloc[18,1]
# Nonparametric two-sample tests on the selected salary groups.
u,p1 = stats.mannwhitneyu(M1,M2)
k,p=stats.kstest(M1,M2)
#%% Same for Student and Hobby/Salary/Race Ethinity are Student with anythihng else, think about it
#Use of open Source compare to Country/Gender
#Education and Salary comparison
#%% Other Ideas
codeforHobby_df= df[df['Hobby']== 'Yes']
# Which country code for Hobby most?
codeforHobbyCountry_df= codeforHobby_df.groupby('Country').count()
codeforHobbyCountry_df.sort_values(by=['Respondent'], ascending=False, inplace=True)
# What Age group codes for Hobby most?
codeforHobbyAge_df= codeforHobby_df.groupby('Age').count()
codeforHobbyAge_df.sort_values(by=['Respondent'], ascending=False, inplace=True)
# How many years are developer coding for Hobby most?
codeforHobbyYearsCoding_df= codeforHobby_df.groupby('YearsCoding').count()
codeforHobbyYearsCoding_df.sort_values(by=['Respondent'], ascending=False, inplace=True)
codeforHobbyCountry_df.iloc[0:10, 0].plot.bar()
codeforHobbyAge_df.iloc[:, 0].plot.bar()
codeforHobbyYearsCoding_df.iloc[:, 0].plot.bar()
#%% Which country use open Source most?
# Male or Female code as hobby and most opensourse?
openSource_df= df[df['OpenSource']== 'Yes']
openSourceCountry_df= openSource_df.groupby('Country').count()
openSourceCountry_df.sort_values(by=['Respondent'], ascending=False, inplace=True)
openSourceCountry_df.iloc[0:10, 0].plot.bar()
openSource_df=pd.DataFrame([df[(df['Gender'] == 'Female') & (df['OpenSource'] == 'Yes')].count()[0], df[(df['Gender'] == 'Male') & (df['OpenSource'] == 'Yes')].count()[0]])
openSource_df=openSource_df.T
openSource_df.columns=['Female', 'Male']
openSource_df.plot.bar()
#%%
# Top 10 Databases worked upon?
databases_df= df['DatabaseWorkedWith'].value_counts()
databases_df.iloc[0:5].plot.bar()
# Top desired Databases to work on
desiredDatabase_df=df['DatabaseDesireNextYear'].value_counts()
desiredDatabase_df.iloc[0:5].plot.bar()
#%%
# Top 10 Platforms worked upon?
# Top desired Platforms to work on
# Top 10 Frameworks worked upon?
platform_df= df['PlatformWorkedWith'].value_counts()
platform_df.iloc[0:5].plot.bar()
# Top desired Databases to work on
desiredplatform_df=df['PlatformDesireNextYear'].value_counts()
desiredplatform_df.iloc[0:5].plot.bar()
framework_df= df['FrameworkWorkedWith'].value_counts()
framework_df.iloc[0:10].plot.bar()
#%%
# About Developer
# Coders happy to code?
satisfaction_df= df['JobSatisfaction'].value_counts()
satisfaction_df.plot.pie()
#Most Common Jobs for Developers
job_df=df['DevType'].value_counts()
job_df.iloc[0:10].plot.pie()
# Highest Degree done by Developers
degree_df= df['FormalEducation'].value_counts()
degree_df.plot.pie()
#%% correlation
from sklearn.preprocessing import OneHotEncoder
#Find the unique in every column and determine which one to OnehOTEncode
enc = OneHotEncoder(handle_unknown='ignore')
enc.fit(df.values)
x=enc.transform(df.values)
#Select Which columns to use for prediction
#Add it to rest columns and apply PCA
#And Than predict
df_enc=pd.get_dummies(df)
# NOTE(review): x is rebound here, discarding the encoded matrix above.
x=enc.categories_
# NOTE(review): np.corrcoef expects numeric input — calling it on the raw
# mixed-dtype DataFrame will likely fail; df_enc (numeric dummies) is
# presumably what was intended. Confirm before relying on r.
r = np.corrcoef(df)
| [
"scipy.stats.kstest",
"pandas.read_csv",
"scipy.stats.mannwhitneyu",
"pandas.get_dummies",
"numpy.corrcoef",
"sklearn.preprocessing.OneHotEncoder"
] | [((276, 313), 'pandas.read_csv', 'pd.read_csv', (['"""stackoverflow_data.csv"""'], {}), "('stackoverflow_data.csv')\n", (287, 313), True, 'import pandas as pd\n'), ((1891, 1917), 'scipy.stats.mannwhitneyu', 'stats.mannwhitneyu', (['M1', 'M2'], {}), '(M1, M2)\n', (1909, 1917), False, 'from scipy import stats\n'), ((1922, 1942), 'scipy.stats.kstest', 'stats.kstest', (['M1', 'M2'], {}), '(M1, M2)\n', (1934, 1942), False, 'from scipy import stats\n'), ((4779, 4817), 'sklearn.preprocessing.OneHotEncoder', 'OneHotEncoder', ([], {'handle_unknown': '"""ignore"""'}), "(handle_unknown='ignore')\n", (4792, 4817), False, 'from sklearn.preprocessing import OneHotEncoder\n'), ((4981, 4999), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {}), '(df)\n', (4995, 4999), True, 'import pandas as pd\n'), ((5024, 5039), 'numpy.corrcoef', 'np.corrcoef', (['df'], {}), '(df)\n', (5035, 5039), True, 'import numpy as np\n')] |
import tensorflow as tf
import os
import sys
sys.path.insert(0, './scripts/')
import tensorlayer as tl
import numpy as np
import random
from utils import *
from models import *
import argparse
parser = argparse.ArgumentParser(description='3D-GAN implementation for 32*32*32 voxel output')
parser.add_argument('-o','--object', default='chair', help='The name of the current experiment, this will be used to create folders and save models.')
parser.add_argument('-b','--batchsize', default=32, help ='The batch size.', type=int)
parser.add_argument('-depth','--depth', default='best', help ='Epoch from which to load the depth map predictor, if you want the best leave default.' )
parser.add_argument('-occ','--occ', default='best', help ='Epoch from which to load the occupancy map predictor, if you want the best leave default.' )
parser.add_argument('-dis','--distance', default=70, help ='The range in which distances will be predicted.', type=int)
parser.add_argument('-high', default= 256, help='The size of the high dimension objects.', type= int)
parser.add_argument('-low', default= 32, help='The size of the low dimension object.', type= int)
args = parser.parse_args()
checkpoint_dir = "checkpoint/" + args.object +'/'
data_dir = 'data/voxels/' + args.object+ '/test'
batchsize = args.batchsize
high = args.high
low = args.low
distance = args.distance
ratio = high // low
#######inputs##########
scope_depth = 'depth'
scope_occ = 'occupancy'
images_low = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='images_low') # low res odm input
side = tf.placeholder(tf.float32, [batchsize, low, low, 1], name='side') # the side being considered
combined = tf.concat((images_low, side), axis = 3)
########## network computations #######################
net_depth, depth_pred = upscale(combined, ratio, scope = scope_depth, is_train=False, reuse = False)
net_occ, occ_pred = upscale(combined, ratio, scope = scope_occ, is_train=False, reuse = False)
net_depth.print_params(False)
net_occ.print_params(False)
##### computing #######
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess=tf.Session()
tl.ops.set_gpu_fraction(sess=sess, gpu_fraction=0.999)
sess.run(tf.global_variables_initializer())
# load networks for depth and occupancy
load_networks(checkpoint_dir, sess, net_depth, args.depth, name ='depth')
load_networks(checkpoint_dir, sess, net_occ, args.occ, name = 'occ')
files = grab_files(data_dir)
for idx in (xrange(0, len(files)/args.batchsize)):
odms = []
cur_files = files[idx*batchsize:(idx+1)*batchsize]
# loops over all sides
for k in range(6):
batch, _ = make_batch(cur_files, high, low, side = k, valid = True)
depths, occs = sess.run([depth_pred,occ_pred], feed_dict={images_low:batch['low'], side: batch['side']})
odms.append(recover_odms(depths, occs, batch['low_up'], high, low, distance, threshold = 1.5*high//low)) # combining depths and occupancy maps to recover full odms
# combining information
odms = zip(odms[0], odms[1], odms[2], odms[3], odms[4], odms[5])
objs, small_objs = make_objs(cur_files) # loading the ground truth object and input object
batch_predictions = zip(odms, objs, small_objs)
# looping over batch
for odm, obj, small_obj in (batch_predictions):
small_obj = upsample(small_obj, high, low)
prediction = apply_occupancy(np.array(small_obj), np.array(odm), high)
prediction = apply_depth(np.array(prediction),np.array(odm),high)
evaluate_SR(prediction, obj, small_obj, gt = False) # render model
| [
"argparse.ArgumentParser",
"tensorflow.global_variables_initializer",
"tensorlayer.ops.set_gpu_fraction",
"tensorflow.Session",
"tensorflow.concat",
"sys.path.insert",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"numpy.array"
] | [((47, 79), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./scripts/"""'], {}), "(0, './scripts/')\n", (62, 79), False, 'import sys\n'), ((207, 298), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""3D-GAN implementation for 32*32*32 voxel output"""'}), "(description=\n '3D-GAN implementation for 32*32*32 voxel output')\n", (230, 298), False, 'import argparse\n'), ((1512, 1583), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batchsize, low, low, 1]'], {'name': '"""images_low"""'}), "(tf.float32, [batchsize, low, low, 1], name='images_low')\n", (1526, 1583), True, 'import tensorflow as tf\n'), ((1618, 1683), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[batchsize, low, low, 1]'], {'name': '"""side"""'}), "(tf.float32, [batchsize, low, low, 1], name='side')\n", (1632, 1683), True, 'import tensorflow as tf\n'), ((1726, 1763), 'tensorflow.concat', 'tf.concat', (['(images_low, side)'], {'axis': '(3)'}), '((images_low, side), axis=3)\n', (1735, 1763), True, 'import tensorflow as tf\n'), ((2121, 2137), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (2135, 2137), True, 'import tensorflow as tf\n'), ((2182, 2194), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2192, 2194), True, 'import tensorflow as tf\n'), ((2195, 2249), 'tensorlayer.ops.set_gpu_fraction', 'tl.ops.set_gpu_fraction', ([], {'sess': 'sess', 'gpu_fraction': '(0.999)'}), '(sess=sess, gpu_fraction=0.999)\n', (2218, 2249), True, 'import tensorlayer as tl\n'), ((2259, 2292), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (2290, 2292), True, 'import tensorflow as tf\n'), ((3409, 3428), 'numpy.array', 'np.array', (['small_obj'], {}), '(small_obj)\n', (3417, 3428), True, 'import numpy as np\n'), ((3430, 3443), 'numpy.array', 'np.array', (['odm'], {}), '(odm)\n', (3438, 3443), True, 'import numpy as np\n'), ((3478, 3498), 'numpy.array', 'np.array', (['prediction'], {}), 
'(prediction)\n', (3486, 3498), True, 'import numpy as np\n'), ((3499, 3512), 'numpy.array', 'np.array', (['odm'], {}), '(odm)\n', (3507, 3512), True, 'import numpy as np\n')] |
import re
import numpy as np
import pandas as pd
import random as rd
from sklearn import preprocessing
from sklearn.cluster import KMeans
from sklearn.ensemble import RandomForestRegressor
from sklearn.decomposition import PCA
# Print options
np.set_printoptions(precision=4, threshold=10000, linewidth=160, edgeitems=999, suppress=True)
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
pd.set_option('display.width', 160)
pd.set_option('expand_frame_repr', False)
pd.set_option('precision', 4)
# constructing binary features
def process_embarked():
global df_titanic_data
# replacing the missing values with the most commmon value in the variable
df_titanic_data.Embarked[df_titanic_data.Embarked.isnull()] = df_titanic_data.Embarked.dropna().mode().values
# converting the values into numbers
df_titanic_data['Embarked'] = pd.factorize(df_titanic_data['Embarked'])[0]
# binarizing the constructed features
if keep_binary:
df_titanic_data = pd.concat([df_titanic_data, pd.get_dummies(df_titanic_data['Embarked']).rename(
columns=lambda x: 'Embarked_' + str(x))], axis=1)
# Define a helper function that can use RandomForestClassifier for handling the missing values of the age variable
def set_missing_ages():
global df_titanic_data
age_data = df_titanic_data[
['Age', 'Embarked', 'Fare', 'Parch', 'SibSp', 'Title_id', 'Pclass', 'Names', 'CabinLetter']]
input_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 1::]
target_values_RF = age_data.loc[(df_titanic_data.Age.notnull())].values[:, 0]
# Creating an object from the random forest regression function of sklearn<use the documentation for more details>
regressor = RandomForestRegressor(n_estimators=2000, n_jobs=-1)
# building the model based on the input values and target values above
regressor.fit(input_values_RF, target_values_RF)
# using the trained model to predict the missing values
predicted_ages = regressor.predict(age_data.loc[(df_titanic_data.Age.isnull())].values[:, 1::])
# Filling the predicted ages in the origial titanic dataframe
age_data.loc[(age_data.Age.isnull()), 'Age'] = predicted_ages
# Helper function for constructing features from the age variable
def process_age():
global df_titanic_data
# calling the set_missing_ages helper function to use random forest regression for predicting missing values of age
set_missing_ages()
# # scale the age variable by centering it around the mean with a unit variance
# if keep_scaled:
# scaler_preprocessing = preprocessing.StandardScaler()
# df_titanic_data['Age_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Age.reshape(-1, 1))
# construct a feature for children
df_titanic_data['isChild'] = np.where(df_titanic_data.Age < 13, 1, 0)
# bin into quartiles and create binary features
df_titanic_data['Age_bin'] = pd.qcut(df_titanic_data['Age'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Age_bin']).rename(columns=lambda y: 'Age_' + str(y))],
axis=1)
if keep_bins:
df_titanic_data['Age_bin_id'] = pd.factorize(df_titanic_data['Age_bin'])[0] + 1
if keep_bins and keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Age_bin_id_scaled'] = scaler_processing.fit_transform(
df_titanic_data.Age_bin_id.reshape(-1, 1))
if not keep_strings:
df_titanic_data.drop('Age_bin', axis=1, inplace=True)
# Helper function for constructing features from the passengers/crew names
def process_name():
global df_titanic_data
# getting the different names in the names variable
df_titanic_data['Names'] = df_titanic_data['Name'].map(lambda y: len(re.split(' ', y)))
# Getting titles for each person
df_titanic_data['Title'] = df_titanic_data['Name'].map(lambda y: re.compile(", (.*?)\.").findall(y)[0])
# handling the low occuring titles
df_titanic_data['Title'][df_titanic_data.Title == 'Jonkheer'] = 'Master'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Ms', 'Mlle'])] = 'Miss'
df_titanic_data['Title'][df_titanic_data.Title == 'Mme'] = 'Mrs'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Capt', 'Don', 'Major', 'Col', 'Sir'])] = 'Sir'
df_titanic_data['Title'][df_titanic_data.Title.isin(['Dona', 'Lady', 'the Countess'])] = 'Lady'
# binarizing all the features
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Title']).rename(columns=lambda x: 'Title_' + str(x))],
axis=1)
# scalling
if keep_scaled:
scaler_preprocessing = preprocessing.StandardScaler()
df_titanic_data['Names_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Names.reshape(-1, 1))
# binning
if keep_bins:
df_titanic_data['Title_id'] = pd.factorize(df_titanic_data['Title'])[0] + 1
if keep_bins and keep_scaled:
scaler = preprocessing.StandardScaler()
df_titanic_data['Title_id_scaled'] = scaler.fit_transform(df_titanic_data.Title_id.reshape(-1, 1))
# Generate features from the cabin input variable
def process_cabin():
# refering to the global variable that contains the titanic examples
global df_titanic_data
# repllacing the missing value in cabin variable "U0"
df_titanic_data['Cabin'][df_titanic_data.Cabin.isnull()] = 'U0'
# the cabin number is a sequence of of alphanumerical digits, so we are going to create some features
# from the alphabetical part of it
df_titanic_data['CabinLetter'] = df_titanic_data['Cabin'].map(lambda l: get_cabin_letter(l))
df_titanic_data['CabinLetter'] = pd.factorize(df_titanic_data['CabinLetter'])[0]
# binarizing the cabin letters features
if keep_binary:
cletters = pd.get_dummies(df_titanic_data['CabinLetter']).rename(columns=lambda x: 'CabinLetter_' + str(x))
df_titanic_data = pd.concat([df_titanic_data, cletters], axis=1)
# creating features from the numerical side of the cabin
df_titanic_data['CabinNumber'] = df_titanic_data['Cabin'].map(lambda x: get_cabin_num(x)).astype(int) + 1
# scaling the feature
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['CabinNumber_scaled'] = scaler_processing.fit_transform(df_titanic_data.CabinNumber.reshape(-1, 1))
def get_cabin_letter(cabin_value):
# searching for the letters in the cabin alphanumerical value
letter_match = re.compile("([a-zA-Z]+)").search(cabin_value)
if letter_match:
return letter_match.group()
else:
return 'U'
def get_cabin_num(cabin_value):
# searching for the numbers in the cabin alphanumerical value
number_match = re.compile("([0-9]+)").search(cabin_value)
if number_match:
return number_match.group()
else:
return 0
# helper function for constructing features from the ticket fare variable
def process_fare():
global df_titanic_data
# handling the missing values by replacing it with the median feare
df_titanic_data['Fare'][np.isnan(df_titanic_data['Fare'])] = df_titanic_data['Fare'].median()
# zeros in the fare will cause some division problems so we are going to set them to 1/10th of the lowest fare
df_titanic_data['Fare'][np.where(df_titanic_data['Fare'] == 0)[0]] = df_titanic_data['Fare'][
df_titanic_data['Fare'].nonzero()[
0]].min() / 10
# Binarizing the features by binning them into quantiles
df_titanic_data['Fare_bin'] = pd.qcut(df_titanic_data['Fare'], 4)
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Fare_bin']).rename(columns=lambda x: 'Fare_' + str(x))],
axis=1)
# binning
if keep_bins:
df_titanic_data['Fare_bin_id'] = pd.factorize(df_titanic_data['Fare_bin'])[0] + 1
# scaling the value
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Fare_scaled'] = scaler_processing.fit_transform(df_titanic_data.Fare.reshape(-1, 1))
if keep_bins and keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['Fare_bin_id_scaled'] = scaler_processing.fit_transform(
df_titanic_data.Fare_bin_id.reshape(-1, 1))
if not keep_strings:
df_titanic_data.drop('Fare_bin', axis=1, inplace=True)
# Helper function for constructing features from the ticket variable
def process_ticket():
global df_titanic_data
df_titanic_data['TicketPrefix'] = df_titanic_data['Ticket'].map(lambda y: get_ticket_prefix(y.upper()))
df_titanic_data['TicketPrefix'] = df_titanic_data['TicketPrefix'].map(lambda y: re.sub('[\.?\/?]', '', y))
df_titanic_data['TicketPrefix'] = df_titanic_data['TicketPrefix'].map(lambda y: re.sub('STON', 'SOTON', y))
df_titanic_data['TicketPrefixId'] = pd.factorize(df_titanic_data['TicketPrefix'])[0]
# binarzing features for each ticket layer
if keep_binary:
prefixes = pd.get_dummies(df_titanic_data['TicketPrefix']).rename(columns=lambda y: 'TicketPrefix_' + str(y))
df_titanic_data = pd.concat([df_titanic_data, prefixes], axis=1)
df_titanic_data.drop(['TicketPrefix'], axis=1, inplace=True)
df_titanic_data['TicketNumber'] = df_titanic_data['Ticket'].map(lambda y: get_ticket_num(y))
df_titanic_data['TicketNumberDigits'] = df_titanic_data['TicketNumber'].map(lambda y: len(y)).astype(np.int)
df_titanic_data['TicketNumberStart'] = df_titanic_data['TicketNumber'].map(lambda y: y[0:1]).astype(np.int)
df_titanic_data['TicketNumber'] = df_titanic_data.TicketNumber.astype(np.int)
if keep_scaled:
scaler_processing = preprocessing.StandardScaler()
df_titanic_data['TicketNumber_scaled'] = scaler_processing.fit_transform(
df_titanic_data.TicketNumber.reshape(-1, 1))
def get_ticket_prefix(ticket_value):
# searching for the letters in the ticket alphanumerical value
match_letter = re.compile("([a-zA-Z\.\/]+)").search(ticket_value)
if match_letter:
return match_letter.group()
else:
return 'U'
def get_ticket_num(ticket_value):
# searching for the numbers in the ticket alphanumerical value
match_number = re.compile("([\d]+$)").search(ticket_value)
if match_number:
return match_number.group()
else:
return '0'
# construncting features from the passenger class variable
def process_PClass():
global df_titanic_data
# using the most frequent value(mode) to replace the messing value
df_titanic_data.Pclass[df_titanic_data.Pclass.isnull()] = df_titanic_data.Pclass.dropna().mode().values
# binarizing the features
if keep_binary:
df_titanic_data = pd.concat(
[df_titanic_data, pd.get_dummies(df_titanic_data['Pclass']).rename(columns=lambda y: 'Pclass_' + str(y))],
axis=1)
if keep_scaled:
scaler_preprocessing = preprocessing.StandardScaler()
df_titanic_data['Pclass_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Pclass.reshape(-1, 1))
# constructing features based on the family variables subh as SibSp and Parch
def process_family():
global df_titanic_data
# ensuring that there's no zeros to use interaction variables
df_titanic_data['SibSp'] = df_titanic_data['SibSp'] + 1
df_titanic_data['Parch'] = df_titanic_data['Parch'] + 1
# scaling
if keep_scaled:
scaler_preprocessing = preprocessing.StandardScaler()
df_titanic_data['SibSp_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.SibSp.reshape(-1, 1))
df_titanic_data['Parch_scaled'] = scaler_preprocessing.fit_transform(df_titanic_data.Parch.reshape(-1, 1))
# binarizing all the features
if keep_binary:
sibsps_var = pd.get_dummies(df_titanic_data['SibSp']).rename(columns=lambda y: 'SibSp_' + str(y))
parchs_var = pd.get_dummies(df_titanic_data['Parch']).rename(columns=lambda y: 'Parch_' + str(y))
df_titanic_data = pd.concat([df_titanic_data, sibsps_var, parchs_var], axis=1)
# binarzing the sex variable
def process_sex():
global df_titanic_data
df_titanic_data['Gender'] = np.where(df_titanic_data['Sex'] == 'male', 1, 0)
# dropping raw original
def process_drops():
global df_titanic_data
drops = ['Name', 'Names', 'Title', 'Sex', 'SibSp', 'Parch', 'Pclass', 'Embarked', \
'Cabin', 'CabinLetter', 'CabinNumber', 'Age', 'Fare', 'Ticket', 'TicketNumber']
string_drops = ['Title', 'Name', 'Cabin', 'Ticket', 'Sex', 'Ticket', 'TicketNumber']
if not keep_raw:
df_titanic_data.drop(drops, axis=1, inplace=True)
elif not keep_strings:
df_titanic_data.drop(string_drops, axis=1, inplace=True)
# handling all the feature engineering tasks
def get_titanic_dataset(binary=False, bins=False, scaled=False, strings=False, raw=True, pca=False, balanced=False):
global keep_binary, keep_bins, keep_scaled, keep_raw, keep_strings, df_titanic_data
keep_binary = binary
keep_bins = bins
keep_scaled = scaled
keep_raw = raw
keep_strings = strings
# reading the train and test sets using Pandas
train_data = pd.read_csv('data/train.csv', header=0)
test_data = pd.read_csv('data/test.csv', header=0)
# concatenate the train and test set together for doing the overall feature engineering stuff
df_titanic_data = pd.concat([train_data, test_data])
# removing duplicate indices due to coming the train and test set by re-indexing the data
df_titanic_data.reset_index(inplace=True)
# removing the index column the reset_index() function generates
df_titanic_data.drop('index', axis=1, inplace=True)
# index the columns to be 1-based index
df_titanic_data = df_titanic_data.reindex_axis(train_data.columns, axis=1)
# processing the titanic raw variables using the helper functions that we defined above
process_cabin()
process_ticket()
process_name()
process_fare()
process_embarked()
process_family()
process_sex()
process_PClass()
process_age()
process_drops()
# move the servived column to be the first
columns_list = list(df_titanic_data.columns.values)
columns_list.remove('Survived')
new_col_list = list(['Survived'])
new_col_list.extend(columns_list)
df_titanic_data = df_titanic_data.reindex(columns=new_col_list)
print("Starting with", df_titanic_data.columns.size,
"manually constructing features based on the interaction between them...\n", df_titanic_data.columns.values)
# Constructing features mannually based on the interaction between the individual features
numeric_features = df_titanic_data.loc[:,
['Age_scaled', 'Fare_scaled', 'Pclass_scaled', 'Parch_scaled', 'SibSp_scaled',
'Names_scaled', 'CabinNumber_scaled', 'Age_bin_id_scaled', 'Fare_bin_id_scaled']]
print("\nUsing only numeric features for automated feature generation:\n", numeric_features.head(10))
new_fields_count = 0
for i in range(0, numeric_features.columns.size - 1):
for j in range(0, numeric_features.columns.size - 1):
if i <= j:
name = str(numeric_features.columns.values[i]) + "*" + str(numeric_features.columns.values[j])
df_titanic_data = pd.concat(
[df_titanic_data, pd.Series(numeric_features.iloc[:, i] * numeric_features.iloc[:, j], name=name)],
axis=1)
new_fields_count += 1
if i < j:
name = str(numeric_features.columns.values[i]) + "+" + str(numeric_features.columns.values[j])
df_titanic_data = pd.concat(
[df_titanic_data, pd.Series(numeric_features.iloc[:, i] + numeric_features.iloc[:, j], name=name)],
axis=1)
new_fields_count += 1
if not i == j:
name = str(numeric_features.columns.values[i]) + "/" + str(numeric_features.columns.values[j])
df_titanic_data = pd.concat(
[df_titanic_data, pd.Series(numeric_features.iloc[:, i] / numeric_features.iloc[:, j], name=name)],
axis=1)
name = str(numeric_features.columns.values[i]) + "-" + str(numeric_features.columns.values[j])
df_titanic_data = pd.concat(
[df_titanic_data, pd.Series(numeric_features.iloc[:, i] - numeric_features.iloc[:, j], name=name)],
axis=1)
new_fields_count += 2
print("\n", new_fields_count, "new features constructed")
# using Spearman correlation method to remove the feature that have high correlation
# calculating the correlation matrix
df_titanic_data_cor = df_titanic_data.drop(['Survived', 'PassengerId'], axis=1).corr(method='spearman')
# creating a mask that will ignore correlated ones
mask_ignore = np.ones(df_titanic_data_cor.columns.size) - np.eye(df_titanic_data_cor.columns.size)
df_titanic_data_cor = mask_ignore * df_titanic_data_cor
features_to_drop = []
# dropping the correclated features
for column in df_titanic_data_cor.columns.values:
# check if we already decided to drop this variable
if np.in1d([column], features_to_drop):
continue
# finding highly correlacted variables
corr_vars = df_titanic_data_cor[abs(df_titanic_data_cor[column]) > 0.98].index
features_to_drop = np.union1d(features_to_drop, corr_vars)
print("\nWe are going to drop", features_to_drop.shape[0], " which are highly correlated features...\n")
df_titanic_data.drop(features_to_drop, axis=1, inplace=True)
# splitting the dataset to train and test and do PCA
train_data = df_titanic_data[:train_data.shape[0]]
test_data = df_titanic_data[test_data.shape[0]:]
if pca:
print("reducing number of variables...")
train_data, test_data = reduce(train_data, test_data)
else:
# drop the empty 'Survived' column for the test set that was created during set concatentation
test_data.drop('Survived', axis=1, inplace=True)
print("\n", train_data.columns.size, "initial features generated...\n") # , input_df.columns.values
return train_data, test_data
# reducing the dimensionality for the training and testing set
def reduce(train_data, test_data):
# join the full data together
df_titanic_data = pd.concat([train_data, test_data])
df_titanic_data.reset_index(inplace=True)
df_titanic_data.drop('index', axis=1, inplace=True)
df_titanic_data = df_titanic_data.reindex_axis(train_data.columns, axis=1)
# converting the survived column to series
survived_series = pd.Series(df['Survived'], name='Survived')
print(df_titanic_data.head())
# getting the input and target values
input_values = df_titanic_data.values[:, 1::]
target_values = df_titanic_data.values[:, 0]
print(input_values[0:10])
# minimum variance percentage that should be covered by the reduced number of variables
variance_percentage = .99
# creating PCA object
pca_object = PCA(n_components=variance_percentage)
# trasforming the features
input_values_transformed = pca_object.fit_transform(input_values, target_values)
# creating a datafram for the transformed variables from PCA
pca_df = pd.DataFrame(input_values_transformed)
print(pca_df.shape[1], " reduced components which describe ", str(variance_percentage)[1:], "% of the variance")
# constructing a new dataframe that contains the newly reduced vars of PCA
df_titanic_data = pd.concat([survived_series, pca_df], axis=1)
# split into separate input and test sets again
train_data = df_titanic_data[:train_data.shape[0]]
test_data = df_titanic_data[test_data.shape[0]:]
test_data.reset_index(inplace=True)
test_data.drop('index', axis=1, inplace=True)
test_data.drop('Survived', axis=1, inplace=True)
return train_data, test_data
# Calling the helper functions
if __name__ == '__main__':
train, test = get_titanic_dataset(bins=True, scaled=True, binary=True)
initial_drops = ['PassengerId']
train.drop(initial_drops, axis=1, inplace=True)
test.drop(initial_drops, axis=1, inplace=True)
train, test = reduce(train, test)
print(train.columns.values)
| [
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"numpy.ones",
"numpy.isnan",
"pandas.set_option",
"pandas.DataFrame",
"numpy.set_printoptions",
"pandas.qcut",
"numpy.union1d",
"pandas.concat",
"re.sub",
"re.split",
"pandas.get_dummies",
"sklearn.ensemble.RandomForestRegressor",
... | [((244, 343), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'precision': '(4)', 'threshold': '(10000)', 'linewidth': '(160)', 'edgeitems': '(999)', 'suppress': '(True)'}), '(precision=4, threshold=10000, linewidth=160, edgeitems=\n 999, suppress=True)\n', (263, 343), True, 'import numpy as np\n'), ((339, 381), 'pandas.set_option', 'pd.set_option', (['"""display.max_columns"""', 'None'], {}), "('display.max_columns', None)\n", (352, 381), True, 'import pandas as pd\n'), ((382, 421), 'pandas.set_option', 'pd.set_option', (['"""display.max_rows"""', 'None'], {}), "('display.max_rows', None)\n", (395, 421), True, 'import pandas as pd\n'), ((422, 457), 'pandas.set_option', 'pd.set_option', (['"""display.width"""', '(160)'], {}), "('display.width', 160)\n", (435, 457), True, 'import pandas as pd\n'), ((458, 499), 'pandas.set_option', 'pd.set_option', (['"""expand_frame_repr"""', '(False)'], {}), "('expand_frame_repr', False)\n", (471, 499), True, 'import pandas as pd\n'), ((500, 529), 'pandas.set_option', 'pd.set_option', (['"""precision"""', '(4)'], {}), "('precision', 4)\n", (513, 529), True, 'import pandas as pd\n'), ((1764, 1815), 'sklearn.ensemble.RandomForestRegressor', 'RandomForestRegressor', ([], {'n_estimators': '(2000)', 'n_jobs': '(-1)'}), '(n_estimators=2000, n_jobs=-1)\n', (1785, 1815), False, 'from sklearn.ensemble import RandomForestRegressor\n'), ((2870, 2910), 'numpy.where', 'np.where', (['(df_titanic_data.Age < 13)', '(1)', '(0)'], {}), '(df_titanic_data.Age < 13, 1, 0)\n', (2878, 2910), True, 'import numpy as np\n'), ((2997, 3031), 'pandas.qcut', 'pd.qcut', (["df_titanic_data['Age']", '(4)'], {}), "(df_titanic_data['Age'], 4)\n", (3004, 3031), True, 'import pandas as pd\n'), ((7888, 7923), 'pandas.qcut', 'pd.qcut', (["df_titanic_data['Fare']", '(4)'], {}), "(df_titanic_data['Fare'], 4)\n", (7895, 7923), True, 'import pandas as pd\n'), ((12607, 12655), 'numpy.where', 'np.where', (["(df_titanic_data['Sex'] == 'male')", '(1)', '(0)'], {}), 
"(df_titanic_data['Sex'] == 'male', 1, 0)\n", (12615, 12655), True, 'import numpy as np\n'), ((13609, 13648), 'pandas.read_csv', 'pd.read_csv', (['"""data/train.csv"""'], {'header': '(0)'}), "('data/train.csv', header=0)\n", (13620, 13648), True, 'import pandas as pd\n'), ((13665, 13703), 'pandas.read_csv', 'pd.read_csv', (['"""data/test.csv"""'], {'header': '(0)'}), "('data/test.csv', header=0)\n", (13676, 13703), True, 'import pandas as pd\n'), ((13825, 13859), 'pandas.concat', 'pd.concat', (['[train_data, test_data]'], {}), '([train_data, test_data])\n', (13834, 13859), True, 'import pandas as pd\n'), ((18917, 18951), 'pandas.concat', 'pd.concat', (['[train_data, test_data]'], {}), '([train_data, test_data])\n', (18926, 18951), True, 'import pandas as pd\n'), ((19203, 19245), 'pandas.Series', 'pd.Series', (["df['Survived']"], {'name': '"""Survived"""'}), "(df['Survived'], name='Survived')\n", (19212, 19245), True, 'import pandas as pd\n'), ((19621, 19658), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': 'variance_percentage'}), '(n_components=variance_percentage)\n', (19624, 19658), False, 'from sklearn.decomposition import PCA\n'), ((19855, 19893), 'pandas.DataFrame', 'pd.DataFrame', (['input_values_transformed'], {}), '(input_values_transformed)\n', (19867, 19893), True, 'import pandas as pd\n'), ((20114, 20158), 'pandas.concat', 'pd.concat', (['[survived_series, pca_df]'], {'axis': '(1)'}), '([survived_series, pca_df], axis=1)\n', (20123, 20158), True, 'import pandas as pd\n'), ((884, 925), 'pandas.factorize', 'pd.factorize', (["df_titanic_data['Embarked']"], {}), "(df_titanic_data['Embarked'])\n", (896, 925), True, 'import pandas as pd\n'), ((3397, 3427), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (3425, 3427), False, 'from sklearn import preprocessing\n'), ((4839, 4869), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (4867, 4869), False, 'from sklearn import 
preprocessing\n'), ((5154, 5184), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (5182, 5184), False, 'from sklearn import preprocessing\n'), ((5873, 5917), 'pandas.factorize', 'pd.factorize', (["df_titanic_data['CabinLetter']"], {}), "(df_titanic_data['CabinLetter'])\n", (5885, 5917), True, 'import pandas as pd\n'), ((6128, 6174), 'pandas.concat', 'pd.concat', (['[df_titanic_data, cletters]'], {'axis': '(1)'}), '([df_titanic_data, cletters], axis=1)\n', (6137, 6174), True, 'import pandas as pd\n'), ((6422, 6452), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (6450, 6452), False, 'from sklearn import preprocessing\n'), ((7299, 7332), 'numpy.isnan', 'np.isnan', (["df_titanic_data['Fare']"], {}), "(df_titanic_data['Fare'])\n", (7307, 7332), True, 'import numpy as np\n'), ((8317, 8347), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (8345, 8347), False, 'from sklearn import preprocessing\n'), ((8521, 8551), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (8549, 8551), False, 'from sklearn import preprocessing\n'), ((9271, 9316), 'pandas.factorize', 'pd.factorize', (["df_titanic_data['TicketPrefix']"], {}), "(df_titanic_data['TicketPrefix'])\n", (9283, 9316), True, 'import pandas as pd\n'), ((9532, 9578), 'pandas.concat', 'pd.concat', (['[df_titanic_data, prefixes]'], {'axis': '(1)'}), '([df_titanic_data, prefixes], axis=1)\n', (9541, 9578), True, 'import pandas as pd\n'), ((10100, 10130), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (10128, 10130), False, 'from sklearn import preprocessing\n'), ((11353, 11383), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], {}), '()\n', (11381, 11383), False, 'from sklearn import preprocessing\n'), ((11883, 11913), 'sklearn.preprocessing.StandardScaler', 'preprocessing.StandardScaler', ([], 
{}), '()\n', (11911, 11913), False, 'from sklearn import preprocessing\n'), ((12437, 12497), 'pandas.concat', 'pd.concat', (['[df_titanic_data, sibsps_var, parchs_var]'], {'axis': '(1)'}), '([df_titanic_data, sibsps_var, parchs_var], axis=1)\n', (12446, 12497), True, 'import pandas as pd\n'), ((17387, 17428), 'numpy.ones', 'np.ones', (['df_titanic_data_cor.columns.size'], {}), '(df_titanic_data_cor.columns.size)\n', (17394, 17428), True, 'import numpy as np\n'), ((17431, 17471), 'numpy.eye', 'np.eye', (['df_titanic_data_cor.columns.size'], {}), '(df_titanic_data_cor.columns.size)\n', (17437, 17471), True, 'import numpy as np\n'), ((17726, 17761), 'numpy.in1d', 'np.in1d', (['[column]', 'features_to_drop'], {}), '([column], features_to_drop)\n', (17733, 17761), True, 'import numpy as np\n'), ((17946, 17985), 'numpy.union1d', 'np.union1d', (['features_to_drop', 'corr_vars'], {}), '(features_to_drop, corr_vars)\n', (17956, 17985), True, 'import numpy as np\n'), ((6695, 6720), 're.compile', 're.compile', (['"""([a-zA-Z]+)"""'], {}), "('([a-zA-Z]+)')\n", (6705, 6720), False, 'import re\n'), ((6947, 6969), 're.compile', 're.compile', (['"""([0-9]+)"""'], {}), "('([0-9]+)')\n", (6957, 6969), False, 'import re\n'), ((7514, 7552), 'numpy.where', 'np.where', (["(df_titanic_data['Fare'] == 0)"], {}), "(df_titanic_data['Fare'] == 0)\n", (7522, 7552), True, 'import numpy as np\n'), ((9091, 9118), 're.sub', 're.sub', (['"""[\\\\.?\\\\/?]"""', '""""""', 'y'], {}), "('[\\\\.?\\\\/?]', '', y)\n", (9097, 9118), False, 'import re\n'), ((9202, 9228), 're.sub', 're.sub', (['"""STON"""', '"""SOTON"""', 'y'], {}), "('STON', 'SOTON', y)\n", (9208, 9228), False, 'import re\n'), ((10395, 10426), 're.compile', 're.compile', (['"""([a-zA-Z\\\\.\\\\/]+)"""'], {}), "('([a-zA-Z\\\\.\\\\/]+)')\n", (10405, 10426), False, 'import re\n'), ((10654, 10677), 're.compile', 're.compile', (['"""([\\\\d]+$)"""'], {}), "('([\\\\d]+$)')\n", (10664, 10677), False, 'import re\n'), ((3286, 3326), 
'pandas.factorize', 'pd.factorize', (["df_titanic_data['Age_bin']"], {}), "(df_titanic_data['Age_bin'])\n", (3298, 3326), True, 'import pandas as pd\n'), ((3905, 3921), 're.split', 're.split', (['""" """', 'y'], {}), "(' ', y)\n", (3913, 3921), False, 'import re\n'), ((5056, 5094), 'pandas.factorize', 'pd.factorize', (["df_titanic_data['Title']"], {}), "(df_titanic_data['Title'])\n", (5068, 5094), True, 'import pandas as pd\n'), ((6005, 6051), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['CabinLetter']"], {}), "(df_titanic_data['CabinLetter'])\n", (6019, 6051), True, 'import pandas as pd\n'), ((8195, 8236), 'pandas.factorize', 'pd.factorize', (["df_titanic_data['Fare_bin']"], {}), "(df_titanic_data['Fare_bin'])\n", (8207, 8236), True, 'import pandas as pd\n'), ((9407, 9454), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['TicketPrefix']"], {}), "(df_titanic_data['TicketPrefix'])\n", (9421, 9454), True, 'import pandas as pd\n'), ((12220, 12260), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['SibSp']"], {}), "(df_titanic_data['SibSp'])\n", (12234, 12260), True, 'import pandas as pd\n'), ((12326, 12366), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['Parch']"], {}), "(df_titanic_data['Parch'])\n", (12340, 12366), True, 'import pandas as pd\n'), ((1046, 1089), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['Embarked']"], {}), "(df_titanic_data['Embarked'])\n", (1060, 1089), True, 'import pandas as pd\n'), ((3120, 3162), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['Age_bin']"], {}), "(df_titanic_data['Age_bin'])\n", (3134, 3162), True, 'import pandas as pd\n'), ((4031, 4055), 're.compile', 're.compile', (['""", (.*?)\\\\."""'], {}), "(', (.*?)\\\\.')\n", (4041, 4055), False, 'import re\n'), ((4665, 4705), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['Title']"], {}), "(df_titanic_data['Title'])\n", (4679, 4705), True, 'import pandas as pd\n'), ((8012, 8055), 'pandas.get_dummies', 
'pd.get_dummies', (["df_titanic_data['Fare_bin']"], {}), "(df_titanic_data['Fare_bin'])\n", (8026, 8055), True, 'import pandas as pd\n'), ((11192, 11233), 'pandas.get_dummies', 'pd.get_dummies', (["df_titanic_data['Pclass']"], {}), "(df_titanic_data['Pclass'])\n", (11206, 11233), True, 'import pandas as pd\n'), ((15825, 15904), 'pandas.Series', 'pd.Series', (['(numeric_features.iloc[:, i] * numeric_features.iloc[:, j])'], {'name': 'name'}), '(numeric_features.iloc[:, i] * numeric_features.iloc[:, j], name=name)\n', (15834, 15904), True, 'import pandas as pd\n'), ((16189, 16268), 'pandas.Series', 'pd.Series', (['(numeric_features.iloc[:, i] + numeric_features.iloc[:, j])'], {'name': 'name'}), '(numeric_features.iloc[:, i] + numeric_features.iloc[:, j], name=name)\n', (16198, 16268), True, 'import pandas as pd\n'), ((16558, 16637), 'pandas.Series', 'pd.Series', (['(numeric_features.iloc[:, i] / numeric_features.iloc[:, j])'], {'name': 'name'}), '(numeric_features.iloc[:, i] / numeric_features.iloc[:, j], name=name)\n', (16567, 16637), True, 'import pandas as pd\n'), ((16862, 16941), 'pandas.Series', 'pd.Series', (['(numeric_features.iloc[:, i] - numeric_features.iloc[:, j])'], {'name': 'name'}), '(numeric_features.iloc[:, i] - numeric_features.iloc[:, j], name=name)\n', (16871, 16941), True, 'import pandas as pd\n')] |
# -*- coding: utf-8 -*
import brica
import tensorflow as tf
import numpy as np
import random
import time
import os
#--Agent---------------------------------------------------------------------------
class Agent(object):
    """A3C worker: owns a local actor-critic net and pushes updates to the global one."""
    def __init__(self, number, sess, gamma=0.9, params_update_iter=10):
        self.number = number
        self.name = 'agent_' + str(number)
        self.agent_brain = Brain(self.name, sess)  # per-worker ACNet
        self.sess = sess
        self.gamma = gamma  # discount factor for returns
        self.params_update_iter = params_update_iter  # steps between global updates
    def _discounted_reward(self, v_, r):
        """Accumulate discounted returns backward from bootstrap value ``v_``."""
        returns = np.zeros_like(r)
        running = v_
        for idx in reversed(range(len(r))):
            running = r[idx] + running * self.gamma
            returns[idx] = running
        return returns
    def choose_action(self, s):
        """Delegate action selection for state ``s`` to the local network."""
        return self.agent_brain.choose_action(s)
    def predict_value(self, s):
        """Delegate state-value estimation for state ``s`` to the local network."""
        return self.agent_brain.predict_value(s)
    def learn(self, buffer_s, buffer_a, buffer_v_target):
        """Stack the rollout buffers and train the global ACNet with them."""
        stacked = [np.vstack(buf) for buf in (buffer_s, buffer_a, buffer_v_target)]
        self.agent_brain.update_global_params(*stacked)  # actual training step, update global ACNet
#--Network for the Actor Critic----------------------------------------------------------------
class Brain(object):
    """Actor-critic network (TF1 graph) for continuous actions.

    Built once with scope 'global' (shared target of updates) and once per
    worker; worker graphs additionally get loss, gradient, pull and push ops.
    """
    def __init__(self, scope, sess, action_scale=1.0, actor_lr=0.001, critic_lr=0.001):
        self.sess = sess
        # Actions are clipped into [low_action_bound, high_action_bound].
        self.low_action_bound = 0.0
        self.high_action_bound = 1.0
        self.n_states = 64+1 #128/2 #* 3
        self.n_actions = 10 #64 #128/2
        self.action_scale = action_scale  # scales the tanh mean output
        self.actor_lr = actor_lr
        self.critic_lr = critic_lr
        self.entropy_beta = 0.01  # weight of the entropy bonus in the policy loss
        self.build_graph(scope)
    def build_graph(self, scope):
        """Create placeholders, networks, and (for workers) the training ops."""
        self.s = tf.placeholder(tf.float32, [None, self.n_states], 's')
        self.a = tf.placeholder(tf.float32, [None, self.n_actions], 'a')
        self.q_target = tf.placeholder(tf.float32, [None, 1], 'q_target')
        if scope == 'global':
            # The global net only needs its variables; no loss/ops are built.
            self._build_net(scope=scope)
        else:
            mu, sigma, self.critic_net = self._build_net(scope=scope)
            # trainable parameters of this worker's local nets
            la_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')
            lc_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')
            # trainable parameters of the shared global nets
            ga_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='global/actor')
            gc_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='global/critic')
            with tf.name_scope('c_loss'):
                td = tf.subtract(self.q_target, self.critic_net, name='TD_error') # td = R(t) - V(s)
                self.critic_loss_op = tf.reduce_mean(tf.square(td)) # td**2
            with tf.name_scope('a_loss'):
                sigma = sigma + 1e-4  # keep the stddev strictly positive
                normal_dist = tf.distributions.Normal(mu, sigma)
                log_prob_action_adv = normal_dist.log_prob(self.a) * td #log( π(a|s,θ) ) * td
                entropy = normal_dist.entropy()
                self.policy_loss = self.entropy_beta * entropy + log_prob_action_adv  # entropy term discourages premature convergence / big mistakes
                self.policy_loss_op = tf.reduce_mean(-self.policy_loss)
            with tf.variable_scope('train'):
                self.actor_optimizer = tf.train.RMSPropOptimizer(self.actor_lr, name='RMSPropA')
                self.critic_optimizer = tf.train.RMSPropOptimizer(self.critic_lr, name='RMSPropC')
            with tf.name_scope('choose_a'):
                self.actor_net = tf.squeeze(normal_dist.sample(1), axis=0)  # draw one sample from normal_dist and squeeze the sampling axis
                self.actor_net = tf.clip_by_value(self.actor_net, self.low_action_bound, self.high_action_bound)  # values outside the bounds are clamped to them
            with tf.name_scope('local_grad'):
                # Gradients of the local losses w.r.t. the local parameters.
                self.actor_grads = tf.gradients(self.policy_loss_op, la_params, name='actor_grads')
                self.critic_grads = tf.gradients(self.critic_loss_op, lc_params, name='critic_grads')
            with tf.name_scope('pull'):
                # pull: copy the global parameters into this worker
                self.update_la_params_op = [la.assign(ga) for la, ga in zip(la_params, ga_params)]
                self.update_lc_params_op = [lc.assign(gc) for lc, gc in zip(lc_params, gc_params)]
            with tf.name_scope('push'):
                # push: apply this worker's gradients to the global parameters
                self.update_ga_params_op = self.actor_optimizer.apply_gradients(zip(self.actor_grads, ga_params))
                self.update_gc_params_op = self.critic_optimizer.apply_gradients(zip(self.critic_grads, gc_params))
    def _build_net(self, scope):
        """Build actor (mu, sigma heads) and critic (state-value head) networks."""
        with tf.variable_scope(scope):
            k_init, b_init = tf.random_normal_initializer(0.0, 0.1), tf.constant_initializer(0.1)
            '''
            # add LSTM
            lstm_cell = tf.nn.rnn.BasicLSTMCell(1, state_is_tuple=True)
            c_init = np.zeros((1, lstm_cell.state_size.c), np.float32)
            h_init = np.zeros((1, lstm_cell.state_size.h), np.float32)
            self.state_init = [c_init, h_init]
            c_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.c])
            h_in = tf.placeholder(tf.float32, [1, lstm_cell.state_size.h])
            '''
            # actor net (activations: relu6 / tanh / softplus)
            with tf.variable_scope('actor'):
                actor_hidden1 = tf.layers.dense(inputs=self.s, units=32,
                                                activation=tf.nn.relu6,
                                                kernel_initializer=k_init,
                                                bias_initializer=b_init,
                                                name='actor_hidden1')
                actor_hidden2 = tf.layers.dense(inputs=actor_hidden1, units=16,
                                                activation=tf.nn.relu6,
                                                kernel_initializer=k_init,
                                                bias_initializer=b_init,
                                                name='actor_hidden2')
                mu = tf.layers.dense(inputs=actor_hidden2,
                                     units=self.n_actions,
                                     activation=tf.nn.tanh,
                                     kernel_initializer=k_init,
                                     bias_initializer=b_init,
                                     name='mu')
                sigma = tf.layers.dense(inputs=actor_hidden2,
                                        units=self.n_actions,
                                        activation=tf.nn.softplus,
                                        kernel_initializer=k_init,
                                        bias_initializer=b_init,
                                        name='sigma')
                mu = self.action_scale*mu  # rescale the tanh mean into action units
            # critic net (activation: relu6)
            with tf.variable_scope('critic'):
                critic_hidden1 = tf.layers.dense(inputs=self.s, units=32,
                                                 activation=tf.nn.relu6,
                                                 kernel_initializer=k_init,
                                                 bias_initializer=b_init,
                                                 name='critic_hidden1')
                critic_hidden2 = tf.layers.dense(inputs=critic_hidden1, units=16,
                                                 activation=tf.nn.relu6,
                                                 kernel_initializer=k_init,
                                                 bias_initializer=b_init,
                                                 name='critic_hidden2')
                critic_net = tf.layers.dense(inputs=critic_hidden2,
                                             units=1,
                                             kernel_initializer=k_init,
                                             bias_initializer=b_init,
                                             name='critic_net') # state value
        return mu, sigma, critic_net
    def update_global_params(self, s, a, dr):
        """Run the push ops: apply this worker's gradients to the global nets."""
        s = np.reshape(s,(-1, self.n_states))
        feed_dict = {self.s: s, self.a: a, self.q_target: dr}
        self.sess.run([self.update_ga_params_op, self.update_gc_params_op], feed_dict)
    def update_local_params(self):
        """Run the pull ops: copy the global parameters into this worker."""
        self.sess.run([self.update_la_params_op, self.update_lc_params_op])
    def choose_action(self, s):
        """Sample one clipped action from the current policy for state ``s``."""
        s = np.reshape(s,(-1, self.n_states))
        return self.sess.run(self.actor_net, {self.s: s})[0]
    def predict_value(self, s):
        """Return the scalar state-value estimate V(s)."""
        s = np.reshape(s, (-1, self.n_states))
        v = self.sess.run(self.critic_net, {self.s: s})[0, 0]
        return v
#--BG--------------------------------------------------------------------------------------------------
class BG(object):
    """Basal-ganglia BriCA module.

    Wraps an A3C worker: at each call it turns FEF/PFC input into a state
    vector, samples an action, buffers the transition, and periodically
    (every ``params_update_iter`` steps, or at episode end) trains the
    shared global network.
    """
    def __init__(self, logger=None, log_path=None, train=True):
        self.timing = brica.Timing(5, 1, 0)
        self.log_path = log_path
        self.train = train
        # Rollout buffers (states, actions, rewards) since the last update.
        self.buffer_s, self.buffer_a, self.buffer_r = [], [], []
        self.steps = 1
        self.a = []
        self.reward = 0
        self.sess = tf.Session()
        Brain('global', self.sess)  # build the shared global net first
        self.worker = Agent(1, self.sess)
        self.saver = tf.train.Saver(max_to_keep=None)
        self.sess.run(tf.global_variables_initializer())
    def save_model(self, model_name):
        """Save the TF variables under ``<log_path>/param/<model_name>``."""
        path = self.log_path + "/param"
        # exist_ok avoids the check-then-create race of exists()+makedirs().
        os.makedirs(path, exist_ok=True)
        self.saver.save(self.sess, path + '/' + model_name)
    def load_model(self, model_name):
        """Restore TF variables from ``./log/load/<model_name>``."""
        path = './log/load/'
        self.saver.restore(self.sess, path + model_name)
    def __call__(self, inputs):
        """One BriCA step: consume reward/phase/FEF data, maybe train, emit an action.

        Raises:
            Exception: if any of the required input keys is missing.
        """
        if 'from_environment' not in inputs:
            raise Exception('BG did not recieve from Environment')
        if 'from_pfc' not in inputs:
            raise Exception('BG did not recieve from PFC')
        if 'from_fef' not in inputs:
            raise Exception('BG did not recieve from FEF')
        reward, done = inputs['from_environment']
        phase = inputs['from_pfc']
        fef_data = inputs['from_fef']
        print('reward:', reward)
        print('step:', self.steps)
        self.reward = reward
        # State = first column of the FEF data plus the current task phase.
        s = np.append(fef_data[:, 0], phase)
        self.a = self.worker.choose_action(s)
        self.buffer_s.append(s)
        self.buffer_a.append(self.a)
        self.buffer_r.append((reward + 8) / 8)  # shift/scale reward into a small positive range
        if self.steps % self.worker.params_update_iter == 0 or done:
            # Bootstrap with V(s) unless the episode finished.
            if done:
                v_s_ = 0
            else:
                v_s_ = self.worker.predict_value(s)
            discounted_r = self.worker._discounted_reward(v_s_, self.buffer_r)
            self.worker.learn(self.buffer_s, self.buffer_a, discounted_r)
            self.buffer_s, self.buffer_a, self.buffer_r = [], [], []
            # Refresh the local net from the just-updated global parameters.
            self.worker.agent_brain.update_local_params()
        self.steps += 1
        return dict(to_pfc=None, to_fef=None, to_sc=self.a)
| [
"tensorflow.clip_by_value",
"tensorflow.get_collection",
"tensorflow.constant_initializer",
"tensorflow.train.RMSPropOptimizer",
"numpy.zeros_like",
"tensorflow.subtract",
"os.path.exists",
"tensorflow.variable_scope",
"brica.Timing",
"tensorflow.placeholder",
"numpy.append",
"numpy.reshape",
... | [((613, 629), 'numpy.zeros_like', 'np.zeros_like', (['r'], {}), '(r)\n', (626, 629), True, 'import numpy as np\n'), ((1885, 1939), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_states]', '"""s"""'], {}), "(tf.float32, [None, self.n_states], 's')\n", (1899, 1939), True, 'import tensorflow as tf\n'), ((1957, 2012), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, self.n_actions]', '"""a"""'], {}), "(tf.float32, [None, self.n_actions], 'a')\n", (1971, 2012), True, 'import tensorflow as tf\n'), ((2037, 2086), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, 1]', '"""q_target"""'], {}), "(tf.float32, [None, 1], 'q_target')\n", (2051, 2086), True, 'import tensorflow as tf\n'), ((8266, 8300), 'numpy.reshape', 'np.reshape', (['s', '(-1, self.n_states)'], {}), '(s, (-1, self.n_states))\n', (8276, 8300), True, 'import numpy as np\n'), ((8606, 8640), 'numpy.reshape', 'np.reshape', (['s', '(-1, self.n_states)'], {}), '(s, (-1, self.n_states))\n', (8616, 8640), True, 'import numpy as np\n'), ((8746, 8780), 'numpy.reshape', 'np.reshape', (['s', '(-1, self.n_states)'], {}), '(s, (-1, self.n_states))\n', (8756, 8780), True, 'import numpy as np\n'), ((9069, 9090), 'brica.Timing', 'brica.Timing', (['(5)', '(1)', '(0)'], {}), '(5, 1, 0)\n', (9081, 9090), False, 'import brica\n'), ((9385, 9397), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9395, 9397), True, 'import tensorflow as tf\n'), ((9496, 9528), 'tensorflow.train.Saver', 'tf.train.Saver', ([], {'max_to_keep': 'None'}), '(max_to_keep=None)\n', (9510, 9528), True, 'import tensorflow as tf\n'), ((10727, 10759), 'numpy.append', 'np.append', (['fef_data[:, 0]', 'phase'], {}), '(fef_data[:, 0], phase)\n', (10736, 10759), True, 'import numpy as np\n'), ((1072, 1091), 'numpy.vstack', 'np.vstack', (['buffer_s'], {}), '(buffer_s)\n', (1081, 1091), True, 'import numpy as np\n'), ((1093, 1112), 'numpy.vstack', 'np.vstack', (['buffer_a'], {}), '(buffer_a)\n', 
(1102, 1112), True, 'import numpy as np\n'), ((1114, 1140), 'numpy.vstack', 'np.vstack', (['buffer_v_target'], {}), '(buffer_v_target)\n', (1123, 1140), True, 'import numpy as np\n'), ((2301, 2376), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': "(scope + '/actor')"}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/actor')\n", (2318, 2376), True, 'import tensorflow as tf\n'), ((2401, 2477), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': "(scope + '/critic')"}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope=scope + '/critic')\n", (2418, 2477), True, 'import tensorflow as tf\n'), ((2537, 2610), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""global/actor"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='global/actor')\n", (2554, 2610), True, 'import tensorflow as tf\n'), ((2635, 2709), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES'], {'scope': '"""global/critic"""'}), "(tf.GraphKeys.TRAINABLE_VARIABLES, scope='global/critic')\n", (2652, 2709), True, 'import tensorflow as tf\n'), ((4825, 4849), 'tensorflow.variable_scope', 'tf.variable_scope', (['scope'], {}), '(scope)\n', (4842, 4849), True, 'import tensorflow as tf\n'), ((9551, 9584), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9582, 9584), True, 'import tensorflow as tf\n'), ((9770, 9790), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (9784, 9790), False, 'import os\n'), ((9804, 9821), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (9815, 9821), False, 'import os\n'), ((2728, 2751), 'tensorflow.name_scope', 'tf.name_scope', (['"""c_loss"""'], {}), "('c_loss')\n", (2741, 2751), True, 'import tensorflow as tf\n'), ((2774, 2834), 'tensorflow.subtract', 'tf.subtract', (['self.q_target', 'self.critic_net'], {'name': '"""TD_error"""'}), 
"(self.q_target, self.critic_net, name='TD_error')\n", (2785, 2834), True, 'import tensorflow as tf\n'), ((2949, 2972), 'tensorflow.name_scope', 'tf.name_scope', (['"""a_loss"""'], {}), "('a_loss')\n", (2962, 2972), True, 'import tensorflow as tf\n'), ((3041, 3075), 'tensorflow.distributions.Normal', 'tf.distributions.Normal', (['mu', 'sigma'], {}), '(mu, sigma)\n', (3064, 3075), True, 'import tensorflow as tf\n'), ((3363, 3396), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(-self.policy_loss)'], {}), '(-self.policy_loss)\n', (3377, 3396), True, 'import tensorflow as tf\n'), ((3415, 3441), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""train"""'], {}), "('train')\n", (3432, 3441), True, 'import tensorflow as tf\n'), ((3482, 3539), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.actor_lr'], {'name': '"""RMSPropA"""'}), "(self.actor_lr, name='RMSPropA')\n", (3507, 3539), True, 'import tensorflow as tf\n'), ((3580, 3638), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['self.critic_lr'], {'name': '"""RMSPropC"""'}), "(self.critic_lr, name='RMSPropC')\n", (3605, 3638), True, 'import tensorflow as tf\n'), ((3657, 3682), 'tensorflow.name_scope', 'tf.name_scope', (['"""choose_a"""'], {}), "('choose_a')\n", (3670, 3682), True, 'import tensorflow as tf\n'), ((3822, 3901), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['self.actor_net', 'self.low_action_bound', 'self.high_action_bound'], {}), '(self.actor_net, self.low_action_bound, self.high_action_bound)\n', (3838, 3901), True, 'import tensorflow as tf\n'), ((3954, 3981), 'tensorflow.name_scope', 'tf.name_scope', (['"""local_grad"""'], {}), "('local_grad')\n", (3967, 3981), True, 'import tensorflow as tf\n'), ((4018, 4082), 'tensorflow.gradients', 'tf.gradients', (['self.policy_loss_op', 'la_params'], {'name': '"""actor_grads"""'}), "(self.policy_loss_op, la_params, name='actor_grads')\n", (4030, 4082), True, 'import tensorflow as tf\n'), ((4119, 4184), 
'tensorflow.gradients', 'tf.gradients', (['self.critic_loss_op', 'lc_params'], {'name': '"""critic_grads"""'}), "(self.critic_loss_op, lc_params, name='critic_grads')\n", (4131, 4184), True, 'import tensorflow as tf\n'), ((4203, 4224), 'tensorflow.name_scope', 'tf.name_scope', (['"""pull"""'], {}), "('pull')\n", (4216, 4224), True, 'import tensorflow as tf\n'), ((4483, 4504), 'tensorflow.name_scope', 'tf.name_scope', (['"""push"""'], {}), "('push')\n", (4496, 4504), True, 'import tensorflow as tf\n'), ((4880, 4918), 'tensorflow.random_normal_initializer', 'tf.random_normal_initializer', (['(0.0)', '(0.1)'], {}), '(0.0, 0.1)\n', (4908, 4918), True, 'import tensorflow as tf\n'), ((4920, 4948), 'tensorflow.constant_initializer', 'tf.constant_initializer', (['(0.1)'], {}), '(0.1)\n', (4943, 4948), True, 'import tensorflow as tf\n'), ((5480, 5506), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""actor"""'], {}), "('actor')\n", (5497, 5506), True, 'import tensorflow as tf\n'), ((5540, 5682), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.s', 'units': '(32)', 'activation': 'tf.nn.relu6', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""actor_hidden1"""'}), "(inputs=self.s, units=32, activation=tf.nn.relu6,\n kernel_initializer=k_init, bias_initializer=b_init, name='actor_hidden1')\n", (5555, 5682), True, 'import tensorflow as tf\n'), ((5899, 6048), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'actor_hidden1', 'units': '(16)', 'activation': 'tf.nn.relu6', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""actor_hidden2"""'}), "(inputs=actor_hidden1, units=16, activation=tf.nn.relu6,\n kernel_initializer=k_init, bias_initializer=b_init, name='actor_hidden2')\n", (5914, 6048), True, 'import tensorflow as tf\n'), ((6255, 6405), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'actor_hidden2', 'units': 'self.n_actions', 'activation': 'tf.nn.tanh', 'kernel_initializer': 
'k_init', 'bias_initializer': 'b_init', 'name': '"""mu"""'}), "(inputs=actor_hidden2, units=self.n_actions, activation=tf.\n nn.tanh, kernel_initializer=k_init, bias_initializer=b_init, name='mu')\n", (6270, 6405), True, 'import tensorflow as tf\n'), ((6610, 6772), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'actor_hidden2', 'units': 'self.n_actions', 'activation': 'tf.nn.softplus', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""sigma"""'}), "(inputs=actor_hidden2, units=self.n_actions, activation=tf.\n nn.softplus, kernel_initializer=k_init, bias_initializer=b_init, name=\n 'sigma')\n", (6625, 6772), True, 'import tensorflow as tf\n'), ((7059, 7086), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""critic"""'], {}), "('critic')\n", (7076, 7086), True, 'import tensorflow as tf\n'), ((7121, 7264), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'self.s', 'units': '(32)', 'activation': 'tf.nn.relu6', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""critic_hidden1"""'}), "(inputs=self.s, units=32, activation=tf.nn.relu6,\n kernel_initializer=k_init, bias_initializer=b_init, name='critic_hidden1')\n", (7136, 7264), True, 'import tensorflow as tf\n'), ((7486, 7637), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'critic_hidden1', 'units': '(16)', 'activation': 'tf.nn.relu6', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""critic_hidden2"""'}), "(inputs=critic_hidden1, units=16, activation=tf.nn.relu6,\n kernel_initializer=k_init, bias_initializer=b_init, name='critic_hidden2')\n", (7501, 7637), True, 'import tensorflow as tf\n'), ((7855, 7977), 'tensorflow.layers.dense', 'tf.layers.dense', ([], {'inputs': 'critic_hidden2', 'units': '(1)', 'kernel_initializer': 'k_init', 'bias_initializer': 'b_init', 'name': '"""critic_net"""'}), "(inputs=critic_hidden2, units=1, kernel_initializer=k_init,\n bias_initializer=b_init, name='critic_net')\n", 
(7870, 7977), True, 'import tensorflow as tf\n'), ((2907, 2920), 'tensorflow.square', 'tf.square', (['td'], {}), '(td)\n', (2916, 2920), True, 'import tensorflow as tf\n')] |
from basic import *
from numpy import rnorm
import random
class Author(Player):
    """A player that writes articles and submits/expedites them at journals."""

    def add_article(self, quality=None):
        """Create a new article, optionally track it toward prestige, and
        attach a noisy self-perceived quality."""
        self.article = Article(quality)
        if higher_is_smarter:
            self.track_article_to_prestige()
        self.article.author = self
        # Bug fix: the original called `rnorm`, which numpy does not provide
        # (`from numpy import rnorm` fails with ImportError). random.gauss
        # draws from the same normal distribution.
        self.article.author_perceived_quality = random.gauss(self.article.quality, 1)  # maybe could vary this in a more complicated version of the model

    def track_article_to_prestige(self):
        "higher adjustment factor = smaller move of discernment toward prestige. (1 = discernment just becomes prestige)"
        # Pull the article quality toward the author's prestige from either side.
        if self.prestige > self.article.quality:
            self.article.quality = self.article.quality + ((self.prestige - self.article.quality) / self.adjustment_factor)
        if self.article.quality > self.prestige:
            self.article.quality = self.article.quality - ((self.article.quality - self.prestige) / self.adjustment_factor)

    def submit(self, journal):
        "consult strategy and decide whether to submit to a journal."
        # Bug fix: the original referenced the undefined bare name `article`;
        # the article lives on the instance.
        if self.strategy.should_submit(journal, self.article):
            journal.receive_article(self.article, expedited=False)
            self.article.submissions.append({"journal": journal, "expedited": False, "accepted": None})

    def receive_response(self, journal, accepted):
        "handle response from journal"
        pass
        # TODO

    def expedite(self, journal, accepted_journal=None):
        """Check whether to expedite at `journal` given acceptance at `accepted_journal`.

        Bug fix: the original body referenced undefined names `test_journal`
        and `accepted_journal`; `journal` is the journal being tested, and
        the accepting journal is now an explicit, backward-compatible
        keyword parameter.
        """
        if self.strategy.should_expedite(journal, self.article, accepted_journal):
            journal.receive_article(self.article, expedited=True)
            for submission in self.article.submissions:
                if submission["journal"] == journal:
                    submission["expedited"] = True
| [
"numpy.rnorm"
] | [((321, 351), 'numpy.rnorm', 'rnorm', (['self.article.quality', '(1)'], {}), '(self.article.quality, 1)\n', (326, 351), False, 'from numpy import rnorm\n')] |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
###############################################################################
#Non-Standard Imports
###############################################################################
import addpath
import dunlin as dn
import dunlin._utils_model.dun_file_reader as dfr
import dunlin._utils_model.ivp as ivp
import dunlin._utils_model.ode_coder as odc
if __name__ == '__main__':
    #Some overhead for testing
    plt.close('all')
    ###############################################################################
    #Part 1: Manual Instantiation
    ###############################################################################
    def plot(t, y, AX, label='_nolabel'):
        """Plot each state trajectory y[i] against t on axis AX[i].

        The upper y-limit only ever grows across calls so all overlaid
        curves stay visible; a legend is drawn only for real labels.
        """
        for i, ax in enumerate(AX):
            ax.plot(t, y[i], label=label)
            top = np.max(y[i])
            top = top*1.2 if top else 1  # fall back to 1 when the curve is identically zero
            top = np.maximum(top, ax.get_ylim()[1])
            bottom = -top*.05
            ax.set_ylim(bottom=bottom, top=top)
            if label != '_nolabel':
                ax.legend()
    def modify(y, p, scenario):
        """Event handler: under scenario 's0', reset state x1 to 0.5.

        Returns (new_y, new_p) as fresh arrays; the parameters are unchanged.
        """
        x0, x1, x2 = y
        p0, p1 = p
        if scenario == 's0':
            x1 = 0.5
        new_y = np.array([x0, x1, x2])
        new_p = np.array([p0, p1])
        return new_y, new_p
    def func(t, y, p):
        """RHS of a linear two-step chain ODE: x0 -(p0)-> x1 -(p1)-> x2."""
        x0 = y[0]
        x1 = y[1]
        x2 = y[2]
        p0 = p[0]
        p1 = p[1]
        r0 = p0*x0
        r1 = p1*x1
        d_x0 = -r0
        d_x1 = +r0 -r1
        d_x2 = r1
        return np.array([d_x0, d_x1, d_x2])
    y0 = np.array([1, 0, 0])
    p = np.array([0.01, 0.01])
    tspan = np.linspace(0, 1000, 101)
    fig = plt.figure()
    AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
    rtol = 1e-3
    ###For saving###
    #df = pd.DataFrame(np.concatenate(([t], y), axis=0))
    #df.to_csv('event_test_files/simulate_event_1.csv', index=False)
    ################
    #Case 31: Modify
    print('Case 31: Modify')
    t, y = ivp.integrate(func, tspan, y0, p, modify=modify, overlap=True, include_events=True, scenario='s0')
    plot(t, y, AX, 'Case 31: Modify')
    # Compare the integrated trajectory against the saved reference CSV.
    df = pd.read_csv('event_test_files/simulate_event_31.csv')
    answer = df.values
    values = np.concatenate(([t], y), axis=0)
    assert np.all( np.isclose(answer, values, rtol=rtol))
    ###############################################################################
    #Part 2: Dynamic Instantiation
    ###############################################################################
    #Set up
    # Build the same model, but with the RHS/event functions generated from a .dun file.
    dun_data = dfr.read_file('event_test_files/M2.dun')
    model_data = dun_data['M2']
    func_data = odc.make_ode_data(model_data)
    func_ = func_data['rhs'][1]
    y0 = np.array([1, 0, 0])
    p = np.array([0.01, 0.01])
    tspan = np.linspace(0, 1000, 101)
    modify = func_data['modify'][1]
    fig = plt.figure()
    AX = [fig.add_subplot(1, 3, i+1) for i in range(3)]
    #Case 41: Modify
    print('Case 41: Modify')
    t, y = ivp.integrate(func_, tspan, y0, p, modify=modify, overlap=True, include_events=True, scenario='s0')
    plot(t, y, AX, 'Case 41: Modify')
    # The generated model must reproduce the same reference trajectory as Part 1.
    df = pd.read_csv('event_test_files/simulate_event_31.csv')
    answer = df.values
    values = np.concatenate(([t], y), axis=0)
    assert np.all( np.isclose(answer, values, rtol=rtol))
| [
"dunlin._utils_model.dun_file_reader.read_file",
"pandas.read_csv",
"matplotlib.pyplot.close",
"dunlin._utils_model.ode_coder.make_ode_data",
"matplotlib.pyplot.figure",
"numpy.isclose",
"numpy.array",
"numpy.max",
"numpy.linspace",
"dunlin._utils_model.ivp.integrate",
"numpy.concatenate"
] | [((555, 571), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (564, 571), True, 'import matplotlib.pyplot as plt\n'), ((1759, 1778), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (1767, 1778), True, 'import numpy as np\n'), ((1791, 1813), 'numpy.array', 'np.array', (['[0.01, 0.01]'], {}), '([0.01, 0.01])\n', (1799, 1813), True, 'import numpy as np\n'), ((1826, 1851), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(101)'], {}), '(0, 1000, 101)\n', (1837, 1851), True, 'import numpy as np\n'), ((1868, 1880), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1878, 1880), True, 'import matplotlib.pyplot as plt\n'), ((2199, 2301), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func', 'tspan', 'y0', 'p'], {'modify': 'modify', 'overlap': '(True)', 'include_events': '(True)', 'scenario': '"""s0"""'}), "(func, tspan, y0, p, modify=modify, overlap=True,\n include_events=True, scenario='s0')\n", (2212, 2301), True, 'import dunlin._utils_model.ivp as ivp\n'), ((2350, 2403), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_31.csv"""'], {}), "('event_test_files/simulate_event_31.csv')\n", (2361, 2403), True, 'import pandas as pd\n'), ((2440, 2472), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (2454, 2472), True, 'import numpy as np\n'), ((2768, 2808), 'dunlin._utils_model.dun_file_reader.read_file', 'dfr.read_file', (['"""event_test_files/M2.dun"""'], {}), "('event_test_files/M2.dun')\n", (2781, 2808), True, 'import dunlin._utils_model.dun_file_reader as dfr\n'), ((2858, 2887), 'dunlin._utils_model.ode_coder.make_ode_data', 'odc.make_ode_data', (['model_data'], {}), '(model_data)\n', (2875, 2887), True, 'import dunlin._utils_model.ode_coder as odc\n'), ((2937, 2956), 'numpy.array', 'np.array', (['[1, 0, 0]'], {}), '([1, 0, 0])\n', (2945, 2956), True, 'import numpy as np\n'), ((2969, 2991), 'numpy.array', 'np.array', (['[0.01, 
0.01]'], {}), '([0.01, 0.01])\n', (2977, 2991), True, 'import numpy as np\n'), ((3004, 3029), 'numpy.linspace', 'np.linspace', (['(0)', '(1000)', '(101)'], {}), '(0, 1000, 101)\n', (3015, 3029), True, 'import numpy as np\n'), ((3087, 3099), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (3097, 3099), True, 'import matplotlib.pyplot as plt\n'), ((3224, 3327), 'dunlin._utils_model.ivp.integrate', 'ivp.integrate', (['func_', 'tspan', 'y0', 'p'], {'modify': 'modify', 'overlap': '(True)', 'include_events': '(True)', 'scenario': '"""s0"""'}), "(func_, tspan, y0, p, modify=modify, overlap=True,\n include_events=True, scenario='s0')\n", (3237, 3327), True, 'import dunlin._utils_model.ivp as ivp\n'), ((3376, 3429), 'pandas.read_csv', 'pd.read_csv', (['"""event_test_files/simulate_event_31.csv"""'], {}), "('event_test_files/simulate_event_31.csv')\n", (3387, 3429), True, 'import pandas as pd\n'), ((3466, 3498), 'numpy.concatenate', 'np.concatenate', (['([t], y)'], {'axis': '(0)'}), '(([t], y), axis=0)\n', (3480, 3498), True, 'import numpy as np\n'), ((1345, 1367), 'numpy.array', 'np.array', (['[x0, x1, x2]'], {}), '([x0, x1, x2])\n', (1353, 1367), True, 'import numpy as np\n'), ((1384, 1402), 'numpy.array', 'np.array', (['[p0, p1]'], {}), '([p0, p1])\n', (1392, 1402), True, 'import numpy as np\n'), ((1713, 1741), 'numpy.array', 'np.array', (['[d_x0, d_x1, d_x2]'], {}), '([d_x0, d_x1, d_x2])\n', (1721, 1741), True, 'import numpy as np\n'), ((2492, 2529), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (2502, 2529), True, 'import numpy as np\n'), ((3518, 3555), 'numpy.isclose', 'np.isclose', (['answer', 'values'], {'rtol': 'rtol'}), '(answer, values, rtol=rtol)\n', (3528, 3555), True, 'import numpy as np\n'), ((917, 929), 'numpy.max', 'np.max', (['y[i]'], {}), '(y[i])\n', (923, 929), True, 'import numpy as np\n')] |
import numpy as np
def newton(f, x0, jac, hess, options):
    """Take one (regularized) Newton step from ``x0``.

    Args:
        f: Objective function (unused; kept for a uniform stepper signature).
        x0: Current iterate, 1-D ``np.ndarray``.
        jac: Callable returning the gradient at a point.
        hess: Callable returning the Hessian matrix at a point.
        options: Options dict (unused here; kept for a uniform signature).

    Returns:
        The next iterate ``x0 - H^{-1} g`` with a small ridge added to H.
    """
    # Tiny ridge (1e-6 * I) keeps the Hessian invertible near flat regions.
    A = hess(x0) + np.eye(x0.shape[0]) * 1.0e-6
    j = jac(x0)
    # Solve A d = j instead of forming the explicit inverse: cheaper and
    # numerically more stable than np.linalg.inv(A) @ j.
    return x0 - np.linalg.solve(A, j)
def grad(f, x0, jac, hess, options):
    """Take one plain gradient-descent step from ``x0``.

    Uses the fixed step size ``options['alpha']``; ``f`` and ``hess`` are
    unused but kept so all steppers share the same signature.
    """
    step_size = options['alpha']
    return x0 - step_size * jac(x0)
def exp_fit(f, x0, jac, hess, options):
    """Take one scaled step ``x0 + 0.5 * sigma^2 * jac(x0) / f(x0)``.

    ``options['sigma']`` controls the step scale; ``hess`` is unused but
    kept so all steppers share the same signature.
    """
    sigma = options['sigma']
    correction = 0.5 * jac(x0) * sigma * sigma / f(x0)
    return x0 + correction
"numpy.eye",
"numpy.linalg.inv"
] | [((76, 95), 'numpy.eye', 'np.eye', (['x0.shape[0]'], {}), '(x0.shape[0])\n', (82, 95), True, 'import numpy as np\n'), ((140, 156), 'numpy.linalg.inv', 'np.linalg.inv', (['A'], {}), '(A)\n', (153, 156), True, 'import numpy as np\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import numpy as np
import texar as tx
import random
from texar.agents.episodic_agent_base import EpisodicAgentBase
from texar.utils import utils
from texar.core import optimization as opt
__all__ = [
"DQNAgent"
]
class DQNAgent(EpisodicAgentBase):
    """Deep Q-Network (DQN) agent for episodic environments.

    Maintains an online Q-network (``qnet``) that is trained by SGD on the
    TD error, and a target network (``target``) used to compute bootstrap
    targets, together with a replay memory and an exploration schedule.
    An external TensorFlow session must be supplied (via ``sess`` or the
    ``sess`` property) before training / acting.

    Fixes vs. previous revision:
      * ``_train_qnet`` no longer mutates ``reward_batch`` in place through
        the ``y_batch`` alias (now copies before the Bellman update).
      * removed a dead one-hot ``action`` array in ``get_action`` that was
        built but never used.
    """

    def __init__(self,
                 env_config,
                 sess=None,
                 qnet=None,
                 target=None,
                 qnet_kwargs=None,
                 qnet_caller_kwargs=None,
                 replay_memory=None,
                 replay_memory_kwargs=None,
                 exploration=None,
                 exploration_kwargs=None,
                 hparams=None):
        EpisodicAgentBase.__init__(self, env_config, hparams)

        self._sess = sess
        self._cold_start_steps = self._hparams.cold_start_steps
        self._sample_batch_size = self._hparams.sample_batch_size
        self._update_period = self._hparams.update_period
        self._discount_factor = self._hparams.discount_factor
        self._update_type = self._hparams.update_type

        # Number of discrete actions, inferred from the action-space bounds.
        self._num_actions = self._env_config.action_space.high - \
                            self._env_config.action_space.low

        with tf.variable_scope(self.variable_scope):
            if qnet is None:
                kwargs = utils.get_instance_kwargs(
                    qnet_kwargs, self._hparams.qnet_hparams)
                qnet = utils.check_or_get_instance(
                    ins_or_class_or_name=self._hparams.qnet_type,
                    kwargs=kwargs,
                    module_paths=['texar.modules', 'texar.custom'])
                # Target network is built with the same hyperparameters as
                # the online network; its weights are synced via update ops.
                target = utils.check_or_get_instance(
                    ins_or_class_or_name=self._hparams.qnet_type,
                    kwargs=kwargs,
                    module_paths=['texar.modules', 'texar.custom'])
            self._qnet = qnet
            self._target = target
            self._qnet_caller_kwargs = qnet_caller_kwargs or {}

            if replay_memory is None:
                kwargs = utils.get_instance_kwargs(
                    replay_memory_kwargs, self._hparams.replay_memory_hparams)
                replay_memory = utils.check_or_get_instance(
                    ins_or_class_or_name=self._hparams.replay_memory_type,
                    kwargs=kwargs,
                    module_paths=['texar.core', 'texar.custom'])
            self._replay_memory = replay_memory

            if exploration is None:
                kwargs = utils.get_instance_kwargs(
                    exploration_kwargs, self._hparams.exploration_hparams)
                exploration = utils.check_or_get_instance(
                    ins_or_class_or_name=self._hparams.exploration_type,
                    kwargs=kwargs,
                    module_paths=['texar.core', 'texar.custom'])
            self._exploration = exploration

        self._build_graph()
        self.timestep = 0

    @staticmethod
    def default_hparams():
        """Returns the default hyperparameters of the agent as a dict."""
        return {
            'qnet_type': 'CategoricalQNet',
            'qnet_hparams': None,
            'replay_memory_type': 'DequeReplayMemory',
            'replay_memory_hparams': None,
            'exploration_type': 'EpsilonLinearDecayExploration',
            'exploration_hparams': None,
            'optimization': opt.default_optimization_hparams(),
            # 'copy' replaces target weights wholesale every `update_period`
            # steps; 'tau' does a soft (Polyak) update every step.
            'update_type': 'copy',
            'cold_start_steps': 100,
            'sample_batch_size': 32,
            'update_period': 100,
            'discount_factor': 0.95,
            'name': 'dqn_agent'
        }

    def _build_graph(self):
        """Builds placeholders, Q/target outputs, TD loss and update ops."""
        with tf.variable_scope(self.variable_scope):
            self._observ_inputs = tf.placeholder(
                dtype=self._env_config.observ_dtype,
                shape=[None, ] + list(self._env_config.observ_shape),
                name='observ_inputs')
            # Actions are fed as one-hot vectors over the action set.
            self._action_inputs = tf.placeholder(
                dtype=self._env_config.action_dtype,
                shape=[None, self._num_actions],
                name='action_inputs')
            # Bellman targets y = r + gamma * max_a' Q_target(s', a').
            self._y_inputs = tf.placeholder(
                dtype=tf.float32,
                shape=[None, ],
                name='y_inputs')

            self._qnet_outputs = self._get_qnet_outputs(self._observ_inputs)
            self._target_outputs = self._get_target_outputs(self._observ_inputs)
            self._td_error = self._get_td_error(
                qnet_qvalues=self._qnet_outputs['qvalues'],
                actions=self._action_inputs,
                y=self._y_inputs)
            self._train_op = self._get_train_op()

            if self._update_type == 'copy':
                self._update_op = self._get_copy_update_op()
            elif self._update_type == 'tau':
                self._update_op = self._get_tau_update_op()

    def _get_qnet_outputs(self, state_inputs):
        """Runs the online Q-network on `state_inputs`."""
        return self._qnet(inputs=state_inputs, **self._qnet_caller_kwargs)

    def _get_target_outputs(self, state_inputs):
        """Runs the target network on `state_inputs`."""
        return self._target(inputs=state_inputs, **self._qnet_caller_kwargs)

    def _get_td_error(self, qnet_qvalues, actions, y):
        """TD error: y - Q(s, a) for the taken (one-hot) actions."""
        return y - tf.reduce_sum(qnet_qvalues * tf.to_float(actions), axis=1)

    def _get_train_op(self):
        """SGD step minimizing the squared TD error over qnet variables."""
        train_op = opt.get_train_op(
            loss=tf.reduce_sum(self._td_error ** 2),
            variables=self._qnet.trainable_variables,
            hparams=self._hparams.optimization.todict())
        return train_op

    def _get_copy_update_op(self):
        """Ops that copy online-network weights into the target network."""
        op = []
        for i in range(len(self._qnet.trainable_variables)):
            op.append(tf.assign(ref=self._target.trainable_variables[i],
                                value=self._qnet.trainable_variables[i]))
        return op

    def _get_tau_update_op(self):
        """Ops for a soft (Polyak) target update with tau = 1/update_period."""
        tau = 1. / self._update_period
        op = []
        for i in range(len(self._qnet.trainable_variables)):
            op.append(tf.assign(
                ref=self._target.trainable_variables[i],
                value=(1. - tau) * self._target.trainable_variables[i] +
                      tau * self._qnet.trainable_variables[i]))
        return op

    def _observe(self, observ, action, reward, terminal, next_observ,
                 train_policy, feed_dict):
        """Stores one transition in replay memory; trains after cold start."""
        action_one_hot = [0.] * self._num_actions
        action_one_hot[action] = 1.
        self._replay_memory.add(dict(
            observ=observ,
            action=action_one_hot,
            reward=reward,
            terminal=terminal,
            next_observ=next_observ))
        self.timestep += 1

        # Only train once enough transitions have been collected.
        if self.timestep > self._cold_start_steps and train_policy:
            self._train_qnet(feed_dict)

    def _train_qnet(self, feed_dict):
        """Samples a minibatch and performs one training step on the Q-net."""
        minibatch = self._replay_memory.get(self._sample_batch_size)
        observ_batch = np.array([data['observ'] for data in minibatch])
        action_batch = np.array([data['action'] for data in minibatch])
        reward_batch = np.array([data['reward'] for data in minibatch])
        terminal_batch = np.array([data['terminal'] for data in minibatch])
        next_observ_batch = \
            np.array([data['next_observ'] for data in minibatch])

        target_qvalue = self._sess.run(
            self._target_outputs['qvalues'], feed_dict={
                self._observ_inputs: next_observ_batch,
                tx.global_mode(): tf.estimator.ModeKeys.PREDICT})

        # Bellman targets. Copy so the in-place updates below do not mutate
        # `reward_batch` through aliasing.
        y_batch = reward_batch.copy()
        for i in range(self._sample_batch_size):
            if not terminal_batch[i]:
                y_batch[i] += self._discount_factor * np.max(target_qvalue[i])

        feed_dict_ = {
            self._observ_inputs: observ_batch,
            self._y_inputs: y_batch,
            self._action_inputs: action_batch
        }
        feed_dict_.update(feed_dict or {})

        self._sess.run(self._train_op, feed_dict=feed_dict_)

        self.update_target(feed_dict)

    def update_target(self, feed_dict):
        """Syncs target network: every step for 'tau', periodically for 'copy'."""
        if self._update_type == 'tau' or (
                self._update_type == 'copy' and
                self.timestep % self._update_period == 0):
            self._sess.run(self._update_op, feed_dict=feed_dict)

    def get_action(self, observ, feed_dict=None):
        """Returns an epsilon-greedy action id for the given observation."""
        qvalue = self._sess.run(
            self._qnet_outputs['qvalues'],
            feed_dict={self._observ_inputs: np.array([observ]),
                       tx.global_mode(): tf.estimator.ModeKeys.PREDICT})

        if random.random() < self._exploration.get_epsilon(self.timestep):
            # Explore: uniform random action.
            action_id = random.randrange(self._num_actions)
        else:
            # Exploit: greedy action w.r.t. the online Q-network.
            action_id = np.argmax(qvalue)
        return action_id

    def _reset(self):
        pass

    @property
    def sess(self):
        """The tf session.
        """
        return self._sess

    @sess.setter
    def sess(self, session):
        self._sess = session
| [
"tensorflow.reduce_sum",
"texar.utils.utils.get_instance_kwargs",
"texar.core.optimization.default_optimization_hparams",
"numpy.argmax",
"numpy.zeros",
"texar.agents.episodic_agent_base.EpisodicAgentBase.__init__",
"tensorflow.variable_scope",
"texar.utils.utils.check_or_get_instance",
"tensorflow.... | [((803, 856), 'texar.agents.episodic_agent_base.EpisodicAgentBase.__init__', 'EpisodicAgentBase.__init__', (['self', 'env_config', 'hparams'], {}), '(self, env_config, hparams)\n', (829, 856), False, 'from texar.agents.episodic_agent_base import EpisodicAgentBase\n'), ((6877, 6925), 'numpy.array', 'np.array', (["[data['observ'] for data in minibatch]"], {}), "([data['observ'] for data in minibatch])\n", (6885, 6925), True, 'import numpy as np\n'), ((6949, 6997), 'numpy.array', 'np.array', (["[data['action'] for data in minibatch]"], {}), "([data['action'] for data in minibatch])\n", (6957, 6997), True, 'import numpy as np\n'), ((7021, 7069), 'numpy.array', 'np.array', (["[data['reward'] for data in minibatch]"], {}), "([data['reward'] for data in minibatch])\n", (7029, 7069), True, 'import numpy as np\n'), ((7095, 7145), 'numpy.array', 'np.array', (["[data['terminal'] for data in minibatch]"], {}), "([data['terminal'] for data in minibatch])\n", (7103, 7145), True, 'import numpy as np\n'), ((7188, 7241), 'numpy.array', 'np.array', (["[data['next_observ'] for data in minibatch]"], {}), "([data['next_observ'] for data in minibatch])\n", (7196, 7241), True, 'import numpy as np\n'), ((8505, 8538), 'numpy.zeros', 'np.zeros', ([], {'shape': 'self._num_actions'}), '(shape=self._num_actions)\n', (8513, 8538), True, 'import numpy as np\n'), ((1331, 1369), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (1348, 1369), True, 'import tensorflow as tf\n'), ((3407, 3441), 'texar.core.optimization.default_optimization_hparams', 'opt.default_optimization_hparams', ([], {}), '()\n', (3439, 3441), True, 'from texar.core import optimization as opt\n'), ((3707, 3745), 'tensorflow.variable_scope', 'tf.variable_scope', (['self.variable_scope'], {}), '(self.variable_scope)\n', (3724, 3745), True, 'import tensorflow as tf\n'), ((3992, 4103), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 
'self._env_config.action_dtype', 'shape': '[None, self._num_actions]', 'name': '"""action_inputs"""'}), "(dtype=self._env_config.action_dtype, shape=[None, self.\n _num_actions], name='action_inputs')\n", (4006, 4103), True, 'import tensorflow as tf\n'), ((4177, 4240), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[None]', 'name': '"""y_inputs"""'}), "(dtype=tf.float32, shape=[None], name='y_inputs')\n", (4191, 4240), True, 'import tensorflow as tf\n'), ((8550, 8565), 'random.random', 'random.random', ([], {}), '()\n', (8563, 8565), False, 'import random\n'), ((8638, 8673), 'random.randrange', 'random.randrange', (['self._num_actions'], {}), '(self._num_actions)\n', (8654, 8673), False, 'import random\n'), ((8712, 8729), 'numpy.argmax', 'np.argmax', (['qvalue'], {}), '(qvalue)\n', (8721, 8729), True, 'import numpy as np\n'), ((1425, 1491), 'texar.utils.utils.get_instance_kwargs', 'utils.get_instance_kwargs', (['qnet_kwargs', 'self._hparams.qnet_hparams'], {}), '(qnet_kwargs, self._hparams.qnet_hparams)\n', (1450, 1491), False, 'from texar.utils import utils\n'), ((1536, 1676), 'texar.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', ([], {'ins_or_class_or_name': 'self._hparams.qnet_type', 'kwargs': 'kwargs', 'module_paths': "['texar.modules', 'texar.custom']"}), "(ins_or_class_or_name=self._hparams.qnet_type,\n kwargs=kwargs, module_paths=['texar.modules', 'texar.custom'])\n", (1563, 1676), False, 'from texar.utils import utils\n'), ((1759, 1899), 'texar.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', ([], {'ins_or_class_or_name': 'self._hparams.qnet_type', 'kwargs': 'kwargs', 'module_paths': "['texar.modules', 'texar.custom']"}), "(ins_or_class_or_name=self._hparams.qnet_type,\n kwargs=kwargs, module_paths=['texar.modules', 'texar.custom'])\n", (1786, 1899), False, 'from texar.utils import utils\n'), ((2149, 2238), 'texar.utils.utils.get_instance_kwargs', 'utils.get_instance_kwargs', 
(['replay_memory_kwargs', 'self._hparams.replay_memory_hparams'], {}), '(replay_memory_kwargs, self._hparams.\n replay_memory_hparams)\n', (2174, 2238), False, 'from texar.utils import utils\n'), ((2287, 2438), 'texar.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', ([], {'ins_or_class_or_name': 'self._hparams.replay_memory_type', 'kwargs': 'kwargs', 'module_paths': "['texar.core', 'texar.custom']"}), "(ins_or_class_or_name=self._hparams.\n replay_memory_type, kwargs=kwargs, module_paths=['texar.core',\n 'texar.custom'])\n", (2314, 2438), False, 'from texar.utils import utils\n'), ((2601, 2686), 'texar.utils.utils.get_instance_kwargs', 'utils.get_instance_kwargs', (['exploration_kwargs', 'self._hparams.exploration_hparams'], {}), '(exploration_kwargs, self._hparams.exploration_hparams\n )\n', (2626, 2686), False, 'from texar.utils import utils\n'), ((2733, 2882), 'texar.utils.utils.check_or_get_instance', 'utils.check_or_get_instance', ([], {'ins_or_class_or_name': 'self._hparams.exploration_type', 'kwargs': 'kwargs', 'module_paths': "['texar.core', 'texar.custom']"}), "(ins_or_class_or_name=self._hparams.\n exploration_type, kwargs=kwargs, module_paths=['texar.core',\n 'texar.custom'])\n", (2760, 2882), False, 'from texar.utils import utils\n'), ((5368, 5402), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(self._td_error ** 2)'], {}), '(self._td_error ** 2)\n', (5381, 5402), True, 'import tensorflow as tf\n'), ((5674, 5770), 'tensorflow.assign', 'tf.assign', ([], {'ref': 'self._target.trainable_variables[i]', 'value': 'self._qnet.trainable_variables[i]'}), '(ref=self._target.trainable_variables[i], value=self._qnet.\n trainable_variables[i])\n', (5683, 5770), True, 'import tensorflow as tf\n'), ((5990, 6144), 'tensorflow.assign', 'tf.assign', ([], {'ref': 'self._target.trainable_variables[i]', 'value': '((1.0 - tau) * self._target.trainable_variables[i] + tau * self._qnet.\n trainable_variables[i])'}), '(ref=self._target.trainable_variables[i], 
value=(1.0 - tau) * self\n ._target.trainable_variables[i] + tau * self._qnet.trainable_variables[i])\n', (5999, 6144), True, 'import tensorflow as tf\n'), ((5254, 5274), 'tensorflow.to_float', 'tf.to_float', (['actions'], {}), '(actions)\n', (5265, 5274), True, 'import tensorflow as tf\n'), ((7412, 7428), 'texar.global_mode', 'tx.global_mode', ([], {}), '()\n', (7426, 7428), True, 'import texar as tx\n'), ((7635, 7659), 'numpy.max', 'np.max', (['target_qvalue[i]'], {}), '(target_qvalue[i])\n', (7641, 7659), True, 'import numpy as np\n'), ((8437, 8453), 'texar.global_mode', 'tx.global_mode', ([], {}), '()\n', (8451, 8453), True, 'import texar as tx\n'), ((8394, 8412), 'numpy.array', 'np.array', (['[observ]'], {}), '([observ])\n', (8402, 8412), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Project Statement
# The Film Junky Union, a new edgy community for classic movie enthusiasts, is developing a system for filtering and categorizing movie reviews. The goal is to train a model to automatically detect negative reviews. You'll be using a dataset of IMDB movie reviews with polarity labelling to build a model for classifying positive and negative reviews. It will need to have an F1 score of at least 0.85.
# ## Initialization
# In[1]:
import math
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import seaborn as sns
import re # import the built in module to find patterns
from tqdm.auto import tqdm
# In[2]:
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'png'")
# the next line provides graphs of better quality on HiDPI screens
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'retina'")
plt.style.use('seaborn')
# In[3]:
# import sys and insert code to ignore warnings
import sys
if not sys.warnoptions:
import warnings
warnings.simplefilter("ignore")
# In[4]:
# this is to use progress_apply, read more at https://pypi.org/project/tqdm/#pandas-integration
tqdm.pandas()
# ## Load Data
# In[5]:
# load the data
# Load the IMDB reviews dataset: prefer the local development path and fall
# back to the platform path. Catch only the "file missing" case so that real
# read errors (malformed TSV, bad dtype, permissions) are not silently masked
# by a bare `except`.
try:
    df_reviews = pd.read_csv('/Users/rraven/Desktop/a_final_yandex/datasets/imdb_reviews.tsv', sep='\t', dtype={'votes': 'Int64'})
except FileNotFoundError:
    df_reviews = pd.read_csv('/datasets/imdb_reviews.tsv', sep='\t', dtype={'votes': 'Int64'})
# In[6]:
df_reviews.head()
# In[7]:
df_reviews.info()
# In[8]:
df_reviews.describe()
# In[9]:
df_reviews.shape
# In[10]:
df_reviews.duplicated().sum()
# In[11]:
df_reviews.isnull().sum()
# In[12]:
df_reviews.review
# In[13]:
df_reviews.ds_part
# In[14]:
df_reviews.pos.describe()
# __Observations__
# - The target (positive) is categorical (0 for negative, 1 for positive)
# - The pos distribution is fairly even (balance of 49.9% '0' and 51.1% '1')
# - This is a classification task
# - The features are in the review column (text based), although other rows may be considered
# - There are 47331 instances and 17 columns
# - The df includes reviews from movies released up to 2010
# - No duplicate rows
# - There are 2 rows with missing values in average_rating and votes columns
# - ds_part column identifies the train and test part of the dataset
# - At some point, it may be useful to focus on only 'review', 'pos', 'ds_part'
#
# __Drop the 2 rows with missing values and verify__
# In[15]:
# drop rows with missing values
df_reviews = df_reviews.dropna()
df_reviews.info()
# ## EDA
# Let's check the number of movies and reviews over years.
# In[16]:
# Two stacked panels: unique-movie counts per release year (top) and review
# counts per year with a reviews-per-movie trend line (bottom).
fig, axs = plt.subplots(2, 1, figsize=(16, 8))
ax = axs[0]
# Count unique movies per start_year (drop duplicate (tconst, year) pairs first).
dft1 = df_reviews[['tconst', 'start_year']].drop_duplicates() ['start_year'].value_counts().sort_index()
# Reindex so years with zero movies still appear on the bar axis (up to 2020).
dft1 = dft1.reindex(index=np.arange(dft1.index.min(), max(dft1.index.max(), 2021))).fillna(0)
dft1.plot(kind='bar', ax=ax)
ax.set_title('Number of Movies Over Years')
ax = axs[1]
# Reviews per year split by polarity (unstacked columns: 0 = negative, 1 = positive).
dft2 = df_reviews.groupby(['start_year', 'pos'])['pos'].count().unstack()
dft2 = dft2.reindex(index=np.arange(dft2.index.min(), max(dft2.index.max(), 2021))).fillna(0)
dft2.plot(kind='bar', stacked=True, label='#reviews (neg, pos)', ax=ax)
# Total reviews per year, used for the reviews-per-movie ratio below.
dft2 = df_reviews['start_year'].value_counts().sort_index()
dft2 = dft2.reindex(index=np.arange(dft2.index.min(), max(dft2.index.max(), 2021))).fillna(0)
# Average reviews per movie, smoothed with a 5-year rolling mean on a twin axis.
dft3 = (dft2/dft1).fillna(0)
axt = ax.twinx()
dft3.reset_index(drop=True).rolling(5).mean().plot(color='orange', label='reviews per movie (avg over 5 years)', ax=axt)
lines, labels = axt.get_legend_handles_labels()
ax.legend(lines, labels, loc='upper left')
ax.set_title('Number of Reviews Over Years')
fig.tight_layout()
# From this graph, we see an overall increase in the number of movies per year, an increase in reviews, but a pretty even 5 year rolling average reviews per movie for the period mid 1970s to mid 2000s. There is a peak in movies and movie reviews around 2006/2007.
#
# Let's check the distribution of number of reviews per movie with the exact counting and KDE (just to learn how it may differ from the exact counting)
# In[17]:
fig, axs = plt.subplots(1, 2, figsize=(16, 5))
ax = axs[0]
dft = df_reviews.groupby('tconst')['review'].count() .value_counts() .sort_index()
dft.plot.bar(ax=ax)
ax.set_title('Bar Plot of #Reviews Per Movie')
ax = axs[1]
dft = df_reviews.groupby('tconst')['review'].count()
sns.kdeplot(dft, ax=ax)
ax.set_title('KDE Plot of #Reviews Per Movie')
fig.tight_layout()
# We note that most movies have only one or a few reviews (the large peak on the left), but there is also a second group of movies that generate around 30 reviews. These movies may be popular, controversial, or frequently reviewed for some other reason.
# In[18]:
df_reviews['pos'].value_counts()
# In[19]:
fig, axs = plt.subplots(1, 2, figsize=(12, 4))
ax = axs[0]
dft = df_reviews.query('ds_part == "train"')['rating'].value_counts().sort_index()
dft = dft.reindex(index=np.arange(min(dft.index.min(), 1), max(dft.index.max(), 11))).fillna(0)
dft.plot.bar(ax=ax)
ax.set_ylim([0, 5000])
ax.set_title('The train set: distribution of ratings')
ax = axs[1]
dft = df_reviews.query('ds_part == "test"')['rating'].value_counts().sort_index()
dft = dft.reindex(index=np.arange(min(dft.index.min(), 1), max(dft.index.max(), 11))).fillna(0)
dft.plot.bar(ax=ax)
ax.set_ylim([0, 5000])
ax.set_title('The test set: distribution of ratings')
fig.tight_layout()
# We previously noted the overall number of pos=1 and pos=0 were fairly close in the total dataset.
#
# The above graphs demonstrate fairly similar distribution of ratings between the train set and the test set.
#
# Next we examine the distribution of negative and positive reviews over the years for two parts of the dataset
# In[20]:
fig, axs = plt.subplots(2, 2, figsize=(16, 8), gridspec_kw=dict(width_ratios=(2, 1), height_ratios=(1, 1)))
ax = axs[0][0]
dft = df_reviews.query('ds_part == "train"').groupby(['start_year', 'pos'])['pos'].count().unstack()
dft.index = dft.index.astype('int')
dft = dft.reindex(index=np.arange(dft.index.min(), max(dft.index.max(), 2020))).fillna(0)
dft.plot(kind='bar', stacked=True, ax=ax)
ax.set_title('The train set: number of reviews of different polarities per year')
ax = axs[0][1]
dft = df_reviews.query('ds_part == "train"').groupby(['tconst', 'pos'])['pos'].count().unstack()
sns.kdeplot(dft[0], color='blue', label='negative', kernel='epa', ax=ax)
sns.kdeplot(dft[1], color='green', label='positive', kernel='epa', ax=ax)
ax.legend()
ax.set_title('The train set: distribution of different polarities per movie')
ax = axs[1][0]
dft = df_reviews.query('ds_part == "test"').groupby(['start_year', 'pos'])['pos'].count().unstack()
dft.index = dft.index.astype('int')
dft = dft.reindex(index=np.arange(dft.index.min(), max(dft.index.max(), 2020))).fillna(0)
dft.plot(kind='bar', stacked=True, ax=ax)
ax.set_title('The test set: number of reviews of different polarities per year')
ax = axs[1][1]
dft = df_reviews.query('ds_part == "test"').groupby(['tconst', 'pos'])['pos'].count().unstack()
sns.kdeplot(dft[0], color='blue', label='negative', kernel='epa', ax=ax)
sns.kdeplot(dft[1], color='green', label='positive', kernel='epa', ax=ax)
ax.legend()
ax.set_title('The test set: distribution of different polarities per movie')
fig.tight_layout()
# Once again, we visually note similar curves and distributions of the train and test datasets.
# ## Evaluation Procedure
# Composing an evaluation routine which can be used for all models in this project
# In[21]:
import sklearn.metrics as metrics
def evaluate_model(model, train_features, train_target, test_features, test_target):
    """Evaluate a fitted binary classifier on the train and test sets.

    Draws three panels (F1 vs. decision threshold, ROC curve, precision-
    recall curve) with both dataset parts overlaid, prints a summary table
    of Accuracy / F1 / APS / ROC AUC, and returns that table.

    Args:
        model: Fitted estimator with `predict` and `predict_proba`.
        train_features, train_target: Training features and binary labels.
        test_features, test_target: Test features and binary labels.

    Returns:
        pandas.DataFrame with metric rows ('Accuracy', 'F1', 'APS',
        'ROC AUC') and columns 'train' / 'test'. (Previously returned None;
        existing callers that ignore the value are unaffected.)
    """
    eval_stats = {}

    fig, axs = plt.subplots(1, 3, figsize=(20, 6))

    # Note: `part` (not the builtin name `type`) labels the dataset split.
    for part, features, target in (('train', train_features, train_target),
                                   ('test', test_features, test_target)):
        eval_stats[part] = {}

        pred_target = model.predict(features)
        pred_proba = model.predict_proba(features)[:, 1]

        # F1 over a grid of decision thresholds
        f1_thresholds = np.arange(0, 1.01, 0.05)
        f1_scores = [metrics.f1_score(target, pred_proba >= threshold)
                     for threshold in f1_thresholds]

        # ROC
        fpr, tpr, roc_thresholds = metrics.roc_curve(target, pred_proba)
        roc_auc = metrics.roc_auc_score(target, pred_proba)
        eval_stats[part]['ROC AUC'] = roc_auc

        # PRC
        precision, recall, pr_thresholds = metrics.precision_recall_curve(target, pred_proba)
        aps = metrics.average_precision_score(target, pred_proba)
        eval_stats[part]['APS'] = aps

        color = 'blue' if part == 'train' else 'green'

        # F1 Score panel
        ax = axs[0]
        max_f1_score_idx = np.argmax(f1_scores)
        ax.plot(f1_thresholds, f1_scores, color=color, label=f'{part}, max={f1_scores[max_f1_score_idx]:.2f} @ {f1_thresholds[max_f1_score_idx]:.2f}')
        # mark a few reference thresholds (red cross = default 0.5)
        for threshold in (0.2, 0.4, 0.5, 0.6, 0.8):
            closest_value_idx = np.argmin(np.abs(f1_thresholds - threshold))
            marker_color = 'orange' if threshold != 0.5 else 'red'
            ax.plot(f1_thresholds[closest_value_idx], f1_scores[closest_value_idx], color=marker_color, marker='X', markersize=7)
        ax.set_xlim([-0.02, 1.02])
        ax.set_ylim([-0.02, 1.02])
        ax.set_xlabel('threshold')
        ax.set_ylabel('F1')
        ax.legend(loc='lower center')
        ax.set_title('F1 Score')

        # ROC panel
        ax = axs[1]
        ax.plot(fpr, tpr, color=color, label=f'{part}, ROC AUC={roc_auc:.2f}')
        # mark the same reference thresholds on the curve
        for threshold in (0.2, 0.4, 0.5, 0.6, 0.8):
            closest_value_idx = np.argmin(np.abs(roc_thresholds - threshold))
            marker_color = 'orange' if threshold != 0.5 else 'red'
            ax.plot(fpr[closest_value_idx], tpr[closest_value_idx], color=marker_color, marker='X', markersize=7)
        ax.plot([0, 1], [0, 1], color='grey', linestyle='--')
        ax.set_xlim([-0.02, 1.02])
        ax.set_ylim([-0.02, 1.02])
        ax.set_xlabel('FPR')
        ax.set_ylabel('TPR')
        ax.legend(loc='lower center')
        ax.set_title('ROC Curve')

        # PRC panel
        ax = axs[2]
        ax.plot(recall, precision, color=color, label=f'{part}, AP={aps:.2f}')
        # mark the same reference thresholds on the curve
        for threshold in (0.2, 0.4, 0.5, 0.6, 0.8):
            closest_value_idx = np.argmin(np.abs(pr_thresholds - threshold))
            marker_color = 'orange' if threshold != 0.5 else 'red'
            ax.plot(recall[closest_value_idx], precision[closest_value_idx], color=marker_color, marker='X', markersize=7)
        ax.set_xlim([-0.02, 1.02])
        ax.set_ylim([-0.02, 1.02])
        ax.set_xlabel('recall')
        ax.set_ylabel('precision')
        ax.legend(loc='lower center')
        ax.set_title('PRC')

        eval_stats[part]['Accuracy'] = metrics.accuracy_score(target, pred_target)
        eval_stats[part]['F1'] = metrics.f1_score(target, pred_target)

    df_eval_stats = pd.DataFrame(eval_stats)
    df_eval_stats = df_eval_stats.round(2)
    df_eval_stats = df_eval_stats.reindex(index=('Accuracy', 'F1', 'APS', 'ROC AUC'))

    print(df_eval_stats)

    return df_eval_stats
# ## Normalization
# We assume all models below accepts texts in lowercase and without any digits, punctuations marks etc.
# In[22]:
# print lines of reviews before normalization to compare
df_reviews['review']
# In[23]:
# create clear_text function to keep only letters, spaces, and apostrophes in the text
def clear_text(text):
    """Normalize a review string for modelling.

    Lowercases the text, replaces every character that is not a latin
    letter or apostrophe with a space, then collapses all runs of
    whitespace to single spaces (trimming the ends).
    """
    # Everything except letters and apostrophes becomes a space.
    cleaned = re.sub(r"[^a-zA-Z']", ' ', text.lower())
    # split() with no arguments drops all whitespace runs; join rebuilds
    # the sentence with single spaces.
    return ' '.join(cleaned.split())
# In[24]:
# apply clear_text function to all text
df_reviews['review_norm'] = df_reviews['review'].apply(lambda x: clear_text(x))
# In[25]:
# verify the normalized text
df_reviews['review_norm']
# ## Train / Test Split
# Luckily, the whole dataset is already divided into train/test one parts. The corresponding flag is 'ds_part'.
# In[26]:
# The dataset ships pre-split: the 'ds_part' column flags each row as part
# of the train or test portion. Materialize both parts as copies so later
# column assignments do not trigger chained-assignment warnings.
df_reviews_train = df_reviews.query('ds_part == "train"').copy()
df_reviews_test = df_reviews.query('ds_part == "test"').copy()
# create y_train and y_test for targets and X_train, X_test for features
# (target 'pos' is the binary polarity label; features are normalized review text)
y_train = df_reviews_train['pos']
y_test = df_reviews_test['pos']
X_train = df_reviews_train['review_norm']
X_test = df_reviews_test['review_norm']
# Sanity check: feature/target row counts must match within each split.
print(df_reviews_train.shape)
print(df_reviews_test.shape)
print(y_train.shape)
print(y_test.shape)
print(X_train.shape)
print(X_test.shape)
# ## Working with models
# ### Model 0 - Constant
# In[27]:
from sklearn.dummy import DummyClassifier
# In[28]:
model_0 = DummyClassifier(strategy='constant', constant=1)
model_0.fit(X_train, y_train)
# In[29]:
# evaluate constant model
model_0_eval = evaluate_model(model_0, X_train, y_train, X_test, y_test)
# ### Model 1 - NLTK, TF-IDF and LR
# In[30]:
import nltk
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from nltk.corpus import stopwords
# In[31]:
# Create a counter and define stop words
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
count_tf_idf = TfidfVectorizer(stop_words=stop_words)
# In[32]:
# get matrices
tfidf_train = count_tf_idf.fit_transform(X_train)
tfidf_test = count_tf_idf.transform(X_test) # note only use transform here
# In[33]:
# create model and fit model
model_1 = LogisticRegression(random_state=12345, solver='liblinear')
model_1.fit(tfidf_train, y_train)
# In[34]:
# evaluate model
model_1_eval = evaluate_model(model_1, tfidf_train, y_train, tfidf_test, y_test)
# ### Model 3 - spaCy, TF-IDF and LR
# In[35]:
import spacy
import en_core_web_sm
nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])
# In[36]:
# print text before preprocessing
df_reviews['review']
# In[38]:
# create function to lemmatize text
nlp = en_core_web_sm.load(disable=['parser', 'ner'])
def lemmatize(text):
    """Return *text* with every token replaced by its spaCy lemma.

    Relies on the module-level `nlp` pipeline (parser/NER disabled) and
    joins the lemmas back into a single space-separated string.
    """
    document = nlp(text)
    return ' '.join(token.lemma_ for token in document)
# In[41]:
df_reviews['reviews_lemm'] = df_reviews['review'].progress_apply(lemmatize)
# In[42]:
# verify reviews_lemm
df_reviews['reviews_lemm']
# In[43]:
# change the features for the test/train data
X_train = df_reviews.query('ds_part == "train"')['reviews_lemm']
X_test = df_reviews.query('ds_part == "test"')['reviews_lemm']
# In[44]:
# # Create a counter and define stop words
count_tf_idf_lemm = TfidfVectorizer(stop_words=stop_words)
tfidf_train_lemm = count_tf_idf_lemm.fit_transform(X_train)
tfidf_test_lemm = count_tf_idf_lemm.transform(X_test) #
# In[45]:
# create a model and fit model
model_3 = LogisticRegression(random_state=12345, solver='liblinear')
model_3.fit(tfidf_train_lemm, y_train)
# In[46]:
# evaluate model
model_3_eval = evaluate_model(model_3, tfidf_train_lemm, y_train, tfidf_test_lemm, y_test)
# ### Model 4 - spaCy, TF-IDF and LGBMClassifier
# In[47]:
from lightgbm import LGBMClassifier
# We can use the same features for train as we did in Model 3 (where text preprocessing/lemmatization applied)
# In[48]:
# create and fit model
model_4 = LGBMClassifier(random_state=12345)
model_4.fit(tfidf_train_lemm, y_train)
# In[49]:
# evaluate model
model_4_eval = evaluate_model(model_4, tfidf_train_lemm, y_train, tfidf_test_lemm, y_test)
# ### Model 9 - BERT - did not attempt
# In[50]:
import torch
import transformers
# In[51]:
tokenizer = transformers.BertTokenizer.from_pretrained('bert-base-uncased')
config = transformers.BertConfig.from_pretrained('bert-base-uncased')
model = transformers.BertModel.from_pretrained('bert-base-uncased')
# In[52]:
def BERT_text_to_embeddings(texts, max_length=512, batch_size=100, force_device=None, disable_progress_bar=False):
    """Convert an iterable of raw texts into BERT [CLS] embeddings.

    Fixes vs. the template version: the tokenization step was a placeholder
    (ids_list / attention_mask_list were never filled) and
    attention_mask_batch was referenced but never created (NameError).

    Parameters
    ----------
    texts : iterable of str
        Raw review texts.
    max_length : int
        Token cap per text; longer texts are truncated.
    batch_size : int
        Number of texts embedded per forward pass.
    force_device : str or None
        e.g. 'cuda' or 'cpu'; auto-detected when None.
    disable_progress_bar : bool
        Silence the tqdm progress bar and the device message.

    Returns
    -------
    numpy.ndarray of shape (n_texts, hidden_size) holding one [CLS]
    vector per input text.
    """
    ids_list = []
    attention_mask_list = []
    # text to padded ids of tokens along with their attention masks
    for text in texts:
        ids = tokenizer.encode(text, add_special_tokens=True,
                               truncation=True, max_length=max_length)
        # right-pad every sequence to max_length with the [PAD] id (0)
        padded = ids + [0] * (max_length - len(ids))
        ids_list.append(padded)
        # mask is 1 for real tokens, 0 for padding
        attention_mask_list.append([int(token_id > 0) for token_id in padded])
    if force_device is not None:
        device = torch.device(force_device)
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model.to(device)
    if not disable_progress_bar:
        print(f'Using the {device} device.')
    # gettings embeddings in batches
    embeddings = []
    for i in tqdm(range(math.ceil(len(ids_list)/batch_size)), disable=disable_progress_bar):
        ids_batch = torch.LongTensor(ids_list[batch_size*i:batch_size*(i+1)]).to(device)
        attention_mask_batch = torch.LongTensor(
            attention_mask_list[batch_size*i:batch_size*(i+1)]).to(device)
        with torch.no_grad():
            model.eval()
            batch_embeddings = model(input_ids=ids_batch, attention_mask=attention_mask_batch)
        # keep only the [CLS] (position 0) hidden state of each sequence
        embeddings.append(batch_embeddings[0][:,0,:].detach().cpu().numpy())
    return np.concatenate(embeddings)
# In[53]:
# Attention! Running BERT for thousands of texts may take long run on CPU, at least several hours
# train_features_9 = BERT_text_to_embeddings(df_reviews_train['review_norm'], force_device='cuda')
# In[54]:
# print(df_reviews_train['review_norm'].shape)
# print(train_features_9.shape)
# print(train_target.shape)
# In[55]:
# if you have got the embeddings, it's advisable to save them to have them ready if
# np.savez_compressed('features_9.npz', train_features_9=train_features_9, test_features_9=test_features_9)
# and load...
# with np.load('features_9.npz') as data:
# train_features_9 = data['train_features_9']
# test_features_9 = data['test_features_9']
# ## My Reviews
# In[56]:
# feel free to completely remove these reviews and try your models on your own reviews, those below are just examples
# Hand-written sample reviews (4 positive, 4 negative by intent) used as a
# small smoke test of the trained models below.
my_reviews = pd.DataFrame([
    'I did not simply like it, not my kind of movie.',
    'Well, I was bored and felt asleep in the middle of the movie.',
    'I was really fascinated with the movie',
    'Even the actors looked really old and disinterested, and they got paid to be in the movie. What a soulless cash grab.',
    'I didn\'t expect the reboot to be so good! Writers really cared about the source material',
    'The movie had its upsides and downsides, but I feel like overall it\'s a decent flick. I could see myself going to see it again.',
    'What a rotten attempt at a comedy. Not a single joke lands, everyone acts annoying and loud, even kids won\'t like this!',
    'Launching on Netflix was a brave move & I really appreciate being able to binge on episode after episode, of this exciting intelligent new drama.'
], columns=['review'])
# Apply the same normalization used for the training corpus.
my_reviews['review_norm'] = my_reviews['review'].apply(lambda x: clear_text(x))
my_reviews
# ### Model 1
# In[57]:
# Probability of the positive class (column 1) for each sample review.
texts = my_reviews['review_norm']
my_reviews_pred_prob = model_1.predict_proba(count_tf_idf.transform(texts))[:, 1]
for i, review in enumerate(texts.str.slice(0, 100)):
    print(f'{my_reviews_pred_prob[i]:.2f}: {review}')
# ### Model 3
# In[58]:
# Model 3 expects lemmatized input, hence text_preprocessing_3 before the TF-IDF transform.
texts = my_reviews['review_norm']
my_reviews_pred_prob = model_3.predict_proba(
    count_tf_idf_lemm.transform(texts.apply(lambda x: text_preprocessing_3(x))))[:, 1]
for i, review in enumerate(texts.str.slice(0, 100)):
    print(f'{my_reviews_pred_prob[i]:.2f}: {review}')
# ### Model 4
# In[59]:
# Model 4 reuses the same lemmatized TF-IDF features as Model 3.
texts = my_reviews['review_norm']
# tfidf_vectorizer_4 = tfidf_vectorizer_3
my_reviews_pred_prob = model_4.predict_proba(count_tf_idf_lemm.transform(texts.apply(lambda x: text_preprocessing_3(x))))[:, 1]
for i, review in enumerate(texts.str.slice(0, 100)):
    print(f'{my_reviews_pred_prob[i]:.2f}: {review}')
# ## Conclusions
# For this project, we built models to categorize movie reviews. The goal was to train a model to automatically detect negative reviews and we needed to create a model with an F1 score of at least 0.85.
#
# Luckily, the data was already split into train and test portions and the classes were balanced, so there was no need for further manipulation. We did remove two rows with missing values, but otherwise we did not need to perform data prep.
#
# Since we don't have the compute power to run BERT, we focused on training and testing:
# - Model 0 - constant model for comparison
# - Model 1 - NLTK, TF-IDF and LR
# - Model 3 - spaCy, TF-IDF and LR
# - Model 4 - spaCy, TF-IDF and LGBMClassifier
#
# | Model | Data | Accuracy | F1 Score |
# |------|------|------|------|
# | Model 0 | Train | 0.50 | 0.67 |
# | Model 0 | Test | 0.50 | 0.67 |
# | Model 1 | Train | 0.94 | 0.94 |
# | Model 1 | Test | 0.88 | 0.88 |
# | Model 3 | Train | 0.93 | 0.93 |
# | Model 3 | Test | 0.88 | 0.88 |
# | Model 4 | Train | 0.91 | 0.91 |
# | Model 4 | Test | 0.86 | 0.86 |
#
# All three models performed better than Model 0, the constant dummy classifier, so in this instance modeling was useful.
#
# We discovered 2 models surpassing the F1 score goal of at least 0.85 (Model 1 and Model 3) on the test dataset.
# - Model 1, NLTK, TF-IDF and LR, produced a F1 score of 0.88 on the test data and 0.94 on the train data
# - Model 3, spaCy, TF-IDF and LR, produced a F1 score of 0.88 on the test data and 0.93 on the train data
# - Model 1, NLTK, TF-IDF and LR, has the slight edge because of its higher training F1 score
#
# It is likely the scores could improve further with parameter tuning.
#
# We analyzed the results of the 'My Reviews' section selecting 0.5 as an inflection point. If the value is > 0.5, the model detects a positive review. If the review is <= 0.5, the model detects a negative review.
#
# - Model 3 and 4 performed the best on this small set of 8 sample reviews (only 1 error each)
# - Model 1 performed the worst (3 errors out of 8 samples), but all the errors were on Positive reviews
# - These results do not match the results of the models on the 47329 samples of movie reviews. Model 4 performed worst on the large samples, but tied for best performance in the small 8 samples. Model 1 performed well on large samples, but poorly on the small samples.
#
# This could be chance (just poor subsample selection for those 8 samples) or we could be looking at a different dividing point (0.70 instead of 0.50, for instance). Additionally, even though this is an even distribution (4 positive and 4 negative reviews), we don't know how representative these 8 reviews are.
#
#
#
# | Rating | Model 1 | Model 3 | Model 4 | Error
# |------|------|------|------|------|
# | 1. Neg | 0.14 | 0.24 | 0.58 | Model 4
# | 2. Neg | 0.16 | 0.11 | 0.34 | None
# | 3. Pos | 0.54 | 0.41 | 0.60 | Model 1
# | 4. Neg | 0.11 | 0.11 | 0.37 | None
# | 5. Pos | 0.31 | 0.23 | 0.71 | Model 1,3
# | 6. Pos | 0.47 | 0.51 | 0.63 | Model 1
# | 7. Neg | 0.04 | 0.02 | 0.19 | None
# | 8. Pos | 0.82 | 0.92 | 0.77 | None
#
#
#
# In[ ]:
| [
"lightgbm.LGBMClassifier",
"seaborn.kdeplot",
"numpy.abs",
"numpy.argmax",
"sklearn.feature_extraction.text.TfidfVectorizer",
"pandas.read_csv",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.style.use",
"sklearn.metrics.f1_score",
"numpy.arange",
"torch.device",
"nltk.download",
"torc... | [((1034, 1058), 'matplotlib.pyplot.style.use', 'plt.style.use', (['"""seaborn"""'], {}), "('seaborn')\n", (1047, 1058), True, 'import matplotlib.pyplot as plt\n'), ((1322, 1335), 'tqdm.auto.tqdm.pandas', 'tqdm.pandas', ([], {}), '()\n', (1333, 1335), False, 'from tqdm.auto import tqdm\n'), ((2838, 2873), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(2)', '(1)'], {'figsize': '(16, 8)'}), '(2, 1, figsize=(16, 8))\n', (2850, 2873), True, 'import matplotlib.pyplot as plt\n'), ((4344, 4379), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(16, 5)'}), '(1, 2, figsize=(16, 5))\n', (4356, 4379), True, 'import matplotlib.pyplot as plt\n'), ((4617, 4640), 'seaborn.kdeplot', 'sns.kdeplot', (['dft'], {'ax': 'ax'}), '(dft, ax=ax)\n', (4628, 4640), True, 'import seaborn as sns\n'), ((5030, 5065), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(12, 4)'}), '(1, 2, figsize=(12, 4))\n', (5042, 5065), True, 'import matplotlib.pyplot as plt\n'), ((6598, 6670), 'seaborn.kdeplot', 'sns.kdeplot', (['dft[0]'], {'color': '"""blue"""', 'label': '"""negative"""', 'kernel': '"""epa"""', 'ax': 'ax'}), "(dft[0], color='blue', label='negative', kernel='epa', ax=ax)\n", (6609, 6670), True, 'import seaborn as sns\n'), ((6671, 6744), 'seaborn.kdeplot', 'sns.kdeplot', (['dft[1]'], {'color': '"""green"""', 'label': '"""positive"""', 'kernel': '"""epa"""', 'ax': 'ax'}), "(dft[1], color='green', label='positive', kernel='epa', ax=ax)\n", (6682, 6744), True, 'import seaborn as sns\n'), ((7314, 7386), 'seaborn.kdeplot', 'sns.kdeplot', (['dft[0]'], {'color': '"""blue"""', 'label': '"""negative"""', 'kernel': '"""epa"""', 'ax': 'ax'}), "(dft[0], color='blue', label='negative', kernel='epa', ax=ax)\n", (7325, 7386), True, 'import seaborn as sns\n'), ((7387, 7460), 'seaborn.kdeplot', 'sns.kdeplot', (['dft[1]'], {'color': '"""green"""', 'label': '"""positive"""', 'kernel': '"""epa"""', 'ax': 'ax'}), "(dft[1], color='green', 
label='positive', kernel='epa', ax=ax)\n", (7398, 7460), True, 'import seaborn as sns\n'), ((13336, 13384), 'sklearn.dummy.DummyClassifier', 'DummyClassifier', ([], {'strategy': '"""constant"""', 'constant': '(1)'}), "(strategy='constant', constant=1)\n", (13351, 13384), False, 'from sklearn.dummy import DummyClassifier\n'), ((13794, 13820), 'nltk.download', 'nltk.download', (['"""stopwords"""'], {}), "('stopwords')\n", (13807, 13820), False, 'import nltk\n'), ((13881, 13919), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': 'stop_words'}), '(stop_words=stop_words)\n', (13896, 13919), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((14127, 14185), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(12345)', 'solver': '"""liblinear"""'}), "(random_state=12345, solver='liblinear')\n", (14145, 14185), False, 'from sklearn.linear_model import LogisticRegression\n'), ((14426, 14481), 'spacy.load', 'spacy.load', (['"""en_core_web_sm"""'], {'disable': "['parser', 'ner']"}), "('en_core_web_sm', disable=['parser', 'ner'])\n", (14436, 14481), False, 'import spacy\n'), ((14607, 14653), 'en_core_web_sm.load', 'en_core_web_sm.load', ([], {'disable': "['parser', 'ner']"}), "(disable=['parser', 'ner'])\n", (14626, 14653), False, 'import en_core_web_sm\n'), ((15219, 15257), 'sklearn.feature_extraction.text.TfidfVectorizer', 'TfidfVectorizer', ([], {'stop_words': 'stop_words'}), '(stop_words=stop_words)\n', (15234, 15257), False, 'from sklearn.feature_extraction.text import TfidfVectorizer\n'), ((15429, 15487), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(12345)', 'solver': '"""liblinear"""'}), "(random_state=12345, solver='liblinear')\n", (15447, 15487), False, 'from sklearn.linear_model import LogisticRegression\n'), ((15908, 15942), 'lightgbm.LGBMClassifier', 'LGBMClassifier', ([], {'random_state': '(12345)'}), 
'(random_state=12345)\n', (15922, 15942), False, 'from lightgbm import LGBMClassifier\n'), ((16218, 16281), 'transformers.BertTokenizer.from_pretrained', 'transformers.BertTokenizer.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (16260, 16281), False, 'import transformers\n'), ((16291, 16351), 'transformers.BertConfig.from_pretrained', 'transformers.BertConfig.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (16330, 16351), False, 'import transformers\n'), ((16360, 16419), 'transformers.BertModel.from_pretrained', 'transformers.BertModel.from_pretrained', (['"""bert-base-uncased"""'], {}), "('bert-base-uncased')\n", (16398, 16419), False, 'import transformers\n'), ((18502, 19363), 'pandas.DataFrame', 'pd.DataFrame', (['[\'I did not simply like it, not my kind of movie.\',\n \'Well, I was bored and felt asleep in the middle of the movie.\',\n \'I was really fascinated with the movie\',\n \'Even the actors looked really old and disinterested, and they got paid to be in the movie. What a soulless cash grab.\'\n ,\n "I didn\'t expect the reboot to be so good! Writers really cared about the source material"\n ,\n "The movie had its upsides and downsides, but I feel like overall it\'s a decent flick. I could see myself going to see it again."\n ,\n "What a rotten attempt at a comedy. Not a single joke lands, everyone acts annoying and loud, even kids won\'t like this!"\n ,\n \'Launching on Netflix was a brave move & I really appreciate being able to binge on episode after episode, of this exciting intelligent new drama.\'\n ]'], {'columns': "['review']"}), '([\'I did not simply like it, not my kind of movie.\',\n \'Well, I was bored and felt asleep in the middle of the movie.\',\n \'I was really fascinated with the movie\',\n \'Even the actors looked really old and disinterested, and they got paid to be in the movie. What a soulless cash grab.\'\n ,\n "I didn\'t expect the reboot to be so good! 
Writers really cared about the source material"\n ,\n "The movie had its upsides and downsides, but I feel like overall it\'s a decent flick. I could see myself going to see it again."\n ,\n "What a rotten attempt at a comedy. Not a single joke lands, everyone acts annoying and loud, even kids won\'t like this!"\n ,\n \'Launching on Netflix was a brave move & I really appreciate being able to binge on episode after episode, of this exciting intelligent new drama.\'\n ], columns=[\'review\'])\n', (18514, 19363), True, 'import pandas as pd\n'), ((1181, 1212), 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), "('ignore')\n", (1202, 1212), False, 'import warnings\n'), ((1404, 1521), 'pandas.read_csv', 'pd.read_csv', (['"""/Users/rraven/Desktop/a_final_yandex/datasets/imdb_reviews.tsv"""'], {'sep': '"""\t"""', 'dtype': "{'votes': 'Int64'}"}), "('/Users/rraven/Desktop/a_final_yandex/datasets/imdb_reviews.tsv',\n sep='\\t', dtype={'votes': 'Int64'})\n", (1415, 1521), True, 'import pandas as pd\n'), ((7957, 7992), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(3)'], {'figsize': '(20, 6)'}), '(1, 3, figsize=(20, 6))\n', (7969, 7992), True, 'import matplotlib.pyplot as plt\n'), ((11418, 11442), 'pandas.DataFrame', 'pd.DataFrame', (['eval_stats'], {}), '(eval_stats)\n', (11430, 11442), True, 'import pandas as pd\n'), ((13838, 13864), 'nltk.corpus.stopwords.words', 'stopwords.words', (['"""english"""'], {}), "('english')\n", (13853, 13864), False, 'from nltk.corpus import stopwords\n'), ((17618, 17644), 'numpy.concatenate', 'np.concatenate', (['embeddings'], {}), '(embeddings)\n', (17632, 17644), True, 'import numpy as np\n'), ((1543, 1620), 'pandas.read_csv', 'pd.read_csv', (['"""/datasets/imdb_reviews.tsv"""'], {'sep': '"""\t"""', 'dtype': "{'votes': 'Int64'}"}), "('/datasets/imdb_reviews.tsv', sep='\\t', dtype={'votes': 'Int64'})\n", (1554, 1620), True, 'import pandas as pd\n'), ((8307, 8331), 'numpy.arange', 'np.arange', (['(0)', '(1.01)', 
'(0.05)'], {}), '(0, 1.01, 0.05)\n', (8316, 8331), True, 'import numpy as np\n'), ((8491, 8528), 'sklearn.metrics.roc_curve', 'metrics.roc_curve', (['target', 'pred_proba'], {}), '(target, pred_proba)\n', (8508, 8528), True, 'import sklearn.metrics as metrics\n'), ((8547, 8588), 'sklearn.metrics.roc_auc_score', 'metrics.roc_auc_score', (['target', 'pred_proba'], {}), '(target, pred_proba)\n', (8568, 8588), True, 'import sklearn.metrics as metrics\n'), ((8697, 8747), 'sklearn.metrics.precision_recall_curve', 'metrics.precision_recall_curve', (['target', 'pred_proba'], {}), '(target, pred_proba)\n', (8727, 8747), True, 'import sklearn.metrics as metrics\n'), ((8762, 8813), 'sklearn.metrics.average_precision_score', 'metrics.average_precision_score', (['target', 'pred_proba'], {}), '(target, pred_proba)\n', (8793, 8813), True, 'import sklearn.metrics as metrics\n'), ((9025, 9045), 'numpy.argmax', 'np.argmax', (['f1_scores'], {}), '(f1_scores)\n', (9034, 9045), True, 'import numpy as np\n'), ((11278, 11321), 'sklearn.metrics.accuracy_score', 'metrics.accuracy_score', (['target', 'pred_target'], {}), '(target, pred_target)\n', (11300, 11321), True, 'import sklearn.metrics as metrics\n'), ((11355, 11392), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['target', 'pred_target'], {}), '(target, pred_target)\n', (11371, 11392), True, 'import sklearn.metrics as metrics\n'), ((16800, 16826), 'torch.device', 'torch.device', (['force_device'], {}), '(force_device)\n', (16812, 16826), False, 'import torch\n'), ((8353, 8402), 'sklearn.metrics.f1_score', 'metrics.f1_score', (['target', '(pred_proba >= threshold)'], {}), '(target, pred_proba >= threshold)\n', (8369, 8402), True, 'import sklearn.metrics as metrics\n'), ((17369, 17384), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17382, 17384), False, 'import torch\n'), ((9337, 9370), 'numpy.abs', 'np.abs', (['(f1_thresholds - threshold)'], {}), '(f1_thresholds - threshold)\n', (9343, 9370), True, 'import numpy as np\n'), 
((10035, 10069), 'numpy.abs', 'np.abs', (['(roc_thresholds - threshold)'], {}), '(roc_thresholds - threshold)\n', (10041, 10069), True, 'import numpy as np\n'), ((10799, 10832), 'numpy.abs', 'np.abs', (['(pr_thresholds - threshold)'], {}), '(pr_thresholds - threshold)\n', (10805, 10832), True, 'import numpy as np\n'), ((16877, 16902), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16900, 16902), False, 'import torch\n'), ((17213, 17276), 'torch.LongTensor', 'torch.LongTensor', (['ids_list[batch_size * i:batch_size * (i + 1)]'], {}), '(ids_list[batch_size * i:batch_size * (i + 1)])\n', (17229, 17276), False, 'import torch\n')] |
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_curve, auc
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import label_binarize
from scipy import interp
from itertools import cycle
from sklearn.tree import DecisionTreeClassifier
import pydotplus
from sklearn import tree
import collections
from IPython.display import Image
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn import svm
from sklearn.multiclass import OneVsOneClassifier
# Tick appearance defaults for every figure in this module: black inward
# ticks with medium labels, 8 pt major/minor tick marks padded 12 pt.
for _axis in ('xtick', 'ytick'):
    plt.rc(_axis, color='k', labelsize='medium', direction='in')
    plt.rc(_axis + '.major', size=8, pad=12)
    plt.rc(_axis + '.minor', size=8, pad=12)
def make_meshgrid(x, y, n=100):
    """Create a rectangular mesh of points to plot in.

    The mesh extends one unit beyond the min/max of each coordinate so
    decision boundaries drawn on it are not clipped at the data extremes.

    Parameters
    ----------
    x: array-like, data to base x-axis meshgrid on
    y: array-like, data to base y-axis meshgrid on
    n: number of intermediary points per axis, so n*n points total (optional)

    Returns
    -------
    xx, yy : ndarray, each of shape (n, n)
    """
    x_min, x_max = x.min() - 1, x.max() + 1
    y_min, y_max = y.min() - 1, y.max() + 1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, n),
                         np.linspace(y_min, y_max, n))
    return xx, yy
def plot_contours(ax, clf, xx, yy, **params):
    """Draw a classifier's decision regions over a precomputed meshgrid.

    Parameters
    ----------
    ax: matplotlib axes object
    clf: a fitted classifier exposing .predict
    xx: meshgrid ndarray of x coordinates
    yy: meshgrid ndarray of y coordinates
    params: dictionary of params to pass to contourf, optional

    Returns
    -------
    The contour set returned by ax.contourf.
    """
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    predictions = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, predictions, **params)
def countour_knn(n,X,y,w , resolution = 100, ax = None):
    '''
    Plot the decision regions of a k-nearest-neighbours classifier.

    Takes:
        - n : number of nearest neighbors
        - X : feature matrix (first two columns are plotted)
        - y : label
        - w : voting rule
        - resolution = 100 : number of points in the mesh (high number affect performance)
        - ax = None : ax for plotting; a fresh figure is created when omitted
    returns:
        (matplotlib.Axe)
    '''
    knn_model = KNeighborsClassifier(n_neighbors=n, weights=w, n_jobs=-1).fit(X, y)
    if ax is None:
        _, ax = plt.subplots(1, 1)
    feat0, feat1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(feat0, feat1, n=resolution)
    # decision regions first, then the raw points on top
    plot_contours(ax, knn_model, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(feat0, feat1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_xticks(())
    ax.set_yticks(())
    ax.set_title('K neighbors k=' + str(n) + ', ' + w)
    return ax
def makeROCcurveBin( X,y,model , ax ):
    """
    Draw the ROC curve of a fitted binary classifier on *ax*.

    Takes:
    * X : covariables
    * y : binary target
    * model : fitted classifier exposing decision_function and score
    * ax : matplotlib axes to draw on
    """
    scores = model.decision_function(X)
    fpr, tpr, _ = roc_curve(y, scores)
    area_under = auc(fpr, tpr)
    acc = model.score(X, y)
    ax.set_xlim([-0.01, 1.00])
    ax.set_ylim([-0.01, 1.01])
    ax.plot(fpr, tpr, lw=3,
            label='LogRegr ROC curve\n (area = {:0.2f})\n Acc={:1.3f}'.format(area_under, acc))
    ax.set_xlabel('False Positive Rate', fontsize=16)
    ax.set_ylabel('True Positive Rate', fontsize=16)
    ax.set_title('ROC curve (logistic classifier)', fontsize=16)
    ax.legend(loc='lower right', fontsize=13)
    # chance-level diagonal for reference
    ax.plot([0, 1], [0, 1], color='navy', lw=3, linestyle='--')
    return
def makeROCcurveMulti( X,y,model , ax ):
    """
    Draw per-class, micro- and macro-averaged ROC curves for a multi-class
    classifier on *ax*, using a one-vs-rest decomposition.

    Takes:
    * X : covariables
    * y : integer target with more than two classes
    * model : classifier exposing decision_function (wrapped in OneVsRest)
    * ax : matplotlib axes to draw on
    """
    n_classes = len(set(y))
    y = label_binarize(y, classes=np.arange(0,n_classes,1))
    y_score = OneVsRestClassifier(model).fit(X, y).decision_function(X)
    # Per-class ROC curves (one-vs-rest).
    fpr, tpr, roc_auc = dict(), dict(), dict()
    for cls in range(n_classes):
        fpr[cls], tpr[cls], _ = roc_curve(y[:, cls], y_score[:, cls])
        roc_auc[cls] = auc(fpr[cls], tpr[cls])
    # Micro average: pool every (sample, class) decision into one curve.
    fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # Macro average: mean of the per-class curves on a shared FPR grid.
    all_fpr = np.unique(np.concatenate([fpr[cls] for cls in range(n_classes)]))
    mean_tpr = np.zeros_like(all_fpr)
    for cls in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[cls], tpr[cls])
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    lw = 3
    # Averaged curves first, then one curve per class.
    ax.plot(fpr["micro"], tpr["micro"],
            label='micro-average ROC curve (area = {0:0.2f})'.format(roc_auc["micro"]),
            color='deeppink', linestyle=':', linewidth=4)
    ax.plot(fpr["macro"], tpr["macro"],
            label='macro-average ROC curve (area = {0:0.2f})'.format(roc_auc["macro"]),
            color='navy', linestyle=':', linewidth=4)
    for cls, color in zip(range(n_classes), cycle(['aqua', 'darkorange', 'cornflowerblue'])):
        ax.plot(fpr[cls], tpr[cls], color=color, lw=lw,
                label='ROC curve of class {0} (area = {1:0.2f})'.format(cls, roc_auc[cls]))
    ax.plot([0, 1], [0, 1], 'k--', lw=lw)
    ax.set_xlim([0.0, 1.0])
    ax.set_ylim([0.0, 1.05])
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Multi class Receiver operating characteristic curve')
    ax.legend(loc="lower right")
    return
def makeROCcurve(X,y,model,ax):
    """Dispatch to the binary or multi-class ROC plot based on label count."""
    if len(set(y)) <= 2:
        makeROCcurveBin(X, y, model, ax)
    else:
        makeROCcurveMulti(X, y, model, ax)
def countour_lr(p,X,y,c,mult='ovr'):
    """
    Fit a logistic regression on the first two features of X and plot its
    decision regions, per-class linear boundaries, ROC curve and per-class
    probability maps.

    Takes:
    * p : penalty {‘l1’, ‘l2’, ‘elasticnet’, ‘none’}
    * X : covariables
    * y : target
    * c : inverse regularization strength
    * mult = 'ovr': how to handle multi-class {‘auto’, ‘ovr’, ‘multinomial’}
    """
    models = LogisticRegression(penalty = p,C=c, multi_class=mult)
    # Create the logistic regresison object(with 3 main hyperparameters!!)
    # penalty is either l1 or l2, C is how much weight we put on the regularization, multi_calss is how we proceed when multiclasses
    models = models.fit(X, y)
    # fixed palette keyed by class index — only supports up to 3 classes
    dico_color={0:'blue',1:'white',2:'red'}
    titles = 'Logistic regression penalty='+str(p)+' C='+str(c)
    nbCategories = len( set(y) )
    if nbCategories> 3 :
        print("more than 3 categories detected... problem may occur with this code")
    ## simple mesh
    #nl = int( np.ceil(nbCategories/2) ) # number of category lines
    #fig, ax = plt.subplots(nl+1,2,figsize=(10,5 + 2.5*nl),
    #                       gridspec_kw = {'height_ratios':[2]+[1]*(nl)})
    fig, ax = plt.subplots(1,2,figsize=(10,5))
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(X0, X1)
    # left panel: decision regions plus the raw points
    plot_contours(ax[0], models, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
    ax[0].scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    interc=models.intercept_
    wei=models.coef_
    # dashed line per class: solve intercept + w0*x + w1*y = 0 for y at the
    # mesh's x extremes
    for i in range(len(interc)):
        ax[0].plot([xx.min(),xx.max()],
                   [-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]],
                   color=dico_color[i],ls='--')
    ax[0].set_xlim(xx.min(), xx.max())
    ax[0].set_ylim(yy.min(), yy.max())
    ax[0].set_xticks(())
    ax[0].set_yticks(())
    ax[0].set_title(titles)
    ## plotting decision functions
    # wider 100x100 grid (data range +/- 5) used for the probability images below
    xx = np.linspace(np.min(X0)-5, np.max(X0)+5, 100)
    yy = np.linspace(np.min(X1)-5, np.max(X1)+5, 100).T
    xx, yy = np.meshgrid(xx, yy)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    y_pred = models.predict(X)
    accuracy = accuracy_score(y, y_pred)
    #print("Accuracy (train) for %s: %0.1f%% " % (name, accuracy * 100))
    ## plotting ROCcurve
    makeROCcurve(X,y,models,ax[1])
    plt.tight_layout()
    # View probabilities:
    probas = models.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    # second figure: one probability heatmap per predicted class
    fig, ax = plt.subplots(1,n_classes,figsize=(10,5))
    for k in range(n_classes):
        # plt.subplots returns a bare Axes (not an array) when n_classes == 1
        if n_classes==1:
            AX=ax
        else:
            AX = ax[k]
        imshow_handle = AX.imshow(probas[:, k].reshape((100, 100)),extent=(np.min(X0)-5, np.max(X0)+5, np.min(X1)-5, np.max(X1)+5), origin='lower',cmap='plasma')
        AX.set_xticks(())
        AX.set_xlim([np.min(X0)-5, np.max(X0)+5])
        AX.set_ylim([np.min(X1)-5, np.max(X1)+5])
        AX.set_yticks(())
        AX.set_title('Class '+str(k),fontsize=25)
        # same per-class linear boundaries as the left panel, on the wider grid
        for i in range(len(interc)):
            AX.plot([np.min(X0)-5,np.max(X0)+5],
                    [-(interc[i]+wei[i][0]*(np.min(X0)-5))/wei[i][1],-(interc[i]+wei[i][0]*(np.max(X0)+5))/wei[i][1]],
                    color=dico_color[i],ls='--')
        # overlay only the points predicted as class k, colored by true label
        idx = (y_pred == k)
        if idx.any():
            AX.scatter(X[idx, 0], X[idx, 1],
                       s = 100,marker='o',
                       c=[dico_color[h] for h in y[idx]],
                       edgecolor='k')
        #cbar=plt.colorbar(imshow_handle,ax=axs[k])
        #cbar.ax.set_title('Probability',fontsize=10)
    plt.tight_layout()
    # shared horizontal colorbar under all probability panels
    axo = plt.axes([0,0,1,0.05])
    plt.colorbar(imshow_handle, cax=axo, orientation='horizontal')
    axo.set_xlabel("Probability",fontsize=25)
def roc_multi_ovr(grid_lr_acc_i,
                  n_classes,
                  X_train,
                  y_train,
                  X_test,
                  y_test):
    """
    Plot per-class, micro- and macro-averaged ROC curves on the test set for
    a one-vs-rest logistic regression whose hyperparameters come from a
    fitted grid search.

    Takes:
    * grid_lr_acc_i : fitted search object exposing best_params_ with
      'model__penalty' and 'model__C'
    * n_classes : number of classes (assumed to equal len(set(y_test)) — confirm)
    * X_train, y_train : training covariables / target
    * X_test, y_test : evaluation covariables / target
    """
    # sorted unique labels; used both for binarizing and for legend names
    L = list(set(y_test))
    L.sort()
    y = label_binarize(y_test, classes=L)
    classifier = OneVsRestClassifier(LogisticRegression(penalty = grid_lr_acc_i.best_params_['model__penalty'],
                                     C=grid_lr_acc_i.best_params_['model__C'],solver='liblinear'))
    y_score = classifier.fit(X_train, y_train).decision_function(X_test)
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    # Compute micro-average ROC curve and ROC area
    fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # First aggregate all false positive rates
    all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
    # Then interpolate all ROC curves at this points
    mean_tpr = np.zeros_like(all_fpr)
    for j in range(n_classes):
        mean_tpr += interp(all_fpr, fpr[j], tpr[j])
    # Finally average it and compute AUC
    mean_tpr /= n_classes
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    lw = 3
    # Plot all ROC curves
    plt.figure(figsize=(7,7))
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.3f})'
                   ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.3f})'
                   ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(range(n_classes), colors):
        plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                 label='ROC curve of class {0} (area = {1:0.3f})'
                       ''.format(L[i], roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Multi class Receiver operating characteristic curve\nOnevsRest')
    plt.legend(loc="lower right")
    plt.show()
#This you don't care too much either but if you want to use it one day it is here : It is a way to plot Roc curves in the
#context of one vs one multiclass
def roc_multi_ovo(grid_lr_acc_i,n_classes,X_train,y_train,X_test,y_test):
    """
    Plot micro/macro-averaged and per-pair ROC curves on the test set for a
    one-vs-one logistic regression whose hyperparameters come from a fitted
    grid search.

    Takes:
    * grid_lr_acc_i : fitted search object exposing best_params_ with
      'model__penalty' and 'model__C'
    * n_classes : number of classes (currently overridden to 3 below)
    * X_train, y_train : training covariables / target
    * X_test, y_test : evaluation covariables / target

    Cleanup vs. the earlier version: the y_list/np.vstack construction was
    removed — its result was immediately overwritten by label_binarize.
    """
    # NOTE(review): the parameter is overridden here; the pair/column
    # bookkeeping below only lines up when there are exactly 3 classes, so
    # this stays hard-coded until the OvO decision_function handling is
    # generalised. TODO: honour the n_classes argument.
    n_classes=3
    y = label_binarize(y_test, classes=np.arange(0,n_classes,1))
    classifier = OneVsOneClassifier(LogisticRegression(penalty = grid_lr_acc_i.best_params_['model__penalty'],
                                     C=grid_lr_acc_i.best_params_['model__C'],solver='liblinear'))
    y_score = classifier.fit(X_train, y_train).decision_function(X_test)
    # One ROC curve per unordered class pair (i, j), reading column k of the
    # binarized labels and the decision scores.
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    k=0
    for i in range(n_classes):
        for j in range(i+1,n_classes):
            pair = str(i)+'_'+str(j)
            fpr[pair], tpr[pair], _ = roc_curve(y[:, k], y_score[:, k])
            roc_auc[pair] = auc(fpr[pair], tpr[pair])
            k+=1
    # Micro average: pool every (sample, column) decision into a single curve.
    fpr["micro"], tpr["micro"], _ = roc_curve(y.ravel(), y_score.ravel())
    roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
    # Macro average: mean of the stored curves on a shared FPR grid (the grid
    # intentionally includes the micro curve's FPR points, as before).
    all_fpr = np.unique(np.concatenate([fpr[i] for i in fpr.keys()]))
    mean_tpr = np.zeros_like(all_fpr)
    for j in fpr.keys():
        if j!='micro':
            mean_tpr += interp(all_fpr, fpr[j], tpr[j])
    # divide by the number of class pairs
    mean_tpr /= (n_classes*(n_classes-1)/2)
    fpr["macro"] = all_fpr
    tpr["macro"] = mean_tpr
    roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
    lw = 3
    # Plot all ROC curves
    plt.figure(figsize=(7,7))
    plt.plot(fpr["micro"], tpr["micro"],
             label='micro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["micro"]),
             color='deeppink', linestyle=':', linewidth=4)
    plt.plot(fpr["macro"], tpr["macro"],
             label='macro-average ROC curve (area = {0:0.2f})'
                   ''.format(roc_auc["macro"]),
             color='navy', linestyle=':', linewidth=4)
    colors = cycle(['aqua', 'darkorange', 'cornflowerblue'])
    for i, color in zip(fpr.keys(), colors):
        if i!="macro" and i!="micro":
            plt.plot(fpr[i], tpr[i], color=color, lw=lw,
                     label='ROC curve of class {0} (area = {1:0.2f})'
                           ''.format(i, roc_auc[i]))
    plt.plot([0, 1], [0, 1], 'k--', lw=lw)
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Multi class Receiver operating characteristic curve\nOnevsOne')
    plt.legend(loc="lower right")
    plt.show()
def contour_SVM(X,y,c,ker,deg=2,gam=1,mult='ovr'):
    """
    Fit an SVM classifier on (X, y) and visualize it: 2D decision regions
    (plus the separating hyperplanes when the kernel is linear), a ROC
    curve, and one per-class probability map on a padded 100x100 grid.

    Takes:
    * X : covariable (the first two columns are used for plotting)
    * y : target
    * c : regularization parameter
    * ker : kernel
    * deg : degree (polynomial kernel)
    * gam : gamma (RBF kernel width)
    * mult : decision function shape
    """
    models = svm.SVC(C=c, kernel=ker, degree=deg, gamma= gam, decision_function_shape=mult,probability=True)
    #those are all the hyperparameters that are, in my opinion, important to tune. C is again the good old inverse of the weight for l2
    #regularization, kernel is the dot product you want to use, degree is the degree of the polynomial kernel you want to use,
    #gamma is the standard deviation for the Gaussian Radial Basis function, decision_function_shape is used in case of multiclass,
    #proba = True is just here so we can draw the proba countour in our plot.
    models = models.fit(X, y)
    # scatter colors for predicted points, one entry per class label
    dico_color={0:'blue',1:'white',2:'red'}
    titles = 'SVM'+' C='+str(c)+' '+ker
    # ---- figure 1: decision regions (left panel) + ROC curve (right panel) ----
    fig, ax = plt.subplots(1,2,figsize=(10,5))
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(X0, X1)
    plot_contours(ax[0], models, xx, yy,cmap=plt.cm.coolwarm, alpha=0.8)
    ax[0].scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    if ker=='linear':
        # for a linear kernel each decision boundary is a line:
        # w0*x + w1*y + b = 0  =>  y = -(b + w0*x) / w1
        interc=models.intercept_
        wei=models.coef_
        for i in range(len(interc)):
            ax[0].plot([xx.min(),xx.max()],
                       [-(interc[i]+wei[i][0]*xx.min())/wei[i][1],-(interc[i]+wei[i][0]*xx.max())/wei[i][1]],
                       color=dico_color[i],ls='--')
    ax[0].set_xlim(xx.min(), xx.max())
    ax[0].set_ylim(yy.min(), yy.max())
    ax[0].set_xticks(())
    ax[0].set_yticks(())
    ax[0].set_title(titles)
    ## plotting decision functions
    # rebuild a regular 100x100 grid padded by 5 units on every side
    xx = np.linspace(np.min(X0)-5, np.max(X0)+5, 100)
    yy = np.linspace(np.min(X1)-5, np.max(X1)+5, 100).T
    xx, yy = np.meshgrid(xx, yy)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    y_pred = models.predict(X)
    accuracy = accuracy_score(y, y_pred)  # train accuracy (computed, not displayed)
    ## plotting ROCcurve
    makeROCcurve(X,y,models,ax[1])
    plt.tight_layout()
    # View probabilities:
    probas = models.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    # ---- figure 2: one probability map per predicted class ----
    fig, ax = plt.subplots(1,n_classes,figsize=(10,5))
    for k in range(n_classes):
        # plt.subplots returns a bare Axes (not an array) when n_classes == 1
        if n_classes==1:
            AX=ax
        else:
            AX = ax[k]
        imshow_handle = AX.imshow(probas[:, k].reshape((100, 100)),extent=(np.min(X0)-5, np.max(X0)+5, np.min(X1)-5, np.max(X1)+5), origin='lower',cmap='plasma')
        AX.set_xticks(())
        AX.set_xlim([np.min(X0)-5, np.max(X0)+5])
        AX.set_ylim([np.min(X1)-5, np.max(X1)+5])
        AX.set_yticks(())
        AX.set_title('Class '+str(k),fontsize=25)
        if ker == "linear":
            # redraw the separating lines on top of each probability map
            for i in range(len(interc)):
                AX.plot([np.min(X0)-5,np.max(X0)+5],
                        [-(interc[i]+wei[i][0]*(np.min(X0)-5))/wei[i][1],-(interc[i]+wei[i][0]*(np.max(X0)+5))/wei[i][1]],
                        color=dico_color[i],ls='--')
        # overlay the training points predicted as class k
        idx = (y_pred == k)
        if idx.any():
            AX.scatter(X[idx, 0], X[idx, 1],
                       s = 100,marker='o',
                       c=[dico_color[h] for h in y[idx]],
                       edgecolor='k')
    plt.tight_layout()
    # shared horizontal colorbar below the class maps
    axo = plt.axes([0,0,1,0.05])
    plt.colorbar(imshow_handle, cax=axo, orientation='horizontal')
    axo.set_xlabel("Probability",fontsize=25)
def countour_tree(X,y,crit,maxd,min_s,min_l,max_f):
    """Fit a single decision tree on (X, y), draw its 2D decision regions,
    then render the fitted tree itself as a colored graphviz image.

    Takes:
    * X : covariables (the first two columns are used for plotting)
    * y : target
    * crit : impurity criterion
    * maxd : tree max depth
    * min_s : minimum number of samples to split an internal node
    * min_l : minimum number of samples required at a leaf
    * max_f : maximum number of features considered per split

    Returns an IPython Image of the rendered tree.
    """
    clf = DecisionTreeClassifier(criterion=crit,
                                 max_depth=maxd,
                                 min_samples_split=min_s,
                                 min_samples_leaf=min_l,
                                 max_features=max_f).fit(X, y)
    # --- decision-region plot over the first two covariables ---
    fig, axis = plt.subplots(1, 1)
    first, second = X[:, 0], X[:, 1]
    grid_x, grid_y = make_meshgrid(first, second)
    plot_contours(axis, clf, grid_x, grid_y,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    axis.scatter(first, second, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    axis.set_xlim(grid_x.min(), grid_x.max())
    axis.set_ylim(grid_y.min(), grid_y.max())
    axis.set_title('Decision tree ' + f'{crit} {maxd} {min_s} {min_l} {max_f}')
    plt.show()
    # --- export the fitted tree to graphviz and color each node by the
    # branch (left = turquoise, right = orange) it hangs from ---
    graph = pydotplus.graph_from_dot_data(
        tree.export_graphviz(clf,
                             feature_names=['x','y'],
                             out_file=None,
                             filled=True,
                             rounded=True))
    fill = ('turquoise', 'orange')
    children = collections.defaultdict(list)
    for link in graph.get_edge_list():
        children[link.get_source()].append(int(link.get_destination()))
    for parent in children:
        children[parent].sort()
        for rank in range(2):
            node = graph.get_node(str(children[parent][rank]))[0]
            node.set_fillcolor(fill[rank])
    return Image(graph.create_png())
def countour_RF(X,y,n_tree,crit,maxd,min_s,min_l,max_f):
    """
    Performs a classification using a random forest and plots a 2D decision space
    and then does the same for a single tree classifier with similar hyper parameters for comparison
    Takes:
    * X : covariables (the first two columns are used for plotting)
    * y : target
    * n_tree : number of tree in the forest
    * crit : impurity criterion
    * maxd : tree max depth
    * min_s : minimum number of samples to consider an internal node rule
    * min_l : minimum number of samples to consider an leaf node rule
    * max_f : maximum number of features to consider at a node
    """
    models = RandomForestClassifier(n_tree,criterion=crit,max_depth=maxd,min_samples_split=min_s,min_samples_leaf=min_l,max_features=max_f)
    models = models.fit(X, y)
    # scatter colors for predicted points, one entry per class label
    dico_color={0:'blue',1:'white',2:'red'}
    # title for the plots
    titles = 'Random Forest '+' '.join([str(crit),
                                        str(maxd),
                                        str(min_s),
                                        str(min_l),
                                        str(max_f)])
    # grid layout: two rows for the side-by-side contour plots, then one
    # extra row of probability maps for every 4 classes
    nCat = len(set(y))
    fig = plt.figure(constrained_layout=True,figsize=(10,4+np.ceil(nCat/4)*4))
    gs = GridSpec( 2+ int(np.ceil(nCat/4)), 4, figure=fig)
    ### plot 1 : RF contour (left half of the top rows)
    ax = fig.add_subplot(gs[:2, :2])
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(X0, X1)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    plot_contours(ax, models, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_title(titles)
    ## probability contour for each category
    # rebuild a regular 100x100 grid padded by 5 units on every side
    xx = np.linspace(np.min(X0)-5, np.max(X0)+5, 100)
    yy = np.linspace(np.min(X1)-5, np.max(X1)+5, 100).T
    xx, yy = np.meshgrid(xx, yy)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    y_pred = models.predict(X)
    accuracy = accuracy_score(y, y_pred)  # train accuracy (computed, not displayed)
    # View probabilities:
    probas = models.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    for k in range(n_classes):
        # one probability map per class, 4 maps per row below the contours
        ax = fig.add_subplot(gs[2+k//4, k%4])
        if k == 0:
            ax.set_ylabel('Random Forest')
        imshow_handle = ax.imshow(probas[:, k].reshape((100, 100)),
                                  extent=(np.min(X0)-5, np.max(X0)+5,
                                          np.min(X1)-5, np.max(X1)+5),
                                  origin='lower',cmap='plasma')
        ax.set_xticks(())
        ax.set_xlim([np.min(X0)-5, np.max(X0)+5])
        ax.set_ylim([np.min(X1)-5, np.max(X1)+5])
        ax.set_yticks(())
        ax.set_title('Class '+str(k),fontsize=25)
        # overlay the training points predicted as class k
        idx = (y_pred == k)
        if idx.any():
            ax.scatter(X[idx, 0], X[idx, 1],
                       s=100, marker='o',
                       c=[dico_color[h] for h in y[idx]], edgecolor='k')
    ## comparing with a decision tree
    models = DecisionTreeClassifier(criterion=crit,
                                    max_depth=maxd,
                                    min_samples_split=min_s,
                                    min_samples_leaf=min_l,
                                    max_features=max_f)
    models = models.fit(X, y)
    # title for the plots
    titles = 'Decision tree '+' '.join([str(crit),str(maxd),str(min_s),str(min_l),str(max_f)])
    ### plot 2 : single-tree contour (right half of the top rows)
    ax = fig.add_subplot(gs[:2, 2:])
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(X0, X1)
    plot_contours(ax, models, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_title(titles)
    plt.show()
    # shared horizontal colorbar for the probability maps
    ax = plt.axes([0,0,1,0.05])
    plt.title("Probability",fontsize=25)
    plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
    plt.show()
def countour_ADA(X,y,n_estimators,learning_rate):
    '''
    Fit an AdaBoost classifier on (X, y) and visualize it in two figures:
    first the 2D decision regions over the two covariables, then one
    probability map per predicted class on a padded 100x100 grid.

    Takes:
    * X : covariables
    * y : target
    * n_estimators : number of stumps
    * learning_rate : learning rate
    '''
    booster = AdaBoostClassifier(n_estimators=n_estimators,
                                 learning_rate=learning_rate).fit(X, y)
    # scatter colors for predicted points, one entry per class label
    class_color = {0:'blue',1:'white',2:'red'}
    # ---- figure 1: decision regions ----
    fig, axis = plt.subplots(1, 1, figsize=(5,5))
    X0, X1 = X[:, 0], X[:, 1]
    mesh_x, mesh_y = make_meshgrid(X0, X1)
    Xfull = np.c_[mesh_x.ravel(), mesh_y.ravel()]
    plot_contours(axis, booster, mesh_x, mesh_y,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    axis.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    axis.set_xlim(mesh_x.min(), mesh_x.max())
    axis.set_ylim(mesh_y.min(), mesh_y.max())
    axis.set_title(f'Adaboost {n_estimators} {learning_rate}')
    plt.show()
    # ---- figure 2: per-class probability maps on a padded 100x100 grid ----
    x_lo, x_hi = np.min(X0) - 5, np.max(X0) + 5
    y_lo, y_hi = np.min(X1) - 5, np.max(X1) + 5
    grid_x, grid_y = np.meshgrid(np.linspace(x_lo, x_hi, 100),
                                 np.linspace(y_lo, y_hi, 100).T)
    Xfull = np.c_[grid_x.ravel(), grid_y.ravel()]
    y_pred = booster.predict(X)
    accuracy = accuracy_score(y, y_pred)  # train accuracy (computed, not displayed)
    probas = booster.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    plt.figure(figsize=(10 * n_classes, 10))
    for k in range(n_classes):
        plt.subplot(1, n_classes, k + 1)
        if k == 0:
            plt.ylabel('Adaboost')
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),
                                   extent=(x_lo, x_hi, y_lo, y_hi),
                                   origin='lower', cmap='plasma')
        plt.xticks(())
        plt.xlim([x_lo, x_hi])
        plt.ylim([y_lo, y_hi])
        plt.yticks(())
        plt.title(f'Class {k}', fontsize=25)
        # overlay the training points predicted as class k
        members = (y_pred == k)
        if members.any():
            plt.scatter(X[members, 0], X[members, 1], s=100, marker='o',
                        c=[class_color[h] for h in y[members]], edgecolor='k')
    # shared horizontal colorbar below the class maps
    cbar_ax = plt.axes([0,0,1,0.05])
    plt.title("Probability", fontsize=25)
    plt.colorbar(imshow_handle, cax=cbar_ax, orientation='horizontal')
    plt.show()
def countour_BG(X,y,
                n_estimators,
                learning_rate,
                max_depth,
                min_samples_split,
                min_samples_leaf,
                max_features='auto'):
    """
    Fit a gradient-boosted tree classifier on (X, y) and visualize it:
    first the 2D decision regions over the two covariables, then one
    probability map per predicted class on a padded 100x100 grid.

    Takes:
    * X : covariables data
    * y : target
    * n_estimators : number of trees
    * learning_rate : learning rate
    * max_depth : tree max depth
    * min_samples_split : minimum number of samples to consider an internal node rule
    * min_samples_leaf : minimum number of samples to consider an leaf node rule
    * max_features : maximum number of features to consider at a node
      (NOTE(review): the 'auto' default is deprecated/removed in recent
      scikit-learn releases — confirm against the installed version)
    """
    models = GradientBoostingClassifier(n_estimators=n_estimators,
                                        learning_rate=learning_rate,
                                        max_depth=max_depth,
                                        min_samples_split=min_samples_split,
                                        min_samples_leaf=min_samples_leaf,
                                        max_features=max_features)
    models = models.fit(X, y)
    # scatter colors for predicted points, one entry per class label
    dico_color={0:'blue',1:'white',2:'red'}
    # title for the plots
    titles = 'Gradient Boosted '+' '.join([str(n_estimators),str(learning_rate)])
    # ---- figure 1: decision regions ----
    fig, ax = plt.subplots(1, 1,figsize=(5,5))
    X0, X1 = X[:, 0], X[:, 1]
    xx, yy = make_meshgrid(X0, X1)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    plot_contours(ax, models, xx, yy,
                  cmap=plt.cm.coolwarm, alpha=0.8)
    ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
    ax.set_xlim(xx.min(), xx.max())
    ax.set_ylim(yy.min(), yy.max())
    ax.set_title(titles)
    plt.show()
    # ---- figure 2: per-class probability maps ----
    # rebuild a regular 100x100 grid padded by 5 units on every side
    xx = np.linspace(np.min(X0)-5, np.max(X0)+5, 100)
    yy = np.linspace(np.min(X1)-5, np.max(X1)+5, 100).T
    xx, yy = np.meshgrid(xx, yy)
    Xfull = np.c_[xx.ravel(), yy.ravel()]
    y_pred = models.predict(X)
    accuracy = accuracy_score(y, y_pred)  # train accuracy (computed, not displayed)
    # View probabilities:
    probas = models.predict_proba(Xfull)
    n_classes = np.unique(y_pred).size
    plt.figure(figsize=(10*n_classes,10))
    for k in range(n_classes):
        plt.subplot(1, n_classes, k + 1)
        if k == 0:
            plt.ylabel('Gradient Boosted')
        # probability of class k over the grid, drawn as a translucent image
        imshow_handle = plt.imshow(probas[:, k].reshape((100, 100)),extent=(np.min(X0)-5, np.max(X0)+5, np.min(X1)-5, np.max(X1)+5), origin='lower',cmap='plasma',alpha=0.7)
        plt.xticks(())
        plt.xlim([np.min(X0)-5, np.max(X0)+5])
        plt.ylim([np.min(X1)-5, np.max(X1)+5])
        plt.yticks(())
        plt.title('Class '+str(k),fontsize=25)
        # overlay the training points predicted as class k
        idx = (y_pred == k)
        if idx.any():
            plt.scatter(X[idx, 0], X[idx, 1],s=100, marker='o', c=[dico_color[h] for h in y[idx]], edgecolor='k')
    # shared horizontal colorbar below the class maps
    ax = plt.axes([0,0,1,0.05])
    plt.title("Probability",fontsize=25)
    plt.colorbar(imshow_handle, cax=ax, orientation='horizontal')
    plt.show()
"matplotlib.pyplot.title",
"matplotlib.pyplot.axes",
"sklearn.metrics.accuracy_score",
"sklearn.tree.DecisionTreeClassifier",
"collections.defaultdict",
"matplotlib.pyplot.figure",
"numpy.arange",
"sklearn.svm.SVC",
"itertools.cycle",
"matplotlib.pyplot.tight_layout",
"numpy.unique",
"numpy.ze... | [((827, 889), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick"""'], {'color': '"""k"""', 'labelsize': '"""medium"""', 'direction': '"""in"""'}), "('xtick', color='k', labelsize='medium', direction='in')\n", (833, 889), True, 'import matplotlib.pyplot as plt\n'), ((890, 927), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick.major"""'], {'size': '(8)', 'pad': '(12)'}), "('xtick.major', size=8, pad=12)\n", (896, 927), True, 'import matplotlib.pyplot as plt\n'), ((928, 965), 'matplotlib.pyplot.rc', 'plt.rc', (['"""xtick.minor"""'], {'size': '(8)', 'pad': '(12)'}), "('xtick.minor', size=8, pad=12)\n", (934, 965), True, 'import matplotlib.pyplot as plt\n'), ((967, 1029), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick"""'], {'color': '"""k"""', 'labelsize': '"""medium"""', 'direction': '"""in"""'}), "('ytick', color='k', labelsize='medium', direction='in')\n", (973, 1029), True, 'import matplotlib.pyplot as plt\n'), ((1030, 1067), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick.major"""'], {'size': '(8)', 'pad': '(12)'}), "('ytick.major', size=8, pad=12)\n", (1036, 1067), True, 'import matplotlib.pyplot as plt\n'), ((1068, 1105), 'matplotlib.pyplot.rc', 'plt.rc', (['"""ytick.minor"""'], {'size': '(8)', 'pad': '(12)'}), "('ytick.minor', size=8, pad=12)\n", (1074, 1105), True, 'import matplotlib.pyplot as plt\n'), ((2580, 2637), 'sklearn.neighbors.KNeighborsClassifier', 'KNeighborsClassifier', ([], {'n_neighbors': 'n', 'weights': 'w', 'n_jobs': '(-1)'}), '(n_neighbors=n, weights=w, n_jobs=-1)\n', (2600, 2637), False, 'from sklearn.neighbors import KNeighborsClassifier\n'), ((3542, 3572), 'sklearn.metrics.roc_curve', 'roc_curve', (['y', 'y_score_logi_r_c'], {}), '(y, y_score_logi_r_c)\n', (3551, 3572), False, 'from sklearn.metrics import roc_curve, auc\n'), ((3596, 3627), 'sklearn.metrics.auc', 'auc', (['fpr_logi_r_c', 'tpr_logi_r_c'], {}), '(fpr_logi_r_c, tpr_logi_r_c)\n', (3599, 3627), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4494, 4520), 
'sklearn.multiclass.OneVsRestClassifier', 'OneVsRestClassifier', (['model'], {}), '(model)\n', (4513, 4520), False, 'from sklearn.multiclass import OneVsRestClassifier\n'), ((4968, 4999), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (4971, 4999), False, 'from sklearn.metrics import roc_curve, auc\n'), ((5192, 5214), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (5205, 5214), True, 'import numpy as np\n'), ((5445, 5476), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (5448, 5476), False, 'from sklearn.metrics import roc_curve, auc\n'), ((5956, 6003), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (5961, 6003), False, 'from itertools import cycle\n'), ((7026, 7078), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': 'p', 'C': 'c', 'multi_class': 'mult'}), '(penalty=p, C=c, multi_class=mult)\n', (7044, 7078), False, 'from sklearn.linear_model import LogisticRegression\n'), ((7810, 7845), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (7822, 7845), True, 'import matplotlib.pyplot as plt\n'), ((8655, 8674), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (8666, 8674), True, 'import numpy as np\n'), ((8768, 8793), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (8782, 8793), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((8937, 8955), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (8953, 8955), True, 'import matplotlib.pyplot as plt\n'), ((9091, 9134), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n_classes'], {'figsize': '(10, 5)'}), '(1, n_classes, figsize=(10, 5))\n', (9103, 9134), True, 'import matplotlib.pyplot as plt\n'), 
((10269, 10287), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (10285, 10287), True, 'import matplotlib.pyplot as plt\n'), ((10298, 10323), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 0.05]'], {}), '([0, 0, 1, 0.05])\n', (10306, 10323), True, 'import matplotlib.pyplot as plt\n'), ((10326, 10388), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imshow_handle'], {'cax': 'axo', 'orientation': '"""horizontal"""'}), "(imshow_handle, cax=axo, orientation='horizontal')\n", (10338, 10388), True, 'import matplotlib.pyplot as plt\n'), ((10663, 10696), 'sklearn.preprocessing.label_binarize', 'label_binarize', (['y_test'], {'classes': 'L'}), '(y_test, classes=L)\n', (10677, 10696), False, 'from sklearn.preprocessing import label_binarize\n'), ((11400, 11431), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (11403, 11431), False, 'from sklearn.metrics import roc_curve, auc\n'), ((11632, 11654), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (11645, 11654), True, 'import numpy as np\n'), ((11893, 11924), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (11896, 11924), False, 'from sklearn.metrics import roc_curve, auc\n'), ((11966, 11992), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (11976, 11992), True, 'import matplotlib.pyplot as plt\n'), ((12473, 12520), 'itertools.cycle', 'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (12478, 12520), False, 'from itertools import cycle\n'), ((12758, 12796), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (12766, 12796), True, 'import matplotlib.pyplot as plt\n'), ((12801, 12821), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (12809, 12821), True, 'import 
matplotlib.pyplot as plt\n'), ((12826, 12847), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (12834, 12847), True, 'import matplotlib.pyplot as plt\n'), ((12852, 12885), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (12862, 12885), True, 'import matplotlib.pyplot as plt\n'), ((12890, 12922), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (12900, 12922), True, 'import matplotlib.pyplot as plt\n'), ((12927, 13005), 'matplotlib.pyplot.title', 'plt.title', (['"""Multi class Receiver operating characteristic curve\nOnevsRest"""'], {}), '("""Multi class Receiver operating characteristic curve\nOnevsRest""")\n', (12936, 13005), True, 'import matplotlib.pyplot as plt\n'), ((13007, 13036), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (13017, 13036), True, 'import matplotlib.pyplot as plt\n'), ((13041, 13051), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (13049, 13051), True, 'import matplotlib.pyplot as plt\n'), ((13555, 13572), 'numpy.vstack', 'np.vstack', (['y_list'], {}), '(y_list)\n', (13564, 13572), True, 'import numpy as np\n'), ((14553, 14584), 'sklearn.metrics.auc', 'auc', (["fpr['micro']", "tpr['micro']"], {}), "(fpr['micro'], tpr['micro'])\n", (14556, 14584), False, 'from sklearn.metrics import roc_curve, auc\n'), ((14779, 14801), 'numpy.zeros_like', 'np.zeros_like', (['all_fpr'], {}), '(all_fpr)\n', (14792, 14801), True, 'import numpy as np\n'), ((15079, 15110), 'sklearn.metrics.auc', 'auc', (["fpr['macro']", "tpr['macro']"], {}), "(fpr['macro'], tpr['macro'])\n", (15082, 15110), False, 'from sklearn.metrics import roc_curve, auc\n'), ((15152, 15178), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(7, 7)'}), '(figsize=(7, 7))\n', (15162, 15178), True, 'import matplotlib.pyplot as plt\n'), ((15659, 15706), 'itertools.cycle', 
'cycle', (["['aqua', 'darkorange', 'cornflowerblue']"], {}), "(['aqua', 'darkorange', 'cornflowerblue'])\n", (15664, 15706), False, 'from itertools import cycle\n'), ((15978, 16016), 'matplotlib.pyplot.plot', 'plt.plot', (['[0, 1]', '[0, 1]', '"""k--"""'], {'lw': 'lw'}), "([0, 1], [0, 1], 'k--', lw=lw)\n", (15986, 16016), True, 'import matplotlib.pyplot as plt\n'), ((16021, 16041), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[0.0, 1.0]'], {}), '([0.0, 1.0])\n', (16029, 16041), True, 'import matplotlib.pyplot as plt\n'), ((16046, 16067), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.0, 1.05]'], {}), '([0.0, 1.05])\n', (16054, 16067), True, 'import matplotlib.pyplot as plt\n'), ((16072, 16105), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""False Positive Rate"""'], {}), "('False Positive Rate')\n", (16082, 16105), True, 'import matplotlib.pyplot as plt\n'), ((16110, 16142), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""True Positive Rate"""'], {}), "('True Positive Rate')\n", (16120, 16142), True, 'import matplotlib.pyplot as plt\n'), ((16240, 16317), 'matplotlib.pyplot.title', 'plt.title', (['"""Multi class Receiver operating characteristic curve\nOnevsOne"""'], {}), '("""Multi class Receiver operating characteristic curve\nOnevsOne""")\n', (16249, 16317), True, 'import matplotlib.pyplot as plt\n'), ((16319, 16348), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (16329, 16348), True, 'import matplotlib.pyplot as plt\n'), ((16353, 16363), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16361, 16363), True, 'import matplotlib.pyplot as plt\n'), ((16651, 16751), 'sklearn.svm.SVC', 'svm.SVC', ([], {'C': 'c', 'kernel': 'ker', 'degree': 'deg', 'gamma': 'gam', 'decision_function_shape': 'mult', 'probability': '(True)'}), '(C=c, kernel=ker, degree=deg, gamma=gam, decision_function_shape=\n mult, probability=True)\n', (16658, 16751), False, 'from sklearn import svm\n'), ((17356, 17391), 
'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(10, 5)'}), '(1, 2, figsize=(10, 5))\n', (17368, 17391), True, 'import matplotlib.pyplot as plt\n'), ((18238, 18257), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (18249, 18257), True, 'import numpy as np\n'), ((18351, 18376), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (18365, 18376), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((18520, 18538), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (18536, 18538), True, 'import matplotlib.pyplot as plt\n'), ((18674, 18717), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', 'n_classes'], {'figsize': '(10, 5)'}), '(1, n_classes, figsize=(10, 5))\n', (18686, 18717), True, 'import matplotlib.pyplot as plt\n'), ((19892, 19910), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (19908, 19910), True, 'import matplotlib.pyplot as plt\n'), ((19921, 19946), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 0.05]'], {}), '([0, 0, 1, 0.05])\n', (19929, 19946), True, 'import matplotlib.pyplot as plt\n'), ((19949, 20011), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imshow_handle'], {'cax': 'axo', 'orientation': '"""horizontal"""'}), "(imshow_handle, cax=axo, orientation='horizontal')\n", (19961, 20011), True, 'import matplotlib.pyplot as plt\n'), ((20211, 20339), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': 'crit', 'max_depth': 'maxd', 'min_samples_split': 'min_s', 'min_samples_leaf': 'min_l', 'max_features': 'max_f'}), '(criterion=crit, max_depth=maxd, min_samples_split=\n min_s, min_samples_leaf=min_l, max_features=max_f)\n', (20233, 20339), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((20543, 20561), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (20555, 20561), True, 'import matplotlib.pyplot as plt\n'), ((21005, 
21015), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (21013, 21015), True, 'import matplotlib.pyplot as plt\n'), ((21036, 21136), 'sklearn.tree.export_graphviz', 'tree.export_graphviz', (['models'], {'feature_names': "['x', 'y']", 'out_file': 'None', 'filled': '(True)', 'rounded': '(True)'}), "(models, feature_names=['x', 'y'], out_file=None,\n filled=True, rounded=True)\n", (21056, 21136), False, 'from sklearn import tree\n'), ((21272, 21311), 'pydotplus.graph_from_dot_data', 'pydotplus.graph_from_dot_data', (['dot_data'], {}), '(dot_data)\n', (21301, 21311), False, 'import pydotplus\n'), ((21362, 21391), 'collections.defaultdict', 'collections.defaultdict', (['list'], {}), '(list)\n', (21385, 21391), False, 'import collections\n'), ((22394, 22529), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', (['n_tree'], {'criterion': 'crit', 'max_depth': 'maxd', 'min_samples_split': 'min_s', 'min_samples_leaf': 'min_l', 'max_features': 'max_f'}), '(n_tree, criterion=crit, max_depth=maxd,\n min_samples_split=min_s, min_samples_leaf=min_l, max_features=max_f)\n', (22416, 22529), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((23692, 23711), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (23703, 23711), True, 'import numpy as np\n'), ((23805, 23830), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (23819, 23830), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((24897, 25025), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'criterion': 'crit', 'max_depth': 'maxd', 'min_samples_split': 'min_s', 'min_samples_leaf': 'min_l', 'max_features': 'max_f'}), '(criterion=crit, max_depth=maxd, min_samples_split=\n min_s, min_samples_leaf=min_l, max_features=max_f)\n', (24919, 25025), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((25725, 25735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25733, 
25735), True, 'import matplotlib.pyplot as plt\n'), ((25750, 25775), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 0.05]'], {}), '([0, 0, 1, 0.05])\n', (25758, 25775), True, 'import matplotlib.pyplot as plt\n'), ((25777, 25814), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability"""'], {'fontsize': '(25)'}), "('Probability', fontsize=25)\n", (25786, 25814), True, 'import matplotlib.pyplot as plt\n'), ((25818, 25879), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imshow_handle'], {'cax': 'ax', 'orientation': '"""horizontal"""'}), "(imshow_handle, cax=ax, orientation='horizontal')\n", (25830, 25879), True, 'import matplotlib.pyplot as plt\n'), ((25884, 25894), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (25892, 25894), True, 'import matplotlib.pyplot as plt\n'), ((26175, 26249), 'sklearn.ensemble.AdaBoostClassifier', 'AdaBoostClassifier', ([], {'n_estimators': 'n_estimators', 'learning_rate': 'learning_rate'}), '(n_estimators=n_estimators, learning_rate=learning_rate)\n', (26193, 26249), False, 'from sklearn.ensemble import AdaBoostClassifier\n'), ((26471, 26505), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (26483, 26505), True, 'import matplotlib.pyplot as plt\n'), ((27093, 27103), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (27101, 27103), True, 'import matplotlib.pyplot as plt\n'), ((27232, 27251), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (27243, 27251), True, 'import numpy as np\n'), ((27345, 27370), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (27359, 27370), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((27486, 27526), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10 * n_classes, 10)'}), '(figsize=(10 * n_classes, 10))\n', (27496, 27526), True, 'import matplotlib.pyplot as plt\n'), ((28240, 28265), 'matplotlib.pyplot.axes', 'plt.axes', 
(['[0, 0, 1, 0.05]'], {}), '([0, 0, 1, 0.05])\n', (28248, 28265), True, 'import matplotlib.pyplot as plt\n'), ((28267, 28304), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability"""'], {'fontsize': '(25)'}), "('Probability', fontsize=25)\n", (28276, 28304), True, 'import matplotlib.pyplot as plt\n'), ((28308, 28369), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imshow_handle'], {'cax': 'ax', 'orientation': '"""horizontal"""'}), "(imshow_handle, cax=ax, orientation='horizontal')\n", (28320, 28369), True, 'import matplotlib.pyplot as plt\n'), ((28375, 28385), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (28383, 28385), True, 'import matplotlib.pyplot as plt\n'), ((29074, 29285), 'sklearn.ensemble.GradientBoostingClassifier', 'GradientBoostingClassifier', ([], {'n_estimators': 'n_estimators', 'learning_rate': 'learning_rate', 'max_depth': 'max_depth', 'min_samples_split': 'min_samples_split', 'min_samples_leaf': 'min_samples_leaf', 'max_features': 'max_features'}), '(n_estimators=n_estimators, learning_rate=\n learning_rate, max_depth=max_depth, min_samples_split=min_samples_split,\n min_samples_leaf=min_samples_leaf, max_features=max_features)\n', (29100, 29285), False, 'from sklearn.ensemble import GradientBoostingClassifier\n'), ((29719, 29753), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {'figsize': '(5, 5)'}), '(1, 1, figsize=(5, 5))\n', (29731, 29753), True, 'import matplotlib.pyplot as plt\n'), ((30202, 30212), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (30210, 30212), True, 'import matplotlib.pyplot as plt\n'), ((30341, 30360), 'numpy.meshgrid', 'np.meshgrid', (['xx', 'yy'], {}), '(xx, yy)\n', (30352, 30360), True, 'import numpy as np\n'), ((30454, 30479), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y', 'y_pred'], {}), '(y, y_pred)\n', (30468, 30479), False, 'from sklearn.metrics import accuracy_score, confusion_matrix\n'), ((30595, 30635), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(10 * n_classes, 10)'}), '(figsize=(10 * n_classes, 10))\n', (30605, 30635), True, 'import matplotlib.pyplot as plt\n'), ((31367, 31392), 'matplotlib.pyplot.axes', 'plt.axes', (['[0, 0, 1, 0.05]'], {}), '([0, 0, 1, 0.05])\n', (31375, 31392), True, 'import matplotlib.pyplot as plt\n'), ((31394, 31431), 'matplotlib.pyplot.title', 'plt.title', (['"""Probability"""'], {'fontsize': '(25)'}), "('Probability', fontsize=25)\n", (31403, 31431), True, 'import matplotlib.pyplot as plt\n'), ((31435, 31496), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['imshow_handle'], {'cax': 'ax', 'orientation': '"""horizontal"""'}), "(imshow_handle, cax=ax, orientation='horizontal')\n", (31447, 31496), True, 'import matplotlib.pyplot as plt\n'), ((31502, 31512), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (31510, 31512), True, 'import matplotlib.pyplot as plt\n'), ((1613, 1641), 'numpy.linspace', 'np.linspace', (['x_min', 'x_max', 'n'], {}), '(x_min, x_max, n)\n', (1624, 1641), True, 'import numpy as np\n'), ((1668, 1696), 'numpy.linspace', 'np.linspace', (['y_min', 'y_max', 'n'], {}), '(y_min, y_max, n)\n', (1679, 1696), True, 'import numpy as np\n'), ((2784, 2802), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(1)'], {}), '(1, 1)\n', (2796, 2802), True, 'import matplotlib.pyplot as plt\n'), ((4744, 4777), 'sklearn.metrics.roc_curve', 'roc_curve', (['y[:, i]', 'y_score[:, i]'], {}), '(y[:, i], y_score[:, i])\n', (4753, 4777), False, 'from sklearn.metrics import roc_curve, auc\n'), ((4799, 4818), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (4802, 4818), False, 'from sklearn.metrics import roc_curve, auc\n'), ((5266, 5297), 'scipy.interp', 'interp', (['all_fpr', 'fpr[i]', 'tpr[i]'], {}), '(all_fpr, fpr[i], tpr[i])\n', (5272, 5297), False, 'from scipy import interp\n'), ((9049, 9066), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (9058, 9066), True, 'import numpy as np\n'), ((10739, 10878), 
'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': "grid_lr_acc_i.best_params_['model__penalty']", 'C': "grid_lr_acc_i.best_params_['model__C']", 'solver': '"""liblinear"""'}), "(penalty=grid_lr_acc_i.best_params_['model__penalty'], C=\n grid_lr_acc_i.best_params_['model__C'], solver='liblinear')\n", (10757, 10878), False, 'from sklearn.linear_model import LogisticRegression\n'), ((11172, 11205), 'sklearn.metrics.roc_curve', 'roc_curve', (['y[:, i]', 'y_score[:, i]'], {}), '(y[:, i], y_score[:, i])\n', (11181, 11205), False, 'from sklearn.metrics import roc_curve, auc\n'), ((11227, 11246), 'sklearn.metrics.auc', 'auc', (['fpr[i]', 'tpr[i]'], {}), '(fpr[i], tpr[i])\n', (11230, 11246), False, 'from sklearn.metrics import roc_curve, auc\n'), ((11706, 11737), 'scipy.interp', 'interp', (['all_fpr', 'fpr[j]', 'tpr[j]'], {}), '(all_fpr, fpr[j], tpr[j])\n', (11712, 11737), False, 'from scipy import interp\n'), ((13675, 13814), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'penalty': "grid_lr_acc_i.best_params_['model__penalty']", 'C': "grid_lr_acc_i.best_params_['model__C']", 'solver': '"""liblinear"""'}), "(penalty=grid_lr_acc_i.best_params_['model__penalty'], C=\n grid_lr_acc_i.best_params_['model__C'], solver='liblinear')\n", (13693, 13814), False, 'from sklearn.linear_model import LogisticRegression\n'), ((18632, 18649), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (18641, 18649), True, 'import numpy as np\n'), ((23914, 23931), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (23923, 23931), True, 'import numpy as np\n'), ((27454, 27471), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (27463, 27471), True, 'import numpy as np\n'), ((27563, 27595), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n_classes', '(k + 1)'], {}), '(1, n_classes, k + 1)\n', (27574, 27595), True, 'import matplotlib.pyplot as plt\n'), ((27856, 27870), 'matplotlib.pyplot.xticks', 'plt.xticks', 
(['()'], {}), '(())\n', (27866, 27870), True, 'import matplotlib.pyplot as plt\n'), ((27973, 27987), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (27983, 27987), True, 'import matplotlib.pyplot as plt\n'), ((30563, 30580), 'numpy.unique', 'np.unique', (['y_pred'], {}), '(y_pred)\n', (30572, 30580), True, 'import numpy as np\n'), ((30672, 30704), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', 'n_classes', '(k + 1)'], {}), '(1, n_classes, k + 1)\n', (30683, 30704), True, 'import matplotlib.pyplot as plt\n'), ((30983, 30997), 'matplotlib.pyplot.xticks', 'plt.xticks', (['()'], {}), '(())\n', (30993, 30997), True, 'import matplotlib.pyplot as plt\n'), ((31100, 31114), 'matplotlib.pyplot.yticks', 'plt.yticks', (['()'], {}), '(())\n', (31110, 31114), True, 'import matplotlib.pyplot as plt\n'), ((4451, 4477), 'numpy.arange', 'np.arange', (['(0)', 'n_classes', '(1)'], {}), '(0, n_classes, 1)\n', (4460, 4477), True, 'import numpy as np\n'), ((8553, 8563), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (8559, 8563), True, 'import numpy as np\n'), ((8567, 8577), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (8573, 8577), True, 'import numpy as np\n'), ((13613, 13639), 'numpy.arange', 'np.arange', (['(0)', 'n_classes', '(1)'], {}), '(0, n_classes, 1)\n', (13622, 13639), True, 'import numpy as np\n'), ((14255, 14288), 'sklearn.metrics.roc_curve', 'roc_curve', (['y[:, k]', 'y_score[:, k]'], {}), '(y[:, k], y_score[:, k])\n', (14264, 14288), False, 'from sklearn.metrics import roc_curve, auc\n'), ((14874, 14905), 'scipy.interp', 'interp', (['all_fpr', 'fpr[j]', 'tpr[j]'], {}), '(all_fpr, fpr[j], tpr[j])\n', (14880, 14905), False, 'from scipy import interp\n'), ((18136, 18146), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (18142, 18146), True, 'import numpy as np\n'), ((18150, 18160), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (18156, 18160), True, 'import numpy as np\n'), ((23590, 23600), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (23596, 
23600), True, 'import numpy as np\n'), ((23604, 23614), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (23610, 23614), True, 'import numpy as np\n'), ((27130, 27140), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (27136, 27140), True, 'import numpy as np\n'), ((27144, 27154), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (27150, 27154), True, 'import numpy as np\n'), ((27662, 27684), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Adaboost"""'], {}), "('Adaboost')\n", (27672, 27684), True, 'import matplotlib.pyplot as plt\n'), ((28128, 28234), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'s': '(100)', 'marker': '"""o"""', 'c': '[dico_color[h] for h in y[idx]]', 'edgecolor': '"""k"""'}), "(X[idx, 0], X[idx, 1], s=100, marker='o', c=[dico_color[h] for h in\n y[idx]], edgecolor='k')\n", (28139, 28234), True, 'import matplotlib.pyplot as plt\n'), ((30239, 30249), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (30245, 30249), True, 'import numpy as np\n'), ((30253, 30263), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (30259, 30263), True, 'import numpy as np\n'), ((30771, 30801), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Gradient Boosted"""'], {}), "('Gradient Boosted')\n", (30781, 30801), True, 'import matplotlib.pyplot as plt\n'), ((31255, 31361), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X[idx, 0]', 'X[idx, 1]'], {'s': '(100)', 'marker': '"""o"""', 'c': '[dico_color[h] for h in y[idx]]', 'edgecolor': '"""k"""'}), "(X[idx, 0], X[idx, 1], s=100, marker='o', c=[dico_color[h] for h in\n y[idx]], edgecolor='k')\n", (31266, 31361), True, 'import matplotlib.pyplot as plt\n'), ((8607, 8617), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (8613, 8617), True, 'import numpy as np\n'), ((8621, 8631), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (8627, 8631), True, 'import numpy as np\n'), ((13526, 13546), 'numpy.concatenate', 'np.concatenate', (['glen'], {}), '(glen)\n', (13540, 13546), True, 'import numpy as np\n'), ((18190, 18200), 
'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (18196, 18200), True, 'import numpy as np\n'), ((18204, 18214), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (18210, 18214), True, 'import numpy as np\n'), ((23020, 23037), 'numpy.ceil', 'np.ceil', (['(nCat / 4)'], {}), '(nCat / 4)\n', (23027, 23037), True, 'import numpy as np\n'), ((23644, 23654), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (23650, 23654), True, 'import numpy as np\n'), ((23658, 23668), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (23664, 23668), True, 'import numpy as np\n'), ((27184, 27194), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (27190, 27194), True, 'import numpy as np\n'), ((27198, 27208), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (27204, 27208), True, 'import numpy as np\n'), ((30293, 30303), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (30299, 30303), True, 'import numpy as np\n'), ((30307, 30317), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (30313, 30317), True, 'import numpy as np\n'), ((9484, 9494), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (9490, 9494), True, 'import numpy as np\n'), ((9498, 9508), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (9504, 9508), True, 'import numpy as np\n'), ((9534, 9544), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (9540, 9544), True, 'import numpy as np\n'), ((9548, 9558), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (9554, 9558), True, 'import numpy as np\n'), ((13441, 13457), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (13449, 13457), True, 'import numpy as np\n'), ((19067, 19077), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (19073, 19077), True, 'import numpy as np\n'), ((19081, 19091), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (19087, 19091), True, 'import numpy as np\n'), ((19117, 19127), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (19123, 19127), True, 'import numpy as np\n'), ((19131, 19141), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (19137, 19141), True, 'import numpy as np\n'), 
((24443, 24453), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (24449, 24453), True, 'import numpy as np\n'), ((24457, 24467), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (24463, 24467), True, 'import numpy as np\n'), ((24493, 24503), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (24499, 24503), True, 'import numpy as np\n'), ((24507, 24517), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (24513, 24517), True, 'import numpy as np\n'), ((27889, 27899), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (27895, 27899), True, 'import numpy as np\n'), ((27903, 27913), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (27909, 27913), True, 'import numpy as np\n'), ((27936, 27946), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (27942, 27946), True, 'import numpy as np\n'), ((27950, 27960), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (27956, 27960), True, 'import numpy as np\n'), ((31016, 31026), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (31022, 31026), True, 'import numpy as np\n'), ((31030, 31040), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (31036, 31040), True, 'import numpy as np\n'), ((31063, 31073), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (31069, 31073), True, 'import numpy as np\n'), ((31077, 31087), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (31083, 31087), True, 'import numpy as np\n'), ((9350, 9360), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (9356, 9360), True, 'import numpy as np\n'), ((9364, 9374), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (9370, 9374), True, 'import numpy as np\n'), ((9378, 9388), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (9384, 9388), True, 'import numpy as np\n'), ((9392, 9402), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (9398, 9402), True, 'import numpy as np\n'), ((9697, 9707), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (9703, 9707), True, 'import numpy as np\n'), ((9710, 9720), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (9716, 9720), True, 'import numpy as np\n'), ((18933, 18943), 
'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (18939, 18943), True, 'import numpy as np\n'), ((18947, 18957), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (18953, 18957), True, 'import numpy as np\n'), ((18961, 18971), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (18967, 18971), True, 'import numpy as np\n'), ((18975, 18985), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (18981, 18985), True, 'import numpy as np\n'), ((22974, 22991), 'numpy.ceil', 'np.ceil', (['(nCat / 4)'], {}), '(nCat / 4)\n', (22981, 22991), True, 'import numpy as np\n'), ((24231, 24241), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (24237, 24241), True, 'import numpy as np\n'), ((24245, 24255), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (24251, 24255), True, 'import numpy as np\n'), ((24302, 24312), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (24308, 24312), True, 'import numpy as np\n'), ((24316, 24326), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (24322, 24326), True, 'import numpy as np\n'), ((27761, 27771), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (27767, 27771), True, 'import numpy as np\n'), ((27775, 27785), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (27781, 27785), True, 'import numpy as np\n'), ((27789, 27799), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (27795, 27799), True, 'import numpy as np\n'), ((27803, 27813), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (27809, 27813), True, 'import numpy as np\n'), ((30878, 30888), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (30884, 30888), True, 'import numpy as np\n'), ((30892, 30902), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (30898, 30902), True, 'import numpy as np\n'), ((30906, 30916), 'numpy.min', 'np.min', (['X1'], {}), '(X1)\n', (30912, 30916), True, 'import numpy as np\n'), ((30920, 30930), 'numpy.max', 'np.max', (['X1'], {}), '(X1)\n', (30926, 30930), True, 'import numpy as np\n'), ((19316, 19326), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (19322, 19326), True, 'import numpy as np\n'), 
((19329, 19339), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (19335, 19339), True, 'import numpy as np\n'), ((9769, 9779), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (9775, 9779), True, 'import numpy as np\n'), ((9817, 9827), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (9823, 9827), True, 'import numpy as np\n'), ((19392, 19402), 'numpy.min', 'np.min', (['X0'], {}), '(X0)\n', (19398, 19402), True, 'import numpy as np\n'), ((19440, 19450), 'numpy.max', 'np.max', (['X0'], {}), '(X0)\n', (19446, 19450), True, 'import numpy as np\n')] |
import numpy as np
# Import only the sympy names actually used instead of the wildcard
# "from sympy import *", which floods the namespace and shadows builtins.
from sympy import symbols, diff, sin, cos
from scipy.optimize import approx_fprime

x = symbols('x')
y = symbols('y')

# First function: f(x, y) = 2*x**2*y**3 + 1/x + y**2*x + 7.
f = 2 * x ** 2 * y ** 3 + 1 / x + y ** 2 * x + 7
f1x = diff(f, x)  # partial derivative with respect to x
print(f1x)
f1y = diff(f, y)  # partial derivative with respect to y
print(f1y)

# Second function: f(x, y) = x**2*y - sin(x*y) + cos(x**2) + 6*y.
f = x ** 2 * y - sin(x * y) + cos(x ** 2) + 6 * y
f1x = diff(f, x)
print(f1x)
f1y = diff(f, y)
print(f1y)
def func(point):
    """Evaluate f(x, y) = 2*x**2*y**3 + 1/x + y**2*x + 7 at point = (x, y)."""
    px, py = point[0], point[1]
    return 2 * px ** 2 * py ** 3 + 1 / px + py ** 2 * px + 7

# Finite-difference step: the square root of machine epsilon is the usual
# choice for a forward-difference gradient.
eps = np.finfo(float).eps ** 0.5
grad = approx_fprime([1, 2], func, [eps, eps])
print(grad)
| [
"numpy.finfo",
"scipy.optimize.approx_fprime"
] | [((560, 599), 'scipy.optimize.approx_fprime', 'approx_fprime', (['[1, 2]', 'func', '[eps, eps]'], {}), '([1, 2], func, [eps, eps])\n', (573, 599), False, 'from scipy.optimize import approx_fprime\n'), ((532, 547), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (540, 547), True, 'import numpy as np\n')] |
import mmcv
import numpy as np
import torch
from mmdet3d.core import Box3DMode, CameraInstance3DBoxes, LiDARInstance3DBoxes
from mmdet3d.datasets import ObjectNoise, ObjectSample
def test_remove_points_in_boxes():
    """Points lying outside every box must all survive the removal filter."""
    # 10 lidar points, each row is (x, y, z, intensity).
    points = np.array([[68.1370, 3.3580, 2.5160, 0.0000],
                       [67.6970, 3.5500, 2.5010, 0.0000],
                       [67.6490, 3.7600, 2.5000, 0.0000],
                       [66.4140, 3.9010, 2.4590, 0.0000],
                       [66.0120, 4.0850, 2.4460, 0.0000],
                       [65.8340, 4.1780, 2.4400, 0.0000],
                       [65.8410, 4.3860, 2.4400, 0.0000],
                       [65.7450, 4.5870, 2.4380, 0.0000],
                       [65.5510, 4.7800, 2.4320, 0.0000],
                       [65.4860, 4.9820, 2.4300, 0.0000]])
    # 10 boxes, each row is (x, y, z, w, l, h, yaw); all centred well away
    # from the points above, so no point falls inside any box.
    boxes = np.array(
        [[30.0285, 10.5110, -1.5304, 0.5100, 0.8700, 1.6000, 1.6400],
         [7.8369, 1.6053, -1.5605, 0.5800, 1.2300, 1.8200, -3.1000],
         [10.8740, -1.0827, -1.3310, 0.6000, 0.5200, 1.7100, 1.3500],
         [14.9783, 2.2466, -1.4950, 0.6100, 0.7300, 1.5300, -1.9200],
         [11.0656, 0.6195, -1.5202, 0.6600, 1.0100, 1.7600, -1.4600],
         [10.5994, -7.9049, -1.4980, 0.5300, 1.9600, 1.6800, 1.5600],
         [28.7068, -8.8244, -1.1485, 0.6500, 1.7900, 1.7500, 3.1200],
         [20.2630, 5.1947, -1.4799, 0.7300, 1.7600, 1.7300, 1.5100],
         [18.2496, 3.1887, -1.6109, 0.5600, 1.6800, 1.7100, 1.5600],
         [7.7396, -4.3245, -1.5801, 0.5600, 1.7900, 1.8000, -0.8300]])
    points = ObjectSample.remove_points_in_boxes(points, boxes)
    # All 10 input points are expected to remain.
    assert points.shape == (10, 4)
def test_object_sample():
    """End-to-end check of the ObjectSample augmentation on KITTI test data.

    Samples ground-truth pedestrians from a database into a point cloud and
    verifies the resulting points, boxes, labels and the transform's repr.
    """
    import pickle
    # Database sampler configuration: only pedestrians with >= 10 points and
    # any difficulty are kept; up to 6 pedestrians are sampled per scene.
    db_sampler = mmcv.ConfigDict({
        'data_root': './tests/data/kitti/',
        'info_path': './tests/data/kitti/kitti_dbinfos_train.pkl',
        'rate': 1.0,
        'prepare': {
            'filter_by_difficulty': [-1],
            'filter_by_min_points': {
                'Pedestrian': 10
            }
        },
        'classes': ['Pedestrian', 'Cyclist', 'Car'],
        'sample_groups': {
            'Pedestrian': 6
        }
    })
    with open('./tests/data/kitti/kitti_dbinfos_train.pkl', 'rb') as f:
        db_infos = pickle.load(f)
    # Fix the RNG so the sampled objects (and hence the assertions) are
    # deterministic.
    np.random.seed(0)
    object_sample = ObjectSample(db_sampler)
    # Raw lidar points: flat float32 file reshaped to (N, 4).
    points = np.fromfile(
        './tests/data/kitti/training/velodyne_reduced/000000.bin',
        np.float32).reshape(-1, 4)
    annos = mmcv.load('./tests/data/kitti/kitti_infos_train.pkl')
    info = annos[0]
    annos = info['annos']
    gt_names = annos['name']
    # Use the first pedestrian box from the database as the initial GT box.
    gt_bboxes_3d = db_infos['Pedestrian'][0]['box3d_lidar']
    gt_bboxes_3d = LiDARInstance3DBoxes([gt_bboxes_3d])
    CLASSES = ('Car', 'Pedestrian', 'Cyclist')
    # Map category names to class indices; unknown categories become -1.
    gt_labels = []
    for cat in gt_names:
        if cat in CLASSES:
            gt_labels.append(CLASSES.index(cat))
        else:
            gt_labels.append(-1)
    input_dict = dict(
        points=points, gt_bboxes_3d=gt_bboxes_3d, gt_labels_3d=gt_labels)
    input_dict = object_sample(input_dict)
    points = input_dict['points']
    gt_bboxes_3d = input_dict['gt_bboxes_3d']
    gt_labels_3d = input_dict['gt_labels_3d']
    # The repr must reflect the sampler configuration verbatim.
    repr_str = repr(object_sample)
    expected_repr_str = 'ObjectSample sample_2d=False, ' \
                        'data_root=./tests/data/kitti/, ' \
                        'info_path=./tests/data/kitti/kitti' \
                        '_dbinfos_train.pkl, rate=1.0, ' \
                        'prepare={\'filter_by_difficulty\': [-1], ' \
                        '\'filter_by_min_points\': {\'Pedestrian\': 10}}, ' \
                        'classes=[\'Pedestrian\', \'Cyclist\', \'Car\'], ' \
                        'sample_groups={\'Pedestrian\': 6}'
    assert repr_str == expected_repr_str
    # With seed 0 the augmented scene contains 1177 points and 2 boxes
    # (one sampled pedestrian plus the original one).
    assert points.shape == (1177, 4)
    assert gt_bboxes_3d.tensor.shape == (2, 7)
    assert np.all(gt_labels_3d == [1, 0])
def test_object_noise():
    """Check the ObjectNoise augmentation on a KITTI sample.

    Loads a point cloud and its GT boxes (converted from camera to lidar
    coordinates), applies per-object noise with a fixed seed, and verifies
    the perturbed box, the point count and the transform's repr.
    """
    # Fix the RNG so the noise (and hence the expected box) is deterministic.
    np.random.seed(0)
    object_noise = ObjectNoise()
    # Raw lidar points: flat float32 file reshaped to (N, 4).
    points = np.fromfile(
        './tests/data/kitti/training/velodyne_reduced/000000.bin',
        np.float32).reshape(-1, 4)
    annos = mmcv.load('./tests/data/kitti/kitti_infos_train.pkl')
    info = annos[0]
    # Calibration: rectification and velodyne-to-camera matrices.
    rect = info['calib']['R0_rect'].astype(np.float32)
    Trv2c = info['calib']['Tr_velo_to_cam'].astype(np.float32)
    annos = info['annos']
    loc = annos['location']
    dims = annos['dimensions']
    rots = annos['rotation_y']
    # Assemble (x, y, z, w, l, h, yaw) boxes in camera coordinates.
    gt_bboxes_3d = np.concatenate([loc, dims, rots[..., np.newaxis]],
                                  axis=1).astype(np.float32)
    # Convert camera-frame boxes into the lidar frame.
    gt_bboxes_3d = CameraInstance3DBoxes(gt_bboxes_3d).convert_to(
        Box3DMode.LIDAR, np.linalg.inv(rect @ Trv2c))
    input_dict = dict(points=points, gt_bboxes_3d=gt_bboxes_3d)
    input_dict = object_noise(input_dict)
    points = input_dict['points']
    gt_bboxes_3d = input_dict['gt_bboxes_3d'].tensor
    # Expected perturbed box with seed 0.
    expected_gt_bboxes_3d = torch.tensor(
        [[9.1724, -1.7559, -1.3550, 0.4800, 1.2000, 1.8900, 0.0505]])
    # The repr must reflect the default noise parameters verbatim.
    repr_str = repr(object_noise)
    expected_repr_str = 'ObjectNoise(num_try=100, ' \
                        'translation_std=[0.25, 0.25, 0.25], ' \
                        'global_rot_range=[0.0, 0.0], ' \
                        'rot_range=[-0.15707963267, 0.15707963267])'
    assert repr_str == expected_repr_str
    assert points.shape == (800, 4)
    assert torch.allclose(gt_bboxes_3d, expected_gt_bboxes_3d, 1e-3)
| [
"mmcv.ConfigDict",
"numpy.random.seed",
"mmcv.load",
"mmdet3d.datasets.ObjectSample.remove_points_in_boxes",
"mmdet3d.core.LiDARInstance3DBoxes",
"torch.allclose",
"numpy.fromfile",
"numpy.concatenate",
"pickle.load",
"numpy.array",
"numpy.linalg.inv",
"torch.tensor",
"mmdet3d.datasets.Objec... | [((230, 541), 'numpy.array', 'np.array', (['[[68.137, 3.358, 2.516, 0.0], [67.697, 3.55, 2.501, 0.0], [67.649, 3.76, \n 2.5, 0.0], [66.414, 3.901, 2.459, 0.0], [66.012, 4.085, 2.446, 0.0], [\n 65.834, 4.178, 2.44, 0.0], [65.841, 4.386, 2.44, 0.0], [65.745, 4.587, \n 2.438, 0.0], [65.551, 4.78, 2.432, 0.0], [65.486, 4.982, 2.43, 0.0]]'], {}), '([[68.137, 3.358, 2.516, 0.0], [67.697, 3.55, 2.501, 0.0], [67.649,\n 3.76, 2.5, 0.0], [66.414, 3.901, 2.459, 0.0], [66.012, 4.085, 2.446, \n 0.0], [65.834, 4.178, 2.44, 0.0], [65.841, 4.386, 2.44, 0.0], [65.745, \n 4.587, 2.438, 0.0], [65.551, 4.78, 2.432, 0.0], [65.486, 4.982, 2.43, 0.0]]\n )\n', (238, 541), True, 'import numpy as np\n'), ((811, 1369), 'numpy.array', 'np.array', (['[[30.0285, 10.511, -1.5304, 0.51, 0.87, 1.6, 1.64], [7.8369, 1.6053, -\n 1.5605, 0.58, 1.23, 1.82, -3.1], [10.874, -1.0827, -1.331, 0.6, 0.52, \n 1.71, 1.35], [14.9783, 2.2466, -1.495, 0.61, 0.73, 1.53, -1.92], [\n 11.0656, 0.6195, -1.5202, 0.66, 1.01, 1.76, -1.46], [10.5994, -7.9049, \n -1.498, 0.53, 1.96, 1.68, 1.56], [28.7068, -8.8244, -1.1485, 0.65, 1.79,\n 1.75, 3.12], [20.263, 5.1947, -1.4799, 0.73, 1.76, 1.73, 1.51], [\n 18.2496, 3.1887, -1.6109, 0.56, 1.68, 1.71, 1.56], [7.7396, -4.3245, -\n 1.5801, 0.56, 1.79, 1.8, -0.83]]'], {}), '([[30.0285, 10.511, -1.5304, 0.51, 0.87, 1.6, 1.64], [7.8369, \n 1.6053, -1.5605, 0.58, 1.23, 1.82, -3.1], [10.874, -1.0827, -1.331, 0.6,\n 0.52, 1.71, 1.35], [14.9783, 2.2466, -1.495, 0.61, 0.73, 1.53, -1.92],\n [11.0656, 0.6195, -1.5202, 0.66, 1.01, 1.76, -1.46], [10.5994, -7.9049,\n -1.498, 0.53, 1.96, 1.68, 1.56], [28.7068, -8.8244, -1.1485, 0.65, 1.79,\n 1.75, 3.12], [20.263, 5.1947, -1.4799, 0.73, 1.76, 1.73, 1.51], [\n 18.2496, 3.1887, -1.6109, 0.56, 1.68, 1.71, 1.56], [7.7396, -4.3245, -\n 1.5801, 0.56, 1.79, 1.8, -0.83]])\n', (819, 1369), True, 'import numpy as np\n'), ((1533, 1583), 'mmdet3d.datasets.ObjectSample.remove_points_in_boxes', 
'ObjectSample.remove_points_in_boxes', (['points', 'boxes'], {}), '(points, boxes)\n', (1568, 1583), False, 'from mmdet3d.datasets import ObjectNoise, ObjectSample\n'), ((1682, 1994), 'mmcv.ConfigDict', 'mmcv.ConfigDict', (["{'data_root': './tests/data/kitti/', 'info_path':\n './tests/data/kitti/kitti_dbinfos_train.pkl', 'rate': 1.0, 'prepare': {\n 'filter_by_difficulty': [-1], 'filter_by_min_points': {'Pedestrian': 10\n }}, 'classes': ['Pedestrian', 'Cyclist', 'Car'], 'sample_groups': {\n 'Pedestrian': 6}}"], {}), "({'data_root': './tests/data/kitti/', 'info_path':\n './tests/data/kitti/kitti_dbinfos_train.pkl', 'rate': 1.0, 'prepare': {\n 'filter_by_difficulty': [-1], 'filter_by_min_points': {'Pedestrian': 10\n }}, 'classes': ['Pedestrian', 'Cyclist', 'Car'], 'sample_groups': {\n 'Pedestrian': 6}})\n", (1697, 1994), False, 'import mmcv\n'), ((2226, 2243), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (2240, 2243), True, 'import numpy as np\n'), ((2264, 2288), 'mmdet3d.datasets.ObjectSample', 'ObjectSample', (['db_sampler'], {}), '(db_sampler)\n', (2276, 2288), False, 'from mmdet3d.datasets import ObjectNoise, ObjectSample\n'), ((2429, 2482), 'mmcv.load', 'mmcv.load', (['"""./tests/data/kitti/kitti_infos_train.pkl"""'], {}), "('./tests/data/kitti/kitti_infos_train.pkl')\n", (2438, 2482), False, 'import mmcv\n'), ((2637, 2673), 'mmdet3d.core.LiDARInstance3DBoxes', 'LiDARInstance3DBoxes', (['[gt_bboxes_3d]'], {}), '([gt_bboxes_3d])\n', (2657, 2673), False, 'from mmdet3d.core import Box3DMode, CameraInstance3DBoxes, LiDARInstance3DBoxes\n'), ((3851, 3881), 'numpy.all', 'np.all', (['(gt_labels_3d == [1, 0])'], {}), '(gt_labels_3d == [1, 0])\n', (3857, 3881), True, 'import numpy as np\n'), ((3913, 3930), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3927, 3930), True, 'import numpy as np\n'), ((3950, 3963), 'mmdet3d.datasets.ObjectNoise', 'ObjectNoise', ([], {}), '()\n', (3961, 3963), False, 'from mmdet3d.datasets import ObjectNoise, 
ObjectSample\n'), ((4104, 4157), 'mmcv.load', 'mmcv.load', (['"""./tests/data/kitti/kitti_infos_train.pkl"""'], {}), "('./tests/data/kitti/kitti_infos_train.pkl')\n", (4113, 4157), False, 'import mmcv\n'), ((4885, 4951), 'torch.tensor', 'torch.tensor', (['[[9.1724, -1.7559, -1.355, 0.48, 1.2, 1.89, 0.0505]]'], {}), '([[9.1724, -1.7559, -1.355, 0.48, 1.2, 1.89, 0.0505]])\n', (4897, 4951), False, 'import torch\n'), ((5338, 5396), 'torch.allclose', 'torch.allclose', (['gt_bboxes_3d', 'expected_gt_bboxes_3d', '(0.001)'], {}), '(gt_bboxes_3d, expected_gt_bboxes_3d, 0.001)\n', (5352, 5396), False, 'import torch\n'), ((2207, 2221), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (2218, 2221), False, 'import pickle\n'), ((4635, 4662), 'numpy.linalg.inv', 'np.linalg.inv', (['(rect @ Trv2c)'], {}), '(rect @ Trv2c)\n', (4648, 4662), True, 'import numpy as np\n'), ((2302, 2389), 'numpy.fromfile', 'np.fromfile', (['"""./tests/data/kitti/training/velodyne_reduced/000000.bin"""', 'np.float32'], {}), "('./tests/data/kitti/training/velodyne_reduced/000000.bin', np.\n float32)\n", (2313, 2389), True, 'import numpy as np\n'), ((3977, 4064), 'numpy.fromfile', 'np.fromfile', (['"""./tests/data/kitti/training/velodyne_reduced/000000.bin"""', 'np.float32'], {}), "('./tests/data/kitti/training/velodyne_reduced/000000.bin', np.\n float32)\n", (3988, 4064), True, 'import numpy as np\n'), ((4431, 4489), 'numpy.concatenate', 'np.concatenate', (['[loc, dims, rots[..., np.newaxis]]'], {'axis': '(1)'}), '([loc, dims, rots[..., np.newaxis]], axis=1)\n', (4445, 4489), True, 'import numpy as np\n'), ((4562, 4597), 'mmdet3d.core.CameraInstance3DBoxes', 'CameraInstance3DBoxes', (['gt_bboxes_3d'], {}), '(gt_bboxes_3d)\n', (4583, 4597), False, 'from mmdet3d.core import Box3DMode, CameraInstance3DBoxes, LiDARInstance3DBoxes\n')] |
# -*- coding: utf-8 -*-
"""
Created on Tue May 14 13:21:11 2019
@author: Garnet
"""
import mido
import numpy as np
INPUT_SIZE = 10
ACCEPTED_MESSAGES = ['end_of_track', 'program_change', 'control_change', 'note_on']
def merge_tracks(track1, track2):
    """Merge two mido tracks into one, interleaving messages by absolute time.

    Delta times of the merged messages are recomputed so that each message
    keeps its original absolute position.  Meta messages from the inputs are
    skipped (except for the generated track_name header).
    """
    merge_time = t1_time = t2_time = 0 #absolute time (midi ticks) into each track
    i = j = 0 #index in track1 and track2 respectively
    result = mido.MidiTrack()
    name = mido.MetaMessage('track_name',
                     name = track1.name + " and " + track2.name)
    result.append(name)
    # Put messages in order until we exhaust one track
    while i < len(track1) and j < len(track2):
        #Skip metamessages
        # NOTE(review): a skipped meta message's delta time is discarded,
        # which shifts all later events of that track earlier -- confirm
        # this is intended.
        if track1[i].is_meta:
            i += 1
        elif track2[j].is_meta:
            j += 1
        else:
            # Pick whichever track's next message occurs first in absolute
            # time; ties go to track1.
            if track1[i].time + t1_time <= track2[j].time + t2_time:
                time_dif = t1_time + track1[i].time - merge_time
                result.append(track1[i])
                t1_time += track1[i].time
                i += 1
            else:
                time_dif = t2_time + track2[j].time - merge_time
                result.append(track2[j])
                t2_time += track2[j].time
                j += 1
            # Rewrite the appended message's delta relative to the merge.
            result[-1].time = time_dif
            merge_time += time_dif
    # Append the rest of the un-exhausted track
    # We only need to fix the time value for the first note
    # The rest already have the correct time relative to the previous note
    # NOTE(review): if both tracks are empty, the else branch indexes
    # track2[0] and raises IndexError -- confirm callers never pass two
    # empty tracks.
    if i < len(track1):
        time_dif = t1_time + track1[i].time - merge_time
        result.append(track1[i])
        result[-1].time = time_dif
        for x in range(i+1, len(track1)):
            if not track1[x].is_meta:
                result.append(track1[x])
    else:
        time_dif = t2_time + track2[j].time - merge_time
        result.append(track2[j])
        result[-1].time = time_dif
        for x in range(j+1, len(track2)):
            if not track2[x].is_meta:
                result.append(track2[x])
    return result
def get_merged_piano_tracks(mid):
    """Collapse all piano tracks of a midi file into a single merged track.

    A track counts as a piano track when its name contains 'piano'
    (case-insensitive) and it holds more than 10 messages.  Returns None
    when no such track exists.
    """
    candidates = [t for t in mid.tracks
                  if len(t) > 10 and 'piano' in t.name.lower()]
    if not candidates:
        return None
    # Fold the remaining tracks into the first one, last track first
    # (mirrors the original pop(-1) merge order).
    merged = candidates[0]
    for extra in reversed(candidates[1:]):
        merged = merge_tracks(merged, extra)
    return merged
def tensor_to_msg_list(tensor):
    """Convert a cleaned tensor back into mido message objects.

    A 1-D tensor of length INPUT_SIZE yields a single mido.Message (or
    MetaMessage for end_of_track); a 2-D tensor yields a list of messages,
    one per row.  The tensor must be passed through clean_tensor() first,
    otherwise mido may reject out-of-range values.
    """
    if tensor.shape != (INPUT_SIZE, ):
        # Multi-row input: convert every row recursively.
        return [tensor_to_msg_list(row) for row in tensor]
    kind = ACCEPTED_MESSAGES[tensor[:4].argmax()]
    if kind == "end_of_track":
        return mido.MetaMessage("end_of_track")
    msg = mido.Message(kind, time=tensor[4])
    # Fill in the value fields that belong to this message type.
    if kind == 'program_change':
        msg.program = int(tensor[5])
    elif kind == 'control_change':
        msg.control = int(tensor[6])
        msg.value = int(tensor[7])
    elif kind == 'note_on':
        msg.note = int(tensor[8])
        msg.velocity = int(tensor[9])
    return msg
def clean_tensor(tensor, track_time):
    """Rescale a network-output tensor (values in [0, 1]) into valid
    integer midi ranges.

    Exactly one of tensor[n][:4] is treated as 1 (the largest value), the
    rest as 0.  tensor[4] is clamped to [0, 1] and rescaled to ticks by
    track_time.  Value fields are only populated for the chosen type:
      * program_change -> index 5       in [0, 127]
      * control_change -> indices 6..7  in [0, 127]
      * note_on        -> indices 8..9  in [0, 127]

    Returns an int array of shape (1, INPUT_SIZE) for a single message,
    or (len(tensor), INPUT_SIZE) for a whole track.  Unlike the previous
    implementation, the input tensor is not modified in place.
    """
    if tensor.shape == (INPUT_SIZE, ):
        row = np.zeros([1, INPUT_SIZE], dtype=int)
        # Clamp the time fraction and rescale to the track's total ticks.
        time_frac = max(0, min(1, tensor[4]))
        row[0][4] = round(time_frac * track_time)
        # A midi message can only have one type: one-hot the largest score
        # (row is zero-initialised, so only the winning index is set).
        msg_type = tensor[:4].argmax()
        row[0][msg_type] = 1
        if msg_type == 1:  # program_change
            row[0][5] = round(max(0, min(1, tensor[5])) * 127)
        elif msg_type == 2:  # control_change
            row[0][6] = round(max(0, min(1, tensor[6])) * 127)
            row[0][7] = round(max(0, min(1, tensor[7])) * 127)
        elif msg_type == 3:  # note_on
            row[0][8] = round(max(0, min(1, tensor[8])) * 127)
            row[0][9] = round(max(0, min(1, tensor[9])) * 127)
        return row
    # Whole track: clean each row, then stack once.  This replaces the
    # original O(n^2) pattern of growing the result with np.concatenate
    # inside the loop (plus a dummy first row that had to be deleted).
    rows = [clean_tensor(row, track_time) for row in tensor]
    if not rows:
        return np.zeros([0, INPUT_SIZE], dtype=int)
    return np.concatenate(rows, axis=0)
def track_to_tensor(track):
    """Convert a mido midi track into a float tensor for the network.

    Row layout (all values scaled into [0, 1]):
      [0:4]  one-hot message type (order of ACCEPTED_MESSAGES)
      [4]    message delta time as a fraction of the track's total time
      [5]    program           (program_change)
      [6:8]  control, value    (control_change)
      [8:10] note, velocity    (note_on)
    A final end_of_track row is always appended.
    """
    # Keep only the message types the network understands.
    kept = [msg for msg in track if msg.type in ACCEPTED_MESSAGES]
    total_time = sum(msg.time for msg in kept)
    # One extra row for the trailing 'end_of_track' marker.
    tensor = np.zeros((len(kept) + 1, INPUT_SIZE))
    for i, msg in enumerate(kept):
        tensor[i][ACCEPTED_MESSAGES.index(msg.type)] = 1
        tensor[i][4] = msg.time / total_time
        if msg.type == 'program_change':
            tensor[i][5] = msg.program / 127
        if msg.type == 'control_change':
            tensor[i][6] = msg.control / 127
            tensor[i][7] = msg.value / 127
        if msg.type == 'note_on':
            tensor[i][8] = msg.note / 127
            tensor[i][9] = msg.velocity / 127
    # Terminal end_of_track row (type index 0, everything else zero).
    tensor[-1][0] = 1
    return tensor
def tensor_to_midi(tensor, track_time, name=None):
    """Convert a raw network-output tensor into a mido.MidiFile.

    The tensor is first passed through clean_tensor() (scaled by
    track_time ticks) and then decoded into mido messages on a single
    new track.

    Parameters:
        tensor: raw network output rows (one row per message).
        track_time: total track length in ticks, used to rescale times.
        name: if given, the file is also saved as "<name>.mid".

    Returns the constructed mido.MidiFile.
    """
    tensor = clean_tensor(tensor, track_time)
    track = tensor_to_msg_list(tensor)
    mid = mido.MidiFile()
    mid.add_track()
    # NOTE(review): the track name is hard-coded to "testtrack" even when a
    # file name is supplied -- confirm whether it should use `name` instead.
    mid.tracks[0].append(mido.MetaMessage('track_name', name="testtrack"))
    for msg in track:
        mid.tracks[0].append(msg)
    # Identity comparison is the idiomatic way to test against None.
    if name is not None:
        mid.save("{}.mid".format(name))
    return mid
if __name__ == "__main__":
    # Load a test file, merge its piano tracks, dump the messages for
    # inspection and round-trip the track through the tensor encoding.
    mid = mido.MidiFile("./chopin/chp_op18.mid")
    track = get_merged_piano_tracks(mid)
    total_time = 0
    # Use a context manager so the dump file is closed even on error.
    with open("chp_op18_msgs.txt", "w") as fd:
        for msg in track:
            fd.write(str(msg))
            fd.write("\n")
            total_time += msg.time
    tensor = track_to_tensor(track)
    conv_mid = tensor_to_midi(tensor, total_time, 'test')
| [
"mido.MidiFile",
"mido.Message",
"numpy.zeros",
"mido.MetaMessage",
"numpy.reshape",
"mido.MidiTrack",
"numpy.delete",
"numpy.concatenate"
] | [((413, 429), 'mido.MidiTrack', 'mido.MidiTrack', ([], {}), '()\n', (427, 429), False, 'import mido\n'), ((442, 514), 'mido.MetaMessage', 'mido.MetaMessage', (['"""track_name"""'], {'name': "(track1.name + ' and ' + track2.name)"}), "('track_name', name=track1.name + ' and ' + track2.name)\n", (458, 514), False, 'import mido\n'), ((4229, 4265), 'numpy.zeros', 'np.zeros', (['[1, INPUT_SIZE]'], {'dtype': 'int'}), '([1, INPUT_SIZE], dtype=int)\n', (4237, 4265), True, 'import numpy as np\n'), ((6375, 6407), 'numpy.zeros', 'np.zeros', (['(messages, INPUT_SIZE)'], {}), '((messages, INPUT_SIZE))\n', (6383, 6407), True, 'import numpy as np\n'), ((7399, 7414), 'mido.MidiFile', 'mido.MidiFile', ([], {}), '()\n', (7412, 7414), False, 'import mido\n'), ((7696, 7734), 'mido.MidiFile', 'mido.MidiFile', (['"""./chopin/chp_op18.mid"""'], {}), "('./chopin/chp_op18.mid')\n", (7709, 7734), False, 'import mido\n'), ((5672, 5695), 'numpy.delete', 'np.delete', (['result', '(0)', '(0)'], {}), '(result, 0, 0)\n', (5681, 5695), True, 'import numpy as np\n'), ((7462, 7510), 'mido.MetaMessage', 'mido.MetaMessage', (['"""track_name"""'], {'name': '"""testtrack"""'}), "('track_name', name='testtrack')\n", (7478, 7510), False, 'import mido\n'), ((3145, 3177), 'mido.MetaMessage', 'mido.MetaMessage', (['"""end_of_track"""'], {}), "('end_of_track')\n", (3161, 3177), False, 'import mido\n'), ((3212, 3249), 'mido.Message', 'mido.Message', (['msg_type'], {'time': 'msg_time'}), '(msg_type, time=msg_time)\n', (3224, 3249), False, 'import mido\n'), ((5565, 5595), 'numpy.reshape', 'np.reshape', (['x', '[1, INPUT_SIZE]'], {}), '(x, [1, INPUT_SIZE])\n', (5575, 5595), True, 'import numpy as np\n'), ((5618, 5653), 'numpy.concatenate', 'np.concatenate', (['(result, x)'], {'axis': '(0)'}), '((result, x), axis=0)\n', (5632, 5653), True, 'import numpy as np\n')] |
import os
from shutil import copyfile
# NOTE(review): the os.getcwd() result is discarded -- this call has no
# effect; confirm whether it was meant to be printed or used.
os.getcwd()
# Deploy the freshly built extension module next to this script so that
# "import qip" below picks it up.
copyfile('..\\x64\\Release\\qip.dll','qip.pyd')
from datetime import datetime
import numpy, pandas,uuid
import qip
# tmp = numpy.repeat(numpy.datetime64(datetime.now(),'D'),int(1e7))
# "Now" captured once per datetime64 resolution; measure3 subtracts these
# to build timedelta64 test arrays.
nnows = dict(zip(["s","m","ms","ns"],[numpy.datetime64(datetime.now(),dtype0) for dtype0 in ["s","m","ms","ns"] ]))
# Connection handle to a server on port 8888 (presumably a kdb+/q process,
# given the '{0N!x}' lambdas below -- verify).
hdl = qip.open_connection(8888)
def measure(dtype0):
    """Round-trip a 10M-element float array of the given dtype through the
    server and print the elapsed time plus a correctness flag."""
    tmp = numpy.linspace(1., 100., int(1e7), dtype=dtype0)
    start_time = datetime.now()
    result = qip.execute(hdl,'{0N!x}',tmp)
    time_elapsed = datetime.now() - start_time
    isSame = result == tmp
    # .all(): report success only when EVERY element survived the round
    # trip (the previous .any() reported True if a single element matched).
    print('Time elapsed for {} : {} : {}'.format(str(dtype0), time_elapsed, isSame.all()))
def measure1(dtype0):
    """Round-trip a 10M-element integer array of the given dtype through
    the server and print the elapsed time plus a correctness flag."""
    tmp = numpy.linspace(1, int(1e7), int(1e7), dtype=dtype0)
    start_time = datetime.now()
    result = qip.execute(hdl,'{0N!x}',tmp)
    time_elapsed = datetime.now() - start_time
    isSame = result == tmp
    # .all(): report success only when EVERY element survived the round
    # trip (the previous .any() reported True if a single element matched).
    print('Time elapsed for {} : {} : {}'.format(str(dtype0), time_elapsed, isSame.all()))
def measure2(dtype0):
    """Round-trip a 10M-element datetime64 array at the given resolution
    through the server and print the elapsed time plus a correctness flag
    (values within 1000 units are accepted, allowing resolution loss)."""
    tmp = numpy.repeat(numpy.datetime64(datetime.now(),dtype0),int(1e7))
    start_time = datetime.now()
    result = qip.execute(hdl,'{0N!x}',tmp)
    time_elapsed = datetime.now() - start_time
    isSame = abs((result - tmp).astype(int)) < 1000
    # .all(): report success only when EVERY element is within tolerance
    # (the previous .any() reported True if a single element matched).
    print('Time elapsed for {} : {} : {} '.format(str(tmp.dtype.name), time_elapsed, isSame.all()))
def measure3(dtype0):
    """Round-trip a 10M-element timedelta64 array (now minus the module's
    cached 'now' per resolution) through the server and print the elapsed
    time plus a correctness flag (within 1000 units)."""
    tmp = numpy.repeat(numpy.datetime64(datetime.now(),dtype0),int(1e7)) - nnows[dtype0]
    start_time = datetime.now()
    result = qip.execute(hdl,'{0N!x}',tmp)
    time_elapsed = datetime.now() - start_time
    isSame = abs((result - tmp).astype(int)) < 1000
    # .all(): report success only when EVERY element is within tolerance
    # (the previous .any() reported True if a single element matched).
    print('Time elapsed for {} : {} : {} '.format(str(tmp.dtype.name), time_elapsed, isSame.all()))
def measure4():
    """Round-trip a small array of numeric strings through qip and time it."""
    # BUG FIX: numpy.float was a deprecated alias of the builtin float and
    # was removed in NumPy 1.24; use the builtin directly.
    tmp = [str(num) for num in numpy.linspace(1., 100., int(1e1), dtype=float)]
    tmp = numpy.array(tmp)
    start_time = datetime.now()
    result = qip.execute(hdl, '{0N!x}', tmp)
    time_elapsed = datetime.now() - start_time
    isSame = result == tmp
    # BUG FIX: .any() was True if even one element matched; use .all().
    print('Time elapsed for {} : {} : {} '.format("str_", time_elapsed, isSame.all()))
# Run the round-trip benchmark for every supported dtype family.
for td_unit in ("s", "m", "ms", "ns"):
    measure3(td_unit)
for dt_unit in ("D", "M", "us", "ms", "ns"):
    measure2(dt_unit)
for float_dtype in (numpy.float64, numpy.float32):
    measure(float_dtype)
# BUG FIX: numpy.bool was a deprecated alias of the builtin bool and was
# removed in NumPy 1.24; pass the builtin directly.
for int_dtype in (numpy.int64, numpy.int32, numpy.int16, numpy.int8, bool):
    measure1(int_dtype)
measure4() | [
"os.getcwd",
"qip.open_connection",
"qip.execute",
"numpy.array",
"shutil.copyfile",
"datetime.datetime.now"
] | [((40, 51), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (49, 51), False, 'import os\n'), ((52, 100), 'shutil.copyfile', 'copyfile', (['"""..\\\\x64\\\\Release\\\\qip.dll"""', '"""qip.pyd"""'], {}), "('..\\\\x64\\\\Release\\\\qip.dll', 'qip.pyd')\n", (60, 100), False, 'from shutil import copyfile\n'), ((361, 386), 'qip.open_connection', 'qip.open_connection', (['(8888)'], {}), '(8888)\n', (380, 386), False, 'import qip\n'), ((485, 499), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (497, 499), False, 'from datetime import datetime\n'), ((513, 544), 'qip.execute', 'qip.execute', (['hdl', '"""{0N!x}"""', 'tmp'], {}), "(hdl, '{0N!x}', tmp)\n", (524, 544), False, 'import qip\n'), ((811, 825), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (823, 825), False, 'from datetime import datetime\n'), ((839, 870), 'qip.execute', 'qip.execute', (['hdl', '"""{0N!x}"""', 'tmp'], {}), "(hdl, '{0N!x}', tmp)\n", (850, 870), False, 'import qip\n'), ((1149, 1163), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1161, 1163), False, 'from datetime import datetime\n'), ((1177, 1208), 'qip.execute', 'qip.execute', (['hdl', '"""{0N!x}"""', 'tmp'], {}), "(hdl, '{0N!x}', tmp)\n", (1188, 1208), False, 'import qip\n'), ((1539, 1553), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1551, 1553), False, 'from datetime import datetime\n'), ((1567, 1598), 'qip.execute', 'qip.execute', (['hdl', '"""{0N!x}"""', 'tmp'], {}), "(hdl, '{0N!x}', tmp)\n", (1578, 1598), False, 'import qip\n'), ((1912, 1928), 'numpy.array', 'numpy.array', (['tmp'], {}), '(tmp)\n', (1923, 1928), False, 'import numpy, pandas, uuid\n'), ((1946, 1960), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1958, 1960), False, 'from datetime import datetime\n'), ((1974, 2005), 'qip.execute', 'qip.execute', (['hdl', '"""{0N!x}"""', 'tmp'], {}), "(hdl, '{0N!x}', tmp)\n", (1985, 2005), False, 'import qip\n'), ((562, 576), 'datetime.datetime.now', 'datetime.now', ([], {}), 
'()\n', (574, 576), False, 'from datetime import datetime\n'), ((888, 902), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (900, 902), False, 'from datetime import datetime\n'), ((1226, 1240), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1238, 1240), False, 'from datetime import datetime\n'), ((1616, 1630), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1628, 1630), False, 'from datetime import datetime\n'), ((2023, 2037), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2035, 2037), False, 'from datetime import datetime\n'), ((1099, 1113), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1111, 1113), False, 'from datetime import datetime\n'), ((293, 307), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (305, 307), False, 'from datetime import datetime\n'), ((1473, 1487), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (1485, 1487), False, 'from datetime import datetime\n')] |
import json
import numpy as np
import random
from tqdm import tqdm
saliency_ratio = 2
resolution = 100
noisy_label = "distractor_activity"
noise_video_scan_stride = 10 * resolution # unit: sec
noise_video_length_random = [10 * resolution, 180 * resolution] # unit: sec
activity_minimal_sec = 3 * resolution
data = json.load(open("./activity_net.v1-3.min.json"))
video_segment_list = []
class_label_set = set()
# Build `video_segment_list` from ActivityNet annotations: one entry per
# padded activity segment, plus synthetic pure-noise ("distractor") segments
# carved out of the unused parts of each video.  Time is discretized at
# `resolution` steps per second.
for video_id, video in tqdm(data["database"].items()):
    duration = int(video["duration"] * resolution)
    subset = video["subset"]
    if subset == "testing":
        continue  # skip test data, we don't have gt!
    annotations = video["annotations"]
    segment_list = []
    # Per-timestep occupancy flag: 0 = unused, 1 = covered by some segment.
    discretized_utilization_flag = np.zeros((duration, 1))
    label_indicator_dict = {}
    for annotation in annotations:
        label = annotation["label"]
        class_label_set.add(label)
        seg_start, seg_end = annotation["segment"]
        # Clamp to [0, duration] to guard against defective annotations.
        seg_start = max(int(seg_start * resolution), 0)
        seg_end = min(int(seg_end * resolution), duration)
        discretized_utilization_flag[seg_start:seg_end] = 1
        segment_list.append([seg_start, seg_end])
        # Remember the label keyed by the segment midpoint for later lookup.
        label_indicator_dict[(seg_start + seg_end) // 2] = label
    # add background
    # BUG FIX: the original called sorted(segment_list) and discarded the
    # result; sort in place so segments are actually processed by start time.
    segment_list.sort()
    for seg_start, seg_end in segment_list:
        if seg_start == seg_end:
            continue  # bad data in ActivityNet!
        if seg_end - seg_start < activity_minimal_sec:
            continue  # too short, skip!
        # Random context padding on both sides, clipped at the video borders.
        padding_left = min(
            seg_start - 0,
            random.randint(0, int((seg_end - seg_start) * saliency_ratio)),
        )  # dongzhuoyao, should remove noisy background here.
        padding_right = min(
            duration - seg_end - 1,
            random.randint(0, int((seg_end - seg_start) * saliency_ratio)),
        )
        # Halve the padding until it no longer overlaps an occupied region;
        # give up after a few halvings and use no padding at all.
        # (The original wrapped this in `except: raise` followed by an
        # unreachable print; the dead handler has been removed.)
        iter_time = 0
        while (
            np.sum(
                discretized_utilization_flag[
                    seg_start - padding_left : seg_start
                ]
            )
            > 0
        ):
            padding_left = padding_left // 2
            if iter_time > 4:
                padding_left = 0
                break
            iter_time += 1
        iter_time = 0
        while (
            np.sum(
                discretized_utilization_flag[
                    seg_end : seg_end + padding_right
                ]
            )
            > 0
        ):
            padding_right = padding_right // 2
            if iter_time > 4:
                padding_right = 0
                break
            iter_time += 1
        if (
            np.sum(
                discretized_utilization_flag[seg_end : seg_end + padding_right]
            )
            > 0
            or np.sum(
                discretized_utilization_flag[
                    seg_start - padding_left : seg_start
                ]
            )
            > 0
        ):
            continue  # current area is already occupied by others, skip it.
        # Mark the padded span as used so later segments cannot reuse it.
        discretized_utilization_flag[
            seg_start - padding_left : seg_end + padding_right
        ] = 1
        video_segment_list.append(
            dict(
                border=[
                    (seg_start - padding_left) / resolution,
                    (seg_end + padding_right) / resolution,
                ],
                segment=[
                    seg_start / resolution,
                    seg_end / resolution,
                ],  # apply resolution, convert to seconds.
                label=label_indicator_dict[(seg_start + seg_end) // 2],
                video_id=video_id,
                activitynet_duration=duration / resolution,
                activitynet_subset=subset,
            )
        )
    # add pure-noise video from unused segments
    for i in range(
        0, discretized_utilization_flag.shape[0], noise_video_scan_stride
    ):
        current_noise_length = random.randint(
            noise_video_length_random[0], noise_video_length_random[1]
        )
        if i + current_noise_length >= discretized_utilization_flag.shape[0]:
            continue  # out of the border of this activitynet video, skip it.
        if (
            np.sum(discretized_utilization_flag[i : i + current_noise_length])
            == 0
        ):
            # print("find a noise video")
            discretized_utilization_flag[i : i + current_noise_length] = 1
            video_segment_list.append(
                dict(
                    border=[
                        i / resolution,
                        (i + current_noise_length) / resolution,
                    ],
                    segment=[
                        i / resolution,
                        (i + current_noise_length) / resolution,
                    ],  # main field
                    label=noisy_label,
                    video_id=video_id,
                    activitynet_duration=duration / resolution,
                    activitynet_subset=subset,
                )
            )
# Summarize the generated dataset and persist it to disk.
noise_video_num = sum(
    1 for seg in video_segment_list if seg["label"] == noisy_label
)
total_videos = len(video_segment_list)
print(
    "total_video num={}, normal num={}, noise num={}, noise ratio={}%".format(
        total_videos,
        total_videos - noise_video_num,
        noise_video_num,
        noise_video_num * 100.0 / total_videos,
    )
)
# Print the class labels as a single-line, quoted, comma-separated list.
quoted_labels = ["'{}'".format(name) for name in sorted(class_label_set)]
print(",".join(quoted_labels))
with open("video_segment.json", "w") as f:
    json.dump(video_segment_list, f)
| [
"json.dump",
"numpy.sum",
"numpy.zeros",
"random.randint"
] | [((729, 752), 'numpy.zeros', 'np.zeros', (['(duration, 1)'], {}), '((duration, 1))\n', (737, 752), True, 'import numpy as np\n'), ((6233, 6265), 'json.dump', 'json.dump', (['video_segment_list', 'f'], {}), '(video_segment_list, f)\n', (6242, 6265), False, 'import json\n'), ((4624, 4698), 'random.randint', 'random.randint', (['noise_video_length_random[0]', 'noise_video_length_random[1]'], {}), '(noise_video_length_random[0], noise_video_length_random[1])\n', (4638, 4698), False, 'import random\n'), ((4902, 4966), 'numpy.sum', 'np.sum', (['discretized_utilization_flag[i:i + current_noise_length]'], {}), '(discretized_utilization_flag[i:i + current_noise_length])\n', (4908, 4966), True, 'import numpy as np\n'), ((2233, 2305), 'numpy.sum', 'np.sum', (['discretized_utilization_flag[seg_start - padding_left:seg_start]'], {}), '(discretized_utilization_flag[seg_start - padding_left:seg_start])\n', (2239, 2305), True, 'import numpy as np\n'), ((2839, 2908), 'numpy.sum', 'np.sum', (['discretized_utilization_flag[seg_end:seg_end + padding_right]'], {}), '(discretized_utilization_flag[seg_end:seg_end + padding_right])\n', (2845, 2908), True, 'import numpy as np\n'), ((3301, 3370), 'numpy.sum', 'np.sum', (['discretized_utilization_flag[seg_end:seg_end + padding_right]'], {}), '(discretized_utilization_flag[seg_end:seg_end + padding_right])\n', (3307, 3370), True, 'import numpy as np\n'), ((3434, 3506), 'numpy.sum', 'np.sum', (['discretized_utilization_flag[seg_start - padding_left:seg_start]'], {}), '(discretized_utilization_flag[seg_start - padding_left:seg_start])\n', (3440, 3506), True, 'import numpy as np\n')] |
#-*- coding=utf-8 -*-
import cv2
import sys
import os
import random
sys.path.insert(0, '/home/remo/Desktop/remodet_repository_DJ/python')
# print sys.path
import caffe
import numpy as np
sys.path.append("../")
import img_func as func
sys.path.append("/home/remo/from_wdh")
caffe.set_mode_gpu()
caffe.set_device(0)
class SsdDet:
    """SSD-style detector wrapper that runs one or more Caffe nets on a frame.

    Loads networks in `det_init`, then `det_mode` / `det_mode_and_save` run
    every net on a 16:9 (or 9:16) padded, resized frame and draw the
    detections; `det_txt` returns the detection count for a frame.
    NOTE(review): code is Python 2 style (`xrange`) and uses the deprecated
    `np.float` alias.
    """
    def det_init(self, net_dict_info):
        """Load all detection networks described by `net_dict_info`.

        Each value is a tuple (path_id_in_root_folders, path_mode,
        proto_path, weights_path_or_extra_string, itersize); path_mode 0
        uses the weights path verbatim, path_mode 1 builds a
        "<name>_iter_<n>.caffemodel" path from the proto directory name.
        After loading, each net key gets its itersize appended for display.
        """
        self.net_dets = []
        self.mean_data = [104.0, 117.0, 123.0]
        self.colors = [[0, 0, 255], [0, 255, 0], [0, 255, 255], [0, 0, 0]]
        self.root_folders = ["","/home/xjx/Models/Results/DetPose_JointTrain","/home/xjx/Models/Results/DetNet"]
        self.net_keys = net_dict_info.keys()
        self.net_keys = sorted(self.net_keys)
        self.flag_169 = True
        self.flag_916 = not self.flag_169
        self.res = {}
        self.res2 = []
        for i in xrange(len(self.net_keys)):
            path_id_in_root_folders, path_mode, proto_path, weights_path_or_extra_string, itersize = net_dict_info[self.net_keys[i]]
            if path_mode == 0:
                det_proto = os.path.join(self.root_folders[path_id_in_root_folders], proto_path)
                det_weights = os.path.join(self.root_folders[path_id_in_root_folders], weights_path_or_extra_string)
            elif path_mode == 1:
                det_proto = os.path.join(self.root_folders[path_id_in_root_folders], proto_path)
                det_weights = os.path.join(self.root_folders[path_id_in_root_folders], "%s/%s%s_iter_%d.caffemodel" %
                                           (proto_path.split("/")[0],proto_path.split("/")[0], weights_path_or_extra_string, itersize))
            self.net_dets.append(caffe.Net(det_proto,det_weights,caffe.TEST))
        for i in xrange(len(self.net_keys)):
            self.net_keys[i] += "_%s"%str(net_dict_info[self.net_keys[i]][-1])
    # def det_config(self, ):
    def det_mode(self, frame_org):
        """Run every loaded net on `frame_org` and build a tiled visualization.

        Pads the frame to 16:9 (or 9:16), resizes to the net input size,
        subtracts the channel means, forwards each net, draws all boxes,
        and stores: per-net person boxes in `self.person_boxes_all`, the
        last net's boxes in `self.img_all_box`, and the tiled image grid in
        `self.img_one`.
        """
        if self.flag_169:
            self.img_h_det = 288
            self.img_w_det = 512
        elif self.flag_916 :
            self.img_h_det = 512
            self.img_w_det = 288
        self.scale = 1
        self.imgs_show_all = []
        self.strs_show_all = []
        self.blob_name_detout = "det_out"
        self.ncols_show = 3
        # frame_resize = np.zeros((self.img_h_det,self.img_w_det,3)).astype(np.uint8)
        width = frame_org.shape[1]
        height = frame_org.shape[0]
        if self.flag_169:
            height_new, width_new = func.get_pad169size(height, width)  # pad border so the frame becomes 16:9
        elif self.flag_916:
            height_new, width_new = func.get_pad916size(height, width)  # pad border so the frame becomes 9:16
        frame_org = cv2.copyMakeBorder(frame_org, 0, height_new - height, 0, width_new - width, cv2.BORDER_CONSTANT,
                                       value=(104, 117, 123))
        frame_resize = cv2.resize(frame_org, (self.img_w_det, self.img_h_det), )
        frame_org_resize = cv2.resize(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)
        # frame_org_resize = cv2.resize(frame_org_resize,dsize=None,fx=self.scale,fy=self.scale) # upscale (disabled)
        # bilinear-resized canvas overwritten by the nearest-neighbor resize below
        frame_resize[0:frame_org_resize.shape[0],0:frame_org_resize.shape[1],:] = frame_org_resize
        # shift by one pixel (disabled)
        # M = np.float32([[1, 0, 200], [0, 1, 120]])  # 10
        # frame_resize = cv2.warpAffine(frame_resize, M, (frame_resize.shape[1], frame_resize.shape[0]))  # 11
        # convert HWC -> CHW for the Caffe blob
        frame_input = frame_resize.astype(np.float).transpose((2, 0, 1))
        for i in xrange(3):
            frame_input[i] -= self.mean_data[i]
        self.person_boxes_all = []
        for i in xrange(len(self.net_keys)):
            self.net_dets[i].blobs["data"].reshape(1,3,self.img_h_det,self.img_w_det)
            self.net_dets[i].blobs["data"].data[...] = frame_input
            self.net_dets[i].forward()
            det_out = self.net_dets[i].blobs[self.blob_name_detout].data
            #print(det_out.shape,'----------------')
            # mask output of the descriptor branch (disabled)
            # des_mask_out = self.net_dets[i].blobs[].data
            instance_boxes = []
            person_boxes = []
            num_person = det_out.shape[2]
            for i_person in xrange(num_person):
                # det_out row layout: [img_id, cid, score, xmin, ymin, xmax, ymax] (normalized coords)
                cid = det_out[0][0][i_person][1]
                xmin = int(det_out[0][0][i_person][3] * self.img_w_det)
                ymin = int(det_out[0][0][i_person][4] * self.img_h_det)
                xmax = int(det_out[0][0][i_person][5] * self.img_w_det)
                ymax = int(det_out[0][0][i_person][6] * self.img_h_det)
                score = det_out[0][0][i_person][2]
                instance_boxes.append([xmin,ymin,xmax,ymax,score,int(cid)])
                if cid == 0:
                    person_boxes.append([xmin, ymin, xmax, ymax, score, int(cid)])
            self.person_boxes_all.append(person_boxes)
            self.img_all_box = instance_boxes #  NOTE(review): overwritten per net; holds only the last net's boxes
            frame_show = frame_resize.copy()
            #print instance_boxes
            for i_person in xrange(len(instance_boxes)):
                xmin = instance_boxes[i_person][0]
                ymin = instance_boxes[i_person][1]
                xmax = instance_boxes[i_person][2]
                ymax = instance_boxes[i_person][3]
                score =instance_boxes[i_person][4]
                cid = instance_boxes[i_person][5]
                cv2.rectangle(frame_show, (xmin, ymin), (xmax, ymax), self.colors[cid], 2)
                cv2.putText(frame_show, '%d_%0.2f' % (i_person,score), (xmin, ymax-10), cv2.FONT_HERSHEY_COMPLEX, 0.8, self.colors[cid], 1)
                # iou = func.compute_iou([xmin,ymin,xmax,ymax], [0,0,512,288])
                # cv2.putText(frame_show, '%0.5f' % (iou), (xmax+20, ymin-20), cv2.FONT_HERSHEY_COMPLEX, 0.8, colors[cid], 1)
                if cid != 0:
                    cv2.putText(frame_show, '%d*%d' % (xmax-xmin, ymax-ymin), (xmin, ymax+30), cv2.FONT_HERSHEY_COMPLEX, 0.8,[0,0,255], 1)
                else:
                    cv2.putText(frame_show, '%d*%d' % (xmax-xmin, ymax-ymin), (xmin, ymax-50), cv2.FONT_HERSHEY_COMPLEX, 0.8,[0,0,255], 1)
            # cv2.putText(frame_show, '%d' % cnt_frame, (10, 50), cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255), 1)
            # cv2.imwrite( "/home/xjx/Documents/det/"+ str(cnt_img)+".jpg", frame_show)
            self.imgs_show_all.append(frame_show)
        self.img_one = func.putImgsToOne(self.imgs_show_all, self.net_keys, ncols=self.ncols_show, txt_org=(0, 20), fonts=1,
                                      color=(0, 0, 255))
    # def det_config(self, ):
    def det_mode_and_save(self, frame_org,img_name):
        """Run detection like `det_mode`, and save frames flagged as wrong.

        If the first detected box has xmin > 0, the drawn frame is written
        to the wrong_pic folder and `img_name` is appended to wrong_pic.txt.
        NOTE(review): `instance_boxes[0][0]` raises IndexError when a net
        returns no detections — confirm nets always emit at least one row.
        NOTE(review): the append-mode file would be safer inside `with`.
        """
        f = open("/home/remo/Desktop/remo_cat_dog/Data_CatDog/OtherBackGround_Images/wrong_pic.txt",'a+')
        if self.flag_169:
            self.img_h_det = 288
            self.img_w_det = 512
        elif self.flag_916 :
            self.img_h_det = 512
            self.img_w_det = 288
        self.scale = 1
        self.imgs_show_all = []
        self.strs_show_all = []
        self.blob_name_detout = "det_out"
        self.ncols_show = 3
        # frame_resize = np.zeros((self.img_h_det,self.img_w_det,3)).astype(np.uint8)
        width = frame_org.shape[1]
        height = frame_org.shape[0]
        if self.flag_169:
            height_new, width_new = func.get_pad169size(height, width)  # pad border so the frame becomes 16:9
        elif self.flag_916:
            height_new, width_new = func.get_pad916size(height, width)  # pad border so the frame becomes 9:16
        frame_org = cv2.copyMakeBorder(frame_org, 0, height_new - height, 0, width_new - width, cv2.BORDER_CONSTANT,
                                       value=(104, 117, 123))
        frame_resize = cv2.resize(frame_org, (self.img_w_det, self.img_h_det), )
        frame_org_resize = cv2.resize(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)
        # frame_org_resize = cv2.resize(frame_org_resize,dsize=None,fx=self.scale,fy=self.scale) # upscale (disabled)
        # bilinear-resized canvas overwritten by the nearest-neighbor resize below
        frame_resize[0:frame_org_resize.shape[0],0:frame_org_resize.shape[1],:] = frame_org_resize
        # shift by one pixel (disabled)
        # M = np.float32([[1, 0, 200], [0, 1, 120]])  # 10
        # frame_resize = cv2.warpAffine(frame_resize, M, (frame_resize.shape[1], frame_resize.shape[0]))  # 11
        # convert HWC -> CHW for the Caffe blob
        frame_input = frame_resize.astype(np.float).transpose((2, 0, 1))
        for i in xrange(3):
            frame_input[i] -= self.mean_data[i]
        self.person_boxes_all = []
        for i in xrange(len(self.net_keys)):
            self.net_dets[i].blobs["data"].reshape(1,3,self.img_h_det,self.img_w_det)
            self.net_dets[i].blobs["data"].data[...] = frame_input
            self.net_dets[i].forward()
            det_out = self.net_dets[i].blobs[self.blob_name_detout].data
            #print(det_out.shape,'----------------')
            # mask output of the descriptor branch (disabled)
            # des_mask_out = self.net_dets[i].blobs[].data
            instance_boxes = []
            person_boxes = []
            num_person = det_out.shape[2]
            for i_person in xrange(num_person):
                cid = det_out[0][0][i_person][1]
                xmin = int(det_out[0][0][i_person][3] * self.img_w_det)
                ymin = int(det_out[0][0][i_person][4] * self.img_h_det)
                xmax = int(det_out[0][0][i_person][5] * self.img_w_det)
                ymax = int(det_out[0][0][i_person][6] * self.img_h_det)
                score = det_out[0][0][i_person][2]
                instance_boxes.append([xmin,ymin,xmax,ymax,score,int(cid)])
                if cid == 0:
                    person_boxes.append([xmin, ymin, xmax, ymax, score, int(cid)])
            self.person_boxes_all.append(person_boxes)
            self.img_all_box = instance_boxes #
            frame_show = frame_resize.copy()
            for i_person in xrange(len(instance_boxes)):
                xmin = instance_boxes[i_person][0]
                ymin = instance_boxes[i_person][1]
                xmax = instance_boxes[i_person][2]
                ymax = instance_boxes[i_person][3]
                score =instance_boxes[i_person][4]
                cid = instance_boxes[i_person][5]
                cv2.rectangle(frame_show, (xmin, ymin), (xmax, ymax), self.colors[cid], 2)
            if(instance_boxes[0][0]>0):
                cv2.imwrite("/home/remo/Desktop/remo_cat_dog/Data_CatDog/OtherBackGround_Images/wrong_pic/"+img_name.split('/')[-2]+'_'+img_name.split('/')[-1],frame_show)
                f.write(img_name+'\n')
        f.close()
    def det_txt(self,frame):
        """Run all nets on `frame` and return the detection count of the last net.

        Boxes are scaled back to the padded original-resolution coordinates
        and clipped to the original frame size.
        """
        self.img_h_det = 288
        self.img_w_det = 512
        self.blob_name_detout = "det_out"
        # record the input image size
        width = frame.shape[1]
        height = frame.shape[0]
        # compute the padding needed to reach 16:9, pad bottom/right, then resize to 512x288
        height_new, width_new = func.get_pad169size(height, width)  # pad border so the frame becomes 16:9
        frame_org = cv2.copyMakeBorder(frame, 0, height_new - height, 0, width_new - width, cv2.BORDER_CONSTANT,
                                       value=(104, 117, 123))
        frame_org_resize = cv2.resize(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)
        frame_input = frame_org_resize.astype(np.float).transpose((2, 0, 1))
        for i in xrange(3):
            frame_input[i] -= self.mean_data[i]
        # print(xrange(len(self.net_keys)))
        for i in xrange(len(self.net_keys)):
            self.net_dets[i].blobs["data"].reshape(1,3,self.img_h_det,self.img_w_det)
            self.net_dets[i].blobs["data"].data[...] = frame_input
            self.net_dets[i].forward()
            det_out = self.net_dets[i].blobs[self.blob_name_detout].data
            tmp_res=[]
            num_object = det_out.shape[2]
            for j in xrange(num_object):
                cid = int(det_out[0][0][j][1])
                xmin = int(np.clip(det_out[0][0][j][3] * width_new,0,width))
                ymin = int(np.clip(det_out[0][0][j][4] * height_new,0,height))
                xmax = int(np.clip(det_out[0][0][j][5] * width_new,0,width))
                ymax = int(np.clip(det_out[0][0][j][6] * height_new,0,height))
                score = det_out[0][0][j][2]
                if cid != -1:
                    tmp_res.append([cid, score, xmin, ymin, xmax, ymax])
            # print(tmp_res)
            # self.res2.append(tmp_res)
        # print self.res2
        # NOTE(review): tmp_res here is the last loop iteration's list, so
        # only the final net's detections are counted.
        return len(tmp_res)
| [
"sys.path.append",
"img_func.putImgsToOne",
"caffe.set_mode_gpu",
"cv2.putText",
"cv2.copyMakeBorder",
"sys.path.insert",
"numpy.clip",
"cv2.rectangle",
"caffe.set_device",
"img_func.get_pad169size",
"caffe.Net",
"os.path.join",
"cv2.resize",
"img_func.get_pad916size"
] | [((68, 137), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""/home/remo/Desktop/remodet_repository_DJ/python"""'], {}), "(0, '/home/remo/Desktop/remodet_repository_DJ/python')\n", (83, 137), False, 'import sys\n'), ((187, 209), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (202, 209), False, 'import sys\n'), ((234, 272), 'sys.path.append', 'sys.path.append', (['"""/home/remo/from_wdh"""'], {}), "('/home/remo/from_wdh')\n", (249, 272), False, 'import sys\n'), ((273, 293), 'caffe.set_mode_gpu', 'caffe.set_mode_gpu', ([], {}), '()\n', (291, 293), False, 'import caffe\n'), ((294, 313), 'caffe.set_device', 'caffe.set_device', (['(0)'], {}), '(0)\n', (310, 313), False, 'import caffe\n'), ((2682, 2805), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['frame_org', '(0)', '(height_new - height)', '(0)', '(width_new - width)', 'cv2.BORDER_CONSTANT'], {'value': '(104, 117, 123)'}), '(frame_org, 0, height_new - height, 0, width_new - width,\n cv2.BORDER_CONSTANT, value=(104, 117, 123))\n', (2700, 2805), False, 'import cv2\n'), ((2869, 2924), 'cv2.resize', 'cv2.resize', (['frame_org', '(self.img_w_det, self.img_h_det)'], {}), '(frame_org, (self.img_w_det, self.img_h_det))\n', (2879, 2924), False, 'import cv2\n'), ((2955, 3029), 'cv2.resize', 'cv2.resize', (['frame_org', '(self.img_w_det, self.img_h_det)', 'cv2.INTER_NEAREST'], {}), '(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)\n', (2965, 3029), False, 'import cv2\n'), ((7531, 7654), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['frame_org', '(0)', '(height_new - height)', '(0)', '(width_new - width)', 'cv2.BORDER_CONSTANT'], {'value': '(104, 117, 123)'}), '(frame_org, 0, height_new - height, 0, width_new - width,\n cv2.BORDER_CONSTANT, value=(104, 117, 123))\n', (7549, 7654), False, 'import cv2\n'), ((7718, 7773), 'cv2.resize', 'cv2.resize', (['frame_org', '(self.img_w_det, self.img_h_det)'], {}), '(frame_org, (self.img_w_det, self.img_h_det))\n', (7728, 7773), False, 'import 
cv2\n'), ((7804, 7878), 'cv2.resize', 'cv2.resize', (['frame_org', '(self.img_w_det, self.img_h_det)', 'cv2.INTER_NEAREST'], {}), '(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)\n', (7814, 7878), False, 'import cv2\n'), ((10853, 10887), 'img_func.get_pad169size', 'func.get_pad169size', (['height', 'width'], {}), '(height, width)\n', (10872, 10887), True, 'import img_func as func\n'), ((10929, 11049), 'cv2.copyMakeBorder', 'cv2.copyMakeBorder', (['frame', '(0)', '(height_new - height)', '(0)', '(width_new - width)', 'cv2.BORDER_CONSTANT'], {'value': '(104, 117, 123)'}), '(frame, 0, height_new - height, 0, width_new - width, cv2\n .BORDER_CONSTANT, value=(104, 117, 123))\n', (10947, 11049), False, 'import cv2\n'), ((11111, 11185), 'cv2.resize', 'cv2.resize', (['frame_org', '(self.img_w_det, self.img_h_det)', 'cv2.INTER_NEAREST'], {}), '(frame_org, (self.img_w_det, self.img_h_det), cv2.INTER_NEAREST)\n', (11121, 11185), False, 'import cv2\n'), ((2484, 2518), 'img_func.get_pad169size', 'func.get_pad169size', (['height', 'width'], {}), '(height, width)\n', (2503, 2518), True, 'import img_func as func\n'), ((6416, 6540), 'img_func.putImgsToOne', 'func.putImgsToOne', (['self.imgs_show_all', 'self.net_keys'], {'ncols': 'self.ncols_show', 'txt_org': '(0, 20)', 'fonts': '(1)', 'color': '(0, 0, 255)'}), '(self.imgs_show_all, self.net_keys, ncols=self.ncols_show,\n txt_org=(0, 20), fonts=1, color=(0, 0, 255))\n', (6433, 6540), True, 'import img_func as func\n'), ((7333, 7367), 'img_func.get_pad169size', 'func.get_pad169size', (['height', 'width'], {}), '(height, width)\n', (7352, 7367), True, 'import img_func as func\n'), ((1078, 1146), 'os.path.join', 'os.path.join', (['self.root_folders[path_id_in_root_folders]', 'proto_path'], {}), '(self.root_folders[path_id_in_root_folders], proto_path)\n', (1090, 1146), False, 'import os\n'), ((1177, 1267), 'os.path.join', 'os.path.join', (['self.root_folders[path_id_in_root_folders]', 'weights_path_or_extra_string'], 
{}), '(self.root_folders[path_id_in_root_folders],\n weights_path_or_extra_string)\n', (1189, 1267), False, 'import os\n'), ((1682, 1727), 'caffe.Net', 'caffe.Net', (['det_proto', 'det_weights', 'caffe.TEST'], {}), '(det_proto, det_weights, caffe.TEST)\n', (1691, 1727), False, 'import caffe\n'), ((2605, 2639), 'img_func.get_pad916size', 'func.get_pad916size', (['height', 'width'], {}), '(height, width)\n', (2624, 2639), True, 'import img_func as func\n'), ((5387, 5461), 'cv2.rectangle', 'cv2.rectangle', (['frame_show', '(xmin, ymin)', '(xmax, ymax)', 'self.colors[cid]', '(2)'], {}), '(frame_show, (xmin, ymin), (xmax, ymax), self.colors[cid], 2)\n', (5400, 5461), False, 'import cv2\n'), ((5478, 5608), 'cv2.putText', 'cv2.putText', (['frame_show', "('%d_%0.2f' % (i_person, score))", '(xmin, ymax - 10)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.8)', 'self.colors[cid]', '(1)'], {}), "(frame_show, '%d_%0.2f' % (i_person, score), (xmin, ymax - 10),\n cv2.FONT_HERSHEY_COMPLEX, 0.8, self.colors[cid], 1)\n", (5489, 5608), False, 'import cv2\n'), ((7454, 7488), 'img_func.get_pad916size', 'func.get_pad916size', (['height', 'width'], {}), '(height, width)\n', (7473, 7488), True, 'import img_func as func\n'), ((10202, 10276), 'cv2.rectangle', 'cv2.rectangle', (['frame_show', '(xmin, ymin)', '(xmax, ymax)', 'self.colors[cid]', '(2)'], {}), '(frame_show, (xmin, ymin), (xmax, ymax), self.colors[cid], 2)\n', (10215, 10276), False, 'import cv2\n'), ((1325, 1393), 'os.path.join', 'os.path.join', (['self.root_folders[path_id_in_root_folders]', 'proto_path'], {}), '(self.root_folders[path_id_in_root_folders], proto_path)\n', (1337, 1393), False, 'import os\n'), ((5856, 5987), 'cv2.putText', 'cv2.putText', (['frame_show', "('%d*%d' % (xmax - xmin, ymax - ymin))", '(xmin, ymax + 30)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.8)', '[0, 0, 255]', '(1)'], {}), "(frame_show, '%d*%d' % (xmax - xmin, ymax - ymin), (xmin, ymax +\n 30), cv2.FONT_HERSHEY_COMPLEX, 0.8, [0, 0, 255], 1)\n", (5867, 5987), False, 
'import cv2\n'), ((6017, 6148), 'cv2.putText', 'cv2.putText', (['frame_show', "('%d*%d' % (xmax - xmin, ymax - ymin))", '(xmin, ymax - 50)', 'cv2.FONT_HERSHEY_COMPLEX', '(0.8)', '[0, 0, 255]', '(1)'], {}), "(frame_show, '%d*%d' % (xmax - xmin, ymax - ymin), (xmin, ymax -\n 50), cv2.FONT_HERSHEY_COMPLEX, 0.8, [0, 0, 255], 1)\n", (6028, 6148), False, 'import cv2\n'), ((11876, 11926), 'numpy.clip', 'np.clip', (['(det_out[0][0][j][3] * width_new)', '(0)', 'width'], {}), '(det_out[0][0][j][3] * width_new, 0, width)\n', (11883, 11926), True, 'import numpy as np\n'), ((11953, 12005), 'numpy.clip', 'np.clip', (['(det_out[0][0][j][4] * height_new)', '(0)', 'height'], {}), '(det_out[0][0][j][4] * height_new, 0, height)\n', (11960, 12005), True, 'import numpy as np\n'), ((12032, 12082), 'numpy.clip', 'np.clip', (['(det_out[0][0][j][5] * width_new)', '(0)', 'width'], {}), '(det_out[0][0][j][5] * width_new, 0, width)\n', (12039, 12082), True, 'import numpy as np\n'), ((12109, 12161), 'numpy.clip', 'np.clip', (['(det_out[0][0][j][6] * height_new)', '(0)', 'height'], {}), '(det_out[0][0][j][6] * height_new, 0, height)\n', (12116, 12161), True, 'import numpy as np\n')] |
# python 3.7
# file: data.py
"""Functions/classes to load and clean data."""
# Standard Library Imports
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.optimize import least_squares, minimize
# Third-part Imports
import lmfit as lm
# Local Imports / PATH changes
# Authorship
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2021, ACWatt"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Prototype"
# FUNCTIONS ===================================================================
def fun_rosenbrock(x):
    """Return the two Rosenbrock residuals [10*(x1 - x0^2), 1 - x0] as an array."""
    curvature_term = 10 * (x[1] - x[0] ** 2)
    offset_term = 1 - x[0]
    return np.array([curvature_term, offset_term])
def residuals(params, ebar, ebar_lag, t, t_lag):
    """Residuals of the quasi-differenced AR(1) model with a quadratic trend.

    Model: ebar = rho*ebar_lag + alpha + d0*t + d1*t**2
                  - rho*(d0*t_lag + d1*t_lag**2)
    Returns observed `ebar` minus the model prediction; `params` is a
    mapping with keys 'rho', 'alpha', 'delta0', 'delta1'.
    """
    rho, alpha, d0, d1 = (
        params[key] for key in ('rho', 'alpha', 'delta0', 'delta1')
    )
    predicted = rho * ebar_lag + alpha + d0 * t + d1 * t ** 2 - rho * (d0 * t_lag + d1 * t_lag ** 2)
    return ebar - predicted
def residuals2(params, ebar, ebar_lag, t, t_lag):
    """Residuals of the same quasi-differenced model as `residuals`,
    but with `params` given as a plain sequence [rho, alpha, delta0, delta1]
    (as required by scipy.optimize.least_squares).
    """
    rho, alpha = params[0], params[1]
    d0, d1 = params[2], params[3]
    predicted = rho * ebar_lag + alpha + d0 * t + d1 * t ** 2 - rho * (d0 * t_lag + d1 * t_lag ** 2)
    return ebar - predicted
def estimation_andy(data):
    """Reproduce Andy's work in Stata.

    Fits the quasi-differenced model of Eq. 71 twice -- once with
    lmfit.minimize and once with scipy.optimize.least_squares -- and
    compares the resulting covariance estimates.  Prints both covariance
    matrices and currently returns None (exploratory code).

    Args:
        data: pandas DataFrame with columns ebar, ebar_lag, time, time_lag
            (assumed from attribute access below -- TODO confirm schema).
    """
    # Estimate Eq. 71 with nonlinear regression
    # NOTE(review): min_year/max_years are defined but never used.
    min_year = 1945
    max_years = 2005
    data = data[data.ebar_lag.notnull()] # need to remove Nan values before regressing
    # Estimate nonlinear model:
    # ebar = rho*ebar_lag + (1-rho)*B0 + h(t) - rho*h(t-1)
    # define alpha = (1-rho)*B0
    # use NL estimation to find rho, alpha, delta_0, delta_1
    # Solve for B0 using B0 = alpha / (1-rho)
    # Get Std Err of B0 using variance of B0^0.5
    ebar = data.ebar
    ebar_lag = data.ebar_lag
    t = data.time
    t_lag = data.time_lag
    # lmfit parameter set with crude starting values.
    params = lm.Parameters()
    params.add('rho', value=0.1)
    params.add('alpha', value=1)  # (1 - rho) * B0
    params.add('delta0', value=1)
    params.add('delta1', value=1)
    # compare lmfit minimize covariance
    result = lm.minimize(residuals, params, args=(ebar, ebar_lag, t, t_lag))
    rho = result.params['rho'].value
    alpha = result.params['alpha'].value
    d0 = result.params['delta0'].value
    d1 = result.params['delta1'].value
    # NOTE(review): B0 is computed here but never used -- rho/alpha are
    # overwritten by the scipy fit below before B0_v is formed.
    B0 = alpha / (1 - rho)
    cov = result.covar
    print('cov: \n', cov)
    # Standard errors = sqrt of the covariance diagonal.
    var = np.sqrt(np.diagonal(cov))
    rho_v = var[0]
    alpha_v = var[1]
    # compare scipy leastsquares covariance
    params_guess = [0.1, 1, 1, 1]  # rho, alpha, delta0, delta1
    result2 = least_squares(residuals2, params_guess, args=(ebar, ebar_lag, t, t_lag), max_nfev=1000)
    # Gauss-Newton covariance approximation: (J^T J)^-1 from the final Jacobian.
    J = result2.jac
    cov = np.linalg.inv(J.T.dot(J))
    print('cov: \n', cov)
    var = np.sqrt(np.diagonal(cov))
    rho = result2.x[0]
    rho_v = var[0]
    alpha = result2.x[1]
    alpha_v = var[1]
    # Variance formula from [1] (references at bottom)
    # NOTE(review): np.sqrt(cov[0, 1]) is NaN when the off-diagonal
    # covariance is negative -- confirm the intended formula from [1].
    B0_v = alpha ** 2 * rho_v / (1 - rho) ** 4 \
           + alpha_v / (1 - rho) ** 2 \
           + 2 * alpha * np.sqrt(cov[0, 1])
    # The covariance of both methods seem to be very sensitive to convergence parameters (number of iterations...)
    # need to figure out if this is the right covariance matrix for both methods. Perhaps use a simple sinthetic example
    # below and try both methods.
    # NOTE(review): this nested helper (and the commented demo below) is a
    # sanity-check example for lmfit and is never called.
    def residual(params, x, data, eps_data):
        amp = params['amp']
        phaseshift = params['phase']
        freq = params['frequency']
        decay = params['decay']
        model = amp * np.sin(x * freq + phaseshift) * np.exp(-x * x * decay)
        return (data - model) / eps_data
    # # generate synthetic data with noise
    # x = np.linspace(0, 100)
    # eps_data = np.random.normal(size=x.size, scale=0.2)
    # data = 7.5 * np.sin(x*0.22 + 2.5) * np.exp(-x*x*0.01) + eps_data
    #
    # params = lm.Parameters()
    # params.add('amp', value=10)
    # params.add('decay', value=0.007)
    # params.add('phase', value=0.2)
    # params.add('frequency', value=3.0)
    #
    # out = lm.minimize(residual, params, args=(x, data, eps_data))
    #
    # amp = out.params['amp'].value
    # freq = out.params['frequency'].value
    # phaseshift = out.params['phase'].value
    # decay = out.params['decay'].value
    #
    # prediction = amp * np.sin(x*freq + phaseshift) * np.exp(-x*x*decay)
    # plt.figure()
    # plt.plot(x, data)
    # plt.plot(x, prediction)
    # plt.show()
# MAIN ========================================================================
if __name__ == '__main__':
pass
# REFERENCES
"""
Variance formula for ratio of paramters
https://stats.stackexchange.com/questions/151974/standard-error-of-the-combination-of-estimated-parameters
"""
| [
"lmfit.Parameters",
"scipy.optimize.least_squares",
"lmfit.minimize",
"numpy.array",
"numpy.exp",
"numpy.sin",
"numpy.sqrt",
"numpy.diagonal"
] | [((652, 697), 'numpy.array', 'np.array', (['[10 * (x[1] - x[0] ** 2), 1 - x[0]]'], {}), '([10 * (x[1] - x[0] ** 2), 1 - x[0]])\n', (660, 697), True, 'import numpy as np\n'), ((1863, 1878), 'lmfit.Parameters', 'lm.Parameters', ([], {}), '()\n', (1876, 1878), True, 'import lmfit as lm\n'), ((2085, 2148), 'lmfit.minimize', 'lm.minimize', (['residuals', 'params'], {'args': '(ebar, ebar_lag, t, t_lag)'}), '(residuals, params, args=(ebar, ebar_lag, t, t_lag))\n', (2096, 2148), True, 'import lmfit as lm\n'), ((2580, 2671), 'scipy.optimize.least_squares', 'least_squares', (['residuals2', 'params_guess'], {'args': '(ebar, ebar_lag, t, t_lag)', 'max_nfev': '(1000)'}), '(residuals2, params_guess, args=(ebar, ebar_lag, t, t_lag),\n max_nfev=1000)\n', (2593, 2671), False, 'from scipy.optimize import least_squares, minimize\n'), ((2399, 2415), 'numpy.diagonal', 'np.diagonal', (['cov'], {}), '(cov)\n', (2410, 2415), True, 'import numpy as np\n'), ((2768, 2784), 'numpy.diagonal', 'np.diagonal', (['cov'], {}), '(cov)\n', (2779, 2784), True, 'import numpy as np\n'), ((3542, 3564), 'numpy.exp', 'np.exp', (['(-x * x * decay)'], {}), '(-x * x * decay)\n', (3548, 3564), True, 'import numpy as np\n'), ((3043, 3061), 'numpy.sqrt', 'np.sqrt', (['cov[0, 1]'], {}), '(cov[0, 1])\n', (3050, 3061), True, 'import numpy as np\n'), ((3510, 3539), 'numpy.sin', 'np.sin', (['(x * freq + phaseshift)'], {}), '(x * freq + phaseshift)\n', (3516, 3539), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
""" Use torchMoji to score texts for emoji distribution.
The resulting emoji ids (0-63) correspond to the mapping
in emoji_overview.png file at the root of the torchMoji repo.
Writes the result to a csv file.
"""
from __future__ import print_function, division, unicode_literals
# import example_helper
import json
import csv
import numpy as np
from collections import Counter
from torchmoji.sentence_tokenizer import SentenceTokenizer
from torchmoji.model_def import torchmoji_emojis
from torchmoji.global_variables import PRETRAINED_PATH, VOCAB_PATH
# OUTPUT_PATH = 'test_sentences.csv'
# TEST_SENTENCES = ['I love mom\'s cooking',
# 'I love how you never reply back..',
# 'I love cruising with my homies',
# 'I love messing with yo mind!!',
# 'I love you and now you\'re just gone..',
# 'This is shit',
# 'This is the shit']
def top_elements(array, k):
    """Return the indices of the *k* largest entries of *array*, highest first."""
    top_unordered = np.argpartition(array, -k)[-k:]
    ascending = np.argsort(array[top_unordered])
    return top_unordered[ascending][::-1]
def most_frequent(List):
    """Return the element of *List* with the highest occurrence count."""
    candidates = set(List)
    return max(candidates, key=lambda item: List.count(item))
# Load the torchMoji vocabulary and build the tokenizer and model once at
# module import time; they are reused by every call to get_emoji_score.
with open(VOCAB_PATH, 'r') as f:
    vocabulary = json.load(f)
# Maximum number of tokens per sentence accepted by the tokenizer.
maxlen = 30
st = SentenceTokenizer(vocabulary, maxlen)
# Pretrained torchMoji network mapping token sequences to emoji probabilities.
model = torchmoji_emojis(PRETRAINED_PATH)
def get_emoji_score(sentence, label):
    """Score *sentence* with torchMoji and check its top emojis against *label*.

    For every sentence the model's two most probable emojis are compared with
    the code-point list registered for the emotion *label*; the per-sentence
    hit/miss flags are averaged into an accuracy value.

    Returns a tuple: (LaTeX-ish string rendering the four most common top
    emoji codes with their counts, mean accuracy).
    """
    label_codes = {"angry": anger, "disgusted": disgust, "terrified": fear,
                   "joyful": joy, "sad": sad, "surprised": surprise}[label]
    tokenized, _, _ = st.tokenize_sentences(sentence)
    prob = model(tokenized)
    top_codes = []
    hits = []
    for idx in range(len(sentence)):
        best_two = top_elements(prob[idx], 2)
        matched = any(emoji_list[j][1] in label_codes for j in best_two)
        hits.append(1 if matched else 0)
        top_codes.append([emoji_list[j][1].upper() for j in best_two])
    counter = Counter([code for pair in top_codes for code in pair])
    rendered = "".join(f'({c})' + r'{\NotoEmoji\symbol{"' + str(w) + '}}'
                       for w, c in counter.most_common(4))
    return rendered, np.mean(hits)
# Unicode code points (hex, without the "U+" prefix) of the emojis associated
# with each emotion class; get_emoji_score tests predicted emoji codes for
# membership in these lists.
joy = ["1f602", "1f604","1f60a","1f60b", "1f60c", "1f60d", "1f60e", "1f60f", "263a", "1f618",
"1f61c","2764", "1f496", "1f495", "1f601","2665","270c","2661","1f3a7","1f49c","1f496","1f499"]
sad = ["1f614", "1f615", "1f62b", "1f629", "1f622",
"1f62a", "1f62d", "1f494"]
anger= ["1f62c", "1f620", "1f610","1f611", "1f621", "1f616", "1f624"]
disgust = ["1f637"]
fear = ["1f605"]
surprise = ["1f633"]
# [rendered emoji character, code-point hex string] pairs, indexed by the
# model's output class id (index i is the emoji for torchMoji class i;
# presumably matching emoji_overview.png in the torchMoji repo -- TODO confirm).
emoji_list = [["\U0001f602","1f602"],
              ["\U0001f612","1f612"],
              ["\U0001f629","1f629"],
              ["\U0001f62d","1f62d"],
              ["\U0001f60d","1f60d"],
              ["\U0001f614","1f614"],
              ["\U0001f44c","1f44c"],
              ["\U0001f60a","1f60a"],
              ["\u2764","2764"],
              ["\U0001f60f","1f60f"],
              ["\U0001f601","1f601"],
              ["\U0001f3b6","1f3b6"],
              ["\U0001f633","1f633"],
              ["\U0001f4af","1f4af"],
              ["\U0001f634","1f634"],
              ["\U0001f60c","1f60c"],
              ["\u263a","263a"],
              ["\U0001f64c","1f64c"],
              ["\U0001f495","1f495"],
              ["\U0001f611","1f611"],
              ["\U0001f605","1f605"],
              ["\U0001f64f","1f64f"],
              ["\U0001f615","1f615"],
              ["\U0001f618","1f618"],
              ["\u2665","2665"],
              ["\U0001f610","1f610"],
              ["\U0001f481","1f481"],
              ["\U0001f61e","1f61e"],
              ["\U0001f648","1f648"],
              ["\U0001f62b","1f62b"],
              ["\u270c","270c"],
              ["\U0001f60e","1f60e"],
              ["\U0001f621","1f621"],
              ["\U0001f44d","1f44d"],
              ["\U0001f622","1f622"],
              ["\U0001f62a","1f62a"],
              ["\U0001f60b","1f60b"],
              ["\U0001f624","1f624"],
              ["\u270b","270b"],
              ["\U0001f637","1f637"],
              ["\U0001f44f","1f44f"],
              ["\U0001f440","1f440"],
              ["\U0001f52b","1f52b"],
              ["\U0001f623","1f623"],
              ["\U0001f608","1f608"],
              ["\U0001f613","1f613"],
              ["\U0001f494","1f494"],
              ["\u2661","2661"],
              ["\U0001f3a7","1f3a7"],
              ["\U0001f64a","1f64a"],
              ["\U0001f609","1f609"],
              ["\U0001f480","1f480"],
              ["\U0001f616","1f616"],
              ["\U0001f604","1f604"],
              ["\U0001f61c","1f61c"],
              ["\U0001f620","1f620"],
              ["\U0001f645","1f645"],
              ["\U0001f4aa","1f4aa"],
              ["\U0001f44a","1f44a"],
              ["\U0001f49c","1f49c"],
              ["\U0001f496","1f496"],
              ["\U0001f499","1f499"],
              ["\U0001f62c","1f62c"],
              ["\u2728","2728"]] | [
"torchmoji.model_def.torchmoji_emojis",
"json.load",
"numpy.argsort",
"numpy.argpartition",
"torchmoji.sentence_tokenizer.SentenceTokenizer",
"numpy.mean"
] | [((1238, 1275), 'torchmoji.sentence_tokenizer.SentenceTokenizer', 'SentenceTokenizer', (['vocabulary', 'maxlen'], {}), '(vocabulary, maxlen)\n', (1255, 1275), False, 'from torchmoji.sentence_tokenizer import SentenceTokenizer\n'), ((1285, 1318), 'torchmoji.model_def.torchmoji_emojis', 'torchmoji_emojis', (['PRETRAINED_PATH'], {}), '(PRETRAINED_PATH)\n', (1301, 1318), False, 'from torchmoji.model_def import torchmoji_emojis\n'), ((1207, 1219), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1216, 1219), False, 'import json\n'), ((1007, 1033), 'numpy.argpartition', 'np.argpartition', (['array', '(-k)'], {}), '(array, -k)\n', (1022, 1033), True, 'import numpy as np\n'), ((2091, 2103), 'numpy.mean', 'np.mean', (['acc'], {}), '(acc)\n', (2098, 2103), True, 'import numpy as np\n'), ((1054, 1076), 'numpy.argsort', 'np.argsort', (['array[ind]'], {}), '(array[ind])\n', (1064, 1076), True, 'import numpy as np\n')] |
from __future__ import division, print_function
import pytest
import numpy as np
from modelling import gaussian_ensemble
from modelling import flared_disk_ensemble
def gaussian_annulus(N=10):
    """Return an annulus populated with Gaussian line profiles.

    Deliberately NOT a ``@pytest.fixture``: the tests below call this helper
    directly with an explicit ``N``, and calling a fixture-decorated function
    directly raises an error in pytest >= 4.

    Args:
        N: Number of position angles (spectra) sampled around the annulus.

    Returns:
        The ensemble produced by ``gaussian_ensemble``.
    """
    return gaussian_ensemble(vrot=1500., N=N, return_ensemble=True)
def flared_annulus(N=10):
    """Return an annulus with optically thick lines from a flared disk.

    Deliberately NOT a ``@pytest.fixture``: the tests below call this helper
    directly with an explicit ``N``, and calling a fixture-decorated function
    directly raises an error in pytest >= 4.

    Args:
        N: Number of position angles (spectra) sampled around the annulus.

    Returns:
        The ensemble produced by ``flared_disk_ensemble``.
    """
    return flared_disk_ensemble(N=N, return_ensemble=True)
def test_number_of_spectra():
    """Each annulus must expose exactly N spectra."""
    for sample_size in (2, 5, 10):
        for build in (gaussian_annulus, flared_annulus):
            assert build(sample_size).spectra.shape[0] == sample_size
def test_channel_width():
    """Velocity axes must be sampled on a uniform 30 m/s grid."""
    for build in (gaussian_annulus, flared_annulus):
        annulus = build()
        assert annulus.channel == 30.
        assert np.diff(annulus.velax).mean() == 30.
def test_number_of_position_angles():
    """There must be one position angle per spectrum."""
    for build in (gaussian_annulus, flared_annulus):
        annulus = build()
        assert annulus.spectra.shape[0] == annulus.theta.size
def test_no_empty_spectra():
    """No spectrum may be identically zero."""
    for build in (gaussian_annulus, flared_annulus):
        spectra = build().spectra
        assert not np.any(np.all(spectra == 0, axis=1))
| [
"numpy.diff",
"modelling.flared_disk_ensemble",
"numpy.all",
"modelling.gaussian_ensemble"
] | [((269, 326), 'modelling.gaussian_ensemble', 'gaussian_ensemble', ([], {'vrot': '(1500.0)', 'N': 'N', 'return_ensemble': '(True)'}), '(vrot=1500.0, N=N, return_ensemble=True)\n', (286, 326), False, 'from modelling import gaussian_ensemble\n'), ((435, 482), 'modelling.flared_disk_ensemble', 'flared_disk_ensemble', ([], {'N': 'N', 'return_ensemble': '(True)'}), '(N=N, return_ensemble=True)\n', (455, 482), False, 'from modelling import flared_disk_ensemble\n'), ((758, 780), 'numpy.diff', 'np.diff', (['annulus.velax'], {}), '(annulus.velax)\n', (765, 780), True, 'import numpy as np\n'), ((871, 893), 'numpy.diff', 'np.diff', (['annulus.velax'], {}), '(annulus.velax)\n', (878, 893), True, 'import numpy as np\n'), ((1214, 1250), 'numpy.all', 'np.all', (['(annulus.spectra == 0)'], {'axis': '(1)'}), '(annulus.spectra == 0, axis=1)\n', (1220, 1250), True, 'import numpy as np\n'), ((1311, 1347), 'numpy.all', 'np.all', (['(annulus.spectra == 0)'], {'axis': '(1)'}), '(annulus.spectra == 0, axis=1)\n', (1317, 1347), True, 'import numpy as np\n')] |
"""module containing the table interpolation function"""
import numpy as np
class NotInRangeError(ValueError):
    """Raised when a coordinate lies outside the table's permitted range."""

    def __init__(self, var, num, range_):
        """Store the offending value/range and format the error message.

        :param str var: name of the variable being assigned
        :param float num: value that was rejected
        :param tuple range_: permitted (low, high) range
        """
        message = f"{var} = {num} not in range {range_}"
        self.num = num
        self.range_ = range_
        self.msg = message
        super().__init__(message)
def table_interpolation(x_row, x_col, data):
    """Bilinearly interpolate a value from a lookup table.

    The table's first column (``data[:, 0]``) and first row (``data[0, :]``)
    hold the known coordinate values; ``data[0, 0]`` is an unused corner.
    Queries must lie within the axis ranges (from index 1 to the last entry).

    Compared with the earlier version, the unused ``x_row_high``/``x_col_high``
    lookups and their ``try``/``except IndexError`` guards were removed: those
    values were computed but never read, so behaviour is unchanged.

    :keyword x_row: coordinate along the first-column axis
    :type x_row: float
    :keyword x_col: coordinate along the first-row axis
    :type x_col: float
    :keyword data: the table as a numpy array, including its axis row/column
    :type data: np.ndarray
    :raises NotInRangeError: if a coordinate is outside the table range
    :rtype: float
    """
    first_column = data[:, 0]  # known x_row coordinates
    first_row = data[0, :]     # known x_col coordinates
    if x_col > first_row[-1] or x_col < first_row[1]:
        raise NotInRangeError("x_col", x_col, (first_row[1], first_row[-1]))
    if x_row > first_column[-1] or x_row < first_column[1]:
        raise NotInRangeError("x_row", x_row, (first_column[1], first_column[-1]))
    # upper-bound indexes: first axis entries strictly greater than the queries
    index1 = np.searchsorted(first_column, x_row, side='right')
    index2 = np.searchsorted(first_row, x_col, side='right')
    # nearest known coordinates at or below the queries
    x_row_low = data[index1 - 1, 0]
    x_col_low = data[0, index2 - 1]
    if x_row == x_row_low and x_col == x_col_low:
        # both coordinates are tabulated: direct lookup, no interpolation
        return data[index1 - 1, index2 - 1]
    if x_row == x_row_low:
        # x_row is known; interpolate x_col along that row
        return np.interp(x_col, data[0, :], data[index1 - 1, :])
    if x_col == x_col_low:
        # x_col is known; interpolate x_row along that column
        return np.interp(x_row, data[:, 0], data[:, index2 - 1])
    # both unknown: interpolate along the two neighbouring columns,
    # then between the two column results
    lower_bound = np.interp(x_row, data[:, 0], data[:, index2 - 1])
    upper_bound = np.interp(x_row, data[:, 0], data[:, index2])
    return np.interp(x_col,
                     np.array([data[0, index2 - 1], data[0, index2]]),
                     np.array([lower_bound, upper_bound]))
| [
"numpy.interp",
"numpy.array",
"numpy.searchsorted"
] | [((1442, 1492), 'numpy.searchsorted', 'np.searchsorted', (['first_column', 'x_row'], {'side': '"""right"""'}), "(first_column, x_row, side='right')\n", (1457, 1492), True, 'import numpy as np\n'), ((1506, 1553), 'numpy.searchsorted', 'np.searchsorted', (['first_row', 'x_col'], {'side': '"""right"""'}), "(first_row, x_col, side='right')\n", (1521, 1553), True, 'import numpy as np\n'), ((2418, 2467), 'numpy.interp', 'np.interp', (['x_col', 'data[0, :]', 'data[index1 - 1, :]'], {}), '(x_col, data[0, :], data[index1 - 1, :])\n', (2427, 2467), True, 'import numpy as np\n'), ((2565, 2614), 'numpy.interp', 'np.interp', (['x_row', 'data[:, 0]', 'data[:, index2 - 1]'], {}), '(x_row, data[:, 0], data[:, index2 - 1])\n', (2574, 2614), True, 'import numpy as np\n'), ((2691, 2740), 'numpy.interp', 'np.interp', (['x_row', 'data[:, 0]', 'data[:, index2 - 1]'], {}), '(x_row, data[:, 0], data[:, index2 - 1])\n', (2700, 2740), True, 'import numpy as np\n'), ((2763, 2808), 'numpy.interp', 'np.interp', (['x_row', 'data[:, 0]', 'data[:, index2]'], {}), '(x_row, data[:, 0], data[:, index2])\n', (2772, 2808), True, 'import numpy as np\n'), ((2826, 2874), 'numpy.array', 'np.array', (['[data[0, index2 - 1], data[0, index2]]'], {}), '([data[0, index2 - 1], data[0, index2]])\n', (2834, 2874), True, 'import numpy as np\n'), ((2892, 2928), 'numpy.array', 'np.array', (['[lower_bound, upper_bound]'], {}), '([lower_bound, upper_bound])\n', (2900, 2928), True, 'import numpy as np\n'), ((2946, 2978), 'numpy.interp', 'np.interp', (['x_col', 'range_', 'bounds'], {}), '(x_col, range_, bounds)\n', (2955, 2978), True, 'import numpy as np\n')] |
import numpy as np
from QR import QR
# Interactive driver: read a square matrix from the user, run the QR
# eigenvalue routine, and print the diagonal of the resulting matrix.
# Matrix order (number of rows/columns).
orden = int(input("Introduce el orden de la matriz: "))
A = np.zeros((orden,orden), np.float64)
# Fill the matrix entry by entry (prompts are 1-indexed for the user).
for i in range(orden):
    for j in range(orden):
        A[i,j] = float(input(f"Elemento[{i+1}:{j+1}]: "))
# QR(A) presumably iterates QR decompositions until convergence -- TODO confirm.
finalA = QR(A)
print("\nEigenvalores: ")
# The eigenvalue estimates sit on the diagonal of the returned matrix.
for i in range(orden):
    for j in range(orden):
        if i == j:
            print(f"{finalA[i,j]}", end=", ")
"numpy.zeros",
"QR.QR"
] | [((99, 135), 'numpy.zeros', 'np.zeros', (['(orden, orden)', 'np.float64'], {}), '((orden, orden), np.float64)\n', (107, 135), True, 'import numpy as np\n'), ((255, 260), 'QR.QR', 'QR', (['A'], {}), '(A)\n', (257, 260), False, 'from QR import QR\n')] |
# -*- coding: utf-8 -*-
# @Author: jadesauve
# @Date: 2020-04-15 14:50:08
# @Last Modified by: jadesauve
# @Last Modified time: 2020-04-15 14:51:12
"""
code to compute the squared coherence between two variables avg over 21 frequency bands
adapted from steve riser
"""
# file to read HOT T and S data from WHOTS mooring and to
# estimate coherence and phase between T and S
#
import numpy as np
import os
import time
import matplotlib.pyplot as plt
from netCDF4 import Dataset
#
# read and plot WHOI HOT mooring data
#
# define the netcdf reading function
#
def import_data(file_name):
    '''Read every variable of a netCDF file into a plain dict.

    Works for any file whose variables can be listed directly; keys are the
    variable names, values the full (masked) arrays.'''
    dataset = Dataset(file_name, mode = 'r')
    data = {str(name): dataset.variables[name][:] for name in dataset.variables}
    dataset.close()
    return data
#
# define a function to band-average the spectrum to eliminate some noise
#
def band_average(fft_var1,fft_var2,frequency,n_av):
    """Band-average an auto- or cross-spectrum of two FFT results.

    fft_var1 and fft_var2 are complex FFT outputs (pass the same array twice
    for an autospectrum). n_av is the number of raw bands averaged per
    estimate (nicest if odd). The lowest-frequency estimate averages only the
    first n_av//2+1 bands and is reported at frequency 0.

    Fixes relative to the earlier version:
    * the lowest-band *phase* was divided by n_av although only n_av//2+1
      points were summed (the amplitude correctly used n_av//2+1) -- both now
      use the true number of points averaged;
    * the output arrays were truncated one element short, silently dropping
      the last band estimate;
    * the arbitrary 100,000-point buffer limit is gone (lists grow as needed).

    Returns:
        (spec_amp_av, spec_phase_av, freq_av, count) where count is the
        number of band estimates (the length of the returned arrays).
    """
    n_spec=len(fft_var1)
    n_av2=int(n_av//2+1)
    # cross-spectrum (equals the autospectrum when var1 is var2)
    cross=fft_var1*np.conj(fft_var2)
    spectrum_amp=np.absolute(cross)
    spectrum_phase=np.angle(cross,deg=True)
    #
    # lowest-frequency band: average over the first n_av2 points, freq 0
    #
    spec_amp_av=[np.mean(spectrum_amp[0:n_av2])]
    spec_phase_av=[np.mean(spectrum_phase[0:n_av2])]
    freq_av=[0.0]
    #
    # remaining bands: n_av points each, tagged with the centre frequency
    #
    for i in range(n_av2,n_spec-n_av,n_av):
        spec_amp_av.append(np.mean(spectrum_amp[i:i+n_av]))
        spec_phase_av.append(np.mean(spectrum_phase[i:i+n_av]))
        freq_av.append(frequency[i+n_av//2])
    count=len(freq_av)
    return np.array(spec_amp_av),np.array(spec_phase_av),np.array(freq_av),count
#
# main program
#
# define the input and output files
#
# Input netCDF file and output figure paths (hard-coded to the author's machine).
path_in='/Users/riser/Desktop/ocean.569A/datasets/OS_WHOTS_201606_D_MICROCAT-025m.nc'
path_out1='/Users/riser/Desktop/ocean.569A/HOT.coherence.jpg'
path_out2='/Users/riser/Desktop/ocean.569A/HOT.phase.jpg'
#
# read the input file (netcdf)
#
HOT_data=import_data(path_in)
#
# determine the length of the data and the resulting number of spectral estimates
#
nn=len(HOT_data['TIME'])
print ('nn=',nn)
# number of one-sided rfft frequency bins for nn samples
mm=int(nn/2+1)
#
# define the data arrays
#
time_meas=np.zeros(nn)
time_meas_days=np.zeros(nn)
temp_meas=np.zeros(nn)
sal_meas=np.zeros(nn)
tt=np.zeros(nn)
ss=np.zeros(nn)
freq=np.zeros(mm)
#
# parse the time, temperature, and salinity data from the input file
# (time re-based to the first sample; hours and days respectively)
for i in range(0,nn):
    time_meas[i]=24.*(float(HOT_data['TIME'][i])-float(HOT_data['TIME'][0]))
    time_meas_days[i]=(float(HOT_data['TIME'][i])-float(HOT_data['TIME'][0]))
    temp_meas[i]=float(HOT_data['TEMP'][i])
    sal_meas[i]=float(HOT_data['PSAL'][i])
#
# remove the temperature and salinity means from the data
#
tt=temp_meas-np.mean(temp_meas)
ss=sal_meas-np.mean(sal_meas)
#
# determine the frequencies for the spectrum
#
# sampling interval in days (0.00208333 d = 3 min; presumably the mooring
# cadence -- TODO confirm against the dataset metadata)
delt=0.00208333
T_length=nn
pi=np.pi
# fundamental angular frequency in radians/day
omega0=2.*pi/(T_length*delt)
#
for i in range(0,mm):
    freq[i]=i*omega0
#
# compute the fft of the input data (temperature and salinity here)
# the fft will yield a set of complex numbers
# also compute their complex conjugates
# use the function band_average to get the spectral estimates
# estimate the coherence and phase using the results of the function
#
# number of raw spectral bands averaged per estimate
n_av=21
zz1=np.fft.rfft(tt,n=nn)
zz2=np.fft.rfft(ss,n=nn)
zz2_star=np.conj(zz2)
temp_spec,temp_phase,freq_av,count=band_average(zz1,zz1,freq,n_av)
salt_spec,salt_phase,freq_av,count=band_average(zz2,zz2,freq,n_av)
cospec_amp,cospec_phase,freq_av,count=band_average(zz1,zz2_star,freq,n_av)
# squared coherence: |cross-spectrum|^2 / (autospectrum_T * autospectrum_S)
coh_sq=cospec_amp**2/(temp_spec*salt_spec)
#
# begin plotting the coherence and phase
#
# first the coherence
#
# Squared-coherence plot.
fig1=plt.figure(figsize=(9,7))
plt.ylim(0.,1.)
plt.semilogx(freq_av,coh_sq,color='purple')
plt.xlabel('$\omega$ (radians/day)',fontsize=15,ha='center')
plt.ylabel('Squared Coherence $\it{T}$-$\it{S}$',fontsize=15)
freq_nyquist=pi/delt
freq_T=2.*pi/(nn*delt)
# dashed vertical marker at the Nyquist frequency
plt.plot([freq_nyquist,freq_nyquist],[0.,1.],'--k')
plt.text(8.e2,0.2,'$\omega_{max}$',fontsize=12,color='firebrick')
plt.text(1.2,0.88,'n_av = 21')
plt.grid(which='both')
plt.title('MBARI M1 Mooring')
# Save BEFORE showing: plt.show() consumes the active figure on interactive
# backends, so the previous show-then-savefig order wrote a blank image.
# (The phase plot below already used the correct order.)
plt.savefig(path_out1)
plt.show()
#
# now the phase
#
# Cross-spectral phase plot (degrees, from band_average with deg=True).
fig2=plt.figure(figsize=(9,7))
plt.ylim(-180.,180.)
plt.semilogx(freq_av,cospec_phase,color='orange')
plt.xlabel('$\omega$ (radians/day)',fontsize=15,ha='center')
plt.ylabel('Phase $\it{T}$-$\it{S}$, degrees',fontsize=15)
freq_nyquist=pi/delt
freq_T=2.*pi/(nn*delt)
# dashed vertical marker at the Nyquist frequency
plt.plot([freq_nyquist,freq_nyquist],[-180.,180.],'--k')
plt.text(8.e2,-110.,'$\omega_{max}$',fontsize=12,color='firebrick')
plt.text(1.2,130.,'n_av = 21')
plt.grid(which='both')
plt.title('MBARI M1 Mooring')
plt.savefig(path_out2)
plt.show() | [
"matplotlib.pyplot.title",
"numpy.conj",
"netCDF4.Dataset",
"numpy.fft.rfft",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylim",
"numpy.zeros",
"matplotlib.pyplot.text",
"matplotlib.pyplot.figure",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel... | [((3174, 3186), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3182, 3186), True, 'import numpy as np\n'), ((3202, 3214), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3210, 3214), True, 'import numpy as np\n'), ((3225, 3237), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3233, 3237), True, 'import numpy as np\n'), ((3247, 3259), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3255, 3259), True, 'import numpy as np\n'), ((3263, 3275), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3271, 3275), True, 'import numpy as np\n'), ((3279, 3291), 'numpy.zeros', 'np.zeros', (['nn'], {}), '(nn)\n', (3287, 3291), True, 'import numpy as np\n'), ((3297, 3309), 'numpy.zeros', 'np.zeros', (['mm'], {}), '(mm)\n', (3305, 3309), True, 'import numpy as np\n'), ((4228, 4249), 'numpy.fft.rfft', 'np.fft.rfft', (['tt'], {'n': 'nn'}), '(tt, n=nn)\n', (4239, 4249), True, 'import numpy as np\n'), ((4253, 4274), 'numpy.fft.rfft', 'np.fft.rfft', (['ss'], {'n': 'nn'}), '(ss, n=nn)\n', (4264, 4274), True, 'import numpy as np\n'), ((4283, 4295), 'numpy.conj', 'np.conj', (['zz2'], {}), '(zz2)\n', (4290, 4295), True, 'import numpy as np\n'), ((4622, 4648), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (4632, 4648), True, 'import matplotlib.pyplot as plt\n'), ((4648, 4666), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(0.0)', '(1.0)'], {}), '(0.0, 1.0)\n', (4656, 4666), True, 'import matplotlib.pyplot as plt\n'), ((4664, 4709), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['freq_av', 'coh_sq'], {'color': '"""purple"""'}), "(freq_av, coh_sq, color='purple')\n", (4676, 4709), True, 'import matplotlib.pyplot as plt\n'), ((4708, 4771), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$ (radians/day)"""'], {'fontsize': '(15)', 'ha': '"""center"""'}), "('$\\\\omega$ (radians/day)', fontsize=15, ha='center')\n", (4718, 4771), True, 'import matplotlib.pyplot as plt\n'), ((4769, 4833), 
'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Squared Coherence $\\\\it{T}$-$\\\\it{S}$"""'], {'fontsize': '(15)'}), "('Squared Coherence $\\\\it{T}$-$\\\\it{S}$', fontsize=15)\n", (4779, 4833), True, 'import matplotlib.pyplot as plt\n'), ((4875, 4932), 'matplotlib.pyplot.plot', 'plt.plot', (['[freq_nyquist, freq_nyquist]', '[0.0, 1.0]', '"""--k"""'], {}), "([freq_nyquist, freq_nyquist], [0.0, 1.0], '--k')\n", (4883, 4932), True, 'import matplotlib.pyplot as plt\n'), ((4927, 4998), 'matplotlib.pyplot.text', 'plt.text', (['(800.0)', '(0.2)', '"""$\\\\omega_{max}$"""'], {'fontsize': '(12)', 'color': '"""firebrick"""'}), "(800.0, 0.2, '$\\\\omega_{max}$', fontsize=12, color='firebrick')\n", (4935, 4998), True, 'import matplotlib.pyplot as plt\n'), ((4993, 5025), 'matplotlib.pyplot.text', 'plt.text', (['(1.2)', '(0.88)', '"""n_av = 21"""'], {}), "(1.2, 0.88, 'n_av = 21')\n", (5001, 5025), True, 'import matplotlib.pyplot as plt\n'), ((5024, 5046), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (5032, 5046), True, 'import matplotlib.pyplot as plt\n'), ((5047, 5076), 'matplotlib.pyplot.title', 'plt.title', (['"""MBARI M1 Mooring"""'], {}), "('MBARI M1 Mooring')\n", (5056, 5076), True, 'import matplotlib.pyplot as plt\n'), ((5077, 5087), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5085, 5087), True, 'import matplotlib.pyplot as plt\n'), ((5088, 5110), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path_out1'], {}), '(path_out1)\n', (5099, 5110), True, 'import matplotlib.pyplot as plt\n'), ((5111, 5121), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5119, 5121), True, 'import matplotlib.pyplot as plt\n'), ((5147, 5173), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(9, 7)'}), '(figsize=(9, 7))\n', (5157, 5173), True, 'import matplotlib.pyplot as plt\n'), ((5173, 5196), 'matplotlib.pyplot.ylim', 'plt.ylim', (['(-180.0)', '(180.0)'], {}), '(-180.0, 180.0)\n', (5181, 5196), True, 'import 
matplotlib.pyplot as plt\n'), ((5194, 5245), 'matplotlib.pyplot.semilogx', 'plt.semilogx', (['freq_av', 'cospec_phase'], {'color': '"""orange"""'}), "(freq_av, cospec_phase, color='orange')\n", (5206, 5245), True, 'import matplotlib.pyplot as plt\n'), ((5244, 5307), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""$\\\\omega$ (radians/day)"""'], {'fontsize': '(15)', 'ha': '"""center"""'}), "('$\\\\omega$ (radians/day)', fontsize=15, ha='center')\n", (5254, 5307), True, 'import matplotlib.pyplot as plt\n'), ((5305, 5366), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Phase $\\\\it{T}$-$\\\\it{S}$, degrees"""'], {'fontsize': '(15)'}), "('Phase $\\\\it{T}$-$\\\\it{S}$, degrees', fontsize=15)\n", (5315, 5366), True, 'import matplotlib.pyplot as plt\n'), ((5408, 5470), 'matplotlib.pyplot.plot', 'plt.plot', (['[freq_nyquist, freq_nyquist]', '[-180.0, 180.0]', '"""--k"""'], {}), "([freq_nyquist, freq_nyquist], [-180.0, 180.0], '--k')\n", (5416, 5470), True, 'import matplotlib.pyplot as plt\n'), ((5465, 5539), 'matplotlib.pyplot.text', 'plt.text', (['(800.0)', '(-110.0)', '"""$\\\\omega_{max}$"""'], {'fontsize': '(12)', 'color': '"""firebrick"""'}), "(800.0, -110.0, '$\\\\omega_{max}$', fontsize=12, color='firebrick')\n", (5473, 5539), True, 'import matplotlib.pyplot as plt\n'), ((5533, 5566), 'matplotlib.pyplot.text', 'plt.text', (['(1.2)', '(130.0)', '"""n_av = 21"""'], {}), "(1.2, 130.0, 'n_av = 21')\n", (5541, 5566), True, 'import matplotlib.pyplot as plt\n'), ((5564, 5586), 'matplotlib.pyplot.grid', 'plt.grid', ([], {'which': '"""both"""'}), "(which='both')\n", (5572, 5586), True, 'import matplotlib.pyplot as plt\n'), ((5587, 5616), 'matplotlib.pyplot.title', 'plt.title', (['"""MBARI M1 Mooring"""'], {}), "('MBARI M1 Mooring')\n", (5596, 5616), True, 'import matplotlib.pyplot as plt\n'), ((5617, 5639), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path_out2'], {}), '(path_out2)\n', (5628, 5639), True, 'import matplotlib.pyplot as plt\n'), ((5640, 5650), 
'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5648, 5650), True, 'import matplotlib.pyplot as plt\n'), ((752, 780), 'netCDF4.Dataset', 'Dataset', (['file_name'], {'mode': '"""r"""'}), "(file_name, mode='r')\n", (759, 780), False, 'from netCDF4 import Dataset\n'), ((1581, 1595), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (1589, 1595), True, 'import numpy as np\n'), ((1614, 1628), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (1622, 1628), True, 'import numpy as np\n'), ((1641, 1655), 'numpy.zeros', 'np.zeros', (['nmax'], {}), '(nmax)\n', (1649, 1655), True, 'import numpy as np\n'), ((3717, 3735), 'numpy.mean', 'np.mean', (['temp_meas'], {}), '(temp_meas)\n', (3724, 3735), True, 'import numpy as np\n'), ((3748, 3765), 'numpy.mean', 'np.mean', (['sal_meas'], {}), '(sal_meas)\n', (3755, 3765), True, 'import numpy as np\n'), ((2229, 2262), 'numpy.mean', 'np.mean', (['spectrum_amp[i:i + n_av]'], {}), '(spectrum_amp[i:i + n_av])\n', (2236, 2262), True, 'import numpy as np\n'), ((2284, 2319), 'numpy.mean', 'np.mean', (['spectrum_phase[i:i + n_av]'], {}), '(spectrum_phase[i:i + n_av])\n', (2291, 2319), True, 'import numpy as np\n'), ((1836, 1853), 'numpy.conj', 'np.conj', (['fft_var2'], {}), '(fft_var2)\n', (1843, 1853), True, 'import numpy as np\n'), ((1892, 1909), 'numpy.conj', 'np.conj', (['fft_var2'], {}), '(fft_var2)\n', (1899, 1909), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This module deals with hexagonal twin structure.
"""
from pprint import pprint
import numpy as np
from phonopy.structure.atoms import atom_data, symbol_map
from twinpy.properties.hexagonal import (get_hcp_atom_positions,
check_cell_is_hcp)
from twinpy.properties.twinmode import TwinIndices
from twinpy.structure.lattice import CrystalLattice
def get_numbers_from_symbols(symbols:list):
    """
    Translate element symbols into atomic numbers.

    Args:
        symbols: Atomic symbols.
    """
    return [ symbol_map[element] for element in symbols ]
def get_symbols_from_numbers(numbers:list):
    """
    Translate atomic numbers into element symbols.

    Args:
        numbers: Atomic numbers.
    """
    return [ atom_data[num][1] for num in numbers ]
def check_same_cells(first_cell:tuple,
                     second_cell:tuple,
                     raise_error:bool=False,
                     atol:float=1e-6) -> bool:
    """
    Compare two (lattice, scaled_positions, symbols) cells.

    Args:
        first_cell: First cell.
        second_cell: Second cell.
        raise_error: If True, raise an assertion error on mismatch.
        atol: Absolute tolerance for the numeric comparisons.

    Returns:
        bool: True if the two cells agree within tolerance.
    """
    lattices_match = np.allclose(first_cell[0], second_cell[0], atol=atol)
    positions_match = np.allclose(first_cell[1], second_cell[1], atol=atol)
    symbols_match = (first_cell[2] == second_cell[2])
    cells_match = lattices_match and positions_match and symbols_match
    if raise_error and not cells_match:
        np.testing.assert_allclose(first_cell[0], second_cell[0], atol=atol)
        np.testing.assert_allclose(first_cell[1], second_cell[1], atol=atol)
        assert (first_cell[2] == second_cell[2])
    return cells_match
def get_atom_positions_from_lattice_points(lattice_points:np.array,
                                           atoms_from_lp:np.array) -> np.array:
    """
    Embed the primitive atom basis at every lattice point.

    Both inputs and the output use the same (cartesian) coordinate frame.

    Args:
        lattice_points: Lattice points.
        atoms_from_lp: Atom positions relative to a lattice point.

    Returns:
        np.array: All atom positions, grouped by lattice point.
    """
    shifted = [(point + atoms_from_lp).tolist() for point in lattice_points]
    return np.array([position for group in shifted for position in group])
class _BaseTwinStructure():
    """
    Base structure class which is inherited by
    ShearStructure class and TwinBoundaryStructure class.
    """

    def __init__(
           self,
           lattice:np.array,
           symbol:str,
           twinmode:str,
           wyckoff:str='c',
           ):
        """
        Setup.

        Args:
            lattice: Lattice.
            symbol: Element symbol.
            twinmode: Twin mode.
            wyckoff: No.194 Wycoff letter ('c' or 'd').
        """
        atoms_from_lp = get_hcp_atom_positions(wyckoff)
        symbols = [symbol] * 2
        crylat = CrystalLattice(lattice=lattice)
        # reject input cells which are not hexagonal close packed
        check_cell_is_hcp(cell=(lattice, atoms_from_lp, symbols))
        self._hexagonal_lattice = lattice
        self._a, _, self._c = crylat.abc
        self._r = self._c / self._a
        self._symbol = symbol
        self._wyckoff = wyckoff
        self._atoms_from_lattice_points = \
                get_hcp_atom_positions(wyckoff=self._wyckoff)
        self._natoms = 2
        self._twinmode = None
        self._indices = None
        self._set_twinmode(twinmode=twinmode)
        self._xshift = None
        self._yshift = None
        self._expansion_ratios = np.ones(3)
        self._output_structure = \
                {'lattice': self._hexagonal_lattice,
                 'lattice_points': {
                     'white': np.array([0.,0.,0.])},
                 'atoms_from_lattice_points': {
                     'white': self._atoms_from_lattice_points},
                 'symbols': [self._symbol] * 2}

    def _set_twinmode(self, twinmode:str):
        """
        Set parent twinmode and its twin indices.

        Args:
            twinmode: Twinmode.
        """
        self._twinmode = twinmode
        self._indices = TwinIndices(twinmode=self._twinmode,
                                    lattice=self._hexagonal_lattice,
                                    wyckoff=self._wyckoff)

    @property
    def r(self):
        """
        Lattice ratio r = c / a .
        """
        return self._r

    @property
    def symbol(self):
        """
        Element symbol.
        """
        return self._symbol

    @property
    def wyckoff(self):
        """
        Wyckoff letter.
        """
        return self._wyckoff

    @property
    def atoms_from_lattice_points(self):
        """
        Atoms from lattice points.
        """
        return self._atoms_from_lattice_points

    @property
    def hexagonal_lattice(self):
        """
        Hexagonal lattice.
        """
        return self._hexagonal_lattice

    @property
    def xshift(self):
        """
        Structure x shift.
        """
        return self._xshift

    @property
    def yshift(self):
        """
        Structure y shift.
        """
        return self._yshift

    @property
    def natoms(self):
        """
        Number of atoms.
        """
        return self._natoms

    @property
    def twinmode(self):
        """
        Twinmode.
        """
        return self._twinmode

    @property
    def indices(self):
        """
        Indices of twinmode.
        """
        return self._indices

    @property
    def expansion_ratios(self):
        """
        Expansion ratios which is applied when you run 'get_cell_for_export'.
        """
        return self._expansion_ratios

    def set_expansion_ratios(self, expansion_ratios:np.array):
        """
        Set expansion ratios which is applied
        when you run 'get_cell_for_export'.

        Args:
            expansion_ratios: Length-3 array of expansion ratios applied
                              along the a-, b- and c-axis.
        """
        # np.array both copies the input and accepts list/tuple input;
        # the previous '.copy()'-first approach crashed on tuples.
        _expansion_ratios = np.array(expansion_ratios)
        assert _expansion_ratios.shape == (3,), \
            "Shape of expansion_ratios is {}, which must be (3,)".format(
                    _expansion_ratios.shape)
        self._expansion_ratios = _expansion_ratios

    @property
    def output_structure(self):
        """
        Built structure.
        """
        return self._output_structure

    def get_cell_for_export(self,
                            get_lattice:bool=False,
                            move_atoms_into_unitcell:bool=True,
                            ) -> tuple:
        """
        Get cell for export.

        Args:
            get_lattice: Get lattice points not crystal structure.
            move_atoms_into_unitcell: if True, move atoms to unitcell.

        Returns:
            tuple: Output cell (lattice, scaled_positions, symbols).

        Notes:
            Lattice matrix is expanded using expansion_ratios.
            You have to run set_expansion_ratios before run this function
            if you want to expand lattice.
        """
        _dummy = {'white': 'H', 'white_tb': 'Li',
                  'black': 'He', 'black_tb': 'Be'}
        scaled_positions = []

        if get_lattice:
            symbols = []
            for color in self._output_structure['lattice_points']:
                posi = self._output_structure['lattice_points'][color]
                sym = [_dummy[color]] * len(posi)
                scaled_positions.extend(posi.tolist())
                symbols.extend(sym)
            print("replacing lattice points to elements:")
            print("    'white'   : 'H'")
            print("    'white_tb': 'Li'")
            print("    'black'   : 'He'")
            print("    'black_tb': 'Be'")
        else:
            for color in self._output_structure['lattice_points']:
                posi = get_atom_positions_from_lattice_points(
                    self._output_structure['lattice_points'][color],
                    self._output_structure['atoms_from_lattice_points'][color])
                scaled_positions.extend(posi.tolist())
            # bug fix: assign the real symbols only in this branch --
            # previously this line ran unconditionally and clobbered the
            # dummy symbols built above when get_lattice=True, leaving a
            # symbol list whose length did not match the positions.
            symbols = self._output_structure['symbols']

        scaled_positions = np.round(np.array(scaled_positions), decimals=8)
        if move_atoms_into_unitcell:
            scaled_positions %= 1.

        lattice = np.transpose(
                np.transpose(self._output_structure['lattice'])
                * self._expansion_ratios)

        return (lattice,
                scaled_positions,
                symbols)
| [
"twinpy.structure.lattice.CrystalLattice",
"numpy.allclose",
"numpy.transpose",
"twinpy.properties.hexagonal.check_cell_is_hcp",
"numpy.ones",
"numpy.array",
"numpy.testing.assert_allclose",
"twinpy.properties.hexagonal.get_hcp_atom_positions",
"twinpy.properties.twinmode.TwinIndices"
] | [((1330, 1383), 'numpy.allclose', 'np.allclose', (['first_cell[0]', 'second_cell[0]'], {'atol': 'atol'}), '(first_cell[0], second_cell[0], atol=atol)\n', (1341, 1383), True, 'import numpy as np\n'), ((1415, 1468), 'numpy.allclose', 'np.allclose', (['first_cell[1]', 'second_cell[1]'], {'atol': 'atol'}), '(first_cell[1], second_cell[1], atol=atol)\n', (1426, 1468), True, 'import numpy as np\n'), ((2539, 2565), 'numpy.array', 'np.array', (['scaled_positions'], {}), '(scaled_positions)\n', (2547, 2565), True, 'import numpy as np\n'), ((1693, 1761), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['first_cell[0]', 'second_cell[0]'], {'atol': 'atol'}), '(first_cell[0], second_cell[0], atol=atol)\n', (1719, 1761), True, 'import numpy as np\n'), ((1770, 1838), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['first_cell[1]', 'second_cell[1]'], {'atol': 'atol'}), '(first_cell[1], second_cell[1], atol=atol)\n', (1796, 1838), True, 'import numpy as np\n'), ((3165, 3196), 'twinpy.properties.hexagonal.get_hcp_atom_positions', 'get_hcp_atom_positions', (['wyckoff'], {}), '(wyckoff)\n', (3187, 3196), False, 'from twinpy.properties.hexagonal import get_hcp_atom_positions, check_cell_is_hcp\n'), ((3245, 3276), 'twinpy.structure.lattice.CrystalLattice', 'CrystalLattice', ([], {'lattice': 'lattice'}), '(lattice=lattice)\n', (3259, 3276), False, 'from twinpy.structure.lattice import CrystalLattice\n'), ((3285, 3342), 'twinpy.properties.hexagonal.check_cell_is_hcp', 'check_cell_is_hcp', ([], {'cell': '(lattice, atoms_from_lp, symbols)'}), '(cell=(lattice, atoms_from_lp, symbols))\n', (3302, 3342), False, 'from twinpy.properties.hexagonal import get_hcp_atom_positions, check_cell_is_hcp\n'), ((3584, 3629), 'twinpy.properties.hexagonal.get_hcp_atom_positions', 'get_hcp_atom_positions', ([], {'wyckoff': 'self._wyckoff'}), '(wyckoff=self._wyckoff)\n', (3606, 3629), False, 'from twinpy.properties.hexagonal import get_hcp_atom_positions, 
check_cell_is_hcp\n'), ((3849, 3859), 'numpy.ones', 'np.ones', (['(3)'], {}), '(3)\n', (3856, 3859), True, 'import numpy as np\n'), ((4391, 4487), 'twinpy.properties.twinmode.TwinIndices', 'TwinIndices', ([], {'twinmode': 'self._twinmode', 'lattice': 'self._hexagonal_lattice', 'wyckoff': 'self._wyckoff'}), '(twinmode=self._twinmode, lattice=self._hexagonal_lattice,\n wyckoff=self._wyckoff)\n', (4402, 4487), False, 'from twinpy.properties.twinmode import TwinIndices\n'), ((6288, 6315), 'numpy.array', 'np.array', (['_expansion_ratios'], {}), '(_expansion_ratios)\n', (6296, 6315), True, 'import numpy as np\n'), ((4015, 4040), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0])\n', (4023, 4040), True, 'import numpy as np\n'), ((6470, 6497), 'numpy.array', 'np.array', (['_expansion_ratios'], {}), '(_expansion_ratios)\n', (6478, 6497), True, 'import numpy as np\n'), ((8406, 8432), 'numpy.array', 'np.array', (['scaled_positions'], {}), '(scaled_positions)\n', (8414, 8432), True, 'import numpy as np\n'), ((8639, 8686), 'numpy.transpose', 'np.transpose', (["self._output_structure['lattice']"], {}), "(self._output_structure['lattice'])\n", (8651, 8686), True, 'import numpy as np\n')] |
from math import *
import matplotlib as mpl
import matplotlib.animation as animation
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import numpy as np
from .utils import get_activities_over_time_as_list
from matplotlib.patches import RegularPolygon
from matplotlib.collections import PatchCollection
def _oddr_offset_to_pixel(x, y, size=1):
p_x = size * sqrt(3) * (x + 0.5 * (y & 1))
p_y = size * 3 / 2 * y
return p_x, p_y
def _evenr_offset_to_pixel(x, y, size=1):
p_x = size * sqrt(3) * (x - 0.5 * (y & 1))
p_y = size * 3 / 2 * y
return p_x, p_y
def _get_scaled_colormap(triples, colormap, vmin, vmax):
    """Build a ScalarMappable covering the value range of ``triples``.

    ``vmin``/``vmax`` override the observed minimum/maximum when they are
    not None; otherwise the range is taken from the triples' third field.
    """
    values = [value for _, _, value in triples]
    if vmin is None:
        vmin = min(values)
    if vmax is None:
        vmax = max(values)
    return cm.ScalarMappable(
        norm=mpl.colors.Normalize(vmin=vmin, vmax=vmax),
        cmap=plt.get_cmap(colormap),
    )
def _get_triples(activities):
# using the “odd-r” horizontal layout offset coordinate system described in:
# https://www.redblobgames.com/grids/hexagons/#coordinates
# here we get a list of tuples with three values: (x, y, node value)
return [(x, y, val) for y, row in enumerate(activities) for x, val in enumerate(row)]
def plot_hex_grid(trajectory, shape=None, slice=-1, title='', colormap='Greys', vmin=None, vmax=None, edgecolor=None):
    """Render cellular-automaton activities as a hexagonal grid.

    :param trajectory: object accepted by get_activities_over_time_as_list.
    :param shape: optional (rows, cols); when given, each timestep is
        reshaped to this 2D grid and only the timestep selected by
        ``slice`` is drawn.  When None, the raw activity list is drawn
        directly as the grid.
    :param slice: timestep index to plot when ``shape`` is given
        (default: last).  NOTE(review): ``slice`` shadows the builtin.
    :param title: figure title.
    :param colormap: matplotlib colormap name for cell values.
    :param vmin: lower bound of the colour scale (default: observed min).
    :param vmax: upper bound of the colour scale (default: observed max).
    :param edgecolor: hexagon edge colour passed to RegularPolygon.
    """
    activities = get_activities_over_time_as_list(trajectory)
    if shape is not None:
        activities = np.array(activities).reshape((len(activities), shape[0], shape[1]))[slice]
    triples = _get_triples(activities)
    # Row-count parity selects between the "odd-r" and "even-r" layouts.
    has_odd_rows = (len(activities) % 2) != 0
    fig, ax = plt.subplots(1)
    ax.set_aspect('equal')
    m = _get_scaled_colormap(triples, colormap, vmin, vmax)
    for t in triples:
        x, y = _oddr_offset_to_pixel(t[0], t[1]) if has_odd_rows else _evenr_offset_to_pixel(t[0], t[1])
        # NOTE(review): ``hex`` shadows the builtin.
        hex = RegularPolygon((x, y), numVertices=6, radius=1.,
                             orientation=np.radians(60),
                             facecolor=m.to_rgba(t[2]), edgecolor=edgecolor)
        ax.add_patch(hex)
    # Invisible scatter so matplotlib autoscales the axes to the grid extent.
    ax.scatter([t[0] for t in triples], [t[1] for t in triples], marker='')
    # Flip the y axis so row 0 appears at the top.
    plt.gca().invert_yaxis()
    plt.title(title)
    plt.show()
def animate_hex(trajectory, title='', shape=None, save=False, interval=50, colormap='Greys',
                vmin=None, vmax=None, edgecolor=None):
    """Animate cellular-automaton activities on a hexagonal grid.

    :param trajectory: object accepted by get_activities_over_time_as_list.
    :param shape: optional (rows, cols) to reshape each timestep to.
    :param save: when True, also writes ``evolved.gif`` via imagemagick.
    :param interval: delay between frames in milliseconds.
    :param colormap: matplotlib colormap name for cell values.
    :param vmin: lower bound of the colour scale (default: observed min of
        the FIRST frame — later frames reuse this normalisation).
    :param vmax: upper bound of the colour scale (same caveat as vmin).
    :param edgecolor: hexagon edge colour passed to RegularPolygon.
    """
    activities = get_activities_over_time_as_list(trajectory)
    if shape is not None:
        activities = np.reshape(activities, (len(activities), shape[0], shape[1]))
    # Geometry and colour scaling are derived from the first frame only.
    triples = _get_triples(activities[0])
    has_odd_rows = (len(activities[0]) % 2) != 0
    fig, ax = plt.subplots(1)
    ax.set_aspect('equal')
    m = _get_scaled_colormap(triples, colormap, vmin, vmax)
    patches = []
    for t in triples:
        x, y = _oddr_offset_to_pixel(t[0], t[1]) if has_odd_rows else _evenr_offset_to_pixel(t[0], t[1])
        # NOTE(review): ``hex`` shadows the builtin.
        hex = RegularPolygon((x, y), numVertices=6, radius=1.,
                             orientation=np.radians(60), edgecolor=edgecolor)
        patches.append(hex)
    p = PatchCollection(patches, match_original=True, cmap=m.get_cmap())
    ax.add_collection(p)
    # Mutable dict used as a frame counter captured by the update closure.
    i = {'index': 0}
    def update(*args):
        # Advance one frame, wrapping back to the start at the end.
        i['index'] += 1
        if i['index'] == len(activities):
            i['index'] = 0
        new_triples = _get_triples(activities[i['index']])
        # Only cell values change between frames; geometry is fixed.
        p.set_array(np.array([tr[2] for tr in new_triples]))
        return p,
    # Invisible scatter so matplotlib autoscales the axes to the grid extent.
    ax.scatter([t[0] for t in triples], [t[1] for t in triples], marker='')
    plt.gca().invert_yaxis()
    plt.title(title)
    ani = animation.FuncAnimation(fig, update, interval=interval, blit=True, save_count=len(activities))
    if save:
        ani.save('evolved.gif', dpi=80, writer="imagemagick")
    plt.show()
| [
"matplotlib.pyplot.title",
"numpy.radians",
"matplotlib.pyplot.show",
"matplotlib.pyplot.get_cmap",
"matplotlib.colors.Normalize",
"matplotlib.cm.ScalarMappable",
"numpy.array",
"matplotlib.pyplot.gca",
"matplotlib.pyplot.subplots"
] | [((787, 809), 'matplotlib.pyplot.get_cmap', 'plt.get_cmap', (['colormap'], {}), '(colormap)\n', (799, 809), True, 'import matplotlib.pyplot as plt\n'), ((821, 863), 'matplotlib.colors.Normalize', 'mpl.colors.Normalize', ([], {'vmin': 'vmin', 'vmax': 'vmax'}), '(vmin=vmin, vmax=vmax)\n', (841, 863), True, 'import matplotlib as mpl\n'), ((875, 914), 'matplotlib.cm.ScalarMappable', 'cm.ScalarMappable', ([], {'norm': 'norm', 'cmap': 'cmap'}), '(norm=norm, cmap=cmap)\n', (892, 914), True, 'import matplotlib.cm as cm\n'), ((1660, 1675), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (1672, 1675), True, 'import matplotlib.pyplot as plt\n'), ((2222, 2238), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (2231, 2238), True, 'import matplotlib.pyplot as plt\n'), ((2243, 2253), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2251, 2253), True, 'import matplotlib.pyplot as plt\n'), ((2680, 2695), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)'], {}), '(1)\n', (2692, 2695), True, 'import matplotlib.pyplot as plt\n'), ((3581, 3597), 'matplotlib.pyplot.title', 'plt.title', (['title'], {}), '(title)\n', (3590, 3597), True, 'import matplotlib.pyplot as plt\n'), ((3784, 3794), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3792, 3794), True, 'import matplotlib.pyplot as plt\n'), ((2193, 2202), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2200, 2202), True, 'import matplotlib.pyplot as plt\n'), ((3412, 3451), 'numpy.array', 'np.array', (['[tr[2] for tr in new_triples]'], {}), '([tr[2] for tr in new_triples])\n', (3420, 3451), True, 'import numpy as np\n'), ((3552, 3561), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (3559, 3561), True, 'import matplotlib.pyplot as plt\n'), ((1994, 2008), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (2004, 2008), True, 'import numpy as np\n'), ((3031, 3045), 'numpy.radians', 'np.radians', (['(60)'], {}), '(60)\n', (3041, 3045), True, 'import numpy 
as np\n'), ((1486, 1506), 'numpy.array', 'np.array', (['activities'], {}), '(activities)\n', (1494, 1506), True, 'import numpy as np\n')] |
import numpy as np
import copy
import itertools
def horiz_next(loc):
    """Step one column right from (row, col) ``loc``; None at the board edge."""
    if loc[1] + 1 < WIDTH:
        return loc[0], loc[1] + 1
    else:
        return None
def horiz_prev(loc):
    """Step one column left; None at the board edge."""
    if loc[1] - 1 >= 0:
        return loc[0], loc[1] - 1
    else:
        return None
def vert_next(loc):
    """Step one row down; None at the board edge."""
    if loc[0] + 1 < HEIGHT:
        return loc[0] + 1, loc[1]
    else:
        return None
def vert_prev(loc):
    """Step one row up; None at the board edge."""
    if loc[0] - 1 >= 0:
        return loc[0] - 1, loc[1]
    else:
        return None
def diag1_next(loc):
    """Step diagonally down-right; None at the board edge.

    Bug fix: loc[0] is the row (bounded by HEIGHT) and loc[1] the column
    (bounded by WIDTH), matching vert_next/horiz_next; the two bounds
    were swapped here.
    """
    if (loc[0] + 1 < HEIGHT) and (loc[1] + 1 < WIDTH):
        return loc[0] + 1, loc[1] + 1
    else:
        return None
def diag1_prev(loc):
    """Step diagonally up-left; None at the board edge."""
    if (loc[0] - 1 >= 0) and (loc[1] - 1 >= 0):
        return loc[0] - 1, loc[1] - 1
    else:
        return None
def diag2_next(loc):
    """Step diagonally down-left; None at the board edge.

    Bug fix: the row bound must be HEIGHT (was WIDTH).
    """
    if (loc[0] + 1 < HEIGHT) and (loc[1] - 1 >= 0):
        return loc[0] + 1, loc[1] - 1
    else:
        return None
def diag2_prev(loc):
    """Step diagonally up-right; None at the board edge.

    Bug fix: the column bound must be WIDTH (was HEIGHT).
    """
    if (loc[0] - 1 >= 0) and (loc[1] + 1 < WIDTH):
        return loc[0] - 1, loc[1] + 1
    else:
        return None
# Board dimensions: WIDTH columns (second tuple element) by HEIGHT rows
# (first tuple element).
WIDTH = 3
HEIGHT = 2
# All eight queen-move directions, used by GameState.liberties.
move_funcs = [
    horiz_next,
    horiz_prev,
    vert_next,
    vert_prev,
    diag1_next,
    diag1_prev,
    diag2_next,
    diag2_prev
]
def liberties_in_dir(loc, move_fun, board):
    """Collect the open cells reachable from ``loc`` along one direction.

    Repeatedly applies ``move_fun`` starting from the cell AFTER ``loc``,
    collecting cells while they are on the board (not None) and open
    (truthy in ``board``); stops at the first blocked cell or board edge.

    Bug fix: the previous version started the scan at ``loc`` itself, so
    a token standing on a closed cell (always the case once placed, since
    GameState.result marks the destination False) was reported as having
    no liberties in any direction.

    :param loc: (row, col) starting cell; never included in the result.
    :param move_fun: one-step direction function returning the next cell,
        or None at the board edge.
    :param board: indexable by (row, col); truthy means the cell is open.
    :return: list of open (row, col) cells along the direction.
    """
    path = []
    current = move_fun(loc)
    while current is not None and board[current]:
        path.append(current)
        current = move_fun(current)
    return path
class GameState:
    """Minimal queens-Isolation game state on a WIDTH x HEIGHT board.

    ``open_cells`` is a boolean grid (True == open); the bottom-right
    cell starts blocked.  Two players alternate moves; a cell is closed
    permanently once a token lands on it.
    """
    def __init__(self):
        # Every cell open except the bottom-right corner.
        self.open_cells = np.array([[True] * WIDTH] * HEIGHT)
        self.open_cells[HEIGHT - 1, WIDTH - 1] = False
        self.current_player = 0
        # (row, col) of each player's token; None before the first move.
        self.player_locations = [None, None]
    def actions(self):
        """ Return a list of legal actions for the active player
        You are free to choose any convention to represent actions,
        but one option is to represent actions by the (row, column)
        of the endpoint for the token. For example, if your token is
        in (0, 0), and your opponent is in (1, 0) then the legal
        actions could be encoded as (0, 1) and (0, 2).
        """
        # Every currently-open cell is a legal action.
        return [(x, y) for x in range(HEIGHT) for y in range(WIDTH)
                if self.open_cells[x, y]]
    def player(self):
        """ Return the id of the active player
        Hint: return 0 for the first player, and 1 for the second player
        """
        return self.current_player
    def result(self, action):
        """ Return a new state that results from applying the given
        action in the current state
        Hint: Check out the deepcopy module--do NOT modify the
        objects internal state in place
        """
        assert action in self.actions() # Check if the action is valid
        # Deep-copy so the current state is never mutated.
        new_state = copy.deepcopy(self)
        new_state.open_cells[action] = False
        new_state.player_locations[self.player()] = action
        # Switch the active player.
        new_state.current_player = 0 if self.player() == 1 else 1
        return new_state
    def terminal_test(self):
        """ return True if the current state is terminal,
        and False otherwise
        Hint: an Isolation state is terminal if _either_
        player has no remaining liberties (even if the
        player is not active in the current state)
        """
        terminal = len(self.liberties(self.player_locations[0])) == 0
        terminal = terminal or len(self.liberties(self.player_locations[1])) == 0
        return terminal
    def liberties(self, loc):
        """ Return a list of all open cells in the
        neighborhood of the specified location. The list
        should include all open spaces in a straight line
        along any row, column or diagonal from the current
        position. (Tokens CANNOT move through obstacles
        or blocked squares in queens Isolation.)
        Note: if loc is None, then return all empty cells
        on the board
        """
        if loc is None:
            return self.actions()
        # One scan per queen-move direction; delegated to the module-level
        # liberties_in_dir helper.
        liberties = [liberties_in_dir(loc, move_fun, self.open_cells)
                     for move_fun in move_funcs]
        return list(itertools.chain(*liberties))
| [
"copy.deepcopy",
"numpy.array",
"itertools.chain"
] | [((1522, 1557), 'numpy.array', 'np.array', (['([[True] * WIDTH] * HEIGHT)'], {}), '([[True] * WIDTH] * HEIGHT)\n', (1530, 1557), True, 'import numpy as np\n'), ((2761, 2780), 'copy.deepcopy', 'copy.deepcopy', (['self'], {}), '(self)\n', (2774, 2780), False, 'import copy\n'), ((4095, 4122), 'itertools.chain', 'itertools.chain', (['*liberties'], {}), '(*liberties)\n', (4110, 4122), False, 'import itertools\n')] |
"""Cluster profiles plots."""
import numpy as np
import matplotlib.pyplot as plt
import xarray as xr
import src.plot_utils.latex_style as lsty
import src.plot_utils.colors as col
import src.data_loading.xr_loader as xvl
import src.plot_utils.gen_panels as gp
import src.time_wrapper as twr
import src.constants as cst
@twr.timeit
def make_profiles(ds: xr.Dataset) -> xr.Dataset:
    """
    Make cluster profiles (mean and std dev).

    For every PCM cluster label, computes the mean and standard deviation
    of THETA and SALT at each depth level inside the
    [-MIN_DEPTH, -MAX_DEPTH] window, ignoring NaNs.

    Args:
        ds (xr.Dataset): the dataset.
    Returns:
        xr.Dataset: new_dataset to plot, indexed by (cluster, depth).
    """
    lsty.mpl_params()
    # Cluster labels are assumed to be 0..k-1, so max label + 1 == k.
    k_clusters = int(np.nanmax(ds.PCM_LABELS.values) + 1)
    # "lol" == list of lists: one inner list per cluster.
    height_list: list = []
    theta_mean_lol: list = []
    theta_std_lol: list = []
    salt_mean_lol: list = []
    salt_std_lol: list = []
    # Put labels in (time, y, x) order and the fields in (z, time, y, x)
    # order so that field[depth_index] aligns with the label grid.
    labels = xvl.order_indexes(ds.PCM_LABELS, [cst.T_COORD, cst.Y_COORD, cst.X_COORD])
    salt = xvl.order_indexes(
        ds.SALT, [cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.X_COORD]
    )
    theta = xvl.order_indexes(
        ds.THETA, [cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.X_COORD]
    )
    init_depth_levels = ds.coords[cst.Z_COORD].values
    for k_cluster in range(k_clusters):
        # Start a new per-cluster row in every statistic list.
        for list_of_list in [
            theta_mean_lol,
            theta_std_lol,
            salt_mean_lol,
            salt_std_lol,
        ]:
            list_of_list.append([])
        for depth_index in range(len(init_depth_levels)):
            depth = init_depth_levels[depth_index]
            # Depths are negative; keep only the configured depth window.
            if -cst.MIN_DEPTH >= depth >= -cst.MAX_DEPTH:
                # Mask out cells that do not belong to this cluster, then
                # take NaN-aware statistics over the remaining cells.
                theta_filtered = np.where(
                    labels == k_cluster, theta[depth_index, :, :, :], np.nan
                )
                theta_mean_lol[-1].append(np.nanmean(theta_filtered))
                theta_std_lol[-1].append(np.nanstd(theta_filtered))
                salt_filtered = np.where(
                    labels == k_cluster, salt[depth_index, :, :, :], np.nan
                )
                salt_mean_lol[-1].append(np.nanmean(salt_filtered))
                salt_std_lol[-1].append(np.nanstd(salt_filtered))
                # The depth axis is identical for all clusters; record once.
                if k_cluster == 0:
                    height_list.append(depth)
    new_ds = xr.Dataset(
        {
            "theta_mean": ([cst.CLUST_COORD, cst.Z_COORD], np.asarray(theta_mean_lol)),
            "salt_mean": ([cst.CLUST_COORD, cst.Z_COORD], np.asarray(salt_mean_lol)),
            "theta_std": ([cst.CLUST_COORD, cst.Z_COORD], np.asarray(theta_std_lol)),
            "salt_std": ([cst.CLUST_COORD, cst.Z_COORD], np.asarray(salt_std_lol)),
        },
        coords={
            cst.Z_COORD: np.array(height_list),
            cst.CLUST_COORD: range(0, k_clusters),
        },
    )
    print("profile_characteristics", new_ds)
    return new_ds
def plot_profiles(ds: xr.Dataset) -> None:
    """
    Plot profiles.
    Originally from:
    https://scitools.org.uk/iris/docs/v1.6/examples/graphics/atlantic_profiles.html
    A program to plot profiles, originally of the original components etc.
    https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.pyplot.plot.html
    There's a fair deal of duplication in this function.
    Could probably half its length without changing its functionality.

    Draws two side-by-side depth profiles (potential temperature and
    salinity), one mean line plus a +/- 1 sigma band per cluster.

    Args:
        ds (xr.Dataset): Profile dataset to plot (as built by make_profiles).
    """
    k_clusters = len(ds.coords[cst.CLUST_COORD].values)
    print("k_clusters", k_clusters)
    color_list = col.cluster_colors(k_clusters)
    # Shared depth range (metres) for both panels.
    ylim = [300, 1800]
    def plot_part(mean_name: str, std_name: str) -> None:
        """
        Plot, for every cluster, the mean profile plus shaded sigma bands
        on the current axes.
        Args:
            mean_name (str): e.g. 'salt_mean'.
            std_name (str): e.g. 'salt_std'.
        """
        for i in range(0, k_clusters):
            # Mean profile; depths are negated so the axis reads positive-down.
            plt.plot(
                ds.isel(cluster=i)[mean_name],
                -ds.coords[cst.Z_COORD].values,
                color=color_list[i],
                linewidth=2,
                alpha=0.5,
                label=str(i + 1),
            )
            # One (multiplier, alpha) pair per sigma band; extend the list
            # for additional bands.
            for sig_mult, alpha in [[1, 0.4]]:
                plt.fill_betweenx(
                    -ds.coords[cst.Z_COORD].values,
                    ds.isel(cluster=i)[mean_name]
                    - np.multiply(sig_mult, ds.isel(cluster=i)[std_name]),
                    ds.isel(cluster=i)[mean_name]
                    + np.multiply(sig_mult, ds.isel(cluster=i)[std_name]),
                    alpha=alpha,
                    color=color_list[i],
                )
    # THETA PLOTTING.
    plt.subplot(1, 2, 1)
    ax1 = plt.gca()
    plot_part("theta_mean", "theta_std")
    ax1.set_xlabel(r"Potential Temperature, $\theta$ ($^{\circ}\mathrm{C}$)")
    ax1.set_ylabel("Depth (m)")
    ax1.set_ylim(ylim)
    ax1.set_xlim([-2, 15])
    ax1.invert_yaxis()
    # SALINITY PLOTTING.
    plt.subplot(1, 2, 2)
    ax2 = plt.gca()
    plot_part("salt_mean", "salt_std")
    ax2.set_xlabel(r"Salinity, $S$ (PSU)")
    ax2.set_ylim(ylim)
    ax2.set_xlim([34, 35.5])
    # Depth ticks only on the left panel.
    ax2.set_yticks([])
    ax2.invert_yaxis()
    plt.setp(ax2.get_yticklabels(), visible=False)
    # Single shared legend spanning both panels.
    ax1.legend(
        bbox_to_anchor=(0.0, 1.02, 2.05, 0.102),
        loc="lower left",
        ncol=2,
        mode="expand",
        borderaxespad=0.0,
    )
    gp.label_subplots([ax1, ax2])
    plt.tight_layout()
| [
"matplotlib.pyplot.subplot",
"numpy.asarray",
"numpy.nanstd",
"numpy.where",
"numpy.array",
"numpy.nanmean",
"matplotlib.pyplot.gca",
"src.plot_utils.gen_panels.label_subplots",
"src.plot_utils.latex_style.mpl_params",
"matplotlib.pyplot.tight_layout",
"src.plot_utils.colors.cluster_colors",
"... | [((551, 568), 'src.plot_utils.latex_style.mpl_params', 'lsty.mpl_params', ([], {}), '()\n', (566, 568), True, 'import src.plot_utils.latex_style as lsty\n'), ((784, 857), 'src.data_loading.xr_loader.order_indexes', 'xvl.order_indexes', (['ds.PCM_LABELS', '[cst.T_COORD, cst.Y_COORD, cst.X_COORD]'], {}), '(ds.PCM_LABELS, [cst.T_COORD, cst.Y_COORD, cst.X_COORD])\n', (801, 857), True, 'import src.data_loading.xr_loader as xvl\n'), ((869, 954), 'src.data_loading.xr_loader.order_indexes', 'xvl.order_indexes', (['ds.SALT', '[cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.X_COORD]'], {}), '(ds.SALT, [cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.X_COORD]\n )\n', (886, 954), True, 'import src.data_loading.xr_loader as xvl\n'), ((976, 1062), 'src.data_loading.xr_loader.order_indexes', 'xvl.order_indexes', (['ds.THETA', '[cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.X_COORD]'], {}), '(ds.THETA, [cst.Z_COORD, cst.T_COORD, cst.Y_COORD, cst.\n X_COORD])\n', (993, 1062), True, 'import src.data_loading.xr_loader as xvl\n'), ((3369, 3399), 'src.plot_utils.colors.cluster_colors', 'col.cluster_colors', (['k_clusters'], {}), '(k_clusters)\n', (3387, 3399), True, 'import src.plot_utils.colors as col\n'), ((4437, 4457), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4448, 4457), True, 'import matplotlib.pyplot as plt\n'), ((4468, 4477), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4475, 4477), True, 'import matplotlib.pyplot as plt\n'), ((4732, 4752), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4743, 4752), True, 'import matplotlib.pyplot as plt\n'), ((4763, 4772), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (4770, 4772), True, 'import matplotlib.pyplot as plt\n'), ((5172, 5201), 'src.plot_utils.gen_panels.label_subplots', 'gp.label_subplots', (['[ax1, ax2]'], {}), '([ax1, ax2])\n', (5189, 5201), True, 'import src.plot_utils.gen_panels as gp\n'), ((5207, 5225), 
'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5223, 5225), True, 'import matplotlib.pyplot as plt\n'), ((591, 622), 'numpy.nanmax', 'np.nanmax', (['ds.PCM_LABELS.values'], {}), '(ds.PCM_LABELS.values)\n', (600, 622), True, 'import numpy as np\n'), ((1553, 1619), 'numpy.where', 'np.where', (['(labels == k_cluster)', 'theta[depth_index, :, :, :]', 'np.nan'], {}), '(labels == k_cluster, theta[depth_index, :, :, :], np.nan)\n', (1561, 1619), True, 'import numpy as np\n'), ((1828, 1893), 'numpy.where', 'np.where', (['(labels == k_cluster)', 'salt[depth_index, :, :, :]', 'np.nan'], {}), '(labels == k_cluster, salt[depth_index, :, :, :], np.nan)\n', (1836, 1893), True, 'import numpy as np\n'), ((2242, 2268), 'numpy.asarray', 'np.asarray', (['theta_mean_lol'], {}), '(theta_mean_lol)\n', (2252, 2268), True, 'import numpy as np\n'), ((2329, 2354), 'numpy.asarray', 'np.asarray', (['salt_mean_lol'], {}), '(salt_mean_lol)\n', (2339, 2354), True, 'import numpy as np\n'), ((2415, 2440), 'numpy.asarray', 'np.asarray', (['theta_std_lol'], {}), '(theta_std_lol)\n', (2425, 2440), True, 'import numpy as np\n'), ((2500, 2524), 'numpy.asarray', 'np.asarray', (['salt_std_lol'], {}), '(salt_std_lol)\n', (2510, 2524), True, 'import numpy as np\n'), ((2580, 2601), 'numpy.array', 'np.array', (['height_list'], {}), '(height_list)\n', (2588, 2601), True, 'import numpy as np\n'), ((1700, 1726), 'numpy.nanmean', 'np.nanmean', (['theta_filtered'], {}), '(theta_filtered)\n', (1710, 1726), True, 'import numpy as np\n'), ((1769, 1794), 'numpy.nanstd', 'np.nanstd', (['theta_filtered'], {}), '(theta_filtered)\n', (1778, 1794), True, 'import numpy as np\n'), ((1973, 1998), 'numpy.nanmean', 'np.nanmean', (['salt_filtered'], {}), '(salt_filtered)\n', (1983, 1998), True, 'import numpy as np\n'), ((2040, 2064), 'numpy.nanstd', 'np.nanstd', (['salt_filtered'], {}), '(salt_filtered)\n', (2049, 2064), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import glob
import random
from typing import Counter
import numpy as np
from PIL import Image
from tqdm import tqdm
import skimage.transform as transform
import matplotlib.colors
import matplotlib.pyplot as plt
# Discrete colormap for rendering class-index masks as RGB previews
# (used below with vmin=1, vmax=7 — one colour per foreground class).
cmp = matplotlib.colors.ListedColormap(
    ["tan", "cyan", "pink", "forestgreen", "blue", "purple"]
)
def foregroundAug(foreground):
    """Randomly perturb a foreground image for synthetic composition.

    Applies an affine warp — rotation in [-10, 10) degrees, uniform zoom
    drawn from [0.1, 0.5) (the foreground is always shrunk), and a
    translation of up to a third of the image extent — then mirrors the
    result horizontally with probability 0.5.
    """
    rows, cols = foreground.shape[0], foreground.shape[1]
    angle_rad = np.random.randint(-10, 10) * (np.pi / 180.0)
    zoom_factor = np.random.random() * 0.4 + 0.1  # in [0.1, 0.5)
    shift_x = np.random.randint(0, int(cols / 3))
    shift_y = np.random.randint(0, int(rows / 3))
    warp_tf = transform.AffineTransform(
        scale=(zoom_factor, zoom_factor),
        rotation=angle_rad,
        translation=(shift_x, shift_y),
    )
    warped = transform.warp(foreground, warp_tf.inverse)
    # Horizontal flip half of the time.
    if np.random.randint(0, 100) >= 50:
        warped = warped[:, ::-1]
    return warped
def compose(index, foregrounds, background):
    """Paste the foreground cut-outs onto the background at random offsets.

    :param index: sample index (unused here; kept for signature parity
        with getForegroundMask).
    :param foregrounds: list of float foreground arrays — assumed to be in
        [0, 1], since they are scaled by 255 below (TODO confirm).
    :param background: background image as a numpy array.
    :return: (composited PIL image, x-offset list, y-offset list); the
        offsets are reused by getForegroundMask to place the label mask
        at exactly the same positions.
    """
    background = Image.fromarray(background)
    bg_w, bg_h = background.size
    # Offset list
    t_x_list = []
    t_y_list = []
    for i in range(len(foregrounds)):
        current_foreground = Image.fromarray((foregrounds[i] * 255).astype(np.uint8))
        img_w, img_h = current_foreground.size
        # Random Offsets — may place part of the foreground off-canvas;
        # presumably PIL clips the pasted region at the image border.
        t_x = np.random.randint(int(-bg_w / 1.5), int(bg_w / 1.5))
        t_y = np.random.randint(int(-bg_h / 8), int(bg_h / 1.5))
        t_x_list.append(t_x)
        t_y_list.append(t_y)
        offset = ((bg_w - img_w + t_x) // 2, (bg_h - img_h + t_y) // 2)
        # The RGBA conversion supplies the alpha channel used as paste mask.
        background.paste(current_foreground, offset, current_foreground.convert("RGBA")) #RGBA == RGB alpha channel
    return background, t_x_list, t_y_list
def getForegroundMask(
    index, foregrounds, background, background_mask, classes_list, t_x_list, t_y_list
):
    """Render the per-pixel class-label mask matching ``compose``.

    Every non-black foreground pixel receives that foreground's class id;
    where pastes overlap, the larger class id wins (np.maximum).

    Side effect: each entry of ``foregrounds`` is rebound to a x255-scaled
    copy, mirroring the scaling applied during compositing.

    :param index: sample index (unused; kept for signature parity).
    :return: 2D uint8-based label mask with the background mask underneath.
    """
    bg_w, bg_h = Image.fromarray(background).size
    mask = background_mask.astype(np.uint8)
    for idx in range(len(foregrounds)):
        foregrounds[idx] = foregrounds[idx] * 255  # Match compose() scaling.
        # Class id wherever the foreground RGB is not pure black.
        fg_mask = (
            1 - np.uint8(np.all(foregrounds[idx][:, :, :3] == 0, axis=2))
        ) * classes_list[idx]
        rows, cols = fg_mask.shape  # numpy shape is (height, width)
        off_x = (bg_w - cols + t_x_list[idx]) // 2
        off_y = (bg_h - rows + t_y_list[idx]) // 2
        # Keep the larger class id where pastes overlap.
        region = mask[off_y : off_y + rows, off_x : off_x + cols]
        mask[off_y : off_y + rows, off_x : off_x + cols] = np.maximum(region, fg_mask)
    return mask
def generator(index, background, background_mask, foreground_full_list):
    """Generate one synthetic sample: pick 7-13 random foregrounds, derive
    their class ids from the parent folder name, and return a (status,
    class-count Counter) pair.

    NOTE(review): the early ``return 1, f`` below short-circuits the
    function — everything after it (composition, mask generation, saving)
    is unreachable dead code, most likely a leftover debugging stub.
    """
    # Cluster limits
    cluster_low_limit = 7
    cluster_high_limit = 13
    foreground_list = random.sample(foreground_full_list, random.randint(cluster_low_limit, cluster_high_limit))
    # Class id is the last character of the parent directory name.
    classes_list = [x.rsplit("/", 2)[-2][-1] for x in foreground_list]
    classes_list = [int(i) for i in classes_list]
    f = Counter(classes_list)
    return 1, f
    # --- Unreachable below this point (see NOTE above). ---
    foregrounds = []
    for i in foreground_list:
        foregrounds.append(np.asarray(Image.open(i)))
    for i in range(len(foregrounds)):
        foregrounds[i] = foregroundAug(foregrounds[i])
    try:
        final_background, t_x_list, t_y_list = compose(index, foregrounds, background)
        mask_new = getForegroundMask(
            index,
            foregrounds,
            background,
            background_mask,
            classes_list,
            t_x_list,
            t_y_list,
        )
        mask_new_pil = Image.fromarray(mask_new)
        final_background.save(f"./synth_images/img_{index}.jpeg")
        mask_new_pil.save(f"./synth_labels/img_{index}.png")
        plt.imsave(
            f"./synth_rgb_labels/img_{index}.png",
            np.asarray(mask_new),
            vmin=1,
            vmax=7,
            cmap=cmp,
        )
        return 1, f
    # NOTE(review): bare except silently maps ANY failure (including bugs)
    # to a (0, f) result — narrow this to the expected exception types.
    except:
        return 0, f
# Background images and their label masks are paired purely by sorted
# filename order — NOTE(review): verify both folders hold matching names.
background_list = sorted(glob.glob(os.getcwd() + "/backgrounds-only/*"))
background_labels_list = sorted(glob.glob(os.getcwd() + "/beach_labels/*"))
# N = 1 => IMGs == 31
# N = 2 => IMGs == 62
# Replicate the background set N times to multiply the dataset size.
N = 20
background_list *= N
background_labels_list *= N
assert len(background_list) == len(background_labels_list)
# Collect every foreground cut-out from all trashnet class folders.
foreground_full_list = []
folders_list = glob.glob(os.getcwd() + "/trashnet-pngs/*")
for folder in folders_list:
    foreground_full_list.extend(glob.glob(folder + "/*"))
# Progress bar stuff
pbar = tqdm(range(len(background_list)), desc="description")
count = 0
# Running per-class Counter over all generated samples.
dist = Counter()
for index in pbar:
    background = np.asarray(Image.open(background_list[index]))
    background_mask = np.asarray(Image.open(background_labels_list[index]))
    curr_count, curr_dist = generator(index, background, background_mask, foreground_full_list)
    count += curr_count
    dist += curr_dist
    pbar.set_description("Generated: %d" % count)
print(dist) | [
"numpy.maximum",
"random.randint",
"os.getcwd",
"numpy.asarray",
"PIL.Image.open",
"PIL.Image.fromarray",
"numpy.random.randint",
"typing.Counter",
"numpy.random.random",
"skimage.transform.warp",
"glob.glob",
"skimage.transform.AffineTransform",
"numpy.all"
] | [((4773, 4782), 'typing.Counter', 'Counter', ([], {}), '()\n', (4780, 4782), False, 'from typing import Counter\n'), ((729, 819), 'skimage.transform.AffineTransform', 'transform.AffineTransform', ([], {'scale': '(zoom, zoom)', 'rotation': 'angle', 'translation': '(t_x, t_y)'}), '(scale=(zoom, zoom), rotation=angle, translation=(\n t_x, t_y))\n', (754, 819), True, 'import skimage.transform as transform\n'), ((846, 887), 'skimage.transform.warp', 'transform.warp', (['foreground', 'tform.inverse'], {}), '(foreground, tform.inverse)\n', (860, 887), True, 'import skimage.transform as transform\n'), ((1105, 1132), 'PIL.Image.fromarray', 'Image.fromarray', (['background'], {}), '(background)\n', (1120, 1132), False, 'from PIL import Image\n'), ((1986, 2013), 'PIL.Image.fromarray', 'Image.fromarray', (['background'], {}), '(background)\n', (2001, 2013), False, 'from PIL import Image\n'), ((3231, 3252), 'typing.Counter', 'Counter', (['classes_list'], {}), '(classes_list)\n', (3238, 3252), False, 'from typing import Counter\n'), ((458, 484), 'numpy.random.randint', 'np.random.randint', (['(-10)', '(10)'], {}), '(-10, 10)\n', (475, 484), True, 'import numpy as np\n'), ((945, 970), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)'], {}), '(0, 100)\n', (962, 970), True, 'import numpy as np\n'), ((2674, 2776), 'numpy.maximum', 'np.maximum', (['mask_new[offset[1]:offset[1] + img_w, offset[0]:offset[0] + img_h]', 'current_foreground'], {}), '(mask_new[offset[1]:offset[1] + img_w, offset[0]:offset[0] +\n img_h], current_foreground)\n', (2684, 2776), True, 'import numpy as np\n'), ((3047, 3100), 'random.randint', 'random.randint', (['cluster_low_limit', 'cluster_high_limit'], {}), '(cluster_low_limit, cluster_high_limit)\n', (3061, 3100), False, 'import random\n'), ((3804, 3829), 'PIL.Image.fromarray', 'Image.fromarray', (['mask_new'], {}), '(mask_new)\n', (3819, 3829), False, 'from PIL import Image\n'), ((4551, 4562), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', 
(4560, 4562), False, 'import os\n'), ((4645, 4669), 'glob.glob', 'glob.glob', (["(folder + '/*')"], {}), "(folder + '/*')\n", (4654, 4669), False, 'import glob\n'), ((4831, 4865), 'PIL.Image.open', 'Image.open', (['background_list[index]'], {}), '(background_list[index])\n', (4841, 4865), False, 'from PIL import Image\n'), ((4900, 4941), 'PIL.Image.open', 'Image.open', (['background_labels_list[index]'], {}), '(background_labels_list[index])\n', (4910, 4941), False, 'from PIL import Image\n'), ((536, 554), 'numpy.random.random', 'np.random.random', ([], {}), '()\n', (552, 554), True, 'import numpy as np\n'), ((4040, 4060), 'numpy.asarray', 'np.asarray', (['mask_new'], {}), '(mask_new)\n', (4050, 4060), True, 'import numpy as np\n'), ((4223, 4234), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4232, 4234), False, 'import os\n'), ((4303, 4314), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4312, 4314), False, 'import os\n'), ((3359, 3372), 'PIL.Image.open', 'Image.open', (['i'], {}), '(i)\n', (3369, 3372), False, 'from PIL import Image\n'), ((2299, 2344), 'numpy.all', 'np.all', (['(foregrounds[i][:, :, :3] == 0)'], {'axis': '(2)'}), '(foregrounds[i][:, :, :3] == 0, axis=2)\n', (2305, 2344), True, 'import numpy as np\n')] |
import cv2
import numpy as np
from skimage.util import random_noise
# Load the image.  NOTE(review): cv2.imread returns None when the file is
# missing — consider checking before use.
img = cv2.imread("input/Lenna.png")
# Add salt-and-pepper noise to the image (30% of pixels affected).
noise_img = random_noise(img, mode='s&p',amount=0.3)
# random_noise returns a floating-point image in [0, 1]; rescale to
# [0, 255] and convert to uint8 for display/saving.
noise_img = np.array(255*noise_img, dtype = 'uint8')
# Display and save the salt-and-pepper image.
cv2.imshow('noise',noise_img)
cv2.imwrite("s&p_noise_lenna.png",noise_img)
cv2.waitKey(0)
# Generate Gaussian noise (zero mean, variance 0.01) and rescale likewise.
gauss_noise_img = random_noise(img, mode='gaussian',mean=0,var=0.01)
gauss_noise_img = np.array(255*gauss_noise_img, dtype = 'uint8')
cv2.imwrite("gaussian_noise_lenna.png",gauss_noise_img)
# Display the Gaussian-noise image.
cv2.imshow('a',gauss_noise_img)
cv2.waitKey(0) | [
"cv2.waitKey",
"cv2.imwrite",
"skimage.util.random_noise",
"cv2.imread",
"numpy.array",
"cv2.imshow"
] | [((93, 122), 'cv2.imread', 'cv2.imread', (['"""input/Lenna.png"""'], {}), "('input/Lenna.png')\n", (103, 122), False, 'import cv2\n'), ((179, 220), 'skimage.util.random_noise', 'random_noise', (['img'], {'mode': '"""s&p"""', 'amount': '(0.3)'}), "(img, mode='s&p', amount=0.3)\n", (191, 220), False, 'from skimage.util import random_noise\n'), ((358, 398), 'numpy.array', 'np.array', (['(255 * noise_img)'], {'dtype': '"""uint8"""'}), "(255 * noise_img, dtype='uint8')\n", (366, 398), True, 'import numpy as np\n'), ((427, 457), 'cv2.imshow', 'cv2.imshow', (['"""noise"""', 'noise_img'], {}), "('noise', noise_img)\n", (437, 457), False, 'import cv2\n'), ((457, 502), 'cv2.imwrite', 'cv2.imwrite', (['"""s&p_noise_lenna.png"""', 'noise_img'], {}), "('s&p_noise_lenna.png', noise_img)\n", (468, 502), False, 'import cv2\n'), ((502, 516), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (513, 516), False, 'import cv2\n'), ((562, 614), 'skimage.util.random_noise', 'random_noise', (['img'], {'mode': '"""gaussian"""', 'mean': '(0)', 'var': '(0.01)'}), "(img, mode='gaussian', mean=0, var=0.01)\n", (574, 614), False, 'from skimage.util import random_noise\n'), ((631, 677), 'numpy.array', 'np.array', (['(255 * gauss_noise_img)'], {'dtype': '"""uint8"""'}), "(255 * gauss_noise_img, dtype='uint8')\n", (639, 677), True, 'import numpy as np\n'), ((678, 734), 'cv2.imwrite', 'cv2.imwrite', (['"""gaussian_noise_lenna.png"""', 'gauss_noise_img'], {}), "('gaussian_noise_lenna.png', gauss_noise_img)\n", (689, 734), False, 'import cv2\n'), ((754, 786), 'cv2.imshow', 'cv2.imshow', (['"""a"""', 'gauss_noise_img'], {}), "('a', gauss_noise_img)\n", (764, 786), False, 'import cv2\n'), ((786, 800), 'cv2.waitKey', 'cv2.waitKey', (['(0)'], {}), '(0)\n', (797, 800), False, 'import cv2\n')] |
import numpy as np
import tensorflow as tf
import scipy.optimize
def linesearch1(f, x, fullstep, expected_improve_rate, kl_bound, max_backtracks=10, accept_ratio=.1):
    """Backtracking line search along ``fullstep``.

    Starting from the full step and halving each backtrack, accept the
    first candidate whose improvement is positive and whose ratio of
    actual to expected improvement exceeds ``accept_ratio``.

    :param f: callable returning a tuple whose first element is the loss
        value (the second, a KL term, is currently unused).
    :param x: current parameter vector.
    :param fullstep: full proposed update step.
    :param expected_improve_rate: expected improvement per unit step
        (slope dy/dx at the initial point).
    :param kl_bound: unused at present — the KL rejection check is disabled.
    :return: (True, accepted point) on success, otherwise (False, x).
    """
    baseline = f(x)[0]
    step_fraction = 1.0
    for _ in range(max_backtracks):
        candidate = x + step_fraction * fullstep
        cand_val, _cand_kl = f(candidate)  # KL value intentionally unused
        gained = baseline - cand_val
        predicted = expected_improve_rate * step_fraction
        if gained / predicted > accept_ratio and gained > 0:
            return True, candidate
        step_fraction *= 0.5
    return False, x
def cg(f_Ax, b, cg_iters=10, callback=None, residual_tol=1e-10):
    """Solve ``A x = b`` with the conjugate gradient method (Demmel p. 312).

    :param f_Ax: callable computing the matrix-vector product ``A @ v``.
    :param b: right-hand-side vector.
    :param cg_iters: maximum number of iterations.
    :param callback: optional; invoked with the current iterate each
        iteration and once more with the final solution.
    :param residual_tol: stop once the squared residual norm falls below this.
    :return: approximate solution vector.
    """
    direction = b.copy()
    residual = b.copy()
    solution = np.zeros_like(b)
    res_sq = residual.dot(residual)
    for _ in range(cg_iters):
        if callback is not None:
            callback(solution)
        Ap = f_Ax(direction)
        alpha = res_sq / direction.dot(Ap)
        solution += alpha * direction
        residual -= alpha * Ap
        new_res_sq = residual.dot(residual)
        beta = new_res_sq / res_sq
        direction = residual + beta * direction
        res_sq = new_res_sq
        if res_sq < residual_tol:
            break
    if callback is not None:
        callback(solution)
    return solution
def flatgrad(y, x):
    """Gradient of ``y`` w.r.t. each variable in ``x``, flattened to one vector.

    :param y: Function y
    :param x: List of variable x
    :return: 1-D tensor of concatenated gradients (dy/dx)
    """
    gradient_parts = tf.gradients(y, x)
    flat_parts = [tf.reshape(part, [-1]) for part in gradient_parts]
    return tf.concat(flat_parts, axis=0)
def unflatten_params(flat_params, shapes):
    """Split a flat parameter vector back into tensors of the given shapes.

    :param flat_params: 1-D tensor of concatenated parameters.
    :param shapes: shapes (in order) of the original parameters.
    :return: list of tensors reshaped to ``shapes``.
    """
    params = []
    offset = 0
    for shape in shapes:
        count = np.prod(shape)
        params.append(tf.reshape(flat_params[offset:offset + count], shape))
        offset += count
    return params
def jvp(f, x, u, v):
    """Compute the Jacobian-Vector Product (df/dx)u via the double-gradient trick.

    See: https://j-towns.github.io/2017/06/12/A-new-trick.html
    and: https://github.com/renmengye/tensorflow-forward-ad/issues/2

    :param f: Function to be differentiated
    :param x: Variable
    :param u: Vector to be multiplied with
    :param v: Dummy variable (type tf.placeholder)
    :return: Jacobian Vector Product: (df/dx)u
    """
    # First pass gives v^T (df/dx), which is linear in v; differentiating
    # that w.r.t. v with grad_ys=u recovers (df/dx) u.
    vjp_wrt_v = tf.gradients(f, x, grad_ys=v)
    return tf.gradients(vjp_wrt_v, v, grad_ys=u)
def optimize_dual(dual, x0, bounds, ftol=1e-12):
    """Minimize the COPOS discrete dual to obtain eta and omega.

    :param dual: callable taking ``x = [eta, omega]`` and returning a
        ``(value, gradient)`` pair (required by ``jac=True`` below).
    :param x0: initial ``[eta, omega]``.
    :param bounds: limits for eta and omega, e.g.
        ``((1e-12, 1e+8), (1e-12, 1e+8))``.
    :param ftol: SLSQP function-value convergence tolerance.
    :return: (scipy OptimizeResult, eta, omega)
    """
    result = scipy.optimize.minimize(
        dual,
        x0,
        method='SLSQP',
        jac=True,
        bounds=bounds,
        options={'ftol': ftol},
    )
    eta, omega = result.x[0], result.x[1]
    return result, eta, omega
| [
"numpy.zeros_like",
"tensorflow.reshape",
"numpy.arange",
"tensorflow.gradients",
"numpy.prod"
] | [((893, 909), 'numpy.zeros_like', 'np.zeros_like', (['b'], {}), '(b)\n', (906, 909), True, 'import numpy as np\n'), ((1468, 1486), 'tensorflow.gradients', 'tf.gradients', (['y', 'x'], {}), '(y, x)\n', (1480, 1486), True, 'import tensorflow as tf\n'), ((2451, 2480), 'tensorflow.gradients', 'tf.gradients', (['f', 'x'], {'grad_ys': 'v'}), '(f, x, grad_ys=v)\n', (2463, 2480), True, 'import tensorflow as tf\n'), ((2491, 2520), 'tensorflow.gradients', 'tf.gradients', (['g', 'v'], {'grad_ys': 'u'}), '(g, v, grad_ys=u)\n', (2503, 2520), True, 'import tensorflow as tf\n'), ((328, 353), 'numpy.arange', 'np.arange', (['max_backtracks'], {}), '(max_backtracks)\n', (337, 353), True, 'import numpy as np\n'), ((1832, 1846), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (1839, 1846), True, 'import numpy as np\n'), ((1864, 1914), 'tensorflow.reshape', 'tf.reshape', (['flat_params[start:start + size]', 'shape'], {}), '(flat_params[start:start + size], shape)\n', (1874, 1914), True, 'import tensorflow as tf\n'), ((1509, 1528), 'tensorflow.reshape', 'tf.reshape', (['g', '[-1]'], {}), '(g, [-1])\n', (1519, 1528), True, 'import tensorflow as tf\n')] |
import numpy as np
import os
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory
# Pascal VOC segmentation: 20 object classes + 1 background class.
num_classes = 21
# Fixed training resolution (height, width, channels) fed to the VGG16-based FCN heads.
input_shape = (480, 480, 3)
class MyIOUMetrics(tf.keras.metrics.MeanIoU):
    """Mean IoU over ``num_classes`` classes that accepts per-pixel class
    scores as ``y_pred`` (dense integer labels are derived via argmax)."""

    def __init__(self, name=None, **kwargs):
        super().__init__(num_classes=num_classes, name=name, **kwargs)

    def update_state(self, y_true, y_pred, sample_weight=None):
        # MeanIoU expects integer class labels, not per-class scores.
        dense_pred = tf.argmax(y_pred, axis=-1)
        super().update_state(y_true, dense_pred, sample_weight)
def set_upsampling_weight(layer):
    """Initialise a Conv2DTranspose kernel with bilinear-upsampling weights.

    Builds the standard 2D bilinear interpolation filter and assigns it to
    ``layer.kernel`` so the transposed convolution starts out as bilinear
    upsampling (the canonical FCN initialisation).

    Bug fix: the previous version copied the bilinear filter into *every*
    channel pair ``(i, j)``, which makes each output channel sum all input
    channels. Bilinear initialisation is per-channel: only the diagonal
    ``i == j`` slices receive the filter; cross-channel slices stay zero.

    :param layer: a Keras ``Conv2DTranspose`` layer whose ``kernel`` variable
        will be overwritten in place.
    """
    kernel = layer.kernel
    kernel_shape = kernel.shape.as_list()
    kernel_size = kernel_shape[0]
    # NOTE(review): for Conv2DTranspose the kernel shape is
    # (k, k, filters, in_channels), so these two names may be swapped;
    # harmless here since both equal num_classes — confirm if reused elsewhere.
    in_channels = kernel_shape[2]
    out_channels = kernel_shape[3]
    # Make a 2D bilinear kernel suitable for upsampling.
    factor = (kernel_size + 1) // 2
    if kernel_size % 2 == 1:
        center = factor - 1
    else:
        center = factor - 0.5
    og = np.ogrid[:kernel_size, :kernel_size]
    filt = (1 - abs(og[0] - center) / factor) * \
           (1 - abs(og[1] - center) / factor)
    weight = np.zeros((kernel_size, kernel_size, out_channels, in_channels),
                      dtype=np.float64)
    # Per-channel (diagonal) bilinear initialisation.
    for i in range(min(out_channels, in_channels)):
        weight[:, :, i, i] = filt
    kernel.assign(weight)
def get_fcn_32(weights="imagenet"):
    """Build the FCN-32s semantic-segmentation model on a VGG16 backbone.

    The VGG16 fully-connected layers are replaced by 7x7/1x1 convolutions,
    followed by a single 32x-stride transposed convolution (initialised to
    bilinear upsampling) and a per-pixel softmax over ``num_classes``.
    """
    inputs = tf.keras.Input(shape=input_shape, name="fcn_32s")
    vgg = tf.keras.applications.vgg16.VGG16(
        include_top=False, weights=weights, input_tensor=inputs)
    net = vgg.outputs[0]
    net = layers.Conv2D(4096, 7, padding="same", activation="relu", name="fc6")(net)
    net = layers.Dropout(0.5)(net)
    net = layers.Conv2D(4096, 1, padding="same", activation="relu", name="fc7")(net)
    net = layers.Dropout(0.5)(net)
    net = layers.Conv2D(num_classes, 1, kernel_initializer="he_normal",
                        name="fcn_32_fc7_pool_conv_1")(net)
    upsample = layers.Conv2DTranspose(
        num_classes, kernel_size=(64, 64), strides=(32, 32),
        use_bias=False, padding="same", name="fcn_32_conv2d_transpose_32")
    net = upsample(net)
    set_upsampling_weight(upsample)
    net = layers.Activation("softmax")(net)
    return tf.keras.Model(inputs, net, name="fcn32_vgg16")
def get_fcn_16():
    """Build FCN-16s by warm-starting from the saved FCN-32s model and fusing
    its pool5 stream with a skip connection from VGG16's block4_pool."""
    backbone = tf.keras.models.load_model('fcn_32.hdf5', compile=False)
    # Stream 1: FCN-32's class scores, upsampled 2x to match block4_pool.
    pool5_stream = backbone.get_layer("conv2d").output
    up2 = layers.Conv2DTranspose(num_classes, kernel_size=(4, 4), strides=(2, 2), use_bias=False,
                           padding="same", name="fcn_16_pool5_conv2d_transpose_2")
    pool5_stream = up2(pool5_stream)
    set_upsampling_weight(up2)
    # Stream 2: skip connection projected to class scores.
    skip = backbone.get_layer("block4_pool").output
    skip = layers.Conv2D(num_classes, 1, kernel_initializer="he_normal",
                         name="fcn_16_block4_pool_conv_1")(skip)
    fused = layers.Add()([pool5_stream, skip])
    up16 = layers.Conv2DTranspose(num_classes, kernel_size=(32, 32), strides=(16, 16),
                           use_bias=False, padding="same", name="fcn_16_conv2d_transpose_16")
    fused = up16(fused)
    set_upsampling_weight(up16)
    fused = layers.Activation("softmax")(fused)
    return tf.keras.Model(backbone.input, fused, name="fcn16_vgg16")
def train_val_save_fcn_32():
    """Train FCN-32s on the VOC 2012 segmentation split and checkpoint the
    best model (by validation mean IoU) to 'fcn_32.hdf5'."""
    batch_size = 20
    train_ds = voc_segmentation_dataset_from_directory(split="train", batch_size=batch_size)
    val_ds = voc_segmentation_dataset_from_directory(split="val", batch_size=batch_size)
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        optimizer = tfa.optimizers.AdamW(weight_decay=0.0002, learning_rate=0.001)
        model = get_fcn_32()
        iou_metric = MyIOUMetrics()
        model.compile(optimizer, "sparse_categorical_crossentropy",
                      weighted_metrics=["accuracy", iou_metric])
        ckpt_cb = tf.keras.callbacks.ModelCheckpoint(
            filepath='fcn_32.hdf5', save_best_only=True, monitor="val_my_iou_metrics")
        lr_cb = tf.keras.callbacks.ReduceLROnPlateau(patience=3, min_delta=0.01, monitor="val_my_iou_metrics")
        print('-------------------Start Training FCN32-------------------')
        print('-------------------Trainable Variables-------------------')
        for var in model.trainable_variables:
            print('var {}, {}'.format(var.name, var.shape))
        model.summary()
        # 2913 images is around 150 steps
        model.fit(train_ds.prefetch(tf.data.experimental.AUTOTUNE), epochs=100,
                  callbacks=[lr_cb, ckpt_cb], validation_data=val_ds)
def train_val_save_fcn_16():
    """Train FCN-16s on the VOC 2012 segmentation split and checkpoint the
    best model (by validation mean IoU) to 'fcn_16.hdf5'.

    Bug fix: the checkpoint previously wrote to 'fcn_32.hdf5', silently
    overwriting the FCN-32s weights that get_fcn_16() warm-starts from and
    never producing the 'fcn_16.hdf5' file that the __main__ guard checks
    for. It now saves to 'fcn_16.hdf5'.
    """
    batch_size = 20
    train_ds = voc_segmentation_dataset_from_directory(split="train", batch_size=batch_size)
    val_ds = voc_segmentation_dataset_from_directory(split="val", batch_size=batch_size)
    strategy = tf.distribute.MirroredStrategy()
    with strategy.scope():
        # Lower learning rate than the FCN-32 stage: this fine-tunes a
        # pre-trained model.
        optimizer = tfa.optimizers.AdamW(weight_decay=0.0002, learning_rate=0.0001)
        model = get_fcn_16()
        iou_metric = MyIOUMetrics()
        model.compile(optimizer, "sparse_categorical_crossentropy",
                      weighted_metrics=["accuracy", iou_metric])
        ckpt_callback = tf.keras.callbacks.ModelCheckpoint(
            filepath='fcn_16.hdf5', save_best_only=True, monitor="val_my_iou_metrics")
        lr_callback = tf.keras.callbacks.ReduceLROnPlateau(patience=3, min_delta=0.01, monitor="val_my_iou_metrics")
        print('-------------------Start Training FCN16-------------------')
        print('-------------------Trainable Variables-------------------')
        for var in model.trainable_variables:
            print('var {}, {}'.format(var.name, var.shape))
        model.summary()
        # 2913 images is around 150 steps
        model.fit(train_ds.prefetch(tf.data.experimental.AUTOTUNE), epochs=40,
                  callbacks=[lr_callback, ckpt_callback], validation_data=val_ds)
if __name__ == "__main__":
    # Two-stage training pipeline; each invocation runs at most ONE stage.
    # The first run trains FCN-32s; a later run (once fcn_32.hdf5 exists)
    # trains FCN-16s, which warm-starts from the saved FCN-32s weights.
    if not os.path.exists("fcn_32.hdf5"):
        train_val_save_fcn_32()
    elif not os.path.exists("fcn_16.hdf5"):
        train_val_save_fcn_16()
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.argmax",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.Input",
"numpy.zeros",
"tensorflow.distribute.MirroredStrategy",
"tensorflow_addons.optimizers.AdamW",
"... | [((1174, 1260), 'numpy.zeros', 'np.zeros', (['(kernel_size, kernel_size, out_channels, in_channels)'], {'dtype': 'np.float64'}), '((kernel_size, kernel_size, out_channels, in_channels), dtype=np.\n float64)\n', (1182, 1260), True, 'import numpy as np\n'), ((1467, 1516), 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_shape', 'name': '"""fcn_32s"""'}), "(shape=input_shape, name='fcn_32s')\n", (1481, 1516), True, 'import tensorflow as tf\n'), ((1532, 1629), 'tensorflow.keras.applications.vgg16.VGG16', 'tf.keras.applications.vgg16.VGG16', ([], {'include_top': '(False)', 'weights': 'weights', 'input_tensor': 'keras_inp'}), '(include_top=False, weights=weights,\n input_tensor=keras_inp)\n', (1565, 1629), True, 'import tensorflow as tf\n'), ((2002, 2148), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['num_classes'], {'kernel_size': '(64, 64)', 'strides': '(32, 32)', 'use_bias': '(False)', 'padding': '"""same"""', 'name': '"""fcn_32_conv2d_transpose_32"""'}), "(num_classes, kernel_size=(64, 64), strides=(32, 32),\n use_bias=False, padding='same', name='fcn_32_conv2d_transpose_32')\n", (2024, 2148), False, 'from tensorflow.keras import layers\n'), ((2305, 2353), 'tensorflow.keras.Model', 'tf.keras.Model', (['keras_inp', 'x'], {'name': '"""fcn32_vgg16"""'}), "(keras_inp, x, name='fcn32_vgg16')\n", (2319, 2353), True, 'import tensorflow as tf\n'), ((2389, 2445), 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['"""fcn_32.hdf5"""'], {'compile': '(False)'}), "('fcn_32.hdf5', compile=False)\n", (2415, 2445), True, 'import tensorflow as tf\n'), ((2512, 2659), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['num_classes'], {'kernel_size': '(4, 4)', 'strides': '(2, 2)', 'use_bias': '(False)', 'padding': '"""same"""', 'name': '"""fcn_16_pool5_conv2d_transpose_2"""'}), "(num_classes, kernel_size=(4, 4), strides=(2, 2),\n use_bias=False, padding='same', 
name='fcn_16_pool5_conv2d_transpose_2')\n", (2534, 2659), False, 'from tensorflow.keras import layers\n'), ((2979, 3125), 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['num_classes'], {'kernel_size': '(32, 32)', 'strides': '(16, 16)', 'use_bias': '(False)', 'padding': '"""same"""', 'name': '"""fcn_16_conv2d_transpose_16"""'}), "(num_classes, kernel_size=(32, 32), strides=(16, 16),\n use_bias=False, padding='same', name='fcn_16_conv2d_transpose_16')\n", (3001, 3125), False, 'from tensorflow.keras import layers\n'), ((3282, 3335), 'tensorflow.keras.Model', 'tf.keras.Model', (['backbone.input', 'o'], {'name': '"""fcn16_vgg16"""'}), "(backbone.input, o, name='fcn16_vgg16')\n", (3296, 3335), True, 'import tensorflow as tf\n'), ((3411, 3488), 'kerascv.data.voc_segmentation.voc_segmentation_dataset_from_directory', 'voc_segmentation_dataset_from_directory', ([], {'split': '"""train"""', 'batch_size': 'batch_size'}), "(split='train', batch_size=batch_size)\n", (3450, 3488), False, 'from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory\n'), ((3512, 3587), 'kerascv.data.voc_segmentation.voc_segmentation_dataset_from_directory', 'voc_segmentation_dataset_from_directory', ([], {'split': '"""val"""', 'batch_size': 'batch_size'}), "(split='val', batch_size=batch_size)\n", (3551, 3587), False, 'from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory\n'), ((3603, 3635), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (3633, 3635), True, 'import tensorflow as tf\n'), ((4734, 4811), 'kerascv.data.voc_segmentation.voc_segmentation_dataset_from_directory', 'voc_segmentation_dataset_from_directory', ([], {'split': '"""train"""', 'batch_size': 'batch_size'}), "(split='train', batch_size=batch_size)\n", (4773, 4811), False, 'from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory\n'), ((4835, 4910), 
'kerascv.data.voc_segmentation.voc_segmentation_dataset_from_directory', 'voc_segmentation_dataset_from_directory', ([], {'split': '"""val"""', 'batch_size': 'batch_size'}), "(split='val', batch_size=batch_size)\n", (4874, 4910), False, 'from kerascv.data.voc_segmentation import voc_segmentation_dataset_from_directory\n'), ((4926, 4958), 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), '()\n', (4956, 4958), True, 'import tensorflow as tf\n'), ((514, 540), 'tensorflow.argmax', 'tf.argmax', (['y_pred'], {'axis': '(-1)'}), '(y_pred, axis=-1)\n', (523, 540), True, 'import tensorflow as tf\n'), ((1662, 1731), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(4096)', '(7)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc6"""'}), "(4096, 7, padding='same', activation='relu', name='fc6')\n", (1675, 1731), False, 'from tensorflow.keras import layers\n'), ((1743, 1762), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1757, 1762), False, 'from tensorflow.keras import layers\n'), ((1774, 1843), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(4096)', '(1)'], {'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fc7"""'}), "(4096, 1, padding='same', activation='relu', name='fc7')\n", (1787, 1843), False, 'from tensorflow.keras import layers\n'), ((1855, 1874), 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.5)'], {}), '(0.5)\n', (1869, 1874), False, 'from tensorflow.keras import layers\n'), ((1886, 1983), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_classes', '(1)'], {'kernel_initializer': '"""he_normal"""', 'name': '"""fcn_32_fc7_pool_conv_1"""'}), "(num_classes, 1, kernel_initializer='he_normal', name=\n 'fcn_32_fc7_pool_conv_1')\n", (1899, 1983), False, 'from tensorflow.keras import layers\n'), ((2262, 2290), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""softmax"""'], {}), "('softmax')\n", (2279, 2290), False, 'from 
tensorflow.keras import layers\n'), ((2827, 2927), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_classes', '(1)'], {'kernel_initializer': '"""he_normal"""', 'name': '"""fcn_16_block4_pool_conv_1"""'}), "(num_classes, 1, kernel_initializer='he_normal', name=\n 'fcn_16_block4_pool_conv_1')\n", (2840, 2927), False, 'from tensorflow.keras import layers\n'), ((2936, 2948), 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), '()\n', (2946, 2948), False, 'from tensorflow.keras import layers\n'), ((3239, 3267), 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""softmax"""'], {}), "('softmax')\n", (3256, 3267), False, 'from tensorflow.keras import layers\n'), ((3683, 3745), 'tensorflow_addons.optimizers.AdamW', 'tfa.optimizers.AdamW', ([], {'weight_decay': '(0.0002)', 'learning_rate': '(0.001)'}), '(weight_decay=0.0002, learning_rate=0.001)\n', (3703, 3745), True, 'import tensorflow_addons as tfa\n'), ((3946, 4060), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""fcn_32.hdf5"""', 'save_best_only': '(True)', 'monitor': '"""val_my_iou_metrics"""'}), "(filepath='fcn_32.hdf5', save_best_only=\n True, monitor='val_my_iou_metrics')\n", (3980, 4060), True, 'import tensorflow as tf\n'), ((4091, 4190), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {'patience': '(3)', 'min_delta': '(0.01)', 'monitor': '"""val_my_iou_metrics"""'}), "(patience=3, min_delta=0.01, monitor=\n 'val_my_iou_metrics')\n", (4127, 4190), True, 'import tensorflow as tf\n'), ((5006, 5069), 'tensorflow_addons.optimizers.AdamW', 'tfa.optimizers.AdamW', ([], {'weight_decay': '(0.0002)', 'learning_rate': '(0.0001)'}), '(weight_decay=0.0002, learning_rate=0.0001)\n', (5026, 5069), True, 'import tensorflow_addons as tfa\n'), ((5270, 5384), 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', ([], {'filepath': '"""fcn_32.hdf5"""', 'save_best_only': '(True)', 
'monitor': '"""val_my_iou_metrics"""'}), "(filepath='fcn_32.hdf5', save_best_only=\n True, monitor='val_my_iou_metrics')\n", (5304, 5384), True, 'import tensorflow as tf\n'), ((5415, 5514), 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {'patience': '(3)', 'min_delta': '(0.01)', 'monitor': '"""val_my_iou_metrics"""'}), "(patience=3, min_delta=0.01, monitor=\n 'val_my_iou_metrics')\n", (5451, 5514), True, 'import tensorflow as tf\n'), ((6022, 6051), 'os.path.exists', 'os.path.exists', (['"""fcn_32.hdf5"""'], {}), "('fcn_32.hdf5')\n", (6036, 6051), False, 'import os\n'), ((6098, 6127), 'os.path.exists', 'os.path.exists', (['"""fcn_16.hdf5"""'], {}), "('fcn_16.hdf5')\n", (6112, 6127), False, 'import os\n')] |
import logging
import numpy as np
from pylops import LinearOperator
# Optional numba acceleration: fall back to the pure-numpy code paths when
# numba (or the compiled helper module) cannot be imported.
try:
    from numba import jit
    from ._Spread_numba import _matvec_numba_table, _rmatvec_numba_table, \
        _matvec_numba_onthefly, _rmatvec_numba_onthefly
except ModuleNotFoundError:
    jit = None
    jit_message = 'Numba not available, reverting to numpy.'
except Exception as e:
    # e.g. a numba/llvmlite version mismatch raised at import time
    jit = None
    jit_message = 'Failed to import numba (error:%s), use numpy.' % e
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.WARNING)
class Spread(LinearOperator):
    r"""Spread operator.
    Spread values from the input model vector arranged as a 2-dimensional
    array of size :math:`[n_{x0} \times n_{t0}]` into the data vector of size
    :math:`[n_x \times n_t]`. Spreading is performed along parametric curves
    provided as look-up table of pre-computed indices (``table``)
    or computed on-the-fly using a function handle (``fh``).
    In adjont mode, values from the data vector are instead stacked
    along the same parametric curves.
    Parameters
    ----------
    dims : :obj:`tuple`
        Dimensions of model vector (vector will be reshaped internally into
        a two-dimensional array of size :math:`[n_{x0} \times n_{t0}]`,
        where the first dimension is the spreading/stacking direction)
    dimsd : :obj:`tuple`
        Dimensions of model vector (vector will be reshaped internal into
        a two-dimensional array of size :math:`[n_x \times n_t]`)
    table : :obj:`np.ndarray`, optional
        Look-up table of indeces of size
        :math:`[n_{x0} \times n_{t0} \times n_x]` (if ``None`` use function
        handle ``fh``)
    dtable : :obj:`np.ndarray`, optional
        Look-up table of decimals remainders for linear interpolation of size
        :math:`[n_{x0} \times n_{t0} \times n_x]` (if ``None`` use function
        handle ``fh``)
    fh : :obj:`np.ndarray`, optional
        Function handle that returns an index (and a fractional value in case
        of ``interp=True``) to be used for spreading/stacking given indices
        in :math:`x0` and :math:`t` axes (if ``None`` use look-up table
        ``table``)
    interp : :obj:`bool`, optional
        Apply linear interpolation (``True``) or nearest interpolation
        (``False``) during stacking/spreading along parametric curve. To be
        used only if ``engine='numba'``, inferred directly from the number of
        outputs of ``fh`` for ``engine='numpy'``
    engine : :obj:`str`, optional
        Engine used for fft computation (``numpy`` or ``numba``). Note that
        ``numba`` can only be used when providing a look-up table
    dtype : :obj:`str`, optional
        Type of elements in input array.
    Attributes
    ----------
    shape : :obj:`tuple`
        Operator shape
    explicit : :obj:`bool`
        Operator contains a matrix that can be solved explicitly (``True``) or
        not (``False``)
    Raises
    ------
    KeyError
        If ``engine`` is neither ``numpy`` nor ``numba``
    NotImplementedError
        If both ``table`` and ``fh`` are not provided
    ValueError
        If ``table`` has shape different from
        :math:`[n_{x0} \times n_t0 \times n_x]`
    Notes
    -----
    The Spread operator applies the following linear transform in forward mode
    to the model vector after reshaping it into a 2-dimensional array of size
    :math:`[n_x \times n_t]`:
    .. math::
        m(x0, t_0) \rightarrow d(x, t=f(x0, x, t_0))
    where :math:`f(x0, x, t)` is a mapping function that returns a value t
    given values :math:`x0`, :math:`x`, and :math:`t_0`.
    In adjoint mode, the model is reconstructed by means of the following
    stacking operation:
    .. math::
        m(x0, t_0) = \int{d(x, t=f(x0, x, t_0))} dx
    Note that ``table`` (or ``fh``) must return integer numbers
    representing indices in the axis :math:`t`. However it also possible to
    perform linear interpolation as part of the spreading/stacking process by
    providing the decimal part of the mapping function (:math:`t - \lfloor
    t \rfloor`) either in ``dtable`` input parameter or as second value in
    the return of ``fh`` function.
    """
    def __init__(self, dims, dimsd, table=None, dtable=None,
                 fh=None, interp=False, engine='numpy', dtype='float64'):
        if engine not in ('numpy', 'numba'):
            raise KeyError('engine must be numpy or numba')
        if engine == 'numba' and jit is not None:
            self.engine = 'numba'
        else:
            # requested numba but it is unavailable: warn and fall back
            if engine == 'numba' and jit is None:
                logging.warning(jit_message)
            self.engine = 'numpy'
        # axes
        self.dims, self.dimsd = dims, dimsd
        self.nx0, self.nt0 = self.dims[0], self.dims[1]
        self.nx, self.nt = self.dimsd[0], self.dimsd[1]
        self.table = table
        self.dtable = dtable
        self.fh = fh
        # find out if mapping is in table or function handle
        if table is None and fh is None:
            raise NotImplementedError('provide either table or fh...')
        elif table is not None:
            if self.table.shape != (self.nx0, self.nt0, self.nx):
                raise ValueError('table must have shape [nx0 x nt0 x nx]')
            self.usetable = True
            if np.any(self.table > self.nt):
                raise ValueError('values in table must be smaller than nt')
        else:
            self.usetable = False
        # find out if linear interpolation has to be carried out
        self.interp = False
        if self.usetable:
            if dtable is not None:
                if self.dtable.shape != (self.nx0, self.nt0, self.nx):
                    raise ValueError('dtable must have shape [nx0 x nt x nx]')
                self.interp = True
        else:
            if self.engine == 'numba':
                self.interp = interp
            else:
                # infer interpolation from the number of outputs of fh
                if len(fh(0, 0)) == 2:
                    self.interp = True
        self.shape = (int(np.prod(self.dimsd)), int(np.prod(self.dims)))
        self.dtype = np.dtype(dtype)
        self.explicit = False

    def _matvec_numpy(self, x):
        """Forward spreading implemented with pure-numpy loops."""
        x = x.reshape(self.dims)
        y = np.zeros(self.dimsd, dtype=self.dtype)
        for it in range(self.dims[1]):
            for ix0 in range(self.dims[0]):
                if self.usetable:
                    indices = self.table[ix0, it]
                    if self.interp:
                        dindices = self.dtable[ix0, it]
                else:
                    if self.interp:
                        indices, dindices = self.fh(ix0, it)
                    else:
                        indices = self.fh(ix0, it)
                mask = np.argwhere(~np.isnan(indices))
                if mask.size > 0:
                    # np.int was removed in numpy>=1.24; the builtin int is
                    # the equivalent (platform default) integer dtype
                    indices = (indices[mask]).astype(int)
                    if not self.interp:
                        y[mask, indices] += x[ix0, it]
                    else:
                        y[mask, indices] += (1-dindices[mask])*x[ix0, it]
                        y[mask, indices + 1] += dindices[mask] * x[ix0, it]
        return y.ravel()

    def _rmatvec_numpy(self, x):
        """Adjoint stacking implemented with pure-numpy loops."""
        x = x.reshape(self.dimsd)
        y = np.zeros(self.dims, dtype=self.dtype)
        for it in range(self.dims[1]):
            for ix0 in range(self.dims[0]):
                if self.usetable:
                    indices = self.table[ix0, it]
                    if self.interp:
                        dindices = self.dtable[ix0, it]
                else:
                    if self.interp:
                        indices, dindices = self.fh(ix0, it)
                    else:
                        indices = self.fh(ix0, it)
                mask = np.argwhere(~np.isnan(indices))
                if mask.size > 0:
                    # np.int was removed in numpy>=1.24; use the builtin int
                    indices = (indices[mask]).astype(int)
                    if not self.interp:
                        y[ix0, it] = np.sum(x[mask, indices])
                    else:
                        y[ix0, it] = \
                            np.sum(x[mask, indices]*(1-dindices[mask])) + \
                            np.sum(x[mask, indices+1]*dindices[mask])
        return y.ravel()

    def _matvec(self, x):
        """Forward mode: dispatch to the numba kernels when available."""
        if self.engine == 'numba':
            y = np.zeros(self.dimsd, dtype=self.dtype)
            if self.usetable:
                y = _matvec_numba_table(x, y, self.dims, self.interp,
                                        self.table,
                                        self.table if self.dtable is None
                                        else self.dtable)
            else:
                y = _matvec_numba_onthefly(x, y, self.dims, self.interp,
                                          self.fh)
        else:
            y = self._matvec_numpy(x)
        return y

    def _rmatvec(self, x):
        """Adjoint mode: dispatch to the numba kernels when available."""
        if self.engine == 'numba':
            y = np.zeros(self.dims, dtype=self.dtype)
            if self.usetable:
                y = _rmatvec_numba_table(x, y, self.dims, self.dimsd,
                                         self.interp, self.table,
                                         self.table if self.dtable is None
                                         else self.dtable)
            else:
                y = _rmatvec_numba_onthefly(x, y, self.dims, self.dimsd,
                                            self.interp, self.fh)
        else:
            y = self._rmatvec_numpy(x)
        return y
| [
"numpy.sum",
"logging.basicConfig",
"logging.warning",
"numpy.dtype",
"numpy.zeros",
"numpy.isnan",
"numpy.any",
"numpy.prod"
] | [((445, 524), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s: %(message)s"""', 'level': 'logging.WARNING'}), "(format='%(levelname)s: %(message)s', level=logging.WARNING)\n", (464, 524), False, 'import logging\n'), ((6088, 6103), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (6096, 6103), True, 'import numpy as np\n'), ((6212, 6250), 'numpy.zeros', 'np.zeros', (['self.dimsd'], {'dtype': 'self.dtype'}), '(self.dimsd, dtype=self.dtype)\n', (6220, 6250), True, 'import numpy as np\n'), ((7232, 7269), 'numpy.zeros', 'np.zeros', (['self.dims'], {'dtype': 'self.dtype'}), '(self.dims, dtype=self.dtype)\n', (7240, 7269), True, 'import numpy as np\n'), ((8291, 8329), 'numpy.zeros', 'np.zeros', (['self.dimsd'], {'dtype': 'self.dtype'}), '(self.dimsd, dtype=self.dtype)\n', (8299, 8329), True, 'import numpy as np\n'), ((8905, 8942), 'numpy.zeros', 'np.zeros', (['self.dims'], {'dtype': 'self.dtype'}), '(self.dims, dtype=self.dtype)\n', (8913, 8942), True, 'import numpy as np\n'), ((4607, 4635), 'logging.warning', 'logging.warning', (['jit_message'], {}), '(jit_message)\n', (4622, 4635), False, 'import logging\n'), ((5314, 5342), 'numpy.any', 'np.any', (['(self.table > self.nt)'], {}), '(self.table > self.nt)\n', (5320, 5342), True, 'import numpy as np\n'), ((6020, 6039), 'numpy.prod', 'np.prod', (['self.dimsd'], {}), '(self.dimsd)\n', (6027, 6039), True, 'import numpy as np\n'), ((6046, 6064), 'numpy.prod', 'np.prod', (['self.dims'], {}), '(self.dims)\n', (6053, 6064), True, 'import numpy as np\n'), ((6742, 6759), 'numpy.isnan', 'np.isnan', (['indices'], {}), '(indices)\n', (6750, 6759), True, 'import numpy as np\n'), ((7761, 7778), 'numpy.isnan', 'np.isnan', (['indices'], {}), '(indices)\n', (7769, 7778), True, 'import numpy as np\n'), ((7952, 7976), 'numpy.sum', 'np.sum', (['x[mask, indices]'], {}), '(x[mask, indices])\n', (7958, 7976), True, 'import numpy as np\n'), ((8070, 8117), 'numpy.sum', 'np.sum', (['(x[mask, indices] * (1 
- dindices[mask]))'], {}), '(x[mask, indices] * (1 - dindices[mask]))\n', (8076, 8117), True, 'import numpy as np\n'), ((8146, 8191), 'numpy.sum', 'np.sum', (['(x[mask, indices + 1] * dindices[mask])'], {}), '(x[mask, indices + 1] * dindices[mask])\n', (8152, 8191), True, 'import numpy as np\n')] |
import numpy as np
from flask import Flask, request, jsonify
import werkzeug
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
app = Flask(__name__)
print('loading saved artifacts')
# Output labels, index-aligned with the model's probability vector.
classes = labels = ['Type 1', 'Type 2', 'Type 3']
# Load the Keras model once at import time so every request reuses it.
model = load_model('cancer_screen_model.h5')
def preprocess(imagefile):
    """Save the uploaded image under ./data/raw/test_images/ and return it as
    a (1, 180, 180, 3) float array scaled to [0, 1]."""
    safe_name = werkzeug.utils.secure_filename(imagefile.filename)
    print("\nReceived image File name : " + safe_name)
    img_path = './data/raw/test_images/' + safe_name
    imagefile.save(img_path)
    image = load_img(img_path, target_size=(180, 180))
    batch = img_to_array(image) / 255.0
    return batch.reshape(1, 180, 180, 3)
def classify_img(image_file):
    """Run the saved cancer-screening model on an uploaded image and return
    the predicted class together with the rounded class probabilities."""
    print('processing image...')
    batch = preprocess(image_file)
    print('classifying image...')
    probability = np.round(model.predict(batch)[0], 2)
    print("rounded probability", probability)
    print('getting results...')
    result = {
        'class': labels[np.argmax(probability)],
        'probability': probability.tolist(),
    }
    print(result)
    return result
@app.route('/classify', methods=['GET', 'POST'])
def handle_request():
    """HTTP endpoint: classify an uploaded image.

    Expects a multipart/form-data request with an 'image' file part and
    returns the JSON classification result with a permissive CORS header.
    Responds 400 when the file part is missing (previously an unguarded
    ``request.files['image']`` raised and produced an HTTP 500).
    """
    image_file = request.files.get('image')
    if image_file is None:
        response = jsonify({'error': "missing 'image' file part"})
        response.status_code = 400
    else:
        response = jsonify(classify_img(image_file))
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response
if __name__ == '__main__':
    print('Starting python flask server')
    # NOTE(review): debug=True enables the Werkzeug debugger; combined with
    # host="0.0.0.0" this must not be used outside local development.
    app.run(host="0.0.0.0", port=5000, debug=True)
"keras.models.load_model",
"numpy.argmax",
"flask.Flask",
"werkzeug.utils.secure_filename",
"keras.preprocessing.image.img_to_array",
"keras.preprocessing.image.load_img"
] | [((181, 196), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (186, 196), False, 'from flask import Flask, request, jsonify\n'), ((289, 325), 'keras.models.load_model', 'load_model', (['"""cancer_screen_model.h5"""'], {}), "('cancer_screen_model.h5')\n", (299, 325), False, 'from keras.models import load_model\n'), ((370, 420), 'werkzeug.utils.secure_filename', 'werkzeug.utils.secure_filename', (['imagefile.filename'], {}), '(imagefile.filename)\n', (400, 420), False, 'import werkzeug\n'), ((566, 608), 'keras.preprocessing.image.load_img', 'load_img', (['img_path'], {'target_size': '(180, 180)'}), '(img_path, target_size=(180, 180))\n', (574, 608), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((624, 641), 'keras.preprocessing.image.img_to_array', 'img_to_array', (['img'], {}), '(img)\n', (636, 641), False, 'from keras.preprocessing.image import load_img, img_to_array\n'), ((968, 990), 'numpy.argmax', 'np.argmax', (['probability'], {}), '(probability)\n', (977, 990), True, 'import numpy as np\n')] |
import numpy as np
from sklearn.linear_model import LinearRegression
# Toy linear-regression demo: fit y = 10 * x from six points, then predict.
a = np.arange(1, 7)
b = a * 10
test = np.arange(8, 11)
model = LinearRegression()
model.fit(a.reshape(-1, 1), b)
model.predict(test.reshape(-1, 1))
| [
"sklearn.linear_model.LinearRegression",
"numpy.array"
] | [((74, 102), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (82, 102), True, 'import numpy as np\n'), ((107, 141), 'numpy.array', 'np.array', (['[10, 20, 30, 40, 50, 60]'], {}), '([10, 20, 30, 40, 50, 60])\n', (115, 141), True, 'import numpy as np\n'), ((149, 169), 'numpy.array', 'np.array', (['[8, 9, 10]'], {}), '([8, 9, 10])\n', (157, 169), True, 'import numpy as np\n'), ((178, 196), 'sklearn.linear_model.LinearRegression', 'LinearRegression', ([], {}), '()\n', (194, 196), False, 'from sklearn.linear_model import LinearRegression\n')] |
import numpy as np
import pytest
from gl0learn import fit, synthetic
from hypothesis import given, settings, assume
from hypothesis.strategies import integers, floats, random_module, just, booleans
from conftest import MAX_OVERLAPS
from utils import (
random_penalty_values,
random_penalty,
overlap_covariance_matrix,
sample_from_cov,
)
@pytest.mark.parametrize(
    "x",
    (
        [["not"]],  # not an array
        np,  # not an array
        np.ones([3, 3], dtype=int),  # wrong dtype
        np.ones([3, 3, 3]),  # wrong number of dimensions
        np.ones([3, 1]),  # wrong number of columns
        np.ones([1, 3]),  # wrong number of rows
    ),
)
def test_fit_bad_x(x):
    """`fit` must raise ValueError for inputs that are not 2-D square float
    arrays."""
    with pytest.raises(ValueError):
        _ = fit(x)
# NOTE(review): `algorithm` is parametrized but never passed to `fit` below —
# confirm whether it should be forwarded in `fit_dict`.
@pytest.mark.parametrize("algorithm", ["CD", "CDPSI"])
@given(
    max_iter=integers(1, 1000),
    active_set=floats(0, 2),
    tol=floats(1e-16, 1e-1),
    super_active_set=floats(0, 2),
    p=integers(2, 10),
    n=floats(0, 1000),
    overlaps=integers(1, MAX_OVERLAPS - 1),
    module=random_module(),
    lXs=random_penalty_values(
        penalty_strategies=random_penalty(l0=just(True), l1=booleans(), l2=booleans()),
        values_strategies={
            "l0": floats(0, 10),
            "l1": floats(0, 10),
            "l2": floats(0, 10),
        },
    ),
)
@settings(max_examples=1000, deadline=None)
def test_fit_is_reproducible(
    n, p, max_iter, module, overlaps, active_set, super_active_set, lXs, tol, algorithm
):
    """Two `fit` calls with identical inputs, penalties and seed must produce
    equal results (determinism/reproducibility property test)."""
    # Only draws where the active-set threshold exceeds the super-active-set
    # threshold are meaningful; discard the rest.
    assume(active_set > super_active_set)
    num_samples = max(1, int(n * p**2))
    # Random ground-truth precision matrix with a controlled overlap pattern.
    theta_truth = overlap_covariance_matrix(
        p=p,
        seed=module.seed,
        max_overlaps=overlaps,
        decay=1 - np.exp(overlaps - MAX_OVERLAPS),
    )
    # Discard draws that are not positive definite.
    assume(all(np.linalg.eigvalsh(theta_truth) > 0))
    x = sample_from_cov(n=num_samples, cov=theta_truth)
    _, _, _, _, y, _ = synthetic.preprocess(x, assume_centered=False, cholesky=True)
    fit_dict = dict(
        **lXs,
        scale_x=False,
        theta_init=None,
        active_set=active_set,
        max_iter=max_iter,
        seed=module.seed,
        super_active_set=super_active_set,
        max_active_set_ratio=1.0,
        tol=tol
    )
    fit1 = fit(y, **fit_dict)
    fit2 = fit(y, **fit_dict)
    assert fit1 == fit2
| [
"utils.sample_from_cov",
"gl0learn.fit",
"numpy.ones",
"hypothesis.strategies.floats",
"numpy.linalg.eigvalsh",
"hypothesis.strategies.just",
"hypothesis.settings",
"pytest.raises",
"hypothesis.strategies.booleans",
"numpy.exp",
"gl0learn.synthetic.preprocess",
"pytest.mark.parametrize",
"hy... | [((760, 813), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""algorithm"""', "['CD', 'CDPSI']"], {}), "('algorithm', ['CD', 'CDPSI'])\n", (783, 813), False, 'import pytest\n'), ((1332, 1374), 'hypothesis.settings', 'settings', ([], {'max_examples': '(1000)', 'deadline': 'None'}), '(max_examples=1000, deadline=None)\n', (1340, 1374), False, 'from hypothesis import given, settings, assume\n'), ((1500, 1537), 'hypothesis.assume', 'assume', (['(active_set > super_active_set)'], {}), '(active_set > super_active_set)\n', (1506, 1537), False, 'from hypothesis import given, settings, assume\n'), ((1812, 1859), 'utils.sample_from_cov', 'sample_from_cov', ([], {'n': 'num_samples', 'cov': 'theta_truth'}), '(n=num_samples, cov=theta_truth)\n', (1827, 1859), False, 'from utils import random_penalty_values, random_penalty, overlap_covariance_matrix, sample_from_cov\n'), ((1884, 1945), 'gl0learn.synthetic.preprocess', 'synthetic.preprocess', (['x'], {'assume_centered': '(False)', 'cholesky': '(True)'}), '(x, assume_centered=False, cholesky=True)\n', (1904, 1945), False, 'from gl0learn import fit, synthetic\n'), ((2226, 2244), 'gl0learn.fit', 'fit', (['y'], {}), '(y, **fit_dict)\n', (2229, 2244), False, 'from gl0learn import fit, synthetic\n'), ((2257, 2275), 'gl0learn.fit', 'fit', (['y'], {}), '(y, **fit_dict)\n', (2260, 2275), False, 'from gl0learn import fit, synthetic\n'), ((711, 736), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (724, 736), False, 'import pytest\n'), ((750, 756), 'gl0learn.fit', 'fit', (['x'], {}), '(x)\n', (753, 756), False, 'from gl0learn import fit, synthetic\n'), ((468, 494), 'numpy.ones', 'np.ones', (['[3, 3]'], {'dtype': 'int'}), '([3, 3], dtype=int)\n', (475, 494), True, 'import numpy as np\n'), ((519, 537), 'numpy.ones', 'np.ones', (['[3, 3, 3]'], {}), '([3, 3, 3])\n', (526, 537), True, 'import numpy as np\n'), ((577, 592), 'numpy.ones', 'np.ones', (['[3, 1]'], {}), '([3, 1])\n', (584, 592), True, 
'import numpy as np\n'), ((629, 644), 'numpy.ones', 'np.ones', (['[1, 3]'], {}), '([1, 3])\n', (636, 644), True, 'import numpy as np\n'), ((835, 852), 'hypothesis.strategies.integers', 'integers', (['(1)', '(1000)'], {}), '(1, 1000)\n', (843, 852), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((869, 881), 'hypothesis.strategies.floats', 'floats', (['(0)', '(2)'], {}), '(0, 2)\n', (875, 881), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((891, 909), 'hypothesis.strategies.floats', 'floats', (['(1e-16)', '(0.1)'], {}), '(1e-16, 0.1)\n', (897, 909), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((933, 945), 'hypothesis.strategies.floats', 'floats', (['(0)', '(2)'], {}), '(0, 2)\n', (939, 945), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((953, 968), 'hypothesis.strategies.integers', 'integers', (['(2)', '(10)'], {}), '(2, 10)\n', (961, 968), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((976, 991), 'hypothesis.strategies.floats', 'floats', (['(0)', '(1000)'], {}), '(0, 1000)\n', (982, 991), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1006, 1035), 'hypothesis.strategies.integers', 'integers', (['(1)', '(MAX_OVERLAPS - 1)'], {}), '(1, MAX_OVERLAPS - 1)\n', (1014, 1035), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1048, 1063), 'hypothesis.strategies.random_module', 'random_module', ([], {}), '()\n', (1061, 1063), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1711, 1742), 'numpy.exp', 'np.exp', (['(overlaps - MAX_OVERLAPS)'], {}), '(overlaps - MAX_OVERLAPS)\n', (1717, 1742), True, 'import numpy as np\n'), ((1766, 1797), 'numpy.linalg.eigvalsh', 'np.linalg.eigvalsh', 
(['theta_truth'], {}), '(theta_truth)\n', (1784, 1797), True, 'import numpy as np\n'), ((1230, 1243), 'hypothesis.strategies.floats', 'floats', (['(0)', '(10)'], {}), '(0, 10)\n', (1236, 1243), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1263, 1276), 'hypothesis.strategies.floats', 'floats', (['(0)', '(10)'], {}), '(0, 10)\n', (1269, 1276), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1296, 1309), 'hypothesis.strategies.floats', 'floats', (['(0)', '(10)'], {}), '(0, 10)\n', (1302, 1309), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1141, 1151), 'hypothesis.strategies.just', 'just', (['(True)'], {}), '(True)\n', (1145, 1151), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1156, 1166), 'hypothesis.strategies.booleans', 'booleans', ([], {}), '()\n', (1164, 1166), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n'), ((1171, 1181), 'hypothesis.strategies.booleans', 'booleans', ([], {}), '()\n', (1179, 1181), False, 'from hypothesis.strategies import integers, floats, random_module, just, booleans\n')] |
#!/usr/bin/env python3
from network_128x4_64_64 import Network1
import numpy as np
def drop_piece(board, col, piece):
    """Return a copy of *board* with *piece* dropped into column *col*.

    The piece settles into the lowest empty cell (value 0) of the column,
    scanning rows from the bottom (index 5) upward.  If the column is
    already full, the copy is returned unchanged.  (The previous version
    placed the piece unconditionally after the scan loop, so a full
    column silently had its topmost piece overwritten.)
    """
    next_board = board.copy()
    for row in range(5, -1, -1):
        if next_board[row][col] == 0:
            # Lowest free cell found: place the piece here and stop.
            next_board[row][col] = piece
            break
    return next_board
def _build_board_state(piece, board):
return list(map(lambda row: list(map(lambda v: 1 if v == piece else 0 if v == 0 else -1, row)), board))
# Accumulators for one training batch: encoded board states and their
# discounted reward targets.
current_batch_board_states = []
current_batch_rewards = []
# winning
# Episode for player 1 ending in a win: each entry is
# (column to drop into, board before the move).
# NOTE: the deprecated alias np.int was removed in NumPy 1.24; the
# builtin `int` is the documented drop-in replacement.
moves_win = [
    (1,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0]], int)),
    (2,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 0, 0, 2]], int)),
    (3,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 1, 1, 0, 2, 0, 2]], int)),
    (0,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 2, 0, 0, 0],
               [0, 1, 1, 1, 2, 0, 2]], int))]
# Walk the episode backwards so the final (winning) move gets reward 1
# and each earlier move gets geometrically discounted credit (factor 0.7).
reward = 1
for move, board in reversed(moves_win):
    next_board = drop_piece(board, move, 1)
    board_state = _build_board_state(1, next_board)
    current_batch_board_states.append(board_state)
    current_batch_rewards.append(reward)
    reward *= 0.7
# losing
# Episode for player 2 ending in a loss, built the same way as the
# winning episode but with a negative terminal reward.
# NOTE: the deprecated alias np.int was removed in NumPy 1.24; the
# builtin `int` is the documented drop-in replacement.
moves_lose = [
    (6,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 1, 0, 0, 0, 0, 0]], int)),
    (4,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 1, 1, 0, 0, 0, 2]], int)),
    (3,
     np.array([[0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 0, 0, 0, 0, 0, 0],
               [0, 1, 1, 1, 2, 0, 2]], int))]
# Reward -1 for the final (losing) move, discounted by 0.7 per step back.
reward = -1
for move, board in reversed(moves_lose):
    next_board = drop_piece(board, move, 2)
    board_state = _build_board_state(2, next_board)
    current_batch_board_states.append(board_state)
    current_batch_rewards.append(reward)
    reward *= 0.7
# Fit the network on the hand-built batch, then report the greedy column
# it predicts for every position of both episodes.
network = Network1()
network.update(current_batch_board_states, current_batch_rewards)
# test predictions
for label, episode, piece in (("win=", moves_win, 1), ("lose=", moves_lose, 2)):
    for expected_move, position in episode:
        print(label, expected_move)
        column_scores = []
        # Score each of the seven candidate columns for this position.
        for candidate in range(7):
            resulting = drop_piece(position, candidate, piece)
            encoded = _build_board_state(piece, resulting)
            prediction = network.predict(encoded)
            column_scores.append(round(prediction[0][0], 3))
        print("predicted=", column_scores.index(max(column_scores)), "scores=", column_scores )
        print("")
| [
"numpy.array",
"network_128x4_64_64.Network1"
] | [((2398, 2408), 'network_128x4_64_64.Network1', 'Network1', ([], {}), '()\n', (2406, 2408), False, 'from network_128x4_64_64 import Network1\n'), ((503, 669), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0]\n ], np.int)\n', (511, 669), True, 'import numpy as np\n'), ((700, 866), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 2]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 2]\n ], np.int)\n', (708, 866), True, 'import numpy as np\n'), ((897, 1063), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 2, 0, 2]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 2, 0, 2]\n ], np.int)\n', (905, 1063), True, 'import numpy as np\n'), ((1094, 1260), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 2, 0, 0, 0], [0, 1, 1, 1, 2, 0, 2]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 2, 0, 0, 0], [0, 1, 1, 1, 2, 0, 2]\n ], np.int)\n', (1102, 1260), True, 'import numpy as np\n'), ((1559, 1725), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 
0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 0, 0, 0, 0, 0]\n ], np.int)\n', (1567, 1725), True, 'import numpy as np\n'), ((1756, 1922), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 2]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 0, 0, 0, 2]\n ], np.int)\n', (1764, 1922), True, 'import numpy as np\n'), ((1953, 2119), 'numpy.array', 'np.array', (['[[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0,\n 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 2, 0, 2]]', 'np.int'], {}), '([[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, \n 0], [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 2, 0, 2]\n ], np.int)\n', (1961, 2119), True, 'import numpy as np\n')] |
import numpy as np
from gym import Env, spaces
from gym.utils import seeding
import sys
from six import StringIO, b
import copy
from gym import utils
from gym.envs.toy_text import discrete
from scipy.misc import imresize
# Directory where render()/pretty_render() write their PNG frames.
RENDER_DIR = 'renderings/'
# Named indices into ACTIONS for the four movement directions.
# NOTE(review): the deltas below are (row, col) offsets -- index 0 is
# (-1, 0), i.e. one row up -- which does not obviously match the
# LEFT/DOWN/RIGHT/UP labels; confirm against the intended orientation.
LEFT = 0
DOWN = 1
RIGHT = 2
UP = 3
# Full action set: four (drow, dcol) moves plus three symbolic actions.
ACTIONS = [(-1, 0), (0, 1), (1, 0), (0, -1),'pickup', 'drop', 'exit']
def categorical_sample(prob_n, np_random):
    """Draw one class index from a categorical distribution.

    `prob_n` holds the per-class probabilities and `np_random` is a
    numpy RandomState-like source.  Uses inverse-CDF sampling: the first
    index whose cumulative probability exceeds a uniform draw wins.
    """
    cumulative = np.cumsum(np.asarray(prob_n))
    threshold = np_random.rand()
    return (cumulative > threshold).argmax()
# Key under which the agent's (row, col) position is stored in the state dict.
AGENT = 'agent'
# Object types the agent may pick up and carry (one at a time).
PICKUPABLE = ['hammer', 'axe', 'sticks']
# Object types that block movement unless the matching tool is held.
BLOCKING = ['rock', 'tree']
# State-dict key for the object currently carried ('' when empty-handed).
HOLDING = 'holding'
# When True, sprite backgrounds start black and rendered images are clipped
# to [0, 1] in get_obs().
CLIP = False
if CLIP:
    base_sprite = np.zeros((3,3,3))/10.
else:
    base_sprite = np.ones((3,3,3))/10.
# Build nine 3x3 base sprites, each with a different single bright pixel.
sprites = []
for i in range(3):
    for j in range(3):
        new_sprite = base_sprite.copy()
        new_sprite[i,j,:] = 1.0
        sprites.append(new_sprite)
# One sprite per entity type; each type is identified by which pixel is lit.
SPRITES = {'agent':sprites[0],
           'rock': sprites[1],
           'hammer': sprites[2],
           'tree': sprites[3],
           'bread': sprites[4],
           'wheat': sprites[5],
           'sticks': sprites[6],
           'axe': sprites[7],
           'house': sprites[8],
           #'wheat': sprites[9],
           }
# Colour each sprite's bright pixel (BGR channel order, as used by OpenCV).
SPRITES['agent'][0,0] = np.array([0/255., 0/255., 255/255.])
SPRITES['rock'][0,1] = np.array([211/255., 211/255., 211/255.])
SPRITES['hammer'][0,2] = np.array([204/255., 204/255., 0/255.])
SPRITES['tree'][1,0] = np.array([34/255., 133/255., 34/255.])
SPRITES['bread'][1,1] = np.array([0/255., 215/255., 255/255.])
SPRITES['wheat'][1,2] = np.array([10/255., 215/255., 100/255.])
SPRITES['sticks'][2,0] = np.array([45/255., 82/255., 160/255.])
SPRITES['axe'][2,1] = np.array([255/255., 102/255., 102/255.])
SPRITES['house'][2,2] = np.array([153/255., 52/255., 255/255.])
# Independent copy used at a coarser resolution by render().
BIGSPRITES = copy.deepcopy(SPRITES)
# All object types and the probability of sampling each when populating
# an episode (see HammerWorld.sample_objects); probabilities must sum to 1.
OBJECTS =['rock', 'hammer', 'tree', 'axe', 'bread', 'sticks', 'house', 'wheat']
OBJECT_PROBS = [0.25, 0.0, 0.25, 0.0, 0.1, 0.2, 0.0, 0.2]
print("SUM OF PROB", sum(OBJECT_PROBS))
class HammerWorld(Env):
    """Grid-world crafting environment.

    An agent moves on an nrow x ncol grid populated with objects (rocks,
    trees, tools, food, ...).  Stepping onto an object triggers an
    interaction in ``perform_object`` (e.g. a tree becomes sticks when an
    axe is held); ``pickup``/``drop`` manage a single held tool.
    Observations are either a compact state vector (``state_obs=True``)
    or a flattened RGB image, optionally agent-centric.
    """
    metadata = {'render.modes': ['rgb', 'ansi']}
    def __init__(self, size=[10,10], res=39, add_objects=[], visible_agent=True, reward_function=None, state_obs=False, few_obj=False,
                 use_exit=False, agent_centric=True, batch_reward=False, success_function=None, goal_dim=0, pretty_renderable=False):
        self.nrow, self.ncol = size
        self.reward_range = (0, 1)
        # pixels per cell used by render(); self.res is used by get_obs()
        self.renderres = 9
        self.agent_centric = agent_centric
        self.ACTIONS = ACTIONS
        if not use_exit:
            # drop the trailing 'exit' action from the action set
            self.ACTIONS = self.ACTIONS[:-1]
        nA = len(self.ACTIONS)
        nS = self.nrow * self.ncol
        self.add_objects = add_objects
        self.reward_function = reward_function
        self.success_function = success_function
        self.nS = nS
        self.nA = nA
        self.lastaction=None
        self.batch_reward = batch_reward
        self.visible_agent = visible_agent
        self.few_obj = few_obj
        self.episode = 0
        self.state_obs = state_obs
        self.action_space = spaces.Discrete(self.nA)
        self.goal_dim=goal_dim
        if self.state_obs:
            # Compact vector obs: (x, y) per object slot + agent pos +
            # hunger flag + holding flag.  Requires the bounded object set.
            assert(self.few_obj)
            self.max_num_per_obj = 3
            self.state_space_size = len(OBJECTS)*2*self.max_num_per_obj+2+1+1
            self.observation_space = spaces.Box(low=0, high=self.nS, shape=(self.state_space_size,))
            self.state_space = self.observation_space
            self.goal_space = self.observation_space
        elif self.agent_centric:
            # Agent-centric images are padded to twice the grid size.
            self.observation_space = spaces.Box(low=0, high=1., shape=((self.nrow+1)*res*2*self.ncol*res*2*3+goal_dim,))
        else:
            self.observation_space = spaces.Box(low=0, high=1., shape=((self.nrow+1)*res*self.ncol*res*3+goal_dim,))
        self.objects = []
        self.res = res
        # Upsample the module-level sprite dictionaries to the requested
        # resolutions.  NOTE(review): this mutates the shared SPRITES /
        # BIGSPRITES globals in place, so constructing two environments
        # with different `res` would interact -- confirm intended usage.
        for obj in SPRITES.keys():
            size = SPRITES[obj].shape[0]
            if size < self.res:
                new_sprite = np.repeat(SPRITES[obj]*255, repeats=self.res/size, axis = 1)
                new_sprite = np.repeat(new_sprite, repeats=self.res/size, axis =0)
                SPRITES[obj] = new_sprite/255
            size = BIGSPRITES[obj].shape[0]
            if size < self.renderres:
                new_sprite = np.repeat(BIGSPRITES[obj]*255, repeats=self.renderres/size, axis = 1)
                new_sprite = np.repeat(new_sprite, repeats=self.renderres/size, axis =0)
                BIGSPRITES[obj] = new_sprite/255
        self.BIGSPRITES= BIGSPRITES
        self.SPRITES= SPRITES
        if pretty_renderable:
            # Load PNG sprite assets (keyed by file stem) for pretty_render().
            import os
            self.pretty_render_res = 30
            self.render_order = ['house', 'tree', 'rock', 'sticks', 'wheat', 'hammer', 'axe', 'bread', 'agent']
            asset_path = '/'.join(os.path.realpath(__file__).split('/')[:-1]+['assets/*.png'])
            print("asset_path", asset_path)
            import glob
            import cv2
            asset_paths = glob.glob(asset_path)
            self.pretty_render_sprites = {asset.split('/')[-1].split('.')[0]: cv2.imread(asset) for asset in asset_paths}
    def sample_objects(self, min_obj=None):
        """Populate self.objects with object types for a fresh episode.

        Always includes one of every type in OBJECTS (plus extras to meet
        the optional `min_obj` per-type minimums); unless `few_obj` is
        set, pads with extra types sampled according to OBJECT_PROBS.
        """
        num_objects = np.random.randint(15,25)
        indices = np.random.multinomial(1, OBJECT_PROBS, size=num_objects)
        indices = np.argmax(indices, axis=1)
        self.objects = []
        for obj in OBJECTS:
            i =1
            self.objects.append(obj)
            if min_obj is not None:
                while i < min_obj[obj]:
                    self.objects.append(obj)
                    i+=1
        if not self.few_obj:
            for i in range(max(num_objects-len(self.objects), 0)):
                obj_idx = indices[i]
                obj = OBJECTS[obj_idx]
                self.objects.append(obj)
        return self.objects
    def from_s(self, s):
        """Convert a flat cell index into a (row, col) pair."""
        row = int(s/self.ncol)
        return (row,s- row*self.ncol)
    def to_s(self, row, col):
        """Convert a (row, col) pair into a flat cell index."""
        return row*self.ncol + col
    def get_root(self, obj):
        """Return the type name of a state key, e.g. 'tree_2' -> 'tree'.

        Returns 'agent' for the agent key and None (implicitly) for
        bookkeeping keys such as HOLDING, 'hunger' or the empty string.
        """
        if obj is None or obj == HOLDING or obj == 'hunger' or obj == '':
            return None
        elif '_' in obj:
            return obj.split('_')[0]
        elif obj == 'agent':
            return obj
    def reset(self, init_from_state=None, min_obj=None):
        """Start a new episode and return the initial observation.

        Either samples a fresh random layout or restores the provided
        `init_from_state` state dict.  Returns the flattened observation
        as uint8, with the goal vector appended when goal_dim > 0.
        """
        if init_from_state is None:
            self.init_from_state = False
            self.state = {}
            self.state[HOLDING] = ''
            self.state['hunger'] = 1.0
            self.state['count'] = 0
            if self.goal_dim > 0:
                self.goal = self.reward_function.delta_star
            self.objects = []
            self.sample_objects(min_obj=min_obj)
            self.objects += self.add_objects
            self.object_counts = {k:0 for k in OBJECTS}
            self.obj_max_index = copy.deepcopy(self.object_counts)
            self.state['obj_max_index'] = self.obj_max_index
            self.state['object_counts'] = self.object_counts
            self.state['object_positions'] = {}
            # Distinct random cells: one for the agent, one per object.
            positions = np.random.permutation(self.nS)[:len(self.objects)+1]
            agent_pos = self.from_s(positions[0])
            self.state['agent'] = agent_pos
            for i, ob in enumerate(self.objects):
                pos = self.from_s(positions[i+1])
                self.add_obj(ob, pos)
        else:
            self.init_from_state = True
            self.state = init_from_state
            self.object_counts = self.state['object_counts']
            self.obj_max_index = self.state['obj_max_index']
            self.objects = self.object_counts.keys()
        self.lastaction=None
        # presumably a random id used only in render() filenames -- TODO confirm
        self.episode = np.random.randint(20)
        total = self.verify_env()
        self.total_count = total
        self.init_state = copy.deepcopy(self.state)
        self.episode_states = [self.init_state]
        obs = self.get_obs()
        self.init_img = obs.copy()
        if self.goal_dim > 0:
            obs = np.concatenate([obs.flatten(), self.goal.flatten()])
        return obs.flatten().astype(np.uint8)
    def sample_free_square(self):
        """Return a random (row, col) not currently stored as a state value."""
        perm = np.random.permutation(self.nS)
        for s in perm:
            pos = self.from_s(s)
            if pos not in self.state.values():
                return pos
    def add_obj(self, objtype, pos):
        """Register a new object of `objtype` at `pos` under a fresh suffix."""
        if objtype not in self.object_counts:
            self.object_counts[objtype] = 0
        # Suffixes only ever grow, so removed objects' names are not reused.
        suffix = self.obj_max_index[objtype] + 1
        self.obj_max_index[objtype] += 1
        self.object_counts[objtype] += 1
        self.state['object_positions'][objtype + '_'+str(suffix)] = pos
    def remove_obj(self, obj):
        """Delete object key `obj` (e.g. 'tree_2') and update its type count."""
        objtype = obj.split('_')[0]
        if objtype not in self.object_counts:
            # Debugging hook: an unknown type indicates corrupted bookkeeping.
            import pdb; pdb.set_trace()
        self.object_counts[objtype] -= 1
        del self.state['object_positions'][obj]
    def perform_object(self, obj):
        """Apply the interaction for stepping onto `obj`.

        Mutates state (consuming/transforming objects, resetting hunger)
        and returns True when the object blocks the move (tree without an
        axe, rock without a hammer).
        """
        blocked = False
        if obj.startswith('tree'):
            if self.state[HOLDING].startswith('axe'):
                # Chop the tree down into sticks.
                pos = self.state['object_positions'][obj]
                self.add_obj('sticks', pos)
                self.remove_obj(obj)
            else:
                blocked = True
        elif obj.startswith('rock'):
            if self.state[HOLDING].startswith('hammer'):
                # Smash the rock out of the way.
                self.remove_obj(obj)
            else:
                blocked = True
        elif obj.startswith('bread') or obj.startswith('house'):
            # Eating bread or entering a house satisfies hunger.
            self.state['hunger'] = 0
            if obj.startswith('bread'):
                self.remove_obj(obj)
        elif obj.startswith('sticks') and 'hammer' in self.state[HOLDING]:
            # Build sticks into a house with the hammer.
            pos = self.state['object_positions'][obj]
            self.add_obj('house', pos)
            self.remove_obj(obj)
        elif obj.startswith('wheat') and 'axe' in self.state[HOLDING]:
            # Harvest wheat into bread with the axe.
            pos = self.state['object_positions'][obj]
            self.add_obj('bread', pos)
            self.remove_obj(obj)
        return blocked
    def move_agent(self, a):
        """Execute movement action index `a`; return the agent's new position.

        Interacts with every object on the target cell first; the move is
        cancelled when out of bounds or when any object blocks it.  A held
        object travels with the agent.
        """
        act = ACTIONS[a]
        pos = self.state[AGENT]
        row, col = pos[0]+act[0], pos[1]+act[1]
        #Check bounds
        if row in range(self.nrow) and col in range(self.ncol):
            local_objects = []
            for obj in self.state['object_positions'].keys():
                root = self.get_root(obj)
                if root !='agent':
                    obj_pos = self.state['object_positions'][obj]
                    if obj_pos == (row, col):
                        local_objects.append(obj)
            is_blocked = False
            for obj in local_objects:
                blocked = self.perform_object(obj)
                is_blocked = blocked or is_blocked
            #Check obstacles:
            if is_blocked:
                return pos
            self.state[AGENT] = (row, col)
            if len(self.state[HOLDING]) > 0:
                # Carry the held object along with the agent.
                obj = self.state[HOLDING]
                self.state['object_positions'][obj] = (row, col)
            return (row, col)
        else:
            return pos
    def try_pickup(self):
        """Pick up a PICKUPABLE object on the agent's cell if hands are empty."""
        pos = self.state[AGENT]
        for obj in self.state['object_positions'].keys():
            root = self.get_root(obj)
            if root is not None and root != obj:
                obj_pos = self.state['object_positions'][obj]
                if obj_pos == pos and root in PICKUPABLE:
                    if self.state[HOLDING] == '':
                        self.state[HOLDING] = obj
        return
    def try_drop(self):
        # Can only drop if nothing else is there
        pos = self.state[AGENT]
        if self.state[HOLDING] is not None:
            for obj in self.state['object_positions'].keys():
                root = self.get_root(obj)
                if root is not None and root != obj:
                    obj_pos = self.state['object_positions'][obj]
                    if obj_pos == pos and obj != self.state[HOLDING]:
                        return
            self.state[HOLDING] = ''
        return
    def verify_env(self):
        """Consistency check: recount objects from positions and assert the
        result matches self.object_counts.  Returns the total object count."""
        my_obj_counts = {k:0 for k in OBJECTS}
        for obj in self.state['object_positions'].keys():
            if obj != 'agent' and obj!='holding' and obj != 'object_counts' and obj!='hunger':
                objtype = obj.split('_')[0]
                if objtype not in my_obj_counts:
                    my_obj_counts[objtype] = 0
                my_obj_counts[objtype] += 1
        for k in my_obj_counts.keys():
            if my_obj_counts[k] !=self.object_counts[k]:
                # Debugging hook for bookkeeping mismatches.
                import pdb; pdb.set_trace()
            assert(my_obj_counts[k] == self.object_counts[k])
        for k in self.object_counts.keys():
            assert(my_obj_counts[k] == self.object_counts[k])
        return sum(my_obj_counts.values())
    def step(self, a):
        """Apply action index `a`; return (obs, reward, done, info).

        Interactions can only remove or transform objects, so the total
        object count must never grow (asserted below).
        """
        total = self.verify_env()
        prev_state = copy.deepcopy(self.state)
        self.state['count'] +=1
        assert(total <= self.total_count)
        self.total_count = total
        r = 0
        d = False
        action = ACTIONS[a]
        if action == 'exit':
            d = True
        elif action == 'pickup':
            self.try_pickup()
        elif action == 'drop':
            self.try_drop()
        else:
            new_pos = self.move_agent(a)
        self.lastaction=a
        obs = self.get_obs()
        success = 0
        self.episode_states.append(copy.deepcopy(self.state))
        if self.success_function is not None:
            r = self.success_function(prev_state, self.state)
            success = self.success_function(self.init_state, self.state)>0
        if self.goal_dim > 0:
            # NOTE(review): get_reward is not defined in this class; it is
            # presumably provided by a subclass -- confirm before relying on
            # goal_dim > 0.
            r = self.get_reward(obs) +r
        if self.goal_dim > 0:
            obs = np.concatenate([obs.flatten(), self.goal.flatten()])
        return (obs.flatten().astype(np.uint8), r, d, {'success': success, 'count': self.state['count'], 'done':d})
    def get_obs(self, mode='rgb'):
        """Build the current observation.

        Returns the compact state vector when state_obs is set; otherwise
        a flattened RGB image (values scaled to 0-255) whose extra bottom
        row encodes hunger and whether something is held.  'ascii' mode
        returns a small character grid instead.
        """
        if self.state_obs:
            obs = self.imagine_obs(self.state)
            return obs
        if mode == 'rgb':
            img = np.zeros(((self.nrow+1)*self.res, self.ncol*self.res, 3))
            to_get_obs = self.state['object_positions'].keys()
            for obj in to_get_obs:
                root = self.get_root(obj)
                if root is not None:
                    if root in self.SPRITES:
                        row, col = self.state['object_positions'][obj]
                        img[row*self.res:(row+1)*self.res, col*self.res:(col+1)*self.res, :] += self.SPRITES[root]
            if self.visible_agent:
                row, col = self.state[AGENT]
                img[row*self.res:(row+1)*self.res, col*self.res:(col+1)*self.res, :] += self.SPRITES[AGENT]
            if self.agent_centric:
                img = self.center_agent(img, self.res)
            w,h,c = img.shape
            # Status strip in the extra bottom row: hunger and holding flags.
            img[w-self.res:w, 0:self.res, 0] = self.state['hunger']
            img[w-self.res:w, self.res:self.res*2, :] = (len(self.state[HOLDING])>0)
            if CLIP:
                img = np.clip(img, 0, 1.0)
            return img.flatten()*255
        if mode == 'ascii':
            # NOTE(review): np.str was removed in NumPy 1.24, so this branch
            # fails on modern NumPy (use str instead).
            img = np.zeros((self.nrow, self.ncol)).astype(np.str)
            for obj in self.state.keys():
                root = self.get_root(obj)
                if root is not None:
                    if '_' in obj:
                        objtype, obj_num = obj.split('_')
                        row, col = self.state[obj]
                        img[row, col] = objtype[0] + obj_num[0]
                    elif 'agent' in obj:
                        row, col = self.state[obj]
                        img[row, col] = 'AG'
            img[img=='0.0'] = ' '
            return img
    def imagine_obs(self, state, mode='rgb'):
        """Render an observation for an arbitrary `state` dict (not
        necessarily the current one); same formats as get_obs."""
        if self.state_obs:
            obs = np.zeros(self.state_space_size)
            obs[:2] = state['agent']
            for obj, pos in state['object_positions'].items():
                root, num = obj.split('_')
                num = int(num)-1
                # Vector layout supports at most max_num_per_obj of each type.
                assert(num <3)
                assert(num >=0)
                idx = OBJECTS.index(root)*2*self.max_num_per_obj+2+num*2
                obs[idx:idx+2] = pos
            obs[-2] = state['hunger']
            obs[-1] = (len(state[HOLDING])>0)
            return obs
        if mode == 'rgb':
            img = np.zeros(((self.nrow+1)*self.res, self.ncol*self.res, 3))
            to_get_obs = state['object_positions'].keys()
            for obj in to_get_obs:
                root = self.get_root(obj)
                if root is not None:
                    if root in self.SPRITES:
                        row, col = state['object_positions'][obj]
                        img[row*self.res:(row+1)*self.res, col*self.res:(col+1)*self.res, :] += self.SPRITES[root]
            if self.visible_agent:
                row, col = state[AGENT]
                img[row*self.res:(row+1)*self.res, col*self.res:(col+1)*self.res, :] += self.SPRITES[AGENT]
            if self.agent_centric:
                img = self.center_agent(img, self.res)
            w,h,c = img.shape
            img[w-self.res:w, 0:self.res, 0] = self.state['hunger']
            img[w-self.res:w, self.res:self.res*2, :] = (len(self.state[HOLDING])>0)
            return img.flatten()
    def center_agent(self, img, res):
        """Paste `img` into a double-size canvas so the agent sits at the
        center; `res` is the pixels-per-cell of `img`."""
        new_obs = np.zeros((img.shape[0]*2, img.shape[1]*2, 3))+0.1
        row, col = self.state[AGENT]
        disp_x = img.shape[0] - row*res
        disp_y = img.shape[1] - col*res
        new_obs[disp_x:disp_x+img.shape[0], disp_y:disp_y + img.shape[1]] = img
        return new_obs
    def render(self, mode='rgb'):
        """Write the current frame to RENDER_DIR as a PNG (rgb mode only)."""
        import cv2
        if mode == 'rgb':
            img = np.zeros(((self.nrow+1)*self.renderres, self.ncol*self.renderres, 3))
            to_get_obs = self.state['object_positions'].keys()
            for obj in to_get_obs:
                root = self.get_root(obj)
                if root is not None:
                    if root in self.SPRITES:
                        row, col = self.state['object_positions'][obj]
                        img[row*self.renderres:(row+1)*self.renderres, col*self.renderres:(col+1)*self.renderres, :] += self.BIGSPRITES[root]
            if self.visible_agent:
                row, col = self.state[AGENT]
                img[row*self.renderres:(row+1)*self.renderres, col*self.renderres:(col+1)*self.renderres, :] += self.BIGSPRITES[AGENT]
            if self.agent_centric:
                img = self.center_agent(img, self.renderres)
            w,h,c = img.shape
            img[w-self.renderres:w, 0:self.renderres, 0] = self.state['hunger']
            img[w-self.renderres:w, self.renderres:self.renderres*2, :] = (len(self.state[HOLDING])>0)
            cv2.imwrite(RENDER_DIR+'img{:04d}_{:04d}.png'.format(self.episode, self.state['count']), img*255)
    def pretty_render(self, mode='rgb'):
        """Write and return a frame drawn with the PNG sprite assets.

        Only valid when the environment was built with
        pretty_renderable=True.
        """
        import cv2
        if mode == 'rgb':
            img = np.zeros(((self.nrow+1)*self.pretty_render_res, self.ncol*self.pretty_render_res, 3)).astype(np.uint8)
            # Tile a dimmed grass texture as the background.
            grass = (self.pretty_render_sprites['grass2']/3).astype(np.uint8)
            for row in range(self.nrow):
                for col in range(self.ncol):
                    img[row*self.pretty_render_res:(row+1)*self.pretty_render_res, col*self.pretty_render_res:(col+1)*self.pretty_render_res] = grass
            to_get_obs = self.state['object_positions'].keys()
            # Draw in a fixed order so later sprites overwrite earlier ones.
            for to_render_obj in self.render_order:
                if to_render_obj == 'agent':
                    sprite = self.pretty_render_sprites[to_render_obj]
                    row, col = self.state[AGENT]
                    # Copy only the sprite's non-black pixels (no blending).
                    gray_pixels = np.max(sprite, axis=2)
                    idx = np.where(gray_pixels > 0)
                    col_offset = col*self.pretty_render_res
                    row_offset = row*self.pretty_render_res
                    img[(idx[0]+row_offset, idx[1]+col_offset)] = sprite[idx]
                else:
                    for obj in to_get_obs:
                        root = self.get_root(obj)
                        if root == to_render_obj:
                            #This code is to layer the sprites properly: Don't blend the colors
                            sprite = self.pretty_render_sprites[to_render_obj]
                            row, col = self.state['object_positions'][obj]
                            gray_pixels = np.max(sprite, axis=2)
                            idx = np.where(gray_pixels > 0)
                            col_offset = col*self.pretty_render_res
                            row_offset = row*self.pretty_render_res
                            img[(idx[0]+row_offset, idx[1]+col_offset)] = sprite[idx]
            if self.agent_centric:
                img = self.center_agent(img, self.pretty_render_res)
            w,h,c = img.shape
            if len(self.state[HOLDING])>0:
                # Show the held object's sprite in the bottom status strip.
                root = self.get_root(self.state[HOLDING])
                img[w-self.pretty_render_res:w, self.pretty_render_res:self.pretty_render_res*2] = self.pretty_render_sprites[root]
            cv2.imwrite(RENDER_DIR+'pretty_img{:04d}_{:04d}.png'.format(self.episode, self.state['count']), img)
            return img
    def check_move_agent(self, a):
        """ Does not actually change state"""
        # Dry-run version of move_agent: returns the position the agent
        # would end up in, whether the last inspected object blocks the
        # move, and which object type (if any) the move would remove.
        act = ACTIONS[a]
        pos = self.state[AGENT]
        row, col = pos[0]+act[0], pos[1]+act[1]
        #Check bounds
        removes_obj = None
        blocked = False
        if row in range(self.nrow) and col in range(self.ncol):
            local_objects = []
            for obj in self.state['object_positions'].keys():
                root = self.get_root(obj)
                if root !='agent':
                    obj_pos = self.state['object_positions'][obj]
                    if obj_pos == (row, col):
                        local_objects.append(obj)
            is_blocked = False
            for obj in local_objects:
                blocked = False
                if obj.startswith('tree'):
                    if not self.state[HOLDING].startswith('axe'):
                        blocked = True
                    else:
                        removes_obj = 'tree'
                elif obj.startswith('rock'):
                    if not self.state[HOLDING].startswith('hammer'):
                        blocked = True
                    else:
                        removes_obj = 'rock'
                elif obj.startswith('bread'):
                    removes_obj= 'bread'
                elif obj.startswith('wheat') and self.state[HOLDING].startswith('axe'):
                    removes_obj = 'wheat'
                else:
                    blocked = True
            if blocked:
                row, col = pos
        return (row, col), blocked, removes_obj
    def get_diagnostics(self,paths, **kwargs):
        """Aggregate success rate and episode-length stats over rollouts."""
        successes = [p['env_infos'][-1]['success'] for p in paths]
        success_rate = sum(successes)/len(successes)
        lengths = [p['env_infos'][-1]['count'] for p in paths]
        length_rate = sum(lengths)/len(lengths)
        return {'SuccessRate': success_rate, 'PathLengthMean': length_rate, 'PathLengthMin':min(lengths)}
| [
"numpy.argmax",
"numpy.random.multinomial",
"gym.spaces.Discrete",
"numpy.ones",
"numpy.clip",
"numpy.random.randint",
"glob.glob",
"numpy.cumsum",
"numpy.max",
"numpy.repeat",
"copy.deepcopy",
"numpy.asarray",
"os.path.realpath",
"numpy.random.permutation",
"numpy.zeros",
"cv2.imread"... | [((1353, 1398), 'numpy.array', 'np.array', (['[0 / 255.0, 0 / 255.0, 255 / 255.0]'], {}), '([0 / 255.0, 0 / 255.0, 255 / 255.0])\n', (1361, 1398), True, 'import numpy as np\n'), ((1414, 1463), 'numpy.array', 'np.array', (['[211 / 255.0, 211 / 255.0, 211 / 255.0]'], {}), '([211 / 255.0, 211 / 255.0, 211 / 255.0])\n', (1422, 1463), True, 'import numpy as np\n'), ((1481, 1528), 'numpy.array', 'np.array', (['[204 / 255.0, 204 / 255.0, 0 / 255.0]'], {}), '([204 / 255.0, 204 / 255.0, 0 / 255.0])\n', (1489, 1528), True, 'import numpy as np\n'), ((1545, 1592), 'numpy.array', 'np.array', (['[34 / 255.0, 133 / 255.0, 34 / 255.0]'], {}), '([34 / 255.0, 133 / 255.0, 34 / 255.0])\n', (1553, 1592), True, 'import numpy as np\n'), ((1609, 1656), 'numpy.array', 'np.array', (['[0 / 255.0, 215 / 255.0, 255 / 255.0]'], {}), '([0 / 255.0, 215 / 255.0, 255 / 255.0])\n', (1617, 1656), True, 'import numpy as np\n'), ((1672, 1720), 'numpy.array', 'np.array', (['[10 / 255.0, 215 / 255.0, 100 / 255.0]'], {}), '([10 / 255.0, 215 / 255.0, 100 / 255.0])\n', (1680, 1720), True, 'import numpy as np\n'), ((1737, 1784), 'numpy.array', 'np.array', (['[45 / 255.0, 82 / 255.0, 160 / 255.0]'], {}), '([45 / 255.0, 82 / 255.0, 160 / 255.0])\n', (1745, 1784), True, 'import numpy as np\n'), ((1799, 1848), 'numpy.array', 'np.array', (['[255 / 255.0, 102 / 255.0, 102 / 255.0]'], {}), '([255 / 255.0, 102 / 255.0, 102 / 255.0])\n', (1807, 1848), True, 'import numpy as np\n'), ((1865, 1913), 'numpy.array', 'np.array', (['[153 / 255.0, 52 / 255.0, 255 / 255.0]'], {}), '([153 / 255.0, 52 / 255.0, 255 / 255.0])\n', (1873, 1913), True, 'import numpy as np\n'), ((1918, 1940), 'copy.deepcopy', 'copy.deepcopy', (['SPRITES'], {}), '(SPRITES)\n', (1931, 1940), False, 'import copy\n'), ((513, 531), 'numpy.asarray', 'np.asarray', (['prob_n'], {}), '(prob_n)\n', (523, 531), True, 'import numpy as np\n'), ((547, 564), 'numpy.cumsum', 'np.cumsum', (['prob_n'], {}), '(prob_n)\n', (556, 564), True, 'import 
numpy as np\n'), ((762, 781), 'numpy.zeros', 'np.zeros', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (770, 781), True, 'import numpy as np\n'), ((808, 826), 'numpy.ones', 'np.ones', (['(3, 3, 3)'], {}), '((3, 3, 3))\n', (815, 826), True, 'import numpy as np\n'), ((3231, 3255), 'gym.spaces.Discrete', 'spaces.Discrete', (['self.nA'], {}), '(self.nA)\n', (3246, 3255), False, 'from gym import Env, spaces\n'), ((5390, 5415), 'numpy.random.randint', 'np.random.randint', (['(15)', '(25)'], {}), '(15, 25)\n', (5407, 5415), True, 'import numpy as np\n'), ((5433, 5489), 'numpy.random.multinomial', 'np.random.multinomial', (['(1)', 'OBJECT_PROBS'], {'size': 'num_objects'}), '(1, OBJECT_PROBS, size=num_objects)\n', (5454, 5489), True, 'import numpy as np\n'), ((5509, 5535), 'numpy.argmax', 'np.argmax', (['indices'], {'axis': '(1)'}), '(indices, axis=1)\n', (5518, 5535), True, 'import numpy as np\n'), ((7936, 7957), 'numpy.random.randint', 'np.random.randint', (['(20)'], {}), '(20)\n', (7953, 7957), True, 'import numpy as np\n'), ((8051, 8076), 'copy.deepcopy', 'copy.deepcopy', (['self.state'], {}), '(self.state)\n', (8064, 8076), False, 'import copy\n'), ((8395, 8425), 'numpy.random.permutation', 'np.random.permutation', (['self.nS'], {}), '(self.nS)\n', (8416, 8425), True, 'import numpy as np\n'), ((13140, 13165), 'copy.deepcopy', 'copy.deepcopy', (['self.state'], {}), '(self.state)\n', (13153, 13165), False, 'import copy\n'), ((3499, 3562), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': 'self.nS', 'shape': '(self.state_space_size,)'}), '(low=0, high=self.nS, shape=(self.state_space_size,))\n', (3509, 3562), False, 'from gym import Env, spaces\n'), ((5156, 5177), 'glob.glob', 'glob.glob', (['asset_path'], {}), '(asset_path)\n', (5165, 5177), False, 'import glob\n'), ((7091, 7124), 'copy.deepcopy', 'copy.deepcopy', (['self.object_counts'], {}), '(self.object_counts)\n', (7104, 7124), False, 'import copy\n'), ((9024, 9039), 'pdb.set_trace', 'pdb.set_trace', ([], {}), 
'()\n', (9037, 9039), False, 'import pdb\n'), ((13670, 13695), 'copy.deepcopy', 'copy.deepcopy', (['self.state'], {}), '(self.state)\n', (13683, 13695), False, 'import copy\n'), ((14449, 14512), 'numpy.zeros', 'np.zeros', (['((self.nrow + 1) * self.res, self.ncol * self.res, 3)'], {}), '(((self.nrow + 1) * self.res, self.ncol * self.res, 3))\n', (14457, 14512), True, 'import numpy as np\n'), ((16200, 16231), 'numpy.zeros', 'np.zeros', (['self.state_space_size'], {}), '(self.state_space_size)\n', (16208, 16231), True, 'import numpy as np\n'), ((16750, 16813), 'numpy.zeros', 'np.zeros', (['((self.nrow + 1) * self.res, self.ncol * self.res, 3)'], {}), '(((self.nrow + 1) * self.res, self.ncol * self.res, 3))\n', (16758, 16813), True, 'import numpy as np\n'), ((17768, 17817), 'numpy.zeros', 'np.zeros', (['(img.shape[0] * 2, img.shape[1] * 2, 3)'], {}), '((img.shape[0] * 2, img.shape[1] * 2, 3))\n', (17776, 17817), True, 'import numpy as np\n'), ((18137, 18212), 'numpy.zeros', 'np.zeros', (['((self.nrow + 1) * self.renderres, self.ncol * self.renderres, 3)'], {}), '(((self.nrow + 1) * self.renderres, self.ncol * self.renderres, 3))\n', (18145, 18212), True, 'import numpy as np\n'), ((3740, 3844), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1.0)', 'shape': '((self.nrow + 1) * res * 2 * self.ncol * res * 2 * 3 + goal_dim,)'}), '(low=0, high=1.0, shape=((self.nrow + 1) * res * 2 * self.ncol *\n res * 2 * 3 + goal_dim,))\n', (3750, 3844), False, 'from gym import Env, spaces\n'), ((3888, 3984), 'gym.spaces.Box', 'spaces.Box', ([], {'low': '(0)', 'high': '(1.0)', 'shape': '((self.nrow + 1) * res * self.ncol * res * 3 + goal_dim,)'}), '(low=0, high=1.0, shape=((self.nrow + 1) * res * self.ncol * res *\n 3 + goal_dim,))\n', (3898, 3984), False, 'from gym import Env, spaces\n'), ((4156, 4218), 'numpy.repeat', 'np.repeat', (['(SPRITES[obj] * 255)'], {'repeats': '(self.res / size)', 'axis': '(1)'}), '(SPRITES[obj] * 255, repeats=self.res / size, axis=1)\n', (4165, 
4218), True, 'import numpy as np\n'), ((4247, 4301), 'numpy.repeat', 'np.repeat', (['new_sprite'], {'repeats': '(self.res / size)', 'axis': '(0)'}), '(new_sprite, repeats=self.res / size, axis=0)\n', (4256, 4301), True, 'import numpy as np\n'), ((4461, 4532), 'numpy.repeat', 'np.repeat', (['(BIGSPRITES[obj] * 255)'], {'repeats': '(self.renderres / size)', 'axis': '(1)'}), '(BIGSPRITES[obj] * 255, repeats=self.renderres / size, axis=1)\n', (4470, 4532), True, 'import numpy as np\n'), ((4561, 4621), 'numpy.repeat', 'np.repeat', (['new_sprite'], {'repeats': '(self.renderres / size)', 'axis': '(0)'}), '(new_sprite, repeats=self.renderres / size, axis=0)\n', (4570, 4621), True, 'import numpy as np\n'), ((5269, 5286), 'cv2.imread', 'cv2.imread', (['asset'], {}), '(asset)\n', (5279, 5286), False, 'import cv2\n'), ((7319, 7349), 'numpy.random.permutation', 'np.random.permutation', (['self.nS'], {}), '(self.nS)\n', (7340, 7349), True, 'import numpy as np\n'), ((12834, 12849), 'pdb.set_trace', 'pdb.set_trace', ([], {}), '()\n', (12847, 12849), False, 'import pdb\n'), ((15422, 15442), 'numpy.clip', 'np.clip', (['img', '(0)', '(1.0)'], {}), '(img, 0, 1.0)\n', (15429, 15442), True, 'import numpy as np\n'), ((15526, 15558), 'numpy.zeros', 'np.zeros', (['(self.nrow, self.ncol)'], {}), '((self.nrow, self.ncol))\n', (15534, 15558), True, 'import numpy as np\n'), ((19396, 19492), 'numpy.zeros', 'np.zeros', (['((self.nrow + 1) * self.pretty_render_res, self.ncol * self.\n pretty_render_res, 3)'], {}), '(((self.nrow + 1) * self.pretty_render_res, self.ncol * self.\n pretty_render_res, 3))\n', (19404, 19492), True, 'import numpy as np\n'), ((20127, 20149), 'numpy.max', 'np.max', (['sprite'], {'axis': '(2)'}), '(sprite, axis=2)\n', (20133, 20149), True, 'import numpy as np\n'), ((20176, 20201), 'numpy.where', 'np.where', (['(gray_pixels > 0)'], {}), '(gray_pixels > 0)\n', (20184, 20201), True, 'import numpy as np\n'), ((20857, 20879), 'numpy.max', 'np.max', (['sprite'], {'axis': 
'(2)'}), '(sprite, axis=2)\n', (20863, 20879), True, 'import numpy as np\n'), ((20914, 20939), 'numpy.where', 'np.where', (['(gray_pixels > 0)'], {}), '(gray_pixels > 0)\n', (20922, 20939), True, 'import numpy as np\n'), ((4977, 5003), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (4993, 5003), False, 'import os\n')] |
# Copyright 2021, <NAME>.
#
# Developed as a thesis project at the TORSEC research group of the Polytechnic of Turin (Italy) under the supervision
# of professor <NAME> and engineer <NAME> and with the support of engineer <NAME>.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import configparser # implements a basic configuration language for Python programs
import json # json encoder and decoder
import os # provides a portable way of using operating system dependent functionality
import sys # system-specific parameters and functions
import tempfile # used to create temporary files and directories
import time # provides various time-related functions
from copy import deepcopy # creates a new object and recursively copies the original object elements
import baker # easy, powerful access to Python functions from the command line
import mlflow # open source platform for managing the end-to-end machine learning lifecycle
import numpy as np # the fundamental package for scientific computing with Python
import pandas as pd # pandas is a flexible and easy to use open source data analysis and manipulation tool
import psutil # used for retrieving information on running processes and system utilization
import torch # tensor library like NumPy, with strong GPU support
from logzero import logger # robust and effective logging for Python
from nets.Contrastive_Model_net import Net
from nets.generators.fresh_generators import get_generator
from utils.ranking_metrics import (mean_reciprocal_rank, mean_average_precision,
max_reciprocal_rank, min_reciprocal_rank,
max_average_precision, min_average_precision)
# get config file path: config.ini is expected to live one directory above this module
# (paths are derived from this file's own location, not from the current working directory)
model_dir = os.path.dirname(os.path.abspath(__file__))
src_dir = os.path.dirname(model_dir)
config_filepath = os.path.join(src_dir, 'config.ini')
# instantiate config parser and read config file
# NOTE: configparser.read() silently ignores a missing file; later key lookups would then raise KeyError
config = configparser.ConfigParser()
config.read(config_filepath)
# get variables from config file
device = config['general']['device']
# number of test samples (NOTE(review): not referenced in this chunk; presumably used elsewhere — verify)
N_SAMPLES = int(config['sorel20mDataset']['test_n_samples'])
try:
    # try getting layer sizes from config file (expects a JSON list, e.g. "[512, 512, 128]")
    layer_sizes = json.loads(config['mtje']['layer_sizes'])
except json.JSONDecodeError:
    # if the option is not present in the config file set layer sizes to None
    layer_sizes = None
# instantiate run additional parameters dict setting values got from config file
run_additional_params = {
    'layer_sizes': layer_sizes,
    'dropout_p': float(config['mtje']['dropout_p']),
    'activation_function': config['mtje']['activation_function'],
    'normalization_function': config['mtje']['normalization_function'],
    'optimizer': config['contrastiveLearning']['optimizer'],
    'lr': float(config['contrastiveLearning']['lr']),
    'momentum': float(config['contrastiveLearning']['momentum']),
    'weight_decay': float(config['contrastiveLearning']['weight_decay']),
    'hard': int(config['contrastiveLearning']['hard']),
    'margin': float(config['contrastiveLearning']['margin']),
    'squared': int(config['contrastiveLearning']['squared'])
}
def compute_ranking_scores(rank_per_query):  # list of ranks computed by the model evaluation procedure
    """ Compute ranking scores (MRR and MAP) and a handful of noteworthy ranks to save to file.

    Args:
        rank_per_query: List of ranks computed by the model evaluation procedure
    Returns:
        Ranking scores (in a dict) and a dict of interesting ranks to save to file.
    """
    # turn every rank into a binarized (0/1) relevance vector: 1 where the ranked
    # label matches the query's ground truth label, 0 elsewhere
    binarized = []
    for current_rank in rank_per_query:
        truth = current_rank['ground_truth_label']
        binarized.append(np.asarray([lab == truth for lab in current_rank['rank_labels']],
                                    dtype=np.dtype(int)))
    # compute MRR and MAP over all binarized relevance vectors
    ranking_scores = {'MRR': mean_reciprocal_rank(binarized), 'MAP': mean_average_precision(binarized)}
    # locate the extreme (best/worst) queries by reciprocal rank and by average precision;
    # each metric helper returns a (value, index) pair
    extremes = {
        'max_rr': max_reciprocal_rank(binarized),
        'min_rr': min_reciprocal_rank(binarized),
        'max_ap': max_average_precision(binarized),
        'min_ap': min_average_precision(binarized)
    }
    # keep value and index of each extreme query in a dict
    queries_indexes = {name: {'value': pair[0], 'index': pair[1]}
                       for name, pair in extremes.items()}
    # pull the full rank entry of each extreme query out of the input list
    ranks_to_save = {}
    for name, info in queries_indexes.items():
        ranks_to_save[name] = {'value': info['value'], 'rank': rank_per_query[info['index']]}
    # return computed scores and interesting queries
    return ranking_scores, ranks_to_save
def normalize_results(labels,
                      predictions):
    """ Normalize results to make them easier to be saved to file.

    Args:
        labels: Array-like (tensor or numpy array) object containing the ground truth labels
        predictions: Array-like (tensor or numpy array) object containing the model predictions
    Returns:
        Dictionary containing the normalized labels and predictions tensors.
    """
    # start from the detached/copied ground truth labels, then add one '<k>-NN_pred'
    # entry per k value found in 'predictions', each detached and copied as well
    normalized = {'label': Net.detach_and_copy_array(labels)}
    normalized.update(('{}-NN_pred'.format(knn_k), Net.detach_and_copy_array(preds))
                      for knn_k, preds in predictions.items())
    return normalized
@baker.command
def evaluate_network(fresh_ds_path,  # path of the directory where to find the fresh dataset (containing .dat files)
                     checkpoint_path,  # path to the model checkpoint to load
                     training_run=0,  # training run identifier
                     train_split_proportion=7,  # train subsplit proportion value
                     valid_split_proportion=1,  # validation subsplit proportion value
                     test_split_proportion=2,  # test subsplit proportion value
                     batch_size=250,  # how many samples per batch to load
                     rank_size=20,  # size (number of samples) of the ranking to produce
                     knn_k_min=1,  # minimum value of k to use when applying the k-nn algorithm
                     knn_k_max=11,  # maximum value of k to use when applying the k-nn algorithm
                     # if provided, seed random number generation with this value (default: None, no seeding)
                     random_seed=None,
                     # how many worker (threads) the dataloader uses (default: 0 -> use multiprocessing.cpu_count())
                     workers=0):
    """ Evaluate the model on both the family prediction task and on the family ranking task.

    For each test batch, queries are ranked against ALL train batches by euclidean distance in
    embedding space; the 'rank_size' closest train (anchor) samples per query are kept and used
    both to build per-query rankings and to predict the family via distance-weighted k-NN voting
    (for each odd k in [knn_k_min, knn_k_max]).

    Args:
        fresh_ds_path: Path of the directory where to find the fresh dataset (containing .dat files)
        checkpoint_path: Path to the model checkpoint to load
        training_run: Training run identifier (default: 0)
        train_split_proportion: Train subsplit proportion value (default: 7)
        valid_split_proportion: Validation subsplit proportion value (default: 1)
        test_split_proportion: Test subsplit proportion value (default: 2)
        batch_size: How many samples per batch to load (default: 250)
        rank_size: Size (number of samples) of the ranking to produce (default: 20)
        knn_k_min: Minimum value of k to use when applying the k-nn algorithm (default: 1)
        knn_k_max: Maximum value of k to use when applying the k-nn algorithm (default: 11)
        random_seed: If provided, seed random number generation with this value (default: None, no seeding)
        workers: How many worker (threads) the dataloader uses (default: 0 -> use multiprocessing.cpu_count())
    """
    # start mlflow run (the 'mlrun' handle is never read below; the context manager tracks the active run)
    with mlflow.start_run() as mlrun:
        # if the split proportions are not as expected raise ValueError
        if train_split_proportion <= 0 or valid_split_proportion <= 0 or test_split_proportion <= 0:
            raise ValueError('train, valid and test split proportions must be positive integers.')
        # the rank size must be smaller (or at most equal) to the selected batch size
        if rank_size > batch_size:
            raise ValueError('rank size should be smaller or equal to the batch size.')
        # generate the dataset split proportions (list)
        dataset_split_proportions = [train_split_proportion, valid_split_proportion, test_split_proportion]
        # if workers has a value (it is not None) then convert it to int if it is > 0, otherwise set it to None
        workers = workers if workers is None else int(workers) if int(workers) > 0 else None
        if random_seed is not None:  # if a seed was provided
            logger.info(f"Setting random seed to {int(random_seed)}.")
            # set the seed for generating random numbers
            torch.manual_seed(int(random_seed))
        logger.info('...instantiating siamese network for contrastive evaluation run n. {}'.format(training_run))
        # create fresh dataset generators
        train_generator, _, test_generator = get_generator(ds_root=fresh_ds_path,
                                                           splits=dataset_split_proportions,
                                                           batch_size=batch_size,
                                                           return_shas=True,
                                                           num_workers=workers,
                                                           shuffle=True)  # shuffle samples
        # get label to signature function from the test dataset (used to convert numerical labels to family names)
        label_to_sig = test_generator.dataset.label_to_sig
        # get total number of families
        n_families = test_generator.dataset.n_families
        # create contrastive (siamese) mtjeNet model
        model = Net(feature_dimension=2381,
                    embedding_dimension=32,
                    layer_sizes=run_additional_params['layer_sizes'],
                    dropout_p=run_additional_params['dropout_p'],
                    activation_function=run_additional_params['activation_function'],
                    normalization_function=run_additional_params['normalization_function'])
        # load model checkpoint
        model.load_state_dict(torch.load(checkpoint_path))
        # allocate model to selected device (CPU or GPU)
        model.to(device)
        logger.info('Evaluating contrastive learning model..')
        # set model into evaluation mode
        model.eval()
        # get number of steps per epoch (# of total batches) from test generator
        test_steps_per_epoch = len(test_generator)
        # get number of steps per epoch (# of total batches) from train generator
        # (not referenced later in this function)
        train_steps_per_epoch = len(train_generator)
        # create temporary directory
        with tempfile.TemporaryDirectory() as tempdir:
            # compute result file path
            filename = os.path.join(tempdir, 'results.csv')
            # create and open the results file in write mode
            with open(filename, 'w') as f:
                first_batch = True
                ranks = []
                # set current epoch start time
                start_time = time.time()
                # for all mini-batches of samples of the test generator
                for i, (query_shas, query_features, query_labels) in enumerate(test_generator):
                    # get the query samples shas
                    query_shas = np.asarray(query_shas)
                    # transfer query features and labels to selected device
                    query_features = deepcopy(query_features).to(device)
                    query_labels = deepcopy(query_labels.long()).to(device)
                    with torch.no_grad():  # disable gradient calculation
                        # perform a forward pass through the network to get the query samples embeddings
                        query_pe_embeddings = model(query_features)
                    # initialize top samples arrays with null value
                    # (running top-'rank_size' nearest anchors, updated incrementally over train batches)
                    top_shas = None
                    top_labels = None
                    top_distances = None
                    predictions = {}
                    # for all mini-batches of data from the train generator
                    for j, (anchor_shas, anchor_features, anchor_labels) in enumerate(train_generator):
                        # get the anchor samples shas
                        anchor_shas = np.asarray(anchor_shas)
                        # transfer anchor features to selected device
                        anchor_features = anchor_features.to(device)
                        with torch.no_grad():  # disable gradient calculation
                            # perform a forward pass through the network to get the anchor embeddings
                            anchor_pe_embeddings = model(anchor_features)
                            # compute euclidean distances between query and anchor samples
                            distances = torch.cdist(query_pe_embeddings, anchor_pe_embeddings, p=2.0)
                            # if top distances is none (first train batch for this query batch)
                            if top_distances is None:
                                # assign to top distances the current computed distances
                                top_distances = distances
                                # compute array of indices which sort the distances in ascending order
                                indices = top_distances.argsort(dim=1)
                                # obtain the shas of the first 'rank_size' most similar query samples
                                # (based on the computed indices)
                                top_shas = np.concatenate([np.expand_dims(
                                    np.repeat(np.expand_dims(anchor_shas, axis=0), query_shas.shape[0], axis=0)[x, y],
                                    axis=0)
                                    for x, row in enumerate(indices[:, :rank_size])
                                    for y in row]).reshape(-1, rank_size)
                                # obtain the labels of the first 'rank_size' most similar query samples
                                # (based on the computed indices)
                                top_labels = torch.cat([anchor_labels.repeat(query_labels.shape[0], 1)[x, y].unsqueeze(0)
                                                        for x, row in enumerate(indices[:, :rank_size])
                                                        for y in row]).view(-1, rank_size)
                                # obtain the distances of the first 'rank_size' most similar query samples to the current
                                # anchor (based on the computed indices)
                                top_distances = torch.cat([top_distances[x, y].unsqueeze(0)
                                                           for x, row in enumerate(indices[:, :rank_size])
                                                           for y in row]).view(-1, rank_size)
                            else:
                                # concatenate the current shas to the top shas array
                                top_shas = np.concatenate((top_shas, np.repeat(np.expand_dims(anchor_shas, axis=0),
                                                                              top_shas.shape[0], axis=0)), axis=1)
                                # concatenate the current labels to the top labels tensor
                                top_labels = torch.cat((top_labels, anchor_labels.repeat(top_labels.size()[0], 1)), dim=1)
                                # concatenate the current distances to the top distances tensor
                                top_distances = torch.cat((top_distances, distances), dim=1)
                                # compute array of indices which sort the distances in ascending order
                                indices = top_distances.argsort(dim=1)
                                # obtain the labels of the first 'rank_size' most similar query samples
                                # (based on the computed indices)
                                top_shas = np.concatenate([np.expand_dims(top_shas[x, y], axis=0)
                                                           for x, row in enumerate(indices[:, :rank_size])
                                                           for y in row]).reshape(-1, rank_size)
                                # obtain the labels of the first 'rank_size' most similar query samples
                                # (based on the computed indices)
                                top_labels = torch.cat([top_labels[x, y].unsqueeze(0)
                                                        for x, row in enumerate(indices[:, :rank_size])
                                                        for y in row]).view(-1, rank_size)
                                # obtain the distances of the first 'rank_size' most similar query samples to the current
                                # anchor (based on the computed indices)
                                top_distances = torch.cat([top_distances[x, y].unsqueeze(0)
                                                           for x, row in enumerate(indices[:, :rank_size])
                                                           for y in row]).view(-1, rank_size)
                    # for all query samples
                    for k, s in enumerate(query_shas):
                        # save ranking
                        ranks.append({
                            'query_sha': s,
                            'ground_truth_label': int(query_labels[k].item()),
                            'ground_truth_family': label_to_sig(int(query_labels[k].item())),
                            'rank_shas': top_shas[k].tolist(),
                            'rank_labels': [int(lab.item()) for lab in top_labels[k]],
                            'rank_families': [label_to_sig(int(lab.item())) for lab in top_labels[k]]
                        })
                    # for all odd values of k from knn_k_min to knn_k_max (included)
                    for k in range(knn_k_min if knn_k_min % 2 else knn_k_min + 1, knn_k_max + 1, 2):
                        # get the first k labels from the top labels tensor
                        knn_labels = top_labels[:, :k]
                        # get the first k distances from the top distances tensor and raise them to the power of -2
                        # (inverse-square distance weighting; NOTE(review): a zero distance would produce inf)
                        knn_weights = torch.pow(top_distances[:, :k], -2)
                        # initialize per family-scores to 0
                        knn_scores = torch.zeros((knn_labels.shape[0], n_families)).to(device)
                        # for all top k labels
                        for idx, labs in enumerate(knn_labels):
                            # compute the per-family sum of distance weights
                            knn_scores[idx].index_add_(0, torch.tensor([int(lab.item()) for lab in labs]).to(device),
                                                       knn_weights[idx])
                        # save as prediction the family with the maximum score (keyed by the k value as a string)
                        predictions[str(k)] = torch.argmax(knn_scores, dim=1)
                    # compute current epoch elapsed time (in seconds)
                    elapsed_time = time.time() - start_time
                    # write on standard out the elapsed time, predicted total epoch completion time, current mean speed
                    # and main memory usage
                    sys.stdout.write('\r Contrastive learning evaluation: {}/{} '.format(i + 1, test_steps_per_epoch)
                                     + '[{}/{}, {:6.3f}it/s, RAM used: {:4.1f}%] '
                                     .format(time.strftime("%H:%M:%S", time.gmtime(elapsed_time)),  # show elapsed time
                                             time.strftime("%H:%M:%S",  # predict total epoch completion time
                                                           time.gmtime(test_steps_per_epoch * elapsed_time / (i + 1))),
                                             (i + 1) / elapsed_time,  # compute current mean speed (it/s)
                                             psutil.virtual_memory().percent))  # get percentage of main memory used
                    # flush standard output
                    sys.stdout.flush()
                    # normalize the results
                    results = normalize_results(query_labels, predictions)
                    # store results into a pandas dataframe (indexed by the sha265 keys) and then save it as csv into
                    # file f (inserting the header only if this is the first batch in the loop)
                    pd.DataFrame(results, index=query_shas).to_csv(f, header=first_batch)
                    first_batch = False
            # compute ranking scores (MAP and MRR) and interesting ranks to save
            ranking_scores, ranks_to_save = compute_ranking_scores(ranks)
            # log ranking scores
            mlflow.log_metric('MRR', float(ranking_scores['MRR']))
            mlflow.log_metric('MAP', float(ranking_scores['MAP']))
            # compute example rank dataframes
            dataframes = {
                key: pd.DataFrame({"sha256": value['rank']['rank_shas'],
                                   "label": value['rank']['rank_labels'],
                                   "family": value['rank']['rank_families']})
                for key, value in ranks_to_save.items()
            }
            # compute example ranks metadata
            metadata = {
                key: pd.Series([
                    '{}: {}'.format(key, value['value']),
                    'Query sample sha256: {}'.format(value['rank']['query_sha']),
                    'Ground truth label: {}'.format(value['rank']['ground_truth_label']),
                    'Ground truth family: {}'.format(value['rank']['ground_truth_family'])
                ])
                for key, value in ranks_to_save.items()
            }
            logger.info('Saving results..')
            # for each example rank
            for df_key, df_val in dataframes.items():
                # retrieve metadata
                meta = metadata[df_key]
                # create file name
                df_filename = os.path.join(tempdir, '{}_example_rank.csv'.format(df_key))
                # open dataframe dest file and write both metadata and dataframe to it
                with open(df_filename, 'w') as df_f:
                    meta.to_csv(df_f, index=False)
                    df_val.to_csv(df_f)
                # log results file as artifact
                mlflow.log_artifact(df_filename, artifact_path="contrastive_learning_results")
            # log results file as artifact
            mlflow.log_artifact(filename, artifact_path="contrastive_learning_results")
        logger.info('...done')
if __name__ == '__main__':
    # start baker in order to make it possible to run the script and use function names and parameters
    # as the command line interface, using ``optparse``-style options
    # (dispatches the functions registered above with @baker.command, e.g. evaluate_network)
    baker.run()
| [
"psutil.virtual_memory",
"nets.Contrastive_Model_net.Net.detach_and_copy_array",
"torch.argmax",
"utils.ranking_metrics.min_reciprocal_rank",
"torch.cat",
"mlflow.log_artifact",
"sys.stdout.flush",
"torch.no_grad",
"os.path.join",
"utils.ranking_metrics.mean_average_precision",
"mlflow.start_run... | [((2287, 2313), 'os.path.dirname', 'os.path.dirname', (['model_dir'], {}), '(model_dir)\n', (2302, 2313), False, 'import os\n'), ((2332, 2367), 'os.path.join', 'os.path.join', (['src_dir', '"""config.ini"""'], {}), "(src_dir, 'config.ini')\n", (2344, 2367), False, 'import os\n'), ((2427, 2454), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (2452, 2454), False, 'import configparser\n'), ((2250, 2275), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (2265, 2275), False, 'import os\n'), ((2687, 2728), 'json.loads', 'json.loads', (["config['mtje']['layer_sizes']"], {}), "(config['mtje']['layer_sizes'])\n", (2697, 2728), False, 'import json\n'), ((4485, 4508), 'utils.ranking_metrics.max_reciprocal_rank', 'max_reciprocal_rank', (['rs'], {}), '(rs)\n', (4504, 4508), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((4534, 4557), 'utils.ranking_metrics.min_reciprocal_rank', 'min_reciprocal_rank', (['rs'], {}), '(rs)\n', (4553, 4557), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((4583, 4608), 'utils.ranking_metrics.max_average_precision', 'max_average_precision', (['rs'], {}), '(rs)\n', (4604, 4608), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((4634, 4659), 'utils.ranking_metrics.min_average_precision', 'min_average_precision', (['rs'], {}), '(rs)\n', (4655, 4659), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((23964, 23975), 'baker.run', 'baker.run', ([], {}), 
'()\n', (23973, 23975), False, 'import baker\n'), ((4308, 4332), 'utils.ranking_metrics.mean_reciprocal_rank', 'mean_reciprocal_rank', (['rs'], {}), '(rs)\n', (4328, 4332), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((4341, 4367), 'utils.ranking_metrics.mean_average_precision', 'mean_average_precision', (['rs'], {}), '(rs)\n', (4363, 4367), False, 'from utils.ranking_metrics import mean_reciprocal_rank, mean_average_precision, max_reciprocal_rank, min_reciprocal_rank, max_average_precision, min_average_precision\n'), ((5839, 5872), 'nets.Contrastive_Model_net.Net.detach_and_copy_array', 'Net.detach_and_copy_array', (['labels'], {}), '(labels)\n', (5864, 5872), False, 'from nets.Contrastive_Model_net import Net\n'), ((6120, 6148), 'nets.Contrastive_Model_net.Net.detach_and_copy_array', 'Net.detach_and_copy_array', (['v'], {}), '(v)\n', (6145, 6148), False, 'from nets.Contrastive_Model_net import Net\n'), ((8531, 8549), 'mlflow.start_run', 'mlflow.start_run', ([], {}), '()\n', (8547, 8549), False, 'import mlflow\n'), ((9855, 10005), 'nets.generators.fresh_generators.get_generator', 'get_generator', ([], {'ds_root': 'fresh_ds_path', 'splits': 'dataset_split_proportions', 'batch_size': 'batch_size', 'return_shas': '(True)', 'num_workers': 'workers', 'shuffle': '(True)'}), '(ds_root=fresh_ds_path, splits=dataset_split_proportions,\n batch_size=batch_size, return_shas=True, num_workers=workers, shuffle=True)\n', (9868, 10005), False, 'from nets.generators.fresh_generators import get_generator\n'), ((10656, 10961), 'nets.Contrastive_Model_net.Net', 'Net', ([], {'feature_dimension': '(2381)', 'embedding_dimension': '(32)', 'layer_sizes': "run_additional_params['layer_sizes']", 'dropout_p': "run_additional_params['dropout_p']", 'activation_function': "run_additional_params['activation_function']", 'normalization_function': 
"run_additional_params['normalization_function']"}), "(feature_dimension=2381, embedding_dimension=32, layer_sizes=\n run_additional_params['layer_sizes'], dropout_p=run_additional_params[\n 'dropout_p'], activation_function=run_additional_params[\n 'activation_function'], normalization_function=run_additional_params[\n 'normalization_function'])\n", (10659, 10961), False, 'from nets.Contrastive_Model_net import Net\n'), ((11226, 11280), 'logzero.logger.info', 'logger.info', (['"""Evaluating contrastive learning model.."""'], {}), "('Evaluating contrastive learning model..')\n", (11237, 11280), False, 'from logzero import logger\n'), ((23735, 23757), 'logzero.logger.info', 'logger.info', (['"""...done"""'], {}), "('...done')\n", (23746, 23757), False, 'from logzero import logger\n'), ((11105, 11132), 'torch.load', 'torch.load', (['checkpoint_path'], {}), '(checkpoint_path)\n', (11115, 11132), False, 'import torch\n'), ((11663, 11692), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (11690, 11692), False, 'import tempfile\n'), ((11767, 11803), 'os.path.join', 'os.path.join', (['tempdir', '"""results.csv"""'], {}), "(tempdir, 'results.csv')\n", (11779, 11803), False, 'import os\n'), ((4184, 4197), 'numpy.dtype', 'np.dtype', (['int'], {}), '(int)\n', (4192, 4197), True, 'import numpy as np\n'), ((12049, 12060), 'time.time', 'time.time', ([], {}), '()\n', (12058, 12060), False, 'import time\n'), ((22838, 22869), 'logzero.logger.info', 'logger.info', (['"""Saving results.."""'], {}), "('Saving results..')\n", (22849, 22869), False, 'from logzero import logger\n'), ((23650, 23725), 'mlflow.log_artifact', 'mlflow.log_artifact', (['filename'], {'artifact_path': '"""contrastive_learning_results"""'}), "(filename, artifact_path='contrastive_learning_results')\n", (23669, 23725), False, 'import mlflow\n'), ((12312, 12334), 'numpy.asarray', 'np.asarray', (['query_shas'], {}), '(query_shas)\n', (12322, 12334), True, 'import numpy as np\n'), 
((21041, 21059), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (21057, 21059), False, 'import sys\n'), ((21977, 22115), 'pandas.DataFrame', 'pd.DataFrame', (["{'sha256': value['rank']['rank_shas'], 'label': value['rank']['rank_labels'\n ], 'family': value['rank']['rank_families']}"], {}), "({'sha256': value['rank']['rank_shas'], 'label': value['rank'][\n 'rank_labels'], 'family': value['rank']['rank_families']})\n", (21989, 22115), True, 'import pandas as pd\n'), ((23507, 23585), 'mlflow.log_artifact', 'mlflow.log_artifact', (['df_filename'], {'artifact_path': '"""contrastive_learning_results"""'}), "(df_filename, artifact_path='contrastive_learning_results')\n", (23526, 23585), False, 'import mlflow\n'), ((12586, 12601), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (12599, 12601), False, 'import torch\n'), ((13303, 13326), 'numpy.asarray', 'np.asarray', (['anchor_shas'], {}), '(anchor_shas)\n', (13313, 13326), True, 'import numpy as np\n'), ((13845, 13906), 'torch.cdist', 'torch.cdist', (['query_pe_embeddings', 'anchor_pe_embeddings'], {'p': '(2.0)'}), '(query_pe_embeddings, anchor_pe_embeddings, p=2.0)\n', (13856, 13906), False, 'import torch\n'), ((19177, 19212), 'torch.pow', 'torch.pow', (['top_distances[:, :k]', '(-2)'], {}), '(top_distances[:, :k], -2)\n', (19186, 19212), False, 'import torch\n'), ((19874, 19905), 'torch.argmax', 'torch.argmax', (['knn_scores'], {'dim': '(1)'}), '(knn_scores, dim=1)\n', (19886, 19905), False, 'import torch\n'), ((20012, 20023), 'time.time', 'time.time', ([], {}), '()\n', (20021, 20023), False, 'import time\n'), ((12448, 12472), 'copy.deepcopy', 'deepcopy', (['query_features'], {}), '(query_features)\n', (12456, 12472), False, 'from copy import deepcopy\n'), ((13496, 13511), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (13509, 13511), False, 'import torch\n'), ((16465, 16509), 'torch.cat', 'torch.cat', (['(top_distances, distances)'], {'dim': '(1)'}), '((top_distances, distances), dim=1)\n', (16474, 
16509), False, 'import torch\n'), ((21415, 21454), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {'index': 'query_shas'}), '(results, index=query_shas)\n', (21427, 21454), True, 'import pandas as pd\n'), ((19311, 19357), 'torch.zeros', 'torch.zeros', (['(knn_labels.shape[0], n_families)'], {}), '((knn_labels.shape[0], n_families))\n', (19322, 19357), False, 'import torch\n'), ((20474, 20499), 'time.gmtime', 'time.gmtime', (['elapsed_time'], {}), '(elapsed_time)\n', (20485, 20499), False, 'import time\n'), ((20692, 20750), 'time.gmtime', 'time.gmtime', (['(test_steps_per_epoch * elapsed_time / (i + 1))'], {}), '(test_steps_per_epoch * elapsed_time / (i + 1))\n', (20703, 20750), False, 'import time\n'), ((20904, 20927), 'psutil.virtual_memory', 'psutil.virtual_memory', ([], {}), '()\n', (20925, 20927), False, 'import psutil\n'), ((15975, 16010), 'numpy.expand_dims', 'np.expand_dims', (['anchor_shas'], {'axis': '(0)'}), '(anchor_shas, axis=0)\n', (15989, 16010), True, 'import numpy as np\n'), ((16895, 16933), 'numpy.expand_dims', 'np.expand_dims', (['top_shas[x, y]'], {'axis': '(0)'}), '(top_shas[x, y], axis=0)\n', (16909, 16933), True, 'import numpy as np\n'), ((14589, 14624), 'numpy.expand_dims', 'np.expand_dims', (['anchor_shas'], {'axis': '(0)'}), '(anchor_shas, axis=0)\n', (14603, 14624), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""Part 2 - Contact Networks.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1ogskk35uEbaZjwe0IgzGGn-qGJrt2AqG
# COVID-19 Contact Networks
## Import Packages
First we import all required Python packages that we need during the project.
"""
# the pandas package provides tools for storing and manipulating dataframes
import pandas as pd
# numpy package for working with arrays of numbers
import numpy as np
# import matplotlib.pyplot package for functions to plot numeric data
import matplotlib.pyplot as plt
# import "os" package which provides methods for using filesystem
import os
# import "networkx" package which provides methods for generating and processing networks
import networkx as nx
# import "datetime" package which provides methods to process date and time strings
import datetime as dt
# import "geopy" package which provides methods to process geographic coordinates
from geopy.distance import geodesic
# import "sklearn" package which provides methods for processing label values
import sklearn as sk
#import module "preprocessing" from package "sklearn"
from sklearn import preprocessing
"""## Step 1 - Generate Contact Network
In this step, the "PeopleLocations.csv" is read in as a pandas dataframe, which contains a list of recorded locations of individiuals at specific times. Each individual has a unique identifier (ID), and corresponding latitude (Latidute), longitude(Longitude), date(Date) and time(Time) at which the recording was taken. Thus the dataframe contains the features:
`ID; Latidute; Longitude; Date; Time; Covid19`
Here `ID` is a string that represents the identifier of the individual. The fields `Latitude` and `Longitude`are GPS coordinates of the individual at the time of recording. The fields `Date` and `Time` are strings representing the date and time of the recording. The last field `Covid19` contains `"y"` if the individual has been found Covid-19 positive at the time of recording. Otherwise the field `Covid19` contains `"n"`.
"""
# read in data from PeopleLocations.csv into dataframe "peloc"
peloc = pd.read_csv('PeopleLocations.csv',sep=';')
# store the header of the first column of dataframe "peloc" in variable "IDcol"
IDcol = peloc.columns[0]
# determine a list of different individuals for which there is at least one record in the csv file
uniquepart = peloc[IDcol].unique()
# count the number of different individuals. this will be the number of nodes in the contact network
nrnodes = len(uniquepart)
# build up the network by adding a node for each individual with a record in "PeopleLocations.csv"
# create an object "G" using the constructor nx.Graph()
G = nx.Graph()
# create object "le" of class preprocessing.LabelEncoder()
# the object "le" will be used to transform values 'y' or 'n' for the attribute "Covid19" to values 1 or 0
le = preprocessing.LabelEncoder()
# use function le.fit() to define the label values "n" and "y" to be mapped to 0 and 1
le.fit(["n", "y"])
# iterate over different individuals indexed by nodeidx=0,1,...
for nodeidx in range(nrnodes):
    # read in identifier of individual from list `uniquepart` and store in variable personid
    personid = uniquepart[nodeidx]
    # create dataframe dmydf by selecting all location recordings of `personid`
    # (bug fix: this selection referenced the undefined name `df`; the records
    # were read into the dataframe `peloc` above, with the ID in column IDcol)
    dmydf = pd.DataFrame(peloc.loc[peloc[IDcol] == personid].copy())
    # reset index of dataframe dmydf
    dmydf.reset_index(drop=True, inplace=True)
    # read in latitude of first location recording in `dmydf` and store in variable `latitude`
    latitude = dmydf.loc[0, ['Lat']][0]
    # read in longitude of first location recording in `dmydf` and store in variable `longitude`
    longitude = dmydf.loc[0, ['Lon']][0]
    # read in Covid19 infection status of first location recording in `dmydf` and store in variable `valtmp`
    valtmp = dmydf.loc[0, ['Covid19']][0]
    # use le.transform() to map the infection status `valtmp` as `y`->1 and `n`->0
    infected = le.transform([valtmp])
    # read in the date of the recording and store in variable date_tmp
    date_tmp = dt.datetime.strptime(dmydf.loc[0, ['Date']][0], '%d-%m-%Y').date()
    # read in the time of the recording and store in variable time_tmp
    time_tmp = dt.datetime.strptime(dmydf.loc[0, ['Time']][0], '%H:%M:%S').time()
    # combine date and time of the location recording using datetime.combine()
    mydatetime = dt.datetime.combine(date_tmp, time_tmp)
    # add a node with index `nodeidx`
    G.add_node(nodeidx)
    # attribute "name": the identifier of the individual
    G.nodes[nodeidx]['name'] = personid
    # attribute "coords": numpy array of length 2 holding latitude and longitude
    G.nodes[nodeidx]['coords'] = np.array([latitude, longitude])
    # attribute "timestamp": date and time of the recording
    G.nodes[nodeidx]['timestamp'] = mydatetime
    # attribute "Rate": the binary infection status (0 or 1)
    G.nodes[nodeidx]['Rate'] = infected[0]
# loop over all unordered pairs of distinct nodes and connect two individuals
# by an edge if the geodesic distance between their coordinates is below 2
# meters. Since G is undirected, each pair (i, j) only needs to be visited
# once: iterating nodeidx2 > nodeidx1 halves the number of (expensive)
# geodesic evaluations without changing the resulting graph.
for nodeidx1 in range(nrnodes):
    for nodeidx2 in range(nodeidx1 + 1, nrnodes):
        # compute the geodesic distance between individuals "nodeidx1" and "nodeidx2" in meters
        nodedist = geodesic(G.nodes[nodeidx1]['coords'], G.nodes[nodeidx2]['coords']).meters
        # if distance is below two meters connect the individuals by an edge
        if nodedist < 2:
            G.add_edge(nodeidx1, nodeidx2)
# plot contact network G using nx.draw()
# (nx.draw returns None, so its result is intentionally not stored)
nx.draw(G, pos=nx.spring_layout(G))
# display figure using plt.show()
plt.show()
"""## Step 2 - Infection Rate
This milestone requires to determine the infection rate using the contact network obtained when completing Milestone 1. The infection rate is the total number of individuals with a confirmed infection, infection status `"y"`, divided by the total number of individuals in the contact networks.
"""
# set variable "nriters" to 30
nriters=30
# create numpy array `W_MH` of shape (nrnodes,nrnodes) and with all entries zero
W_MH = np.zeros((nrnodes,nrnodes))
# loop over all edges in G.edges ()
for edge in G.edges():
# store first node of this edge in variable "node_a"
node_a = edge[0]
# store second node of this edge in variable "node_b"
node_b = edge[1]
# set entry W_MH[node_a,node_b] and W_MH[node_b,node_a] to MH weight
W_MH[node_a,node_b] = 1/(np.max([G.degree(node_a),G.degree(node_b)])+1)
W_MH[node_b,node_a] = 1/(np.max([G.degree(node_a),G.degree(node_b)])+1)
# loop over all nodes in the contact network G
for nodedmy in G.nodes():
# set weights W[nodedmy,nodedmy] to 1 - sum of weights for all neighbors of nodedmy
W_MH[nodedmy,nodedmy] = 1-np.sum(W_MH[nodedmy,:])
# loop over iterations for computing average infection rates
for iterdmy in range(nriters):
# read in current values of "Rate" attributes into numpy array `graphsigold`
graphsigold = np.fromiter(nx.get_node_attributes(G,'Rate').values(),dtype=float, count=nrnodes)
# loop over all nodes in "G", use loop variable "node_i"
for node_i in G.nodes(data=False):
# set auxiliary variable "tmp" to 0
dmy = 0
# loop over all neighbours of current node "node_i"
for node_j in G[node_i]:
dmy = dmy+W_MH[node_i,node_j]*graphsigold[node_j]#/(fac1*fac2)
G.nodes[node_i]['Rate'] =W_MH[node_i,node_i]*graphsigold[node_i] + dmy
# create a NodeView object "nodes" for the nodes in the networkx graph "G"
nodes = G.nodes()
# create a list of "Rate" attribute for all nodes in "G"
list_of_rates = [G.nodes[node_i]['Rate'] for node_i in nodes]
# create coordinates of nodes in figure using nx.spring_layout(G)
pos = nx.spring_layout(G)
# draw edges of "G" using nx.draw_networkx_edges()
ec = nx.draw_networkx_edges(G, pos)
# use nx.draw_networkx_nodes() to draw nodes of "G" using node colours given by "Rate" value for each node
nc = nx.draw_networkx_nodes(G, pos, nodelist=nodes, node_color=list_of_rates,
label=None, node_size=100, cmap=plt.cm.jet)
# add colorbar using plt.colorbar(); this needs as parameter the object returned by the above nx.draw_networkx_nodes()
plt.colorbar(nc)
# display figure using plt.show()
plt.show()
| [
"matplotlib.pyplot.show",
"networkx.draw_networkx_edges",
"numpy.sum",
"pandas.read_csv",
"geopy.distance.geodesic",
"numpy.zeros",
"sklearn.preprocessing.LabelEncoder",
"matplotlib.pyplot.colorbar",
"datetime.datetime.strptime",
"networkx.spring_layout",
"networkx.Graph",
"networkx.draw_netwo... | [((2236, 2279), 'pandas.read_csv', 'pd.read_csv', (['"""PeopleLocations.csv"""'], {'sep': '""";"""'}), "('PeopleLocations.csv', sep=';')\n", (2247, 2279), True, 'import pandas as pd\n'), ((2819, 2829), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2827, 2829), True, 'import networkx as nx\n'), ((3003, 3031), 'sklearn.preprocessing.LabelEncoder', 'preprocessing.LabelEncoder', ([], {}), '()\n', (3029, 3031), False, 'from sklearn import preprocessing\n'), ((6120, 6130), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (6128, 6130), True, 'import matplotlib.pyplot as plt\n'), ((6595, 6623), 'numpy.zeros', 'np.zeros', (['(nrnodes, nrnodes)'], {}), '((nrnodes, nrnodes))\n', (6603, 6623), True, 'import numpy as np\n'), ((8274, 8293), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (8290, 8293), True, 'import networkx as nx\n'), ((8354, 8384), 'networkx.draw_networkx_edges', 'nx.draw_networkx_edges', (['G', 'pos'], {}), '(G, pos)\n', (8376, 8384), True, 'import networkx as nx\n'), ((8500, 8620), 'networkx.draw_networkx_nodes', 'nx.draw_networkx_nodes', (['G', 'pos'], {'nodelist': 'nodes', 'node_color': 'list_of_rates', 'label': 'None', 'node_size': '(100)', 'cmap': 'plt.cm.jet'}), '(G, pos, nodelist=nodes, node_color=list_of_rates,\n label=None, node_size=100, cmap=plt.cm.jet)\n', (8522, 8620), True, 'import networkx as nx\n'), ((8766, 8782), 'matplotlib.pyplot.colorbar', 'plt.colorbar', (['nc'], {}), '(nc)\n', (8778, 8782), True, 'import matplotlib.pyplot as plt\n'), ((8819, 8829), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (8827, 8829), True, 'import matplotlib.pyplot as plt\n'), ((4623, 4662), 'datetime.datetime.combine', 'dt.datetime.combine', (['date_tmp', 'time_tmp'], {}), '(date_tmp, time_tmp)\n', (4642, 4662), True, 'import datetime as dt\n'), ((5041, 5072), 'numpy.array', 'np.array', (['[latitude, longitude]'], {}), '([latitude, longitude])\n', (5049, 5072), True, 'import numpy as np\n'), ((6063, 
6082), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (6079, 6082), True, 'import networkx as nx\n'), ((7265, 7289), 'numpy.sum', 'np.sum', (['W_MH[nodedmy, :]'], {}), '(W_MH[nodedmy, :])\n', (7271, 7289), True, 'import numpy as np\n'), ((4301, 4360), 'datetime.datetime.strptime', 'dt.datetime.strptime', (["dmydf.loc[0, ['Date']][0]", '"""%d-%m-%Y"""'], {}), "(dmydf.loc[0, ['Date']][0], '%d-%m-%Y')\n", (4321, 4360), True, 'import datetime as dt\n'), ((4459, 4518), 'datetime.datetime.strptime', 'dt.datetime.strptime', (["dmydf.loc[0, ['Time']][0]", '"""%H:%M:%S"""'], {}), "(dmydf.loc[0, ['Time']][0], '%H:%M:%S')\n", (4479, 4518), True, 'import datetime as dt\n'), ((5772, 5838), 'geopy.distance.geodesic', 'geodesic', (["G.nodes[nodeidx1]['coords']", "G.nodes[nodeidx2]['coords']"], {}), "(G.nodes[nodeidx1]['coords'], G.nodes[nodeidx2]['coords'])\n", (5780, 5838), False, 'from geopy.distance import geodesic\n'), ((7497, 7530), 'networkx.get_node_attributes', 'nx.get_node_attributes', (['G', '"""Rate"""'], {}), "(G, 'Rate')\n", (7519, 7530), True, 'import networkx as nx\n')] |
import os, subprocess
import numpy as np
import simtk.unit as unit
from statistics import mean
from scipy.stats import linregress
from scipy import spatial
import matplotlib.pyplot as plt
from cg_openmm.utilities.random_builder import *
from cg_openmm.utilities.iotools import write_pdbfile_without_topology
kB = unit.MOLAR_GAS_CONSTANT_R # Boltzmann constant
def get_helical_parameters(cgmodel):
    """
    Given a coarse grained model as input, this function uses the `kHelios software package <https://pubs.acs.org/doi/10.1021/acs.jcim.6b00721>`_ to analyze the helical properties of the model.

    :param cgmodel: CGModel() class object
    :type cgmodel: class

    :returns:
       - pitch ( float ) - The distance between monomers in adjacent turns of a helix
       - radius ( float ) - The radius of the helix
       - monomers_per_turn ( float ) - The number of monomers per turn of the helix
       - residual ( float ) - The average distance of all backbone particles from a circle projected onto the x-y plane. Used to determine the accuracy of the helical axis, as fit to the input data. Units are in Angstroms.

    .. warning:: This function requires a pre-installed version of `kHelios <https://pubs.acs.org/doi/10.1021/acs.jcim.6b00721>`_ . Because kHelios is formatted to accept input job scripts, this function writes and executes a job script for kHelios. In order to function properly, the user must redefine the 'helios_path' variable for their system.
    """
    # hard-coded relative path to the kHelios executable
    # NOTE(review): this only works from a specific working directory --
    # consider making it configurable
    helios_path = str("../../foldamers/foldamers/parameters/helios.o")
    # orient the helical axis along z before fitting, then dump the coordinates
    # that kHelios will read
    cgmodel = orient_along_z_axis(cgmodel)
    write_pdbfile_without_topology(cgmodel, "temp_pitch.pdb")
    # build a small shell script that writes the kHelios input file ("input")
    # and then invokes the kHelios executable on it
    kHelix_run_file = "run_kHelix.sh"
    file = open(kHelix_run_file, "w")
    file.write("#!/bin/bash\n")
    file.write("\n")
    file.write("cat > input << EOF\n")
    file.write("inputhelix $1\n")
    file.write("helixout_name kHelix.out\n")
    file.write("coord_type 1\n")
    file.write("num_grid 20\n")
    # only half of the beads are fit
    # NOTE(review): assumes exactly half of cgmodel.num_beads are backbone
    # beads -- confirm for models with other sidechain layouts
    file.write("natoms " + str(round(cgmodel.num_beads / 2)) + "\n")
    file.write("nframes 1\n")
    file.write("grid_phi_beg 0\n")
    file.write("grid_phi_end 20\n")
    file.write("grid_theta_beg 0\n")
    file.write("grid_theta_end 20\n")
    file.write("helix_atom_names X1\n")
    file.write("print_to_plot 1\n")
    file.write("EOF\n")
    file.write(str(helios_path) + " input\n")
    # file.write('done\n')
    file.close()
    # make the generated script executable, then run it on the temporary PDB
    subprocess.run(["chmod", "+x", "run_kHelix.sh"])
    subprocess.run(
        [
            str(str(os.getcwd()) + "/" + str(kHelix_run_file)),
            "temp_pitch.pdb",
            ">",
            "helios_output",
        ]
    )
    # os.remove("helios_output")
    # parse the kHelios output file: line 43 holds the fit summary fields
    # (residual, radius, pitch, sweep angle)
    # NOTE(review): if kHelix.out has fewer than 43 lines, the return variables
    # below are never assigned and the return statement raises NameError --
    # confirm kHelios always emits at least 43 lines
    file = open("kHelix.out", mode="r")
    output = file.readlines()
    line_index = 1
    for line in output:
        if line_index == 43:
            residual = line.split()[2]
            radius = line.split()[3]
            pitch = line.split()[4]
            # total sweep angle of the helix in degrees
            sweep = float(line.split()[5])
            # sweep/360 is the number of full turns over the whole polymer
            monomers_per_turn = cgmodel.polymer_length / (sweep / 360.0)
            break
        line_index = line_index + 1
    return (pitch, radius, monomers_per_turn, residual)
def orient_along_z_axis(cgmodel, plot_projections=False):
    """
    Given a coarse grained model as input, this function orients the model along the z-axis.

    The helical axis is identified by fitting straight lines to the (x,y) and
    (x,z) projections of the backbone particle positions and rotating the full
    particle set so that this axis finally coincides with the z-axis.

    :param cgmodel: CGModel() class object
    :type cgmodel: class

    :param plot_projections: Variable indicating whether or not to plot intermediate projections/operations during identification of a helical axis, default = False
    :type plot_projections: bool

    :returns:
       - cgmodel ( class ) - CGModel() class object, with positions oriented so that the helical axis is along the z-axis

    .. note:: As a side effect this function writes "after_rotation.pdb" (and,
       if requested, "xy_projection.png" / "xz_projection.png") to the current
       working directory.
    """
    # all particle positions as a plain numpy array (in Angstroms)
    positions = np.array(
        [
            [float(i.in_units_of(unit.angstrom)._value) for i in position]
            for position in cgmodel.positions
        ]
    )
    # 1) Get the backbone particle positions
    # We should be able to input the particle type name(s) of the backbone bead
    backbone_positions = []
    for particle in range(len(cgmodel.positions)):
        if cgmodel.get_particle_type_name(particle) == "bb":
            backbone_positions.append(cgmodel.positions[particle])
    backbone_positions = np.array(
        [
            [float(i.in_units_of(unit.angstrom)._value) for i in coord]
            for coord in backbone_positions
        ]
    )
    # 2) Project the backbone positions onto the (x,y) plane
    # (this zeroes the z components of `backbone_positions` in place)
    xy_projected_positions = backbone_positions
    x_data = []
    y_data = []
    for position in xy_projected_positions:
        position[2] = 0.0
        x_data.append(position[0])
        y_data.append(position[1])
    # 3) Calculate the best fit line for these projected positions
    slope, intercept, r, p, std_err = linregress(np.array(x_data), np.array(y_data))
    if plot_projections:
        # Plot this projected data, as well as the best fit line.
        # (bug fix: this branch called the undefined name `pyplot`; the module
        # imports matplotlib.pyplot as `plt`)
        file_name = "xy_projection.png"
        figure = plt.figure(1)
        x_data = np.array(x_data)
        y_data = np.array(y_data)
        plt.xlabel("x")
        plt.ylabel("y")
        plt.scatter(x_data, y_data)
        x = np.linspace(min(x_data), max(x_data), 100)
        plt.plot(
            x,
            slope * x + intercept,
            label=str("y=" + str(round(slope, 2)) + "x+" + str(round(intercept, 2))),
        )
        plt.legend()
        plt.savefig(file_name)
        plt.show()
        plt.close()
    # 4) Rotate the coordinate system so that this line is oriented along the x-axis
    # Calculate angle from z-axis:
    z_axis_angle = np.arctan(slope)
    z_axis_rotation_matrix = spatial.transform.Rotation.from_euler(
        "xyz", [0.0, 0.0, z_axis_angle], degrees=False
    )
    x_oriented_positions = z_axis_rotation_matrix.apply(positions)
    # 5) Project the positions onto the (x,z) plane
    # NOTE(review): `backbone_positions` already had its z components zeroed in
    # step 2, and the x/z samples below are read from the full `positions`
    # array using backbone indices -- confirm this indexing is intentional
    xz_projected_positions = backbone_positions
    x_data = []
    z_data = []
    for position_index in range(len(xz_projected_positions)):
        xz_projected_positions[position_index][1] = 0.0
        x_data.append(positions[position_index][0])
        z_data.append(positions[position_index][2])
    # 6) Calculate the best fit line for these projected positions
    slope, intercept, r, p, std_err = linregress(np.array(x_data), np.array(z_data))
    if plot_projections:
        # Plot this projected data, as well as the best fit line
        # (same `pyplot` -> `plt` bug fix as above)
        file_name = "xz_projection.png"
        figure = plt.figure(1)
        x_data = np.array(x_data)
        z_data = np.array(z_data)
        plt.xlabel("x")
        plt.ylabel("z")
        plt.scatter(x_data, z_data)
        x = np.linspace(min(x_data), max(x_data), 100)
        plt.plot(
            x,
            slope * x + intercept,
            label=str("z=" + str(round(slope, 2)) + "x+" + str(round(intercept, 2))),
        )
        plt.legend()
        plt.savefig(file_name)
        plt.show()
        plt.close()
    # 7) Rotate the coordinate system so that this line is oriented along the x-axis
    # Calculate angle from y-axis:
    y_axis_angle = np.arctan(slope)
    y_axis_rotation_matrix = spatial.transform.Rotation.from_euler(
        "xyz", [0.0, y_axis_angle, 0.0], degrees=False
    )
    new_positions = y_axis_rotation_matrix.apply(x_oriented_positions)
    # 8) For comparison with kHelios output, rotate the molecule again so that
    # the helical (x) axis is oriented along the z axis instead.
    # (3.1415/2 approximates pi/2; kept as-is to preserve numerical behavior)
    z_axis_angle = 3.1415 / 2.0
    z_axis_rotation_matrix = spatial.transform.Rotation.from_euler(
        "xyz", [0.0, z_axis_angle, 0.0], degrees=False
    )
    final_positions = z_axis_rotation_matrix.apply(new_positions)
    cgmodel.positions = unit.Quantity(final_positions, unit.angstrom)
    # write the rotated structure; a context manager closes the file handle
    # (the original leaked it by never calling close())
    # NOTE(review): PDBFile is expected to come from the wildcard import of
    # cg_openmm.utilities.random_builder -- confirm
    with open("after_rotation.pdb", "w") as file:
        PDBFile.writeFile(cgmodel.topology, cgmodel.positions, file=file)
    return cgmodel
def show_helical_fit(cgmodel):
    """
    Display the helical fit data for a coarse grained model.

    The backbone ("bb") particle positions are extracted from the model and
    drawn as a labelled 3D curve with matplotlib. Given a coarse grained model
    containing positions, the helical fit itself is performed with
    `kHelios <https://pubs.acs.org/doi/10.1021/acs.jcim.6b00721>`_ .

    :param cgmodel: CGModel() class object containing particle positions
    :type cgmodel: class
    """
    # Convert every particle position into a plain float triple (in Angstroms).
    positions = np.array(
        [
            [float(component.in_units_of(unit.angstrom)._value) for component in pos]
            for pos in cgmodel.positions
        ]
    )
    # Keep only the positions belonging to backbone ("bb") particles.
    backbone_positions = [
        cgmodel.positions[idx]
        for idx in range(len(cgmodel.positions))
        if cgmodel.get_particle_type_name(idx) == "bb"
    ]
    backbone_positions = np.array(
        [
            [float(component.in_units_of(unit.angstrom)._value) for component in entry]
            for entry in backbone_positions
        ]
    )
    # Draw each labelled curve (currently just the unrotated backbone helix).
    for index, (curve, label) in enumerate(
        zip([backbone_positions], ["helix (unrotated)"])
    ):
        fig = plt.figure(index)
        ax = fig.gca(projection="3d")
        ax.plot(curve[:, 0], curve[:, 1], curve[:, 2], label=label)
        ax.legend()
        plt.xlabel("x")
        plt.ylabel("y")
        # plt.zlabel('z') # not defined?
        plt.show()
    return
def calculate_p2(cgmodel):
    """
    Given a coarse grained model containing positions, this function returns the `'P2' <http://cmt.dur.ac.uk/sjc/thesis_dlc/node19.html>`_ orientational ordering parameter value for the current pose.

    .. warning:: By default, 'P2' is evaluated using the positions for only the backbone particles.

    :param cgmodel: CGModel() class object
    :type cgmodel: class

    :returns:
       - p2 ( float ) - The value for the 'P2' orientational ordering parameter.

    .. note:: As a side effect, this function also displays matplotlib plots of
       the helix and its bond directors in the original and rotated frames.
    """
    # all particle positions as a plain numpy array (in Angstroms)
    positions = np.array(
        [
            [float(i.in_units_of(unit.angstrom)._value) for i in position]
            for position in cgmodel.positions
        ]
    )
    # 1) Get the backbone particle positions
    backbone_positions = []
    for particle in range(len(cgmodel.positions)):
        if cgmodel.get_particle_type_name(particle) == "bb":
            backbone_positions.append(cgmodel.positions[particle])
    backbone_positions = np.array(
        [
            [float(i.in_units_of(unit.angstrom)._value) for i in coord]
            for coord in backbone_positions
        ]
    )
    # c: backbone bead coordinates, one row per bead
    c = backbone_positions
    # u: bond ("director") vectors between consecutive beads, normalized in place
    u = np.diff(c, axis=0)
    for ui in u:
        ui /= np.sqrt(np.dot(ui, ui))
    # build the order tensor Q = <1.5 * u u^T> - 0.5 * I over all directors
    Q = np.zeros([3, 3])
    for ui in u:
        Q += 1.5 * np.outer(ui, ui)
    Q /= len(u)
    Q -= 0.5 * np.eye(3)
    # the eigenvectors of Q are the principal ordering directions
    vals, vecs = np.linalg.eig(Q)
    # average projection of the directors onto each eigenvector
    # NOTE(review): as computed, p2 is a length-3 numpy array (one mean per
    # principal direction), not the scalar the docstring describes -- confirm
    p2 = np.mean(np.dot(u, vecs), axis=0)
    # order the principal directions by |p2|: weakest first, strongest last
    dirindices = np.argsort(np.abs(p2))
    # h: dominant (helix-axis) direction; l, m: the two transverse directions
    h = vecs[:, dirindices[2]]
    l = vecs[:, dirindices[1]]
    m = vecs[:, dirindices[0]]
    # rotate the helix itself into the new coordinates
    # in many cases, this seems to not be a great rotation. It seems to
    # start tilting the helix a bit in many cases. Not sure why!!!!
    S = np.zeros([3, 3])
    S = vecs
    # cp/up: coordinates and directors expressed in the eigenvector frame,
    # with columns reordered so the dominant direction becomes the last (z)
    cp = np.dot(c, S)
    cp1 = cp[:, dirindices]
    cp = cp1
    up = np.dot(u, S)
    up1 = up[:, dirindices]
    up = up1
    # recompute and normalize the bond vectors in the rotated frame
    # NOTE(review): up2 is normalized here but never used afterwards -- confirm
    up2 = np.diff(cp, axis=0)
    for upi in up2:
        upi /= np.sqrt(np.dot(upi, upi))
    # order tensor in the rotated frame (diagnostic only; not returned)
    FQ = np.zeros([3, 3])
    for upi in up:
        FQ += 1.5 * np.outer(upi, upi)
    FQ /= len(up)
    FQ -= 0.5 * np.eye(3)
    # average cosine (and derived sine) of the director tilt from the z-axis
    avecos = np.mean(up[:, 2])
    avesin = np.sqrt(1 - avecos ** 2)
    zaxis = np.array([0, 0, 1])
    # upr: directors rotated by the average tilt so they align with the helix
    upr = np.zeros(np.shape(u))
    for i in range(np.shape(upr)[0]):
        scal = np.sqrt(1 - up[i, 2] ** 2)
        # project out into x,y plane
        ax1 = np.array([up[i, 0] / scal, up[i, 1] / scal, 0])
        # normal from the plane
        nm = np.cross(zaxis, ax1)  # the normal to the plane
        v = up[i]  # the vector to rotate
        # R(theta)v = nm(nm.v) + cos(theta) (nm x v) x nm + sin(-theta)(nm x v) # from wikipedia
        upr[i] = (
            nm * np.dot(nm, v) + avecos * np.cross(np.cross(nm, v), nm) - avesin * np.cross(nm, v)
        )
    # cmid = 0.5*(cp[0:-1,:] + cp[1:,:])
    # z = cmid[0:,2]/length
    # plot the helix and directors in both frames (display side effect)
    curves = [c, cp, u, up, upr]
    labels = [
        "helix (unrotated)",
        "helix (rotated)",
        "directors (unrotated)",
        "directors (rotated)",
        "directors (rotated to helix)",
    ]
    for i in range(len(curves)):
        fig = plt.figure(i)
        curve = curves[i]
        label = labels[i]
        ax = fig.gca(projection="3d")
        ax.plot(curve[:, 0], curve[:, 1], curve[:, 2], label=label)
        ax.legend()
        plt.xlabel("x")
        plt.ylabel("y")
        # plt.zlabel('z') # not defined?
        plt.show()
    return p2
def get_helical_data(cgmodel):
    """
    Estimate the radius, pitch and particles-per-turn of a coarse grained model.

    The model is first oriented with orient_along_z_axis(); the radius is then
    the mean distance of the backbone beads from the helical axis, and the
    number of particles per turn is derived from the accumulated rotation angle
    between successive beads.

    :param cgmodel: CGModel() class object containing positions
    :type cgmodel: class

    :returns:
       - radius ( float ) - mean distance of the backbone beads from the axis (Angstroms)
       - pitch ( float ) - estimated rise per helix turn
       - particles_per_turn ( float ) - estimated number of particles per full turn
    """
    # orient the helical axis of the model before measuring anything
    cgmodel = orient_along_z_axis(cgmodel)
    # Get the new backbone particle positions
    backbone_positions = []
    for particle in range(len(cgmodel.positions)):
        if cgmodel.get_particle_type_name(particle) == "bb":
            backbone_positions.append(cgmodel.positions[particle])
    backbone_positions = np.array(
        [
            [float(i.in_units_of(unit.angstrom)._value) for i in coord]
            for coord in backbone_positions
        ]
    )
    # radius
    axis_distances = []
    rotations = 0.0
    for position in backbone_positions:
        # distance of this bead from the point (x, 0, 0), i.e. from the x-axis
        # NOTE(review): `distance` presumably comes from the wildcard import of
        # cg_openmm.utilities.random_builder -- confirm
        axis_distance = distance(
            unit.Quantity([float(position[0]), 0.0, 0.0], unit.angstrom),
            unit.Quantity(position, unit.angstrom),
        )
        axis_distances.append(axis_distance)
        # accumulate the rotation angle between successive beads (y/z plane)
        if len(axis_distances) > 1:
            rotation = np.arctan(position[1] / position[2]) - last_angle
            last_angle = rotation
            rotations = rotations + rotation
        else:
            # first bead: initialize the running angle
            rotation = np.arctan(position[1] / position[2])
            last_angle = rotation
            rotations = rotations + rotation
    # mean distance from the axis, in Angstroms
    radius = mean(
        np.array([float(dist.in_units_of(unit.angstrom)._value) for dist in axis_distances])
    )
    # 6.28 approximates 2*pi, so rotations/6.28 is the number of full turns
    particles_per_turn = float(cgmodel.polymer_length / (rotations / 6.28))
    # pitch
    #
    # Shift all coordinates so that the first backbone atom has z=0
    # NOTE(review): the shift is computed from the z component (index 2) of the
    # first position but applied to the x component (index 0) below -- confirm
    # whether this axis mismatch is intentional
    shift = -cgmodel.positions[0][2]._value
    axis_deltas = []
    for position in cgmodel.positions:
        position[0]._value = position[0]._value + shift
        if abs(position[0]._value - cgmodel.positions[0][0]._value) > 0:
            axis_deltas.append(float(position[0]._value - cgmodel.positions[0][0]._value))
    # average displacement per particle along the (shifted) axis
    average_delta = mean(axis_deltas)
    pitch = average_delta * particles_per_turn
    return (radius, pitch, particles_per_turn)
| [
"numpy.abs",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"numpy.linalg.eig",
"cg_openmm.utilities.iotools.write_pdbfile_without_topology",
"matplotlib.pyplot.show",
"numpy.cross",
"statistics.mean",
"matplotlib.pyplot.ylabel",
"numpy.dot",
"numpy.arctan",
"subprocess.run",
"nu... | [((1646, 1703), 'cg_openmm.utilities.iotools.write_pdbfile_without_topology', 'write_pdbfile_without_topology', (['cgmodel', '"""temp_pitch.pdb"""'], {}), "(cgmodel, 'temp_pitch.pdb')\n", (1676, 1703), False, 'from cg_openmm.utilities.iotools import write_pdbfile_without_topology\n'), ((2455, 2503), 'subprocess.run', 'subprocess.run', (["['chmod', '+x', 'run_kHelix.sh']"], {}), "(['chmod', '+x', 'run_kHelix.sh'])\n", (2469, 2503), False, 'import os, subprocess\n'), ((5730, 5746), 'numpy.arctan', 'np.arctan', (['slope'], {}), '(slope)\n', (5739, 5746), True, 'import numpy as np\n'), ((5776, 5865), 'scipy.spatial.transform.Rotation.from_euler', 'spatial.transform.Rotation.from_euler', (['"""xyz"""', '[0.0, 0.0, z_axis_angle]'], {'degrees': '(False)'}), "('xyz', [0.0, 0.0, z_axis_angle],\n degrees=False)\n", (5813, 5865), False, 'from scipy import spatial\n'), ((7308, 7324), 'numpy.arctan', 'np.arctan', (['slope'], {}), '(slope)\n', (7317, 7324), True, 'import numpy as np\n'), ((7354, 7443), 'scipy.spatial.transform.Rotation.from_euler', 'spatial.transform.Rotation.from_euler', (['"""xyz"""', '[0.0, y_axis_angle, 0.0]'], {'degrees': '(False)'}), "('xyz', [0.0, y_axis_angle, 0.0],\n degrees=False)\n", (7391, 7443), False, 'from scipy import spatial\n'), ((7727, 7816), 'scipy.spatial.transform.Rotation.from_euler', 'spatial.transform.Rotation.from_euler', (['"""xyz"""', '[0.0, z_axis_angle, 0.0]'], {'degrees': '(False)'}), "('xyz', [0.0, z_axis_angle, 0.0],\n degrees=False)\n", (7764, 7816), False, 'from scipy import spatial\n'), ((7918, 7963), 'simtk.unit.Quantity', 'unit.Quantity', (['final_positions', 'unit.angstrom'], {}), '(final_positions, unit.angstrom)\n', (7931, 7963), True, 'import simtk.unit as unit\n'), ((10625, 10643), 'numpy.diff', 'np.diff', (['c'], {'axis': '(0)'}), '(c, axis=0)\n', (10632, 10643), True, 'import numpy as np\n'), ((10708, 10724), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (10716, 10724), True, 'import numpy as 
np\n'), ((10836, 10852), 'numpy.linalg.eig', 'np.linalg.eig', (['Q'], {}), '(Q)\n', (10849, 10852), True, 'import numpy as np\n'), ((11238, 11254), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (11246, 11254), True, 'import numpy as np\n'), ((11277, 11289), 'numpy.dot', 'np.dot', (['c', 'S'], {}), '(c, S)\n', (11283, 11289), True, 'import numpy as np\n'), ((11341, 11353), 'numpy.dot', 'np.dot', (['u', 'S'], {}), '(u, S)\n', (11347, 11353), True, 'import numpy as np\n'), ((11406, 11425), 'numpy.diff', 'np.diff', (['cp'], {'axis': '(0)'}), '(cp, axis=0)\n', (11413, 11425), True, 'import numpy as np\n'), ((11497, 11513), 'numpy.zeros', 'np.zeros', (['[3, 3]'], {}), '([3, 3])\n', (11505, 11513), True, 'import numpy as np\n'), ((11630, 11647), 'numpy.mean', 'np.mean', (['up[:, 2]'], {}), '(up[:, 2])\n', (11637, 11647), True, 'import numpy as np\n'), ((11661, 11685), 'numpy.sqrt', 'np.sqrt', (['(1 - avecos ** 2)'], {}), '(1 - avecos ** 2)\n', (11668, 11685), True, 'import numpy as np\n'), ((11699, 11718), 'numpy.array', 'np.array', (['[0, 0, 1]'], {}), '([0, 0, 1])\n', (11707, 11718), True, 'import numpy as np\n'), ((14747, 14764), 'statistics.mean', 'mean', (['axis_deltas'], {}), '(axis_deltas)\n', (14751, 14764), False, 'from statistics import mean\n'), ((4846, 4875), 'numpy.array', 'np.array', (['[x for x in x_data]'], {}), '([x for x in x_data])\n', (4854, 4875), True, 'import numpy as np\n'), ((4877, 4906), 'numpy.array', 'np.array', (['[y for y in y_data]'], {}), '([y for y in y_data])\n', (4885, 4906), True, 'import numpy as np\n'), ((5095, 5124), 'numpy.array', 'np.array', (['[x for x in x_data]'], {}), '([x for x in x_data])\n', (5103, 5124), True, 'import numpy as np\n'), ((5142, 5171), 'numpy.array', 'np.array', (['[y for y in y_data]'], {}), '([y for y in y_data])\n', (5150, 5171), True, 'import numpy as np\n'), ((6424, 6453), 'numpy.array', 'np.array', (['[x for x in x_data]'], {}), '([x for x in x_data])\n', (6432, 6453), True, 'import numpy as 
np\n'), ((6455, 6484), 'numpy.array', 'np.array', (['[z for z in z_data]'], {}), '([z for z in z_data])\n', (6463, 6484), True, 'import numpy as np\n'), ((6673, 6702), 'numpy.array', 'np.array', (['[x for x in x_data]'], {}), '([x for x in x_data])\n', (6681, 6702), True, 'import numpy as np\n'), ((6720, 6749), 'numpy.array', 'np.array', (['[z for z in z_data]'], {}), '([z for z in z_data])\n', (6728, 6749), True, 'import numpy as np\n'), ((9137, 9150), 'matplotlib.pyplot.figure', 'plt.figure', (['i'], {}), '(i)\n', (9147, 9150), True, 'import matplotlib.pyplot as plt\n'), ((9337, 9352), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (9347, 9352), True, 'import matplotlib.pyplot as plt\n'), ((9361, 9376), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (9371, 9376), True, 'import matplotlib.pyplot as plt\n'), ((9426, 9436), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (9434, 9436), True, 'import matplotlib.pyplot as plt\n'), ((10809, 10818), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (10815, 10818), True, 'import numpy as np\n'), ((10870, 10885), 'numpy.dot', 'np.dot', (['u', 'vecs'], {}), '(u, vecs)\n', (10876, 10885), True, 'import numpy as np\n'), ((10924, 10934), 'numpy.abs', 'np.abs', (['p2'], {}), '(p2)\n', (10930, 10934), True, 'import numpy as np\n'), ((11606, 11615), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (11612, 11615), True, 'import numpy as np\n'), ((11738, 11749), 'numpy.shape', 'np.shape', (['u'], {}), '(u)\n', (11746, 11749), True, 'import numpy as np\n'), ((11804, 11830), 'numpy.sqrt', 'np.sqrt', (['(1 - up[i, 2] ** 2)'], {}), '(1 - up[i, 2] ** 2)\n', (11811, 11830), True, 'import numpy as np\n'), ((11882, 11929), 'numpy.array', 'np.array', (['[up[i, 0] / scal, up[i, 1] / scal, 0]'], {}), '([up[i, 0] / scal, up[i, 1] / scal, 0])\n', (11890, 11929), True, 'import numpy as np\n'), ((11975, 11995), 'numpy.cross', 'np.cross', (['zaxis', 'ax1'], {}), '(zaxis, ax1)\n', (11983, 11995), 
True, 'import numpy as np\n'), ((12623, 12636), 'matplotlib.pyplot.figure', 'plt.figure', (['i'], {}), '(i)\n', (12633, 12636), True, 'import matplotlib.pyplot as plt\n'), ((12823, 12838), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""x"""'], {}), "('x')\n", (12833, 12838), True, 'import matplotlib.pyplot as plt\n'), ((12847, 12862), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""y"""'], {}), "('y')\n", (12857, 12862), True, 'import matplotlib.pyplot as plt\n'), ((12912, 12922), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (12920, 12922), True, 'import matplotlib.pyplot as plt\n'), ((10683, 10697), 'numpy.dot', 'np.dot', (['ui', 'ui'], {}), '(ui, ui)\n', (10689, 10697), True, 'import numpy as np\n'), ((10761, 10777), 'numpy.outer', 'np.outer', (['ui', 'ui'], {}), '(ui, ui)\n', (10769, 10777), True, 'import numpy as np\n'), ((11469, 11485), 'numpy.dot', 'np.dot', (['upi', 'upi'], {}), '(upi, upi)\n', (11475, 11485), True, 'import numpy as np\n'), ((11553, 11571), 'numpy.outer', 'np.outer', (['upi', 'upi'], {}), '(upi, upi)\n', (11561, 11571), True, 'import numpy as np\n'), ((11770, 11783), 'numpy.shape', 'np.shape', (['upr'], {}), '(upr)\n', (11778, 11783), True, 'import numpy as np\n'), ((13683, 13721), 'simtk.unit.Quantity', 'unit.Quantity', (['position', 'unit.angstrom'], {}), '(position, unit.angstrom)\n', (13696, 13721), True, 'import simtk.unit as unit\n'), ((14003, 14039), 'numpy.arctan', 'np.arctan', (['(position[1] / position[2])'], {}), '(position[1] / position[2])\n', (14012, 14039), True, 'import numpy as np\n'), ((12265, 12280), 'numpy.cross', 'np.cross', (['nm', 'v'], {}), '(nm, v)\n', (12273, 12280), True, 'import numpy as np\n'), ((13837, 13873), 'numpy.arctan', 'np.arctan', (['(position[1] / position[2])'], {}), '(position[1] / position[2])\n', (13846, 13873), True, 'import numpy as np\n'), ((12199, 12212), 'numpy.dot', 'np.dot', (['nm', 'v'], {}), '(nm, v)\n', (12205, 12212), True, 'import numpy as np\n'), ((12233, 12248), 
'numpy.cross', 'np.cross', (['nm', 'v'], {}), '(nm, v)\n', (12241, 12248), True, 'import numpy as np\n'), ((2554, 2565), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2563, 2565), False, 'import os, subprocess\n')] |
import numpy as np
# compute sigmoid nonlinearity
def sigmoid(x):
    """Map *x* through the logistic function 1 / (1 + e^-x).

    Works elementwise on numpy arrays as well as on scalars.
    """
    return 1 / (1 + np.exp(-x))
# convert output of sigmoid function to its derivative
def sigmoid_output_to_derivative(output):
    """Slope of the sigmoid expressed via its own output s, i.e. s * (1 - s)."""
    slope = (1 - output) * output
    return slope
# input dataset: four 2-bit patterns
X = np.array([[0, 1],
              [0, 1],
              [1, 0],
              [1, 0]])
# output dataset, transposed into a column vector to match layer_1's shape
y = np.array([[0, 0, 1, 1]]).T
# seed random numbers to make calculation
# deterministic (just a good practice)
np.random.seed(1)
# initialize weights randomly with mean 0
synapse_0 = 2 * np.random.random((2, 1)) - 1
# FIX: loop variable renamed from 'iter' (shadowed the builtin iter()).
for step in range(10000):
    # forward propagation
    layer_0 = X
    layer_1 = sigmoid(np.dot(layer_0, synapse_0))
    # how much did we miss?
    layer_1_error = layer_1 - y
    # multiply how much we missed by the
    # slope of the sigmoid at the values in l1
    layer_1_delta = layer_1_error * sigmoid_output_to_derivative(layer_1)
    synapse_0_derivative = np.dot(layer_0.T, layer_1_delta)
    # update weights (plain gradient descent, implicit learning rate 1)
    synapse_0 -= synapse_0_derivative
print("Output After Training:")
print(layer_1)
| [
"numpy.random.seed",
"numpy.random.random",
"numpy.array",
"numpy.exp",
"numpy.dot"
] | [((279, 321), 'numpy.array', 'np.array', (['[[0, 1], [0, 1], [1, 0], [1, 0]]'], {}), '([[0, 1], [0, 1], [1, 0], [1, 0]])\n', (287, 321), True, 'import numpy as np\n'), ((523, 540), 'numpy.random.seed', 'np.random.seed', (['(1)'], {}), '(1)\n', (537, 540), True, 'import numpy as np\n'), ((413, 437), 'numpy.array', 'np.array', (['[[0, 0, 1, 1]]'], {}), '([[0, 0, 1, 1]])\n', (421, 437), True, 'import numpy as np\n'), ((1015, 1047), 'numpy.dot', 'np.dot', (['layer_0.T', 'layer_1_delta'], {}), '(layer_0.T, layer_1_delta)\n', (1021, 1047), True, 'import numpy as np\n'), ((602, 626), 'numpy.random.random', 'np.random.random', (['(2, 1)'], {}), '((2, 1))\n', (618, 626), True, 'import numpy as np\n'), ((728, 754), 'numpy.dot', 'np.dot', (['layer_0', 'synapse_0'], {}), '(layer_0, synapse_0)\n', (734, 754), True, 'import numpy as np\n'), ((89, 99), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (95, 99), True, 'import numpy as np\n')] |
from setuptools import setup, find_packages, Extension
from setuptools import dist
# Fetch numpy/Cython before setup() executes: numpy.get_include() (used for
# the Cython extension below) must already be importable at setup time.
dist.Distribution().fetch_build_eggs(['numpy==1.19.3', 'Cython==0.29.21'])
import numpy
#from Cython.Build import cythonize
# Package metadata and build configuration for the sibreg package.
setup(name='sibreg',
      version='1.2.0a1',
      description='Functions for performing robust GWAS using sibpairs in a random effects model',
      url='http://github.com/alexTISYoung/sibreg',
      download_url='https://github.com/AlexTISYoung/hlmm/archive/1.2.0a1.tar.gz',
      author='<NAME>',
      author_email='<EMAIL>',
      license='MIT',
      scripts=['fGWAS.py', 'fPGS.py', 'impute_runner.py'],
      classifiers=[
        # How mature is this project? Common values are
        # 3 - Alpha
        # 4 - Beta
        # 5 - Production/Stable
        'Development Status :: 4 - Beta',
        # Indicate who your project is intended for
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 3.9',
      ],
      keywords='statistics genetics',
      packages=['sibreg', 'sibreg.bin'],
      setup_requires=['pytest-runner', 'numpy==1.19.3', 'Cython==0.29.21'],
      install_requires=[
            'bgen_reader==4.0.7',
            'pandas==1.1.1',
            'Cython==0.29.21',
            'scipy==1.7.1',
            'numpy==1.19.3',
            'pysnptools==0.4.11',
            'networkx==2.2',
            'h5py==2.10.0',
            'pooch==1.5.1',
            'numba==0.50.0',
            'gitpython==3.1.24',
      ],
      tests_require=['pytest'],
      extras_require={
            'test': ['numdifftools'],
      },
      test_suite="tests",
      zip_safe=False,
      # C++/OpenMP Cython extension for the sibling-imputation hot loop.
      ext_modules=[Extension("sibreg.bin.impute_from_sibs",
                             ["sibreg/bin/impute_from_sibs.pyx"],
                             include_dirs=[numpy.get_include()],
                             language='c++',
                             extra_compile_args=['-fopenmp'],
                             extra_link_args=['-fopenmp'],
                             ),
                   ],
      )
| [
"setuptools.dist.Distribution",
"numpy.get_include"
] | [((83, 102), 'setuptools.dist.Distribution', 'dist.Distribution', ([], {}), '()\n', (100, 102), False, 'from setuptools import dist\n'), ((2168, 2187), 'numpy.get_include', 'numpy.get_include', ([], {}), '()\n', (2185, 2187), False, 'import numpy\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import collections
import math
import numpy as np
import os
import random
import tensorflow as tf
import zipfile
from itertools import compress
from matplotlib import pylab
from six.moves import range
from six.moves.urllib.request import urlretrieve
from sklearn.manifold import TSNE
class Embedder:
    """Train a skip-gram word2vec embedding over the given corpus files.

    Pipeline (see build_embedding): read the corpus files into one word
    list, build a fixed-size vocabulary with an 'UNK' bucket for rare
    words, generate (center, context) skip-gram batches, and train an
    embedding matrix with an NCE loss in TensorFlow (1.x graph mode).
    """
    def __init__(self,
                 filenames=['clean_en_US.blogs.txt',
                            'clean_en_US.news.txt',
                            'clean_en_US.twitter.txt'],
                 vocabulary_size=10000,
                 data_index=0,
                 num_skips=1,
                 skip_window=6,
                 batch_size=1024,
                 embedding_size=28,
                 valid_size=8,
                 valid_window=100,
                 num_sampled=4096,
                 num_steps=3001
                 ):
        # NOTE(review): mutable default argument `filenames` is shared across
        # instances -- harmless here since it is never mutated.
        self.filenames = filenames
        self.word_list = []
        self.vocabulary_size = vocabulary_size
        self.dictionary = dict()
        self.data = list()
        self.count = [['UNK', -1]]
        self.reverse_dictionary = None
        self.data_index = data_index
        self.num_skips = num_skips # How many times to reuse an input to generate a label.
        self.skip_window = skip_window # How many words to consider left and right.
        self.batch_size = batch_size
        self.embedding_size = embedding_size # Dimension of the embedding vector.
        # We pick a random validation set to sample nearest neighbors. here we limit the
        # validation samples to the words that have a low numeric ID, which by
        # construction are also the most frequent.
        self.valid_size = valid_size # Random set of words to evaluate similarity on.
        self.valid_window = valid_window # Only pick dev samples in the head of the distribution.
        self.valid_examples = np.array(random.sample(range(valid_window), valid_size))
        self.num_sampled = num_sampled # Number of negative examples to sample.
        assert self.batch_size % self.num_skips == 0
        assert self.num_skips <= 2 * self.skip_window
        self.graph = tf.Graph()
        self.num_steps = num_steps
    def build_embedding(self):
        """Run the full pipeline and return the trained, normalized embeddings."""
        self.read_data()
        self.build_dataset()
        ##self.test_data()
        return self.train_data()
    def read_data(self):
        """Load the data from each line and put it in a list."""
        print('Generating list for embedding.')
        for temp_file_name in self.filenames:
            with open(temp_file_name, 'r', encoding="utf8") as temp_file:
                for line in temp_file:
                    temp_line = line.strip().split()
                    self.word_list.extend(temp_line)
        print('List is %d words long.' % len(self.word_list))
    def build_dataset(self):
        """Map words to integer ids; words outside the top (vocabulary_size - 1)
        collapse into 'UNK' (id 0). Also builds the reverse lookup table."""
        self.count.extend(collections.Counter(self.word_list).most_common(self.vocabulary_size - 1))
        for word, _ in self.count:
            self.dictionary[word] = len(self.dictionary)
        unk_count = 0
        for word in self.word_list:
            if word in self.dictionary:
                index = self.dictionary[word]
            else:
                index = 0 # dictionary['UNK']
                unk_count = unk_count + 1
            self.data.append(index)
        self.count[0][1] = unk_count
        self.reverse_dictionary = dict(zip(self.dictionary.values(), self.dictionary.keys()))
        print('Most common words (+UNK)', self.count[:5])
        print('Sample data', self.data[:10])
        # Free the raw word list -- only the id-encoded `data` is needed now.
        self.word_list = None
        ##return data, count, dictionary, reverse_dictionary
    def generate_batch(self):
        """Produce one skip-gram batch from self.data.

        Returns (batch, labels): batch[i] is a center-word id and
        labels[i, 0] is a context word sampled within skip_window of it.
        Advances self.data_index so successive calls walk the corpus.
        """
        ##global data_index
        batch = np.ndarray(shape=(self.batch_size), dtype=np.int32)
        labels = np.ndarray(shape=(self.batch_size, 1), dtype=np.int32)
        span = 2 * self.skip_window + 1 # [ skip_window target skip_window ]
        buffer = collections.deque(maxlen=span)
        for _ in range(span):
            buffer.append(self.data[self.data_index])
            self.data_index = (self.data_index + 1) % len(self.data)
        for i in range(self.batch_size // self.num_skips):
            target = self.skip_window # target label at the center of the buffer
            targets_to_avoid = [self.skip_window]
            for j in range(self.num_skips):
                while target in targets_to_avoid:
                    target = random.randint(0, span - 1)
                targets_to_avoid.append(target)
                batch[i * self.num_skips + j] = buffer[self.skip_window]
                labels[i * self.num_skips + j, 0] = buffer[target]
            buffer.append(self.data[self.data_index])
            self.data_index = (self.data_index + 1) % len(self.data)
        return batch, labels
    def test_data(self):
        """Print a few sample batches for manual inspection."""
        print('data:', [self.reverse_dictionary[di] for di in self.data[:8]])
        for num_skips, skip_window in [(2, 1), (4, 2)]:
            data_index = 0  # NOTE(review): local only -- does not reset self.data_index
            batch, labels = self.generate_batch()
            print('\nwith num_skips = %d and skip_window = %d:' % (num_skips, skip_window))
            print('    batch:', [self.reverse_dictionary[bi] for bi in batch])
            print('    labels:', [self.reverse_dictionary[li] for li in labels.reshape(self.batch_size)])
    """==============================PROGRESS=============================="""
    def create_graph(self):
        """Build the TF graph: embedding lookup, NCE loss, Adagrad optimizer,
        and a cosine-similarity op over the validation words."""
        with self.graph.as_default(), tf.device('/cpu:0'):
            # Input data.
            self.train_dataset = tf.placeholder(shape=[self.batch_size], dtype=tf.int32)
            self.train_labels = tf.placeholder(shape=[self.batch_size, 1], dtype=tf.int32)
            self.valid_dataset = tf.constant(self.valid_examples, dtype=tf.int32)
            # Variables.
            self.embeddings = tf.Variable(
                tf.random_uniform([self.vocabulary_size, self.embedding_size], -1.0, 1.0))
            self.softmax_weights = tf.Variable(
                tf.random_uniform([self.vocabulary_size, self.embedding_size], -1.0, 1.0))
            self.softmax_biases = tf.Variable(tf.zeros([self.vocabulary_size]))
            # Model.
            # Look up embeddings for inputs.
            self.embed = tf.nn.embedding_lookup(self.embeddings, self.train_dataset)
            ##print(tf.DType.is_floating(self.embed))
            ##self.embed = tf.nn.embedding_lookup(self.train_dataset, self.embeddings)
            # Compute the softmax loss, using a sample of the negative labels each time.
            ##self.loss = tf.reduce_mean(
            ##    tf.nn.sampled_softmax_loss(self.softmax_weights,
            ##                               self.softmax_biases,
            ##                               self.train_labels,
            ##                               self.embed,
            ##                               self.num_sampled,
            ##
            ##                               self.vocabulary_size))
            self.loss = tf.reduce_mean(tf.nn.nce_loss(self.softmax_weights,
                                                  self.softmax_biases,
                                                  self.train_labels,
                                                  self.embed,
                                                  self.num_sampled,
                                                  self.vocabulary_size))
            # Optimizer.
            # Note: The optimizer will optimize the softmax_weights AND the embeddings.
            # This is because the embeddings are defined as a variable quantity and the
            # optimizer's `minimize` method will by default modify all variable quantities
            # that contribute to the tensor it is passed.
            # See docs on `tf.train.Optimizer.minimize()` for more details.
            self.optimizer = tf.train.AdagradOptimizer(1.0).minimize(self.loss)
            # Compute the similarity between minibatch examples and all embeddings.
            # We use the cosine distance:
            self.norm = tf.sqrt(tf.reduce_sum(tf.square(self.embeddings), 1, keepdims=True))
            self.normalized_embeddings = self.embeddings / self.norm
            self.valid_embeddings = tf.nn.embedding_lookup(
                self.normalized_embeddings, self.valid_dataset)
            self.similarity = tf.matmul(self.valid_embeddings,
                                        tf.transpose(self.normalized_embeddings))
    def run_graph(self):
        """Run num_steps training steps; periodically log the average loss and
        the nearest neighbors of the validation words. Returns the trained
        normalized embedding matrix as a numpy array."""
        with self.graph.as_default(), tf.device('/cpu:0'):
            with tf.Session(graph=self.graph) as session:
                tf.global_variables_initializer().run()
                print('Initialized')
                average_loss = 0
                for step in range(self.num_steps):
                    batch_data, batch_labels = self.generate_batch()
                    feed_dict = {self.train_dataset : batch_data, self.train_labels : batch_labels}
                    _, l = session.run([self.optimizer, self.loss], feed_dict=feed_dict)
                    average_loss += l
                    if step % 1000 == 0:
                        if step > 0:
                            average_loss = average_loss / 1000
                        # The average loss is an estimate of the loss over the last 2000 batches.
                        print('Average loss at step %d: %f' % (step, average_loss))
                        average_loss = 0
                    # note that this is expensive (~20% slowdown if computed every 500 steps)
                    if step % 2000 == 0:
                        sim = self.similarity.eval()
                        for i in range(self.valid_size):
                            valid_word = self.reverse_dictionary[self.valid_examples[i]]
                            top_k = 8 # number of nearest neighbors
                            nearest = (-sim[i, :]).argsort()[1:top_k+1]
                            log = 'Nearest to %s:' % valid_word
                            for k in range(top_k):
                                close_word = self.reverse_dictionary[nearest[k]]
                                log = '%s %s,' % (log, close_word)
                            print(log)
                return self.normalized_embeddings.eval()
                ##final_embeddings = self.normalized_embeddings.eval()
    def train_data(self):
        """Build the graph, train it, and return the learned embeddings."""
        self.create_graph()
        return self.run_graph()
if __name__ == "__main__":
    # The corpus files are expected in ../Datasets relative to the current
    # working directory; move there before constructing the Embedder.
    os.chdir('..')
    os.chdir('Datasets')
    temp = Embedder()
    print(temp.build_embedding())
| [
"numpy.ndarray",
"os.chdir",
"collections.deque",
"tensorflow.nn.nce_loss",
"random.randint",
"six.moves.range",
"tensorflow.placeholder",
"collections.Counter",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.train.AdagradOptimizer",
"tensorflow.Sessio... | [((10639, 10653), 'os.chdir', 'os.chdir', (['""".."""'], {}), "('..')\n", (10647, 10653), False, 'import os\n'), ((10658, 10678), 'os.chdir', 'os.chdir', (['"""Datasets"""'], {}), "('Datasets')\n", (10666, 10678), False, 'import os\n'), ((2303, 2313), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (2311, 2313), True, 'import tensorflow as tf\n'), ((3872, 3921), 'numpy.ndarray', 'np.ndarray', ([], {'shape': 'self.batch_size', 'dtype': 'np.int32'}), '(shape=self.batch_size, dtype=np.int32)\n', (3882, 3921), True, 'import numpy as np\n'), ((3941, 3995), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(self.batch_size, 1)', 'dtype': 'np.int32'}), '(shape=(self.batch_size, 1), dtype=np.int32)\n', (3951, 3995), True, 'import numpy as np\n'), ((4090, 4120), 'collections.deque', 'collections.deque', ([], {'maxlen': 'span'}), '(maxlen=span)\n', (4107, 4120), False, 'import collections\n'), ((4138, 4149), 'six.moves.range', 'range', (['span'], {}), '(span)\n', (4143, 4149), False, 'from six.moves import range\n'), ((4291, 4331), 'six.moves.range', 'range', (['(self.batch_size // self.num_skips)'], {}), '(self.batch_size // self.num_skips)\n', (4296, 4331), False, 'from six.moves import range\n'), ((4486, 4507), 'six.moves.range', 'range', (['self.num_skips'], {}), '(self.num_skips)\n', (4491, 4507), False, 'from six.moves import range\n'), ((5619, 5638), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (5628, 5638), True, 'import tensorflow as tf\n'), ((5700, 5755), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size]', 'dtype': 'tf.int32'}), '(shape=[self.batch_size], dtype=tf.int32)\n', (5714, 5755), True, 'import tensorflow as tf\n'), ((5788, 5846), 'tensorflow.placeholder', 'tf.placeholder', ([], {'shape': '[self.batch_size, 1]', 'dtype': 'tf.int32'}), '(shape=[self.batch_size, 1], dtype=tf.int32)\n', (5802, 5846), True, 'import tensorflow as tf\n'), ((5880, 5928), 'tensorflow.constant', 
'tf.constant', (['self.valid_examples'], {'dtype': 'tf.int32'}), '(self.valid_examples, dtype=tf.int32)\n', (5891, 5928), True, 'import tensorflow as tf\n'), ((6400, 6459), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.embeddings', 'self.train_dataset'], {}), '(self.embeddings, self.train_dataset)\n', (6422, 6459), True, 'import tensorflow as tf\n'), ((8403, 8473), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.normalized_embeddings', 'self.valid_dataset'], {}), '(self.normalized_embeddings, self.valid_dataset)\n', (8425, 8473), True, 'import tensorflow as tf\n'), ((8700, 8719), 'tensorflow.device', 'tf.device', (['"""/cpu:0"""'], {}), "('/cpu:0')\n", (8709, 8719), True, 'import tensorflow as tf\n'), ((2061, 2080), 'six.moves.range', 'range', (['valid_window'], {}), '(valid_window)\n', (2066, 2080), False, 'from six.moves import range\n'), ((6014, 6087), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.vocabulary_size, self.embedding_size]', '(-1.0)', '(1.0)'], {}), '([self.vocabulary_size, self.embedding_size], -1.0, 1.0)\n', (6031, 6087), True, 'import tensorflow as tf\n'), ((6153, 6226), 'tensorflow.random_uniform', 'tf.random_uniform', (['[self.vocabulary_size, self.embedding_size]', '(-1.0)', '(1.0)'], {}), '([self.vocabulary_size, self.embedding_size], -1.0, 1.0)\n', (6170, 6226), True, 'import tensorflow as tf\n'), ((6274, 6306), 'tensorflow.zeros', 'tf.zeros', (['[self.vocabulary_size]'], {}), '([self.vocabulary_size])\n', (6282, 6306), True, 'import tensorflow as tf\n'), ((7171, 7303), 'tensorflow.nn.nce_loss', 'tf.nn.nce_loss', (['self.softmax_weights', 'self.softmax_biases', 'self.train_labels', 'self.embed', 'self.num_sampled', 'self.vocabulary_size'], {}), '(self.softmax_weights, self.softmax_biases, self.train_labels,\n self.embed, self.num_sampled, self.vocabulary_size)\n', (7185, 7303), True, 'import tensorflow as tf\n'), ((8594, 8634), 'tensorflow.transpose', 'tf.transpose', 
(['self.normalized_embeddings'], {}), '(self.normalized_embeddings)\n', (8606, 8634), True, 'import tensorflow as tf\n'), ((8738, 8766), 'tensorflow.Session', 'tf.Session', ([], {'graph': 'self.graph'}), '(graph=self.graph)\n', (8748, 8766), True, 'import tensorflow as tf\n'), ((8933, 8954), 'six.moves.range', 'range', (['self.num_steps'], {}), '(self.num_steps)\n', (8938, 8954), False, 'from six.moves import range\n'), ((3018, 3053), 'collections.Counter', 'collections.Counter', (['self.word_list'], {}), '(self.word_list)\n', (3037, 3053), False, 'import collections\n'), ((4588, 4615), 'random.randint', 'random.randint', (['(0)', '(span - 1)'], {}), '(0, span - 1)\n', (4602, 4615), False, 'import random\n'), ((8027, 8057), 'tensorflow.train.AdagradOptimizer', 'tf.train.AdagradOptimizer', (['(1.0)'], {}), '(1.0)\n', (8052, 8057), True, 'import tensorflow as tf\n'), ((8251, 8277), 'tensorflow.square', 'tf.square', (['self.embeddings'], {}), '(self.embeddings)\n', (8260, 8277), True, 'import tensorflow as tf\n'), ((8795, 8828), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (8826, 8828), True, 'import tensorflow as tf\n'), ((9837, 9859), 'six.moves.range', 'range', (['self.valid_size'], {}), '(self.valid_size)\n', (9842, 9859), False, 'from six.moves import range\n'), ((10191, 10203), 'six.moves.range', 'range', (['top_k'], {}), '(top_k)\n', (10196, 10203), False, 'from six.moves import range\n')] |
"""Convert Senate tweets from 2020 Democratic candidates to bag of words.
This requires that a file containing the tweets called `tweets.csv` be stored
in `data/candidate-tweets-2020/raw/`.
"""
import os
import numpy as np
import pandas as pd
from scipy import sparse
import setup_utils as utils
from sklearn.feature_extraction.text import CountVectorizer
# Resolve the project root relative to this script's location.
project_dir = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir))
data_dir = os.path.join(project_dir, "data/candidate-tweets-2020/raw")
save_dir = os.path.join(project_dir, "data/candidate-tweets-2020/clean")
df = pd.read_csv(os.path.join(data_dir, "tweets.csv"))
# Don't include tweets before 2019.
df = df[pd.to_datetime(df['created_at']) > pd.to_datetime('2019')]
# Remove unorthodox campaigns of Yang, Williamson, and Inslee.
df = df[df.screen_name != 'AndrewYang']
df = df[df.screen_name != 'marwilliamson']
df = df[df.screen_name != 'JayInslee']
candidates = np.array(df['screen_name'])
tweets = np.array(df['text'])
# Map each (title-cased) screen name to a dense integer id; sorting makes
# the ids reproducible across runs.
candidate_to_candidate_id = dict(
    [(y.title(), x) for x, y in enumerate(sorted(set(candidates)))])
author_indices = np.array(
    [candidate_to_candidate_id[s.title()] for s in candidates])
author_map = np.array(list(candidate_to_candidate_id.keys()))
# Stopword file has one entry per line; delimiter="\n" keeps lines whole.
stopwords = set(np.loadtxt(
    os.path.join(project_dir, "setup/stopwords/candidate_tweets.txt"),
    dtype=str,
    delimiter="\n"))
count_vectorizer = CountVectorizer(min_df=0.0005,
                                   max_df=0.3,
                                   ngram_range=(1, 3),
                                   stop_words=stopwords,
                                   token_pattern="[a-zA-Z#]+")
# Learn initial document term matrix to identify words to exclude based
# on author counts.
counts = count_vectorizer.fit_transform(tweets)
# Recover the vocabulary ordered by its integer index in the matrix.
vocabulary = np.array(
    [k for (k, v) in sorted(count_vectorizer.vocabulary_.items(),
                            key=lambda kv: kv[1])])
# Remove phrases spoken by only 1 candidate.
counts_per_author = utils.bincount_2d(author_indices, counts.toarray())
min_authors_per_word = 2
author_counts_per_word = np.sum(counts_per_author > 0, axis=0)
acceptable_words = np.where(
    author_counts_per_word >= min_authors_per_word)[0]
# Fit final document-term matrix with new vocabulary.
count_vectorizer = CountVectorizer(ngram_range=(1, 3),
                                   vocabulary=vocabulary[acceptable_words],
                                   token_pattern="[a-zA-Z#]+")
counts = count_vectorizer.fit_transform(tweets)
vocabulary = np.array(
    [k for (k, v) in sorted(count_vectorizer.vocabulary_.items(),
                            key=lambda kv: kv[1])])
# Adjust counts by removing unigram/n-gram pairs which co-occur.
counts_dense = utils.remove_cooccurring_ngrams(counts, vocabulary)
counts = sparse.csr_matrix(counts_dense)
# Save data.
if not os.path.exists(save_dir):
  os.makedirs(save_dir)
# `counts.npz` is a [num_documents, num_words] sparse matrix containing the
# word counts for each document.
sparse.save_npz(os.path.join(save_dir, "counts.npz"),
                sparse.csr_matrix(counts).astype(np.float32))
# `author_indices.npy` is a [num_documents] vector where each entry is an
# integer indicating the author of the corresponding document.
np.save(os.path.join(save_dir, "author_indices.npy"), author_indices)
# `vocabulary.txt` is a [num_words] vector where each entry is a string
# denoting the corresponding word in the vocabulary.
np.savetxt(os.path.join(save_dir, "vocabulary.txt"), vocabulary, fmt="%s")
# `author_map.txt` is a [num_authors] vector of strings providing the name of
# each author in the corpus.
np.savetxt(os.path.join(save_dir, "author_map.txt"), author_map, fmt="%s")
# `raw_documents.txt` contains all the documents we ended up using.
stripped_tweets = [tweet.replace("\n", ' ').replace("\r", ' ')
                   for tweet in tweets]
np.savetxt(os.path.join(save_dir, "raw_documents.txt"),
           stripped_tweets,
           fmt="%s")
| [
"sklearn.feature_extraction.text.CountVectorizer",
"numpy.sum",
"os.makedirs",
"setup_utils.remove_cooccurring_ngrams",
"os.path.dirname",
"os.path.exists",
"scipy.sparse.csr_matrix",
"numpy.array",
"numpy.where",
"pandas.to_datetime",
"os.path.join"
] | [((460, 519), 'os.path.join', 'os.path.join', (['project_dir', '"""data/candidate-tweets-2020/raw"""'], {}), "(project_dir, 'data/candidate-tweets-2020/raw')\n", (472, 519), False, 'import os\n'), ((531, 592), 'os.path.join', 'os.path.join', (['project_dir', '"""data/candidate-tweets-2020/clean"""'], {}), "(project_dir, 'data/candidate-tweets-2020/clean')\n", (543, 592), False, 'import os\n'), ((952, 979), 'numpy.array', 'np.array', (["df['screen_name']"], {}), "(df['screen_name'])\n", (960, 979), True, 'import numpy as np\n'), ((989, 1009), 'numpy.array', 'np.array', (["df['text']"], {}), "(df['text'])\n", (997, 1009), True, 'import numpy as np\n'), ((1423, 1540), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'min_df': '(0.0005)', 'max_df': '(0.3)', 'ngram_range': '(1, 3)', 'stop_words': 'stopwords', 'token_pattern': '"""[a-zA-Z#]+"""'}), "(min_df=0.0005, max_df=0.3, ngram_range=(1, 3), stop_words=\n stopwords, token_pattern='[a-zA-Z#]+')\n", (1438, 1540), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2129, 2166), 'numpy.sum', 'np.sum', (['(counts_per_author > 0)'], {'axis': '(0)'}), '(counts_per_author > 0, axis=0)\n', (2135, 2166), True, 'import numpy as np\n'), ((2325, 2433), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'ngram_range': '(1, 3)', 'vocabulary': 'vocabulary[acceptable_words]', 'token_pattern': '"""[a-zA-Z#]+"""'}), "(ngram_range=(1, 3), vocabulary=vocabulary[acceptable_words],\n token_pattern='[a-zA-Z#]+')\n", (2340, 2433), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((2771, 2822), 'setup_utils.remove_cooccurring_ngrams', 'utils.remove_cooccurring_ngrams', (['counts', 'vocabulary'], {}), '(counts, vocabulary)\n', (2802, 2822), True, 'import setup_utils as utils\n'), ((2832, 2863), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['counts_dense'], {}), '(counts_dense)\n', (2849, 2863), False, 'from scipy import sparse\n'), 
((611, 647), 'os.path.join', 'os.path.join', (['data_dir', '"""tweets.csv"""'], {}), "(data_dir, 'tweets.csv')\n", (623, 647), False, 'import os\n'), ((2186, 2242), 'numpy.where', 'np.where', (['(author_counts_per_word >= min_authors_per_word)'], {}), '(author_counts_per_word >= min_authors_per_word)\n', (2194, 2242), True, 'import numpy as np\n'), ((2885, 2909), 'os.path.exists', 'os.path.exists', (['save_dir'], {}), '(save_dir)\n', (2899, 2909), False, 'import os\n'), ((2913, 2934), 'os.makedirs', 'os.makedirs', (['save_dir'], {}), '(save_dir)\n', (2924, 2934), False, 'import os\n'), ((3061, 3097), 'os.path.join', 'os.path.join', (['save_dir', '"""counts.npz"""'], {}), "(save_dir, 'counts.npz')\n", (3073, 3097), False, 'import os\n'), ((3306, 3350), 'os.path.join', 'os.path.join', (['save_dir', '"""author_indices.npy"""'], {}), "(save_dir, 'author_indices.npy')\n", (3318, 3350), False, 'import os\n'), ((3504, 3544), 'os.path.join', 'os.path.join', (['save_dir', '"""vocabulary.txt"""'], {}), "(save_dir, 'vocabulary.txt')\n", (3516, 3544), False, 'import os\n'), ((3686, 3726), 'os.path.join', 'os.path.join', (['save_dir', '"""author_map.txt"""'], {}), "(save_dir, 'author_map.txt')\n", (3698, 3726), False, 'import os\n'), ((3933, 3976), 'os.path.join', 'os.path.join', (['save_dir', '"""raw_documents.txt"""'], {}), "(save_dir, 'raw_documents.txt')\n", (3945, 3976), False, 'import os\n'), ((409, 434), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (424, 434), False, 'import os\n'), ((694, 726), 'pandas.to_datetime', 'pd.to_datetime', (["df['created_at']"], {}), "(df['created_at'])\n", (708, 726), True, 'import pandas as pd\n'), ((729, 751), 'pandas.to_datetime', 'pd.to_datetime', (['"""2019"""'], {}), "('2019')\n", (743, 751), True, 'import pandas as pd\n'), ((1300, 1365), 'os.path.join', 'os.path.join', (['project_dir', '"""setup/stopwords/candidate_tweets.txt"""'], {}), "(project_dir, 'setup/stopwords/candidate_tweets.txt')\n", (1312, 
1365), False, 'import os\n'), ((3115, 3140), 'scipy.sparse.csr_matrix', 'sparse.csr_matrix', (['counts'], {}), '(counts)\n', (3132, 3140), False, 'from scipy import sparse\n')] |
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 7 10:56:28 2019
@author: rdamseh
"""
import os
import sys
# add VascGraph package to python path
# Best-effort: make the VascGraph package importable when run from the repo
# root; failures are deliberately ignored (bare except kept as-is).
try:
    sys.path.append(os.getcwd())
except: pass
import numpy as np
from scipy import sparse
class ReadVTrails:
    """Build a VascGraph graph from a VTrails HDF5 (.mat v7.3) result file.

    The file is expected to contain the geodesic MST pathes under
    'GeodesicMSTs/CGPathContinuous' and their connectivity matrix in CSC
    form under 'GeodesicMSTsMatrix/M'.
    """

    def __init__(self, filepath, sampling=1):
        """
        Parameters
        ----------
        filepath : str
            Path to the VTrails HDF5 file.
        sampling : int, optional
            Keep every `sampling`-th node along each path (default 1 = all).
        """
        from VascGraph.GeomGraph import Graph
        try:
            import h5py
        except ImportError:  # narrowed from bare `except`
            print('To run this function, \'h5py\' should be installed.')
            return

        # ---- read ----#
        f = h5py.File(filepath, 'r')
        refs = [i[0] for i in f.get('GeodesicMSTs/CGPathContinuous')]
        pathes = [np.array(f[i]).T for i in refs]
        # CSC components (data, row indices, column pointers) of the
        # path-connectivity matrix.
        data = f.get('GeodesicMSTsMatrix/M/data')
        ir = np.array(f.get('GeodesicMSTsMatrix/M/ir'))
        jc = np.array(f.get('GeodesicMSTsMatrix/M/jc'))

        # ----- sampling of pathes nodes ----#
        # FIX: floor division. The original `len(i)/sampling` is float
        # division on Python 3, and range() then raises TypeError.
        n_kept = [len(p) // sampling for p in pathes]
        indices = [np.arange(n) * sampling for n in n_kept]
        pathes = [p[idx] for p, idx in zip(pathes, indices)]

        # ----- build graph from pathes -----#
        path_ext = []  # [first_node_id, last_node_id] of every path
        g = Graph()
        for path in pathes:
            offset = g.number_of_nodes()
            nodes = np.arange(len(path)) + offset
            e1 = nodes[1:]
            e2 = nodes[:-1]
            e = np.array([e1, e2]).T
            path_ext.append([nodes[0], nodes[-1]])
            g.add_nodes_from(nodes)
            g.add_edges_from(e)
            for node, pos in zip(nodes, path):
                # swap the first two coordinates (x/y convention of VascGraph)
                g.node[node]['pos'] = np.array([pos[1], pos[0], pos[2]])

        # ------- connection between pathes ----#
        path_ext = np.array(path_ext)
        a = sparse.csc_matrix((data, ir, jc))
        ind1, ind2 = np.where(a.todense() > 0)
        e = []
        for i, j in zip(ind1, ind2):
            # candidate end-point pairings between path i and path j
            ee = [[path_ext[i][0], path_ext[j][1]],
                  [path_ext[i][1], path_ext[j][0]],
                  [path_ext[i][0], path_ext[j][0]],
                  [path_ext[i][1], path_ext[j][1]]]
            poss = np.array([[g.node[k[0]]['pos'], g.node[k[1]]['pos']] for k in ee])
            poss = poss[:, 0, :] - poss[:, 1, :]
            norm = np.linalg.norm(poss, axis=1)
            # keep the geometrically closest pair of end points
            indx = np.where(norm == norm.min())[0][0]
            e.append(ee[indx])
        g.add_edges_from(e)
        self.graph = g

    def Update(self):
        """No-op; kept for reader-style interface compatibility."""
        pass

    def GetOutput(self):
        """Return the constructed graph."""
        return self.graph
"h5py.File",
"os.getcwd",
"scipy.sparse.csc_matrix",
"numpy.array",
"numpy.linalg.norm",
"VascGraph.GeomGraph.Graph"
] | [((197, 208), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (206, 208), False, 'import os\n'), ((589, 613), 'h5py.File', 'h5py.File', (['filepath', '"""r"""'], {}), "(filepath, 'r')\n", (598, 613), False, 'import h5py\n'), ((1199, 1206), 'VascGraph.GeomGraph.Graph', 'Graph', ([], {}), '()\n', (1204, 1206), False, 'from VascGraph.GeomGraph import Graph\n'), ((1755, 1773), 'numpy.array', 'np.array', (['path_ext'], {}), '(path_ext)\n', (1763, 1773), True, 'import numpy as np\n'), ((1786, 1819), 'scipy.sparse.csc_matrix', 'sparse.csc_matrix', (['(data, ir, jc)'], {}), '((data, ir, jc))\n', (1803, 1819), False, 'from scipy import sparse\n'), ((2177, 2243), 'numpy.array', 'np.array', (["[[g.node[k[0]]['pos'], g.node[k[1]]['pos']] for k in ee]"], {}), "([[g.node[k[0]]['pos'], g.node[k[1]]['pos']] for k in ee])\n", (2185, 2243), True, 'import numpy as np\n'), ((2303, 2331), 'numpy.linalg.norm', 'np.linalg.norm', (['poss'], {'axis': '(1)'}), '(poss, axis=1)\n', (2317, 2331), True, 'import numpy as np\n'), ((698, 712), 'numpy.array', 'np.array', (['f[i]'], {}), '(f[i])\n', (706, 712), True, 'import numpy as np\n'), ((1381, 1399), 'numpy.array', 'np.array', (['[e1, e2]'], {}), '([e1, e2])\n', (1389, 1399), True, 'import numpy as np\n'), ((1642, 1676), 'numpy.array', 'np.array', (['[pos[1], pos[0], pos[2]]'], {}), '([pos[1], pos[0], pos[2]])\n', (1650, 1676), True, 'import numpy as np\n')] |
""" Utility functions for the neural network and tensor manipulation of run_mtqa.py """
from __future__ import absolute_import, division, print_function
import os
import sys
# Put the project root (two directories up from this file) on sys.path so
# sibling packages (data_processing, metrics, configs) can be imported.
if os.name == 'nt':
    root_path = "/".join(os.path.realpath(__file__).split("\\")[:-2])
else:
    root_path = "/".join(os.path.realpath(__file__).split("/")[:-2])
if root_path not in sys.path:
    sys.path.append(root_path)
import logging
import numpy as np
import scipy
import torch
from sqlalchemy import create_engine
from tqdm import trange
# from colorama import Style
from typing import List
from torch.utils.data import DataLoader, RandomSampler, DistributedSampler
from torch._six import container_abcs
from transformers import get_linear_schedule_with_warmup
# from data_processing.datatypes import LABELS
from metrics.sequence_labeling import get_entities_with_names
from data_processing.nn_output_to_indra import get_db_xrefs, make_indra_statements
from data_processing.datatypes import PAD_TOKEN_LABEL_ID
from configs import PUBMED_EVIDENCE_ANNOTATIONS_DB
# Module-level logger; INFO level so pipeline progress messages are emitted.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def update_with_nn_output(inputs, output_seq, logits, preds, all_preds, out_label_ids, out_token_ids,
                          attention_masks, crf_bool):
    """Accumulate one batch of model outputs into the running buffers.

    On the first call (out_label_ids is None) the buffers are initialized
    from this batch; afterwards each batch is appended along axis 0.
    Predictions come from the CRF decode (`output_seq`) when `crf_bool`
    is set, otherwise from the argmax over the logits.

    Returns (preds_as_list, all_preds, out_label_ids, out_token_ids,
    attention_masks).
    """
    # Convert everything to numpy up front.
    logits_np = logits.detach().cpu().numpy()
    labels_np = inputs["labels"].detach().cpu().numpy()
    tokens_np = inputs["input_ids"].detach().cpu().numpy()
    masks_np = inputs["attention_mask"].detach().cpu().numpy()
    if crf_bool:
        batch_preds = output_seq.detach().cpu().numpy()
    else:
        batch_preds = np.argmax(logits_np, axis=2)

    if out_label_ids is None:
        # First batch: the buffers simply become this batch.
        all_preds = logits_np
        preds = batch_preds
        out_label_ids = labels_np
        out_token_ids = tokens_np
        attention_masks = masks_np
    else:
        # Subsequent batches are stacked along the batch dimension.
        all_preds = np.append(all_preds, logits_np, axis=0)
        preds = np.append(preds, batch_preds, axis=0)
        out_label_ids = np.append(out_label_ids, labels_np, axis=0)
        out_token_ids = np.append(out_token_ids, tokens_np, axis=0)
        attention_masks = np.append(attention_masks, masks_np, axis=0)
    return preds.tolist(), all_preds, out_label_ids, out_token_ids, attention_masks
def update_metadata(input_pubmed_ids, input_subjects, input_whitespaces, input_positions, input_question_ids, input_subject_lengths, input_question_types,
                    input_debug_label_ids, input_blinded_token_ids,
                    pubmed_ids, subjects, whitespace_bools, position_ids, question_ids, subject_lengths, question_types, debug_label_ids, blinded_token_ids,
                    subject_label_encoder):
    """Fold one batch of metadata tensors into the running numpy accumulators.

    On the first batch (pubmed_ids is None) every accumulator is initialised
    from this batch; afterwards the fresh values are appended along axis 0.
    Subject ids are decoded back to their original labels with
    subject_label_encoder.inverse_transform.

    Returns:
        Tuple (pubmed_ids, subjects, whitespace_bools, position_ids,
        question_ids, subject_lengths, question_types, debug_label_ids,
        blinded_token_ids) including this batch.
    """
    # Convert the incoming tensors to numpy once, up front.
    fresh_pubmed = input_pubmed_ids.numpy()
    fresh_lengths = input_subject_lengths.numpy()
    fresh_subjects = subject_label_encoder.inverse_transform(input_subjects.numpy().ravel())
    fresh_whitespace = input_whitespaces.numpy()
    fresh_positions = input_positions.numpy()
    fresh_questions = input_question_ids.numpy()
    fresh_qtypes = input_question_types.numpy()
    fresh_debug = input_debug_label_ids.numpy()
    fresh_blinded = input_blinded_token_ids.numpy()
    if pubmed_ids is None:
        # First batch: the fresh values become the accumulators.
        pubmed_ids = fresh_pubmed
        subject_lengths = fresh_lengths
        subjects = fresh_subjects
        whitespace_bools = fresh_whitespace
        position_ids = fresh_positions
        question_ids = fresh_questions
        question_types = fresh_qtypes
        debug_label_ids = fresh_debug
        blinded_token_ids = fresh_blinded
    else:
        pubmed_ids = np.append(pubmed_ids, fresh_pubmed, axis=0)
        subject_lengths = np.append(subject_lengths, fresh_lengths, axis=0)
        subjects = np.append(subjects, fresh_subjects, axis=0)
        whitespace_bools = np.append(whitespace_bools, fresh_whitespace, axis=0)
        position_ids = np.append(position_ids, fresh_positions, axis=0)
        question_ids = np.append(question_ids, fresh_questions, axis=0)
        question_types = np.append(question_types, fresh_qtypes, axis=0)
        debug_label_ids = np.append(debug_label_ids, fresh_debug, axis=0)
        blinded_token_ids = np.append(blinded_token_ids, fresh_blinded, axis=0)
    return pubmed_ids, subjects, whitespace_bools, position_ids, question_ids, subject_lengths, question_types, debug_label_ids, blinded_token_ids
def get_answer_probs(answer_list, db_refs, logits, attention_mask, model, args, answer_start_pos, groundtruth_bool=False):
    """Attach database references and a confidence score to every answer.

    For predicted answers (groundtruth_bool=False) the confidence is the mean
    softmax probability over the answer's tokens; wordpiece continuation
    tokens ("##...") contribute no probability.  Ground-truth answers get a
    fixed confidence of 1.0.  (An earlier CRF-marginal scoring variant was
    removed here as dead, commented-out code.)

    Args:
        answer_list: entity tuples as produced by get_entities_with_names;
            index 0 is the first token, index 2 the start offset and index 6
            the token list of the answer.
        db_refs: per-answer list of database cross-references, or None.
        logits: (seq_len, num_labels) label scores for the whole sequence.
        attention_mask, model, args: unused; kept for interface compatibility
            with the former CRF-based scoring.
        answer_start_pos: offset of the answer region inside the sequence.
        groundtruth_bool: when True, skip scoring and use probability 1.0.

    Returns:
        When db_refs is given: list of tuples (answer fields + db_ref + prob),
        one per (answer, db_ref) pair.  When db_refs is None: list of
        single-element lists holding only the probability (used for debugging
        / wandb histograms).
    """
    new_answer_list = []
    for i, answer in enumerate(answer_list):
        if groundtruth_bool is False:
            if answer[0].startswith("##"):  # Answers beginning in the middle of a token are ignored
                continue
            answer_tokens = answer[6]
            answer_labels = []
            probs = []
            start = answer_start_pos + answer[2]
            for j, token in enumerate(answer_tokens):
                if j == 0:
                    answer_labels.append('B')
                    # Label index 2 — presumably the 'B' label in the label
                    # map; TODO(review) confirm against the LABELS ordering.
                    probs.append(scipy.special.softmax(logits[start + j])[2])
                elif token.startswith("##"):
                    answer_labels.append('X')
                else:
                    answer_labels.append('I')
                    # Label index 3 — presumably the 'I' label; see note above.
                    probs.append(scipy.special.softmax(logits[start + j])[3])
            prob = np.mean(probs)
        else:
            prob = 1.0
        if db_refs is not None:
            # Emit one scored tuple per database cross-reference.
            for db_ref in db_refs[i]:
                answer_values = tuple(list(answer) + [db_ref] + [prob])
                new_answer_list.append(answer_values)
        else:  # Used for debugging and plotting main answer probs in wandb histogram
            answer_values = [prob]
            new_answer_list.append(answer_values)
    return new_answer_list
def highlight_text(tokens: List[str], answer_list: List[tuple]) -> str:
    """Render *tokens* as plain text with every answer span wrapped in ====.

    Each answer contributes "==== <span> (<db> <id>, <conf>) ====" at its
    position; wordpiece continuations ("##...") are re-attached to their head
    token everywhere.  answer_list entries are the 9-tuples produced by
    get_answer_probs (with db_ref at index 7 and confidence at index 8).
    """
    def _join(start, end):
        # Re-attach wordpiece continuations to the preceding token.
        return " ".join(tokens[start:end]).replace(" ##", "")

    pieces = []
    cursor = 0
    for word, _, span_start, span_end, _, _, _, db_ref, confidence in answer_list:
        # Plain text between the previous answer and this one.
        pieces.append(_join(cursor, span_start))
        highlighted = ("==== " + _join(span_start, span_end + 1)
                       + " (" + db_ref[0] + " " + db_ref[1]
                       + ", {:.4f}) ".format(confidence) + "====")
        pieces.append(highlighted)
        cursor = span_end + 1
    # Trailing text after the last answer.
    pieces.append(_join(cursor, len(tokens)))
    return "".join(pieces)
def extract_from_nn_output(labels, out_label_ids, preds, all_preds, out_token_ids, attention_masks, whitespace_bools, position_ids,
                           tokenizer, pubmed_ids, subjects, question_ids, subject_lengths, question_types, out_debug_label_ids, model, args):
    """Turn raw sequence-labeling output into per-question entity answers.

    Groups tokens/labels/predictions by question, extracts entities for both
    the ground truth and the predictions, attaches database cross-references
    and confidence scores, renders highlighted Pubmed text, and finally builds
    INDRA statements from the predicted answers.

    Returns:
        (groundtruth, predictions, out_label_dict, preds_dict, indra_preds,
        debug_scores) where groundtruth/predictions map pubmed id to a list of
        (subjects, scored answer list, highlighted text) triples.
    """
    label_map = {i: label for i, label in enumerate(labels)}
    amount_of_questions = len(question_ids)
    # NOTE(review): this replaces the incoming question ids with 0..n-1, so
    # the `question_ids[i] == -1` padding check below can never trigger —
    # confirm whether padding sequences are still filtered as intended.
    question_ids = [i for i in range(len(question_ids))]
    logger.debug("Amount of questions: {}".format(amount_of_questions))
    # Per-question accumulators, indexed by question id.
    out_label_dict = [[] for i in range(amount_of_questions)]
    out_debug_label_dict = [[] for i in range(amount_of_questions)]
    preds_dict = [[] for i in range(amount_of_questions)]
    token_dict = [[] for i in range(amount_of_questions)]
    whitespace_dict = [[] for i in range(amount_of_questions)]
    position_dict = [[] for i in range(amount_of_questions)]
    answer_start_dict = [-1 for i in range(amount_of_questions)]
    pubmed_list = ["" for i in range(amount_of_questions)]
    subject_list = [[] for i in range(amount_of_questions)]
    # Debug variables
    logger.debug("Subjects: {}".format(subjects))
    logger.debug("Subjects shape: {}".format(subjects.shape))
    logger.debug("Subjects len: {}".format(subject_lengths))
    logger.debug("Subjects len shape: {}".format(subject_lengths.shape))
    logger.debug("Out label shape: {}".format(out_label_ids.shape))
    logger.debug("Pubmed IDs shape: {}".format(pubmed_ids.shape))
    logger.debug("Attention masks shape: {}".format(attention_masks.shape))
    logger.debug("All logits shape: {}".format(all_preds.shape))
    logger.debug(type(all_preds))
    logger.debug(type(all_preds[0]))
    groundtruth = {}
    predictions = {}
    debug_scores = []
    # One row of subjects per sequence; only the first subject_lengths[i]
    # entries of each row are used below.
    subjects = subjects.reshape((out_label_ids.shape[0], -1))
    for i in range(out_label_ids.shape[0]):
        if question_ids[i] == -1:
            # Padding sequence
            continue
        pubmed_list[question_ids[i]] = pubmed_ids[i]
        subject_list[question_ids[i]] = subjects[i, :subject_lengths[i]]
        answer_started = False
        # Collect labels/tokens only for positions carrying a real label
        # (PAD_TOKEN_LABEL_ID marks positions without one).
        for j in range(out_label_ids.shape[1]):
            if out_label_ids[i, j] != PAD_TOKEN_LABEL_ID:
                if not answer_started:
                    answer_started = True
                    # Remember where the labeled region starts in the sequence.
                    answer_start_dict[question_ids[i]] = j
                out_label_dict[question_ids[i]].append(label_map[out_label_ids[i][j]])
                out_debug_label_dict[question_ids[i]].append(label_map[out_debug_label_ids[i][j]])
                preds_dict[question_ids[i]].append(label_map[preds[i][j]])
                token_dict[question_ids[i]].append(tokenizer.convert_ids_to_tokens(out_token_ids[i][j].item()))
                whitespace_dict[question_ids[i]].append(whitespace_bools[i][j])
                position_dict[question_ids[i]].append(position_ids[i][j])
    # Python dicts retain order automatically since version 3.6
    # out_label_list = list(out_label_dict.values())
    # preds_list = list(preds_dict.values())
    pubmed_engine = create_engine(PUBMED_EVIDENCE_ANNOTATIONS_DB)
    for i in trange(len(out_label_dict), desc="Extract Entities"):
        if len(out_label_dict[i]) == 0:
            # Padding sequence
            continue
        groundtruth_list = get_entities_with_names(token_dict[i], out_label_dict[i], whitespace_dict[i], position_dict[i], question_types[i])
        answer_list = get_entities_with_names(token_dict[i], preds_dict[i], whitespace_dict[i], position_dict[i], question_types[i])
        # Add DB xrefs to groundtruth_list and answer_list if possible
        groundtruth_db_list = get_db_xrefs(groundtruth_list, pubmed_list[i], pubmed_engine, use_simple_normalizer=args.use_simple_normalizer)
        groundtruth_probs_list = get_answer_probs(groundtruth_list, groundtruth_db_list, all_preds[i], attention_masks[i], model, args, answer_start_dict[i])
        db_list = get_db_xrefs(answer_list, pubmed_list[i], pubmed_engine, use_simple_normalizer=args.use_simple_normalizer)
        answer_probs_list = get_answer_probs(answer_list, db_list, all_preds[i], attention_masks[i], model, args, answer_start_dict[i])
        # Extract highlighted Pubmed text
        pubmed_tokens = token_dict[i]
        groundtruth_text_highlighted = highlight_text(pubmed_tokens, groundtruth_probs_list)
        prediction_text_highlighted = highlight_text(pubmed_tokens, answer_probs_list)
        # debug_probs: Main answer probabilities to be later plotted in wandb histogram
        debug_list = get_entities_with_names(token_dict[i], out_label_dict[i], whitespace_dict[i], position_dict[i], question_types[i])
        debug_probs = get_answer_probs(debug_list, None, all_preds[i], attention_masks[i], model, args, answer_start_dict[i])
        if len(debug_probs) > 0:
            debug_scores.append(debug_probs)
        if pubmed_list[i] in groundtruth:
            groundtruth[pubmed_list[i]].append((subject_list[i], groundtruth_probs_list, groundtruth_text_highlighted))
        else:
            groundtruth[pubmed_list[i]] = [(subject_list[i], groundtruth_probs_list, groundtruth_text_highlighted)]
        if pubmed_list[i] in predictions:
            predictions[pubmed_list[i]].append((subject_list[i], answer_probs_list, prediction_text_highlighted))
        else:
            predictions[pubmed_list[i]] = [(subject_list[i], answer_probs_list, prediction_text_highlighted)]
    # logger.warn(groundtruth.keys())
    # debug_info = {}
    # for pmid, value in groundtruth.items():
    #     for infos in value:
    #         debug_info.setdefault(tuple(infos[0]), [])
    #         if len(infos[1]) > 0:
    #             debug_info[tuple(infos[0])].append((pmid, infos[1]))
    # debug_info_non_empty = [k for k, v in debug_info if len(v) > 0]
    # # logger.warn(debug_info)
    # # logger.warn(debug_info.keys())
    # logger.warn(len(debug_info_non_empty))
    # logger.warn(len(debug_info.keys()))
    # exit()
    # Make Indra Statements from the answers
    indra_preds = make_indra_statements(predictions)
    return groundtruth, predictions, out_label_dict, preds_dict, indra_preds, debug_scores
def initialize_loaders(args, dataset, optimizer, n_gpu):
    """Build the training DataLoader and a linear-warmup LR scheduler.

    Uses a DistributedSampler when args.local_rank indicates distributed
    training, otherwise a RandomSampler.  The total step count is derived
    from the dataloader length, gradient accumulation and epoch count.
    """
    if args.local_rank == -1:
        sampler = RandomSampler(dataset)
    else:
        sampler = DistributedSampler(dataset)
    dataloader = DataLoader(dataset, sampler=sampler, batch_size=1,
                            collate_fn=collate_fn, pin_memory=True)
    steps_per_epoch = len(dataloader) // args.gradient_accumulation_steps
    total = steps_per_epoch * args.num_train_epochs
    warmup_steps = int(args.warmup_proportion * total)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=warmup_steps, num_training_steps=total)
    return dataloader, scheduler
def collate_fn(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Tensors are right-padded along their last dimension to the largest size
    in the batch (e.g. the "subjects" tensor with a variable number of
    subjects) and concatenated; sequences and mappings are collated
    recursively, field by field.

    Raises:
        TypeError: when the batch elements are neither tensors, sequences
            nor mappings.
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, torch.Tensor):
        out = None
        # Pad every tensor along its last dimension so they can be
        # concatenated even when their sizes differ.
        max_dimension = max(element.size(-1) for element in batch)
        batch = [torch.nn.functional.pad(element, [0, max_dimension - element.size(-1)])
                 for element in batch]
        return torch.cat(batch, 0, out=out)
    # Fix: torch._six.container_abcs is a private module that was removed
    # from PyTorch; collections.abc provides the identical ABCs.
    elif isinstance(elem, collections.abc.Sequence):
        transposed = zip(*batch)
        return [collate_fn(samples) for samples in transposed]
    elif isinstance(elem, collections.abc.Mapping):
        return {key: collate_fn([d[key] for d in batch]) for key in elem}
    raise TypeError("collate_fn: batch must contain tensors or lists; found {}".format(elem_type))
| [
"sys.path.append",
"torch.utils.data.DataLoader",
"metrics.sequence_labeling.get_entities_with_names",
"torch.utils.data.RandomSampler",
"numpy.argmax",
"os.path.realpath",
"torch.cat",
"data_processing.nn_output_to_indra.get_db_xrefs",
"torch.utils.data.DistributedSampler",
"data_processing.nn_ou... | [((1060, 1087), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1077, 1087), False, 'import logging\n'), ((375, 401), 'sys.path.append', 'sys.path.append', (['root_path'], {}), '(root_path)\n', (390, 401), False, 'import sys\n'), ((11753, 11798), 'sqlalchemy.create_engine', 'create_engine', (['PUBMED_EVIDENCE_ANNOTATIONS_DB'], {}), '(PUBMED_EVIDENCE_ANNOTATIONS_DB)\n', (11766, 11798), False, 'from sqlalchemy import create_engine\n'), ((14735, 14769), 'data_processing.nn_output_to_indra.make_indra_statements', 'make_indra_statements', (['predictions'], {}), '(predictions)\n', (14756, 14769), False, 'from data_processing.nn_output_to_indra import get_db_xrefs, make_indra_statements\n'), ((15033, 15127), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'sampler': 'sampler', 'batch_size': '(1)', 'collate_fn': 'collate_fn', 'pin_memory': '(True)'}), '(dataset, sampler=sampler, batch_size=1, collate_fn=collate_fn,\n pin_memory=True)\n', (15043, 15127), False, 'from torch.utils.data import DataLoader, RandomSampler, DistributedSampler\n'), ((15283, 15386), 'transformers.get_linear_schedule_with_warmup', 'get_linear_schedule_with_warmup', (['optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'total'}), '(optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=total)\n', (15314, 15386), False, 'from transformers import get_linear_schedule_with_warmup\n'), ((11985, 12104), 'metrics.sequence_labeling.get_entities_with_names', 'get_entities_with_names', (['token_dict[i]', 'out_label_dict[i]', 'whitespace_dict[i]', 'position_dict[i]', 'question_types[i]'], {}), '(token_dict[i], out_label_dict[i], whitespace_dict[i\n ], position_dict[i], question_types[i])\n', (12008, 12104), False, 'from metrics.sequence_labeling import get_entities_with_names\n'), ((12122, 12236), 'metrics.sequence_labeling.get_entities_with_names', 'get_entities_with_names', (['token_dict[i]', 'preds_dict[i]', 
'whitespace_dict[i]', 'position_dict[i]', 'question_types[i]'], {}), '(token_dict[i], preds_dict[i], whitespace_dict[i],\n position_dict[i], question_types[i])\n', (12145, 12236), False, 'from metrics.sequence_labeling import get_entities_with_names\n'), ((12335, 12450), 'data_processing.nn_output_to_indra.get_db_xrefs', 'get_db_xrefs', (['groundtruth_list', 'pubmed_list[i]', 'pubmed_engine'], {'use_simple_normalizer': 'args.use_simple_normalizer'}), '(groundtruth_list, pubmed_list[i], pubmed_engine,\n use_simple_normalizer=args.use_simple_normalizer)\n', (12347, 12450), False, 'from data_processing.nn_output_to_indra import get_db_xrefs, make_indra_statements\n'), ((12623, 12733), 'data_processing.nn_output_to_indra.get_db_xrefs', 'get_db_xrefs', (['answer_list', 'pubmed_list[i]', 'pubmed_engine'], {'use_simple_normalizer': 'args.use_simple_normalizer'}), '(answer_list, pubmed_list[i], pubmed_engine,\n use_simple_normalizer=args.use_simple_normalizer)\n', (12635, 12733), False, 'from data_processing.nn_output_to_indra import get_db_xrefs, make_indra_statements\n'), ((13237, 13356), 'metrics.sequence_labeling.get_entities_with_names', 'get_entities_with_names', (['token_dict[i]', 'out_label_dict[i]', 'whitespace_dict[i]', 'position_dict[i]', 'question_types[i]'], {}), '(token_dict[i], out_label_dict[i], whitespace_dict[i\n ], position_dict[i], question_types[i])\n', (13260, 13356), False, 'from metrics.sequence_labeling import get_entities_with_names\n'), ((14935, 14957), 'torch.utils.data.RandomSampler', 'RandomSampler', (['dataset'], {}), '(dataset)\n', (14948, 14957), False, 'from torch.utils.data import DataLoader, RandomSampler, DistributedSampler\n'), ((14988, 15015), 'torch.utils.data.DistributedSampler', 'DistributedSampler', (['dataset'], {}), '(dataset)\n', (15006, 15015), False, 'from torch.utils.data import DataLoader, RandomSampler, DistributedSampler\n'), ((16329, 16357), 'torch.cat', 'torch.cat', (['batch', '(0)'], {'out': 'out'}), '(batch, 0, 
out=out)\n', (16338, 16357), False, 'import torch\n'), ((1466, 1494), 'numpy.argmax', 'np.argmax', (['all_preds'], {'axis': '(2)'}), '(all_preds, axis=2)\n', (1475, 1494), True, 'import numpy as np\n'), ((1990, 2032), 'numpy.append', 'np.append', (['all_preds', 'logits_numpy'], {'axis': '(0)'}), '(all_preds, logits_numpy, axis=0)\n', (1999, 2032), True, 'import numpy as np\n'), ((6951, 6965), 'numpy.mean', 'np.mean', (['probs'], {}), '(probs)\n', (6958, 6965), True, 'import numpy as np\n'), ((2070, 2101), 'numpy.argmax', 'np.argmax', (['logits_numpy'], {'axis': '(2)'}), '(logits_numpy, axis=2)\n', (2079, 2101), True, 'import numpy as np\n'), ((221, 247), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (237, 247), False, 'import os\n'), ((297, 323), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (313, 323), False, 'import os\n'), ((5229, 5269), 'scipy.special.softmax', 'scipy.special.softmax', (['logits[start + j]'], {}), '(logits[start + j])\n', (5250, 5269), False, 'import scipy\n'), ((5523, 5563), 'scipy.special.softmax', 'scipy.special.softmax', (['logits[start + j]'], {}), '(logits[start + j])\n', (5544, 5563), False, 'import scipy\n')] |
from numpy.random import RandomState
import math
from bblr.Utils import Utils
class MainPatternGenerator(object):
    """Generates a random data set of binary patterns.

    The `properties` dict controls the data set size, the pattern size and
    the optional extra-bits, distance and scale transformations; generation
    and transformation are delegated to bblr's Utils helpers.
    """

    # Maximum number of attempts when searching for a data set whose pairwise
    # distances match the requested mean/stdev.
    MAX_TRIES = 100

    def __init__(self, properties, seedAddition):
        """Validate `properties`, seed the RNG and build the pattern set.

        Args:
            properties: configuration dict (seed, dataSetSize, patternSize,
                and optional extraBits/distance/scale sub-dicts).
            seedAddition: offset added to the configured random seed.

        Raises:
            Exception: when a configuration entry is missing or invalid.
        """
        self.properties = properties

        # Validating the configuration.
        seed = properties.get('seed')
        dataSetSize = self.properties.get('dataSetSize')
        patternSize = properties.get('patternSize')
        extraBits = properties.get('extraBits')
        distance = properties.get('distance')
        scale = properties.get('scale')

        Utils.assertInt('Random seed', seed)
        Utils.assertInt('Pattern data set size', dataSetSize, 1)
        Utils.assertInt('Pattern size', patternSize, 1)

        if extraBits != None:
            Utils.assertInt('Number of extra bits', extraBits.get('number'), 1)
            if extraBits.get('values') not in (0, 1, 'random', 'randomFixed'):
                raise Exception('Extra bits values must be 0, 1, "random" or "randomFixed"')

        if distance != None:
            Utils.assertFloat('Mean distance', distance.get('mean'), 0)
            Utils.assertFloat('Standard deviation of distance', distance.get('stdev'), 0)

        if scale != None:
            if scale.get('type') == '1D':
                Utils.assertInt('Scale factor for 1D', scale.get('factor'), 1)
            elif scale.get('type') == '2D':
                Utils.assertInt('Scale pattern width', scale.get('patternWidth'), 1)
                Utils.assertInt('Scale pattern height', scale.get('patternHeight'), 1)
                Utils.assertInt('Scale width factor', scale.get('widthFactor'), 1)
                Utils.assertInt('Scale height factor', scale.get('heightFactor'), 1)
                # The 2D geometry must cover exactly patternSize cells.
                if scale.get('patternWidth') * scale.get('patternHeight') != patternSize:
                    raise Exception('Scale pattern width and pattern height do not fit with the given pattern size')
            else:
                raise Exception('Unknown scale type ' + scale.get('type'))

        # Initializing the random generator.
        randomGenerator = RandomState()
        randomGenerator.seed(seed + seedAddition)

        # Generating the patterns.
        self.originalPatterns = Utils.generateDataSet(randomGenerator, dataSetSize, patternSize, self.computeError if 'distance' in self.properties else None, MainPatternGenerator.MAX_TRIES)

        # Applying transformations.
        self.patterns = Utils.transformDataSet(randomGenerator, self.originalPatterns, self.properties)

    # Public methods. A generator must implement these methods in order to use it in Main.py

    def getPatterns(self):
        """Return the transformed pattern data set."""
        return self.patterns

    def getOriginalPatterns(self):
        """Return the patterns as generated, before any transformation."""
        return self.originalPatterns

    def analyze(self):
        """Return distance statistics of the transformed pattern set."""
        return self.analyzeDataSet(self.patterns)

    # Private methods.

    def analyzeDataSet(self, dataSet):
        """Return size, dimension and pairwise-distance statistics of dataSet.

        NOTE(review): a data set with a single pattern gives k == 0 and a
        ZeroDivisionError below; dataSetSize is only validated to be >= 1.
        """
        n = len(dataSet)
        ds = []
        k = 0
        # Fix: `range` instead of the Python-2-only `xrange` — identical
        # iteration results, and the code now also runs on Python 3.
        for i in range(n):
            for j in range(i + 1, n):
                ds.append(Utils.distance(dataSet[i], dataSet[j]))
                k += 1
        k = float(k)
        mean = sum(ds) / k
        variance = sum(map(lambda d: (d - mean)**2, ds)) / k
        stdev = math.sqrt(variance)

        return {
            'patternDataSetSize': n,
            'patternDimension': len(dataSet[0]),
            'patternsDistanceMean': mean,
            'patternsDistanceStdev': stdev
        }

    def computeError(self, dataSet):
        """Mean-square error between observed and requested distance stats."""
        analysis = self.analyzeDataSet(dataSet)
        distance = self.properties.get('distance')
        return Utils.meanSquareError([analysis['patternsDistanceMean'], analysis['patternsDistanceStdev']], [distance.get('mean'), distance.get('stdev')])
| [
"bblr.Utils.Utils.assertInt",
"bblr.Utils.Utils.transformDataSet",
"math.sqrt",
"numpy.random.RandomState",
"bblr.Utils.Utils.generateDataSet",
"bblr.Utils.Utils.distance"
] | [((574, 610), 'bblr.Utils.Utils.assertInt', 'Utils.assertInt', (['"""Random seed"""', 'seed'], {}), "('Random seed', seed)\n", (589, 610), False, 'from bblr.Utils import Utils\n'), ((619, 675), 'bblr.Utils.Utils.assertInt', 'Utils.assertInt', (['"""Pattern data set size"""', 'dataSetSize', '(1)'], {}), "('Pattern data set size', dataSetSize, 1)\n", (634, 675), False, 'from bblr.Utils import Utils\n'), ((684, 731), 'bblr.Utils.Utils.assertInt', 'Utils.assertInt', (['"""Pattern size"""', 'patternSize', '(1)'], {}), "('Pattern size', patternSize, 1)\n", (699, 731), False, 'from bblr.Utils import Utils\n'), ((2165, 2178), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (2176, 2178), False, 'from numpy.random import RandomState\n'), ((2305, 2472), 'bblr.Utils.Utils.generateDataSet', 'Utils.generateDataSet', (['randomGenerator', 'dataSetSize', 'patternSize', "(self.computeError if 'distance' in self.properties else None)", 'MainPatternGenerator.MAX_TRIES'], {}), "(randomGenerator, dataSetSize, patternSize, self.\n computeError if 'distance' in self.properties else None,\n MainPatternGenerator.MAX_TRIES)\n", (2326, 2472), False, 'from bblr.Utils import Utils\n'), ((2533, 2612), 'bblr.Utils.Utils.transformDataSet', 'Utils.transformDataSet', (['randomGenerator', 'self.originalPatterns', 'self.properties'], {}), '(randomGenerator, self.originalPatterns, self.properties)\n', (2555, 2612), False, 'from bblr.Utils import Utils\n'), ((3349, 3368), 'math.sqrt', 'math.sqrt', (['variance'], {}), '(variance)\n', (3358, 3368), False, 'import math\n'), ((3152, 3190), 'bblr.Utils.Utils.distance', 'Utils.distance', (['dataSet[i]', 'dataSet[j]'], {}), '(dataSet[i], dataSet[j])\n', (3166, 3190), False, 'from bblr.Utils import Utils\n')] |
"""
Copyright 2017 Neural Networks and Deep Learning lab, MIPT
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import numpy as np
from deeppavlov.core.common.registry import register
from deeppavlov.core.data.data_learning_iterator import DataLearningIterator
def process_word(word, to_lower=False, append_case=None):
    """Convert a word into a tuple of characters / special marker symbols.

    Digits become ("<DIGIT>",), URLs become ("<HTTP>",).  Otherwise the word
    is split into characters; when to_lower is set, the original casing is
    preserved as an "<ALL_UPPER>" / "<FIRST_UPPER>" marker prepended
    (append_case="first") or appended (append_case="last").
    """
    # Determine the casing marker before any lowercasing happens.
    if len(word) > 1 and all(ch.isupper() for ch in word):
        case_marker = "<ALL_UPPER>"
    elif word[0].isupper():
        case_marker = "<FIRST_UPPER>"
    else:
        case_marker = None
    if to_lower:
        word = word.lower()
    if word.isdigit():
        chars = ["<DIGIT>"]
    elif word.startswith(("http://", "www.")):
        chars = ["<HTTP>"]
    else:
        chars = list(word)
        if to_lower and case_marker is not None:
            if append_case == "first":
                chars = [case_marker] + chars
            elif append_case == "last":
                chars = chars + [case_marker]
    return tuple(chars)
def preprocess_data(data, to_lower=True, append_case="first"):
    """Apply process_word to every token of every (words, tags) sample.

    Tags are passed through unchanged (they could also be processed in the
    future).
    """
    return [
        ([process_word(word, to_lower=to_lower, append_case=append_case)
          for word in words], tags)
        for words, tags in data
    ]
@register('morphotagger_dataset')
class MorphoTaggerDatasetIterator(DataLearningIterator):
    """Data iterator for morphological tagging samples.

    Splits a validation set off the training data when none is provided and
    yields batches of samples ordered by sentence length (length bucketing).
    """

    def __init__(self, data, seed=None, shuffle=True,
                 validation_split=0.2, bucket=True):
        """
        Args:
            data: dict mapping mode ('train', 'valid', ...) to a list of
                (words, tags) samples.
            seed: random seed forwarded to the base iterator.
            shuffle: whether to shuffle the training data.
            validation_split: fraction of the train set moved to `valid`
                when no validation data is given.
            bucket: stored for configuration compatibility; batches are
                always yielded in length-sorted order here.
        """
        self.bucket = bucket
        self.validation_split = validation_split
        super().__init__(data, seed, shuffle)

    def split(self):
        """Carve a validation set off the train set when `valid` is empty."""
        if len(self.valid) == 0:
            if self.shuffle:
                random.shuffle(self.train)
            L = int(len(self.train) * (1.0 - self.validation_split))
            # Bug fix: the validation part must be the tail of self.train,
            # not of the (empty) self.valid — previously the validation set
            # always came out empty.
            self.train, self.valid = self.train[:L], self.train[L:]
        return

    def gen_batches(self, batch_size: int, data_type: str = 'train',
                    shuffle: bool = None, return_indexes: bool = False):
        """Yield batches of samples ordered by sentence length.

        Args:
            batch_size: samples per batch; a negative value yields the whole
                split as a single batch.
            data_type: which split to draw from ('train', 'valid', ...).
            shuffle: overrides the iterator-level shuffle flag when not None.
                NOTE: shuffling mutates self.data[data_type] in place.
            return_indexes: also yield the indexes of the selected samples.
        """
        if shuffle is None:
            shuffle = self.shuffle
        data = self.data[data_type]
        if shuffle:
            random.shuffle(data)
        # Sort by sentence length so each batch holds similar-length samples.
        lengths = [len(x[0]) for x in data]
        indexes = np.argsort(lengths)
        L = len(data)
        if batch_size < 0:
            batch_size = L
        for start in range(0, L, batch_size):
            indexes_to_yield = indexes[start:start + batch_size]
            data_to_yield = tuple(zip(*[data[i] for i in indexes_to_yield]))
            if return_indexes:
                yield indexes_to_yield, data_to_yield
            else:
                yield data_to_yield
"numpy.argsort",
"random.shuffle",
"deeppavlov.core.common.registry.register"
] | [((1836, 1868), 'deeppavlov.core.common.registry.register', 'register', (['"""morphotagger_dataset"""'], {}), "('morphotagger_dataset')\n", (1844, 1868), False, 'from deeppavlov.core.common.registry import register\n'), ((3049, 3068), 'numpy.argsort', 'np.argsort', (['lengths'], {}), '(lengths)\n', (3059, 3068), True, 'import numpy as np\n'), ((2966, 2986), 'random.shuffle', 'random.shuffle', (['data'], {}), '(data)\n', (2980, 2986), False, 'import random\n'), ((2513, 2539), 'random.shuffle', 'random.shuffle', (['self.train'], {}), '(self.train)\n', (2527, 2539), False, 'import random\n')] |
import numpy as np
from app.imagetoolbox.ImageConfigBase import ImageConfigBase
from app.utilities.util_config import assign_raise, assign_fallback
class NormalizeConfig(ImageConfigBase):
    """Configuration accessor for image normalization settings.

    Reads the [IMAGE-PREPROCESSING] section of the config; missing mean/stdev
    entries fall back to the ImageNet defaults below.  The `color_mode`
    attribute is provided by ImageConfigBase (assumed: one of "rgb", "hsv",
    "grayscale"); other color modes make the mean/stdev properties yield None.
    """
    SECTION = "IMAGE-PREPROCESSING"
    _normalize_mean = np.array([0.485, 0.456, 0.406])
    _normalize_stdev = np.array([0.229, 0.224, 0.225])
    _normalize_samplewise = False

    @property
    def normalize_by_mean_var(self):
        """Whether mean/variance normalization is enabled (required option)."""
        return assign_raise(self.cp[self.SECTION].getboolean("normalize_by_mean_var"))

    @property
    def normalize_samplewise(self):
        """Whether normalization is per-sample; defaults to False."""
        return assign_fallback(self.cp[self.SECTION].getboolean("normalize_samplewise"),
                               self._normalize_samplewise)

    def _channel_values(self, option_prefix, defaults):
        # Read one float per channel ("<prefix>_chan1".."<prefix>_chan3"),
        # falling back to the matching default for missing entries.
        section = self.cp[self.SECTION]
        return np.array([
            assign_fallback(section.getfloat("%s_chan%d" % (option_prefix, idx + 1)),
                            defaults[idx])
            for idx in range(3)
        ])

    @property
    def norm_mean(self):
        """Per-channel means (rgb/hsv) or a scalar mean (grayscale)."""
        if self.color_mode in ("rgb", "hsv"):
            return self._channel_values("normalize_mean", self._normalize_mean)
        elif self.color_mode == "grayscale":
            return assign_fallback(self.cp[self.SECTION].getfloat("normalize_mean"),
                                   np.mean(self._normalize_mean))

    @property
    def norm_stdev(self):
        """Per-channel stdevs (rgb/hsv) or a scalar stdev (grayscale)."""
        if self.color_mode in ("rgb", "hsv"):
            return self._channel_values("normalize_stdev", self._normalize_stdev)
        elif self.color_mode == "grayscale":
            return assign_fallback(self.cp[self.SECTION].getfloat("normalize_stdev"),
                                   np.mean(self._normalize_stdev))
| [
"numpy.mean",
"numpy.array"
] | [((250, 281), 'numpy.array', 'np.array', (['[0.485, 0.456, 0.406]'], {}), '([0.485, 0.456, 0.406])\n', (258, 281), True, 'import numpy as np\n'), ((305, 336), 'numpy.array', 'np.array', (['[0.229, 0.224, 0.225]'], {}), '([0.229, 0.224, 0.225])\n', (313, 336), True, 'import numpy as np\n'), ((1291, 1337), 'numpy.array', 'np.array', (['[mean_chan1, mean_chan2, mean_chan3]'], {}), '([mean_chan1, mean_chan2, mean_chan3])\n', (1299, 1337), True, 'import numpy as np\n'), ((2169, 2218), 'numpy.array', 'np.array', (['[stdev_chan1, stdev_chan2, stdev_chan3]'], {}), '([stdev_chan1, stdev_chan2, stdev_chan3])\n', (2177, 2218), True, 'import numpy as np\n'), ((1412, 1441), 'numpy.mean', 'np.mean', (['self._normalize_mean'], {}), '(self._normalize_mean)\n', (1419, 1441), True, 'import numpy as np\n'), ((2294, 2324), 'numpy.mean', 'np.mean', (['self._normalize_stdev'], {}), '(self._normalize_stdev)\n', (2301, 2324), True, 'import numpy as np\n')] |
import random
import numpy as np
from modules import solver
def make_random():
    """Generate a random, partially blanked Sudoku board.

    Seeds one distinct random digit into each of the nine 3x3 boxes, solves
    the resulting board, then blanks roughly half of the cells at random.
    Returns the puzzle as a 9x9 list of ints (0 = empty cell).
    """
    board = np.zeros([9, 9], int).tolist()
    digits = [1, 2, 3, 4, 5, 6, 7, 8, 9]
    # Place one random digit somewhere inside every 3x3 box.
    for box_row in range(0, 9, 3):
        for box_col in range(0, 9, 3):
            row = random.randint(box_row, box_row + 2)
            col = random.randint(box_col, box_col + 2)
            board[row][col] = digits.pop(random.randint(0, len(digits) - 1))
    solved = solver.solve_it(board)
    # Blank each cell with probability 1/2 to create the puzzle.
    for row in range(9):
        for col in range(9):
            if random.randint(0, 1) != 0:
                solved[row][col] = 0
    return solved
if __name__ == "__main__":
    # Demo entry point: generate one puzzle and print it row by row.
    rando_board = make_random()
    for i in rando_board:
        print(i)
    input('Press enter to exit...')
"numpy.zeros",
"random.randint",
"modules.solver.solve_it"
] | [((97, 118), 'numpy.zeros', 'np.zeros', (['[9, 9]', 'int'], {}), '([9, 9], int)\n', (105, 118), True, 'import numpy as np\n'), ((437, 461), 'modules.solver.solve_it', 'solver.solve_it', (['z_board'], {}), '(z_board)\n', (452, 461), False, 'from modules import solver\n'), ((278, 302), 'random.randint', 'random.randint', (['i', '(i + 2)'], {}), '(i, i + 2)\n', (292, 302), False, 'import random\n'), ((316, 340), 'random.randint', 'random.randint', (['j', '(j + 2)'], {}), '(j, j + 2)\n', (330, 340), False, 'import random\n'), ((529, 549), 'random.randint', 'random.randint', (['(0)', '(1)'], {}), '(0, 1)\n', (543, 549), False, 'import random\n')] |
import numpy as np
from sklearn.linear_model import LogisticRegression
from src.policy import BasePolicy
from sklearn.ensemble import RandomForestClassifier
class IPSEvaluator:
    """Inverse-propensity-scoring (IPS) off-policy evaluator.

    Re-weights the rewards logged under `log_policy` by the ratio of the
    evaluated policy's action probability to the logging policy's.
    """

    def __init__(self, log_policy: BasePolicy, eval_policy: BasePolicy):
        self.log_policy = log_policy
        self.eval_policy = eval_policy

    def evaluate_one_reward(self, x, a, r):
        # Importance-weight the logged reward by the probability ratio.
        return r * self.eval_policy.give_probability(x, a) / self.log_policy.give_probability(x, a)

    def evaluate_policy(self):
        """Mean IPS-weighted reward over the logging policy's history."""
        weighted = [self.evaluate_one_reward(x, a, r)
                    for (x, a, r) in self.log_policy.history]
        return np.array(weighted).mean()

    def train(self, data):
        # IPS needs no model fitting; present for interface compatibility.
        pass
class DoublyRobustEstimator:
    """Doubly-robust off-policy evaluator.

    Combines a model-based reward estimate (a random forest over
    context+action features) with an IPS correction of the residual.
    """

    def __init__(self, log_policy: BasePolicy, eval_policy: BasePolicy):
        self.log_policy = log_policy
        self.eval_policy = eval_policy
        self.model_based_estimator = None

    def train(self, data):
        """Fit the reward model on (context, action, reward) triples."""
        # Features: the context with the action appended as the last column.
        features = np.array([np.append(sample[0], [sample[1]]) for sample in data])
        targets = np.array([sample[2] for sample in data])
        # self.model_based_estimator = LogisticRegression()
        estimator = RandomForestClassifier(max_depth=5, random_state=0)
        estimator.fit(features, targets)
        self.model_based_estimator = estimator

    def evaluate_one_reward(self, x, a, r):
        # Model-based estimate for the action the evaluated policy would take.
        mb = self.model_based_estimator.predict([np.append(x, [self.eval_policy.give_a(x)])])[0]  # it can be round
        # IPS-corrected residual between logged reward and model estimate.
        return mb + (r - mb) * self.eval_policy.give_probability(x, a) / self.log_policy.give_probability(x, a)

    def evaluate_policy(self):
        """Mean doubly-robust reward over the logging policy's history."""
        estimates = [self.evaluate_one_reward(x, a, r)
                     for (x, a, r) in self.log_policy.history]
        return np.array(estimates).mean()
class ModelBasedEstimator:
    """Purely model-based (direct method) off-policy evaluator.

    Fits a random forest over context+action features and evaluates the
    target policy using model predictions alone (no IPS correction).
    """

    def __init__(self, log_policy: BasePolicy, eval_policy: BasePolicy):
        self.log_policy = log_policy
        self.eval_policy = eval_policy
        self.model_based_estimator = None

    def train(self, data):
        """Fit the reward model on (context, action, reward) triples."""
        # Features: the context with the action appended as the last column.
        features = np.array([np.append(sample[0], [sample[1]]) for sample in data])
        targets = np.array([sample[2] for sample in data])
        # self.model_based_estimator = LogisticRegression()
        estimator = RandomForestClassifier(max_depth=5, random_state=0)
        estimator.fit(features, targets)
        self.model_based_estimator = estimator

    def evaluate_one_reward(self, x, a, r):
        # Predicted reward for the action the evaluated policy would take;
        # the logged action/reward are ignored by the direct method.
        mb = self.model_based_estimator.predict([np.append(x, [self.eval_policy.give_a(x)])])[0]  # it can be round
        return mb

    def evaluate_policy(self):
        """Mean model-predicted reward over the logging policy's history."""
        estimates = [self.evaluate_one_reward(x, a, r)
                     for (x, a, r) in self.log_policy.history]
        return np.array(estimates).mean()
class Replay:
    """Replay (rejection-sampling) off-policy evaluator.

    Keeps only the logged interactions where the evaluation policy would
    have chosen the same action as the logging policy, and averages the
    rewards of those interactions.
    """

    def __init__(self, log_policy: BasePolicy, eval_policy: BasePolicy):
        # Policy that produced the logged data (provides .history tuples).
        self.log_policy = log_policy
        # Policy whose expected reward we want to estimate.
        self.eval_policy = eval_policy
        self.T = 0

    def train(self, data):
        # Replay is model-free: nothing to fit.
        pass

    def evaluate_one_reward(self, x, a, r):
        """Return r if the evaluation policy picks the logged action, else None."""
        if self.eval_policy.give_a(x) == a:
            return r
        return None

    def evaluate_policy(self):
        """Mean reward over the accepted (matching-action) interactions."""
        accepted = []
        for (context, action, reward) in self.log_policy.history:
            matched_reward = self.evaluate_one_reward(context, action, reward)
            if matched_reward is not None:
                accepted.append(matched_reward)
        return np.array(accepted).mean()
| [
"sklearn.ensemble.RandomForestClassifier",
"numpy.append",
"numpy.array"
] | [((1222, 1273), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(5)', 'random_state': '(0)'}), '(max_depth=5, random_state=0)\n', (1244, 1273), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((2297, 2348), 'sklearn.ensemble.RandomForestClassifier', 'RandomForestClassifier', ([], {'max_depth': '(5)', 'random_state': '(0)'}), '(max_depth=5, random_state=0)\n', (2319, 2348), False, 'from sklearn.ensemble import RandomForestClassifier\n'), ((672, 698), 'numpy.array', 'np.array', (['expected_rewards'], {}), '(expected_rewards)\n', (680, 698), True, 'import numpy as np\n'), ((1790, 1816), 'numpy.array', 'np.array', (['expected_rewards'], {}), '(expected_rewards)\n', (1798, 1816), True, 'import numpy as np\n'), ((2923, 2949), 'numpy.array', 'np.array', (['expected_rewards'], {}), '(expected_rewards)\n', (2931, 2949), True, 'import numpy as np\n'), ((3589, 3615), 'numpy.array', 'np.array', (['expected_rewards'], {}), '(expected_rewards)\n', (3597, 3615), True, 'import numpy as np\n'), ((1038, 1061), 'numpy.append', 'np.append', (['x[0]', '[x[1]]'], {}), '(x[0], [x[1]])\n', (1047, 1061), True, 'import numpy as np\n'), ((2113, 2136), 'numpy.append', 'np.append', (['x[0]', '[x[1]]'], {}), '(x[0], [x[1]])\n', (2122, 2136), True, 'import numpy as np\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2019 <NAME> (Nagoya University)
# based on PyTorch implementation for WaveNet vocoder by <NAME> (Nagoya University)
# Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
from __future__ import division
from __future__ import print_function
import argparse
from dateutil.relativedelta import relativedelta
from distutils.util import strtobool
import logging
import itertools
import os
import sys
import time
import numpy as np
import six
import torch
from torch.autograd import Variable
from torch import nn
from torchvision import transforms
from torch.utils.data import DataLoader
from utils import find_files
from utils import read_hdf5
from utils import read_txt
from gru_vae import initialize
from gru_vae import GRU_RNN
from gru_vae import TWFSEloss
from gru_vae import sampling_vae_batch, loss_vae
from dataset import FeatureDatasetSingleVAE, padding
from dtw_c import dtw_c as dtw
#np.set_printoptions(threshold=np.inf)
#torch.set_printoptions(threshold=np.inf)
def train_generator(dataloader, device, batch_size=80):
    """Infinite generator yielding (mini-)batches for VAE-VC training.

    Loops over ``dataloader`` forever.  When ``batch_size`` > 0, every
    utterance batch is further sliced along the frame axis into segments of
    ``batch_size`` frames and one tuple is yielded per segment; when
    ``batch_size`` == 0, whole utterance batches are yielded.  After each
    full pass over the dataloader a sentinel tuple (empty lists with
    c_idx == -1) is yielded so the caller can detect the epoch boundary.

    Args:
        dataloader: DataLoader producing dicts with padded feature tensors
            ('h_src', 'src_code', 'trg_code', 'cv_src', 'h_src_trg'),
            speech-frame index tensors ('spcidx_src', 'spcidx_src_trg'),
            per-utterance lengths ('flen_*') and file names ('featfile_*').
        device: torch device the feature tensors are moved to.
        batch_size: frames per yielded segment (0 = utterance batching).
    """
    while True:
        c_idx = 0
        # process over all of files
        for idx, batch in enumerate(dataloader):
            # Per-utterance lengths; padded tensors are truncated to the
            # longest utterance in the batch before moving to the device.
            flens = batch['flen_src'].data.numpy()
            max_flen = np.max(flens) ## get max frame length
            flens_spc_src = batch['flen_spc_src'].data.numpy()
            max_flen_spc_src = np.max(flens_spc_src) ## get max frame length
            flens_src_trg = batch['flen_src_trg'].data.numpy()
            max_flen_src_trg = np.max(flens_src_trg) ## get max frame length
            flens_spc_src_trg = batch['flen_spc_src_trg'].data.numpy()
            max_flen_spc_src_trg = np.max(flens_spc_src_trg) ## get max frame length
            hs_src = batch['h_src'][:,:max_flen].to(device)
            src_codes = batch['src_code'][:,:max_flen].to(device)
            trg_codes = batch['trg_code'][:,:max_flen].to(device)
            cvs_src = batch['cv_src'][:,:max_flen].to(device)
            spcidcs_src = batch['spcidx_src'][:,:max_flen_spc_src].to(device)
            hs_src_trg = batch['h_src_trg'][:,:max_flen_src_trg].to(device)
            spcidcs_src_trg = batch['spcidx_src_trg'][:,:max_flen_spc_src_trg].to(device)
            featfiles_src = batch['featfile_src']
            featfiles_src_trg = batch['featfile_src_trg']
            n_batch_utt = hs_src.size(0)
            # use mini batch
            if batch_size != 0:
                # First segment covers frames [0, batch_size-1].
                src_idx_s = 0
                src_idx_e = batch_size-1
                # Per-utterance start/end positions (into spcidcs_src) of the
                # speech-frame indices that fall inside the current segment;
                # -1 means no speech frame of that utterance lies in it.
                spcidcs_src_s_idx = np.repeat(-1,n_batch_utt)
                spcidcs_src_e_idx = np.repeat(-1,n_batch_utt)
                # s_flag/e_flag record whether the segment's start/end index
                # has been located for each utterance during the scan below.
                s_flag = np.repeat(False,n_batch_utt)
                e_flag = np.repeat(True,n_batch_utt)
                # Number of valid (non-padded) frames of each utterance that
                # lie inside the current segment.
                flen_acc = np.repeat(batch_size,n_batch_utt)
                # Scan each utterance's speech-frame indices to find those
                # inside [src_idx_s, src_idx_e].
                for j in range(n_batch_utt):
                    for i in range(spcidcs_src_e_idx[j]+1,flens_spc_src[j]):
                        if not s_flag[j] and spcidcs_src[j,i] >= src_idx_s:
                            if spcidcs_src[j,i] > src_idx_e:
                                spcidcs_src_s_idx[j] = -1
                                break
                            spcidcs_src_s_idx[j] = i
                            s_flag[j] = True
                            e_flag[j] = False
                            if i == flens_spc_src[j]-1:
                                spcidcs_src_e_idx[j] = i
                                s_flag[j] = False
                                e_flag[j] = True
                                break
                        elif not e_flag[j] and (spcidcs_src[j,i] >= src_idx_e or i == flens_spc_src[j]-1):
                            if spcidcs_src[j,i] > src_idx_e:
                                spcidcs_src_e_idx[j] = i-1
                            else:
                                spcidcs_src_e_idx[j] = i
                            s_flag[j] = False
                            e_flag[j] = True
                            break
                # First segment: every utterance participates.
                select_utt_idx = [i for i in range(n_batch_utt)]
                yield hs_src, src_codes[:,src_idx_s:src_idx_e+1], trg_codes[:,src_idx_s:src_idx_e+1], hs_src_trg, cvs_src, src_idx_s, src_idx_e, spcidcs_src_s_idx, spcidcs_src_e_idx, c_idx, idx, spcidcs_src, spcidcs_src_trg, featfiles_src, featfiles_src_trg, flens, flens_src_trg, flens_spc_src, flens_spc_src_trg, select_utt_idx, flen_acc, n_batch_utt
                # Remaining segments until the longest utterance is consumed;
                # only utterances with speech frames left are selected.
                while src_idx_e < max_flen-1:
                    src_idx_s = src_idx_e + 1
                    src_idx_e = src_idx_s+batch_size-1
                    if src_idx_e >= max_flen:
                        src_idx_e = max_flen-1
                    select_utt_idx = []
                    for j in range(n_batch_utt):
                        if spcidcs_src_e_idx[j] < flens_spc_src[j]-1:
                            if src_idx_e >= flens[j]:
                                flen_acc[j] = flens[j]-src_idx_s
                            # Same speech-index scan as above, resumed from
                            # the previous segment's end position.
                            for i in range(spcidcs_src_e_idx[j]+1,flens_spc_src[j]):
                                if not s_flag[j] and spcidcs_src[j,i] >= src_idx_s:
                                    if spcidcs_src[j,i] > src_idx_e:
                                        spcidcs_src_s_idx[j] = -1
                                        break
                                    spcidcs_src_s_idx[j] = i
                                    s_flag[j] = True
                                    e_flag[j] = False
                                    if i == flens_spc_src[j]-1:
                                        spcidcs_src_e_idx[j] = i
                                        s_flag[j] = False
                                        e_flag[j] = True
                                        break
                                elif not e_flag[j] and (spcidcs_src[j,i] >= src_idx_e or i == flens_spc_src[j]-1):
                                    if spcidcs_src[j,i] > src_idx_e:
                                        spcidcs_src_e_idx[j] = i-1
                                    else:
                                        spcidcs_src_e_idx[j] = i
                                    s_flag[j] = False
                                    e_flag[j] = True
                                    break
                            select_utt_idx.append(j)
                    yield hs_src, src_codes[:,src_idx_s:src_idx_e+1], trg_codes[:,src_idx_s:src_idx_e+1], hs_src_trg, cvs_src, src_idx_s, src_idx_e, spcidcs_src_s_idx, spcidcs_src_e_idx, c_idx, idx, spcidcs_src, spcidcs_src_trg, featfiles_src, featfiles_src_trg, flens, flens_src_trg, flens_spc_src, flens_spc_src_trg, select_utt_idx, flen_acc, n_batch_utt
            # use utterance batch
            else:
                yield hs_src, src_codes, trg_codes, hs_src_trg, cvs_src, c_idx, idx, spcidcs_src, spcidcs_src_trg, featfiles_src, featfiles_src_trg, flens, flens_src_trg, flens_spc_src, flens_spc_src_trg, n_batch_utt
            c_idx += 1
        if c_idx > 0:
        #if c_idx > 1:
        #if c_idx > 2:
            break
        # Epoch boundary: sentinel tuple (c_idx slot is -1) tells the caller
        # a full pass over the dataloader has completed.
        if batch_size > 0:
            yield [], [], [], [], [], [], [], [], [], -1, -1, [], [], [], [], [], [], [], [], [], [], []
        else:
            yield [], [], [], [], [], -1, -1, [], [], [], [], [], [], [], [], []
def save_checkpoint(checkpoint_dir, model_encoder, model_decoder, optimizer, numpy_random_state, torch_random_state, iterations):
    """Save encoder/decoder/optimizer states and RNG states to a checkpoint.

    The models are moved to CPU before serialization (so the checkpoint can
    be loaded on machines without a GPU) and moved back to CUDA afterwards
    to resume training.

    Args:
        checkpoint_dir: directory for the checkpoint file (created if missing).
        model_encoder: encoder network (must provide state_dict/cpu/cuda).
        model_decoder: decoder network (must provide state_dict/cpu/cuda).
        optimizer: optimizer whose state_dict is saved.
        numpy_random_state: numpy RNG state to store for exact resumption.
        torch_random_state: torch RNG state to store for exact resumption.
        iterations: iteration count, embedded in the checkpoint file name.
    """
    model_encoder.cpu()
    model_decoder.cpu()
    checkpoint = {
        "model_encoder": model_encoder.state_dict(),
        "model_decoder": model_decoder.state_dict(),
        "optimizer": optimizer.state_dict(),
        "numpy_random_state": numpy_random_state,
        "torch_random_state": torch_random_state,
        "iterations": iterations}
    # exist_ok avoids the race between a separate exists() check and makedirs().
    os.makedirs(checkpoint_dir, exist_ok=True)
    torch.save(checkpoint, checkpoint_dir + "/checkpoint-%d.pkl" % iterations)
    model_encoder.cuda()
    model_decoder.cuda()
    logging.info("%d-iter checkpoint created." % iterations)
def main():
parser = argparse.ArgumentParser()
# path setting
parser.add_argument("--feats_src", required=True,
type=str, help="directory or list of source feat files")
parser.add_argument("--feats_src_trg", required=True,
type=str, help="directory or list of source feat files")
parser.add_argument("--feats_trg", required=True,
type=str, help="directory or list of target feat files")
parser.add_argument("--feats_trg_src", required=True,
type=str, help="directory or list of target feat files")
parser.add_argument("--feats_eval_src", required=True,
type=str, help="directory or list of evaluation source feat files")
parser.add_argument("--feats_eval_trg", required=True,
type=str, help="directory or list of evaluation target feat files")
parser.add_argument("--stats_src", required=True,
type=str, help="hdf5 file including source statistics")
parser.add_argument("--stats_trg", required=True,
type=str, help="hdf5 file including target statistics")
parser.add_argument("--stats_jnt", required=True,
type=str, help="hdf5 file including target statistics")
parser.add_argument("--spk_src", required=True,
type=str, help="hdf5 file including source statistics")
parser.add_argument("--spk_trg", required=True,
type=str, help="hdf5 file including source statistics")
parser.add_argument("--expdir", required=True,
type=str, help="directory to save the model")
parser.add_argument('--batch_size_utt', default=8, type=int,
help='Batch size')
parser.add_argument('--batch_size_utt_eval', default=8, type=int,
help='Batch size')
parser.add_argument('--pad_len', default=2200, type=int,
help='Batch length')
parser.add_argument('--n_workers', default=2, type=int,
help='# of workers for dataset')
parser.add_argument('--stdim', default=4, type=int,
help='stdim for mcep')
# network structure setting
parser.add_argument("--in_dim", default=54,
type=int, help="number of dimension of input features")
parser.add_argument("--lat_dim", default=32,
type=int, help="number of dimension of output features")
parser.add_argument("--out_dim", default=50,
type=int, help="number of dimension of output features")
parser.add_argument("--hidden_layers", default=1,
type=int, help="number of hidden layers")
parser.add_argument("--hidden_units", default=1024,
type=int, help="number of hidden units")
parser.add_argument("--kernel_size", default=3,
type=int, help="number of hidden units")
parser.add_argument("--dilation_size", default=2,
type=int, help="number of hidden units")
parser.add_argument("--n_cyc", default=2,
type=int, help="number of hidden units")
parser.add_argument("--do_prob", default=0.5,
type=float, help="dropout probability")
# network training setting
parser.add_argument("--lr", default=1e-4,
type=float, help="learning rate")
parser.add_argument("--weight_decay", default=0.0,
type=float, help="weight decay coefficient")
parser.add_argument("--batch_size", default=80,
type=int, help="batch size (if set 0, utterance batch will be used)")
parser.add_argument("--epoch_count", default=400,
type=int, help="number of training epochs")
# other setting
parser.add_argument("--seed", default=1,
type=int, help="seed number")
parser.add_argument("--resume", default=None,
type=str, help="model path to restart training")
parser.add_argument("--GPU_device", default=0,
type=int, help="selection of GPU device")
parser.add_argument("--verbose", default=1,
type=int, help="log level")
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.GPU_device)
# make experimental directory
if not os.path.exists(args.expdir):
os.makedirs(args.expdir)
# set log level
if args.verbose == 1:
logging.basicConfig(level=logging.INFO,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
elif args.verbose > 1:
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
else:
logging.basicConfig(level=logging.WARN,
format='%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',
datefmt='%m/%d/%Y %I:%M:%S',
filename=args.expdir + "/train.log")
logging.getLogger().addHandler(logging.StreamHandler())
logging.warn("logging is disabled.")
# fix seed
os.environ['PYTHONHASHSEED'] = str(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if str(device) == "cpu":
raise ValueError('ERROR: Training by CPU is not acceptable.')
if args.n_cyc < 1:
half_cyc = True
args.n_cyc = 1
else:
half_cyc = False
# save args as conf
torch.save(args, args.expdir + "/model.conf")
stdim = args.stdim
stdim_ = stdim+1
# define statistics src
mean_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/mean_feat_org_lf0_jnt"))
std_jnt = torch.FloatTensor(read_hdf5(args.stats_jnt, "/scale_feat_org_lf0_jnt"))
mean_jnt_trg = torch.FloatTensor(read_hdf5(args.stats_jnt, "/mean_feat_org_lf0_jnt")[stdim:])
std_jnt_trg = torch.FloatTensor(read_hdf5(args.stats_jnt, "/scale_feat_org_lf0_jnt")[stdim:])
if torch.cuda.is_available():
mean_jnt = mean_jnt.cuda()
std_jnt = std_jnt.cuda()
mean_jnt_trg = mean_jnt_trg.cuda()
std_jnt_trg = std_jnt_trg.cuda()
else:
logging.error("gpu is not available. please check the setting.")
# define network
model_encoder = GRU_RNN(
in_dim=args.in_dim,
out_dim=args.lat_dim*2,
hidden_layers=args.hidden_layers,
hidden_units=args.hidden_units,
kernel_size=args.kernel_size,
dilation_size=args.dilation_size,
do_prob=args.do_prob,
scale_out_flag=False)
logging.info(model_encoder)
model_decoder = GRU_RNN(
in_dim=args.lat_dim+2,
out_dim=args.out_dim,
hidden_layers=args.hidden_layers,
hidden_units=args.hidden_units,
kernel_size=args.kernel_size,
dilation_size=args.dilation_size,
do_prob=args.do_prob,
scale_in_flag=False)
logging.info(model_decoder)
criterion_mcd = TWFSEloss()
# send to gpu
if torch.cuda.is_available():
model_encoder.cuda()
model_decoder.cuda()
criterion_mcd.cuda()
else:
logging.error("gpu is not available. please check the setting.")
sys.exit(1)
model_encoder.apply(initialize)
model_encoder.train()
model_decoder.apply(initialize)
model_decoder.train()
model_encoder.scale_in.weight = torch.nn.Parameter(torch.diag(1.0/std_jnt.data).unsqueeze(2))
model_encoder.scale_in.bias = torch.nn.Parameter(-(mean_jnt.data/std_jnt.data))
model_decoder.scale_out.weight = torch.nn.Parameter(torch.diag(std_jnt_trg.data).unsqueeze(2))
model_decoder.scale_out.bias = torch.nn.Parameter(mean_jnt_trg.data)
if args.resume is None:
epoch_idx = 0
else:
checkpoint = torch.load(args.resume)
model_encoder.load_state_dict(checkpoint["model_encoder"])
model_decoder.load_state_dict(checkpoint["model_decoder"])
epoch_idx = checkpoint["iterations"]
logging.info("restored from %d-iter checkpoint." % (epoch_idx))
init_pp = np.zeros((args.batch_size_utt,1,args.lat_dim*2))
y_in_pp = torch.FloatTensor(init_pp).cuda()
y_in_src = y_in_trg = torch.unsqueeze(torch.unsqueeze((0-mean_jnt_trg)/std_jnt_trg,0),0).repeat(args.batch_size_utt,1,1)
with torch.no_grad():
init_pp_eval = np.zeros((args.batch_size_utt_eval,1,args.lat_dim*2))
y_in_pp_eval = torch.FloatTensor(init_pp_eval).cuda()
y_in_src_eval = y_in_trg_eval = torch.unsqueeze(torch.unsqueeze((0-mean_jnt_trg)/std_jnt_trg,0),0).repeat(args.batch_size_utt_eval,1,1)
for param in model_encoder.parameters():
param.requires_grad = True
for param in model_decoder.parameters():
param.requires_grad = True
for param in model_encoder.scale_in.parameters():
param.requires_grad = False
for param in model_decoder.scale_out.parameters():
param.requires_grad = False
module_list = list(model_encoder.conv.parameters())
module_list += list(model_encoder.gru.parameters()) + list(model_encoder.out_1.parameters())
module_list += list(model_decoder.conv.parameters())
module_list += list(model_decoder.gru.parameters()) + list(model_decoder.out_1.parameters())
optimizer = torch.optim.Adam(module_list, lr=args.lr)
if args.resume is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
parameters = filter(lambda p: p.requires_grad, model_encoder.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000
logging.info('Trainable Parameters (encoder): %.3f million' % parameters)
parameters = filter(lambda p: p.requires_grad, model_decoder.parameters())
parameters = sum([np.prod(p.size()) for p in parameters]) / 1000000
logging.info('Trainable Parameters (decoder): %.3f million' % parameters)
# define generator training
if os.path.isdir(args.feats_src):
feat_list_src = sorted(find_files(args.feats_src, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_src):
feat_list_src = read_txt(args.feats_src)
else:
logging.error("--feats_src should be directory or list.")
sys.exit(1)
if os.path.isdir(args.feats_src_trg):
feat_list_src_trg = sorted(find_files(args.feats_src_trg, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_src_trg):
feat_list_src_trg = read_txt(args.feats_src_trg)
else:
logging.error("--feats_src_trg should be directory or list.")
sys.exit(1)
assert(len(feat_list_src) == len(feat_list_src_trg))
logging.info("number of training src data = %d." % len(feat_list_src))
if os.path.isdir(args.feats_trg):
feat_list_trg = sorted(find_files(args.feats_trg, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_trg):
feat_list_trg = read_txt(args.feats_trg)
else:
logging.error("--feats_trg should be directory or list.")
sys.exit(1)
if os.path.isdir(args.feats_trg_src):
feat_list_trg_src = sorted(find_files(args.feats_trg_src, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_trg_src):
feat_list_trg_src = read_txt(args.feats_trg_src)
else:
logging.error("--feats_trg_src should be directory or list.")
sys.exit(1)
assert(len(feat_list_trg) == len(feat_list_trg_src))
logging.info("number of training trg data = %d." % len(feat_list_trg))
n_train_data = len(feat_list_src) + len(feat_list_trg)
mod_train_batch = n_train_data % args.batch_size_utt
if mod_train_batch > 0:
init_pp_mod = np.zeros((mod_train_batch,1,args.lat_dim*2))
y_in_pp_mod= torch.FloatTensor(init_pp_mod).cuda()
y_in_src_mod = y_in_trg_mod = torch.unsqueeze(torch.unsqueeze((0-mean_jnt_trg)/std_jnt_trg,0),0).repeat(mod_train_batch,1,1)
# define generator evaluation
if os.path.isdir(args.feats_eval_src):
feat_list_eval_src = sorted(find_files(args.feats_eval_src, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_eval_src):
feat_list_eval_src = read_txt(args.feats_eval_src)
else:
logging.error("--feats_eval_src should be directory or list.")
sys.exit(1)
if os.path.isdir(args.feats_eval_trg):
feat_list_eval_trg = sorted(find_files(args.feats_eval_trg, "*.h5", use_dir_name=False))
elif os.path.isfile(args.feats_eval_trg):
feat_list_eval_trg = read_txt(args.feats_eval_trg)
else:
logging.error("--feats_eval_trg should be directory or list.")
sys.exit(1)
assert(len(feat_list_eval_src) == len(feat_list_eval_trg))
logging.info("number of evaluation data = %d." % len(feat_list_eval_src))
n_eval_data = len(feat_list_eval_src)
mod_eval_batch = n_eval_data % args.batch_size_utt_eval
if mod_eval_batch > 0:
with torch.no_grad():
init_pp_eval_mod = np.zeros((mod_eval_batch,1,args.lat_dim*2))
y_in_pp_eval_mod = torch.FloatTensor(init_pp_eval_mod).cuda()
y_in_src_eval_mod = y_in_trg_eval_mod = torch.unsqueeze(torch.unsqueeze((0-mean_jnt_trg)/std_jnt_trg,0),0).repeat(mod_eval_batch,1,1)
# data
def zero_pad(x): return padding(x, args.pad_len, value=0.0)
pad_transform = transforms.Compose([zero_pad])
dataset = FeatureDatasetSingleVAE(feat_list_src+feat_list_trg, feat_list_src_trg+feat_list_trg_src, pad_transform, args.spk_src)
dataloader = DataLoader(dataset, batch_size=args.batch_size_utt, shuffle=True, num_workers=args.n_workers)
dataset_eval_src = FeatureDatasetSingleVAE(feat_list_eval_src, feat_list_eval_trg, pad_transform, args.spk_src)
dataloader_eval_src = DataLoader(dataset_eval_src, batch_size=args.batch_size_utt_eval, num_workers=args.n_workers)
dataset_eval_trg = FeatureDatasetSingleVAE(feat_list_eval_trg, feat_list_eval_src, pad_transform, args.spk_src)
dataloader_eval_trg = DataLoader(dataset_eval_trg, batch_size=args.batch_size_utt_eval, num_workers=args.n_workers)
# generator optimization instance
generator_src = train_generator(dataloader, device, batch_size=args.batch_size)
# generator eval instance
generator_eval_src = train_generator(dataloader_eval_src, device, batch_size=0)
generator_eval_trg = train_generator(dataloader_eval_trg, device, batch_size=0)
gv_trg_mean = read_hdf5(args.stats_trg, "/gv_range_mean")[1:]
gv_src_mean = read_hdf5(args.stats_src, "/gv_range_mean")[1:]
# train
batch_lat_src = [None]*args.n_cyc
y_in_pp_src = [None]*args.n_cyc
h_in_pp_src = [None]*args.n_cyc
batch_trj_src_src = [None]*args.n_cyc
y_in_src_src = [None]*args.n_cyc
h_in_src_src = [None]*args.n_cyc
batch_trj_src_trg = [None]*args.n_cyc
y_in_src_trg = [None]*args.n_cyc
h_in_src_trg = [None]*args.n_cyc
batch_lat_src_trg = [None]*args.n_cyc
y_in_pp_src_trg = [None]*args.n_cyc
h_in_pp_src_trg = [None]*args.n_cyc
batch_trj_src_trg_src = [None]*args.n_cyc
y_in_src_trg_src = [None]*args.n_cyc
h_in_src_trg_src = [None]*args.n_cyc
batch_lat_trg_ = [None]*args.n_cyc
batch_trj_trg_trg_ = [None]*args.n_cyc
batch_trj_trg_src_ = [None]*args.n_cyc
batch_lat_trg_src_ = [None]*args.n_cyc
batch_trj_trg_src_trg_ = [None]*args.n_cyc
batch_lat_src_ = [None]*args.n_cyc
batch_trj_src_src_ = [None]*args.n_cyc
batch_trj_src_trg_ = [None]*args.n_cyc
batch_lat_src_trg_ = [None]*args.n_cyc
batch_trj_src_trg_src_ = [None]*args.n_cyc
batch_loss_mcd_trg_trg = [None]*args.n_cyc
batch_loss_mcd_trg_src_trg = [None]*args.n_cyc
batch_loss_mcd_trg_src = [None]*args.n_cyc
batch_loss_mcd_src_src = [None]*args.n_cyc
batch_loss_mcd_src_trg_src = [None]*args.n_cyc
batch_loss_mcd_src_trg = [None]*args.n_cyc
batch_loss_lat_src = [None]*args.n_cyc
batch_loss_lat_trg = [None]*args.n_cyc
batch_loss_lat_src_cv = [None]*args.n_cyc
batch_loss_lat_trg_cv = [None]*args.n_cyc
batch_gv_trg_trg = [None]*args.n_cyc
batch_mcdpow_trg_trg = [None]*args.n_cyc
batch_mcd_trg_trg = [None]*args.n_cyc
batch_gv_trg_src_trg = [None]*args.n_cyc
batch_mcdpow_trg_src_trg = [None]*args.n_cyc
batch_mcd_trg_src_trg = [None]*args.n_cyc
batch_gv_trg_src = [None]*args.n_cyc
batch_mcdpow_trg_src = [None]*args.n_cyc
batch_mcd_trg_src = [None]*args.n_cyc
batch_lat_dist_trgsrc1 = [None]*args.n_cyc
batch_lat_dist_trgsrc2 = [None]*args.n_cyc
batch_lat_cdist_trgsrc1 = [None]*args.n_cyc
batch_lat_cdist_trgsrc2 = [None]*args.n_cyc
batch_gv_src_src = [None]*args.n_cyc
batch_mcdpow_src_src = [None]*args.n_cyc
batch_mcd_src_src = [None]*args.n_cyc
batch_gv_src_trg_src = [None]*args.n_cyc
batch_mcdpow_src_trg_src = [None]*args.n_cyc
batch_mcd_src_trg_src = [None]*args.n_cyc
batch_gv_src_trg = [None]*args.n_cyc
batch_mcdpow_src_trg = [None]*args.n_cyc
batch_mcd_src_trg = [None]*args.n_cyc
batch_lat_dist_srctrg1 = [None]*args.n_cyc
batch_lat_dist_srctrg2 = [None]*args.n_cyc
batch_lat_cdist_srctrg1 = [None]*args.n_cyc
batch_lat_cdist_srctrg2 = [None]*args.n_cyc
loss = []
loss_mcd_trg_trg = []
loss_mcd_trg_src_trg = []
loss_mcd_trg_src = []
loss_mcd_src_src = []
loss_mcd_src_trg_src = []
loss_mcd_src_trg = []
loss_lat_src = []
loss_lat_trg = []
loss_lat_src_cv = []
loss_lat_trg_cv = []
gv_trg_trg = []
mcdpow_trg_trg = []
mcd_trg_trg = []
gv_trg_src_trg = []
mcdpow_trg_src_trg = []
mcd_trg_src_trg = []
gv_trg_src = []
mcdpow_trg_src = []
mcd_trg_src = []
lat_dist_trgsrc1 = []
lat_dist_trgsrc2 = []
gv_src_src = []
mcdpow_src_src = []
mcd_src_src = []
gv_src_trg_src = []
mcdpow_src_trg_src = []
mcd_src_trg_src = []
gv_src_trg = []
mcdpow_src_trg = []
mcd_src_trg = []
lat_dist_srctrg1 = []
lat_dist_srctrg2 = []
for i in range(args.n_cyc):
loss_mcd_trg_trg.append([])
loss_mcd_trg_src_trg.append([])
loss_mcd_trg_src.append([])
loss_mcd_src_src.append([])
loss_mcd_src_trg_src.append([])
loss_mcd_src_trg.append([])
loss_lat_src.append([])
loss_lat_trg.append([])
loss_lat_src_cv.append([])
loss_lat_trg_cv.append([])
gv_trg_trg.append([])
mcdpow_trg_trg.append([])
mcd_trg_trg.append([])
gv_trg_src_trg.append([])
mcdpow_trg_src_trg.append([])
mcd_trg_src_trg.append([])
gv_trg_src.append([])
mcdpow_trg_src.append([])
mcd_trg_src.append([])
lat_dist_trgsrc1.append([])
lat_dist_trgsrc2.append([])
gv_src_src.append([])
mcdpow_src_src.append([])
mcd_src_src.append([])
gv_src_trg_src.append([])
mcdpow_src_trg_src.append([])
mcd_src_trg_src.append([])
gv_src_trg.append([])
mcdpow_src_trg.append([])
mcd_src_trg.append([])
lat_dist_srctrg1.append([])
lat_dist_srctrg2.append([])
total = []
n_ev_cyc = 1
#if args.n_cyc > 1:
# n_ev_cyc = 2
#else:
# n_ev_cyc = 1
eval_loss_mcd_trg_trg = [None]*n_ev_cyc
eval_loss_mcd_trg_src_trg = [None]*n_ev_cyc
eval_loss_mcd_trg_src = [None]*n_ev_cyc
eval_loss_mcd_src_src = [None]*n_ev_cyc
eval_loss_mcd_src_trg_src = [None]*n_ev_cyc
eval_loss_mcd_src_trg = [None]*n_ev_cyc
eval_loss_lat_src = [None]*n_ev_cyc
eval_loss_lat_trg = [None]*n_ev_cyc
eval_loss_lat_src_cv = [None]*n_ev_cyc
eval_loss_lat_trg_cv = [None]*n_ev_cyc
eval_gv_trg_trg = [None]*n_ev_cyc
eval_mcdpow_trg_trg = [None]*n_ev_cyc
eval_mcd_trg_trg = [None]*n_ev_cyc
eval_gv_trg_src_trg = [None]*n_ev_cyc
eval_mcdpow_trg_src_trg = [None]*n_ev_cyc
eval_mcd_trg_src_trg = [None]*n_ev_cyc
eval_gv_trg_src = [None]*n_ev_cyc
eval_mcdpow_trg_src = [None]*n_ev_cyc
eval_mcdpowstd_trg_src = [None]*n_ev_cyc
eval_mcd_trg_src = [None]*n_ev_cyc
eval_mcdstd_trg_src = [None]*n_ev_cyc
eval_lat_dist_trgsrc1 = [None]*n_ev_cyc
eval_lat_dist_trgsrc2 = [None]*n_ev_cyc
eval_gv_src_src = [None]*n_ev_cyc
eval_mcdpow_src_src = [None]*n_ev_cyc
eval_mcd_src_src = [None]*n_ev_cyc
eval_gv_src_trg_src = [None]*n_ev_cyc
eval_mcdpow_src_trg_src = [None]*n_ev_cyc
eval_mcd_src_trg_src = [None]*n_ev_cyc
eval_gv_src_trg = [None]*n_ev_cyc
eval_mcdpow_src_trg = [None]*n_ev_cyc
eval_mcdpowstd_src_trg = [None]*n_ev_cyc
eval_mcd_src_trg = [None]*n_ev_cyc
eval_mcdstd_src_trg = [None]*n_ev_cyc
eval_lat_dist_srctrg1 = [None]*n_ev_cyc
eval_lat_dist_srctrg2 = [None]*n_ev_cyc
prev_featfile_src = np.repeat("",args.batch_size_utt)
iter_idx = 0
iter_count = 0
min_idx = -1
min_eval_mcdpow_src_trg = 99999999.99
min_eval_mcdpowstd_src_trg = 99999999.99
min_eval_mcd_src_trg = 99999999.99
min_eval_mcdstd_src_trg = 99999999.99
if args.resume is not None:
np.random.set_state(checkpoint["numpy_random_state"])
torch.set_rng_state(checkpoint["torch_random_state"])
logging.info("==%d EPOCH==" % (epoch_idx+1))
logging.info("Training data")
while epoch_idx < args.epoch_count:
start = time.time()
if args.batch_size > 0:
if iter_count > 0:
featfile_src_ = featfile_src
featfile_src_trg_ = featfile_src_trg
spcidx_src_ = spcidx_src
prev_flens_src = flens_src
flens_spc_src_ = flens_spc_src
batch_src_trg_ = batch_src_trg
spcidx_src_trg_ = spcidx_src_trg
flens_spc_src_trg_ = flens_spc_src_trg
n_batch_utt_ = n_batch_utt
batch_src, batch_src_src_code, batch_src_trg_code, batch_src_trg, batch_cv_src, src_idx_s, src_idx_e, spcidx_src_s_idx, spcidx_src_e_idx, c_idx_src, utt_idx_src, spcidx_src, spcidx_src_trg, featfile_src, featfile_src_trg, flens_src, flens_src_trg, flens_spc_src, flens_spc_src_trg, select_utt_idx, flen_acc, n_batch_utt = next(generator_src)
if iter_count > 0 and (src_idx_s == 0 or c_idx_src < 0):
with torch.no_grad():
if n_batch_utt_ == args.batch_size_utt:
trj_lat_srctrg, _, _ = model_encoder(batch_src_trg_, y_in_pp, clamp_vae=True, lat_dim=args.lat_dim)
else:
trj_lat_srctrg, _, _ = model_encoder(batch_src_trg_, y_in_pp_mod, clamp_vae=True, lat_dim=args.lat_dim)
for i in range(n_batch_utt_):
_, _, batch_mcdpow_src_trg[0], _ = dtw.dtw_org_to_trg(np.array(torch.index_select(trj_src_trg[i],0,spcidx_src_[i,:flens_spc_src_[i]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg_[i][:,stdim:],0,spcidx_src_trg_[i,:flens_spc_src_trg_[i]]).cpu().data.numpy(), dtype=np.float64))
_, _, batch_mcd_src_trg[0], _ = dtw.dtw_org_to_trg(np.array(torch.index_select(trj_src_trg[i][:,1:],0,spcidx_src_[i,:flens_spc_src_[i]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg_[i][:,stdim_:],0,spcidx_src_trg_[i,:flens_spc_src_trg_[i]]).cpu().data.numpy(), dtype=np.float64))
trj_lat_srctrg_ = np.array(torch.index_select(trj_lat_srctrg[i],0,spcidx_src_trg_[i,:flens_spc_src_trg_[i]]).cpu().data.numpy(), dtype=np.float64)
trj_lat_src_ = np.array(torch.index_select(trj_lat_src[i],0,spcidx_src_[i,:flens_spc_src_[i]]).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg1, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_)
batch_lat_dist_srctrg1[0] = np.mean(np.sqrt(np.mean((aligned_lat_srctrg1-trj_lat_srctrg_)**2, axis=0)))
_, _, batch_lat_cdist_srctrg1[0], _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_, mcd=0)
aligned_lat_srctrg2, _, _, _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_)
batch_lat_dist_srctrg2[0] = np.mean(np.sqrt(np.mean((aligned_lat_srctrg2-trj_lat_src_)**2, axis=0)))
_, _, batch_lat_cdist_srctrg2[0], _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_, mcd=0)
if os.path.basename(os.path.dirname(featfile_src_[i])) == args.spk_src:
mcdpow_src_trg[0].append(batch_mcdpow_src_trg[0])
mcd_src_trg[0].append(batch_mcd_src_trg[0])
batch_lat_dist_srctrg1[0] = (batch_lat_dist_srctrg1[0]+batch_lat_dist_srctrg2[0])/2
lat_dist_srctrg1[0].append(batch_lat_dist_srctrg1[0])
batch_lat_dist_srctrg2[0] = (batch_lat_cdist_srctrg1[0]+batch_lat_cdist_srctrg2[0])/2
lat_dist_srctrg2[0].append(batch_lat_dist_srctrg2[0])
logging.info("batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f %.3f" % (featfile_src_[i], featfile_src_trg_[i], batch_mcdpow_src_trg[0], batch_mcd_src_trg[0], batch_lat_dist_srctrg1[0], batch_lat_dist_srctrg2[0]))
else:
mcdpow_trg_src[0].append(batch_mcdpow_src_trg[0])
mcd_trg_src[0].append(batch_mcd_src_trg[0])
batch_lat_dist_trgsrc1[0] = (batch_lat_dist_srctrg1[0]+batch_lat_dist_srctrg2[0])/2
lat_dist_trgsrc1[0].append(batch_lat_dist_trgsrc1[0])
batch_lat_dist_trgsrc2[0] = (batch_lat_cdist_srctrg1[0]+batch_lat_cdist_srctrg2[0])/2
lat_dist_trgsrc2[0].append(batch_lat_dist_trgsrc2[0])
logging.info("batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f %.3f" % (featfile_src_[i], featfile_src_trg_[i], batch_mcdpow_src_trg[0], batch_mcd_src_trg[0], batch_lat_dist_trgsrc1[0], batch_lat_dist_trgsrc2[0]))
            else:
                # Fetch the next training mini-batch from the source-speaker generator.
                batch_src, batch_src_src_code, batch_src_trg_code, batch_src_trg, batch_cv_src, c_idx_src, utt_idx_src, spcidx_src, spcidx_src_trg, featfile_src, featfile_src_trg, flens_src, flens_src_trg, flens_spc_src, flens_spc_src_trg, n_batch_utt = next(generator_src)
            # A negative cycle index from the generator signals the end of an epoch.
            if c_idx_src < 0:
                # Capture RNG states so training can resume reproducibly from this checkpoint.
                numpy_random_state = np.random.get_state()
                torch_random_state = torch.get_rng_state()
                # save current epoch model
                save_checkpoint(args.expdir, model_encoder, model_decoder, optimizer, numpy_random_state, torch_random_state, epoch_idx + 1)
                # Prime the first batch of the next epoch (segment-wise when batch_size > 0,
                # otherwise whole-utterance batches).
                if args.batch_size > 0:
                    batch_src, batch_src_src_code, batch_src_trg_code, batch_src_trg, batch_cv_src, src_idx_s, src_idx_e, spcidx_src_s_idx, spcidx_src_e_idx, c_idx_src, utt_idx_src, spcidx_src, spcidx_src_trg, featfile_src, featfile_src_trg, flens_src, flens_src_trg, flens_spc_src, flens_spc_src_trg, select_utt_idx, flen_acc, n_batch_utt = next(generator_src)
                else:
                    batch_src, batch_src_src_code, batch_src_trg_code, batch_src_trg, batch_cv_src, c_idx_src, utt_idx_src, spcidx_src, spcidx_src_trg, featfile_src, featfile_src_trg, flens_src, flens_src_trg, flens_spc_src, flens_spc_src_trg, n_batch_utt = next(generator_src)
                # Re-capture RNG states after the extra generator draw above.
                numpy_random_state = np.random.get_state()
                torch_random_state = torch.get_rng_state()
                # report current epoch
                text_log = "%.3f ;; " % np.mean(loss)
                #for i in range(args.n_cyc):
                for i in range(n_ev_cyc):
                    # Global-variance (GV) error per cycle: RMS of the log-GV difference
                    # against the reference speaker's mean GV.
                    eval_gv_trg_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_gv_src_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_gv_trg_src_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_src_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_gv_src_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_src[i], axis=0))-np.log(gv_src_mean))))
                    eval_gv_trg_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_src[i], axis=0))-np.log(gv_src_mean))))
                    eval_gv_src_trg_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_trg_src[i], axis=0))-np.log(gv_src_mean))))
                    text_log += "[%d] %.3f %.3f %.3f %.3f %.3f %.3f ; %.3f %.3f %.3f %.3f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ;; " % (
                        i+1, np.mean(loss_mcd_trg_trg[i]), np.mean(loss_mcd_trg_src_trg[i]), np.mean(loss_mcd_trg_src[i]),
                        np.mean(loss_mcd_src_src[i]), np.mean(loss_mcd_src_trg_src[i]), np.mean(loss_mcd_src_trg[i]),
                        np.mean(loss_lat_trg[i]), np.mean(loss_lat_trg_cv[i]), np.mean(loss_lat_src[i]), np.mean(loss_lat_src_cv[i]),
                        eval_gv_trg_trg[i], np.mean(mcdpow_trg_trg[i]), np.mean(mcd_trg_trg[i]),
                        eval_gv_trg_src_trg[i], np.mean(mcdpow_trg_src_trg[i]), np.mean(mcd_trg_src_trg[i]),
                        eval_gv_trg_src[i], np.mean(mcdpow_trg_src[i]), np.std(mcdpow_trg_src[i]), np.mean(mcd_trg_src[i]), np.std(mcd_trg_src[i]),
                        np.mean(lat_dist_trgsrc1[i]), np.mean(lat_dist_trgsrc2[i]), eval_gv_src_src[i], np.mean(mcdpow_src_src[i]), np.mean(mcd_src_src[i]),
                        eval_gv_src_trg_src[i], np.mean(mcdpow_src_trg_src[i]), np.mean(mcd_src_trg_src[i]),
                        eval_gv_src_trg[i], np.mean(mcdpow_src_trg[i]), np.std(mcdpow_src_trg[i]), np.mean(mcd_src_trg[i]), np.std(mcd_src_trg[i]),
                        np.mean(lat_dist_srctrg1[i]), np.mean(lat_dist_srctrg2[i]))
                logging.info("(EPOCH:%d) average optimization loss = %s (%.3f min., %.3f sec / batch)" % (epoch_idx + 1, text_log, np.sum(total) / 60.0, np.mean(total)))
                logging.info("estimated training required time = {0.days:02}:{0.hours:02}:{0.minutes:02}:{0.seconds:02}".format(relativedelta(seconds=int((args.epoch_count - (epoch_idx + 1)) * np.sum(total)))))
                # Switch to evaluation mode and freeze all parameters while computing
                # held-out metrics (gradients are re-enabled after evaluation).
                model_encoder.eval()
                model_decoder.eval()
                for param in model_encoder.parameters():
                    param.requires_grad = False
                for param in model_decoder.parameters():
                    param.requires_grad = False
                # compute loss in evaluation data
                # Reset all per-epoch accumulators; naming encodes the conversion chain,
                # e.g. src_trg_src = source -> target -> source (cyclic reconstruction).
                loss = []
                loss_mcd_trg_trg = []
                loss_mcd_trg_src_trg = []
                loss_mcd_trg_src = []
                loss_mcd_src_src = []
                loss_mcd_src_trg_src = []
                loss_mcd_src_trg = []
                loss_lat_src = []
                loss_lat_trg = []
                loss_lat_src_cv = []
                loss_lat_trg_cv = []
                gv_trg_trg = []
                mcdpow_trg_trg = []
                mcd_trg_trg = []
                gv_trg_src_trg = []
                mcdpow_trg_src_trg = []
                mcd_trg_src_trg = []
                gv_trg_src = []
                mcdpow_trg_src = []
                mcd_trg_src = []
                lat_dist_trgsrc1 = []
                lat_dist_trgsrc2 = []
                gv_src_src = []
                mcdpow_src_src = []
                mcd_src_src = []
                gv_src_trg_src = []
                mcdpow_src_trg_src = []
                mcd_src_trg_src = []
                gv_src_trg = []
                mcdpow_src_trg = []
                mcd_src_trg = []
                lat_dist_srctrg1 = []
                lat_dist_srctrg2 = []
                # One sub-list per evaluated cycle.
                for i in range(n_ev_cyc):
                    loss_mcd_trg_trg.append([])
                    loss_mcd_trg_src_trg.append([])
                    loss_mcd_trg_src.append([])
                    loss_mcd_src_src.append([])
                    loss_mcd_src_trg_src.append([])
                    loss_mcd_src_trg.append([])
                    loss_lat_src.append([])
                    loss_lat_trg.append([])
                    loss_lat_src_cv.append([])
                    loss_lat_trg_cv.append([])
                    gv_trg_trg.append([])
                    mcdpow_trg_trg.append([])
                    mcd_trg_trg.append([])
                    gv_trg_src_trg.append([])
                    mcdpow_trg_src_trg.append([])
                    mcd_trg_src_trg.append([])
                    gv_trg_src.append([])
                    mcdpow_trg_src.append([])
                    mcd_trg_src.append([])
                    lat_dist_trgsrc1.append([])
                    lat_dist_trgsrc2.append([])
                    gv_src_src.append([])
                    mcdpow_src_src.append([])
                    mcd_src_src.append([])
                    gv_src_trg_src.append([])
                    mcdpow_src_trg_src.append([])
                    mcd_src_trg_src.append([])
                    gv_src_trg.append([])
                    mcdpow_src_trg.append([])
                    mcd_src_trg.append([])
                    lat_dist_srctrg1.append([])
                    lat_dist_srctrg2.append([])
                total = []
                iter_count = 0
                logging.info("Evaluation data")
                # Evaluation pass over the held-out set; no gradients are tracked.
                with torch.no_grad():
                    while True:
                        start = time.time()
                        # Draw paired evaluation batches for both conversion directions;
                        # a negative cycle index means the eval set is exhausted.
                        batch_src_, batch_src_src_code_, batch_src_trg_code_, batch_src_trg_, batch_cv_src_, c_idx, utt_idx, spcidx_src_, spcidx_src_trg_, featfile_src_, featfile_src_trg_, flens_src_, flens_src_trg_, flens_spc_src_, flens_spc_src_trg_, n_batch_utt_ = next(generator_eval_src)
                        batch_trg_, batch_trg_trg_code_, batch_trg_src_code_, batch_trg_src_, batch_cv_trg_, c_idx, utt_idx, spcidx_trg_, spcidx_trg_src_, featfile_trg_, featfile_trg_src_, flens_trg_, flens_trg_src_, flens_spc_trg_, flens_spc_trg_src_, n_batch_utt_ = next(generator_eval_trg)
                        if c_idx < 0:
                            break
                        for i in range(n_batch_utt_):
                            logging.info("%s %s %d %d %d %d" % (featfile_src_[i], featfile_src_trg_[i], flens_src_[i], flens_src_trg_[i], flens_spc_src_[i], flens_spc_src_trg_[i]))
                            logging.info("%s %s %d %d %d %d" % (featfile_trg_[i], featfile_trg_src_[i], flens_trg_[i], flens_trg_src_[i], flens_spc_trg_[i], flens_spc_trg_src_[i]))
                        # Use the pre-built hidden-state inits sized for a full batch, or the
                        # "_mod" variants when the last batch is smaller.
                        if n_batch_utt_ == args.batch_size_utt_eval:
                            y_in_pp_eval_ = y_in_pp_eval
                            y_in_trg_eval_ = y_in_trg_eval
                            y_in_src_eval_ = y_in_src_eval
                        else:
                            y_in_pp_eval_ = y_in_pp_eval_mod
                            y_in_trg_eval_ = y_in_trg_eval_mod
                            y_in_src_eval_ = y_in_src_eval_mod
                        # Latent trajectories of the reference converted utterances, used below
                        # for DTW-based latent-distance metrics.
                        trj_lat_srctrg, _, _ = model_encoder(batch_src_trg_, y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                        trj_lat_trgsrc, _, _ = model_encoder(batch_trg_src_, y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                        for i in range(n_ev_cyc):
                            batch_mcdpow_src_src[i] = []
                            batch_mcd_src_src[i] = []
                            batch_mcdpow_src_trg_src[i] = []
                            batch_mcd_src_trg_src[i] = []
                            batch_mcdpow_src_trg[i] = []
                            batch_mcd_src_trg[i] = []
                            batch_mcdpow_trg_trg[i] = []
                            batch_mcd_trg_trg[i] = []
                            batch_mcdpow_trg_src_trg[i] = []
                            batch_mcd_trg_src_trg[i] = []
                            batch_mcdpow_trg_src[i] = []
                            batch_mcd_trg_src[i] = []
                            batch_lat_dist_srctrg1[i] = []
                            batch_lat_dist_srctrg2[i] = []
                            batch_lat_dist_trgsrc1[i] = []
                            batch_lat_dist_trgsrc2[i] = []
                            # Cycle i>0 re-encodes the previous cycle's cyclic reconstruction;
                            # cycle 0 starts from the natural input features.
                            if i > 0:
                                batch_lat_trg_[i], _, _ = model_encoder(torch.cat((batch_trg_[:,:,:stdim], batch_trj_trg_src_trg_[i-1]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_lat_src_[i], _, _ = model_encoder(torch.cat((batch_src_[:,:,:stdim], batch_trj_src_trg_src_[i-1]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_trj_trg_trg_[i], _, _ = model_decoder(torch.cat((batch_trg_trg_code_, sampling_vae_batch(batch_lat_trg_[i], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_trj_trg_src_[i], _, _ = model_decoder(torch.cat((batch_trg_src_code_, sampling_vae_batch(batch_lat_trg_[i], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                                batch_trj_src_src_[i], _, _ = model_decoder(torch.cat((batch_src_src_code_, sampling_vae_batch(batch_lat_src_[i], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                                batch_trj_src_trg_[i], _, _ = model_decoder(torch.cat((batch_src_trg_code_, sampling_vae_batch(batch_lat_src_[i], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_lat_trg_src_[i], _, _ = model_encoder(torch.cat((batch_cv_trg_, batch_trj_trg_src_[i]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_lat_src_trg_[i], _, _ = model_encoder(torch.cat((batch_cv_src_, batch_trj_src_trg_[i]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_trj_trg_src_trg_[i], _, _ = model_decoder(torch.cat((batch_trg_trg_code_, sampling_vae_batch(batch_lat_trg_src_[i], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_trj_src_trg_src_[i], _, _ = model_decoder(torch.cat((batch_src_src_code_, sampling_vae_batch(batch_lat_src_trg_[i], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                            else:
                                batch_lat_trg_[0], _, _ = model_encoder(batch_trg_, y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_lat_src_[0], _, _ = model_encoder(batch_src_, y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_trj_trg_trg_[0], _, _ = model_decoder(torch.cat((batch_trg_trg_code_, sampling_vae_batch(batch_lat_trg_[0], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_trj_trg_src_[0], _, _ = model_decoder(torch.cat((batch_trg_src_code_, sampling_vae_batch(batch_lat_trg_[0], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                                batch_trj_src_src_[0], _, _ = model_decoder(torch.cat((batch_src_src_code_, sampling_vae_batch(batch_lat_src_[0], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                                batch_trj_src_trg_[0], _, _ = model_decoder(torch.cat((batch_src_trg_code_, sampling_vae_batch(batch_lat_src_[0], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_lat_trg_src_[0], _, _ = model_encoder(torch.cat((batch_cv_trg_, batch_trj_trg_src_[0]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_lat_src_trg_[0], _, _ = model_encoder(torch.cat((batch_cv_src_, batch_trj_src_trg_[0]),2), y_in_pp_eval_, clamp_vae=True, lat_dim=args.lat_dim)
                                batch_trj_trg_src_trg_[0], _, _ = model_decoder(torch.cat((batch_trg_trg_code_, sampling_vae_batch(batch_lat_trg_src_[0], lat_dim=args.lat_dim)),2), y_in_trg_eval_)
                                batch_trj_src_trg_src_[0], _, _ = model_decoder(torch.cat((batch_src_src_code_, sampling_vae_batch(batch_lat_src_trg_[0], lat_dim=args.lat_dim)),2), y_in_src_eval_)
                            for j in range(n_batch_utt_):
                                # Per-utterance GV over valid frames (feature dim 0 excluded --
                                # presumably the power/energy coefficient; verify stdim layout).
                                gv_src_src[i].append(np.var(batch_trj_src_src_[i][j,:flens_src_[j],1:].cpu().data.numpy(), axis=0))
                                gv_src_trg[i].append(np.var(batch_trj_src_trg_[i][j,:flens_src_[j],1:].cpu().data.numpy(), axis=0))
                                gv_src_trg_src[i].append(np.var(batch_trj_src_trg_src_[i][j,:flens_src_[j],1:].cpu().data.numpy(), axis=0))
                                gv_trg_trg[i].append(np.var(batch_trj_trg_trg_[i][j,:flens_trg_[j],1:].cpu().data.numpy(), axis=0))
                                gv_trg_src[i].append(np.var(batch_trj_trg_src_[i][j,:flens_trg_[j],1:].cpu().data.numpy(), axis=0))
                                gv_trg_src_trg[i].append(np.var(batch_trj_trg_src_trg_[i][j,:flens_trg_[j],1:].cpu().data.numpy(), axis=0))
                                # Latent distance src->trg: DTW-align latent trajectories on speech
                                # frames only, then RMSE (dist1) and DTW cost (dist2), symmetrized
                                # over both alignment directions.
                                trj_lat_srctrg_ = np.array(torch.index_select(trj_lat_srctrg[j],0,spcidx_src_trg_[j,:flens_spc_src_trg_[j]]).cpu().data.numpy(), dtype=np.float64)
                                trj_lat_src_ = np.array(torch.index_select(batch_lat_src_[0][j],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64)
                                aligned_lat_srctrg1, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_)
                                tmp_batch_lat_dist_srctrg1 = np.mean(np.sqrt(np.mean((aligned_lat_srctrg1-trj_lat_srctrg_)**2, axis=0)))
                                _, _, tmp_batch_lat_cdist_srctrg1, _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_, mcd=0)
                                aligned_lat_srctrg2, _, _, _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_)
                                tmp_batch_lat_dist_srctrg2 = np.mean(np.sqrt(np.mean((aligned_lat_srctrg2-trj_lat_src_)**2, axis=0)))
                                _, _, tmp_batch_lat_cdist_srctrg2, _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_, mcd=0)
                                tmp_batch_lat_dist_srctrg1 = (tmp_batch_lat_dist_srctrg1+tmp_batch_lat_dist_srctrg2)/2
                                lat_dist_srctrg1[0].append(tmp_batch_lat_dist_srctrg1)
                                tmp_batch_lat_dist_srctrg2 = (tmp_batch_lat_cdist_srctrg1+tmp_batch_lat_cdist_srctrg2)/2
                                lat_dist_srctrg2[0].append(tmp_batch_lat_dist_srctrg2)
                                batch_lat_dist_srctrg1[0].append(tmp_batch_lat_dist_srctrg1)
                                batch_lat_dist_srctrg2[0].append(tmp_batch_lat_dist_srctrg2)
                                # Same latent-distance computation for the trg->src direction.
                                trj_lat_trgsrc_ = np.array(torch.index_select(trj_lat_trgsrc[j],0,spcidx_trg_src_[j,:flens_spc_trg_src_[j]]).cpu().data.numpy(), dtype=np.float64)
                                trj_lat_trg_ = np.array(torch.index_select(batch_lat_trg_[0][j],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64)
                                aligned_lat_trgsrc1, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trg_, trj_lat_trgsrc_)
                                tmp_batch_lat_dist_trgsrc1 = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc1-trj_lat_trgsrc_)**2, axis=0)))
                                _, _, tmp_batch_lat_cdist_trgsrc1, _ = dtw.dtw_org_to_trg(trj_lat_trgsrc_, trj_lat_trg_, mcd=0)
                                aligned_lat_trgsrc2, _, _, _ = dtw.dtw_org_to_trg(trj_lat_trgsrc_, trj_lat_trg_)
                                tmp_batch_lat_dist_trgsrc2 = np.mean(np.sqrt(np.mean((aligned_lat_trgsrc2-trj_lat_trg_)**2, axis=0)))
                                _, _, tmp_batch_lat_cdist_trgsrc2, _ = dtw.dtw_org_to_trg(trj_lat_trg_, trj_lat_trgsrc_, mcd=0)
                                tmp_batch_lat_dist_trgsrc1 = (tmp_batch_lat_dist_trgsrc1+tmp_batch_lat_dist_trgsrc2)/2
                                lat_dist_trgsrc1[0].append(tmp_batch_lat_dist_trgsrc1)
                                tmp_batch_lat_dist_trgsrc2 = (tmp_batch_lat_cdist_trgsrc1+tmp_batch_lat_cdist_trgsrc2)/2
                                lat_dist_trgsrc2[0].append(tmp_batch_lat_dist_trgsrc2)
                                batch_lat_dist_trgsrc1[0].append(tmp_batch_lat_dist_trgsrc1)
                                batch_lat_dist_trgsrc2[0].append(tmp_batch_lat_dist_trgsrc2)
                                # MCD metrics on speech frames: stdim slice includes the 0th (power)
                                # coefficient ("mcdpow"), stdim_ excludes it ("mcd").
                                batch_trg_spc_ = np.array(torch.index_select(batch_trg_[j,:,stdim:],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64)
                                batch_trg_spc__ = np.array(torch.index_select(batch_trg_[j,:,stdim_:],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64)
                                tmp_batch_mcdpow_trg_trg, _ = dtw.calc_mcd(batch_trg_spc_, np.array(torch.index_select(batch_trj_trg_trg_[i][j],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcd_trg_trg, _ = dtw.calc_mcd(batch_trg_spc__, np.array(torch.index_select(batch_trj_trg_trg_[i][j,:,1:],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcdpow_trg_src_trg, _ = dtw.calc_mcd(batch_trg_spc_, np.array(torch.index_select(batch_trj_trg_src_trg_[i][j],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcd_trg_src_trg, _ = dtw.calc_mcd(batch_trg_spc__, np.array(torch.index_select(batch_trj_trg_src_trg_[i][j,:,1:],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                # Cross-speaker conversions need DTW alignment before MCD because
                                # source and reference utterances differ in timing.
                                _, _, tmp_batch_mcdpow_trg_src, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_trg_src_[i][j],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trg_src_[j,:,stdim:],0,spcidx_trg_src_[j,:flens_spc_trg_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                _, _, tmp_batch_mcd_trg_src, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_trg_src_[i][j,:,1:],0,spcidx_trg_[j,:flens_spc_trg_[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trg_src_[j,:,stdim_:],0,spcidx_trg_src_[j,:flens_spc_trg_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                batch_src_spc_ = np.array(torch.index_select(batch_src_[j,:,stdim:],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64)
                                batch_src_spc__ = np.array(torch.index_select(batch_src_[j,:,stdim_:],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64)
                                tmp_batch_mcdpow_src_src, _ = dtw.calc_mcd(batch_src_spc_, np.array(torch.index_select(batch_trj_src_src_[i][j],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcd_src_src, _ = dtw.calc_mcd(batch_src_spc__, np.array(torch.index_select(batch_trj_src_src_[i][j,:,1:],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcdpow_src_trg_src, _ = dtw.calc_mcd(batch_src_spc_, np.array(torch.index_select(batch_trj_src_trg_src_[i][j],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                tmp_batch_mcd_src_trg_src, _ = dtw.calc_mcd(batch_src_spc__, np.array(torch.index_select(batch_trj_src_trg_src_[i][j,:,1:],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64))
                                _, _, tmp_batch_mcdpow_src_trg, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_src_trg_[i][j],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg_[j,:,stdim:],0,spcidx_src_trg_[j,:flens_spc_src_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                _, _, tmp_batch_mcd_src_trg, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_src_trg_[i][j,:,1:],0,spcidx_src_[j,:flens_spc_src_[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg_[j,:,stdim_:],0,spcidx_src_trg_[j,:flens_spc_src_trg_[j]]).cpu().data.numpy(), dtype=np.float64))
                                # Record per-utterance metrics: batch_* for this batch's log line,
                                # plain lists for the epoch-level averages.
                                batch_mcdpow_trg_trg[0].append(tmp_batch_mcdpow_trg_trg)
                                batch_mcd_trg_trg[0].append(tmp_batch_mcd_trg_trg)
                                batch_mcdpow_trg_src_trg[0].append(tmp_batch_mcdpow_trg_src_trg)
                                batch_mcd_trg_src_trg[0].append(tmp_batch_mcd_trg_src_trg)
                                batch_mcdpow_trg_src[0].append(tmp_batch_mcdpow_trg_src)
                                batch_mcd_trg_src[0].append(tmp_batch_mcd_trg_src)
                                batch_mcdpow_src_src[0].append(tmp_batch_mcdpow_src_src)
                                batch_mcd_src_src[0].append(tmp_batch_mcd_src_src)
                                batch_mcdpow_src_trg_src[0].append(tmp_batch_mcdpow_src_trg_src)
                                batch_mcd_src_trg_src[0].append(tmp_batch_mcd_src_trg_src)
                                batch_mcdpow_src_trg[0].append(tmp_batch_mcdpow_src_trg)
                                batch_mcd_src_trg[0].append(tmp_batch_mcd_src_trg)
                                mcdpow_trg_trg[i].append(tmp_batch_mcdpow_trg_trg)
                                mcd_trg_trg[i].append(tmp_batch_mcd_trg_trg)
                                mcdpow_trg_src_trg[i].append(tmp_batch_mcdpow_trg_src_trg)
                                mcd_trg_src_trg[i].append(tmp_batch_mcd_trg_src_trg)
                                mcdpow_trg_src[i].append(tmp_batch_mcdpow_trg_src)
                                mcd_trg_src[i].append(tmp_batch_mcd_trg_src)
                                mcdpow_src_src[i].append(tmp_batch_mcdpow_src_src)
                                mcd_src_src[i].append(tmp_batch_mcd_src_src)
                                mcdpow_src_trg_src[i].append(tmp_batch_mcdpow_src_trg_src)
                                mcd_src_trg_src[i].append(tmp_batch_mcd_src_trg_src)
                                mcdpow_src_trg[i].append(tmp_batch_mcdpow_src_trg)
                                mcd_src_trg[i].append(tmp_batch_mcd_src_trg)
                                logging.info("batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f" % (
                                    featfile_trg_[j], featfile_trg_src_[j], tmp_batch_mcdpow_trg_trg, tmp_batch_mcd_trg_trg, tmp_batch_mcdpow_trg_src_trg, tmp_batch_mcd_trg_src_trg,
                                    tmp_batch_mcdpow_trg_src, tmp_batch_mcd_trg_src, tmp_batch_lat_dist_trgsrc1, tmp_batch_lat_dist_trgsrc2))
                                logging.info("batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f" % (
                                    featfile_src_[j], featfile_src_trg_[j], tmp_batch_mcdpow_src_src, tmp_batch_mcd_src_src, tmp_batch_mcdpow_src_trg_src, tmp_batch_mcd_src_trg_src,
                                    tmp_batch_mcdpow_src_trg, tmp_batch_mcd_src_trg, tmp_batch_lat_dist_srctrg1, tmp_batch_lat_dist_srctrg2))
                            # Collapse this batch's per-utterance metric lists to their means
                            # (the batch_* entries are reused as scalars in the log line below).
                            batch_mcdpow_src_src[i] = np.mean(batch_mcdpow_src_src[i])
                            batch_mcd_src_src[i] = np.mean(batch_mcd_src_src[i])
                            batch_mcdpow_src_trg_src[i] = np.mean(batch_mcdpow_src_trg_src[i])
                            batch_mcd_src_trg_src[i] = np.mean(batch_mcd_src_trg_src[i])
                            batch_mcdpow_src_trg[i] = np.mean(batch_mcdpow_src_trg[i])
                            batch_mcd_src_trg[i] = np.mean(batch_mcd_src_trg[i])
                            batch_mcdpow_trg_trg[i] = np.mean(batch_mcdpow_trg_trg[i])
                            batch_mcd_trg_trg[i] = np.mean(batch_mcd_trg_trg[i])
                            batch_mcdpow_trg_src_trg[i] = np.mean(batch_mcdpow_trg_src_trg[i])
                            batch_mcd_trg_src_trg[i] = np.mean(batch_mcd_trg_src_trg[i])
                            batch_mcdpow_trg_src[i] = np.mean(batch_mcdpow_trg_src[i])
                            batch_mcd_trg_src[i] = np.mean(batch_mcd_trg_src[i])
                            batch_lat_dist_srctrg1[i] = np.mean(batch_lat_dist_srctrg1[i])
                            batch_lat_dist_srctrg2[i] = np.mean(batch_lat_dist_srctrg2[i])
                            batch_lat_dist_trgsrc1[i] = np.mean(batch_lat_dist_trgsrc1[i])
                            batch_lat_dist_trgsrc2[i] = np.mean(batch_lat_dist_trgsrc2[i])
                            # Per-utterance training losses (MCD reconstruction + VAE latent
                            # regularization), computed over valid frames only.
                            for j in range(n_batch_utt_):
                                _, tmp_batch_loss_mcd_trg_trg, _ = criterion_mcd(batch_trj_trg_trg_[i][j,:flens_trg_[j]], batch_trg_[j,:flens_trg_[j],stdim:], L2=False, GV=False)
                                _, tmp_batch_loss_mcd_trg_src, _ = criterion_mcd(batch_trj_trg_src_[i][j,:flens_trg_[j]], batch_trg_[j,:flens_trg_[j],stdim:], L2=False, GV=False)
                                _, tmp_batch_loss_mcd_src_src, _ = criterion_mcd(batch_trj_src_src_[i][j,:flens_src_[j]], batch_src_[j,:flens_src_[j],stdim:], L2=False, GV=False)
                                _, tmp_batch_loss_mcd_src_trg, _ = criterion_mcd(batch_trj_src_trg_[i][j,:flens_src_[j]], batch_src_[j,:flens_src_[j],stdim:], L2=False, GV=False)
                                _, tmp_batch_loss_mcd_trg_src_trg, _ = criterion_mcd(batch_trj_trg_src_trg_[i][j,:flens_trg_[j]], batch_trg_[j,:flens_trg_[j],stdim:], L2=False, GV=False)
                                _, tmp_batch_loss_mcd_src_trg_src, _ = criterion_mcd(batch_trj_src_trg_src_[i][j,:flens_src_[j]], batch_src_[j,:flens_src_[j],stdim:], L2=False, GV=False)
                                tmp_batch_loss_lat_trg = loss_vae(batch_lat_trg_[i][j,:flens_trg_[j]], lat_dim=args.lat_dim)
                                tmp_batch_loss_lat_src = loss_vae(batch_lat_src_[i][j,:flens_src_[j]], lat_dim=args.lat_dim)
                                tmp_batch_loss_lat_trg_cv = loss_vae(batch_lat_trg_src_[i][j,:flens_trg_[j]], lat_dim=args.lat_dim)
                                tmp_batch_loss_lat_src_cv = loss_vae(batch_lat_src_trg_[i][j,:flens_src_[j]], lat_dim=args.lat_dim)
                                # Stack per-utterance losses into one tensor per metric.
                                if j > 0:
                                    batch_loss_mcd_trg_trg[i] = torch.cat((batch_loss_mcd_trg_trg[i], tmp_batch_loss_mcd_trg_trg.unsqueeze(0)))
                                    batch_loss_mcd_trg_src[i] = torch.cat((batch_loss_mcd_trg_src[i], tmp_batch_loss_mcd_trg_src.unsqueeze(0)))
                                    batch_loss_mcd_src_src[i] = torch.cat((batch_loss_mcd_src_src[i], tmp_batch_loss_mcd_src_src.unsqueeze(0)))
                                    batch_loss_mcd_src_trg[i] = torch.cat((batch_loss_mcd_src_trg[i], tmp_batch_loss_mcd_src_trg.unsqueeze(0)))
                                    batch_loss_mcd_trg_src_trg[i] = torch.cat((batch_loss_mcd_trg_src_trg[i], tmp_batch_loss_mcd_trg_src_trg.unsqueeze(0)))
                                    batch_loss_mcd_src_trg_src[i] = torch.cat((batch_loss_mcd_src_trg_src[i], tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)))
                                    batch_loss_lat_trg[i] = torch.cat((batch_loss_lat_trg[i], tmp_batch_loss_lat_trg.unsqueeze(0)))
                                    batch_loss_lat_src[i] = torch.cat((batch_loss_lat_src[i], tmp_batch_loss_lat_src.unsqueeze(0)))
                                    batch_loss_lat_trg_cv[i] = torch.cat((batch_loss_lat_trg_cv[i], tmp_batch_loss_lat_trg_cv.unsqueeze(0)))
                                    batch_loss_lat_src_cv[i] = torch.cat((batch_loss_lat_src_cv[i], tmp_batch_loss_lat_src_cv.unsqueeze(0)))
                                else:
                                    batch_loss_mcd_trg_trg[i] = tmp_batch_loss_mcd_trg_trg.unsqueeze(0)
                                    batch_loss_mcd_trg_src[i] = tmp_batch_loss_mcd_trg_src.unsqueeze(0)
                                    batch_loss_mcd_src_src[i] = tmp_batch_loss_mcd_src_src.unsqueeze(0)
                                    batch_loss_mcd_src_trg[i] = tmp_batch_loss_mcd_src_trg.unsqueeze(0)
                                    batch_loss_mcd_trg_src_trg[i] = tmp_batch_loss_mcd_trg_src_trg.unsqueeze(0)
                                    batch_loss_mcd_src_trg_src[i] = tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)
                                    batch_loss_lat_trg[i] = tmp_batch_loss_lat_trg.unsqueeze(0)
                                    batch_loss_lat_src[i] = tmp_batch_loss_lat_src.unsqueeze(0)
                                    batch_loss_lat_trg_cv[i] = tmp_batch_loss_lat_trg_cv.unsqueeze(0)
                                    batch_loss_lat_src_cv[i] = tmp_batch_loss_lat_src_cv.unsqueeze(0)
                            # Average each loss over the utterances in the batch.
                            batch_loss_mcd_trg_trg[i] = torch.mean(batch_loss_mcd_trg_trg[i])
                            batch_loss_mcd_trg_src_trg[i] = torch.mean(batch_loss_mcd_trg_src_trg[i])
                            batch_loss_mcd_trg_src[i] = torch.mean(batch_loss_mcd_trg_src[i])
                            batch_loss_lat_trg[i] = torch.mean(batch_loss_lat_trg[i])
                            batch_loss_lat_trg_cv[i] = torch.mean(batch_loss_lat_trg_cv[i])
                            batch_loss_mcd_src_src[i] = torch.mean(batch_loss_mcd_src_src[i])
                            batch_loss_mcd_src_trg_src[i] = torch.mean(batch_loss_mcd_src_trg_src[i])
                            batch_loss_mcd_src_trg[i] = torch.mean(batch_loss_mcd_src_trg[i])
                            batch_loss_lat_src[i] = torch.mean(batch_loss_lat_src[i])
                            batch_loss_lat_src_cv[i] = torch.mean(batch_loss_lat_src_cv[i])
                            loss_mcd_trg_trg[i].append(batch_loss_mcd_trg_trg[i].item())
                            loss_mcd_trg_src[i].append(batch_loss_mcd_trg_src[i].item())
                            loss_mcd_src_src[i].append(batch_loss_mcd_src_src[i].item())
                            loss_mcd_src_trg[i].append(batch_loss_mcd_src_trg[i].item())
                            loss_mcd_trg_src_trg[i].append(batch_loss_mcd_trg_src_trg[i].item())
                            loss_mcd_src_trg_src[i].append(batch_loss_mcd_src_trg_src[i].item())
                            loss_lat_trg[i].append(batch_loss_lat_trg[i].item())
                            loss_lat_src[i].append(batch_loss_lat_src[i].item())
                            loss_lat_trg_cv[i].append(batch_loss_lat_trg_cv[i].item())
                            loss_lat_src_cv[i].append(batch_loss_lat_src_cv[i].item())
                            # Sum losses across cycles; half_cyc drops the cyclic-reconstruction
                            # and converted-latent terms.
                            if i > 0:
                                if not half_cyc:
                                    batch_loss += batch_loss_mcd_trg_trg[i] + batch_loss_mcd_src_src[i] + batch_loss_mcd_trg_src_trg[i] + batch_loss_mcd_src_trg_src[i] + batch_loss_lat_trg[i] + batch_loss_lat_src[i] + batch_loss_lat_trg_cv[i] + batch_loss_lat_src_cv[i]
                                else:
                                    batch_loss += batch_loss_mcd_trg_trg[i] + batch_loss_mcd_src_src[i] + batch_loss_lat_trg[i] + batch_loss_lat_src[i]
                            else:
                                if not half_cyc:
                                    batch_loss = batch_loss_mcd_trg_trg[0] + batch_loss_mcd_src_src[0] + batch_loss_mcd_trg_src_trg[0] + batch_loss_mcd_src_trg_src[0] + batch_loss_lat_trg[0] + batch_loss_lat_src[0] + batch_loss_lat_trg_cv[0] + batch_loss_lat_src_cv[0]
                                else:
                                    batch_loss = batch_loss_mcd_trg_trg[0] + batch_loss_mcd_src_src[0] + batch_loss_lat_trg[0] + batch_loss_lat_src[0]
                        loss.append(batch_loss.item())
                        # Log this evaluation batch's losses and metrics.
                        text_log = "%.3f ;; " % batch_loss.item()
                        for i in range(n_ev_cyc):
                            text_log += "[%d] %.3f %.3f %.3f %.3f %.3f %.3f ; %.3f %.3f %.3f %.3f ; %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f ; %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f ;; " % (
                                i+1, batch_loss_mcd_trg_trg[i].item(), batch_loss_mcd_trg_src_trg[i].item(), batch_loss_mcd_trg_src[i].item(),
                                batch_loss_mcd_src_src[i].item(), batch_loss_mcd_src_trg_src[i].item(), batch_loss_mcd_src_trg[i].item(),
                                batch_loss_lat_trg[i].item(), batch_loss_lat_trg_cv[i].item(), batch_loss_lat_src[i].item(), batch_loss_lat_src_cv[i].item(),
                                batch_mcdpow_trg_trg[i], batch_mcd_trg_trg[i], batch_mcdpow_trg_src_trg[i], batch_mcd_trg_src_trg[i],
                                batch_mcdpow_trg_src[i], batch_mcd_trg_src[i], batch_lat_dist_trgsrc1[i], batch_lat_dist_trgsrc2[i], batch_mcdpow_src_src[i], batch_mcd_src_src[i],
                                batch_mcdpow_src_trg_src[i], batch_mcd_src_trg_src[i], batch_mcdpow_src_trg[i], batch_mcd_src_trg[i], batch_lat_dist_srctrg1[i], batch_lat_dist_srctrg2[i])
                        logging.info("batch eval loss [%d] = %s (%.3f sec)" % (c_idx+1, text_log, time.time() - start))
                        total.append(time.time() - start)
                # Epoch-level evaluation summary: average every metric over all eval batches.
                eval_loss = np.mean(loss)
                for i in range(n_ev_cyc):
                    eval_loss_mcd_trg_trg[i] = np.mean(loss_mcd_trg_trg[i])
                    eval_loss_mcd_trg_src_trg[i] = np.mean(loss_mcd_trg_src_trg[i])
                    eval_loss_mcd_trg_src[i] = np.mean(loss_mcd_trg_src[i])
                    eval_loss_mcd_src_src[i] = np.mean(loss_mcd_src_src[i])
                    eval_loss_mcd_src_trg_src[i] = np.mean(loss_mcd_src_trg_src[i])
                    eval_loss_mcd_src_trg[i] = np.mean(loss_mcd_src_trg[i])
                    eval_loss_lat_src_cv[i] = np.mean(loss_lat_src_cv[i])
                    eval_loss_lat_trg_cv[i] = np.mean(loss_lat_trg_cv[i])
                    eval_loss_lat_src[i] = np.mean(loss_lat_src[i])
                    eval_loss_lat_trg[i] = np.mean(loss_lat_trg[i])
                    # GV error = RMS log-GV difference against the reference speaker mean GV.
                    eval_gv_trg_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_mcdpow_trg_trg[i] = np.mean(mcdpow_trg_trg[i])
                    eval_mcd_trg_trg[i] = np.mean(mcd_trg_trg[i])
                    eval_gv_trg_src_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_src_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_mcdpow_trg_src_trg[i] = np.mean(mcdpow_trg_src_trg[i])
                    eval_mcd_trg_src_trg[i] = np.mean(mcd_trg_src_trg[i])
                    eval_gv_trg_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_trg_src[i], axis=0))-np.log(gv_src_mean))))
                    eval_mcdpow_trg_src[i] = np.mean(mcdpow_trg_src[i])
                    eval_mcdpowstd_trg_src[i] = np.std(mcdpow_trg_src[i])
                    eval_mcd_trg_src[i] = np.mean(mcd_trg_src[i])
                    eval_mcdstd_trg_src[i] = np.std(mcd_trg_src[i])
                    eval_lat_dist_trgsrc1[i] = np.mean(lat_dist_trgsrc1[i])
                    eval_lat_dist_trgsrc2[i] = np.mean(lat_dist_trgsrc2[i])
                    eval_gv_src_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_src[i], axis=0))-np.log(gv_src_mean))))
                    eval_mcdpow_src_src[i] = np.mean(mcdpow_src_src[i])
                    eval_mcd_src_src[i] = np.mean(mcd_src_src[i])
                    eval_gv_src_trg_src[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_trg_src[i], axis=0))-np.log(gv_src_mean))))
                    eval_mcdpow_src_trg_src[i] = np.mean(mcdpow_src_trg_src[i])
                    eval_mcd_src_trg_src[i] = np.mean(mcd_src_trg_src[i])
                    eval_gv_src_trg[i] = np.mean(np.sqrt(np.square(np.log(np.mean(gv_src_trg[i], axis=0))-np.log(gv_trg_mean))))
                    eval_mcdpow_src_trg[i] = np.mean(mcdpow_src_trg[i])
                    eval_mcdpowstd_src_trg[i] = np.std(mcdpow_src_trg[i])
                    eval_mcd_src_trg[i] = np.mean(mcd_src_trg[i])
                    eval_mcdstd_src_trg[i] = np.std(mcd_src_trg[i])
                    eval_lat_dist_srctrg1[i] = np.mean(lat_dist_srctrg1[i])
                    eval_lat_dist_srctrg2[i] = np.mean(lat_dist_srctrg2[i])
                text_log = "%.3f ;; " % eval_loss
                for i in range(n_ev_cyc):
                    text_log += "[%d] %.3f %.3f %.3f %.3f %.3f %.3f ; %.3f %.3f %.3f %.3f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ;; " % (
                        i+1, eval_loss_mcd_trg_trg[i], eval_loss_mcd_trg_src_trg[i], eval_loss_mcd_trg_src[i],
                        eval_loss_mcd_src_src[i], eval_loss_mcd_src_trg_src[i], eval_loss_mcd_src_trg[i],
                        eval_loss_lat_trg[i], eval_loss_lat_trg_cv[i], eval_loss_lat_src[i], eval_loss_lat_src_cv[i],
                        eval_gv_trg_trg[i], eval_mcdpow_trg_trg[i], eval_mcd_trg_trg[i],
                        eval_gv_trg_src_trg[i], eval_mcdpow_trg_src_trg[i], eval_mcd_trg_src_trg[i],
                        eval_gv_trg_src[i], eval_mcdpow_trg_src[i], eval_mcdpowstd_trg_src[i], eval_mcd_trg_src[i], eval_mcdstd_trg_src[i],
                        eval_lat_dist_trgsrc1[i], eval_lat_dist_trgsrc2[i], eval_gv_src_src[i], eval_mcdpow_src_src[i], eval_mcd_src_src[i],
                        eval_gv_src_trg_src[i], eval_mcdpow_src_trg_src[i], eval_mcd_src_trg_src[i],
                        eval_gv_src_trg[i], eval_mcdpow_src_trg[i], eval_mcdpowstd_src_trg[i], eval_mcd_src_trg[i], eval_mcdstd_src_trg[i], eval_lat_dist_srctrg1[i], eval_lat_dist_srctrg2[i])
                logging.info("(EPOCH:%d) average evaluation loss = %s (%.3f min., %.3f sec / batch)" % (epoch_idx + 1, text_log, np.sum(total) / 60.0, np.mean(total)))
                # Best-epoch selection: sum of src->trg MCD means and std-devs (cycle 0) must
                # not exceed the best seen so far; snapshot every metric when it improves.
                if (eval_mcdpow_src_trg[0]+eval_mcdpowstd_src_trg[0]+eval_mcd_src_trg[0]+eval_mcdstd_src_trg[0]) <= (min_eval_mcdpow_src_trg+min_eval_mcdpowstd_src_trg+min_eval_mcd_src_trg+min_eval_mcdstd_src_trg):
                    min_eval_loss_mcd_trg_trg = eval_loss_mcd_trg_trg[0]
                    min_eval_loss_mcd_trg_src_trg = eval_loss_mcd_trg_src_trg[0]
                    min_eval_loss_mcd_trg_src = eval_loss_mcd_trg_src[0]
                    min_eval_loss_mcd_src_src = eval_loss_mcd_src_src[0]
                    min_eval_loss_mcd_src_trg_src = eval_loss_mcd_src_trg_src[0]
                    min_eval_loss_mcd_src_trg = eval_loss_mcd_src_trg[0]
                    min_eval_loss_lat_src = eval_loss_lat_src[0]
                    min_eval_loss_lat_trg = eval_loss_lat_trg[0]
                    min_eval_loss_lat_src_cv = eval_loss_lat_src_cv[0]
                    min_eval_loss_lat_trg_cv = eval_loss_lat_trg_cv[0]
                    min_eval_gv_trg_trg = eval_gv_trg_trg[0]
                    min_eval_mcdpow_trg_trg = eval_mcdpow_trg_trg[0]
                    min_eval_mcd_trg_trg = eval_mcd_trg_trg[0]
                    min_eval_gv_trg_src_trg = eval_gv_trg_src_trg[0]
                    min_eval_mcdpow_trg_src_trg = eval_mcdpow_trg_src_trg[0]
                    min_eval_mcd_trg_src_trg = eval_mcd_trg_src_trg[0]
                    min_eval_gv_trg_src = eval_gv_trg_src[0]
                    min_eval_mcdpow_trg_src = eval_mcdpow_trg_src[0]
                    min_eval_mcdpowstd_trg_src = eval_mcdpowstd_trg_src[0]
                    min_eval_mcd_trg_src = eval_mcd_trg_src[0]
                    min_eval_mcdstd_trg_src = eval_mcdstd_trg_src[0]
                    min_eval_lat_dist_trgsrc1 = eval_lat_dist_trgsrc1[0]
                    min_eval_lat_dist_trgsrc2 = eval_lat_dist_trgsrc2[0]
                    min_eval_gv_src_src = eval_gv_src_src[0]
                    min_eval_mcdpow_src_src = eval_mcdpow_src_src[0]
                    min_eval_mcd_src_src = eval_mcd_src_src[0]
                    min_eval_gv_src_trg_src = eval_gv_src_trg_src[0]
                    min_eval_mcdpow_src_trg_src = eval_mcdpow_src_trg_src[0]
                    min_eval_mcd_src_trg_src = eval_mcd_src_trg_src[0]
                    min_eval_gv_src_trg = eval_gv_src_trg[0]
                    min_eval_mcdpow_src_trg = eval_mcdpow_src_trg[0]
                    min_eval_mcdpowstd_src_trg = eval_mcdpowstd_src_trg[0]
                    min_eval_mcd_src_trg = eval_mcd_src_trg[0]
                    min_eval_mcdstd_src_trg = eval_mcdstd_src_trg[0]
                    min_eval_lat_dist_srctrg1 = eval_lat_dist_srctrg1[0]
                    min_eval_lat_dist_srctrg2 = eval_lat_dist_srctrg2[0]
                    min_idx = epoch_idx
                # Always log the best-so-far metrics and the epoch they came from.
                text_log = "%.3f %.3f %.3f %.3f %.3f %.3f ; %.3f %.3f %.3f %.3f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ; %.6f %.3f dB %.6f dB , %.3f %.3f dB %.3f dB , %.6f %.3f dB (+- %.3f) %.6f dB (+- %.3f) , %.6f %.6f ;; " % (
                    min_eval_loss_mcd_trg_trg, min_eval_loss_mcd_trg_src_trg, min_eval_loss_mcd_trg_src,
                    min_eval_loss_mcd_src_src, min_eval_loss_mcd_src_trg_src, min_eval_loss_mcd_src_trg,
                    min_eval_loss_lat_trg, min_eval_loss_lat_trg_cv, min_eval_loss_lat_src, min_eval_loss_lat_src_cv,
                    min_eval_gv_trg_trg, min_eval_mcdpow_trg_trg, min_eval_mcd_trg_trg,
                    min_eval_gv_trg_src_trg, min_eval_mcdpow_trg_src_trg, min_eval_mcd_trg_src_trg,
                    min_eval_gv_trg_src, min_eval_mcdpow_trg_src, min_eval_mcdpowstd_trg_src, min_eval_mcd_trg_src, min_eval_mcdstd_trg_src,
                    min_eval_lat_dist_trgsrc1, min_eval_lat_dist_trgsrc2, min_eval_gv_src_src, min_eval_mcdpow_src_src, min_eval_mcd_src_src,
                    min_eval_gv_src_trg_src, min_eval_mcdpow_src_trg_src, min_eval_mcd_src_trg_src,
                    min_eval_gv_src_trg, min_eval_mcdpow_src_trg, min_eval_mcdpowstd_src_trg, min_eval_mcd_src_trg, min_eval_mcdstd_src_trg, min_eval_lat_dist_srctrg1, min_eval_lat_dist_srctrg2)
                logging.info("min_eval_acc= %s min_idx=%d" % (text_log, min_idx+1))
                # Re-initialize all accumulators for the next training epoch (full args.n_cyc
                # cycles this time, not just the n_ev_cyc evaluated ones).
                loss = []
                loss_mcd_trg_trg = []
                loss_mcd_trg_src_trg = []
                loss_mcd_trg_src = []
                loss_mcd_src_src = []
                loss_mcd_src_trg_src = []
                loss_mcd_src_trg = []
                loss_lat_src = []
                loss_lat_trg = []
                loss_lat_src_cv = []
                loss_lat_trg_cv = []
                gv_trg_trg = []
                mcdpow_trg_trg = []
                mcd_trg_trg = []
                gv_trg_src_trg = []
                mcdpow_trg_src_trg = []
                mcd_trg_src_trg = []
                gv_trg_src = []
                mcdpow_trg_src = []
                mcd_trg_src = []
                lat_dist_trgsrc1 = []
                lat_dist_trgsrc2 = []
                gv_src_src = []
                mcdpow_src_src = []
                mcd_src_src = []
                gv_src_trg_src = []
                mcdpow_src_trg_src = []
                mcd_src_trg_src = []
                gv_src_trg = []
                mcdpow_src_trg = []
                mcd_src_trg = []
                lat_dist_srctrg1 = []
                lat_dist_srctrg2 = []
                for i in range(args.n_cyc):
                    loss_mcd_trg_trg.append([])
                    loss_mcd_trg_src_trg.append([])
                    loss_mcd_trg_src.append([])
                    loss_mcd_src_src.append([])
                    loss_mcd_src_trg_src.append([])
                    loss_mcd_src_trg.append([])
                    loss_lat_src.append([])
                    loss_lat_trg.append([])
                    loss_lat_src_cv.append([])
                    loss_lat_trg_cv.append([])
                    gv_trg_trg.append([])
                    mcdpow_trg_trg.append([])
                    mcd_trg_trg.append([])
                    gv_trg_src_trg.append([])
                    mcdpow_trg_src_trg.append([])
                    mcd_trg_src_trg.append([])
                    gv_trg_src.append([])
                    mcdpow_trg_src.append([])
                    mcd_trg_src.append([])
                    lat_dist_trgsrc1.append([])
                    lat_dist_trgsrc2.append([])
                    gv_src_src.append([])
                    mcdpow_src_src.append([])
                    mcd_src_src.append([])
                    gv_src_trg_src.append([])
                    mcdpow_src_trg_src.append([])
                    mcd_src_trg_src.append([])
                    gv_src_trg.append([])
                    mcdpow_src_trg.append([])
                    mcd_src_trg.append([])
                    lat_dist_srctrg1.append([])
                    lat_dist_srctrg2.append([])
                total = []
                iter_count = 0
                epoch_idx += 1
                # Restore the RNG states captured before evaluation so the training stream
                # is unaffected by any randomness consumed during eval.
                np.random.set_state(numpy_random_state)
                torch.set_rng_state(torch_random_state)
                # Back to training mode with gradients on, except the fixed input/output
                # feature-scaling layers, which stay frozen.
                model_encoder.train()
                model_decoder.train()
                for param in model_encoder.parameters():
                    param.requires_grad = True
                for param in model_decoder.parameters():
                    param.requires_grad = True
                for param in model_encoder.scale_in.parameters():
                    param.requires_grad = False
                for param in model_decoder.scale_out.parameters():
                    param.requires_grad = False
                # start next epoch
                if epoch_idx < args.epoch_count:
                    start = time.time()
                    logging.info("==%d EPOCH==" % (epoch_idx+1))
                    logging.info("Training data")
# feedforward and backpropagate current batch
if epoch_idx < args.epoch_count:
logging.info("%d iteration [%d]" % (iter_idx+1, epoch_idx+1))
if args.batch_size > 0: # frame-length mini-batch
for i in range(n_batch_utt):
logging.info("%s %s %d %d %d %d %d %d %d %d %d %d" % (
featfile_src[i], featfile_src_trg[i], flens_src[i], flens_src_trg[i], flens_spc_src[i], flens_spc_src_trg[i],
src_idx_s, src_idx_e, spcidx_src_s_idx[i], spcidx_src_e_idx[i], spcidx_src[i,spcidx_src_s_idx[i]].item(), spcidx_src[i,spcidx_src_e_idx[i]].item()))
if src_idx_s > 0 and prev_featfile_src == featfile_src and iter_count > 0:
for i in range(args.n_cyc):
if i > 0:
batch_lat_src[i], y_in_pp_src[i], h_in_pp_src[i] = model_encoder(torch.cat((batch_src[:,src_idx_s:src_idx_e+1,:stdim], batch_trj_src_trg_src[i-1]),2), Variable(y_in_pp_src[i].data).detach(), h_in=Variable(h_in_pp_src[i].data).detach(), do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_src[i], y_in_src_src[i], h_in_src_src[i] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_src[i].data).detach(), h_in=Variable(h_in_src_src[i].data).detach(), do=True)
batch_trj_src_trg[i], y_in_src_trg[i], h_in_src_trg[i] = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_trg[i].data).detach(), h_in=Variable(h_in_src_trg[i].data).detach(), do=True)
batch_lat_src_trg[i], y_in_pp_src_trg[i], h_in_pp_src_trg[i] = model_encoder(torch.cat((batch_cv_src[:,src_idx_s:src_idx_e+1], batch_trj_src_trg[i]),2), Variable(y_in_pp_src_trg[i].data).detach(), h_in=Variable(h_in_pp_src_trg[i].data).detach(), do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_trg_src[i], y_in_src_trg_src[i], h_in_src_trg_src[i] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[i], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_trg_src[i].data).detach(), h_in=Variable(h_in_src_trg_src[i].data).detach(), do=True)
else:
batch_lat_src[0], y_in_pp_src[0], h_in_pp_src[0] = model_encoder(batch_src[:,src_idx_s:src_idx_e+1], Variable(y_in_pp_src[0].data).detach(), h_in=Variable(h_in_pp_src[0].data).detach(), do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_src[0], y_in_src_src[0], h_in_src_src[0] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_src[0].data).detach(), h_in=Variable(h_in_src_src[0].data).detach(), do=True)
batch_trj_src_trg[0], y_in_src_trg[0], h_in_src_trg[0] = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_trg[0].data).detach(), h_in=Variable(h_in_src_trg[0].data).detach(), do=True)
batch_lat_src_trg[0], y_in_pp_src_trg[0], h_in_pp_src_trg[0] = model_encoder(torch.cat((batch_cv_src[:,src_idx_s:src_idx_e+1], batch_trj_src_trg[0]),2), Variable(y_in_pp_src_trg[0].data).detach(), h_in=Variable(h_in_pp_src_trg[0].data).detach(), do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_trg_src[0], y_in_src_trg_src[0], h_in_src_trg_src[0] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[0], lat_dim=args.lat_dim, training=True)),2), Variable(y_in_src_trg_src[0].data).detach(), h_in=Variable(h_in_src_trg_src[0].data).detach(), do=True)
tmp_src_src = np.concatenate((tmp_src_src, batch_trj_src_src[0][:,:,1:].cpu().data.numpy()), axis=1)
tmp_src_trg = np.concatenate((tmp_src_trg, batch_trj_src_trg[0][:,:,1:].cpu().data.numpy()), axis=1)
tmp_src_trg_src = np.concatenate((tmp_src_trg_src, batch_trj_src_trg_src[0][:,:,1:].cpu().data.numpy()), axis=1)
trj_src_trg = torch.cat((trj_src_trg, batch_trj_src_trg[0]),1)
trj_lat_src = torch.cat((trj_lat_src, batch_lat_src[0]),1)
else:
if n_batch_utt == args.batch_size_utt:
y_in_pp_ = y_in_pp
y_in_src_ = y_in_src
y_in_trg_ = y_in_trg
else:
y_in_pp_ = y_in_pp_mod
y_in_src_ = y_in_src_mod
y_in_trg_ = y_in_trg_mod
for i in range(args.n_cyc):
if i > 0:
batch_lat_src[i], y_in_pp_src[i], h_in_pp_src[i] = model_encoder(torch.cat((batch_src[:,src_idx_s:src_idx_e+1,:stdim], batch_trj_src_trg_src[i-1]),2), y_in_pp_, do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_src[i], y_in_src_src[i], h_in_src_src[i] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim, training=True)),2), y_in_src_, do=True)
batch_trj_src_trg[i], y_in_src_trg[i], h_in_src_trg[i] = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim, training=True)),2), y_in_trg_, do=True)
batch_lat_src_trg[i], y_in_pp_src_trg[i], h_in_pp_src_trg[i] = model_encoder(torch.cat((batch_cv_src[:,src_idx_s:src_idx_e+1], batch_trj_src_trg[i]),2), y_in_pp_, do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_trg_src[i], y_in_src_trg_src[i], h_in_src_trg_src[i] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[i], lat_dim=args.lat_dim, training=True)),2), y_in_src_, do=True)
else:
batch_lat_src[0], y_in_pp_src[0], h_in_pp_src[0] = model_encoder(batch_src[:,src_idx_s:src_idx_e+1], y_in_pp_, do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_src[0], y_in_src_src[0], h_in_src_src[0] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim, training=True)),2), y_in_src_, do=True)
batch_trj_src_trg[0], y_in_src_trg[0], h_in_src_trg[0] = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim, training=True)),2), y_in_trg_, do=True)
batch_lat_src_trg[0], y_in_pp_src_trg[0], h_in_pp_src_trg[0] = model_encoder(torch.cat((batch_cv_src[:,src_idx_s:src_idx_e+1], batch_trj_src_trg[0]),2), y_in_pp_, do=True, clamp_vae=True, lat_dim=args.lat_dim)
batch_trj_src_trg_src[0], y_in_src_trg_src[0], h_in_src_trg_src[0] = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[0], lat_dim=args.lat_dim, training=True)),2), y_in_src_, do=True)
if iter_count > 0:
for j in range(n_batch_utt):
if os.path.basename(os.path.dirname(prev_featfile_src[j])) == args.spk_src:
gv_src_src[i].append(np.var(tmp_src_src[j,:prev_flens_src[j]], axis=0))
gv_src_trg[i].append(np.var(tmp_src_trg[j,:prev_flens_src[j]], axis=0))
gv_src_trg_src[i].append(np.var(tmp_src_trg_src[j,:prev_flens_src[j]], axis=0))
else:
gv_trg_trg[i].append(np.var(tmp_src_src[j,:prev_flens_src[j]], axis=0))
gv_trg_src[i].append(np.var(tmp_src_trg[j,:prev_flens_src[j]], axis=0))
gv_trg_src_trg[i].append(np.var(tmp_src_trg_src[j,:prev_flens_src[j]], axis=0))
tmp_src_src = batch_trj_src_src[0][:,:,1:].cpu().data.numpy()
tmp_src_trg = batch_trj_src_trg[0][:,:,1:].cpu().data.numpy()
tmp_src_trg_src = batch_trj_src_trg_src[0][:,:,1:].cpu().data.numpy()
trj_src_trg = batch_trj_src_trg[0]
trj_lat_src = batch_lat_src[0]
prev_featfile_src = featfile_src
if len(select_utt_idx) > 0:
for i in range(args.n_cyc):
batch_mcdpow_src_src[i] = []
batch_mcd_src_src[i] = []
batch_mcdpow_src_trg_src[i] = []
batch_mcd_src_trg_src[i] = []
for i in range(args.n_cyc):
for k, j in enumerate(select_utt_idx):
src_idx_e_ = src_idx_s + flen_acc[j]
_, tmp_batch_loss_mcd_src_src, _ = criterion_mcd(batch_trj_src_src[i][j,:flen_acc[j]], batch_src[j,src_idx_s:src_idx_e_,stdim:], L2=False, GV=False)
_, tmp_batch_loss_mcd_src_trg_src, _ = criterion_mcd(batch_trj_src_trg_src[i][j,:flen_acc[j]], batch_src[j,src_idx_s:src_idx_e_,stdim:], L2=False, GV=False)
_, tmp_batch_loss_mcd_src_trg, _ = criterion_mcd(batch_trj_src_trg[i][j,:flen_acc[j]], batch_src[j,src_idx_s:src_idx_e_,stdim:], L2=False, GV=False)
tmp_batch_loss_lat_src = loss_vae(batch_lat_src[i][j,:flen_acc[j]], lat_dim=args.lat_dim)
tmp_batch_loss_lat_src_cv = loss_vae(batch_lat_src_trg[i][j,:flen_acc[j]], lat_dim=args.lat_dim)
if os.path.basename(os.path.dirname(featfile_src[j])) == args.spk_src:
loss_mcd_src_src[i].append(tmp_batch_loss_mcd_src_src.item())
loss_mcd_src_trg_src[i].append(tmp_batch_loss_mcd_src_trg_src.item())
loss_mcd_src_trg[i].append(tmp_batch_loss_mcd_src_trg.item())
loss_lat_src_cv[i].append(tmp_batch_loss_lat_src_cv.item())
loss_lat_src[i].append(tmp_batch_loss_lat_src.item())
else:
loss_mcd_trg_trg[i].append(tmp_batch_loss_mcd_src_src.item())
loss_mcd_trg_src_trg[i].append(tmp_batch_loss_mcd_src_trg_src.item())
loss_mcd_trg_src[i].append(tmp_batch_loss_mcd_src_trg.item())
loss_lat_trg_cv[i].append(tmp_batch_loss_lat_src_cv.item())
loss_lat_trg[i].append(tmp_batch_loss_lat_src.item())
if k > 0:
batch_loss_mcd_src_src[i] = torch.cat((batch_loss_mcd_src_src[i], tmp_batch_loss_mcd_src_src.unsqueeze(0)))
batch_loss_mcd_src_trg_src[i] = torch.cat((batch_loss_mcd_src_trg_src[i], tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)))
batch_loss_mcd_src_trg[i] = torch.cat((batch_loss_mcd_src_trg[i], tmp_batch_loss_mcd_src_trg.unsqueeze(0)))
batch_loss_lat_src[i] = torch.cat((batch_loss_lat_src[i], tmp_batch_loss_lat_src.unsqueeze(0)))
batch_loss_lat_src_cv[i] = torch.cat((batch_loss_lat_src[i], tmp_batch_loss_lat_src_cv.unsqueeze(0)))
else:
batch_loss_mcd_src_src[i] = tmp_batch_loss_mcd_src_src.unsqueeze(0)
batch_loss_mcd_src_trg_src[i] = tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)
batch_loss_mcd_src_trg[i] = tmp_batch_loss_mcd_src_trg.unsqueeze(0)
batch_loss_lat_src[i] = tmp_batch_loss_lat_src.unsqueeze(0)
batch_loss_lat_src_cv[i] = tmp_batch_loss_lat_src_cv.unsqueeze(0)
if i > 0:
if not half_cyc:
batch_loss += batch_loss_mcd_src_src[i].sum() + batch_loss_mcd_src_trg_src[i].sum() + batch_loss_lat_src[i].sum() + batch_loss_lat_src_cv[i].sum()
else:
batch_loss += batch_loss_mcd_src_src[i].sum() + batch_loss_lat_src[i].sum()
else:
if not half_cyc:
batch_loss = batch_loss_mcd_src_src[0].sum() + batch_loss_mcd_src_trg_src[0].sum() + batch_loss_lat_src[0].sum() + batch_loss_lat_src_cv[0].sum()
else:
batch_loss = batch_loss_mcd_src_src[0].sum() + batch_loss_lat_src[0].sum()
batch_loss_mcd_src_src[i] = torch.mean(batch_loss_mcd_src_src[i])
batch_loss_mcd_src_trg_src[i] = torch.mean(batch_loss_mcd_src_trg_src[i])
batch_loss_mcd_src_trg[i] = torch.mean(batch_loss_mcd_src_trg[i])
batch_loss_lat_src[i] = torch.mean(batch_loss_lat_src[i])
batch_loss_lat_src_cv[i] = torch.mean(batch_loss_lat_src_cv[i])
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
loss.append(batch_loss.item())
print_mcd_flag = False
for i in range(args.n_cyc):
batch_mcdpow_src_src[i] = []
batch_mcd_src_src[i] = []
batch_mcdpow_src_trg_src[i] = []
batch_mcd_src_trg_src[i] = []
for j in select_utt_idx:
if spcidx_src_s_idx[j] >= 0:
print_mcd_flag = True
for i in range(args.n_cyc):
tmp_batch_mcdpow_src_src, _ = dtw.calc_mcd(np.array(torch.index_select(batch_src[j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1])[:,stdim:].cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trj_src_src[i][j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1]-src_idx_s).cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcd_src_src, _ = dtw.calc_mcd(np.array(torch.index_select(batch_src[j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1])[:,stdim_:].cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trj_src_src[i][j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1]-src_idx_s)[:,1:].cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcdpow_src_trg_src, _ = dtw.calc_mcd(np.array(torch.index_select(batch_src[j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1])[:,stdim:].cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trj_src_trg_src[i][j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1]-src_idx_s).cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcd_src_trg_src, _ = dtw.calc_mcd(np.array(torch.index_select(batch_src[j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1])[:,stdim_:].cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_trj_src_trg_src[i][j],0,spcidx_src[j,spcidx_src_s_idx[j]:spcidx_src_e_idx[j]+1]-src_idx_s)[:,1:].cpu().data.numpy(), dtype=np.float64))
batch_mcdpow_src_src[i].append(tmp_batch_mcdpow_src_src)
batch_mcd_src_src[i].append(tmp_batch_mcd_src_src)
batch_mcdpow_src_trg_src[i].append(tmp_batch_mcdpow_src_trg_src)
batch_mcd_src_trg_src[i].append(tmp_batch_mcd_src_trg_src)
if os.path.basename(os.path.dirname(featfile_src[j])) == args.spk_src:
mcdpow_src_src[i].append(tmp_batch_mcdpow_src_src)
mcd_src_src[i].append(tmp_batch_mcd_src_src)
mcdpow_src_trg_src[i].append(tmp_batch_mcdpow_src_trg_src)
mcd_src_trg_src[i].append(tmp_batch_mcd_src_trg_src)
else:
mcdpow_trg_trg[i].append(tmp_batch_mcdpow_src_src)
mcd_trg_trg[i].append(tmp_batch_mcd_src_src)
mcdpow_trg_src_trg[i].append(tmp_batch_mcdpow_src_trg_src)
mcd_trg_src_trg[i].append(tmp_batch_mcd_src_trg_src)
text_log = "%.3f ;; " % batch_loss.item()
if print_mcd_flag:
for i in range(args.n_cyc):
text_log += "[%d] %.3f %.3f %.3f ; %.3f %.3f ; %.3f dB %.3f dB , %.3f dB %.3f dB ;; " % (
i+1, batch_loss_mcd_src_src[i].item(), batch_loss_mcd_src_trg_src[i].item(), batch_loss_mcd_src_trg[i].item(),
batch_loss_lat_src[i].item(), batch_loss_lat_src_cv[i].item(),
np.mean(batch_mcdpow_src_src[i]), np.mean(batch_mcd_src_src[i]), np.mean(batch_mcdpow_src_trg_src[i]), np.mean(batch_mcd_src_trg_src[i]))
else:
for i in range(args.n_cyc):
text_log += "[%d] %.3f %.3f %.3f ; %.3f %.3f ;; " % (
i+1, batch_loss_mcd_src_src[i].item(), batch_loss_mcd_src_trg_src[i].item(), batch_loss_mcd_src_trg[i].item(),
batch_loss_lat_src[i].item(), batch_loss_lat_src_cv[i].item())
logging.info("batch loss [%d] = %s (%.3f sec)" % (c_idx_src+1, text_log, time.time() - start))
iter_idx += 1
iter_count += 1
total.append(time.time() - start)
else: # utterance-length mini-batch
for i in range(n_batch_utt):
logging.info("%s %s %d %d %d %d" % (featfile_src[i], featfile_src_trg[i], flens_src[i], flens_src_trg[i], flens_spc_src[i], flens_spc_src_trg[i]))
if n_batch_utt == args.batch_size_utt:
y_in_pp_ = y_in_pp
y_in_trg_ = y_in_trg
y_in_src_ = y_in_src
else:
y_in_pp_ = y_in_pp_mod
y_in_trg_ = y_in_trg_mod
y_in_src_ = y_in_src_mod
with torch.no_grad():
trj_lat_srctrg, _, _ = model_encoder(batch_src_trg, y_in_pp_, clamp_vae=True, lat_dim=args.lat_dim)
for i in range(args.n_cyc):
batch_mcdpow_src_src[i] = []
batch_mcd_src_src[i] = []
batch_mcdpow_src_trg_src[i] = []
batch_mcd_src_trg_src[i] = []
if i > 0:
batch_lat_src[i], _, _ = model_encoder(torch.cat((batch_src[:,:,:stdim], batch_trj_src_trg_src[i-1]),2), y_in_pp_, clamp_vae=True, lat_dim=args.lat_dim, do=True)
batch_trj_src_src[i], _, _ = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim)),2), y_in_src_, do=True)
batch_trj_src_trg[i], _, _ = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[i], lat_dim=args.lat_dim)),2), y_in_trg_, do=True)
batch_lat_src_trg[i], _, _ = model_encoder(torch.cat((batch_cv_src, batch_trj_src_trg[i]),2), y_in_pp_, clamp_vae=True, lat_dim=args.lat_dim, do=True)
batch_trj_src_trg_src[i], _, _ = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[i], lat_dim=args.lat_dim)),2), y_in_src_, do=True)
else:
batch_lat_src[0], _, _ = model_encoder(batch_src, y_in_pp_, clamp_vae=True, lat_dim=args.lat_dim, do=True)
batch_trj_src_src[0], _, _ = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim)),2), y_in_src_, do=True)
batch_trj_src_trg[0], _, _ = model_decoder(torch.cat((batch_src_trg_code, sampling_vae_batch(batch_lat_src[0], lat_dim=args.lat_dim)),2), y_in_trg_, do=True)
batch_lat_src_trg[0], _, _ = model_encoder(torch.cat((batch_cv_src, batch_trj_src_trg[0]),2), y_in_pp_, clamp_vae=True, lat_dim=args.lat_dim, do=True)
batch_trj_src_trg_src[0], _, _ = model_decoder(torch.cat((batch_src_src_code, sampling_vae_batch(batch_lat_src_trg[0], lat_dim=args.lat_dim)),2), y_in_src_, do=True)
batch_mcdpow_src_trg[i] = []
batch_mcd_src_trg[i] = []
batch_lat_dist_srctrg1[i] = []
batch_lat_dist_srctrg2[i] = []
for j in range(n_batch_utt):
if os.path.basename(os.path.dirname(featfile_src[j])) == args.spk_src:
gv_src_src[i].append(np.var(batch_trj_src_src[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
gv_src_trg[i].append(np.var(batch_trj_src_trg[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
gv_src_trg_src[i].append(np.var(batch_trj_src_trg_src[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
else:
gv_trg_trg[i].append(np.var(batch_trj_src_src[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
gv_trg_src[i].append(np.var(batch_trj_src_trg[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
gv_trg_src_trg[i].append(np.var(batch_trj_src_trg_src[i][j,:flens_src[j],1:].cpu().data.numpy(), axis=0))
trj_lat_srctrg_ = np.array(torch.index_select(trj_lat_srctrg[j],0,spcidx_src_trg[j,:flens_spc_src_trg[j]]).cpu().data.numpy(), dtype=np.float64)
trj_lat_src_ = np.array(torch.index_select(batch_lat_src[0][j],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64)
aligned_lat_srctrg1, _, _, _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_)
tmp_batch_lat_dist_srctrg1 = np.mean(np.sqrt(np.mean((aligned_lat_srctrg1-trj_lat_srctrg_)**2, axis=0)))
_, _, tmp_batch_lat_cdist_srctrg1, _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_, mcd=0)
aligned_lat_srctrg2, _, _, _ = dtw.dtw_org_to_trg(trj_lat_srctrg_, trj_lat_src_)
tmp_batch_lat_dist_srctrg2 = np.mean(np.sqrt(np.mean((aligned_lat_srctrg2-trj_lat_src_)**2, axis=0)))
_, _, tmp_batch_lat_cdist_srctrg2, _ = dtw.dtw_org_to_trg(trj_lat_src_, trj_lat_srctrg_, mcd=0)
tmp_batch_lat_dist_srctrg1 = (tmp_batch_lat_dist_srctrg1+tmp_batch_lat_dist_srctrg2)/2
tmp_batch_lat_dist_srctrg2 = (tmp_batch_lat_cdist_srctrg1+tmp_batch_lat_cdist_srctrg2)/2
batch_lat_dist_srctrg1[0].append(tmp_batch_lat_dist_srctrg1)
batch_lat_dist_srctrg2[0].append(tmp_batch_lat_dist_srctrg2)
_, _, tmp_batch_mcdpow_src_trg, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_src_trg[i][j],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg[j,:,stdim:],0,spcidx_src_trg[j,:flens_spc_src_trg[j]]).cpu().data.numpy(), dtype=np.float64))
_, _, tmp_batch_mcd_src_trg, _ = dtw.dtw_org_to_trg(np.array(torch.index_select(batch_trj_src_trg[i][j,:,1:],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64), np.array(torch.index_select(batch_src_trg[j,:,stdim_:],0,spcidx_src_trg[j,:flens_spc_src_trg[j]]).cpu().data.numpy(), dtype=np.float64))
batch_mcdpow_src_trg[0].append(tmp_batch_mcdpow_src_trg)
batch_mcd_src_trg[0].append(tmp_batch_mcd_src_trg)
text_log = "%s %s = %.3f dB %.3f dB , %.3f %.3f" % (
featfile_src[j], featfile_src_trg[j], tmp_batch_mcdpow_src_trg, tmp_batch_mcd_src_trg, tmp_batch_lat_dist_srctrg1, tmp_batch_lat_dist_srctrg2)
if os.path.basename(os.path.dirname(featfile_src[j])) == args.spk_src:
mcdpow_src_trg[i].append(tmp_batch_mcdpow_src_trg)
mcd_src_trg[i].append(tmp_batch_mcd_src_trg)
lat_dist_srctrg1[0].append(tmp_batch_lat_dist_srctrg1)
lat_dist_srctrg2[0].append(tmp_batch_lat_dist_srctrg2)
logging.info("batch srctrg loss %s " % (text_log))
else:
mcdpow_trg_src[i].append(tmp_batch_mcdpow_src_trg)
mcd_trg_src[i].append(tmp_batch_mcd_src_trg)
lat_dist_trgsrc1[0].append(tmp_batch_lat_dist_srctrg1)
lat_dist_trgsrc2[0].append(tmp_batch_lat_dist_srctrg2)
logging.info("batch trgsrc loss %s " % (text_log))
batch_mcdpow_src_trg[i] = np.mean(batch_mcdpow_src_trg[i])
batch_mcd_src_trg[i] = np.mean(batch_mcd_src_trg[i])
batch_lat_dist_srctrg1[i] = np.mean(batch_lat_dist_srctrg1[i])
batch_lat_dist_srctrg2[i] = np.mean(batch_lat_dist_srctrg2[i])
for j in range(n_batch_utt):
_, tmp_batch_loss_mcd_src_src, _ = criterion_mcd(batch_trj_src_src[i][j,:flens_src[j]], batch_src[j,:flens_src[j],stdim:], L2=False, GV=False)
_, tmp_batch_loss_mcd_src_trg, _ = criterion_mcd(batch_trj_src_trg[i][j,:flens_src[j]], batch_src[j,:flens_src[j],stdim:], L2=False, GV=False)
_, tmp_batch_loss_mcd_src_trg_src, _ = criterion_mcd(batch_trj_src_trg_src[i][j,:flens_src[j]], batch_src[j,:flens_src[j],stdim:], L2=False, GV=False)
tmp_batch_loss_lat_src = loss_vae(batch_lat_src[i][j,:flens_src[j]], lat_dim=args.lat_dim)
tmp_batch_loss_lat_src_cv = loss_vae(batch_lat_src_trg[i][j,:flens_src[j]], lat_dim=args.lat_dim)
batch_src_spc_ = np.array(torch.index_select(batch_src[j,:,stdim:],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64)
batch_src_spc__ = np.array(torch.index_select(batch_src[j,:,stdim_:],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64)
tmp_batch_mcdpow_src_src, _ = dtw.calc_mcd(batch_src_spc_, np.array(torch.index_select(batch_trj_src_src[i][j],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcd_src_src, _ = dtw.calc_mcd(batch_src_spc__, np.array(torch.index_select(batch_trj_src_src[i][j,:,1:],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcdpow_src_trg_src, _ = dtw.calc_mcd(batch_src_spc_, np.array(torch.index_select(batch_trj_src_trg_src[i][j],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64))
tmp_batch_mcd_src_trg_src, _ = dtw.calc_mcd(batch_src_spc__, np.array(torch.index_select(batch_trj_src_trg_src[i][j,:,1:],0,spcidx_src[j,:flens_spc_src[j]]).cpu().data.numpy(), dtype=np.float64))
if j > 0:
batch_loss_mcd_src_src[i] = torch.cat((batch_loss_mcd_src_src[i], tmp_batch_loss_mcd_src_src.unsqueeze(0)))
batch_loss_mcd_src_trg[i] = torch.cat((batch_loss_mcd_src_trg[i], tmp_batch_loss_mcd_src_trg.unsqueeze(0)))
batch_loss_mcd_src_trg_src[i] = torch.cat((batch_loss_mcd_src_trg_src[i], tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)))
batch_loss_lat_src[i] = torch.cat((batch_loss_lat_src[i], tmp_batch_loss_lat_src.unsqueeze(0)))
batch_loss_lat_src_cv[i] = torch.cat((batch_loss_lat_src_cv[i], tmp_batch_loss_lat_src_cv.unsqueeze(0)))
else:
batch_loss_mcd_src_src[i] = tmp_batch_loss_mcd_src_src.unsqueeze(0)
batch_loss_mcd_src_trg[i] = tmp_batch_loss_mcd_src_trg.unsqueeze(0)
batch_loss_mcd_src_trg_src[i] = tmp_batch_loss_mcd_src_trg_src.unsqueeze(0)
batch_loss_lat_src[i] = tmp_batch_loss_lat_src.unsqueeze(0)
batch_loss_lat_src_cv[i] = tmp_batch_loss_lat_src_cv.unsqueeze(0)
if os.path.basename(os.path.dirname(featfile_src[j])) == args.spk_src:
mcdpow_src_src[i].append(tmp_batch_mcdpow_src_src)
mcd_src_src[i].append(tmp_batch_mcd_src_src)
mcdpow_src_trg_src[i].append(tmp_batch_mcdpow_src_trg_src)
mcd_src_trg_src[i].append(tmp_batch_mcd_src_trg_src)
loss_mcd_src_src[i].append(tmp_batch_loss_mcd_src_src.item())
loss_mcd_src_trg[i].append(tmp_batch_loss_mcd_src_trg.item())
loss_mcd_src_trg_src[i].append(tmp_batch_loss_mcd_src_trg_src.item())
loss_lat_src[i].append(tmp_batch_loss_lat_src.item())
loss_lat_src_cv[i].append(tmp_batch_loss_lat_src_cv.item())
else:
mcdpow_trg_trg[i].append(tmp_batch_mcdpow_src_src)
mcd_trg_trg[i].append(tmp_batch_mcd_src_src)
mcdpow_trg_src_trg[i].append(tmp_batch_mcdpow_src_trg_src)
mcd_trg_src_trg[i].append(tmp_batch_mcd_src_trg_src)
loss_mcd_trg_trg[i].append(tmp_batch_loss_mcd_src_src.item())
loss_mcd_trg_src[i].append(tmp_batch_loss_mcd_src_trg.item())
loss_mcd_trg_src_trg[i].append(tmp_batch_loss_mcd_src_trg_src.item())
loss_lat_trg[i].append(tmp_batch_loss_lat_src.item())
loss_lat_trg_cv[i].append(tmp_batch_loss_lat_src_cv.item())
batch_mcdpow_src_src[i].append(tmp_batch_mcdpow_src_src)
batch_mcd_src_src[i].append(tmp_batch_mcd_src_src)
batch_mcdpow_src_trg_src[i].append(tmp_batch_mcdpow_src_trg_src)
batch_mcd_src_trg_src[i].append(tmp_batch_mcd_src_trg_src)
batch_mcdpow_src_src[i] = np.mean(batch_mcdpow_src_src[i])
batch_mcd_src_src[i] = np.mean(batch_mcd_src_src[i])
batch_mcdpow_src_trg_src[i] = np.mean(batch_mcdpow_src_trg_src[i])
batch_mcd_src_trg_src[i] = np.mean(batch_mcd_src_trg_src[i])
if i > 0:
if not half_cyc:
batch_loss += batch_loss_mcd_src_src[i].sum() + batch_loss_mcd_src_trg_src[i].sum() + batch_loss_lat_src[i].sum() + batch_loss_lat_src_cv[i].sum()
else:
batch_loss += batch_loss_mcd_src_src[i].sum() + batch_loss_lat_src[i].sum()
else:
if not half_cyc:
batch_loss = batch_loss_mcd_src_src[0].sum() + batch_loss_mcd_src_trg_src[0].sum() + batch_loss_lat_src[0].sum() + batch_loss_lat_src_cv[0].sum()
else:
batch_loss = batch_loss_mcd_src_src[0].sum() + batch_loss_lat_src[0].sum()
batch_loss_mcd_src_src[i] = torch.mean(batch_loss_mcd_src_src[i])
batch_loss_mcd_src_trg_src[i] = torch.mean(batch_loss_mcd_src_trg_src[i])
batch_loss_mcd_src_trg[i] = torch.mean(batch_loss_mcd_src_trg[i])
batch_loss_lat_src[i] = torch.mean(batch_loss_lat_src[i])
batch_loss_lat_src_cv[i] = torch.mean(batch_loss_lat_src_cv[i])
optimizer.zero_grad()
batch_loss.backward()
optimizer.step()
loss.append(batch_loss.item())
text_log = "%.3f ;; " % batch_loss.item()
for i in range(args.n_cyc):
text_log += "[%d] %.3f %.3f %.3f ; %.3f %.3f ; %.3f dB %.3f dB , %.3f dB %.3f dB;; " % (
i+1, batch_loss_mcd_src_src[i].item(), batch_loss_mcd_src_trg_src[i].item(), batch_loss_mcd_src_trg[i].item(),
batch_loss_lat_src[i].item(), batch_loss_lat_src_cv[i].item(), batch_mcdpow_src_src[i], batch_mcd_src_src[i],
batch_mcdpow_src_trg_src[i], batch_mcd_src_trg_src[i])
logging.info("batch loss [%d] = %s (%.3f sec)" % (c_idx_src+1, text_log, time.time() - start))
iter_idx += 1
iter_count += 1
total.append(time.time() - start)
# save final model
model_encoder.cpu()
model_decoder.cpu()
torch.save({"model_encoder": model_encoder.state_dict(), "model_decoder": model_decoder.state_dict()}, args.expdir + "/checkpoint-final.pkl")
logging.info("final checkpoint created.")
if __name__ == "__main__":
main()
| [
"numpy.random.seed",
"argparse.ArgumentParser",
"numpy.sum",
"utils.find_files",
"torch.get_rng_state",
"torch.cat",
"numpy.random.set_state",
"gru_vae.loss_vae",
"dataset.padding",
"os.path.isfile",
"numpy.mean",
"torch.no_grad",
"logging.error",
"torch.utils.data.DataLoader",
"numpy.st... | [((7884, 7958), 'torch.save', 'torch.save', (['checkpoint', "(checkpoint_dir + '/checkpoint-%d.pkl' % iterations)"], {}), "(checkpoint, checkpoint_dir + '/checkpoint-%d.pkl' % iterations)\n", (7894, 7958), False, 'import torch\n'), ((8013, 8069), 'logging.info', 'logging.info', (["('%d-iter checkpoint created.' % iterations)"], {}), "('%d-iter checkpoint created.' % iterations)\n", (8025, 8069), False, 'import logging\n'), ((8097, 8122), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (8120, 8122), False, 'import argparse\n'), ((13897, 13922), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (13911, 13922), True, 'import numpy as np\n'), ((13927, 13955), 'torch.manual_seed', 'torch.manual_seed', (['args.seed'], {}), '(args.seed)\n', (13944, 13955), False, 'import torch\n'), ((14265, 14310), 'torch.save', 'torch.save', (['args', "(args.expdir + '/model.conf')"], {}), "(args, args.expdir + '/model.conf')\n", (14275, 14310), False, 'import torch\n'), ((14761, 14786), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14784, 14786), False, 'import torch\n'), ((15065, 15306), 'gru_vae.GRU_RNN', 'GRU_RNN', ([], {'in_dim': 'args.in_dim', 'out_dim': '(args.lat_dim * 2)', 'hidden_layers': 'args.hidden_layers', 'hidden_units': 'args.hidden_units', 'kernel_size': 'args.kernel_size', 'dilation_size': 'args.dilation_size', 'do_prob': 'args.do_prob', 'scale_out_flag': '(False)'}), '(in_dim=args.in_dim, out_dim=args.lat_dim * 2, hidden_layers=args.\n hidden_layers, hidden_units=args.hidden_units, kernel_size=args.\n kernel_size, dilation_size=args.dilation_size, do_prob=args.do_prob,\n scale_out_flag=False)\n', (15072, 15306), False, 'from gru_vae import GRU_RNN\n'), ((15360, 15387), 'logging.info', 'logging.info', (['model_encoder'], {}), '(model_encoder)\n', (15372, 15387), False, 'import logging\n'), ((15408, 15649), 'gru_vae.GRU_RNN', 'GRU_RNN', ([], {'in_dim': '(args.lat_dim + 2)', 
'out_dim': 'args.out_dim', 'hidden_layers': 'args.hidden_layers', 'hidden_units': 'args.hidden_units', 'kernel_size': 'args.kernel_size', 'dilation_size': 'args.dilation_size', 'do_prob': 'args.do_prob', 'scale_in_flag': '(False)'}), '(in_dim=args.lat_dim + 2, out_dim=args.out_dim, hidden_layers=args.\n hidden_layers, hidden_units=args.hidden_units, kernel_size=args.\n kernel_size, dilation_size=args.dilation_size, do_prob=args.do_prob,\n scale_in_flag=False)\n', (15415, 15649), False, 'from gru_vae import GRU_RNN\n'), ((15703, 15730), 'logging.info', 'logging.info', (['model_decoder'], {}), '(model_decoder)\n', (15715, 15730), False, 'import logging\n'), ((15751, 15762), 'gru_vae.TWFSEloss', 'TWFSEloss', ([], {}), '()\n', (15760, 15762), False, 'from gru_vae import TWFSEloss\n'), ((15789, 15814), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (15812, 15814), False, 'import torch\n'), ((16262, 16313), 'torch.nn.Parameter', 'torch.nn.Parameter', (['(-(mean_jnt.data / std_jnt.data))'], {}), '(-(mean_jnt.data / std_jnt.data))\n', (16280, 16313), False, 'import torch\n'), ((16446, 16483), 'torch.nn.Parameter', 'torch.nn.Parameter', (['mean_jnt_trg.data'], {}), '(mean_jnt_trg.data)\n', (16464, 16483), False, 'import torch\n'), ((16855, 16907), 'numpy.zeros', 'np.zeros', (['(args.batch_size_utt, 1, args.lat_dim * 2)'], {}), '((args.batch_size_utt, 1, args.lat_dim * 2))\n', (16863, 16907), True, 'import numpy as np\n'), ((18051, 18092), 'torch.optim.Adam', 'torch.optim.Adam', (['module_list'], {'lr': 'args.lr'}), '(module_list, lr=args.lr)\n', (18067, 18092), False, 'import torch\n'), ((18340, 18413), 'logging.info', 'logging.info', (["('Trainable Parameters (encoder): %.3f million' % parameters)"], {}), "('Trainable Parameters (encoder): %.3f million' % parameters)\n", (18352, 18413), False, 'import logging\n'), ((18569, 18642), 'logging.info', 'logging.info', (["('Trainable Parameters (decoder): %.3f million' % parameters)"], {}), "('Trainable 
Parameters (decoder): %.3f million' % parameters)\n", (18581, 18642), False, 'import logging\n'), ((18683, 18712), 'os.path.isdir', 'os.path.isdir', (['args.feats_src'], {}), '(args.feats_src)\n', (18696, 18712), False, 'import os\n'), ((18994, 19027), 'os.path.isdir', 'os.path.isdir', (['args.feats_src_trg'], {}), '(args.feats_src_trg)\n', (19007, 19027), False, 'import os\n'), ((19465, 19494), 'os.path.isdir', 'os.path.isdir', (['args.feats_trg'], {}), '(args.feats_trg)\n', (19478, 19494), False, 'import os\n'), ((19776, 19809), 'os.path.isdir', 'os.path.isdir', (['args.feats_trg_src'], {}), '(args.feats_trg_src)\n', (19789, 19809), False, 'import os\n'), ((20686, 20720), 'os.path.isdir', 'os.path.isdir', (['args.feats_eval_src'], {}), '(args.feats_eval_src)\n', (20699, 20720), False, 'import os\n'), ((21032, 21066), 'os.path.isdir', 'os.path.isdir', (['args.feats_eval_trg'], {}), '(args.feats_eval_trg)\n', (21045, 21066), False, 'import os\n'), ((22063, 22093), 'torchvision.transforms.Compose', 'transforms.Compose', (['[zero_pad]'], {}), '([zero_pad])\n', (22081, 22093), False, 'from torchvision import transforms\n'), ((22108, 22234), 'dataset.FeatureDatasetSingleVAE', 'FeatureDatasetSingleVAE', (['(feat_list_src + feat_list_trg)', '(feat_list_src_trg + feat_list_trg_src)', 'pad_transform', 'args.spk_src'], {}), '(feat_list_src + feat_list_trg, feat_list_src_trg +\n feat_list_trg_src, pad_transform, args.spk_src)\n', (22131, 22234), False, 'from dataset import FeatureDatasetSingleVAE, padding\n'), ((22244, 22341), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'args.batch_size_utt', 'shuffle': '(True)', 'num_workers': 'args.n_workers'}), '(dataset, batch_size=args.batch_size_utt, shuffle=True,\n num_workers=args.n_workers)\n', (22254, 22341), False, 'from torch.utils.data import DataLoader\n'), ((22361, 22457), 'dataset.FeatureDatasetSingleVAE', 'FeatureDatasetSingleVAE', (['feat_list_eval_src', 'feat_list_eval_trg', 'pad_transform', 
'args.spk_src'], {}), '(feat_list_eval_src, feat_list_eval_trg,\n pad_transform, args.spk_src)\n', (22384, 22457), False, 'from dataset import FeatureDatasetSingleVAE, padding\n'), ((22480, 22577), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_eval_src'], {'batch_size': 'args.batch_size_utt_eval', 'num_workers': 'args.n_workers'}), '(dataset_eval_src, batch_size=args.batch_size_utt_eval,\n num_workers=args.n_workers)\n', (22490, 22577), False, 'from torch.utils.data import DataLoader\n'), ((22597, 22693), 'dataset.FeatureDatasetSingleVAE', 'FeatureDatasetSingleVAE', (['feat_list_eval_trg', 'feat_list_eval_src', 'pad_transform', 'args.spk_src'], {}), '(feat_list_eval_trg, feat_list_eval_src,\n pad_transform, args.spk_src)\n', (22620, 22693), False, 'from dataset import FeatureDatasetSingleVAE, padding\n'), ((22716, 22813), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset_eval_trg'], {'batch_size': 'args.batch_size_utt_eval', 'num_workers': 'args.n_workers'}), '(dataset_eval_trg, batch_size=args.batch_size_utt_eval,\n num_workers=args.n_workers)\n', (22726, 22813), False, 'from torch.utils.data import DataLoader\n'), ((29523, 29557), 'numpy.repeat', 'np.repeat', (['""""""', 'args.batch_size_utt'], {}), "('', args.batch_size_utt)\n", (29532, 29557), True, 'import numpy as np\n'), ((29940, 29986), 'logging.info', 'logging.info', (["('==%d EPOCH==' % (epoch_idx + 1))"], {}), "('==%d EPOCH==' % (epoch_idx + 1))\n", (29952, 29986), False, 'import logging\n'), ((29989, 30018), 'logging.info', 'logging.info', (['"""Training data"""'], {}), "('Training data')\n", (30001, 30018), False, 'import logging\n'), ((116239, 116280), 'logging.info', 'logging.info', (['"""final checkpoint created."""'], {}), "('final checkpoint created.')\n", (116251, 116280), False, 'import logging\n'), ((7812, 7842), 'os.path.exists', 'os.path.exists', (['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7826, 7842), False, 'import os\n'), ((7852, 7879), 'os.makedirs', 'os.makedirs', 
(['checkpoint_dir'], {}), '(checkpoint_dir)\n', (7863, 7879), False, 'import os\n'), ((12630, 12657), 'os.path.exists', 'os.path.exists', (['args.expdir'], {}), '(args.expdir)\n', (12644, 12657), False, 'import os\n'), ((12667, 12691), 'os.makedirs', 'os.makedirs', (['args.expdir'], {}), '(args.expdir)\n', (12678, 12691), False, 'import os\n'), ((12747, 12934), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/train.log')"}), "(level=logging.INFO, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/train.log')\n", (12766, 12934), False, 'import logging\n'), ((14418, 14469), 'utils.read_hdf5', 'read_hdf5', (['args.stats_jnt', '"""/mean_feat_org_lf0_jnt"""'], {}), "(args.stats_jnt, '/mean_feat_org_lf0_jnt')\n", (14427, 14469), False, 'from utils import read_hdf5\n'), ((14503, 14555), 'utils.read_hdf5', 'read_hdf5', (['args.stats_jnt', '"""/scale_feat_org_lf0_jnt"""'], {}), "(args.stats_jnt, '/scale_feat_org_lf0_jnt')\n", (14512, 14555), False, 'from utils import read_hdf5\n'), ((14958, 15022), 'logging.error', 'logging.error', (['"""gpu is not available. please check the setting."""'], {}), "('gpu is not available. please check the setting.')\n", (14971, 15022), False, 'import logging\n'), ((15921, 15985), 'logging.error', 'logging.error', (['"""gpu is not available. please check the setting."""'], {}), "('gpu is not available. 
please check the setting.')\n", (15934, 15985), False, 'import logging\n'), ((15994, 16005), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (16002, 16005), False, 'import sys\n'), ((16565, 16588), 'torch.load', 'torch.load', (['args.resume'], {}), '(args.resume)\n', (16575, 16588), False, 'import torch\n'), ((16776, 16837), 'logging.info', 'logging.info', (["('restored from %d-iter checkpoint.' % epoch_idx)"], {}), "('restored from %d-iter checkpoint.' % epoch_idx)\n", (16788, 16837), False, 'import logging\n'), ((17086, 17101), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (17099, 17101), False, 'import torch\n'), ((17126, 17183), 'numpy.zeros', 'np.zeros', (['(args.batch_size_utt_eval, 1, args.lat_dim * 2)'], {}), '((args.batch_size_utt_eval, 1, args.lat_dim * 2))\n', (17134, 17183), True, 'import numpy as np\n'), ((18810, 18840), 'os.path.isfile', 'os.path.isfile', (['args.feats_src'], {}), '(args.feats_src)\n', (18824, 18840), False, 'import os\n'), ((19133, 19167), 'os.path.isfile', 'os.path.isfile', (['args.feats_src_trg'], {}), '(args.feats_src_trg)\n', (19147, 19167), False, 'import os\n'), ((19592, 19622), 'os.path.isfile', 'os.path.isfile', (['args.feats_trg'], {}), '(args.feats_trg)\n', (19606, 19622), False, 'import os\n'), ((19915, 19949), 'os.path.isfile', 'os.path.isfile', (['args.feats_trg_src'], {}), '(args.feats_trg_src)\n', (19929, 19949), False, 'import os\n'), ((20407, 20455), 'numpy.zeros', 'np.zeros', (['(mod_train_batch, 1, args.lat_dim * 2)'], {}), '((mod_train_batch, 1, args.lat_dim * 2))\n', (20415, 20455), True, 'import numpy as np\n'), ((20828, 20863), 'os.path.isfile', 'os.path.isfile', (['args.feats_eval_src'], {}), '(args.feats_eval_src)\n', (20842, 20863), False, 'import os\n'), ((21174, 21209), 'os.path.isfile', 'os.path.isfile', (['args.feats_eval_trg'], {}), '(args.feats_eval_trg)\n', (21188, 21209), False, 'import os\n'), ((22007, 22042), 'dataset.padding', 'padding', (['x', 'args.pad_len'], {'value': '(0.0)'}), '(x, 
args.pad_len, value=0.0)\n', (22014, 22042), False, 'from dataset import FeatureDatasetSingleVAE, padding\n'), ((23151, 23194), 'utils.read_hdf5', 'read_hdf5', (['args.stats_trg', '"""/gv_range_mean"""'], {}), "(args.stats_trg, '/gv_range_mean')\n", (23160, 23194), False, 'from utils import read_hdf5\n'), ((23217, 23260), 'utils.read_hdf5', 'read_hdf5', (['args.stats_src', '"""/gv_range_mean"""'], {}), "(args.stats_src, '/gv_range_mean')\n", (23226, 23260), False, 'from utils import read_hdf5\n'), ((29820, 29873), 'numpy.random.set_state', 'np.random.set_state', (["checkpoint['numpy_random_state']"], {}), "(checkpoint['numpy_random_state'])\n", (29839, 29873), True, 'import numpy as np\n'), ((29882, 29935), 'torch.set_rng_state', 'torch.set_rng_state', (["checkpoint['torch_random_state']"], {}), "(checkpoint['torch_random_state'])\n", (29901, 29935), False, 'import torch\n'), ((30075, 30086), 'time.time', 'time.time', ([], {}), '()\n', (30084, 30086), False, 'import time\n'), ((1293, 1306), 'numpy.max', 'np.max', (['flens'], {}), '(flens)\n', (1299, 1306), True, 'import numpy as np\n'), ((1425, 1446), 'numpy.max', 'np.max', (['flens_spc_src'], {}), '(flens_spc_src)\n', (1431, 1446), True, 'import numpy as np\n'), ((1565, 1586), 'numpy.max', 'np.max', (['flens_src_trg'], {}), '(flens_src_trg)\n', (1571, 1586), True, 'import numpy as np\n'), ((1717, 1742), 'numpy.max', 'np.max', (['flens_spc_src_trg'], {}), '(flens_spc_src_trg)\n', (1723, 1742), True, 'import numpy as np\n'), ((13049, 13072), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13070, 13072), False, 'import logging\n'), ((13109, 13297), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/train.log')"}), "(level=logging.DEBUG, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n 
datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/train.log')\n", (13128, 13297), False, 'import logging\n'), ((13455, 13642), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.WARN', 'format': '"""%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s"""', 'datefmt': '"""%m/%d/%Y %I:%M:%S"""', 'filename': "(args.expdir + '/train.log')"}), "(level=logging.WARN, format=\n '%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s',\n datefmt='%m/%d/%Y %I:%M:%S', filename=args.expdir + '/train.log')\n", (13474, 13642), False, 'import logging\n'), ((13790, 13826), 'logging.warn', 'logging.warn', (['"""logging is disabled."""'], {}), "('logging is disabled.')\n", (13802, 13826), False, 'import logging\n'), ((13993, 14018), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (14016, 14018), False, 'import torch\n'), ((14594, 14645), 'utils.read_hdf5', 'read_hdf5', (['args.stats_jnt', '"""/mean_feat_org_lf0_jnt"""'], {}), "(args.stats_jnt, '/mean_feat_org_lf0_jnt')\n", (14603, 14645), False, 'from utils import read_hdf5\n'), ((14691, 14743), 'utils.read_hdf5', 'read_hdf5', (['args.stats_jnt', '"""/scale_feat_org_lf0_jnt"""'], {}), "(args.stats_jnt, '/scale_feat_org_lf0_jnt')\n", (14700, 14743), False, 'from utils import read_hdf5\n'), ((16918, 16944), 'torch.FloatTensor', 'torch.FloatTensor', (['init_pp'], {}), '(init_pp)\n', (16935, 16944), False, 'import torch\n'), ((18745, 18799), 'utils.find_files', 'find_files', (['args.feats_src', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_src, '*.h5', use_dir_name=False)\n", (18755, 18799), False, 'from utils import find_files\n'), ((18866, 18890), 'utils.read_txt', 'read_txt', (['args.feats_src'], {}), '(args.feats_src)\n', (18874, 18890), False, 'from utils import read_txt\n'), ((18909, 18966), 'logging.error', 'logging.error', (['"""--feats_src should be directory or list."""'], {}), "('--feats_src should be directory or list.')\n", (18922, 18966), False, 
'import logging\n'), ((18975, 18986), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (18983, 18986), False, 'import sys\n'), ((19064, 19122), 'utils.find_files', 'find_files', (['args.feats_src_trg', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_src_trg, '*.h5', use_dir_name=False)\n", (19074, 19122), False, 'from utils import find_files\n'), ((19197, 19225), 'utils.read_txt', 'read_txt', (['args.feats_src_trg'], {}), '(args.feats_src_trg)\n', (19205, 19225), False, 'from utils import read_txt\n'), ((19244, 19305), 'logging.error', 'logging.error', (['"""--feats_src_trg should be directory or list."""'], {}), "('--feats_src_trg should be directory or list.')\n", (19257, 19305), False, 'import logging\n'), ((19314, 19325), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19322, 19325), False, 'import sys\n'), ((19527, 19581), 'utils.find_files', 'find_files', (['args.feats_trg', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_trg, '*.h5', use_dir_name=False)\n", (19537, 19581), False, 'from utils import find_files\n'), ((19648, 19672), 'utils.read_txt', 'read_txt', (['args.feats_trg'], {}), '(args.feats_trg)\n', (19656, 19672), False, 'from utils import read_txt\n'), ((19691, 19748), 'logging.error', 'logging.error', (['"""--feats_trg should be directory or list."""'], {}), "('--feats_trg should be directory or list.')\n", (19704, 19748), False, 'import logging\n'), ((19757, 19768), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (19765, 19768), False, 'import sys\n'), ((19846, 19904), 'utils.find_files', 'find_files', (['args.feats_trg_src', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_trg_src, '*.h5', use_dir_name=False)\n", (19856, 19904), False, 'from utils import find_files\n'), ((19979, 20007), 'utils.read_txt', 'read_txt', (['args.feats_trg_src'], {}), '(args.feats_trg_src)\n', (19987, 20007), False, 'from utils import read_txt\n'), ((20026, 20087), 'logging.error', 'logging.error', (['"""--feats_trg_src should be directory 
or list."""'], {}), "('--feats_trg_src should be directory or list.')\n", (20039, 20087), False, 'import logging\n'), ((20096, 20107), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (20104, 20107), False, 'import sys\n'), ((20758, 20817), 'utils.find_files', 'find_files', (['args.feats_eval_src', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_eval_src, '*.h5', use_dir_name=False)\n", (20768, 20817), False, 'from utils import find_files\n'), ((20894, 20923), 'utils.read_txt', 'read_txt', (['args.feats_eval_src'], {}), '(args.feats_eval_src)\n', (20902, 20923), False, 'from utils import read_txt\n'), ((20942, 21004), 'logging.error', 'logging.error', (['"""--feats_eval_src should be directory or list."""'], {}), "('--feats_eval_src should be directory or list.')\n", (20955, 21004), False, 'import logging\n'), ((21013, 21024), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21021, 21024), False, 'import sys\n'), ((21104, 21163), 'utils.find_files', 'find_files', (['args.feats_eval_trg', '"""*.h5"""'], {'use_dir_name': '(False)'}), "(args.feats_eval_trg, '*.h5', use_dir_name=False)\n", (21114, 21163), False, 'from utils import find_files\n'), ((21240, 21269), 'utils.read_txt', 'read_txt', (['args.feats_eval_trg'], {}), '(args.feats_eval_trg)\n', (21248, 21269), False, 'from utils import read_txt\n'), ((21288, 21350), 'logging.error', 'logging.error', (['"""--feats_eval_trg should be directory or list."""'], {}), "('--feats_eval_trg should be directory or list.')\n", (21301, 21350), False, 'import logging\n'), ((21359, 21370), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (21367, 21370), False, 'import sys\n'), ((21655, 21670), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (21668, 21670), False, 'import torch\n'), ((21703, 21750), 'numpy.zeros', 'np.zeros', (['(mod_eval_batch, 1, args.lat_dim * 2)'], {}), '((mod_eval_batch, 1, args.lat_dim * 2))\n', (21711, 21750), True, 'import numpy as np\n'), ((35054, 35075), 'numpy.random.get_state', 
'np.random.get_state', ([], {}), '()\n', (35073, 35075), True, 'import numpy as np\n'), ((35109, 35130), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (35128, 35130), False, 'import torch\n'), ((36026, 36047), 'numpy.random.get_state', 'np.random.get_state', ([], {}), '()\n', (36045, 36047), True, 'import numpy as np\n'), ((36081, 36102), 'torch.get_rng_state', 'torch.get_rng_state', ([], {}), '()\n', (36100, 36102), False, 'import torch\n'), ((41973, 42004), 'logging.info', 'logging.info', (['"""Evaluation data"""'], {}), "('Evaluation data')\n", (41985, 42004), False, 'import logging\n'), ((78520, 78589), 'logging.info', 'logging.info', (["('min_eval_acc= %s min_idx=%d' % (text_log, min_idx + 1))"], {}), "('min_eval_acc= %s min_idx=%d' % (text_log, min_idx + 1))\n", (78532, 78589), False, 'import logging\n'), ((81124, 81163), 'numpy.random.set_state', 'np.random.set_state', (['numpy_random_state'], {}), '(numpy_random_state)\n', (81143, 81163), True, 'import numpy as np\n'), ((81176, 81215), 'torch.set_rng_state', 'torch.set_rng_state', (['torch_random_state'], {}), '(torch_random_state)\n', (81195, 81215), False, 'import torch\n'), ((82015, 82080), 'logging.info', 'logging.info', (["('%d iteration [%d]' % (iter_idx + 1, epoch_idx + 1))"], {}), "('%d iteration [%d]' % (iter_idx + 1, epoch_idx + 1))\n", (82027, 82080), False, 'import logging\n'), ((2583, 2609), 'numpy.repeat', 'np.repeat', (['(-1)', 'n_batch_utt'], {}), '(-1, n_batch_utt)\n', (2592, 2609), True, 'import numpy as np\n'), ((2645, 2671), 'numpy.repeat', 'np.repeat', (['(-1)', 'n_batch_utt'], {}), '(-1, n_batch_utt)\n', (2654, 2671), True, 'import numpy as np\n'), ((2696, 2725), 'numpy.repeat', 'np.repeat', (['(False)', 'n_batch_utt'], {}), '(False, n_batch_utt)\n', (2705, 2725), True, 'import numpy as np\n'), ((2750, 2778), 'numpy.repeat', 'np.repeat', (['(True)', 'n_batch_utt'], {}), '(True, n_batch_utt)\n', (2759, 2778), True, 'import numpy as np\n'), ((2805, 2839), 
'numpy.repeat', 'np.repeat', (['batch_size', 'n_batch_utt'], {}), '(batch_size, n_batch_utt)\n', (2814, 2839), True, 'import numpy as np\n'), ((13018, 13037), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13035, 13037), False, 'import logging\n'), ((13412, 13435), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13433, 13435), False, 'import logging\n'), ((13757, 13780), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (13778, 13780), False, 'import logging\n'), ((16185, 16215), 'torch.diag', 'torch.diag', (['(1.0 / std_jnt.data)'], {}), '(1.0 / std_jnt.data)\n', (16195, 16215), False, 'import torch\n'), ((16368, 16396), 'torch.diag', 'torch.diag', (['std_jnt_trg.data'], {}), '(std_jnt_trg.data)\n', (16378, 16396), False, 'import torch\n'), ((16994, 17046), 'torch.unsqueeze', 'torch.unsqueeze', (['((0 - mean_jnt_trg) / std_jnt_trg)', '(0)'], {}), '((0 - mean_jnt_trg) / std_jnt_trg, 0)\n', (17009, 17046), False, 'import torch\n'), ((17203, 17234), 'torch.FloatTensor', 'torch.FloatTensor', (['init_pp_eval'], {}), '(init_pp_eval)\n', (17220, 17234), False, 'import torch\n'), ((20473, 20503), 'torch.FloatTensor', 'torch.FloatTensor', (['init_pp_mod'], {}), '(init_pp_mod)\n', (20490, 20503), False, 'import torch\n'), ((36174, 36187), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (36181, 36187), True, 'import numpy as np\n'), ((42022, 42037), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (42035, 42037), False, 'import torch\n'), ((69587, 69600), 'numpy.mean', 'np.mean', (['loss'], {}), '(loss)\n', (69594, 69600), True, 'import numpy as np\n'), ((69682, 69710), 'numpy.mean', 'np.mean', (['loss_mcd_trg_trg[i]'], {}), '(loss_mcd_trg_trg[i])\n', (69689, 69710), True, 'import numpy as np\n'), ((69758, 69790), 'numpy.mean', 'np.mean', (['loss_mcd_trg_src_trg[i]'], {}), '(loss_mcd_trg_src_trg[i])\n', (69765, 69790), True, 'import numpy as np\n'), ((69834, 69862), 'numpy.mean', 'np.mean', 
(['loss_mcd_trg_src[i]'], {}), '(loss_mcd_trg_src[i])\n', (69841, 69862), True, 'import numpy as np\n'), ((69906, 69934), 'numpy.mean', 'np.mean', (['loss_mcd_src_src[i]'], {}), '(loss_mcd_src_src[i])\n', (69913, 69934), True, 'import numpy as np\n'), ((69982, 70014), 'numpy.mean', 'np.mean', (['loss_mcd_src_trg_src[i]'], {}), '(loss_mcd_src_trg_src[i])\n', (69989, 70014), True, 'import numpy as np\n'), ((70058, 70086), 'numpy.mean', 'np.mean', (['loss_mcd_src_trg[i]'], {}), '(loss_mcd_src_trg[i])\n', (70065, 70086), True, 'import numpy as np\n'), ((70129, 70156), 'numpy.mean', 'np.mean', (['loss_lat_src_cv[i]'], {}), '(loss_lat_src_cv[i])\n', (70136, 70156), True, 'import numpy as np\n'), ((70199, 70226), 'numpy.mean', 'np.mean', (['loss_lat_trg_cv[i]'], {}), '(loss_lat_trg_cv[i])\n', (70206, 70226), True, 'import numpy as np\n'), ((70266, 70290), 'numpy.mean', 'np.mean', (['loss_lat_src[i]'], {}), '(loss_lat_src[i])\n', (70273, 70290), True, 'import numpy as np\n'), ((70330, 70354), 'numpy.mean', 'np.mean', (['loss_lat_trg[i]'], {}), '(loss_lat_trg[i])\n', (70337, 70354), True, 'import numpy as np\n'), ((70521, 70547), 'numpy.mean', 'np.mean', (['mcdpow_trg_trg[i]'], {}), '(mcdpow_trg_trg[i])\n', (70528, 70547), True, 'import numpy as np\n'), ((70586, 70609), 'numpy.mean', 'np.mean', (['mcd_trg_trg[i]'], {}), '(mcd_trg_trg[i])\n', (70593, 70609), True, 'import numpy as np\n'), ((70788, 70818), 'numpy.mean', 'np.mean', (['mcdpow_trg_src_trg[i]'], {}), '(mcdpow_trg_src_trg[i])\n', (70795, 70818), True, 'import numpy as np\n'), ((70861, 70888), 'numpy.mean', 'np.mean', (['mcd_trg_src_trg[i]'], {}), '(mcd_trg_src_trg[i])\n', (70868, 70888), True, 'import numpy as np\n'), ((71055, 71081), 'numpy.mean', 'np.mean', (['mcdpow_trg_src[i]'], {}), '(mcdpow_trg_src[i])\n', (71062, 71081), True, 'import numpy as np\n'), ((71126, 71151), 'numpy.std', 'np.std', (['mcdpow_trg_src[i]'], {}), '(mcdpow_trg_src[i])\n', (71132, 71151), True, 'import numpy as np\n'), ((71190, 71213), 
'numpy.mean', 'np.mean', (['mcd_trg_src[i]'], {}), '(mcd_trg_src[i])\n', (71197, 71213), True, 'import numpy as np\n'), ((71255, 71277), 'numpy.std', 'np.std', (['mcd_trg_src[i]'], {}), '(mcd_trg_src[i])\n', (71261, 71277), True, 'import numpy as np\n'), ((71321, 71349), 'numpy.mean', 'np.mean', (['lat_dist_trgsrc1[i]'], {}), '(lat_dist_trgsrc1[i])\n', (71328, 71349), True, 'import numpy as np\n'), ((71393, 71421), 'numpy.mean', 'np.mean', (['lat_dist_trgsrc2[i]'], {}), '(lat_dist_trgsrc2[i])\n', (71400, 71421), True, 'import numpy as np\n'), ((71588, 71614), 'numpy.mean', 'np.mean', (['mcdpow_src_src[i]'], {}), '(mcdpow_src_src[i])\n', (71595, 71614), True, 'import numpy as np\n'), ((71653, 71676), 'numpy.mean', 'np.mean', (['mcd_src_src[i]'], {}), '(mcd_src_src[i])\n', (71660, 71676), True, 'import numpy as np\n'), ((71855, 71885), 'numpy.mean', 'np.mean', (['mcdpow_src_trg_src[i]'], {}), '(mcdpow_src_trg_src[i])\n', (71862, 71885), True, 'import numpy as np\n'), ((71928, 71955), 'numpy.mean', 'np.mean', (['mcd_src_trg_src[i]'], {}), '(mcd_src_trg_src[i])\n', (71935, 71955), True, 'import numpy as np\n'), ((72122, 72148), 'numpy.mean', 'np.mean', (['mcdpow_src_trg[i]'], {}), '(mcdpow_src_trg[i])\n', (72129, 72148), True, 'import numpy as np\n'), ((72193, 72218), 'numpy.std', 'np.std', (['mcdpow_src_trg[i]'], {}), '(mcdpow_src_trg[i])\n', (72199, 72218), True, 'import numpy as np\n'), ((72257, 72280), 'numpy.mean', 'np.mean', (['mcd_src_trg[i]'], {}), '(mcd_src_trg[i])\n', (72264, 72280), True, 'import numpy as np\n'), ((72322, 72344), 'numpy.std', 'np.std', (['mcd_src_trg[i]'], {}), '(mcd_src_trg[i])\n', (72328, 72344), True, 'import numpy as np\n'), ((72388, 72416), 'numpy.mean', 'np.mean', (['lat_dist_srctrg1[i]'], {}), '(lat_dist_srctrg1[i])\n', (72395, 72416), True, 'import numpy as np\n'), ((72460, 72488), 'numpy.mean', 'np.mean', (['lat_dist_srctrg2[i]'], {}), '(lat_dist_srctrg2[i])\n', (72467, 72488), True, 'import numpy as np\n'), ((81789, 81800), 
'time.time', 'time.time', ([], {}), '()\n', (81798, 81800), False, 'import time\n'), ((81817, 81863), 'logging.info', 'logging.info', (["('==%d EPOCH==' % (epoch_idx + 1))"], {}), "('==%d EPOCH==' % (epoch_idx + 1))\n", (81829, 81863), False, 'import logging\n'), ((81878, 81907), 'logging.info', 'logging.info', (['"""Training data"""'], {}), "('Training data')\n", (81890, 81907), False, 'import logging\n'), ((13381, 13400), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13398, 13400), False, 'import logging\n'), ((13726, 13745), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (13743, 13745), False, 'import logging\n'), ((17298, 17350), 'torch.unsqueeze', 'torch.unsqueeze', (['((0 - mean_jnt_trg) / std_jnt_trg)', '(0)'], {}), '((0 - mean_jnt_trg) / std_jnt_trg, 0)\n', (17313, 17350), False, 'import torch\n'), ((20565, 20617), 'torch.unsqueeze', 'torch.unsqueeze', (['((0 - mean_jnt_trg) / std_jnt_trg)', '(0)'], {}), '((0 - mean_jnt_trg) / std_jnt_trg, 0)\n', (20580, 20617), False, 'import torch\n'), ((21778, 21813), 'torch.FloatTensor', 'torch.FloatTensor', (['init_pp_eval_mod'], {}), '(init_pp_eval_mod)\n', (21795, 21813), False, 'import torch\n'), ((31017, 31032), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (31030, 31032), False, 'import torch\n'), ((32459, 32508), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {}), '(trj_lat_src_, trj_lat_srctrg_)\n', (32477, 32508), True, 'from dtw_c import dtw_c as dtw\n'), ((32691, 32747), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_srctrg_', 'trj_lat_src_'], {'mcd': '(0)'}), '(trj_lat_srctrg_, trj_lat_src_, mcd=0)\n', (32709, 32747), True, 'from dtw_c import dtw_c as dtw\n'), ((32799, 32848), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_srctrg_', 'trj_lat_src_'], {}), '(trj_lat_srctrg_, trj_lat_src_)\n', (32817, 32848), True, 'from dtw_c import dtw_c as dtw\n'), ((33028, 33084), 'dtw_c.dtw_c.dtw_org_to_trg', 
'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {'mcd': '(0)'}), '(trj_lat_src_, trj_lat_srctrg_, mcd=0)\n', (33046, 33084), True, 'from dtw_c import dtw_c as dtw\n'), ((42095, 42106), 'time.time', 'time.time', ([], {}), '()\n', (42104, 42106), False, 'import time\n'), ((100514, 100664), 'logging.info', 'logging.info', (["('%s %s %d %d %d %d' % (featfile_src[i], featfile_src_trg[i], flens_src[i],\n flens_src_trg[i], flens_spc_src[i], flens_spc_src_trg[i]))"], {}), "('%s %s %d %d %d %d' % (featfile_src[i], featfile_src_trg[i],\n flens_src[i], flens_src_trg[i], flens_spc_src[i], flens_spc_src_trg[i]))\n", (100526, 100664), False, 'import logging\n'), ((101015, 101030), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (101028, 101030), False, 'import torch\n'), ((113577, 113609), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_src[i]'], {}), '(batch_mcdpow_src_src[i])\n', (113584, 113609), True, 'import numpy as np\n'), ((113653, 113682), 'numpy.mean', 'np.mean', (['batch_mcd_src_src[i]'], {}), '(batch_mcd_src_src[i])\n', (113660, 113682), True, 'import numpy as np\n'), ((113733, 113769), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_trg_src[i]'], {}), '(batch_mcdpow_src_trg_src[i])\n', (113740, 113769), True, 'import numpy as np\n'), ((113817, 113850), 'numpy.mean', 'np.mean', (['batch_mcd_src_trg_src[i]'], {}), '(batch_mcd_src_trg_src[i])\n', (113824, 113850), True, 'import numpy as np\n'), ((114656, 114693), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_src[i]'], {}), '(batch_loss_mcd_src_src[i])\n', (114666, 114693), False, 'import torch\n'), ((114746, 114787), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg_src[i]'], {}), '(batch_loss_mcd_src_trg_src[i])\n', (114756, 114787), False, 'import torch\n'), ((114836, 114873), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg[i]'], {}), '(batch_loss_mcd_src_trg[i])\n', (114846, 114873), False, 'import torch\n'), ((114918, 114951), 'torch.mean', 'torch.mean', (['batch_loss_lat_src[i]'], {}), 
'(batch_loss_lat_src[i])\n', (114928, 114951), False, 'import torch\n'), ((114999, 115035), 'torch.mean', 'torch.mean', (['batch_loss_lat_src_cv[i]'], {}), '(batch_loss_lat_src_cv[i])\n', (115009, 115035), False, 'import torch\n'), ((21889, 21941), 'torch.unsqueeze', 'torch.unsqueeze', (['((0 - mean_jnt_trg) / std_jnt_trg)', '(0)'], {}), '((0 - mean_jnt_trg) / std_jnt_trg, 0)\n', (21904, 21941), False, 'import torch\n'), ((33717, 33944), 'logging.info', 'logging.info', (["('batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f %.3f' % (featfile_src_[i\n ], featfile_src_trg_[i], batch_mcdpow_src_trg[0], batch_mcd_src_trg[0],\n batch_lat_dist_srctrg1[0], batch_lat_dist_srctrg2[0]))"], {}), "('batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f %.3f' % (\n featfile_src_[i], featfile_src_trg_[i], batch_mcdpow_src_trg[0],\n batch_mcd_src_trg[0], batch_lat_dist_srctrg1[0], batch_lat_dist_srctrg2[0])\n )\n", (33729, 33944), False, 'import logging\n'), ((34497, 34724), 'logging.info', 'logging.info', (["('batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f %.3f' % (featfile_src_[i\n ], featfile_src_trg_[i], batch_mcdpow_src_trg[0], batch_mcd_src_trg[0],\n batch_lat_dist_trgsrc1[0], batch_lat_dist_trgsrc2[0]))"], {}), "('batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f %.3f' % (\n featfile_src_[i], featfile_src_trg_[i], batch_mcdpow_src_trg[0],\n batch_mcd_src_trg[0], batch_lat_dist_trgsrc1[0], batch_lat_dist_trgsrc2[0])\n )\n", (34509, 34724), False, 'import logging\n'), ((37364, 37392), 'numpy.mean', 'np.mean', (['loss_mcd_trg_trg[i]'], {}), '(loss_mcd_trg_trg[i])\n', (37371, 37392), True, 'import numpy as np\n'), ((37394, 37426), 'numpy.mean', 'np.mean', (['loss_mcd_trg_src_trg[i]'], {}), '(loss_mcd_trg_src_trg[i])\n', (37401, 37426), True, 'import numpy as np\n'), ((37428, 37456), 'numpy.mean', 'np.mean', (['loss_mcd_trg_src[i]'], {}), '(loss_mcd_trg_src[i])\n', (37435, 37456), True, 'import numpy as np\n'), ((37491, 37519), 'numpy.mean', 'np.mean', (['loss_mcd_src_src[i]'], {}), 
'(loss_mcd_src_src[i])\n', (37498, 37519), True, 'import numpy as np\n'), ((37521, 37553), 'numpy.mean', 'np.mean', (['loss_mcd_src_trg_src[i]'], {}), '(loss_mcd_src_trg_src[i])\n', (37528, 37553), True, 'import numpy as np\n'), ((37555, 37583), 'numpy.mean', 'np.mean', (['loss_mcd_src_trg[i]'], {}), '(loss_mcd_src_trg[i])\n', (37562, 37583), True, 'import numpy as np\n'), ((37622, 37646), 'numpy.mean', 'np.mean', (['loss_lat_trg[i]'], {}), '(loss_lat_trg[i])\n', (37629, 37646), True, 'import numpy as np\n'), ((37648, 37675), 'numpy.mean', 'np.mean', (['loss_lat_trg_cv[i]'], {}), '(loss_lat_trg_cv[i])\n', (37655, 37675), True, 'import numpy as np\n'), ((37677, 37701), 'numpy.mean', 'np.mean', (['loss_lat_src[i]'], {}), '(loss_lat_src[i])\n', (37684, 37701), True, 'import numpy as np\n'), ((37703, 37730), 'numpy.mean', 'np.mean', (['loss_lat_src_cv[i]'], {}), '(loss_lat_src_cv[i])\n', (37710, 37730), True, 'import numpy as np\n'), ((37793, 37819), 'numpy.mean', 'np.mean', (['mcdpow_trg_trg[i]'], {}), '(mcdpow_trg_trg[i])\n', (37800, 37819), True, 'import numpy as np\n'), ((37821, 37844), 'numpy.mean', 'np.mean', (['mcd_trg_trg[i]'], {}), '(mcd_trg_trg[i])\n', (37828, 37844), True, 'import numpy as np\n'), ((37915, 37945), 'numpy.mean', 'np.mean', (['mcdpow_trg_src_trg[i]'], {}), '(mcdpow_trg_src_trg[i])\n', (37922, 37945), True, 'import numpy as np\n'), ((37947, 37974), 'numpy.mean', 'np.mean', (['mcd_trg_src_trg[i]'], {}), '(mcd_trg_src_trg[i])\n', (37954, 37974), True, 'import numpy as np\n'), ((38045, 38071), 'numpy.mean', 'np.mean', (['mcdpow_trg_src[i]'], {}), '(mcdpow_trg_src[i])\n', (38052, 38071), True, 'import numpy as np\n'), ((38073, 38098), 'numpy.std', 'np.std', (['mcdpow_trg_src[i]'], {}), '(mcdpow_trg_src[i])\n', (38079, 38098), True, 'import numpy as np\n'), ((38100, 38123), 'numpy.mean', 'np.mean', (['mcd_trg_src[i]'], {}), '(mcd_trg_src[i])\n', (38107, 38123), True, 'import numpy as np\n'), ((38125, 38147), 'numpy.std', 'np.std', 
(['mcd_trg_src[i]'], {}), '(mcd_trg_src[i])\n', (38131, 38147), True, 'import numpy as np\n'), ((38202, 38230), 'numpy.mean', 'np.mean', (['lat_dist_trgsrc1[i]'], {}), '(lat_dist_trgsrc1[i])\n', (38209, 38230), True, 'import numpy as np\n'), ((38232, 38260), 'numpy.mean', 'np.mean', (['lat_dist_trgsrc2[i]'], {}), '(lat_dist_trgsrc2[i])\n', (38239, 38260), True, 'import numpy as np\n'), ((38282, 38308), 'numpy.mean', 'np.mean', (['mcdpow_src_src[i]'], {}), '(mcdpow_src_src[i])\n', (38289, 38308), True, 'import numpy as np\n'), ((38310, 38333), 'numpy.mean', 'np.mean', (['mcd_src_src[i]'], {}), '(mcd_src_src[i])\n', (38317, 38333), True, 'import numpy as np\n'), ((38416, 38446), 'numpy.mean', 'np.mean', (['mcdpow_src_trg_src[i]'], {}), '(mcdpow_src_trg_src[i])\n', (38423, 38446), True, 'import numpy as np\n'), ((38448, 38475), 'numpy.mean', 'np.mean', (['mcd_src_trg_src[i]'], {}), '(mcd_src_trg_src[i])\n', (38455, 38475), True, 'import numpy as np\n'), ((38558, 38584), 'numpy.mean', 'np.mean', (['mcdpow_src_trg[i]'], {}), '(mcdpow_src_trg[i])\n', (38565, 38584), True, 'import numpy as np\n'), ((38586, 38611), 'numpy.std', 'np.std', (['mcdpow_src_trg[i]'], {}), '(mcdpow_src_trg[i])\n', (38592, 38611), True, 'import numpy as np\n'), ((38613, 38636), 'numpy.mean', 'np.mean', (['mcd_src_trg[i]'], {}), '(mcd_src_trg[i])\n', (38620, 38636), True, 'import numpy as np\n'), ((38638, 38660), 'numpy.std', 'np.std', (['mcd_src_trg[i]'], {}), '(mcd_src_trg[i])\n', (38644, 38660), True, 'import numpy as np\n'), ((38727, 38755), 'numpy.mean', 'np.mean', (['lat_dist_srctrg1[i]'], {}), '(lat_dist_srctrg1[i])\n', (38734, 38755), True, 'import numpy as np\n'), ((38757, 38785), 'numpy.mean', 'np.mean', (['lat_dist_srctrg2[i]'], {}), '(lat_dist_srctrg2[i])\n', (38764, 38785), True, 'import numpy as np\n'), ((38937, 38951), 'numpy.mean', 'np.mean', (['total'], {}), '(total)\n', (38944, 38951), True, 'import numpy as np\n'), ((42823, 42984), 'logging.info', 'logging.info', (["('%s %s %d %d 
%d %d' % (featfile_src_[i], featfile_src_trg_[i], flens_src_[\n i], flens_src_trg_[i], flens_spc_src_[i], flens_spc_src_trg_[i]))"], {}), "('%s %s %d %d %d %d' % (featfile_src_[i], featfile_src_trg_[i],\n flens_src_[i], flens_src_trg_[i], flens_spc_src_[i], flens_spc_src_trg_[i])\n )\n", (42835, 42984), False, 'import logging\n'), ((43000, 43161), 'logging.info', 'logging.info', (["('%s %s %d %d %d %d' % (featfile_trg_[i], featfile_trg_src_[i], flens_trg_[\n i], flens_trg_src_[i], flens_spc_trg_[i], flens_spc_trg_src_[i]))"], {}), "('%s %s %d %d %d %d' % (featfile_trg_[i], featfile_trg_src_[i],\n flens_trg_[i], flens_trg_src_[i], flens_spc_trg_[i], flens_spc_trg_src_[i])\n )\n", (43012, 43161), False, 'import logging\n'), ((65204, 65241), 'torch.mean', 'torch.mean', (['batch_loss_mcd_trg_trg[i]'], {}), '(batch_loss_mcd_trg_trg[i])\n', (65214, 65241), False, 'import torch\n'), ((65298, 65339), 'torch.mean', 'torch.mean', (['batch_loss_mcd_trg_src_trg[i]'], {}), '(batch_loss_mcd_trg_src_trg[i])\n', (65308, 65339), False, 'import torch\n'), ((65392, 65429), 'torch.mean', 'torch.mean', (['batch_loss_mcd_trg_src[i]'], {}), '(batch_loss_mcd_trg_src[i])\n', (65402, 65429), False, 'import torch\n'), ((65478, 65511), 'torch.mean', 'torch.mean', (['batch_loss_lat_trg[i]'], {}), '(batch_loss_lat_trg[i])\n', (65488, 65511), False, 'import torch\n'), ((65563, 65599), 'torch.mean', 'torch.mean', (['batch_loss_lat_trg_cv[i]'], {}), '(batch_loss_lat_trg_cv[i])\n', (65573, 65599), False, 'import torch\n'), ((65653, 65690), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_src[i]'], {}), '(batch_loss_mcd_src_src[i])\n', (65663, 65690), False, 'import torch\n'), ((65747, 65788), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg_src[i]'], {}), '(batch_loss_mcd_src_trg_src[i])\n', (65757, 65788), False, 'import torch\n'), ((65841, 65878), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg[i]'], {}), '(batch_loss_mcd_src_trg[i])\n', (65851, 65878), False, 'import torch\n'), 
((65927, 65960), 'torch.mean', 'torch.mean', (['batch_loss_lat_src[i]'], {}), '(batch_loss_lat_src[i])\n', (65937, 65960), False, 'import torch\n'), ((66012, 66048), 'torch.mean', 'torch.mean', (['batch_loss_lat_src_cv[i]'], {}), '(batch_loss_lat_src_cv[i])\n', (66022, 66048), False, 'import torch\n'), ((74306, 74320), 'numpy.mean', 'np.mean', (['total'], {}), '(total)\n', (74313, 74320), True, 'import numpy as np\n'), ((95109, 95146), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_src[i]'], {}), '(batch_loss_mcd_src_src[i])\n', (95119, 95146), False, 'import torch\n'), ((95203, 95244), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg_src[i]'], {}), '(batch_loss_mcd_src_trg_src[i])\n', (95213, 95244), False, 'import torch\n'), ((95297, 95334), 'torch.mean', 'torch.mean', (['batch_loss_mcd_src_trg[i]'], {}), '(batch_loss_mcd_src_trg[i])\n', (95307, 95334), False, 'import torch\n'), ((95383, 95416), 'torch.mean', 'torch.mean', (['batch_loss_lat_src[i]'], {}), '(batch_loss_lat_src[i])\n', (95393, 95416), False, 'import torch\n'), ((95468, 95504), 'torch.mean', 'torch.mean', (['batch_loss_lat_src_cv[i]'], {}), '(batch_loss_lat_src_cv[i])\n', (95478, 95504), False, 'import torch\n'), ((100380, 100391), 'time.time', 'time.time', ([], {}), '()\n', (100389, 100391), False, 'import time\n'), ((108041, 108073), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_trg[i]'], {}), '(batch_mcdpow_src_trg[i])\n', (108048, 108073), True, 'import numpy as np\n'), ((108121, 108150), 'numpy.mean', 'np.mean', (['batch_mcd_src_trg[i]'], {}), '(batch_mcd_src_trg[i])\n', (108128, 108150), True, 'import numpy as np\n'), ((108203, 108237), 'numpy.mean', 'np.mean', (['batch_lat_dist_srctrg1[i]'], {}), '(batch_lat_dist_srctrg1[i])\n', (108210, 108237), True, 'import numpy as np\n'), ((108290, 108324), 'numpy.mean', 'np.mean', (['batch_lat_dist_srctrg2[i]'], {}), '(batch_lat_dist_srctrg2[i])\n', (108297, 108324), True, 'import numpy as np\n'), ((108934, 109000), 'gru_vae.loss_vae', 
'loss_vae', (['batch_lat_src[i][j, :flens_src[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[i][j, :flens_src[j]], lat_dim=args.lat_dim)\n', (108942, 109000), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((109052, 109122), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_src_trg[i][j, :flens_src[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg[i][j, :flens_src[j]], lat_dim=args.lat_dim)\n', (109060, 109122), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((115996, 116007), 'time.time', 'time.time', ([], {}), '()\n', (116005, 116007), False, 'import time\n'), ((32573, 32634), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2, axis=0)\n', (32580, 32634), True, 'import numpy as np\n'), ((32913, 32971), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg2 - trj_lat_src_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg2 - trj_lat_src_) ** 2, axis=0)\n', (32920, 32971), True, 'import numpy as np\n'), ((33125, 33158), 'os.path.dirname', 'os.path.dirname', (['featfile_src_[i]'], {}), '(featfile_src_[i])\n', (33140, 33158), False, 'import os\n'), ((38915, 38928), 'numpy.sum', 'np.sum', (['total'], {}), '(total)\n', (38921, 38928), True, 'import numpy as np\n'), ((59707, 59739), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_src[i]'], {}), '(batch_mcdpow_src_src[i])\n', (59714, 59739), True, 'import numpy as np\n'), ((59791, 59820), 'numpy.mean', 'np.mean', (['batch_mcd_src_src[i]'], {}), '(batch_mcd_src_src[i])\n', (59798, 59820), True, 'import numpy as np\n'), ((59879, 59915), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_trg_src[i]'], {}), '(batch_mcdpow_src_trg_src[i])\n', (59886, 59915), True, 'import numpy as np\n'), ((59971, 60004), 'numpy.mean', 'np.mean', (['batch_mcd_src_trg_src[i]'], {}), '(batch_mcd_src_trg_src[i])\n', (59978, 60004), True, 'import numpy as np\n'), ((60059, 60091), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_trg[i]'], 
{}), '(batch_mcdpow_src_trg[i])\n', (60066, 60091), True, 'import numpy as np\n'), ((60143, 60172), 'numpy.mean', 'np.mean', (['batch_mcd_src_trg[i]'], {}), '(batch_mcd_src_trg[i])\n', (60150, 60172), True, 'import numpy as np\n'), ((60227, 60259), 'numpy.mean', 'np.mean', (['batch_mcdpow_trg_trg[i]'], {}), '(batch_mcdpow_trg_trg[i])\n', (60234, 60259), True, 'import numpy as np\n'), ((60311, 60340), 'numpy.mean', 'np.mean', (['batch_mcd_trg_trg[i]'], {}), '(batch_mcd_trg_trg[i])\n', (60318, 60340), True, 'import numpy as np\n'), ((60399, 60435), 'numpy.mean', 'np.mean', (['batch_mcdpow_trg_src_trg[i]'], {}), '(batch_mcdpow_trg_src_trg[i])\n', (60406, 60435), True, 'import numpy as np\n'), ((60491, 60524), 'numpy.mean', 'np.mean', (['batch_mcd_trg_src_trg[i]'], {}), '(batch_mcd_trg_src_trg[i])\n', (60498, 60524), True, 'import numpy as np\n'), ((60579, 60611), 'numpy.mean', 'np.mean', (['batch_mcdpow_trg_src[i]'], {}), '(batch_mcdpow_trg_src[i])\n', (60586, 60611), True, 'import numpy as np\n'), ((60663, 60692), 'numpy.mean', 'np.mean', (['batch_mcd_trg_src[i]'], {}), '(batch_mcd_trg_src[i])\n', (60670, 60692), True, 'import numpy as np\n'), ((60749, 60783), 'numpy.mean', 'np.mean', (['batch_lat_dist_srctrg1[i]'], {}), '(batch_lat_dist_srctrg1[i])\n', (60756, 60783), True, 'import numpy as np\n'), ((60840, 60874), 'numpy.mean', 'np.mean', (['batch_lat_dist_srctrg2[i]'], {}), '(batch_lat_dist_srctrg2[i])\n', (60847, 60874), True, 'import numpy as np\n'), ((60931, 60965), 'numpy.mean', 'np.mean', (['batch_lat_dist_trgsrc1[i]'], {}), '(batch_lat_dist_trgsrc1[i])\n', (60938, 60965), True, 'import numpy as np\n'), ((61022, 61056), 'numpy.mean', 'np.mean', (['batch_lat_dist_trgsrc2[i]'], {}), '(batch_lat_dist_trgsrc2[i])\n', (61029, 61056), True, 'import numpy as np\n'), ((62234, 62302), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_trg_[i][j, :flens_trg_[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_[i][j, :flens_trg_[j]], lat_dim=args.lat_dim)\n', (62242, 62302), 
False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((62355, 62423), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_src_[i][j, :flens_src_[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_[i][j, :flens_src_[j]], lat_dim=args.lat_dim)\n', (62363, 62423), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((62480, 62552), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_trg_src_[i][j, :flens_trg_[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_src_[i][j, :flens_trg_[j]], lat_dim=args.lat_dim)\n', (62488, 62552), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((62608, 62680), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_src_trg_[i][j, :flens_src_[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg_[i][j, :flens_src_[j]], lat_dim=args.lat_dim)\n', (62616, 62680), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((69538, 69549), 'time.time', 'time.time', ([], {}), '()\n', (69547, 69549), False, 'import time\n'), ((74284, 74297), 'numpy.sum', 'np.sum', (['total'], {}), '(total)\n', (74290, 74297), True, 'import numpy as np\n'), ((86320, 86369), 'torch.cat', 'torch.cat', (['(trj_src_trg, batch_trj_src_trg[0])', '(1)'], {}), '((trj_src_trg, batch_trj_src_trg[0]), 1)\n', (86329, 86369), False, 'import torch\n'), ((86411, 86456), 'torch.cat', 'torch.cat', (['(trj_lat_src, batch_lat_src[0])', '(1)'], {}), '((trj_lat_src, batch_lat_src[0]), 1)\n', (86420, 86456), False, 'import torch\n'), ((91733, 91798), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_src[i][j, :flen_acc[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[i][j, :flen_acc[j]], lat_dim=args.lat_dim)\n', (91741, 91798), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((91854, 91923), 'gru_vae.loss_vae', 'loss_vae', (['batch_lat_src_trg[i][j, :flen_acc[j]]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg[i][j, :flen_acc[j]], lat_dim=args.lat_dim)\n', (91862, 91923), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((101488, 
101557), 'torch.cat', 'torch.cat', (['(batch_src[:, :, :stdim], batch_trj_src_trg_src[i - 1])', '(2)'], {}), '((batch_src[:, :, :stdim], batch_trj_src_trg_src[i - 1]), 2)\n', (101497, 101557), False, 'import torch\n'), ((102043, 102093), 'torch.cat', 'torch.cat', (['(batch_cv_src, batch_trj_src_trg[i])', '(2)'], {}), '((batch_cv_src, batch_trj_src_trg[i]), 2)\n', (102052, 102093), False, 'import torch\n'), ((102930, 102980), 'torch.cat', 'torch.cat', (['(batch_cv_src, batch_trj_src_trg[0])', '(2)'], {}), '((batch_cv_src, batch_trj_src_trg[0]), 2)\n', (102939, 102980), False, 'import torch\n'), ((104821, 104870), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {}), '(trj_lat_src_, trj_lat_srctrg_)\n', (104839, 104870), True, 'from dtw_c import dtw_c as dtw\n'), ((105071, 105127), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_srctrg_', 'trj_lat_src_'], {'mcd': '(0)'}), '(trj_lat_srctrg_, trj_lat_src_, mcd=0)\n', (105089, 105127), True, 'from dtw_c import dtw_c as dtw\n'), ((105187, 105236), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_srctrg_', 'trj_lat_src_'], {}), '(trj_lat_srctrg_, trj_lat_src_)\n', (105205, 105236), True, 'from dtw_c import dtw_c as dtw\n'), ((105434, 105490), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {'mcd': '(0)'}), '(trj_lat_src_, trj_lat_srctrg_, mcd=0)\n', (105452, 105490), True, 'from dtw_c import dtw_c as dtw\n'), ((36369, 36388), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (36375, 36388), True, 'import numpy as np\n'), ((36494, 36513), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (36500, 36513), True, 'import numpy as np\n'), ((36627, 36646), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (36633, 36646), True, 'import numpy as np\n'), ((36752, 36771), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (36758, 36771), True, 'import numpy as 
np\n'), ((36877, 36896), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (36883, 36896), True, 'import numpy as np\n'), ((37010, 37029), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (37016, 37029), True, 'import numpy as np\n'), ((44838, 44909), 'torch.cat', 'torch.cat', (['(batch_trg_[:, :, :stdim], batch_trj_trg_src_trg_[i - 1])', '(2)'], {}), '((batch_trg_[:, :, :stdim], batch_trj_trg_src_trg_[i - 1]), 2)\n', (44847, 44909), False, 'import torch\n'), ((45027, 45098), 'torch.cat', 'torch.cat', (['(batch_src_[:, :, :stdim], batch_trj_src_trg_src_[i - 1])', '(2)'], {}), '((batch_src_[:, :, :stdim], batch_trj_src_trg_src_[i - 1]), 2)\n', (45036, 45098), False, 'import torch\n'), ((45963, 46015), 'torch.cat', 'torch.cat', (['(batch_cv_trg_, batch_trj_trg_src_[i])', '(2)'], {}), '((batch_cv_trg_, batch_trj_trg_src_[i]), 2)\n', (45972, 46015), False, 'import torch\n'), ((46141, 46193), 'torch.cat', 'torch.cat', (['(batch_cv_src_, batch_trj_src_trg_[i])', '(2)'], {}), '((batch_cv_src_, batch_trj_src_trg_[i]), 2)\n', (46150, 46193), False, 'import torch\n'), ((47745, 47797), 'torch.cat', 'torch.cat', (['(batch_cv_trg_, batch_trj_trg_src_[0])', '(2)'], {}), '((batch_cv_trg_, batch_trj_trg_src_[0]), 2)\n', (47754, 47797), False, 'import torch\n'), ((47923, 47975), 'torch.cat', 'torch.cat', (['(batch_cv_src_, batch_trj_src_trg_[0])', '(2)'], {}), '((batch_cv_src_, batch_trj_src_trg_[0]), 2)\n', (47932, 47975), False, 'import torch\n'), ((49697, 49746), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {}), '(trj_lat_src_, trj_lat_srctrg_)\n', (49715, 49746), True, 'from dtw_c import dtw_c as dtw\n'), ((49955, 50011), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_srctrg_', 'trj_lat_src_'], {'mcd': '(0)'}), '(trj_lat_srctrg_, trj_lat_src_, mcd=0)\n', (49973, 50011), True, 'from dtw_c import dtw_c as dtw\n'), ((50075, 50124), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', 
(['trj_lat_srctrg_', 'trj_lat_src_'], {}), '(trj_lat_srctrg_, trj_lat_src_)\n', (50093, 50124), True, 'from dtw_c import dtw_c as dtw\n'), ((50330, 50386), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_src_', 'trj_lat_srctrg_'], {'mcd': '(0)'}), '(trj_lat_src_, trj_lat_srctrg_, mcd=0)\n', (50348, 50386), True, 'from dtw_c import dtw_c as dtw\n'), ((51403, 51452), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_trg_', 'trj_lat_trgsrc_'], {}), '(trj_lat_trg_, trj_lat_trgsrc_)\n', (51421, 51452), True, 'from dtw_c import dtw_c as dtw\n'), ((51661, 51717), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_trgsrc_', 'trj_lat_trg_'], {'mcd': '(0)'}), '(trj_lat_trgsrc_, trj_lat_trg_, mcd=0)\n', (51679, 51717), True, 'from dtw_c import dtw_c as dtw\n'), ((51781, 51830), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_trgsrc_', 'trj_lat_trg_'], {}), '(trj_lat_trgsrc_, trj_lat_trg_)\n', (51799, 51830), True, 'from dtw_c import dtw_c as dtw\n'), ((52036, 52092), 'dtw_c.dtw_c.dtw_org_to_trg', 'dtw.dtw_org_to_trg', (['trj_lat_trg_', 'trj_lat_trgsrc_'], {'mcd': '(0)'}), '(trj_lat_trg_, trj_lat_trgsrc_, mcd=0)\n', (52054, 52092), True, 'from dtw_c import dtw_c as dtw\n'), ((58730, 59115), 'logging.info', 'logging.info', (["('batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f'\n % (featfile_trg_[j], featfile_trg_src_[j], tmp_batch_mcdpow_trg_trg,\n tmp_batch_mcd_trg_trg, tmp_batch_mcdpow_trg_src_trg,\n tmp_batch_mcd_trg_src_trg, tmp_batch_mcdpow_trg_src,\n tmp_batch_mcd_trg_src, tmp_batch_lat_dist_trgsrc1,\n tmp_batch_lat_dist_trgsrc2))"], {}), "(\n 'batch trgsrc loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f'\n % (featfile_trg_[j], featfile_trg_src_[j], tmp_batch_mcdpow_trg_trg,\n tmp_batch_mcd_trg_trg, tmp_batch_mcdpow_trg_src_trg,\n tmp_batch_mcd_trg_src_trg, tmp_batch_mcdpow_trg_src,\n tmp_batch_mcd_trg_src, tmp_batch_lat_dist_trgsrc1,\n 
tmp_batch_lat_dist_trgsrc2))\n", (58742, 59115), False, 'import logging\n'), ((59207, 59592), 'logging.info', 'logging.info', (["('batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f'\n % (featfile_src_[j], featfile_src_trg_[j], tmp_batch_mcdpow_src_src,\n tmp_batch_mcd_src_src, tmp_batch_mcdpow_src_trg_src,\n tmp_batch_mcd_src_trg_src, tmp_batch_mcdpow_src_trg,\n tmp_batch_mcd_src_trg, tmp_batch_lat_dist_srctrg1,\n tmp_batch_lat_dist_srctrg2))"], {}), "(\n 'batch srctrg loss %s %s = %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f dB %.3f dB , %.3f %.3f'\n % (featfile_src_[j], featfile_src_trg_[j], tmp_batch_mcdpow_src_src,\n tmp_batch_mcd_src_src, tmp_batch_mcdpow_src_trg_src,\n tmp_batch_mcd_src_trg_src, tmp_batch_mcdpow_src_trg,\n tmp_batch_mcd_src_trg, tmp_batch_lat_dist_srctrg1,\n tmp_batch_lat_dist_srctrg2))\n", (59219, 59592), False, 'import logging\n'), ((70457, 70476), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (70463, 70476), True, 'import numpy as np\n'), ((70720, 70739), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (70726, 70739), True, 'import numpy as np\n'), ((70991, 71010), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (70997, 71010), True, 'import numpy as np\n'), ((71524, 71543), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (71530, 71543), True, 'import numpy as np\n'), ((71787, 71806), 'numpy.log', 'np.log', (['gv_src_mean'], {}), '(gv_src_mean)\n', (71793, 71806), True, 'import numpy as np\n'), ((72058, 72077), 'numpy.log', 'np.log', (['gv_trg_mean'], {}), '(gv_trg_mean)\n', (72064, 72077), True, 'import numpy as np\n'), ((82850, 82945), 'torch.cat', 'torch.cat', (['(batch_src[:, src_idx_s:src_idx_e + 1, :stdim], batch_trj_src_trg_src[i - 1])', '(2)'], {}), '((batch_src[:, src_idx_s:src_idx_e + 1, :stdim],\n batch_trj_src_trg_src[i - 1]), 2)\n', (82859, 82945), False, 'import torch\n'), ((83783, 83861), 'torch.cat', 'torch.cat', 
(['(batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[i])', '(2)'], {}), '((batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[i]), 2)\n', (83792, 83861), False, 'import torch\n'), ((85334, 85412), 'torch.cat', 'torch.cat', (['(batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[0])', '(2)'], {}), '((batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[0]), 2)\n', (85343, 85412), False, 'import torch\n'), ((87016, 87111), 'torch.cat', 'torch.cat', (['(batch_src[:, src_idx_s:src_idx_e + 1, :stdim], batch_trj_src_trg_src[i - 1])', '(2)'], {}), '((batch_src[:, src_idx_s:src_idx_e + 1, :stdim],\n batch_trj_src_trg_src[i - 1]), 2)\n', (87025, 87111), False, 'import torch\n'), ((87722, 87800), 'torch.cat', 'torch.cat', (['(batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[i])', '(2)'], {}), '((batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[i]), 2)\n', (87731, 87800), False, 'import torch\n'), ((88879, 88957), 'torch.cat', 'torch.cat', (['(batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[0])', '(2)'], {}), '((batch_cv_src[:, src_idx_s:src_idx_e + 1], batch_trj_src_trg[0]), 2)\n', (88888, 88957), False, 'import torch\n'), ((107488, 107536), 'logging.info', 'logging.info', (["('batch srctrg loss %s ' % text_log)"], {}), "('batch srctrg loss %s ' % text_log)\n", (107500, 107536), False, 'import logging\n'), ((107939, 107987), 'logging.info', 'logging.info', (["('batch trgsrc loss %s ' % text_log)"], {}), "('batch trgsrc loss %s ' % text_log)\n", (107951, 107987), False, 'import logging\n'), ((111580, 111612), 'os.path.dirname', 'os.path.dirname', (['featfile_src[j]'], {}), '(featfile_src[j])\n', (111595, 111612), False, 'import os\n'), ((115883, 115894), 'time.time', 'time.time', ([], {}), '()\n', (115892, 115894), False, 'import time\n'), ((36337, 36367), 'numpy.mean', 'np.mean', (['gv_trg_trg[i]'], {'axis': '(0)'}), '(gv_trg_trg[i], axis=0)\n', (36344, 36367), True, 'import numpy as np\n'), ((36462, 
36492), 'numpy.mean', 'np.mean', (['gv_src_trg[i]'], {'axis': '(0)'}), '(gv_src_trg[i], axis=0)\n', (36469, 36492), True, 'import numpy as np\n'), ((36591, 36625), 'numpy.mean', 'np.mean', (['gv_trg_src_trg[i]'], {'axis': '(0)'}), '(gv_trg_src_trg[i], axis=0)\n', (36598, 36625), True, 'import numpy as np\n'), ((36720, 36750), 'numpy.mean', 'np.mean', (['gv_src_src[i]'], {'axis': '(0)'}), '(gv_src_src[i], axis=0)\n', (36727, 36750), True, 'import numpy as np\n'), ((36845, 36875), 'numpy.mean', 'np.mean', (['gv_trg_src[i]'], {'axis': '(0)'}), '(gv_trg_src[i], axis=0)\n', (36852, 36875), True, 'import numpy as np\n'), ((36974, 37008), 'numpy.mean', 'np.mean', (['gv_src_trg_src[i]'], {'axis': '(0)'}), '(gv_src_trg_src[i], axis=0)\n', (36981, 37008), True, 'import numpy as np\n'), ((39143, 39156), 'numpy.sum', 'np.sum', (['total'], {}), '(total)\n', (39149, 39156), True, 'import numpy as np\n'), ((69483, 69494), 'time.time', 'time.time', ([], {}), '()\n', (69492, 69494), False, 'import time\n'), ((70425, 70455), 'numpy.mean', 'np.mean', (['gv_trg_trg[i]'], {'axis': '(0)'}), '(gv_trg_trg[i], axis=0)\n', (70432, 70455), True, 'import numpy as np\n'), ((70684, 70718), 'numpy.mean', 'np.mean', (['gv_trg_src_trg[i]'], {'axis': '(0)'}), '(gv_trg_src_trg[i], axis=0)\n', (70691, 70718), True, 'import numpy as np\n'), ((70959, 70989), 'numpy.mean', 'np.mean', (['gv_trg_src[i]'], {'axis': '(0)'}), '(gv_trg_src[i], axis=0)\n', (70966, 70989), True, 'import numpy as np\n'), ((71492, 71522), 'numpy.mean', 'np.mean', (['gv_src_src[i]'], {'axis': '(0)'}), '(gv_src_src[i], axis=0)\n', (71499, 71522), True, 'import numpy as np\n'), ((71751, 71785), 'numpy.mean', 'np.mean', (['gv_src_trg_src[i]'], {'axis': '(0)'}), '(gv_src_trg_src[i], axis=0)\n', (71758, 71785), True, 'import numpy as np\n'), ((72026, 72056), 'numpy.mean', 'np.mean', (['gv_src_trg[i]'], {'axis': '(0)'}), '(gv_src_trg[i], axis=0)\n', (72033, 72056), True, 'import numpy as np\n'), ((91972, 92004), 'os.path.dirname', 
'os.path.dirname', (['featfile_src[j]'], {}), '(featfile_src[j])\n', (91987, 92004), False, 'import os\n'), ((99607, 99639), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_src[i]'], {}), '(batch_mcdpow_src_src[i])\n', (99614, 99639), True, 'import numpy as np\n'), ((99641, 99670), 'numpy.mean', 'np.mean', (['batch_mcd_src_src[i]'], {}), '(batch_mcd_src_src[i])\n', (99648, 99670), True, 'import numpy as np\n'), ((99672, 99708), 'numpy.mean', 'np.mean', (['batch_mcdpow_src_trg_src[i]'], {}), '(batch_mcdpow_src_trg_src[i])\n', (99679, 99708), True, 'import numpy as np\n'), ((99710, 99743), 'numpy.mean', 'np.mean', (['batch_mcd_src_trg_src[i]'], {}), '(batch_mcd_src_trg_src[i])\n', (99717, 99743), True, 'import numpy as np\n'), ((100259, 100270), 'time.time', 'time.time', ([], {}), '()\n', (100268, 100270), False, 'import time\n'), ((101709, 101767), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[i], lat_dim=args.lat_dim)\n', (101727, 101767), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((101891, 101949), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[i], lat_dim=args.lat_dim)\n', (101909, 101949), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((102253, 102315), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg[i], lat_dim=args.lat_dim)\n', (102271, 102315), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((102596, 102654), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[0], lat_dim=args.lat_dim)\n', (102614, 102654), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((102778, 102836), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src[0], 
lat_dim=args.lat_dim)\n', (102796, 102836), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((103140, 103202), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg[0], lat_dim=args.lat_dim)\n', (103158, 103202), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((103543, 103575), 'os.path.dirname', 'os.path.dirname', (['featfile_src[j]'], {}), '(featfile_src[j])\n', (103558, 103575), False, 'import os\n'), ((104944, 105005), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2, axis=0)\n', (104951, 105005), True, 'import numpy as np\n'), ((105310, 105368), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg2 - trj_lat_src_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg2 - trj_lat_src_) ** 2, axis=0)\n', (105317, 105368), True, 'import numpy as np\n'), ((107071, 107103), 'os.path.dirname', 'os.path.dirname', (['featfile_src[j]'], {}), '(featfile_src[j])\n', (107086, 107103), False, 'import os\n'), ((45253, 45312), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_[i], lat_dim=args.lat_dim)\n', (45271, 45312), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((45438, 45497), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_[i], lat_dim=args.lat_dim)\n', (45456, 45497), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((45624, 45683), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_[i], lat_dim=args.lat_dim)\n', (45642, 45683), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((45809, 45868), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_[i], 
lat_dim=args.lat_dim)\n', (45827, 45868), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((46356, 46419), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_src_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_src_[i], lat_dim=args.lat_dim)\n', (46374, 46419), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((46549, 46612), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg_[i]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_trg_[i], lat_dim=args.lat_dim)\n', (46567, 46612), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((47035, 47094), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_[0], lat_dim=args.lat_dim)\n', (47053, 47094), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((47220, 47279), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_[0], lat_dim=args.lat_dim)\n', (47238, 47279), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((47406, 47465), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_[0], lat_dim=args.lat_dim)\n', (47424, 47465), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((47591, 47650), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_src_[0], lat_dim=args.lat_dim)\n', (47609, 47650), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((48138, 48201), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_trg_src_[0]'], {'lat_dim': 'args.lat_dim'}), '(batch_lat_trg_src_[0], lat_dim=args.lat_dim)\n', (48156, 48201), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((48331, 48394), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg_[0]'], {'lat_dim': 'args.lat_dim'}), 
'(batch_lat_src_trg_[0], lat_dim=args.lat_dim)\n', (48349, 48394), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((49824, 49885), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg1 - trj_lat_srctrg_) ** 2, axis=0)\n', (49831, 49885), True, 'import numpy as np\n'), ((50202, 50260), 'numpy.mean', 'np.mean', (['((aligned_lat_srctrg2 - trj_lat_src_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_srctrg2 - trj_lat_src_) ** 2, axis=0)\n', (50209, 50260), True, 'import numpy as np\n'), ((51530, 51591), 'numpy.mean', 'np.mean', (['((aligned_lat_trgsrc1 - trj_lat_trgsrc_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_trgsrc1 - trj_lat_trgsrc_) ** 2, axis=0)\n', (51537, 51591), True, 'import numpy as np\n'), ((51908, 51966), 'numpy.mean', 'np.mean', (['((aligned_lat_trgsrc2 - trj_lat_trg_) ** 2)'], {'axis': '(0)'}), '((aligned_lat_trgsrc2 - trj_lat_trg_) ** 2, axis=0)\n', (51915, 51966), True, 'import numpy as np\n'), ((82936, 82965), 'torch.autograd.Variable', 'Variable', (['y_in_pp_src[i].data'], {}), '(y_in_pp_src[i].data)\n', (82944, 82965), False, 'from torch.autograd import Variable\n'), ((83198, 83271), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[i], lat_dim=args.lat_dim, training=True)\n', (83216, 83271), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((83277, 83307), 'torch.autograd.Variable', 'Variable', (['y_in_src_src[i].data'], {}), '(y_in_src_src[i].data)\n', (83285, 83307), False, 'from torch.autograd import Variable\n'), ((83503, 83576), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[i], lat_dim=args.lat_dim, training=True)\n', (83521, 83576), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((83582, 83612), 'torch.autograd.Variable', 'Variable', 
(['y_in_src_trg[i].data'], {}), '(y_in_src_trg[i].data)\n', (83590, 83612), False, 'from torch.autograd import Variable\n'), ((83859, 83892), 'torch.autograd.Variable', 'Variable', (['y_in_pp_src_trg[i].data'], {}), '(y_in_pp_src_trg[i].data)\n', (83867, 83892), False, 'from torch.autograd import Variable\n'), ((84141, 84218), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src_trg[i], lat_dim=args.lat_dim, training=True)\n', (84159, 84218), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((84224, 84258), 'torch.autograd.Variable', 'Variable', (['y_in_src_trg_src[i].data'], {}), '(y_in_src_trg_src[i].data)\n', (84232, 84258), False, 'from torch.autograd import Variable\n'), ((84487, 84516), 'torch.autograd.Variable', 'Variable', (['y_in_pp_src[0].data'], {}), '(y_in_pp_src[0].data)\n', (84495, 84516), False, 'from torch.autograd import Variable\n'), ((84749, 84822), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[0], lat_dim=args.lat_dim, training=True)\n', (84767, 84822), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((84828, 84858), 'torch.autograd.Variable', 'Variable', (['y_in_src_src[0].data'], {}), '(y_in_src_src[0].data)\n', (84836, 84858), False, 'from torch.autograd import Variable\n'), ((85054, 85127), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[0], lat_dim=args.lat_dim, training=True)\n', (85072, 85127), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((85133, 85163), 'torch.autograd.Variable', 'Variable', (['y_in_src_trg[0].data'], {}), '(y_in_src_trg[0].data)\n', (85141, 85163), False, 'from torch.autograd import Variable\n'), ((85410, 85443), 'torch.autograd.Variable', 'Variable', (['y_in_pp_src_trg[0].data'], {}), 
'(y_in_pp_src_trg[0].data)\n', (85418, 85443), False, 'from torch.autograd import Variable\n'), ((85692, 85769), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src_trg[0], lat_dim=args.lat_dim, training=True)\n', (85710, 85769), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((85775, 85809), 'torch.autograd.Variable', 'Variable', (['y_in_src_trg_src[0].data'], {}), '(y_in_src_trg_src[0].data)\n', (85783, 85809), False, 'from torch.autograd import Variable\n'), ((87289, 87362), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[i], lat_dim=args.lat_dim, training=True)\n', (87307, 87362), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((87518, 87591), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[i], lat_dim=args.lat_dim, training=True)\n', (87536, 87591), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((87997, 88074), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[i]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src_trg[i], lat_dim=args.lat_dim, training=True)\n', (88015, 88074), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((88446, 88519), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[0], lat_dim=args.lat_dim, training=True)\n', (88464, 88519), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((88675, 88748), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src[0], lat_dim=args.lat_dim, training=True)\n', (88693, 88748), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), 
((89154, 89231), 'gru_vae.sampling_vae_batch', 'sampling_vae_batch', (['batch_lat_src_trg[0]'], {'lat_dim': 'args.lat_dim', 'training': '(True)'}), '(batch_lat_src_trg[0], lat_dim=args.lat_dim, training=True)\n', (89172, 89231), False, 'from gru_vae import sampling_vae_batch, loss_vae\n'), ((98196, 98228), 'os.path.dirname', 'os.path.dirname', (['featfile_src[j]'], {}), '(featfile_src[j])\n', (98211, 98228), False, 'import os\n'), ((32135, 32224), 'torch.index_select', 'torch.index_select', (['trj_lat_srctrg[i]', '(0)', 'spcidx_src_trg_[i, :flens_spc_src_trg_[i]]'], {}), '(trj_lat_srctrg[i], 0, spcidx_src_trg_[i, :\n flens_spc_src_trg_[i]])\n', (32153, 32224), False, 'import torch\n'), ((32299, 32372), 'torch.index_select', 'torch.index_select', (['trj_lat_src[i]', '(0)', 'spcidx_src_[i, :flens_spc_src_[i]]'], {}), '(trj_lat_src[i], 0, spcidx_src_[i, :flens_spc_src_[i]])\n', (32317, 32372), False, 'import torch\n'), ((82981, 83010), 'torch.autograd.Variable', 'Variable', (['h_in_pp_src[i].data'], {}), '(h_in_pp_src[i].data)\n', (82989, 83010), False, 'from torch.autograd import Variable\n'), ((83323, 83353), 'torch.autograd.Variable', 'Variable', (['h_in_src_src[i].data'], {}), '(h_in_src_src[i].data)\n', (83331, 83353), False, 'from torch.autograd import Variable\n'), ((83628, 83658), 'torch.autograd.Variable', 'Variable', (['h_in_src_trg[i].data'], {}), '(h_in_src_trg[i].data)\n', (83636, 83658), False, 'from torch.autograd import Variable\n'), ((83908, 83941), 'torch.autograd.Variable', 'Variable', (['h_in_pp_src_trg[i].data'], {}), '(h_in_pp_src_trg[i].data)\n', (83916, 83941), False, 'from torch.autograd import Variable\n'), ((84274, 84308), 'torch.autograd.Variable', 'Variable', (['h_in_src_trg_src[i].data'], {}), '(h_in_src_trg_src[i].data)\n', (84282, 84308), False, 'from torch.autograd import Variable\n'), ((84532, 84561), 'torch.autograd.Variable', 'Variable', (['h_in_pp_src[0].data'], {}), '(h_in_pp_src[0].data)\n', (84540, 84561), False, 'from 
torch.autograd import Variable\n'), ((84874, 84904), 'torch.autograd.Variable', 'Variable', (['h_in_src_src[0].data'], {}), '(h_in_src_src[0].data)\n', (84882, 84904), False, 'from torch.autograd import Variable\n'), ((85179, 85209), 'torch.autograd.Variable', 'Variable', (['h_in_src_trg[0].data'], {}), '(h_in_src_trg[0].data)\n', (85187, 85209), False, 'from torch.autograd import Variable\n'), ((85459, 85492), 'torch.autograd.Variable', 'Variable', (['h_in_pp_src_trg[0].data'], {}), '(h_in_pp_src_trg[0].data)\n', (85467, 85492), False, 'from torch.autograd import Variable\n'), ((85825, 85859), 'torch.autograd.Variable', 'Variable', (['h_in_src_trg_src[0].data'], {}), '(h_in_src_trg_src[0].data)\n', (85833, 85859), False, 'from torch.autograd import Variable\n'), ((89421, 89458), 'os.path.dirname', 'os.path.dirname', (['prev_featfile_src[j]'], {}), '(prev_featfile_src[j])\n', (89436, 89458), False, 'import os\n'), ((89538, 89588), 'numpy.var', 'np.var', (['tmp_src_src[j, :prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_src[j, :prev_flens_src[j]], axis=0)\n', (89544, 89588), True, 'import numpy as np\n'), ((89650, 89700), 'numpy.var', 'np.var', (['tmp_src_trg[j, :prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_trg[j, :prev_flens_src[j]], axis=0)\n', (89656, 89700), True, 'import numpy as np\n'), ((89766, 89820), 'numpy.var', 'np.var', (['tmp_src_trg_src[j, :prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_trg_src[j, :prev_flens_src[j]], axis=0)\n', (89772, 89820), True, 'import numpy as np\n'), ((89924, 89974), 'numpy.var', 'np.var', (['tmp_src_src[j, :prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_src[j, :prev_flens_src[j]], axis=0)\n', (89930, 89974), True, 'import numpy as np\n'), ((90036, 90086), 'numpy.var', 'np.var', (['tmp_src_trg[j, :prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_trg[j, :prev_flens_src[j]], axis=0)\n', (90042, 90086), True, 'import numpy as np\n'), ((90152, 90206), 'numpy.var', 'np.var', (['tmp_src_trg_src[j, 
:prev_flens_src[j]]'], {'axis': '(0)'}), '(tmp_src_trg_src[j, :prev_flens_src[j]], axis=0)\n', (90158, 90206), True, 'import numpy as np\n'), ((31501, 31574), 'torch.index_select', 'torch.index_select', (['trj_src_trg[i]', '(0)', 'spcidx_src_[i, :flens_spc_src_[i]]'], {}), '(trj_src_trg[i], 0, spcidx_src_[i, :flens_spc_src_[i]])\n', (31519, 31574), False, 'import torch\n'), ((31620, 31720), 'torch.index_select', 'torch.index_select', (['batch_src_trg_[i][:, stdim:]', '(0)', 'spcidx_src_trg_[i, :flens_spc_src_trg_[i]]'], {}), '(batch_src_trg_[i][:, stdim:], 0, spcidx_src_trg_[i, :\n flens_spc_src_trg_[i]])\n', (31638, 31720), False, 'import torch\n'), ((31831, 31916), 'torch.index_select', 'torch.index_select', (['trj_src_trg[i][:, 1:]', '(0)', 'spcidx_src_[i, :flens_spc_src_[i]]'], {}), '(trj_src_trg[i][:, 1:], 0, spcidx_src_[i, :flens_spc_src_[i]]\n )\n', (31849, 31916), False, 'import torch\n'), ((31956, 32057), 'torch.index_select', 'torch.index_select', (['batch_src_trg_[i][:, stdim_:]', '(0)', 'spcidx_src_trg_[i, :flens_spc_src_trg_[i]]'], {}), '(batch_src_trg_[i][:, stdim_:], 0, spcidx_src_trg_[i, :\n flens_spc_src_trg_[i]])\n', (31974, 32057), False, 'import torch\n'), ((109173, 109258), 'torch.index_select', 'torch.index_select', (['batch_src[j, :, stdim:]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_src[j, :, stdim:], 0, spcidx_src[j, :flens_spc_src[j]]\n )\n', (109191, 109258), False, 'import torch\n'), ((109338, 109424), 'torch.index_select', 'torch.index_select', (['batch_src[j, :, stdim_:]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_src[j, :, stdim_:], 0, spcidx_src[j, :\n flens_spc_src[j]])\n', (109356, 109424), False, 'import torch\n'), ((104480, 104567), 'torch.index_select', 'torch.index_select', (['trj_lat_srctrg[j]', '(0)', 'spcidx_src_trg[j, :flens_spc_src_trg[j]]'], {}), '(trj_lat_srctrg[j], 0, spcidx_src_trg[j, :\n flens_spc_src_trg[j]])\n', (104498, 104567), False, 'import torch\n'), ((104650, 104726), 
'torch.index_select', 'torch.index_select', (['batch_lat_src[0][j]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_lat_src[0][j], 0, spcidx_src[j, :flens_spc_src[j]])\n', (104668, 104726), False, 'import torch\n'), ((109546, 109631), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src[i][j]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_src[i][j], 0, spcidx_src[j, :flens_spc_src[j]]\n )\n', (109564, 109631), False, 'import torch\n'), ((109753, 109845), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src[i][j, :, 1:]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_src[i][j, :, 1:], 0, spcidx_src[j, :\n flens_spc_src[j]])\n', (109771, 109845), False, 'import torch\n'), ((109972, 110061), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src[i][j]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_trg_src[i][j], 0, spcidx_src[j, :\n flens_spc_src[j]])\n', (109990, 110061), False, 'import torch\n'), ((110187, 110283), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src[i][j, :, 1:]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_trg_src[i][j, :, 1:], 0, spcidx_src[j, :\n flens_spc_src[j]])\n', (110205, 110283), False, 'import torch\n'), ((49343, 49432), 'torch.index_select', 'torch.index_select', (['trj_lat_srctrg[j]', '(0)', 'spcidx_src_trg_[j, :flens_spc_src_trg_[j]]'], {}), '(trj_lat_srctrg[j], 0, spcidx_src_trg_[j, :\n flens_spc_src_trg_[j]])\n', (49361, 49432), False, 'import torch\n'), ((49519, 49598), 'torch.index_select', 'torch.index_select', (['batch_lat_src_[0][j]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_lat_src_[0][j], 0, spcidx_src_[j, :flens_spc_src_[j]])\n', (49537, 49598), False, 'import torch\n'), ((51049, 51138), 'torch.index_select', 'torch.index_select', (['trj_lat_trgsrc[j]', '(0)', 'spcidx_trg_src_[j, :flens_spc_trg_src_[j]]'], {}), '(trj_lat_trgsrc[j], 0, spcidx_trg_src_[j, :\n 
flens_spc_trg_src_[j]])\n', (51067, 51138), False, 'import torch\n'), ((51225, 51304), 'torch.index_select', 'torch.index_select', (['batch_lat_trg_[0][j]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_lat_trg_[0][j], 0, spcidx_trg_[j, :flens_spc_trg_[j]])\n', (51243, 51304), False, 'import torch\n'), ((52754, 52842), 'torch.index_select', 'torch.index_select', (['batch_trg_[j, :, stdim:]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trg_[j, :, stdim:], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (52772, 52842), False, 'import torch\n'), ((52930, 53019), 'torch.index_select', 'torch.index_select', (['batch_trg_[j, :, stdim_:]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trg_[j, :, stdim_:], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (52948, 53019), False, 'import torch\n'), ((54725, 54813), 'torch.index_select', 'torch.index_select', (['batch_src_[j, :, stdim:]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_src_[j, :, stdim:], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (54743, 54813), False, 'import torch\n'), ((54901, 54990), 'torch.index_select', 'torch.index_select', (['batch_src_[j, :, stdim_:]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_src_[j, :, stdim_:], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (54919, 54990), False, 'import torch\n'), ((105996, 106081), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg[i][j]', '(0)', 'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_trg[i][j], 0, spcidx_src[j, :flens_spc_src[j]]\n )\n', (106014, 106081), False, 'import torch\n'), ((106122, 106219), 'torch.index_select', 'torch.index_select', (['batch_src_trg[j, :, stdim:]', '(0)', 'spcidx_src_trg[j, :flens_spc_src_trg[j]]'], {}), '(batch_src_trg[j, :, stdim:], 0, spcidx_src_trg[j, :\n flens_spc_src_trg[j]])\n', (106140, 106219), False, 'import torch\n'), ((106338, 106430), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg[i][j, :, 1:]', '(0)', 
'spcidx_src[j, :flens_spc_src[j]]'], {}), '(batch_trj_src_trg[i][j, :, 1:], 0, spcidx_src[j, :\n flens_spc_src[j]])\n', (106356, 106430), False, 'import torch\n'), ((106469, 106567), 'torch.index_select', 'torch.index_select', (['batch_src_trg[j, :, stdim_:]', '(0)', 'spcidx_src_trg[j, :flens_spc_src_trg[j]]'], {}), '(batch_src_trg[j, :, stdim_:], 0, spcidx_src_trg[j, :\n flens_spc_src_trg[j]])\n', (106487, 106567), False, 'import torch\n'), ((53149, 53237), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_trg_[i][j]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_trg_[i][j], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (53167, 53237), False, 'import torch\n'), ((53367, 53462), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_trg_[i][j, :, 1:]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_trg_[i][j, :, 1:], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (53385, 53462), False, 'import torch\n'), ((53597, 53689), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_src_trg_[i][j]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_src_trg_[i][j], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (53615, 53689), False, 'import torch\n'), ((53823, 53922), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_src_trg_[i][j, :, 1:]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_src_trg_[i][j, :, 1:], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (53841, 53922), False, 'import torch\n'), ((54049, 54137), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_src_[i][j]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_src_[i][j], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (54067, 54137), False, 'import torch\n'), ((54178, 54278), 'torch.index_select', 'torch.index_select', (['batch_trg_src_[j, :, stdim:]', '(0)', 'spcidx_trg_src_[j, :flens_spc_trg_src_[j]]'], {}), '(batch_trg_src_[j, :, stdim:], 0, spcidx_trg_src_[j, :\n 
flens_spc_trg_src_[j]])\n', (54196, 54278), False, 'import torch\n'), ((54401, 54496), 'torch.index_select', 'torch.index_select', (['batch_trj_trg_src_[i][j, :, 1:]', '(0)', 'spcidx_trg_[j, :flens_spc_trg_[j]]'], {}), '(batch_trj_trg_src_[i][j, :, 1:], 0, spcidx_trg_[j, :\n flens_spc_trg_[j]])\n', (54419, 54496), False, 'import torch\n'), ((54535, 54636), 'torch.index_select', 'torch.index_select', (['batch_trg_src_[j, :, stdim_:]', '(0)', 'spcidx_trg_src_[j, :flens_spc_trg_src_[j]]'], {}), '(batch_trg_src_[j, :, stdim_:], 0, spcidx_trg_src_[j, :\n flens_spc_trg_src_[j]])\n', (54553, 54636), False, 'import torch\n'), ((55120, 55208), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src_[i][j]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_src_[i][j], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (55138, 55208), False, 'import torch\n'), ((55338, 55433), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src_[i][j, :, 1:]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_src_[i][j, :, 1:], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (55356, 55433), False, 'import torch\n'), ((55568, 55660), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src_[i][j]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_trg_src_[i][j], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (55586, 55660), False, 'import torch\n'), ((55794, 55893), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src_[i][j, :, 1:]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_trg_src_[i][j, :, 1:], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (55812, 55893), False, 'import torch\n'), ((56020, 56108), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_[i][j]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_trg_[i][j], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (56038, 56108), False, 'import torch\n'), ((56149, 56249), 'torch.index_select', 
'torch.index_select', (['batch_src_trg_[j, :, stdim:]', '(0)', 'spcidx_src_trg_[j, :flens_spc_src_trg_[j]]'], {}), '(batch_src_trg_[j, :, stdim:], 0, spcidx_src_trg_[j, :\n flens_spc_src_trg_[j]])\n', (56167, 56249), False, 'import torch\n'), ((56372, 56467), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_[i][j, :, 1:]', '(0)', 'spcidx_src_[j, :flens_spc_src_[j]]'], {}), '(batch_trj_src_trg_[i][j, :, 1:], 0, spcidx_src_[j, :\n flens_spc_src_[j]])\n', (56390, 56467), False, 'import torch\n'), ((56506, 56607), 'torch.index_select', 'torch.index_select', (['batch_src_trg_[j, :, stdim_:]', '(0)', 'spcidx_src_trg_[j, :flens_spc_src_trg_[j]]'], {}), '(batch_src_trg_[j, :, stdim_:], 0, spcidx_src_trg_[j, :\n flens_spc_src_trg_[j]])\n', (56524, 56607), False, 'import torch\n'), ((96423, 96545), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src[i][j]', '(0)', '(spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)'], {}), '(batch_trj_src_src[i][j], 0, spcidx_src[j,\n spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)\n', (96441, 96545), False, 'import torch\n'), ((97200, 97326), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src[i][j]', '(0)', '(spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)'], {}), '(batch_trj_src_trg_src[i][j], 0, spcidx_src[j,\n spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)\n', (97218, 97326), False, 'import torch\n'), ((96274, 96374), 'torch.index_select', 'torch.index_select', (['batch_src[j]', '(0)', 'spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1]'], {}), '(batch_src[j], 0, spcidx_src[j, spcidx_src_s_idx[j]:\n spcidx_src_e_idx[j] + 1])\n', (96292, 96374), False, 'import torch\n'), ((96655, 96755), 'torch.index_select', 'torch.index_select', (['batch_src[j]', '(0)', 'spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1]'], {}), '(batch_src[j], 0, spcidx_src[j, spcidx_src_s_idx[j]:\n spcidx_src_e_idx[j] + 1])\n', (96673, 
96755), False, 'import torch\n'), ((96805, 96927), 'torch.index_select', 'torch.index_select', (['batch_trj_src_src[i][j]', '(0)', '(spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)'], {}), '(batch_trj_src_src[i][j], 0, spcidx_src[j,\n spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)\n', (96823, 96927), False, 'import torch\n'), ((97051, 97151), 'torch.index_select', 'torch.index_select', (['batch_src[j]', '(0)', 'spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1]'], {}), '(batch_src[j], 0, spcidx_src[j, spcidx_src_s_idx[j]:\n spcidx_src_e_idx[j] + 1])\n', (97069, 97151), False, 'import torch\n'), ((97440, 97540), 'torch.index_select', 'torch.index_select', (['batch_src[j]', '(0)', 'spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1]'], {}), '(batch_src[j], 0, spcidx_src[j, spcidx_src_s_idx[j]:\n spcidx_src_e_idx[j] + 1])\n', (97458, 97540), False, 'import torch\n'), ((97590, 97716), 'torch.index_select', 'torch.index_select', (['batch_trj_src_trg_src[i][j]', '(0)', '(spcidx_src[j, spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)'], {}), '(batch_trj_src_trg_src[i][j], 0, spcidx_src[j,\n spcidx_src_s_idx[j]:spcidx_src_e_idx[j] + 1] - src_idx_s)\n', (97608, 97716), False, 'import torch\n')] |
import numpy as np
from scipy.integrate.odepack import odeint
def denbigh_rxn2(input_features, A=(0.2, 0.01, 0.005, 0.005), E=(7000, 3000, 3000, 100)):
    '''
    Returns output concentration for a batch reactor based on the Denbigh reaction.
    Rate constants follow Arrhenius kinetics, k_i = A_i * exp(-E_i / (R*T)), and the
    rate equations come from Chemical Reaction Engineering, Levenspiel 3ed, Ch. 8 p. 194.
    Reaction parameters have been tuned to ensure variability within default trans range.
    :param input_features: 2-D array-like, one row per experiment. The last 2 columns
        must be batch time and temperature; the first n-2 columns are initial
        concentrations of the species (missing trailing species default to zero).
    :param A: Pre-exponent factors for Arrhenius Equation (immutable default — the
        original used mutable list defaults, a classic Python pitfall)
    :param E: Activation energies for Arrhenius Equation
    :return: (rows, 5) array of output concentrations of A, R, T, S, U
    '''
    # Accept plain Python lists as well as ndarrays.
    input_features = np.asarray(input_features, dtype=float)
    numel_row, numel_col = input_features.shape
    # Convert kinetics parameters to arrays once, instead of inside the ODE
    # right-hand side (which odeint evaluates thousands of times).
    A = np.asarray(A, dtype=float)
    E = np.asarray(E, dtype=float)

    def reaction(c, t, T, A, E):
        # Rate equations for the Denbigh network A -> R -> S with side products T, U.
        [Ca, Cr, Ct, Cs, Cu] = c
        [k1, k2, k3, k4] = A * np.exp(-E / (8.314 * T))
        return [
            -(k1 + k2) * Ca ** 2,
            k1 * Ca ** 2 - (k3 + k4) * Cr ** 0.5,
            k2 * Ca ** 2,
            k3 * Cr ** 0.5,
            k4 * Cr ** 0.5,
        ]

    if numel_col < 7:
        # Not all 5 species were given an initial concentration: pad with zero
        # columns so every row is [Ca, Cr, Ct, Cs, Cu, time, temperature].
        conc = input_features[:, :numel_col - 2]
        zeros = np.zeros((numel_row, 7 - numel_col))
        input_features = np.concatenate((conc, zeros, input_features[:, -2:]), axis=1)

    c_out = []
    for i in range(numel_row):
        c0 = input_features[i, :-2]
        t = np.linspace(0, input_features[i, -2], 2000)
        T = input_features[i, -1]
        c = odeint(reaction, c0, t, args=(T, A, E))
        c_out.append(c[-1, :])  # keep only the final (outlet) state
    return np.array(c_out)  # list of 1-D arrays -> (rows, 5) array
| [
"scipy.integrate.odepack.odeint",
"numpy.zeros",
"numpy.array",
"numpy.linspace",
"numpy.concatenate"
] | [((2137, 2152), 'numpy.array', 'np.array', (['c_out'], {}), '(c_out)\n', (2145, 2152), True, 'import numpy as np\n'), ((1749, 1785), 'numpy.zeros', 'np.zeros', (['(numel_row, 7 - numel_col)'], {}), '((numel_row, 7 - numel_col))\n', (1757, 1785), True, 'import numpy as np\n'), ((1812, 1873), 'numpy.concatenate', 'np.concatenate', (['(conc, zeros, input_features[:, -2:])'], {'axis': '(1)'}), '((conc, zeros, input_features[:, -2:]), axis=1)\n', (1826, 1873), True, 'import numpy as np\n'), ((1958, 2001), 'numpy.linspace', 'np.linspace', (['(0)', 'input_features[i, -2]', '(2000)'], {}), '(0, input_features[i, -2], 2000)\n', (1969, 2001), True, 'import numpy as np\n'), ((2050, 2089), 'scipy.integrate.odepack.odeint', 'odeint', (['reaction', 'c0', 't'], {'args': '(T, A, E)'}), '(reaction, c0, t, args=(T, A, E))\n', (2056, 2089), False, 'from scipy.integrate.odepack import odeint\n'), ((1160, 1171), 'numpy.array', 'np.array', (['E'], {}), '(E)\n', (1168, 1171), True, 'import numpy as np\n')] |
import numpy as np
import torch
from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger
from pytorch_toolbelt.utils.torch_utils import (
tensor_from_rgb_image,
rgb_image_from_tensor,
to_numpy,
)
from torch import nn
from torch.utils.data import DataLoader
import pytest
# Marker for tests that need a CUDA device; evaluated once at import time.
_cuda_available = torch.cuda.is_available()
skip_if_no_cuda = pytest.mark.skipif(not _cuda_available, reason="Cuda is not available")
def test_tiles_split_merge():
    """Splitting an image into tiles and merging them back must be lossless."""
    # NOTE(review): np.random.random yields floats in [0, 1), so astype(np.uint8)
    # truncates everything to 0 — the round trip is tested on an all-zero image.
    # np.random.randint was probably intended; confirm before changing.
    img = np.random.random((500, 500, 3)).astype(np.uint8)
    slicer = ImageSlicer(img.shape, tile_size=51, tile_step=26, weight="mean")
    restored = slicer.merge(slicer.split(img), dtype=np.uint8)
    np.testing.assert_equal(restored, img)
def test_tiles_split_merge_non_dividable():
    """Split/merge must still be lossless when the image size is not a multiple of the tile size."""
    img = np.random.random((563, 512, 3)).astype(np.uint8)
    slicer = ImageSlicer(
        img.shape, tile_size=(128, 128), tile_step=(128, 128), weight="mean"
    )
    restored = slicer.merge(slicer.split(img), dtype=np.uint8)
    np.testing.assert_equal(restored, img)
@skip_if_no_cuda
def test_tiles_split_merge_non_dividable_cuda():
    """GPU round trip: split on CPU, accumulate tiles with CudaTileMerger, compare to input."""
    # NOTE(review): np.random.random gives floats in [0, 1); astype(np.uint8)
    # truncates them all to 0, so this exercises an (almost) all-zero image.
    image = np.random.random((5632, 5120, 3)).astype(np.uint8)
    tiler = ImageSlicer(
        image.shape, tile_size=(1280, 1280), tile_step=(1280, 1280), weight="mean"
    )
    tiles = tiler.split(image)
    merger = CudaTileMerger(
        tiler.target_shape, channels=image.shape[2], weight=tiler.weight
    )
    for tile, coordinates in zip(tiles, tiler.crops):
        # Integrate as batch of size 1
        merger.integrate_batch(
            tensor_from_rgb_image(tile).unsqueeze(0).float().cuda(), [coordinates]
        )
    merged = merger.merge()
    # Identity normalization (mean=0, std=1) just converts the tensor back to an image.
    merged = rgb_image_from_tensor(merged, mean=0, std=1, max_pixel_value=1)
    # 'crop_to_orignal_size' is presumably the library's own (misspelled) API name — verify upstream.
    merged = tiler.crop_to_orignal_size(merged)
    np.testing.assert_equal(merged, image)
def test_tiles_split_merge_2():
    """Overlapping, pyramid-weighted tiles must still reassemble losslessly."""
    img = np.random.random((5000, 5000, 3)).astype(np.uint8)
    slicer = ImageSlicer(
        img.shape, tile_size=(512, 512), tile_step=(256, 256), weight="pyramid"
    )
    # The pyramid weight matrix should be symmetric.
    np.testing.assert_allclose(slicer.weight, slicer.weight.T)
    restored = slicer.merge(slicer.split(img), dtype=np.uint8)
    np.testing.assert_equal(restored, img)
@skip_if_no_cuda
def test_tiles_split_merge_cuda():
    """Run a per-tile model on GPU over batched tiles and merge the predictions.

    The model collapses RGB to a single max-intensity channel, so the merged
    result is compared against ``image.max(axis=2, keepdims=True)``.
    """
    class MaxChannelIntensity(nn.Module):
        # Tiny stand-in model: channel-wise max, keeping a singleton channel dim.
        def __init__(self):
            super().__init__()
        def forward(self, input):
            max_channel, _ = torch.max(input, dim=1, keepdim=True)
            return max_channel
    # NOTE(review): random floats in [0, 1) truncate to 0 under astype(np.uint8).
    image = np.random.random((5000, 5000, 3)).astype(np.uint8)
    tiler = ImageSlicer(
        image.shape, tile_size=(512, 512), tile_step=(256, 256), weight="pyramid"
    )
    tiles = [tensor_from_rgb_image(tile) for tile in tiler.split(image)]
    model = MaxChannelIntensity().eval().cuda()
    # Merger accumulates single-channel predictions (channels=1).
    merger = CudaTileMerger(tiler.target_shape, 1, tiler.weight)
    for tiles_batch, coords_batch in DataLoader(
        list(zip(tiles, tiler.crops)), batch_size=8, pin_memory=True
    ):
        tiles_batch = tiles_batch.float().cuda()
        pred_batch = model(tiles_batch)
        merger.integrate_batch(pred_batch, coords_batch)
    # CHW tensor -> HWC image before comparison.
    merged = np.moveaxis(to_numpy(merger.merge()), 0, -1).astype(np.uint8)
    merged = tiler.crop_to_orignal_size(merged)
    np.testing.assert_equal(merged, image.max(axis=2, keepdims=True))
| [
"pytorch_toolbelt.utils.torch_utils.rgb_image_from_tensor",
"pytorch_toolbelt.inference.tiles.CudaTileMerger",
"pytorch_toolbelt.inference.tiles.ImageSlicer",
"numpy.random.random",
"torch.cuda.is_available",
"torch.max",
"numpy.testing.assert_equal",
"numpy.testing.assert_allclose",
"pytorch_toolbe... | [((512, 579), 'pytorch_toolbelt.inference.tiles.ImageSlicer', 'ImageSlicer', (['image.shape'], {'tile_size': '(51)', 'tile_step': '(26)', 'weight': '"""mean"""'}), "(image.shape, tile_size=51, tile_step=26, weight='mean')\n", (523, 579), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((663, 701), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['merged', 'image'], {}), '(merged, image)\n', (686, 701), True, 'import numpy as np\n'), ((821, 909), 'pytorch_toolbelt.inference.tiles.ImageSlicer', 'ImageSlicer', (['image.shape'], {'tile_size': '(128, 128)', 'tile_step': '(128, 128)', 'weight': '"""mean"""'}), "(image.shape, tile_size=(128, 128), tile_step=(128, 128), weight\n ='mean')\n", (832, 909), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((1002, 1040), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['merged', 'image'], {}), '(merged, image)\n', (1025, 1040), True, 'import numpy as np\n'), ((1185, 1276), 'pytorch_toolbelt.inference.tiles.ImageSlicer', 'ImageSlicer', (['image.shape'], {'tile_size': '(1280, 1280)', 'tile_step': '(1280, 1280)', 'weight': '"""mean"""'}), "(image.shape, tile_size=(1280, 1280), tile_step=(1280, 1280),\n weight='mean')\n", (1196, 1276), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((1332, 1417), 'pytorch_toolbelt.inference.tiles.CudaTileMerger', 'CudaTileMerger', (['tiler.target_shape'], {'channels': 'image.shape[2]', 'weight': 'tiler.weight'}), '(tiler.target_shape, channels=image.shape[2], weight=tiler.weight\n )\n', (1346, 1417), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((1687, 1750), 'pytorch_toolbelt.utils.torch_utils.rgb_image_from_tensor', 'rgb_image_from_tensor', (['merged'], {'mean': '(0)', 'std': '(1)', 'max_pixel_value': '(1)'}), '(merged, mean=0, std=1, max_pixel_value=1)\n', (1708, 1750), False, 'from 
pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image, rgb_image_from_tensor, to_numpy\n'), ((1804, 1842), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['merged', 'image'], {}), '(merged, image)\n', (1827, 1842), True, 'import numpy as np\n'), ((1952, 2043), 'pytorch_toolbelt.inference.tiles.ImageSlicer', 'ImageSlicer', (['image.shape'], {'tile_size': '(512, 512)', 'tile_step': '(256, 256)', 'weight': '"""pyramid"""'}), "(image.shape, tile_size=(512, 512), tile_step=(256, 256), weight\n ='pyramid')\n", (1963, 2043), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((2058, 2114), 'numpy.testing.assert_allclose', 'np.testing.assert_allclose', (['tiler.weight', 'tiler.weight.T'], {}), '(tiler.weight, tiler.weight.T)\n', (2084, 2114), True, 'import numpy as np\n'), ((2199, 2237), 'numpy.testing.assert_equal', 'np.testing.assert_equal', (['merged', 'image'], {}), '(merged, image)\n', (2222, 2237), True, 'import numpy as np\n'), ((2602, 2693), 'pytorch_toolbelt.inference.tiles.ImageSlicer', 'ImageSlicer', (['image.shape'], {'tile_size': '(512, 512)', 'tile_step': '(256, 256)', 'weight': '"""pyramid"""'}), "(image.shape, tile_size=(512, 512), tile_step=(256, 256), weight\n ='pyramid')\n", (2613, 2693), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((2839, 2890), 'pytorch_toolbelt.inference.tiles.CudaTileMerger', 'CudaTileMerger', (['tiler.target_shape', '(1)', 'tiler.weight'], {}), '(tiler.target_shape, 1, tiler.weight)\n', (2853, 2890), False, 'from pytorch_toolbelt.inference.tiles import ImageSlicer, CudaTileMerger\n'), ((347, 372), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (370, 372), False, 'import torch\n'), ((2716, 2743), 'pytorch_toolbelt.utils.torch_utils.tensor_from_rgb_image', 'tensor_from_rgb_image', (['tile'], {}), '(tile)\n', (2737, 2743), False, 'from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image, 
rgb_image_from_tensor, to_numpy\n'), ((451, 482), 'numpy.random.random', 'np.random.random', (['(500, 500, 3)'], {}), '((500, 500, 3))\n', (467, 482), True, 'import numpy as np\n'), ((760, 791), 'numpy.random.random', 'np.random.random', (['(563, 512, 3)'], {}), '((563, 512, 3))\n', (776, 791), True, 'import numpy as np\n'), ((1122, 1155), 'numpy.random.random', 'np.random.random', (['(5632, 5120, 3)'], {}), '((5632, 5120, 3))\n', (1138, 1155), True, 'import numpy as np\n'), ((1889, 1922), 'numpy.random.random', 'np.random.random', (['(5000, 5000, 3)'], {}), '((5000, 5000, 3))\n', (1905, 1922), True, 'import numpy as np\n'), ((2457, 2494), 'torch.max', 'torch.max', (['input'], {'dim': '(1)', 'keepdim': '(True)'}), '(input, dim=1, keepdim=True)\n', (2466, 2494), False, 'import torch\n'), ((2539, 2572), 'numpy.random.random', 'np.random.random', (['(5000, 5000, 3)'], {}), '((5000, 5000, 3))\n', (2555, 2572), True, 'import numpy as np\n'), ((1564, 1591), 'pytorch_toolbelt.utils.torch_utils.tensor_from_rgb_image', 'tensor_from_rgb_image', (['tile'], {}), '(tile)\n', (1585, 1591), False, 'from pytorch_toolbelt.utils.torch_utils import tensor_from_rgb_image, rgb_image_from_tensor, to_numpy\n')] |
import datetime
import functools
import multiprocessing as mp
import os
import sys
import time
from pprint import pprint
from timeit import Timer
import numpy as np
import pandas as pd
import psutil
import pyarrow as pa
import pyarrow.compute as pc
# import ray
from util import run_timer
def get_sample_data_arrow(rows):
    """Build a pyarrow RecordBatch with one string column and 201 float columns.

    Column "0" holds labels "sec_0".."sec_{rows-1}"; columns "1".."201" hold
    uniform-random floats. Column names are stringified positional indices.

    The original defined an unused ``get_bool_array`` lambda; that dead local
    has been removed.

    :param rows: number of rows in the batch.
    :return: pa.RecordBatch with 202 columns.
    """
    random_cols = [pa.array(np.random.rand(rows)) for _ in range(200)]
    data = [
        pa.array([f"sec_{i}" for i in range(rows)]),
        pa.array(np.random.rand(rows)),
        *random_cols,
    ]
    return pa.RecordBatch.from_arrays(data, names=[f"{x}" for x in range(len(data))])
def get_sample_data_numpy(rows):
    """Return a (rows, 200) array of uniform-random floats in [0, 1)."""
    return np.random.random((rows, 200))
def process_numpy_mapped(args):
    """Worker for run_with_numpy_mapped.

    ``args`` is a 1-tuple holding the column index. Memory-maps the shared
    /tmp/sample.npy file, sums the selected column to force a read, and
    returns this worker process's RSS in bytes.
    """
    (col_index,) = args
    mapped = np.load("/tmp/sample.npy", mmap_mode='r')
    np.sum(mapped[:, col_index])  # reduction forces the mapped pages to be read
    return psutil.Process(os.getpid()).memory_info().rss
def run_with_numpy_mapped(batch, cols):
    """Save ``batch`` to /tmp/sample.npy and sum columns 1..cols-1 across 5 workers.

    Returns the worker RSS values summed and divided by ``cols``.
    """
    np.save("/tmp/sample.npy", batch)
    tasks = [(c,) for c in range(1, cols)]
    with mp.Pool(5) as pool:
        rss_values = pool.map(process_numpy_mapped, tasks)
    return sum(rss_values) / cols
def process_batch(args):
    """Worker: sum one column of a table shipped with the task, return RSS in bytes.

    ``args`` is (table, column index) — the whole table is serialized to each
    worker, which is exactly what this benchmark variant measures.
    """
    table, col = args
    pc.sum(table[col])  # reduction forces the column to be touched
    return psutil.Process(os.getpid()).memory_info().rss
def run_with_batch(data, row, cols):
    """Fan out per-column sums of ``data`` over 5 workers; ``row`` is unused
    (kept so existing callers keep working). Returns summed worker RSS / cols."""
    tasks = [(data, c) for c in range(1, cols)]
    with mp.Pool(5) as pool:
        total = sum(pool.map(process_batch, tasks))
    return total / cols
def process_batch_arrow_file(args):
    """Worker: re-open /tmp/sample.arrow via OSFile, sum one column, return RSS in bytes."""
    (col,) = args
    with pa.OSFile("/tmp/sample.arrow", "rb") as source:
        table = pa.ipc.open_file(source).read_all()
    pc.sum(table[col])  # reduction forces the column to be read
    return psutil.Process(os.getpid()).memory_info().rss
def run_with_batch_arrow_file(batch, cols):
    """Write ``batch`` to /tmp/sample.arrow, then sum columns 1..cols-1 in 5
    workers that each re-open the file. Returns summed worker RSS / cols."""
    with pa.OSFile("/tmp/sample.arrow", "wb") as sink:
        with pa.RecordBatchFileWriter(sink, batch.schema) as writer:
            writer.write_table(batch)
    tasks = [(c,) for c in range(1, cols)]
    with mp.Pool(5) as pool:
        total = sum(pool.map(process_batch_arrow_file, tasks))
    return total / cols
def process_batch_mapped(args):
    """Worker: memory-map /tmp/sample.arrow, sum one column, return RSS in bytes."""
    (col,) = args
    with pa.memory_map("/tmp/sample.arrow", "r") as mapped:
        table = pa.ipc.RecordBatchFileReader(mapped).read_all()
    pc.sum(table[col])  # reduction forces the mapped pages to be touched
    return psutil.Process(os.getpid()).memory_info().rss
def run_with_batch_mapped(batch, cols):
    """Write ``batch`` to /tmp/sample.arrow, then sum columns 1..cols-1 in 5
    workers that each memory-map the file. Returns summed worker RSS / cols.

    Note: the divisor is ``cols`` even though only cols-1 tasks run — kept
    identical to the original so results stay comparable.
    """
    with pa.OSFile("/tmp/sample.arrow", "wb") as sink:
        with pa.RecordBatchFileWriter(sink, batch.schema) as writer:
            writer.write_table(batch)
    tasks = [(c,) for c in range(1, cols)]
    with mp.Pool(5) as pool:
        total = sum(pool.map(process_batch_mapped, tasks))
    return total / cols
def capture_times_func(label, log_fp):
    """Return a callback that appends a ``label,delta`` CSV line to ``log_fp``.

    The file is opened in append mode on every invocation so concurrent runs
    simply accumulate lines.

    :param label: identifier written as the first CSV field.
    :param log_fp: path of the log file.
    :return: a one-argument callable ``f(delta)`` that records the timing.
    """
    def f(delta):
        # Use a distinct handle name — the original did `with open(...) as f`,
        # shadowing the closure's own name inside its body.
        with open(log_fp, "a") as log_file:
            log_file.write(f"{label},{delta}\n")
    return f
def run_test4():
    """Benchmark worker-process memory usage (RSS) for four ways of sharing
    a large table with a multiprocessing pool: numpy memmap, table shipped
    in the task args, Arrow IPC file re-read from disk, and memory-mapped
    Arrow IPC file.

    Writes per-run durations to /tmp/test4.txt and the averaged memory
    figures to /tmp/memory_test4.txt.
    """
    filename = "/tmp/test4.txt"
    header = ",".join(
        (
            "label",
            "duration",
        )
    )
    # Start the timing log with a CSV header row.
    with open(filename, "w") as f:
        f.write(f"{header}\n")
    memory_used = dict()
    for rows in (
        10_000,
        100_000,
        500_000,
        1_000_000,
        # 2_000_000,
    ):  # , (10000, 100), (100000, 100), (1000000, 100)):
        print(f"Running for {rows=}")
        # Strategy 1: raw numpy array shared via np.save + mmap_mode='r'.
        array = get_sample_data_numpy(rows)
        callback = capture_times_func(f"numpy_memmap{rows}_{200}", filename)
        result = run_timer(run_with_numpy_mapped, callback)(array, 200)
        memory_used[f"numpy_memmap{rows}_{200}"] = result
        # Strategy 2: full table shipped to every worker inside the task args.
        # NOTE: get_sample_data_arrow is defined elsewhere in this module.
        table = pa.Table.from_batches([get_sample_data_arrow(rows)])
        callback = capture_times_func(f"recordbatch_{rows}_{200}", filename)
        result = run_timer(run_with_batch, callback)(table, rows, 200)
        memory_used[f"recordbatch_{rows}_{200}"] = result
        # Strategy 3: workers re-read an Arrow IPC file from disk.
        callback = capture_times_func(f"arrow-file_{rows}_{200}", filename)
        result = run_timer(run_with_batch_arrow_file, callback)(
            table, table.num_columns
        )
        memory_used[f"arrow-file_{rows}_{200}"] = result
        # Strategy 4: workers memory-map the Arrow IPC file.
        callback = capture_times_func(f"mapped-arrow-file_{rows}_{200}", filename) #
        result = run_timer(run_with_batch_mapped, callback)(table, table.num_columns)
        memory_used[f"mapped-arrow-file_{rows}_{200}"] = result
        # Free the table before the next, larger iteration.
        del table
        # run_timer(run_with_batch_map_shared)(table, table.num_columns)
    # Dump the averaged per-strategy RSS numbers as a second CSV.
    with open("/tmp/memory_test4.txt", "w") as f:
        f.write(f"label,size\n")
        for label, size in memory_used.items():
            f.write(f"{label},{size}\n")
# Entry point: run the benchmark suite when executed as a script.
if __name__ == "__main__":
    run_test4()
| [
"numpy.load",
"numpy.save",
"numpy.sum",
"util.run_timer",
"os.getpid",
"pyarrow.OSFile",
"pyarrow.ipc.open_file",
"multiprocessing.Pool",
"numpy.random.rand",
"pyarrow.ipc.RecordBatchFileReader",
"pyarrow.RecordBatchFileWriter",
"pyarrow.memory_map",
"pyarrow.compute.sum"
] | [((766, 791), 'numpy.random.rand', 'np.random.rand', (['rows', '(200)'], {}), '(rows, 200)\n', (780, 791), True, 'import numpy as np\n'), ((861, 902), 'numpy.load', 'np.load', (['"""/tmp/sample.npy"""'], {'mmap_mode': '"""r"""'}), "('/tmp/sample.npy', mmap_mode='r')\n", (868, 902), True, 'import numpy as np\n'), ((916, 939), 'numpy.sum', 'np.sum', (['array[:, index]'], {}), '(array[:, index])\n', (922, 939), True, 'import numpy as np\n'), ((1043, 1076), 'numpy.save', 'np.save', (['"""/tmp/sample.npy"""', 'batch'], {}), "('/tmp/sample.npy', batch)\n", (1050, 1076), True, 'import numpy as np\n'), ((1280, 1294), 'pyarrow.compute.sum', 'pc.sum', (['values'], {}), '(values)\n', (1286, 1294), True, 'import pyarrow.compute as pc\n'), ((1087, 1097), 'multiprocessing.Pool', 'mp.Pool', (['(5)'], {}), '(5)\n', (1094, 1097), True, 'import multiprocessing as mp\n'), ((1400, 1410), 'multiprocessing.Pool', 'mp.Pool', (['(5)'], {}), '(5)\n', (1407, 1410), True, 'import multiprocessing as mp\n'), ((1569, 1605), 'pyarrow.OSFile', 'pa.OSFile', (['"""/tmp/sample.arrow"""', '"""rb"""'], {}), "('/tmp/sample.arrow', 'rb')\n", (1578, 1605), True, 'import pyarrow as pa\n'), ((1688, 1710), 'pyarrow.compute.sum', 'pc.sum', (['batches[index]'], {}), '(batches[index])\n', (1694, 1710), True, 'import pyarrow.compute as pc\n'), ((1827, 1863), 'pyarrow.OSFile', 'pa.OSFile', (['"""/tmp/sample.arrow"""', '"""wb"""'], {}), "('/tmp/sample.arrow', 'wb')\n", (1836, 1863), True, 'import pyarrow as pa\n'), ((1990, 2000), 'multiprocessing.Pool', 'mp.Pool', (['(5)'], {}), '(5)\n', (1997, 2000), True, 'import multiprocessing as mp\n'), ((2186, 2225), 'pyarrow.memory_map', 'pa.memory_map', (['"""/tmp/sample.arrow"""', '"""r"""'], {}), "('/tmp/sample.arrow', 'r')\n", (2199, 2225), True, 'import pyarrow as pa\n'), ((2320, 2342), 'pyarrow.compute.sum', 'pc.sum', (['batches[index]'], {}), '(batches[index])\n', (2326, 2342), True, 'import pyarrow.compute as pc\n'), ((2455, 2491), 'pyarrow.OSFile', 
'pa.OSFile', (['"""/tmp/sample.arrow"""', '"""wb"""'], {}), "('/tmp/sample.arrow', 'wb')\n", (2464, 2491), True, 'import pyarrow as pa\n'), ((2618, 2628), 'multiprocessing.Pool', 'mp.Pool', (['(5)'], {}), '(5)\n', (2625, 2628), True, 'import multiprocessing as mp\n'), ((450, 470), 'numpy.random.rand', 'np.random.rand', (['rows'], {}), '(rows)\n', (464, 470), True, 'import numpy as np\n'), ((576, 596), 'numpy.random.rand', 'np.random.rand', (['rows'], {}), '(rows)\n', (590, 596), True, 'import numpy as np\n'), ((1886, 1930), 'pyarrow.RecordBatchFileWriter', 'pa.RecordBatchFileWriter', (['sink', 'batch.schema'], {}), '(sink, batch.schema)\n', (1910, 1930), True, 'import pyarrow as pa\n'), ((2514, 2558), 'pyarrow.RecordBatchFileWriter', 'pa.RecordBatchFileWriter', (['sink', 'batch.schema'], {}), '(sink, batch.schema)\n', (2538, 2558), True, 'import pyarrow as pa\n'), ((3520, 3562), 'util.run_timer', 'run_timer', (['run_with_numpy_mapped', 'callback'], {}), '(run_with_numpy_mapped, callback)\n', (3529, 3562), False, 'from util import run_timer\n'), ((3799, 3834), 'util.run_timer', 'run_timer', (['run_with_batch', 'callback'], {}), '(run_with_batch, callback)\n', (3808, 3834), False, 'from util import run_timer\n'), ((4005, 4051), 'util.run_timer', 'run_timer', (['run_with_batch_arrow_file', 'callback'], {}), '(run_with_batch_arrow_file, callback)\n', (4014, 4051), False, 'from util import run_timer\n'), ((4261, 4303), 'util.run_timer', 'run_timer', (['run_with_batch_mapped', 'callback'], {}), '(run_with_batch_mapped, callback)\n', (4270, 4303), False, 'from util import run_timer\n'), ((1635, 1659), 'pyarrow.ipc.open_file', 'pa.ipc.open_file', (['source'], {}), '(source)\n', (1651, 1659), True, 'import pyarrow as pa\n'), ((2255, 2291), 'pyarrow.ipc.RecordBatchFileReader', 'pa.ipc.RecordBatchFileReader', (['source'], {}), '(source)\n', (2283, 2291), True, 'import pyarrow as pa\n'), ((966, 977), 'os.getpid', 'os.getpid', ([], {}), '()\n', (975, 977), False, 'import 
os\n'), ((1321, 1332), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1330, 1332), False, 'import os\n'), ((1741, 1752), 'os.getpid', 'os.getpid', ([], {}), '()\n', (1750, 1752), False, 'import os\n'), ((2373, 2384), 'os.getpid', 'os.getpid', ([], {}), '()\n', (2382, 2384), False, 'import os\n')] |
import pandas
import gensim
import re
import json
from gensim.test.utils import common_texts
from gensim.models.doc2vec import Doc2Vec, TaggedDocument
from translate import Translator
from translate.providers import MyMemoryProvider
from iccs_manager import ICCS, MatchedCategory
from sortedcontainers import SortedList
import numpy as np
from scipy import spatial
####### INTERNAL USE FUNCTIONS #########
def print_decorator(source):
    '''
    Debugging helper: wraps [source] so each return value is printed
    before being passed back to the caller.
    '''
    def print_output(*args, **kwargs):
        outcome = source(*args, **kwargs)
        print(outcome)
        return outcome
    return print_output
def translate_list(data, source, dest='en'):
    '''
    Translates every element of [data] from the [source] language to the
    [dest] language, printing one "*" per item as a progress indicator.
    '''
    translator = Translator(provider='mymemory', from_lang=source, to_lang=dest, email='<EMAIL>')
    translated = []
    for entry in data:
        translated.append(translator.translate(entry))
        print("*")
    return translated
def write_to_file(filename, data):
    '''
    Writes the JSON encoding of [data] to the [filename] path (UTF-8,
    non-ASCII characters kept literal).
    '''
    with open(filename, 'w', encoding='utf8') as sink:
        sink.write(json.dumps(data, ensure_ascii=False))
def read_file(filename):
    '''
    Loads and returns the JSON content of [filename] (UTF-8).

    Fix: the original opened the file without ever closing it
    (open(...).read() with no handle kept) — use a context manager so the
    descriptor is released deterministically.
    '''
    with open(filename, encoding='utf-8') as json_file:
        return json.load(json_file)
def avg_feature_vector(sentence, model, num_features, index2word_set):
    """Mean embedding of the in-vocabulary words of *sentence*.

    Returns a float32 vector of length *num_features*, or an empty array
    when no word of the sentence is in *index2word_set*.
    """
    accum = np.zeros((num_features, ), dtype='float32')
    matched = 0
    for token in sentence.split():
        if token in index2word_set:
            matched += 1
            accum = np.add(accum, model[token])
    if matched > 0:
        return np.divide(accum, matched)
    return np.array([])
######## COUNTRY SPECIFIC FUNCTIONS ###############
# used to remember regular expressions used and additional info
def austria_processor(data):
    """Clean Austrian crime labels: drop entries containing "Annotations",
    strip <...> markup from the rest, and print a running count of kept items.

    NOTE(review): the pattern r'\<.*\>' is greedy — with tags at both ends
    of a label it removes everything between the first '<' and last '>'.
    """
    cleaned = []
    for raw in data:
        if "Annotations" in raw:
            continue
        cleaned.append(re.sub(r'\<.*\>', '', raw).strip())
        print(len(cleaned) - 1)
    return cleaned
def germany_processor(data):
    '''
    Drops parenthesised suffixes from German crime labels, printing a
    running index per processed item.

    NOTE: the English translation came from a different table than the
    data and might not correspond — important info might have been cut.
    '''
    cleaned = []
    for position, raw in enumerate(data):
        cleaned.append(re.sub(r'\(.*\)', '', raw).strip())
        print(position)
    return cleaned
def luxembourg_processor(data):
    """Pass-through: Luxembourg labels need no cleanup before matching."""
    return data
def hungary_processor(data):
    """Pass-through: Hungarian labels need no cleanup before matching."""
    return data
def czech_processor(data):
    """Pass-through: Czech labels need no cleanup before matching."""
    return data
def nederland_processor(data):
    """Strip numeric section markers such as "1.2" from Dutch labels."""
    pattern = re.compile(r'[0-9](\.[0-9])*')
    return [pattern.sub('', elem).strip() for elem in data]
def spain_processor(data):
    """Strip leading enumeration markers such as "1.-" from Spanish labels."""
    pattern = re.compile(r'[0-9]*(\.[0-9])*\.-*')
    return [pattern.sub('', elem).strip() for elem in data]
def denmark_processor(data):
    """Danish labels: skip entries marked 'Repealed'; strip "(...)" from
    entries marked 'New'; keep everything else unchanged."""
    kept = []
    for entry in data:
        if 'Repealed' in entry:
            continue
        if 'New' in entry:
            kept.append(re.sub(r'\(.*\)', '', entry).strip())
        else:
            kept.append(entry)
    return kept
def portugal_processor(data):
    """Pass-through: Portuguese labels need no cleanup before matching."""
    return data
def poland_processor(data):
    """Pass-through: Polish labels need no cleanup before matching."""
    return data
def bulgaria_processor(data):
    '''
    Pass-through: Bulgarian labels need no cleanup before matching.
    Explanation of categories:
    https://www.nsi.bg/en/content/6247/crimes-chapters-penal-code-and-some-kind-crimes-and-according-results-proceedings
    '''
    return data
def cyprus_processor(data):
    '''
    Pass-through: the Cypriot source already distinguishes
    serious crime + minor offences, so no cleanup is needed.
    '''
    return data
def italy_processor(data):
    """Pass-through: Italian labels need no cleanup before matching."""
    return data
def france_processor(data):
    '''
    Pass-through; 4 unused indexes were already removed upstream.
    '''
    return data
def norther_ireland_processor(data):
    '''
    Pass-through. TODO: check for nested categories; provincial data
    could not be found.
    '''
    return data
def england_processor(data):
    """Pass-through: English labels need no cleanup before matching."""
    return data
def finland_processor(data):
    '''
    Pass-through: the label structure is too complex for
    regular-expression cleanup.
    '''
    return data
def belgium_processor(data):
    """Pass-through: Belgian labels need no cleanup before matching."""
    return data
####### PUBLIC USE FUNCTIONS ##########
def match_list(data):
    '''
    Matches labels in [data] to ICCS categories using avg_feature_vector.

    Loads the GoogleNews word2vec model, embeds every label and every ICCS
    category name, and keeps the 5 most cosine-similar categories per label.
    Returns {label: [MatchedCategory, ...]} ordered best-first; labels with
    no in-vocabulary words map to a single empty MatchedCategory.
    '''
    model = gensim.models.KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)
    index2word_set = set(model.index2word)
    best_matching = dict.fromkeys(data)
    print("done loading model...")
    for crime in data:
        print("Matching crime category: " + crime)
        crime_vec = avg_feature_vector(crime, model=model, num_features=300, index2word_set=index2word_set)
        # SortedList keeps candidates ascending by similarity.
        similarity_ranking = SortedList(key= lambda x: x.get_similarity())
        if crime_vec.size == 0:
            # No word of the label is in the model's vocabulary.
            best_matching[crime] = [MatchedCategory("", "", 0)]
            continue
        for code in ICCS:
            name = ICCS[code]
            potential_vec = avg_feature_vector(name, model=model, num_features=300, index2word_set=index2word_set)
            # Cosine similarity in [-1, 1]; higher is closer.
            similarity = 1 - spatial.distance.cosine(crime_vec, potential_vec)
            matching = MatchedCategory(code, name, similarity)
            similarity_ranking.add(matching)
        # Take the top 5, best first.
        best_matching[crime] = list(reversed(similarity_ranking[-5:]))
    return best_matching
def save_matching(filename, data):
    '''
    Saves the best match per label contained in [data] to [filename].

    NOTE: mutates [data] in place, replacing each ranked match list with a
    (code, name) tuple for the top match before writing the JSON.
    '''
    for label in data:
        top = data[label][0]
        data[label] = (top.get_code(), top.get_name())
    write_to_file(filename, data)
if __name__ == '__main__':
    # Example end-to-end run for Luxembourg, currently disabled (the body
    # is a no-op string literal).
    '''
    result = match_list(read_file('luxembourg/luxembourg-translated.txt'))
    save_matching('luxembourg/luxembourg-matching.txt', result)
    '''
"json.dump",
"numpy.divide",
"scipy.spatial.distance.cosine",
"numpy.zeros",
"iccs_manager.MatchedCategory",
"numpy.array",
"translate.Translator",
"gensim.models.KeyedVectors.load_word2vec_format",
"numpy.add",
"re.sub"
] | [((851, 936), 'translate.Translator', 'Translator', ([], {'provider': '"""mymemory"""', 'from_lang': 'source', 'to_lang': 'dest', 'email': '"""<EMAIL>"""'}), "(provider='mymemory', from_lang=source, to_lang=dest, email='<EMAIL>'\n )\n", (861, 936), False, 'from translate import Translator\n'), ((1556, 1598), 'numpy.zeros', 'np.zeros', (['(num_features,)'], {'dtype': '"""float32"""'}), "((num_features,), dtype='float32')\n", (1564, 1598), True, 'import numpy as np\n'), ((4369, 4474), 'gensim.models.KeyedVectors.load_word2vec_format', 'gensim.models.KeyedVectors.load_word2vec_format', (['"""./GoogleNews-vectors-negative300.bin"""'], {'binary': '(True)'}), "(\n './GoogleNews-vectors-negative300.bin', binary=True)\n", (4416, 4474), False, 'import gensim\n'), ((1235, 1281), 'json.dump', 'json.dump', (['data', 'json_file'], {'ensure_ascii': '(False)'}), '(data, json_file, ensure_ascii=False)\n', (1244, 1281), False, 'import json\n'), ((1802, 1833), 'numpy.divide', 'np.divide', (['feature_vec', 'n_words'], {}), '(feature_vec, n_words)\n', (1811, 1833), True, 'import numpy as np\n'), ((1886, 1898), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (1894, 1898), True, 'import numpy as np\n'), ((1725, 1757), 'numpy.add', 'np.add', (['feature_vec', 'model[word]'], {}), '(feature_vec, model[word])\n', (1731, 1757), True, 'import numpy as np\n'), ((5235, 5274), 'iccs_manager.MatchedCategory', 'MatchedCategory', (['code', 'name', 'similarity'], {}), '(code, name, similarity)\n', (5250, 5274), False, 'from iccs_manager import ICCS, MatchedCategory\n'), ((2558, 2587), 're.sub', 're.sub', (['"""\\\\(.*\\\\)"""', '""""""', 'crime'], {}), "('\\\\(.*\\\\)', '', crime)\n", (2564, 2587), False, 'import re\n'), ((2871, 2907), 're.sub', 're.sub', (['"""[0-9](\\\\.[0-9])*"""', '""""""', 'elem'], {}), "('[0-9](\\\\.[0-9])*', '', elem)\n", (2877, 2907), False, 'import re\n'), ((2992, 3034), 're.sub', 're.sub', (['"""[0-9]*(\\\\.[0-9])*\\\\.-*"""', '""""""', 'elem'], {}), 
"('[0-9]*(\\\\.[0-9])*\\\\.-*', '', elem)\n", (2998, 3034), False, 'import re\n'), ((4913, 4939), 'iccs_manager.MatchedCategory', 'MatchedCategory', (['""""""', '""""""', '(0)'], {}), "('', '', 0)\n", (4928, 4939), False, 'from iccs_manager import ICCS, MatchedCategory\n'), ((5162, 5211), 'scipy.spatial.distance.cosine', 'spatial.distance.cosine', (['crime_vec', 'potential_vec'], {}), '(crime_vec, potential_vec)\n', (5185, 5211), False, 'from scipy import spatial\n'), ((2163, 2192), 're.sub', 're.sub', (['"""\\\\<.*\\\\>"""', '""""""', 'crime'], {}), "('\\\\<.*\\\\>', '', crime)\n", (2169, 2192), False, 'import re\n'), ((3255, 3283), 're.sub', 're.sub', (['"""\\\\(.*\\\\)"""', '""""""', 'item'], {}), "('\\\\(.*\\\\)', '', item)\n", (3261, 3283), False, 'import re\n')] |
import torch
import numpy as np
import paddle
from models import convs2s_wmt_en_de
path='ckpt/checkpoint_last.pt'
out=np.load('fconv_dec.npy',allow_pickle=True)[0]
# 1.1 Get the torch weights and their keys
torch_weights = torch.load(path)['model']
torch_keys=[k for k in torch_weights.keys()]
# 1.2 Get the paddle weights and their keys
model=convs2s_wmt_en_de(is_test=False,
                 src_vocab_size=42243,
                 tgt_vocab_size=43676,
                 max_src_positions=1024,
                 max_tgt_positions=1024,
                 bos_id=2,
                 eos_id=2,
                 beam_size=5,
                 max_out_len=50 )
paddle_keys=[k for k in model.state_dict().keys()]
paddle_weights=model.state_dict()
# Inspect the weight-norm g/v shapes of the encoder's fc2 layer.
print(torch_weights['encoder.fc2.weight_g'].shape)
print(torch_weights['encoder.fc2.weight_v'].shape)
# print(f'len torch keys:{len(torch_keys)}')
# for k,v in torch_weights.items():
# print(k)
# print('epoch',torch_weights['epoch']) # 105
# print('batch_offset',torch_weights['batch_offset']) #0
# print('model',torch_weights['model']) # 参数
# print('optimizer',torch_weights['optimizer'])
# print('best_loss',torch_weights['best_loss']) # None
# print('args',torch_weights['args'])
'''
Namespace(arch='fconv_wmt_en_de', clip_norm=0.1, data='/private/home/edunov/wmt14_en_de', decoder_attention='True',
decoder_embed_dim=768, decoder_layers='[(512, 3)] * 9 + [(1024, 3)] * 4 + [(2048, 1)] * 2', decoder_out_embed_dim=512, dropout=0.1,
encoder_embed_dim=768, encoder_layers='[(512, 3)] * 9 + [(1024, 3)] * 4 + [(2048, 1)] * 2', force_anneal=26, label_smoothing=0.0,
log_interval=500, lr=1.25, lrshrink=0.1, max_epoch=0, max_positions=1024, max_tokens=4000, min_lr=1e-05, model='fconv', momentum=0.99,
no_epoch_checkpoints=False, no_progress_bar=True, no_save=False, restore_file='checkpoint_last.pt', sample_without_replacement=0,
save_dir='checkpoints', save_interval=-1, seed=1, source_lang='en', target_lang='de',
test_subset='test', train_subset='train', valid_subset='valid', weight_decay=0.0, workers=4)
'''
# for k,v in torch_weights['model'].items():
# print(k)
#
#
# print(f'len:{len(paddle_keys)}')
# for k in paddle_keys:
# print(k)
''' 用来对应键名,这里用不上'''
# def pkey_filter(key):
# '''设置paddle key 转torch key的规则(torch多了track,paddle多了conv的bias)'''
# rules={'_batch_norm':'bn','_conv':'conv','_mean':'running_mean','_variance':'running_var','se_block':'se',
# 'squeeze':'fc1','excitation':'fc2','short':'downsample','out':'head.fc'}
# for k,v in rules.items():
# key=key.replace(k,v)
# if key.startswith('conv'):
# key='stem.'+'.'.join(key.split('.')[1:])
# elif key.startswith('s'):
# key_ls=key.split('.')
# key_ls[0]=key_ls[0].replace('_','.')
# if key_ls[1].find('conv')!=-1:
# num=int(key_ls[1][4:])
# key_ls[1]='conv{}'.format(str(num+1))
# key='.'.join(key_ls)
# return key
def key_info(keys):
    """Print each key prefixed with its position in the list."""
    for position, name in enumerate(keys):
        print(f'{position} | name: {name}')
def weight_info(keys, weights):
    """Print index, key name and tensor shape for every key, then a total."""
    for position, name in enumerate(keys):
        print(f'{position} | name: {name} | shape: {weights[name].shape} \n')
    print(f'total:len {len(keys)}')
# Dump both state dicts for manual side-by-side inspection.
weight_info(torch_keys,torch_weights)
weight_info(paddle_keys,paddle_weights)
# Print the keys that differ between the two state dicts.
def check_different(ls1, ls2):
    """Partition two key lists (torch keys in ls1, paddle keys in ls2) into
    (shared, ls1-only, ls2-only), preserving original order, with summary
    prints of the list lengths."""
    print(f'len: ls1: {len(ls1)} | ls2: {len(ls2)}')
    ls_inter = [key for key in ls1 if key in ls2]      # present in both
    ls_torch = [key for key in ls1 if key not in ls2]  # torch-only
    ls_paddle = [key for key in ls2 if key not in ls1]  # paddle-only
    print(f'Intersection num: {len(ls_inter)} | Torch keys not aligned: {len(ls_torch)} | Paddle keys not aligned: {len(ls_paddle)}')
    return ls_inter, ls_torch, ls_paddle
# Report which keys exist only on one side (message text is Chinese:
# "torch has extra: ... | paddle has extra: ...").
ls_inter,ls_torch,ls_paddle=check_different(torch_keys,paddle_keys)
print(f'torch 多了:{ls_torch} | paddle多了:{ls_paddle}')
def pair_info(torch_keys, torch_weights, paddle_keys, paddle_weights):
    """Print torch/paddle weight shapes side by side for every torch key,
    separated by a line of asterisks."""
    for key in torch_keys:
        if key in paddle_keys:
            print(f'torch key: {key} | paddle key: {key}')
            print(f'torch weight: {list(torch_weights[key].shape)} | paddle weight: {paddle_weights[key].shape}')
        else:
            print(f'torch key: {key} | torch weight: {list(torch_weights[key].shape)}')
        print('**' * 50)
'''
torch 多了:['encoder.fc2.weight_g', 'encoder.fc2.weight_v'] | paddle多了:['encoder.fc2.weight']
就是encoderfc2出错了,接下来研究vg是啥
'''
## weight_g和weight_v是啥????
'''
对齐规则:
1.子网络:
-encoder
- embed
- tokens
- positions
- projections
- convolutions
- fc
-decoder
- embed...
- projections
- convolutions
- attention
- in proj
- out proj
- fc
2.权重:
weight (embed)
bias
weight_g (squeeze即可)
- conv (3dim)
- fc,attn,proj (2dim)
weight_v
- conv transpose(2,1,0)
- fc,attn,proj transpose(1,0)
3.规则:
a.所有的weight squeeze #embed和weight_g,bias搞定,先不squeeze,否则conv中k为1就没了 (squeeze weight_g)
b.如果key含weight_v:
如果含conv且维度三维: transpose(2,1,0) #conv搞定
如果维度两维: transpose(1,0) #fc,attn,proj搞定
'''
'''
embed:直接赋值 (检查pos产生的对不对)
除了embed全用了weight_norm,都要g和v,且都有bias(其实就fc、conv两类)
fc:
proj
conv
attn(也是proj)
总结:
1.名含fc、proj的是全连接,v需要转置(1,0),g需要squeeze,bias不变
2.名含conv的是卷积,v需要转置(2,1,0),g需要squeeze,bias不变
3.含embed的直接赋值
'''
def align(torch_weights, paddle_weights):
    """Convert torch fconv weights into paddle layout, in place.

    Rules: embeddings and biases copy directly; weight_norm's weight_g is
    squeezed; weight_v is transposed (2, 1, 0) for convolutions and (1, 0)
    for fc/proj (incl. attention) layers. Keys missing on the torch side
    are left untouched. Returns the mutated *paddle_weights* dict.
    """
    shared_keys = [key for key in paddle_weights.keys() if key in torch_weights]
    for key in shared_keys:
        tensor = torch_weights[key].detach().cpu()
        suffix = key.split('.')[-1]
        if 'embed' in key or suffix == 'bias':
            # Embeddings and biases: identical layout in both frameworks.
            paddle_weights[key] = tensor.numpy()
        elif 'convolutions' in key:
            if suffix == 'weight_g':
                paddle_weights[key] = tensor.squeeze().numpy()
            elif suffix == 'weight_v':
                # [k, in, out] -> [out, in, k]
                paddle_weights[key] = tensor.numpy().transpose(2, 1, 0)
        elif 'fc' in key or 'proj' in key:
            if suffix == 'weight_g':
                paddle_weights[key] = tensor.squeeze().numpy()
            elif suffix == 'weight_v':
                # [out, in] -> [in, out]
                paddle_weights[key] = tensor.numpy().transpose(1, 0)
        else:
            print(f'key not alligned:{key}')
    return paddle_weights
# Run the torch -> paddle conversion and show the result.
pad_weights=align(torch_weights,paddle_weights)
print(pad_weights)
# paddle.save(pad_weights, 'ckpt/last/new_align.pdparams')
# Second (older) torch -> paddle alignment pass done inline: copy equal
# shapes directly, otherwise transpose weight_v tensors.
for pk in paddle_keys:
    if pk in torch_keys:
        if pk.find('weight_g') != -1:
            torch_w=torch_weights[pk].squeeze().cpu().detach().numpy()
        else:
            torch_w=torch_weights[pk].cpu().detach().numpy()
        if paddle_weights[pk].shape==list(torch_w.shape):
            paddle_weights[pk]=torch_w
        # elif pk.find('weight_g')!=-1:
        #     paddle_weights[pk] = torch_weights[pk].squeeze().cpu().detach().numpy()
        elif pk.find('weight_v')!=-1 and len(torch_w.shape)==3: # conv # could be merged with the transpose below
            paddle_weights[pk]=torch_w.transpose(2,1,0)
        elif pk.find('weight_v')!=-1 and len(torch_w.shape)==2: # fc,attn,proj
            paddle_weights[pk] = torch_w.transpose(1,0)
        else:
            print(f'key not alligned:{pk}')
# Convert to paddle weights
# paddle.save(paddle_weights,'./ckpt/convs2s_last.padparams')
def allign2torch(torch_weights, paddle_weights):
    """Copy paddle weights back into a torch state dict, undoing the
    paddle-side weight-norm reshapes. Returns the mutated *torch_weights*.

    Rules (inverse of `align`):
      * weight_g: paddle stores it squeezed; torch expects extra singleton
        dims (two leading for 3-D conv tensors, one trailing for 2-D fc/proj).
      * weight_v: conv tensors are transposed (2, 1, 0), linear ones (1, 0).
      * everything else (embeddings, biases): copied as-is when shapes match.

    Fixes vs. the original:
      * shape comparison used `list(torch_shape) == paddle_w.shape` — a list
        compared to numpy's shape *tuple* is always False, so the direct-copy
        branch (embeddings, biases) never fired; compare tuples instead.
      * the fallback print referenced `pk`, a leftover module-level variable
        (NameError if undefined), instead of the key `tk` being processed.
    """
    torch_keys = [k for k in torch_weights.keys()]
    paddle_keys = [k for k in paddle_weights.keys()]
    for tk in torch_keys:
        if tk not in paddle_keys:
            continue
        paddle_w = paddle_weights[tk].numpy()
        if tk.find('weight_g') != -1:
            # torch: 3-dim g has two leading singleton dims, 2-dim g one trailing
            if len(torch_weights[tk].shape) == 3:
                torch_weights[tk] = torch.from_numpy(paddle_w).unsqueeze(0).unsqueeze(0)
            elif len(torch_weights[tk].shape) == 2:
                torch_weights[tk] = torch.from_numpy(paddle_w).unsqueeze(1)
        elif tuple(torch_weights[tk].shape) == paddle_w.shape:
            # Shapes already agree (embeddings, biases): direct copy.
            torch_weights[tk] = torch.from_numpy(paddle_w)
        elif tk.find('weight_v') != -1 and len(paddle_w.shape) == 3:
            torch_weights[tk] = torch.from_numpy(paddle_w.transpose(2, 1, 0))
        elif tk.find('weight_v') != -1 and len(paddle_w.shape) == 2:
            torch_weights[tk] = torch.from_numpy(paddle_w.transpose(1, 0))
        else:
            print(f'key not alligned:{tk}')
    print('aligned over!')
    return torch_weights
# Convert a paddle checkpoint back into a torch checkpoint: load both,
# graft the aligned weights into the torch state dict, and save.
t_path='ckpt/checkpoint_last.pt'
p_path='ckpt/epoch_80/convs2s.pdparams'
torch_weights = torch.load(t_path)
paddle_weights=paddle.load(p_path)
tmodel_weights=allign2torch(torch_weights['model'],paddle_weights)
torch_weights['model']=tmodel_weights
torch.save(torch_weights,'ckpt/checkpoint_80.pt')
# state=paddle.load('./ckpt/convs2s_last.padparams')
# for k,v in state.items():
# print(k,v.shape)
# key_pair_length = min(len(torch_keys), len(paddle_keys)) # 获取最小对应权重长度
# for i, k in enumerate(paddle_keys):
# if i >= key_pair_length:
# break
# torch_k=pkey_filter(k) # 转换为torch key
# if torch_k in torch_keys:
# torch_w = torch_weights[torch_k]
# paddle_w=paddle_weights[k].numpy()
# if len(torch_w.shape)==len(paddle_w.shape)+2:
# torch_w=torch_w.squeeze().detach().numpy()
# else:
# torch_w=torch_w.detach().numpy()
# if paddle_w.shape == torch_w.shape: # paddle shape是list,numpy 的shape是tuple
# paddle_weights[k] = torch_w
# elif paddle_w.shape == torch_w.transpose().shape and k.find('weight') != -1: # 形状不一致,维度一致,且都是weight
# paddle_weights[k] = torch_w.transpose()
# else:
# print(f'err align: key= {torch_k} ')
# print(f'P:{paddle_w.shape} | align_norm:{torch_w.shape}')
# print(len(torch_w.shape),len(paddle_w.shape))
# paddle.save(paddle_weights,save_path)
# print('Align Over!')
| [
"numpy.load",
"paddle.load",
"torch.load",
"torch.save",
"models.convs2s_wmt_en_de",
"torch.from_numpy"
] | [((306, 487), 'models.convs2s_wmt_en_de', 'convs2s_wmt_en_de', ([], {'is_test': '(False)', 'src_vocab_size': '(42243)', 'tgt_vocab_size': '(43676)', 'max_src_positions': '(1024)', 'max_tgt_positions': '(1024)', 'bos_id': '(2)', 'eos_id': '(2)', 'beam_size': '(5)', 'max_out_len': '(50)'}), '(is_test=False, src_vocab_size=42243, tgt_vocab_size=43676,\n max_src_positions=1024, max_tgt_positions=1024, bos_id=2, eos_id=2,\n beam_size=5, max_out_len=50)\n', (323, 487), False, 'from models import convs2s_wmt_en_de\n'), ((9372, 9390), 'torch.load', 'torch.load', (['t_path'], {}), '(t_path)\n', (9382, 9390), False, 'import torch\n'), ((9407, 9426), 'paddle.load', 'paddle.load', (['p_path'], {}), '(p_path)\n', (9418, 9426), False, 'import paddle\n'), ((9537, 9587), 'torch.save', 'torch.save', (['torch_weights', '"""ckpt/checkpoint_80.pt"""'], {}), "(torch_weights, 'ckpt/checkpoint_80.pt')\n", (9547, 9587), False, 'import torch\n'), ((123, 166), 'numpy.load', 'np.load', (['"""fconv_dec.npy"""'], {'allow_pickle': '(True)'}), "('fconv_dec.npy', allow_pickle=True)\n", (130, 166), True, 'import numpy as np\n'), ((205, 221), 'torch.load', 'torch.load', (['path'], {}), '(path)\n', (215, 221), False, 'import torch\n'), ((8820, 8846), 'torch.from_numpy', 'torch.from_numpy', (['paddle_w'], {}), '(paddle_w)\n', (8836, 8846), False, 'import torch\n'), ((8640, 8666), 'torch.from_numpy', 'torch.from_numpy', (['paddle_w'], {}), '(paddle_w)\n', (8656, 8666), False, 'import torch\n'), ((8491, 8517), 'torch.from_numpy', 'torch.from_numpy', (['paddle_w'], {}), '(paddle_w)\n', (8507, 8517), False, 'import torch\n')] |
from scipy.special import erfc
import numpy as np
from numpy import sqrt
from scipy.optimize import minimize
import pandas as pd
def webers_prediction(w, smaller_number, larger_number, rt=np.array([])):
    """Expected accuracy of discriminating two numbers under Weber's law.

    With an empty *rt*, noise is 2*w; otherwise the fit is time-scaled as
    2*w*(1/rt). Returns 1 - 0.5*erfc(z) where z is the scaled distance
    between the two numbers.
    """
    magnitude = sqrt(smaller_number ** 2 + larger_number ** 2)
    if rt.size == 0:
        noise = 2 * w
    else:
        noise = 2 * w * (1 / rt)
    z = (larger_number - smaller_number) / (sqrt(noise) * magnitude)
    return 1 - 0.5 * erfc(z)
def main_weber(w, params, optim=1):
    """Objective / prediction function for the Weber-fraction fit.

    *params* is (stim_left, stim_right, actual_response, rt). For each trial
    the model's expected accuracy is computed from the smaller/larger stimulus
    pair (optionally reaction-time scaled). With optim=1, returns the sum of
    squared deviations from the actual responses (for the optimizer);
    otherwise returns the per-trial predictions as an array.
    """
    stim_left, stim_right, actual_response, rt = params
    squared_errors = []
    predictions = []
    for i in range(len(actual_response)):
        # Order the stimulus pair: Weber's prediction takes (smaller, larger).
        lo = min(stim_left[i], stim_right[i])
        hi = max(stim_left[i], stim_right[i])
        if rt.size == 0:
            pred = webers_prediction(w, lo, hi)
        else:
            pred = webers_prediction(w, lo, hi, rt[i])
        predictions.append(pred)
        squared_errors.append((pred - actual_response[i]) ** 2)
    if optim == 1:
        # Loss for scipy.optimize.minimize.
        return np.sum(squared_errors)
    return np.hstack(predictions)
def run_wrapper(w0, params):
    """Fit the Weber fraction w for one subject via Nelder-Mead, starting
    from w0; returns (fitted_w, rmse-like error)."""
    res = minimize(main_weber,w0,args=(params,),method='nelder-mead')
    webers_w=res.x
    print(res.message)
    # NOTE(review): len(params) is the number of stacked arrays (always 4),
    # not the trial count — this is likely meant to be
    # sqrt(res.fun / n_trials); confirm before relying on the value.
    rmse = np.sqrt(res.fun)/len(params)
    print('RMSE: ', rmse)
    print('Webers w: ', webers_w)
    return webers_w, rmse
#load data
pwd = '/<PASSWORD>' #your directory here
task = 'dots_' # 'symbolic_' # dots or symbolic task?
fn = pwd + task + '100519.csv' #filename of task
ds = pd.read_csv(fn)
idx = 'ID_subs' #subject ID column name
#initialize a starting w
w0=0.11
#lets do some preselection by condition
#ds = ds[ds.Subject != '766'] #exclude Subject 766 (no data)
unique_ids = np.unique(ds[idx])
#ds = ds[ds.condition==1] #small
#ds = ds[ds.condition==2] #large
# Per-subject accumulators filled by the fitting loop below.
ws=[]
all_model_preds=[]
model_errors=[]
model_accs=[]
# Fit one Weber fraction per subject and score the fitted model.
# NOTE: the loop variable `id` shadows the builtin of the same name.
for id in unique_ids:
    print('Subject: ', id)
    #get fixed params and run per subject
    stim_left = np.array(ds.loc[ds[idx]==id,'Stim_Left' ])
    stim_right = np.array(ds.loc[ds[idx]==id,'Stim_Right' ])
    correct_answer = np.array(ds.loc[ds[idx]==id,'Correct_Answer' ])
    #rt = np.array(ds.loc[ds['ID']==id,'Stimulus.RT' ])/1000 #convert to seconds
    rt = np.array([])
    params = np.array([stim_left,stim_right,correct_answer,rt])
    w_fit, rmse = run_wrapper(w0,params)
    model_errors.append(rmse)
    ws.append(w_fit)
    # Per-trial predicted accuracies from the fitted w.
    model_preds = main_weber(w_fit,params,optim=0)
    all_model_preds.append(model_preds)
    # Fraction of trials where the rounded prediction matches the answer.
    model_acc = np.sum(np.round(model_preds)==np.hstack(correct_answer))/float(len(model_preds))
    print('Model accuracy: ', model_acc)
    model_accs.append(model_acc)
#all_model_preds = np.vstack(all_model_preds)
ws = np.hstack(ws)
# Assemble one row per subject: id, fitted w, accuracy, error.
# NOTE(review): np.matrix is deprecated in favour of np.asarray/column_stack.
output_ds = np.array(np.hstack([np.matrix(unique_ids).T,np.matrix(ws).T,np.matrix(model_accs).T,np.matrix(model_errors).T]))
output_labels = ['Subject ID','Webers W', 'Model Accuracy','Model Root Mean Squared Error (RMSE)']
output_df = pd.DataFrame(output_ds,columns=output_labels)
output_df.set_index('Subject ID',inplace=True)
output_df.to_csv(pwd + task + 'webers_fraction.csv')
| [
"pandas.DataFrame",
"numpy.matrix",
"scipy.optimize.minimize",
"numpy.sum",
"pandas.read_csv",
"scipy.special.erfc",
"numpy.hstack",
"numpy.array",
"numpy.round",
"numpy.unique",
"numpy.sqrt"
] | [((2093, 2108), 'pandas.read_csv', 'pd.read_csv', (['fn'], {}), '(fn)\n', (2104, 2108), True, 'import pandas as pd\n'), ((2298, 2316), 'numpy.unique', 'np.unique', (['ds[idx]'], {}), '(ds[idx])\n', (2307, 2316), True, 'import numpy as np\n'), ((3292, 3305), 'numpy.hstack', 'np.hstack', (['ws'], {}), '(ws)\n', (3301, 3305), True, 'import numpy as np\n'), ((3544, 3590), 'pandas.DataFrame', 'pd.DataFrame', (['output_ds'], {'columns': 'output_labels'}), '(output_ds, columns=output_labels)\n', (3556, 3590), True, 'import pandas as pd\n'), ((186, 198), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (194, 198), True, 'import numpy as np\n'), ((677, 709), 'numpy.array', 'np.array', (['(stim_left > stim_right)'], {}), '(stim_left > stim_right)\n', (685, 709), True, 'import numpy as np\n'), ((1704, 1766), 'scipy.optimize.minimize', 'minimize', (['main_weber', 'w0'], {'args': '(params,)', 'method': '"""nelder-mead"""'}), "(main_weber, w0, args=(params,), method='nelder-mead')\n", (1712, 1766), False, 'from scipy.optimize import minimize\n'), ((2546, 2590), 'numpy.array', 'np.array', (["ds.loc[ds[idx] == id, 'Stim_Left']"], {}), "(ds.loc[ds[idx] == id, 'Stim_Left'])\n", (2554, 2590), True, 'import numpy as np\n'), ((2606, 2651), 'numpy.array', 'np.array', (["ds.loc[ds[idx] == id, 'Stim_Right']"], {}), "(ds.loc[ds[idx] == id, 'Stim_Right'])\n", (2614, 2651), True, 'import numpy as np\n'), ((2671, 2720), 'numpy.array', 'np.array', (["ds.loc[ds[idx] == id, 'Correct_Answer']"], {}), "(ds.loc[ds[idx] == id, 'Correct_Answer'])\n", (2679, 2720), True, 'import numpy as np\n'), ((2809, 2821), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2817, 2821), True, 'import numpy as np\n'), ((2835, 2888), 'numpy.array', 'np.array', (['[stim_left, stim_right, correct_answer, rt]'], {}), '([stim_left, stim_right, correct_answer, rt])\n', (2843, 2888), True, 'import numpy as np\n'), ((1544, 1561), 'numpy.sum', 'np.sum', (['diff_pred'], {}), '(diff_pred)\n', (1550, 1561), True, 'import 
numpy as np\n'), ((1642, 1664), 'numpy.hstack', 'np.hstack', (['model_preds'], {}), '(model_preds)\n', (1651, 1664), True, 'import numpy as np\n'), ((1817, 1833), 'numpy.sqrt', 'np.sqrt', (['res.fun'], {}), '(res.fun)\n', (1824, 1833), True, 'import numpy as np\n'), ((505, 512), 'scipy.special.erfc', 'erfc', (['x'], {}), '(x)\n', (509, 512), False, 'from scipy.special import erfc\n'), ((265, 276), 'numpy.sqrt', 'sqrt', (['(2 * w)'], {}), '(2 * w)\n', (269, 276), False, 'from numpy import sqrt\n'), ((275, 321), 'numpy.sqrt', 'sqrt', (['(smaller_number ** 2 + larger_number ** 2)'], {}), '(smaller_number ** 2 + larger_number ** 2)\n', (279, 321), False, 'from numpy import sqrt\n'), ((415, 437), 'numpy.sqrt', 'sqrt', (['(2 * w * (1 / rt))'], {}), '(2 * w * (1 / rt))\n', (419, 437), False, 'from numpy import sqrt\n'), ((432, 478), 'numpy.sqrt', 'sqrt', (['(smaller_number ** 2 + larger_number ** 2)'], {}), '(smaller_number ** 2 + larger_number ** 2)\n', (436, 478), False, 'from numpy import sqrt\n'), ((3092, 3113), 'numpy.round', 'np.round', (['model_preds'], {}), '(model_preds)\n', (3100, 3113), True, 'import numpy as np\n'), ((3115, 3140), 'numpy.hstack', 'np.hstack', (['correct_answer'], {}), '(correct_answer)\n', (3124, 3140), True, 'import numpy as np\n'), ((3339, 3360), 'numpy.matrix', 'np.matrix', (['unique_ids'], {}), '(unique_ids)\n', (3348, 3360), True, 'import numpy as np\n'), ((3363, 3376), 'numpy.matrix', 'np.matrix', (['ws'], {}), '(ws)\n', (3372, 3376), True, 'import numpy as np\n'), ((3379, 3400), 'numpy.matrix', 'np.matrix', (['model_accs'], {}), '(model_accs)\n', (3388, 3400), True, 'import numpy as np\n'), ((3403, 3426), 'numpy.matrix', 'np.matrix', (['model_errors'], {}), '(model_errors)\n', (3412, 3426), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
import numpy as np
from configs import comission, PRICE_MAG
class environment:
    """Trading environment over a randomly sampled window of an OHLC frame.

    Each episode draws a contiguous slice of ``leng`` rows from
    ``data_frame``; the agent's action is the target number of shares to
    hold, and the reward is the change in equity net of commissions.
    """

    def __init__(self, data_frame, leng):
        self.big_data = data_frame.values
        self.len = leng
        # last start index from which a window of `leng` rows still fits
        self.start_len = self.big_data.shape[0] - leng
        start_point = int(np.random.rand() * self.start_len)
        self.data = self.big_data[start_point:start_point + leng, :]
        # column 3 holds the close prices
        self.prices = self.data[:, 3]
        self.iter = 0
        self.n_shares = 0
        self.cash = 0
        self.max_shares = 1
        self.max_iter = self.prices.shape[0]
        self.done = False
        self.prev_equity = 0
        self.equity = 0
        self.comission = comission
        self.same_steps = 0
        self.prev_act = 0

    def calc_reward(self, act):
        """Return the equity change caused by moving the position to ``act``.

        A commission (scaled by ``PRICE_MAG``) is charged whenever the
        position changes; very long runs of the same action are penalised
        in the returned reward.
        """
        if self.n_shares != act:
            # trade executes at the previous bar's close;
            # `1 + 0 * (...)` is a disabled commission multiplier kept
            # for experimentation (always evaluates to 1)
            self.cash = self.cash - self.prices[self.iter - 1] * \
                (act - self.n_shares) - self.comission * PRICE_MAG * (1 + 0 * (self.same_steps < 3))
            self.n_shares = act
        self.equity = self.cash + self.prices[self.iter] * self.n_shares
        reward = self.equity - self.prev_equity
        self.prev_equity = self.equity
        # extra penalty only kicks in after 1000 identical steps
        return reward - self.comission * PRICE_MAG * (int(self.same_steps / 1000))

    def step(self, act):
        """Advance one bar and return (observation, reward, done, n_shares).

        NOTE(review): calling step() after the episode is done raises a
        NameError (observation/reward are never bound) — callers are
        expected to reset() first; behaviour preserved.
        """
        if not self.done:
            self.iter += 1
            observation = self.data[self.iter]
            reward = self.calc_reward(act)
            self.same_steps += 1
            if act != self.prev_act:
                self.same_steps = 0
            if self.iter >= self.max_iter - 1:
                self.done = True
            else:
                self.done = False
            self.prev_act = act
        return observation, reward, self.done, self.n_shares

    def reset(self):
        """Start a new episode on a freshly sampled window and return the
        first observation."""
        self.iter = 0
        self.done = False
        start_point = int(np.random.rand() * self.start_len)
        self.data = self.big_data[start_point:start_point + self.len, :]
        observation = self.data[self.iter]
        self.prices = self.data[:, 3]
        self.n_shares = 0
        self.cash = 0
        self.prev_equity = 0
        self.equity = 0
        # bug fix: the action-history counters were not reset, so the
        # same-action penalty and prev_act leaked across episodes
        self.same_steps = 0
        self.prev_act = 0
        return observation

    def sample(self):
        """Return a uniformly random action in {0, 1, 2}."""
        return np.random.randint(0, 3)
| [
"numpy.random.rand",
"numpy.random.randint"
] | [((2346, 2369), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2363, 2369), True, 'import numpy as np\n'), ((331, 347), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (345, 347), True, 'import numpy as np\n'), ((1991, 2007), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (2005, 2007), True, 'import numpy as np\n')] |
import logging
import math
import numpy as np
import sklearn.metrics
from .config import METRIC_NULL
log = logging.getLogger(__name__)
class Metric(object):
    """Plain record describing a scoring metric and how it must be applied.

    Attributes mirror the constructor arguments: the metric's name, the
    callable implementing it, which problem type it serves, whether a
    smaller value is better, whether it needs hard class labels, whether
    it expects binary scores, and whether multi-class averaging applies.
    """

    def __init__(self, name, function, problem_type, best_is_min, need_class=False, binary=False, average=False):
        # store every constructor argument verbatim as a same-named attribute
        self.__dict__.update(
            name=name,
            function=function,
            problem_type=problem_type,
            best_is_min=best_is_min,
            need_class=need_class,
            binary=binary,
            average=average,
        )
# additional metrics not included in sklearn
def rmse(y_act, y_pred):
    """
    metrics rmse = Root Mean Squared Error (regression only)
    :param y_act: vector of actual values
    :param y_pred: vector of predicted values
    :return: rmse
    """
    # square root of the sklearn MSE
    mse = sklearn.metrics.mean_squared_error(y_act, y_pred)
    return math.sqrt(mse)
def rmsle(y_act, y_pred):
    """
    metrics rmsle = Root Mean Squared Log Error (regression only)
    :param y_act: vector of actual values
    :param y_pred: vector of predicted values
    :return: rmsle
    """
    # square root of the sklearn mean squared log error
    msle = sklearn.metrics.mean_squared_log_error(y_act, y_pred)
    return math.sqrt(msle)
def gini(y_act, y_pred):
    """
    metrics gini = Gini coefficient (classification only)

    Rows are sorted by descending predicted score (ties broken by original
    index); the coefficient is derived from the cumulative share of actual
    positives, giving a value in [-0.5, 0.5] before normalisation.

    :param y_act: vector of actual values
    :param y_pred: vector of predicted values
    :return: gini
    """
    assert (len(y_act) == len(y_pred))
    # bug fix: `np.float` was removed in NumPy 1.24; the builtin `float`
    # is the documented replacement (the alias pointed to it anyway).
    # Also renamed `all` -> `arr` to stop shadowing the builtin.
    arr = np.asarray(np.c_[y_act, y_pred, np.arange(len(y_act))], dtype=float)
    # sort by descending prediction, stable on the original index
    arr = arr[np.lexsort((arr[:, 2], -1 * arr[:, 1]))]
    totalLosses = arr[:, 0].sum()
    giniSum = arr[:, 0].cumsum().sum() / totalLosses
    giniSum -= (len(y_act) + 1) / 2.
    return giniSum / len(y_act)
def gini_normalized(y_act, y_pred):
    """
    metrics normalized gini = Normalized Gini coefficient (classification only)

    Ratio of the model's Gini coefficient to that of a perfect model
    (predictions equal to the actuals), so 1.0 means perfect ranking.

    :param y_act: vector of actual values
    :param y_pred: vector of predicted values
    :return: gini
    """
    model_gini = gini(y_act, y_pred)
    perfect_gini = gini(y_act, y_act)
    return model_gini / perfect_gini
# metrics
# Registry of every supported metric. The `best_is_min` flag drives the
# sign convention applied in evaluate_metric (min-is-best metrics are
# returned as-is, max-is-best metrics are negated there).
metric_list = [
    # classification metrics:
    Metric('log_loss', sklearn.metrics.log_loss, 'classification', True),
    Metric('accuracy', sklearn.metrics.accuracy_score, 'classification', False, need_class=True),
    Metric('precision', sklearn.metrics.precision_score, 'classification', False, need_class=True, average=True),
    Metric('recall', sklearn.metrics.recall_score, 'classification', False, need_class=True, average=True),
    Metric('f1', sklearn.metrics.f1_score, 'classification', False, need_class=True, average=True),
    Metric('auc', sklearn.metrics.roc_auc_score, 'classification', False, need_class=False, binary=True),
    Metric('hinge', sklearn.metrics.hinge_loss, 'classification', True),
    Metric('gini', gini, 'classification', False, need_class=True, binary=True),
    Metric('gini_norm', gini_normalized, 'classification', False, need_class=False, binary=True),

    # regression metrics
    Metric('mse', sklearn.metrics.mean_squared_error, 'regression', True),
    Metric('rmse', rmse, 'regression', True),
    Metric('mae', sklearn.metrics.mean_absolute_error, 'regression', True),
    Metric('median', sklearn.metrics.median_absolute_error, 'regression', True),
    Metric('msle', sklearn.metrics.mean_squared_log_error, 'regression', True),
    Metric('rmsle', rmsle, 'regression', True),
    Metric('r2', sklearn.metrics.r2_score, 'regression', False)
]

# name -> Metric lookup used by evaluate_metric
metric_map = {m.name: m for m in metric_list}
def evaluate_metric(y_act, y_pred, metric_name, n_classes):
    """Score predictions with the named metric of the dataset.

    Probabilities are converted to whatever representation the metric
    expects (hard labels, positive-class column, or raw probabilities).
    Max-is-best metrics are negated so that smaller is always better in
    comparisons; callers must display the absolute value. On any failure
    the sentinel METRIC_NULL is returned.
    """
    metric = metric_map[metric_name]
    try:
        # pick the prediction representation the metric expects
        if metric.need_class:
            y_eval = np.argmax(y_pred, axis=1)
        elif metric.binary:
            y_eval = y_pred[:, 1]
        else:
            y_eval = y_pred
        if metric.best_is_min:
            # already "smaller is better"; log_loss needs the label set pinned
            if metric.name == 'log_loss':
                return metric.function(y_act, y_eval, labels=list(range(n_classes)))
            return metric.function(y_act, y_eval)
        # "larger is better": negate, averaging over classes when multi-class
        if metric.average and n_classes > 2:
            return -metric.function(y_act, y_eval, average='micro')
        return -metric.function(y_act, y_eval)
    except Exception as e:
        log.error('error in evaluating metric %s: %s' % (metric_name, e))
        return METRIC_NULL
"numpy.lexsort",
"logging.getLogger",
"numpy.argmax"
] | [((109, 136), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (126, 136), False, 'import logging\n'), ((1495, 1534), 'numpy.lexsort', 'np.lexsort', (['(all[:, 2], -1 * all[:, 1])'], {}), '((all[:, 2], -1 * all[:, 1]))\n', (1505, 1534), True, 'import numpy as np\n'), ((3698, 3723), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (3707, 3723), True, 'import numpy as np\n')] |
from netCDF4 import Dataset
import numpy as np
def compute_norms(lo, hi):
    """Return [l1, l2, linf] relative error norms between `lo` and a
    block-averaged (coarsened) copy of `hi`.

    `hi` is reduced onto `lo`'s grid by averaging blocks of
    factor_y x factor_x cells; the norms then compare that coarsened
    field against `lo`. A 1-row `hi` is treated as 1-D data.
    """
    ny_lo, nx_lo = lo.shape
    ny_hi, nx_hi = hi.shape
    factor_y = int(ny_hi / ny_lo)
    factor_x = int(nx_hi / nx_lo)
    if ny_hi == 1:
        # 1-D case: average consecutive groups of factor_x samples
        interp = np.mean(np.reshape(hi, [nx_lo, factor_x]), axis=1)
    else:
        # 2-D case: average in x first, then in y
        avg_x = np.mean(np.reshape(hi, [ny_hi, nx_lo, factor_x]), axis=2)
        interp = np.mean(np.reshape(avg_x.T, [nx_lo, ny_lo, factor_y]), axis=2).T
    diff = np.abs(interp - lo)
    l1 = np.sum(diff) / np.sum(np.abs(interp))
    l2 = np.sqrt(np.sum(diff ** 2) / np.sum(np.abs(interp) ** 2))
    # l-infinity norm relative to the field's dynamic range
    li = np.max(diff) / (np.max(interp) - np.min(interp))
    return [l1, l2, li]
def _read_last_fields(fname):
    """Read the final time step of thickness/u/v from a NetCDF file;
    return (nx, ny, h, u, v)."""
    nc = Dataset(fname, "r")
    nt = len(nc.dimensions["t"])
    nx = len(nc.dimensions["x"])
    ny = len(nc.dimensions["y"])
    h = nc.variables["thickness"][nt - 1, :, :]
    u = nc.variables["u"][nt - 1, :, :]
    v = nc.variables["v"][nt - 1, :, :]
    return nx, ny, h, u, v


def print_norms(fname_hi, fname_a, fname_b):
    """Print l1/l2/linf error norms of runs a and b against a high-res
    reference, plus the observed convergence order between them.

    One output line per norm; for 1-D data (ny == 1) the v-component
    columns are omitted. (Refactor: the three identical NetCDF read
    stanzas are collapsed into _read_last_fields.)
    """
    nx_hi, ny_hi, h_hi, u_hi, v_hi = _read_last_fields(fname_hi)
    nx_a, ny_a, h_a, u_a, v_a = _read_last_fields(fname_a)
    nx_b, ny_b, h_b, u_b, v_b = _read_last_fields(fname_b)
    l1a_h, l2a_h, lia_h = compute_norms(h_a, h_hi)
    l1b_h, l2b_h, lib_h = compute_norms(h_b, h_hi)
    l1a_u, l2a_u, lia_u = compute_norms(u_a, u_hi)
    l1b_u, l2b_u, lib_u = compute_norms(u_b, u_hi)
    l1a_v, l2a_v, lia_v = compute_norms(v_a, v_hi)
    l1b_v, l2b_v, lib_v = compute_norms(v_b, v_hi)
    # observed order of convergence: log(err_b/err_a) / log(nx_a/nx_b)
    denom = np.log(float(nx_a) / float(nx_b))
    cv1_h, cv2_h, cvi_h = [np.log(l1b_h / l1a_h) / denom, np.log(l2b_h / l2a_h) / denom, np.log(lib_h / lia_h) / denom]
    cv1_u, cv2_u, cvi_u = [np.log(l1b_u / l1a_u) / denom, np.log(l2b_u / l2a_u) / denom, np.log(lib_u / lia_u) / denom]
    cv1_v, cv2_v, cvi_v = [np.log(l1b_v / l1a_v) / denom, np.log(l2b_v / l2a_v) / denom, np.log(lib_v / lia_v) / denom]
    if ny_hi == 1:
        print(str(l1a_h)+" "+str(l1a_u)+" "+str(l1b_h)+" "+str(l1b_u)+" "+str(cv1_h)+" "+str(cv1_u))
        print(str(l2a_h)+" "+str(l2a_u)+" "+str(l2b_h)+" "+str(l2b_u)+" "+str(cv2_h)+" "+str(cv2_u))
        print(str(lia_h)+" "+str(lia_u)+" "+str(lib_h)+" "+str(lib_u)+" "+str(cvi_h)+" "+str(cvi_u))
    else:
        print(str(l1a_h)+" "+str(l1a_u)+" "+str(l1a_v)+" "+str(l1b_h)+" "+str(l1b_u)+" "+str(l1b_v)+" "+str(cv1_h)+" "+str(cv1_u)+" "+str(cv1_v))
        print(str(l2a_h)+" "+str(l2a_u)+" "+str(l2a_v)+" "+str(l2b_h)+" "+str(l2b_u)+" "+str(l2b_v)+" "+str(cv2_h)+" "+str(cv2_u)+" "+str(cv2_v))
        print(str(lia_h)+" "+str(lia_u)+" "+str(lia_v)+" "+str(lib_h)+" "+str(lib_u)+" "+str(lib_v)+" "+str(cvi_h)+" "+str(cvi_u)+" "+str(cvi_v))
# driver: compare each order-N pair (coarse grids a and b) against the
# high-resolution reference run
print_norms( "order_hi.nc" , "order_3_a.nc" , "order_3_b.nc" )
print_norms( "order_hi.nc" , "order_5_a.nc" , "order_5_b.nc" )
print_norms( "order_hi.nc" , "order_7_a.nc" , "order_7_b.nc" )
print_norms( "order_hi.nc" , "order_9_a.nc" , "order_9_b.nc" )
# print_norms( "order_2d_hi.nc" , "order_2d_3_a.nc" , "order_2d_3_b.nc" )
# print_norms( "order_2d_hi.nc" , "order_2d_5_a.nc" , "order_2d_5_b.nc" )
| [
"netCDF4.Dataset",
"numpy.abs",
"numpy.log",
"numpy.max",
"numpy.min",
"numpy.reshape"
] | [((794, 816), 'netCDF4.Dataset', 'Dataset', (['fname_hi', '"""r"""'], {}), "(fname_hi, 'r')\n", (801, 816), False, 'from netCDF4 import Dataset\n'), ((1058, 1079), 'netCDF4.Dataset', 'Dataset', (['fname_a', '"""r"""'], {}), "(fname_a, 'r')\n", (1065, 1079), False, 'from netCDF4 import Dataset\n'), ((1316, 1337), 'netCDF4.Dataset', 'Dataset', (['fname_b', '"""r"""'], {}), "(fname_b, 'r')\n", (1323, 1337), False, 'from netCDF4 import Dataset\n'), ((299, 332), 'numpy.reshape', 'np.reshape', (['hi', '[nx_lo, factor_x]'], {}), '(hi, [nx_lo, factor_x])\n', (309, 332), True, 'import numpy as np\n'), ((374, 414), 'numpy.reshape', 'np.reshape', (['hi', '[ny_hi, nx_lo, factor_x]'], {}), '(hi, [ny_hi, nx_lo, factor_x])\n', (384, 414), True, 'import numpy as np\n'), ((533, 552), 'numpy.abs', 'np.abs', (['(interp - lo)'], {}), '(interp - lo)\n', (539, 552), True, 'import numpy as np\n'), ((561, 575), 'numpy.abs', 'np.abs', (['interp'], {}), '(interp)\n', (567, 575), True, 'import numpy as np\n'), ((666, 685), 'numpy.abs', 'np.abs', (['(interp - lo)'], {}), '(interp - lo)\n', (672, 685), True, 'import numpy as np\n'), ((688, 702), 'numpy.max', 'np.max', (['interp'], {}), '(interp)\n', (694, 702), True, 'import numpy as np\n'), ((705, 719), 'numpy.min', 'np.min', (['interp'], {}), '(interp)\n', (711, 719), True, 'import numpy as np\n'), ((1911, 1932), 'numpy.log', 'np.log', (['(l1b_h / l1a_h)'], {}), '(l1b_h / l1a_h)\n', (1917, 1932), True, 'import numpy as np\n'), ((1939, 1960), 'numpy.log', 'np.log', (['(l2b_h / l2a_h)'], {}), '(l2b_h / l2a_h)\n', (1945, 1960), True, 'import numpy as np\n'), ((1967, 1988), 'numpy.log', 'np.log', (['(lib_h / lia_h)'], {}), '(lib_h / lia_h)\n', (1973, 1988), True, 'import numpy as np\n'), ((2018, 2039), 'numpy.log', 'np.log', (['(l1b_u / l1a_u)'], {}), '(l1b_u / l1a_u)\n', (2024, 2039), True, 'import numpy as np\n'), ((2046, 2067), 'numpy.log', 'np.log', (['(l2b_u / l2a_u)'], {}), '(l2b_u / l2a_u)\n', (2052, 2067), True, 'import numpy as 
np\n'), ((2074, 2095), 'numpy.log', 'np.log', (['(lib_u / lia_u)'], {}), '(lib_u / lia_u)\n', (2080, 2095), True, 'import numpy as np\n'), ((2125, 2146), 'numpy.log', 'np.log', (['(l1b_v / l1a_v)'], {}), '(l1b_v / l1a_v)\n', (2131, 2146), True, 'import numpy as np\n'), ((2153, 2174), 'numpy.log', 'np.log', (['(l2b_v / l2a_v)'], {}), '(l2b_v / l2a_v)\n', (2159, 2174), True, 'import numpy as np\n'), ((2181, 2202), 'numpy.log', 'np.log', (['(lib_v / lia_v)'], {}), '(lib_v / lia_v)\n', (2187, 2202), True, 'import numpy as np\n'), ((456, 499), 'numpy.reshape', 'np.reshape', (['tmp.T', '[nx_lo, ny_lo, factor_y]'], {}), '(tmp.T, [nx_lo, ny_lo, factor_y])\n', (466, 499), True, 'import numpy as np\n'), ((600, 619), 'numpy.abs', 'np.abs', (['(interp - lo)'], {}), '(interp - lo)\n', (606, 619), True, 'import numpy as np\n'), ((631, 645), 'numpy.abs', 'np.abs', (['interp'], {}), '(interp)\n', (637, 645), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
The :mod:`TensorClus.coclustering.tensorCoclusteringBernoulli` module provides an implementation
of a tensor co-clustering algorithm for binary three-way tensor.
"""
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
# License: BSD 3 clause
from __future__ import division
import numpy as np
import random
from sklearn.utils import check_random_state
from ..initialization import random_init
from .baseNonDiagonalCoclustering import BaseNonDiagonalCoclust
from ..tests.input_checking import check_tensor,check_numbers_clusters_non_diago
GPU_exist = False
try :
import cupy as cp
GPU_exist = True
except ImportError :
GPU_exist = False
print("No GPU available")
print("GPU_exist", GPU_exist)
class TensorCoclusteringBernoulli(BaseNonDiagonalCoclust):
    """Tensor Latent Block Model for Bernoulli distribution.

    Parameters
    ----------
    n_row_clusters : int, optional, default: 2
        Number of row clusters to form

    n_col_clusters : int, optional, default: 2
        Number of column clusters to form

    fuzzy : boolean, optional, default: False
        Provide fuzzy clustering; if fuzzy is False
        a hard clustering is performed

    init_row : numpy array or scipy sparse matrix, \
        shape (n_rows, K), optional, default: None
        Initial row labels

    init_col : numpy array or scipy sparse matrix, \
        shape (n_cols, L), optional, default: None
        Initial column labels

    max_iter : int, optional, default: 50
        Maximum number of iterations

    n_init : int, optional, default: 1
        Number of time the algorithm will be run with different
        initializations. The final results will be the best output of `n_init`
        consecutive runs.

    random_state : integer or numpy.RandomState, optional
        The generator used to initialize the centers. If an integer is
        given, it fixes the seed. Defaults to the global numpy random
        number generator.

    tol : float, default: 1e-6
        Relative tolerance with regards to criterion to declare convergence

    Attributes
    ----------
    row_labels_ : array-like, shape (n_rows,)
        Bicluster label of each row

    column_labels_ : array-like, shape (n_cols,)
        Bicluster label of each column

    mu_kl : array-like, shape (K, L, v)
        Bernoulli mean vector for each (row cluster k, column cluster l) block
    """

    def __init__(self, n_row_clusters=2, n_col_clusters=2, fuzzy=False, init_row=None, init_col=None,
                 max_iter=50, n_init=1, tol=1e-6, random_state=None, gpu=None):
        self.n_row_clusters = n_row_clusters
        self.n_col_clusters = n_col_clusters
        self.init_row = init_row
        self.init_col = init_col
        self.max_iter = max_iter
        self.n_init = n_init
        self.tol = tol
        self.random_state = random_state
        self.fuzzy = fuzzy
        self.row_labels_ = None
        self.column_labels_ = None
        self.criterions = []
        self.criterion = -np.inf
        self.mu_kl = None
        self.mu_kl_evolution = None
        # gpu=None means "auto-detect" (use cupy when importable)
        self.gpu = gpu

    def fit(self, X, y=None):
        """Perform Tensor co-clustering.

        Runs `_fit_single` with `n_init` different seeds and keeps the
        attributes of the run with the best (largest) criterion.

        Parameters
        ----------
        X : three-way numpy array, shape=(n_row_objects, d_col_objects, v_features)
            Tensor to be analyzed

        Raises
        ------
        ValueError
            If the criterion evaluates to NaN (unexpected input values).
        """
        global GPU_exist
        if self.gpu is None:
            self.gpu = GPU_exist
        else:
            # an explicit user choice overrides the auto-detected flag
            GPU_exist = self.gpu
        random_state = check_random_state(self.random_state)
        check_tensor(X)
        check_numbers_clusters_non_diago(X, self.n_row_clusters, self.n_col_clusters)
        X = X.astype(int)
        criterion = self.criterion
        criterions = self.criterions
        row_labels_ = self.row_labels_
        column_labels_ = self.column_labels_
        mu_kl = self.mu_kl
        mu_kl_evolution = self.mu_kl_evolution
        seeds = random_state.randint(np.iinfo(np.int32).max, size=self.n_init)
        for seed in seeds:
            self._fit_single(X, seed, y)
            if np.isnan(self.criterion):
                raise ValueError("matrix may contain negative or "
                                 "unexpected NaN values")
            # remember attributes corresponding to the best criterion
            if self.criterion > criterion:
                criterion = self.criterion
                criterions = self.criterions
                row_labels_ = self.row_labels_
                column_labels_ = self.column_labels_
                mu_kl = self.mu_kl
                mu_kl_evolution = self.mu_kl_evolution
        # update attributes with the best run
        self.criterion = criterion
        self.criterions = criterions
        self.row_labels_ = row_labels_
        self.column_labels_ = column_labels_
        # bug fix: the best parameters were previously stored only under the
        # ad-hoc attribute `mu_kl_`, leaving the documented `mu_kl` at the
        # last run's value; keep both attributes in sync for compatibility
        self.mu_kl = mu_kl
        self.mu_kl_ = mu_kl
        self.mu_kl_evolution = mu_kl_evolution
        return self

    def mukl(self, x, z, w):
        """Compute the mean vector mu_kl per block.

        Parameters
        ----------
        x : three-way numpy array, shape=(n_row_objects, d_col_objects, v_features)
            Tensor to be analyzed
        z : numpy array, shape=(n_row_objects, K)
            matrix of row partition
        w : numpy array, shape=(d_col_objects, L)
            matrix of column partition

        Returns
        -------
        mukl_mat
            three-way numpy array, shape=(K, L, v_features)
        """
        n = z.shape[0]
        d = w.shape[0]
        K = z.shape[1]
        L = w.shape[1]
        v = x.shape[2]
        mukl_mat = np.zeros((K, L, v))
        for k in range(K):
            z_k = z[:, k].reshape(n, 1)
            for l in range(L):
                w_l = w[:, l].reshape(1, d)
                # poids[i, j] = z_ik * w_jl: soft membership of cell (i, j) in block (k, l)
                poids = z_k.dot(w_l)
                nbr_element_class = np.sum(poids)
                if not GPU_exist:
                    dup_S = poids.reshape(n, d, 1)
                    x_poids = np.multiply(dup_S, x)
                    sum_kl = np.sum(x_poids, axis=(0, 1))
                else:
                    # same computation on the GPU via cupy
                    x_gpu = cp.asarray(x)
                    poids_gpu = cp.asarray(poids)
                    dup_S = poids_gpu.reshape(n, d, 1)
                    x_poids = cp.multiply(dup_S, x_gpu)
                    sum_kl = cp.sum(x_poids, axis=(0, 1))
                    sum_kl = cp.asnumpy(sum_kl)
                    cp.cuda.Stream.null.synchronize()
                # small epsilon keeps log(mu) finite for empty blocks
                mukl_mat[k][l] = (sum_kl / nbr_element_class) + 1.e-6
        # clip means below 1 so log(1 - mu) stays finite
        mukl_mat[mukl_mat >= 1] = 0.99
        return mukl_mat

    def pi_k(self, z):
        """Compute row cluster proportions.

        Parameters
        ----------
        z : numpy array, shape=(n_row_objects, K)
            matrix of row partition

        Returns
        -------
        pi_k_vect
            numpy array, shape=(K,): proportion of row clusters
        """
        n = z.shape[0]
        pi_k_vect = np.sum(z, 0) / n
        return pi_k_vect

    def rho_l(self, w):
        """Compute column cluster proportions.

        Parameters
        ----------
        w : numpy array, shape=(d_col_objects, L)
            matrix of column partition

        Returns
        -------
        rho_l_vect
            numpy array, shape=(L,): proportion of column clusters
        """
        d = w.shape[0]
        rho_l_vect = np.sum(w, 0) / d
        return rho_l_vect

    def F_c(self, x, z, w, mukl, pi_k, rho_l, choice='ZW'):
        """Compute fuzzy log-likelihood (LL) criterion.

        Parameters
        ----------
        x : three-way numpy array, shape=(n_row_objects, d_col_objects, v_features)
            Tensor to be analyzed
        z : numpy array, shape=(n_row_objects, K)
            matrix of row partition
        w : numpy array, shape=(d_col_objects, L)
            matrix of column partition
        mukl : three-way numpy array, shape=(K, L, v_features)
            matrix of mean parameter per block
        pi_k : numpy array, shape=(K,)
            vector of row cluster proportions
        rho_l : numpy array, shape=(L,)
            vector of column cluster proportions
        choice : string, one of ("Z", "W", "ZW")
            which parts of the lower bound to include

        Returns
        -------
        (H_z, H_w, LL, value)
            (row entropy, column entropy, log-likelihood, lower bound of log-likelihood)
        """
        n = z.shape[0]
        d = w.shape[0]
        K = z.shape[1]
        L = w.shape[1]
        v = x.shape[2]  # number of covariates
        H_w = 0
        H_z = 0
        one3D = np.ones((n, d, v))
        LL = 0
        for k in range(K):
            z_k = z[:, k].reshape(n, 1)
            for l in range(L):
                w_l = w[:, l].reshape(1, d)
                poids = z_k.dot(w_l)
                mukl_select = (mukl[k][l]).reshape(1, v)
                Imukl = np.log(np.ones((1, v)) - mukl_select)
                if not GPU_exist:
                    xijLnmukl = (x[:, :, :] * np.log(mukl_select[0, :])).reshape(n, d, v)
                    Ixij = (one3D - x[:, :, :]).reshape(n, d, v)
                    Ixij_Imukl = (Ixij[:, :, :] * (Imukl[0, :])).reshape(n, d, v)
                else:
                    # same computation on the GPU via cupy
                    x_gpu = cp.asarray(x)
                    one3D_gpu = cp.asarray(one3D)
                    mukl_select_gpu = cp.asarray(mukl_select)
                    Imukl_gpu = cp.asarray(Imukl)
                    xijLnmukl = (x_gpu[:, :, :] * cp.log(mukl_select_gpu[0, :])).reshape(n, d, v)
                    Ixij = (one3D_gpu - x_gpu[:, :, :]).reshape(n, d, v)
                    Ixij_Imukl = (Ixij[:, :, :] * (Imukl_gpu[0, :])).reshape(n, d, v)
                    xijLnmukl = cp.asnumpy(xijLnmukl)
                    Ixij_Imukl = cp.asnumpy(Ixij_Imukl)
                    cp.cuda.Stream.null.synchronize()
                # weighted Bernoulli log-density:
                # z_ik * w_jl * (x * log(mu) + (1 - x) * log(1 - mu))
                error = poids[:, :, None] * (xijLnmukl + (Ixij_Imukl))
                LL = LL + np.sum(error)
        value = 0
        if choice == "ZW":
            H_z = 0
            for i in range(n):
                for k in range(K):
                    H_z = H_z - (z[i, k] * np.log(z[i, k]))
            H_w = 0
            for j in range(d):
                for l in range(L):
                    H_w = H_w - (w[j, l] * np.log(w[j, l]))
            z_weight = 0
            for k in range(K):
                z_weight = z_weight + (np.sum(z[:, k]) * np.log(pi_k[k]))
            w_weight = 0
            for l in range(L):
                w_weight = w_weight + (np.sum(w[:, l]) * np.log(rho_l[l]))
            value = z_weight + w_weight + LL  # + H_z + H_w
        if choice == "Z":
            H_z = 0
            for i in range(n):
                for k in range(K):
                    H_z = H_z - (z[i, k] * np.log(z[i, k]))
            z_weight = 0
            for k in range(K):
                z_weight = z_weight + (np.sum(z[:, k]) * np.log(pi_k[k]))
            value = z_weight + LL + H_z
        if choice == "W":
            H_w = 0
            for j in range(d):
                for l in range(L):
                    H_w = H_w - (w[j, l] * np.log(w[j, l]))
            w_weight = 0
            for l in range(L):
                w_weight = w_weight + (np.sum(w[:, l]) * np.log(rho_l[l]))
            value = w_weight + LL + H_w
        return [H_z, H_w, LL, value]

    def _fit_single(self, data, random_state, y=None):
        """Perform one run of Tensor co-clustering.

        Alternates a block EM: `iteration_z` E/M sweeps on the row
        partition, then `iteration_w` sweeps on the column partition,
        until the criterion improves by less than `tol`.

        Parameters
        ----------
        data : three-way numpy array, shape=(n_row_objects, d_col_objects, v_features)
            Tensor to be analyzed
        random_state : int
            seed used for the random initialization of the partitions
        """
        K = self.n_row_clusters
        L = self.n_col_clusters
        bool_fuzzy = self.fuzzy
        if self.init_row is None:
            z = random_init(K, data.shape[0], random_state)
        else:
            z = np.array(self.init_row, dtype=float)
        if self.init_col is None:
            w = random_init(L, data.shape[1], random_state)
        else:
            w = np.array(self.init_col, dtype=float)
        n = data.shape[0]
        d = data.shape[1]
        nbr_covariates = data.shape[2]
        # initial parameter estimates (epsilon avoids log(0) downstream)
        mukl_hat = self.mukl(data, z, w) + 1.e-8
        print("les mukl_hat", mukl_hat)
        pi_k_hat = self.pi_k(z)
        print("proportion lignes", pi_k_hat)
        rho_l_hat = self.rho_l(w)
        print("proportion colonnes", rho_l_hat)
        result = self.F_c(data, z, w, mukl_hat, pi_k_hat, rho_l_hat, choice='ZW')
        fc = result[3]
        print("objective function", fc)
        #################################
        # start of the BLVEM algorithm
        #################################
        iteration_n = self.max_iter
        iteration_z = int(10)
        iteration_w = int(10)
        # per-iteration trace of the block means, for plotting convergence
        dessiner_courbe_evol_mukl = np.zeros((K, L, iteration_n + 1))
        for k in range(K):
            for l in range(L):
                dessiner_courbe_evol_mukl[k, l, 0] = np.mean(mukl_hat[k, l, :])
        LL = []
        LL.append(fc)
        fc_previous = float(-np.inf)
        t = 0
        change = True
        while change and t < self.max_iter:
            print("iteration n: ", t)
            t_z = 0
            while t_z < iteration_z:
                print("iteration t_z :", t_z)
                # E-step: update the row partition z given w and the parameters
                z = np.float64(np.zeros((n, K)))
                for i in range(n):
                    x_select = np.asarray(data[[i], :, :]).reshape(d, nbr_covariates)
                    for k in range(K):
                        compound = 0
                        for l in range(L):
                            w_l = w[:, l].reshape(d, 1)
                            mukl_select = (mukl_hat[k][l]).reshape(1, nbr_covariates)
                            xijLog = (x_select[:, :] * np.log(mukl_select[0, :])).reshape(d, nbr_covariates)
                            oneXij = np.ones((d, nbr_covariates)) - x_select[:, :]
                            logOneMukl = np.log(np.ones((1, nbr_covariates)) - mukl_select)
                            value = xijLog + (logOneMukl[0, :] * oneXij)
                            compound = compound + np.sum((w_l * value))
                        z[i, k] = np.log(pi_k_hat[k]) + compound
                    if bool_fuzzy == True:
                        # soft assignment: softmax with max-shift for stability
                        z[i, :] = z[i, :] - np.amax(z[i, :])
                        z[i, :] = np.exp(z[i, :]) / np.sum(np.exp(z[i, :])) + 1.e-5
                    else:
                        # hard assignment to the most likely row cluster
                        ind_max_r = np.argmax(z[i, :])
                        z[i, :] = 0 + 1.e-10
                        z[i, ind_max_r] = 1
                # M-step: update proportions and means
                pi_k_hat = self.pi_k(z)
                mukl_hat = self.mukl(data, z, w)
                t_z = t_z + 1
            t_w = 0
            while t_w < iteration_w:
                print("iteration t_w :", t_w)
                # E-step: update the column partition w given z and the parameters
                w = np.float64(np.zeros((d, L)))
                for j in range(d):
                    x_select = np.asarray(data[:, [j], :]).reshape(n, nbr_covariates)
                    for l in range(L):
                        compound = 0
                        for k in range(K):
                            z_k = z[:, k].reshape(n, 1)
                            mukl_select = (mukl_hat[k][l]).reshape(1, nbr_covariates)
                            xijLog = (x_select[:, :] * np.log(mukl_select[0, :])).reshape(n, nbr_covariates)
                            oneXij = np.ones((n, nbr_covariates)) - x_select[:, :]
                            logOneMukl = np.log(np.ones((1, nbr_covariates)) - mukl_select)
                            value = xijLog + (logOneMukl[0, :] * oneXij)
                            compound = compound + np.sum((z_k * value))
                        w[j, l] = np.log(rho_l_hat[l]) + compound
                    if bool_fuzzy == True:
                        # soft assignment: softmax with max-shift for stability
                        w[j, :] = w[j, :] - np.amax(w[j, :])
                        w[j, :] = np.exp(w[j, :]) / np.sum(np.exp(w[j, :])) + 1.e-5
                    else:
                        # hard assignment to the most likely column cluster
                        ind_max_c = np.argmax(w[j, :])
                        w[j, :] = 0 + 1.e-10
                        w[j, ind_max_c] = 1
                # M-step: update proportions and means
                rho_l_hat = self.rho_l(w)
                mukl_hat = self.mukl(data, z, w)
                t_w = t_w + 1
            for k in range(K):
                # bug fix: this inner loop iterated range(K) instead of
                # range(L) — IndexError when K > L, skipped columns when K < L
                for l in range(L):
                    dessiner_courbe_evol_mukl[k, l, t + 1] = np.mean(mukl_hat[k, l, :])
            result = self.F_c(data, z, w, mukl_hat, pi_k_hat, rho_l_hat, choice='ZW')
            fc = result[3]
            LL.append(fc)
            print("fc value", fc)
            if np.abs(fc - fc_previous) > self.tol:
                fc_previous = fc
                change = True
                # NOTE(review): fc is appended a second time here (already
                # appended above), so `criterions` holds duplicates while
                # improving — preserved for backward compatibility; confirm intent
                LL.append(fc)
                print("fc value", fc)
                t = t + 1
            else:
                change = False
        self.criterions = LL
        self.criterion = fc
        # labels are 1-based
        self.row_labels_ = np.argmax(z, 1) + 1
        self.column_labels_ = np.argmax(w, 1) + 1
        self.mu_kl = mukl_hat
        self.mu_kl_evolution = dessiner_courbe_evol_mukl
        self.Z = z
        self.W = w
| [
"sklearn.utils.check_random_state",
"numpy.sum",
"numpy.abs",
"numpy.argmax",
"numpy.ones",
"numpy.isnan",
"numpy.iinfo",
"numpy.mean",
"numpy.arange",
"numpy.exp",
"numpy.multiply",
"cupy.asnumpy",
"numpy.asarray",
"cupy.multiply",
"cupy.asarray",
"numpy.log",
"numpy.zeros",
"cupy... | [((3569, 3606), 'sklearn.utils.check_random_state', 'check_random_state', (['self.random_state'], {}), '(self.random_state)\n', (3587, 3606), False, 'from sklearn.utils import check_random_state\n'), ((6054, 6066), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (6063, 6066), True, 'import numpy as np\n'), ((6087, 6099), 'numpy.arange', 'np.arange', (['d'], {}), '(d)\n', (6096, 6099), True, 'import numpy as np\n'), ((6329, 6348), 'numpy.zeros', 'np.zeros', (['(K, L, v)'], {}), '((K, L, v))\n', (6337, 6348), True, 'import numpy as np\n'), ((9551, 9569), 'numpy.ones', 'np.ones', (['(n, d, v)'], {}), '((n, d, v))\n', (9558, 9569), True, 'import numpy as np\n'), ((14732, 14765), 'numpy.zeros', 'np.zeros', (['(K, L, iteration_n + 1)'], {}), '((K, L, iteration_n + 1))\n', (14740, 14765), True, 'import numpy as np\n'), ((4544, 4568), 'numpy.isnan', 'np.isnan', (['self.criterion'], {}), '(self.criterion)\n', (4552, 4568), True, 'import numpy as np\n'), ((7788, 7800), 'numpy.sum', 'np.sum', (['z', '(0)'], {}), '(z, 0)\n', (7794, 7800), True, 'import numpy as np\n'), ((8211, 8223), 'numpy.sum', 'np.sum', (['w', '(0)'], {}), '(w, 0)\n', (8217, 8223), True, 'import numpy as np\n'), ((13489, 13525), 'numpy.array', 'np.array', (['self.init_row'], {'dtype': 'float'}), '(self.init_row, dtype=float)\n', (13497, 13525), True, 'import numpy as np\n'), ((13654, 13690), 'numpy.array', 'np.array', (['self.init_col'], {'dtype': 'float'}), '(self.init_col, dtype=float)\n', (13662, 13690), True, 'import numpy as np\n'), ((19701, 19716), 'numpy.argmax', 'np.argmax', (['z', '(1)'], {}), '(z, 1)\n', (19710, 19716), True, 'import numpy as np\n'), ((19751, 19766), 'numpy.argmax', 'np.argmax', (['w', '(1)'], {}), '(w, 1)\n', (19760, 19766), True, 'import numpy as np\n'), ((4360, 4378), 'numpy.iinfo', 'np.iinfo', (['np.int32'], {}), '(np.int32)\n', (4368, 4378), True, 'import numpy as np\n'), ((6564, 6577), 'numpy.sum', 'np.sum', (['poids'], {}), '(poids)\n', (6570, 6577), True, 
'import numpy as np\n'), ((14877, 14903), 'numpy.mean', 'np.mean', (['mukl_hat[k, l, :]'], {}), '(mukl_hat[k, l, :])\n', (14884, 14903), True, 'import numpy as np\n'), ((19307, 19331), 'numpy.abs', 'np.abs', (['(fc - fc_previous)'], {}), '(fc - fc_previous)\n', (19313, 19331), True, 'import numpy as np\n'), ((6738, 6759), 'numpy.multiply', 'np.multiply', (['dup_S', 'x'], {}), '(dup_S, x)\n', (6749, 6759), True, 'import numpy as np\n'), ((6789, 6817), 'numpy.sum', 'np.sum', (['x_poids'], {'axis': '(0, 1)'}), '(x_poids, axis=(0, 1))\n', (6795, 6817), True, 'import numpy as np\n'), ((6868, 6881), 'cupy.asarray', 'cp.asarray', (['x'], {}), '(x)\n', (6878, 6881), True, 'import cupy as cp\n'), ((6914, 6931), 'cupy.asarray', 'cp.asarray', (['poids'], {}), '(poids)\n', (6924, 6931), True, 'import cupy as cp\n'), ((7066, 7091), 'cupy.multiply', 'cp.multiply', (['dup_S', 'x_gpu'], {}), '(dup_S, x_gpu)\n', (7077, 7091), True, 'import cupy as cp\n'), ((7121, 7149), 'cupy.sum', 'cp.sum', (['x_poids'], {'axis': '(0, 1)'}), '(x_poids, axis=(0, 1))\n', (7127, 7149), True, 'import cupy as cp\n'), ((7178, 7196), 'cupy.asnumpy', 'cp.asnumpy', (['sum_kl'], {}), '(sum_kl)\n', (7188, 7196), True, 'import cupy as cp\n'), ((7217, 7250), 'cupy.cuda.Stream.null.synchronize', 'cp.cuda.Stream.null.synchronize', ([], {}), '()\n', (7248, 7250), True, 'import cupy as cp\n'), ((10498, 10511), 'cupy.asarray', 'cp.asarray', (['x'], {}), '(x)\n', (10508, 10511), True, 'import cupy as cp\n'), ((10544, 10561), 'cupy.asarray', 'cp.asarray', (['one3D'], {}), '(one3D)\n', (10554, 10561), True, 'import cupy as cp\n'), ((10600, 10623), 'cupy.asarray', 'cp.asarray', (['mukl_select'], {}), '(mukl_select)\n', (10610, 10623), True, 'import cupy as cp\n'), ((10656, 10673), 'cupy.asarray', 'cp.asarray', (['Imukl'], {}), '(Imukl)\n', (10666, 10673), True, 'import cupy as cp\n'), ((11022, 11043), 'cupy.asnumpy', 'cp.asnumpy', (['xijLnmukl'], {}), '(xijLnmukl)\n', (11032, 11043), True, 'import cupy as cp\n'), 
((11077, 11099), 'cupy.asnumpy', 'cp.asnumpy', (['Ixij_Imukl'], {}), '(Ixij_Imukl)\n', (11087, 11099), True, 'import cupy as cp\n'), ((11120, 11153), 'cupy.cuda.Stream.null.synchronize', 'cp.cuda.Stream.null.synchronize', ([], {}), '()\n', (11151, 11153), True, 'import cupy as cp\n'), ((11493, 11506), 'numpy.sum', 'np.sum', (['error'], {}), '(error)\n', (11499, 11506), True, 'import numpy as np\n'), ((15374, 15390), 'numpy.zeros', 'np.zeros', (['(n, K)'], {}), '((n, K))\n', (15382, 15390), True, 'import numpy as np\n'), ((17324, 17340), 'numpy.zeros', 'np.zeros', (['(d, L)'], {}), '((d, L))\n', (17332, 17340), True, 'import numpy as np\n'), ((19090, 19116), 'numpy.mean', 'np.mean', (['mukl_hat[k, l, :]'], {}), '(mukl_hat[k, l, :])\n', (19097, 19116), True, 'import numpy as np\n'), ((10005, 10020), 'numpy.ones', 'np.ones', (['(1, v)'], {}), '((1, v))\n', (10012, 10020), True, 'import numpy as np\n'), ((12027, 12042), 'numpy.sum', 'np.sum', (['z[:, k]'], {}), '(z[:, k])\n', (12033, 12042), True, 'import numpy as np\n'), ((12045, 12060), 'numpy.log', 'np.log', (['pi_k[k]'], {}), '(pi_k[k])\n', (12051, 12060), True, 'import numpy as np\n'), ((12158, 12173), 'numpy.sum', 'np.sum', (['w[:, l]'], {}), '(w[:, l])\n', (12164, 12173), True, 'import numpy as np\n'), ((12176, 12192), 'numpy.log', 'np.log', (['rho_l[l]'], {}), '(rho_l[l])\n', (12182, 12192), True, 'import numpy as np\n'), ((12523, 12538), 'numpy.sum', 'np.sum', (['z[:, k]'], {}), '(z[:, k])\n', (12529, 12538), True, 'import numpy as np\n'), ((12541, 12556), 'numpy.log', 'np.log', (['pi_k[k]'], {}), '(pi_k[k])\n', (12547, 12556), True, 'import numpy as np\n'), ((12868, 12883), 'numpy.sum', 'np.sum', (['w[:, l]'], {}), '(w[:, l])\n', (12874, 12883), True, 'import numpy as np\n'), ((12886, 12902), 'numpy.log', 'np.log', (['rho_l[l]'], {}), '(rho_l[l])\n', (12892, 12902), True, 'import numpy as np\n'), ((16762, 16780), 'numpy.argmax', 'np.argmax', (['z[i, :]'], {}), '(z[i, :])\n', (16771, 16780), True, 'import 
numpy as np\n'), ((18669, 18687), 'numpy.argmax', 'np.argmax', (['w[j, :]'], {}), '(w[j, :])\n', (18678, 18687), True, 'import numpy as np\n'), ((11768, 11783), 'numpy.log', 'np.log', (['z[i, k]'], {}), '(z[i, k])\n', (11774, 11783), True, 'import numpy as np\n'), ((11914, 11929), 'numpy.log', 'np.log', (['w[j, l]'], {}), '(w[j, l])\n', (11920, 11929), True, 'import numpy as np\n'), ((12410, 12425), 'numpy.log', 'np.log', (['z[i, k]'], {}), '(z[i, k])\n', (12416, 12425), True, 'import numpy as np\n'), ((12755, 12770), 'numpy.log', 'np.log', (['w[j, l]'], {}), '(w[j, l])\n', (12761, 12770), True, 'import numpy as np\n'), ((15458, 15485), 'numpy.asarray', 'np.asarray', (['data[[i], :, :]'], {}), '(data[[i], :, :])\n', (15468, 15485), True, 'import numpy as np\n'), ((16397, 16416), 'numpy.log', 'np.log', (['pi_k_hat[k]'], {}), '(pi_k_hat[k])\n', (16403, 16416), True, 'import numpy as np\n'), ((16557, 16573), 'numpy.amax', 'np.amax', (['z[i, :]'], {}), '(z[i, :])\n', (16564, 16573), True, 'import numpy as np\n'), ((17408, 17435), 'numpy.asarray', 'np.asarray', (['data[:, [j], :]'], {}), '(data[:, [j], :])\n', (17418, 17435), True, 'import numpy as np\n'), ((18308, 18328), 'numpy.log', 'np.log', (['rho_l_hat[l]'], {}), '(rho_l_hat[l])\n', (18314, 18328), True, 'import numpy as np\n'), ((18467, 18483), 'numpy.amax', 'np.amax', (['w[j, :]'], {}), '(w[j, :])\n', (18474, 18483), True, 'import numpy as np\n'), ((10200, 10225), 'numpy.log', 'np.log', (['mukl_select[0, :]'], {}), '(mukl_select[0, :])\n', (10206, 10225), True, 'import numpy as np\n'), ((10724, 10753), 'cupy.log', 'cp.log', (['mukl_select_gpu[0, :]'], {}), '(mukl_select_gpu[0, :])\n', (10730, 10753), True, 'import cupy as cp\n'), ((15963, 15991), 'numpy.ones', 'np.ones', (['(d, nbr_covariates)'], {}), '((d, nbr_covariates))\n', (15970, 15991), True, 'import numpy as np\n'), ((16339, 16358), 'numpy.sum', 'np.sum', (['(w_l * value)'], {}), '(w_l * value)\n', (16345, 16358), True, 'import numpy as np\n'), ((16609, 
16624), 'numpy.exp', 'np.exp', (['z[i, :]'], {}), '(z[i, :])\n', (16615, 16624), True, 'import numpy as np\n'), ((17930, 17958), 'numpy.ones', 'np.ones', (['(n, nbr_covariates)'], {}), '((n, nbr_covariates))\n', (17937, 17958), True, 'import numpy as np\n'), ((18251, 18270), 'numpy.sum', 'np.sum', (['(z_k * value)'], {}), '(z_k * value)\n', (18257, 18270), True, 'import numpy as np\n'), ((18518, 18533), 'numpy.exp', 'np.exp', (['w[j, :]'], {}), '(w[j, :])\n', (18524, 18533), True, 'import numpy as np\n'), ((16057, 16085), 'numpy.ones', 'np.ones', (['(1, nbr_covariates)'], {}), '((1, nbr_covariates))\n', (16064, 16085), True, 'import numpy as np\n'), ((16634, 16649), 'numpy.exp', 'np.exp', (['z[i, :]'], {}), '(z[i, :])\n', (16640, 16649), True, 'import numpy as np\n'), ((18024, 18052), 'numpy.ones', 'np.ones', (['(1, nbr_covariates)'], {}), '((1, nbr_covariates))\n', (18031, 18052), True, 'import numpy as np\n'), ((18543, 18558), 'numpy.exp', 'np.exp', (['w[j, :]'], {}), '(w[j, :])\n', (18549, 18558), True, 'import numpy as np\n'), ((15872, 15897), 'numpy.log', 'np.log', (['mukl_select[0, :]'], {}), '(mukl_select[0, :])\n', (15878, 15897), True, 'import numpy as np\n'), ((17839, 17864), 'numpy.log', 'np.log', (['mukl_select[0, :]'], {}), '(mukl_select[0, :])\n', (17845, 17864), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
from __future__ import print_function
import sys, os
import bz2, re
import itertools
import operator
import marisa_trie
import numpy as np
from pathlib import Path
from os.path import (expanduser)
from math import sqrt
# opens file checking whether it is bz2 compressed or not.
import tarfile
from .helper import open_get_line
"""A simple password library. Has function to put passwords into nice data
structure for fast look up.
The file creates a cache databse file in home folder with the name .passwords.
In Unix you can find it using `~/.pypasswords`.
Run with Python 3 your life will be much easier. """
MAX_INT = 2**64-1
DEBUG = True
home = expanduser("~")
pass_dir = os.path.join(home, '.pypasswords')
ROCKYOU_TOTAL_CNT = 32603388.0
def sample_following_dist(handle_iter, n, totalf):
"""Samples n passwords following the distribution from the handle
@handle_iter is an iterator that gives (pw,f) @n is the total
number of samle asked for @totalf is the total number of users,
which is euqal to sum(f for pw,f in handle_iter)
As, handle_iterator is an iterator and can only traverse once, @totalf
needs to be supplied to the funciton.
Returns, an array of @n tuples (id, pw) sampled from @handle_iter.
"""
multiplier = 1.0
if totalf == 1.0:
multiplier = 1e8
# print "WARNING!! I don't except probabilities"
totalf = totalf * multiplier
print("# Population Size", totalf)
A = np.sort(np.unique(np.random.randint(0, totalf, size=n*2))[:n])
A = A[::-1]
# Uniqueness check, non necessarily required, but not very
# computationally intensive
assert len(A) == n, "Not enough randomnumbers generated"\
"Requried {}, generated only {}".format(n, len(A))
j = 0
sampled = 0
val = A.pop()
# print handle_iter
for _,w,f in handle_iter:
j += f*multiplier
if not A: break
while val<j:
sampled += 1
if sampled %5000 == 0:
print ("Sampled:",sampled)
yield (val, w)
if A:
val = A.pop()
else:
break
print ("# Stopped at:", w, f, j, '\n')
while A and val<j:
yield (val, w)
if A:
i, val = A.pop()
else:
break
def getallgroups(arr, k=-1):
    """Yield every subset of *arr* with size between 1 and *k* (inclusive).

    With the default ``k=-1`` all non-empty subsets are produced; the result
    has ``sum_{i=1}^{k} nCi`` elements, where ``n = len(arr)``.
    """
    if k < 0:
        k = len(arr)
    unique_items = set(arr)
    subsets_by_size = (itertools.combinations(unique_items, size)
                       for size in range(1, k + 1))
    return itertools.chain.from_iterable(subsets_by_size)
def is_asciistring(s):
    """Return True iff *s* (str or bytes) contains only ASCII characters.

    The original unconditionally called ``s.decode('ascii')``, which only
    exists on bytes; on Python 3 a ``str`` raised an uncaught
    AttributeError.  Strings are now checked via ``encode`` instead.
    """
    try:
        if isinstance(s, bytes):
            s.decode('ascii')
        else:
            s.encode('ascii')
        return True
    except (UnicodeDecodeError, UnicodeEncodeError):
        # warning("UnicodeError:", s)
        return False
regex = r'([A-Za-z_]+)|([0-9]+)|(\W+)'

def print_err(*args):
    """Write the space-joined string forms of *args* to stderr when DEBUG is on."""
    if DEBUG:
        message = ' '.join(str(arg) for arg in args)
        sys.stderr.write(message + '\n')

def tokens(w):
    """Split *w* into maximal runs of letters/underscores, digits, or other chars."""
    parts = []
    rest = w
    while rest:
        match = re.match(regex, rest)
        piece = match.group(0)
        parts.append(piece)
        rest = rest[len(piece):]
    return parts
def whatchar(c):
    """Classify character *c*: 'L' for a letter, 'D' for a digit, 'Y' otherwise.

    Fixes the original ``c.isdigit`` (missing call parentheses): the bound
    method object was always truthy, so 'Y' was never returned.
    """
    if c.isalpha():
        return 'L'
    if c.isdigit():
        return 'D'
    return 'Y'
def mean_sd(arr):
    """Return (mean, population standard deviation) of the numbers in *arr*."""
    n = len(arr)
    total = sum(arr)
    total_sq = sum(x * x for x in arr)
    mean = total / float(n)
    variance = float(total_sq) / n - mean * mean
    return mean, sqrt(variance)
def convert2group(t, totalC):
    """Map *t* to a random representative of its congruence class mod *totalC*.

    A uniformly random multiple of *totalC* is added, keeping the result
    below MAX_INT while preserving ``result % totalC == t % totalC``.
    """
    offset = np.random.randint(0, (MAX_INT-t)/totalC)
    return t + offset * totalC
def getIndex(p, A):
    """Return the first index i at which the running sum of A exceeds p.

    Assumes the last element of *A* is the sum of all elements, so *p* is
    first reduced modulo that total.
    """
    remaining = p % A[-1]
    index = 0
    for index, weight in enumerate(A):
        remaining -= weight
        if remaining < 0:
            break
    return index
class Passwords(object):
    """A class to efficiently store and read a large password file.  It
    creates two cache files per password file under the directory
    'eff_data/' in the home `.pypasswords` directory (~/.pypasswords).
    The first file is a trie, which stores all the passwords in the
    efficient prefix-trie format of the "marisa_trie" module.  The second
    is a large numpy array containing the frequencies, indexed by the
    trie key id.  This is what I found the most memory and compute
    efficient way of accessing passwords in Python.
    @pass_file: the path of the file you want to process. The file
    should contain freq and the password similar to the output of the
    unix "uniq -c" command.
    @max_pass_len, min_pass_len defines the
    range of passwords to consider. Note, this filtering does not
    affect the totalf, and only changes the iterpws() function.
    @@kwargs: Add some extra arguments: @dirname='.' will use the local current
    directory for effective data structures.
    WARNING: If your file contains more than 2MN, please create the trie from a
    password frequency file using this command:
    $ pwfile=breach_compilation-withcount.txt; awk '{$1=""; print $0}' ${pwfile} | marisa-build -o ${pwfile//.*}.trie
    This will save significant amount of memory
    """
    def __init__(self, pass_file, min_pass_len=6, max_pass_len=50, **kwargs):
        # Cache-file names are derived from the input file name plus the
        # length filter and the read limit, so different settings coexist.
        self.fbasename = os.path.basename(pass_file).split('.', 1)[0]
        _dirname = Path(kwargs.get('dirname', '{}/eff_data/'.format(pass_dir)))
        if not os.path.exists(_dirname):
            os.makedirs(_dirname)
        self._max_pass_len = max_pass_len
        self._min_pass_len = min_pass_len
        _file_base_name = '{}-{}_{}'.format(self.fbasename, self._min_pass_len, self._max_pass_len)
        _limit = kwargs.get('limit', int(2e6)) # default value is 2 mn
        if _limit > -1:
            _file_base_name += "N{}mn".format(int(_limit/1e6))
        print("Base file name: {}".format(_file_base_name))
        self._file_trie = _dirname / (_file_base_name + '.trie')
        self._file_freq = _dirname / (_file_base_name + '.npz')
        self._T, self._freq_list, self._totalf = None, None, None
        # Reuse the cached trie/frequency files unless a fresh build is forced.
        if not kwargs.get('freshall', False) and \
           os.path.exists(self._file_trie) and \
           os.path.exists(self._file_freq):
            self.load_data()
        else:
            if 'freshall' in kwargs: del kwargs['freshall']
            self.create_data_structure(pass_file, freshall=True, **kwargs)
        assert self._T, "Could not initialize the trie."
        self._sorted_freq_list = None
    def create_data_structure(self, pass_file, freshall=False, **kwargs):
        """Build (or rebuild) the trie and the frequency array from
        @pass_file and persist both to disk."""
        # Record trie, Slow, and not memory efficient
        # self._T = marisa_trie.RecordTrie(
        #     '<II', ((unicode(w), (c,))
        #             for i, (w,c) in
        #             enumerate(passwords.open_get_line(pass_file)))
        # )
        print(kwargs)
        # If the trie for passwords is already there, read it
        print("Trie file: {}".format(self._file_trie))
        if os.path.exists(self._file_trie) and not freshall:
            self._T = marisa_trie.Trie()
            self._T.load(self._file_trie)
        else:
            print("Recreating the trie file")
            self._T = marisa_trie.Trie(w for w, c in open_get_line(pass_file, **kwargs))
            self._T.save(self._file_trie)
        # Frequencies are stored positionally, indexed by the trie key id.
        self._freq_list = np.zeros(len(self._T), dtype=int)
        for w, c in open_get_line(pass_file, **kwargs):
            try:
                self._freq_list[self._T.key_id(w)] = c
            except Exception as e:
                print("Error: {}. w={}, c={}".format(e, w, c))
        self._totalf = self._freq_list.sum()
        np.savez_compressed(
            self._file_freq, freq=self._freq_list, fsum=self._totalf
        )
    def sample_pws(self, n, asperdist=True):
        """Returns n passwords sampled from this password dataset. if
        asperdist is True, then returns the password sampled according
        the password histogram distribution (with
        replacement). Passwords are always sampled with replacement.
        TODO: The sample users, instead of passwords perse.
        """
        if asperdist:
            sample = np.random.choice(
                self._freq_list.shape[0], size=n, p=self._freq_list/self._totalf
            )
        else:
            sample = np.random.choice(len(self._T), size=n)
        return (self._T.restore_key(i) for i in sample)
    def load_data(self):
        """Load the cached trie and frequency array from disk."""
        self._T = marisa_trie.Trie()
        self._T.load(self._file_trie)
        np_f = np.load(self._file_freq)
        self._freq_list, self._totalf = np_f['freq'], np_f['fsum']
    def totalf(self):
        # Total number of accounts (sum of all password frequencies).
        return self._totalf
    def pw2id(self, pw):
        """Return the trie key id of @pw, or -1 if it is unknown."""
        try:
            return self._T.key_id(pw)
        except KeyError:
            return -1
        except UnicodeDecodeError as e:
            print(repr(pw), e)
            raise ValueError(e)
    def id2pw(self, _id):
        """Return the password for trie key @_id, or '' if the id is invalid."""
        try:
            return self._T.restore_key(_id)
        except KeyError:
            return ''
    def prob(self, pw):
        """Return the empirical probability of @pw in this dataset."""
        return self.__getitem__(pw)/self._totalf
    def pw2freq(self, pw):
        """Return the frequency of @pw; unknown passwords have frequency 0."""
        try:
            return self._freq_list[self._T.key_id(pw)]
            # return self._T.get(unicode(pw), 0)
        except KeyError:
            return 0
    def id2freq(self, _id):
        """Return the frequency stored for trie key @_id (0 on a bad id)."""
        _id = int(_id)
        try:
            return self._freq_list[_id]
        except ValueError:
            return 0
    def sumvalues(self, q=0):
        """Sum of top q password frequencies
        """
        if q == 0:
            return self._totalf
        else:
            # np.partition selects the q largest entries without a full sort;
            # the double negation turns "smallest of -f" into "largest of f".
            return -np.partition(-self._freq_list, q)[:q].sum()
    def iterpws(self, n):
        """
        Returns passwords in order of their frequencies.
        @n: The number of passwords to return
        Return: pwid, password, frequency
        Every password is assigned an uniq id, for efficient access.
        NOTE(review): @n is currently unused in the body below; the generator
        yields every password within the length bounds.
        """
        if self._sorted_freq_list is None:
            self._sorted_freq_list = np.argsort(self._freq_list)[::-1]
        for _id in self._sorted_freq_list:
            pw = self._T.restore_key(_id)
            if self._min_pass_len <= len(pw) <= self._max_pass_len:
                yield _id, pw, self._freq_list[_id]
    def justiter(self):
        # Iterate in trie order (no frequency sorting, no length filtering).
        for w, _id in self._T.iteritems():
            yield _id, w, self._freq_list[_id]
    def keys(self):
        return self._T.iterkeys()
    def values(self):
        return self._freq_list
    def guessranks(self, pws):
        """return the guess rank of each password in @pws according to this
        password distribution file"""
        # if self._sorted_freq_list is None:
        #     self._sorted_freq_list = np.argsort(self._freq_list)[::-1]
        freqs = np.array([self.pw2freq(pw) for pw in pws]).reshape(-1, 1)
        # Rank = 1 + number of passwords with strictly higher frequency.
        ranks = (np.tile(self._freq_list, freqs.shape) > freqs).sum(axis=1) + 1
        return ranks
    def __iter__(self):
        """Returns the id and frequency of the passwords, you can get
        the real password by calling self.id2pw on the id"""
        if self._sorted_freq_list is None:
            self._sorted_freq_list = np.argsort(self._freq_list)[::-1]
        for _id in self._sorted_freq_list:
            yield _id, self._freq_list[_id]
    def __getitem__(self, k):
        # Accept either a trie key id (int) or a password string.
        if isinstance(k, int):
            return self._freq_list[k]
        if isinstance(k, str):
            return self._freq_list[self.pw2id(k)]
        raise TypeError("_id is wrong type ({}) expects str or int"
                        .format(type(k)))
    def __len__(self):
        return self._freq_list.shape[0]
import unittest
class TestPasswords(unittest.TestCase):
    """Sanity checks for the Passwords store and the subset helper."""

    def test_pw2freq(self):
        """Known RockYou frequencies should round-trip through pw2freq."""
        store = Passwords(
            os.path.expanduser('~/passwords/rockyou-withcount.txt.bz2')
        )
        expected = {
            'michelle': 12714,
            'george': 4749,
            'familia': 1975,
            'honeybunny': 242,
            'asdfasdf2wg': 0,
            ' 234 adsf': 0,
        }
        for password, frequency in expected.items():
            self.assertEqual(store.pw2freq(password), frequency)

    def test_getallgroups(self):
        """getallgroups must enumerate every non-empty subset."""
        cases = [
            ([1, 2, 3],
             {(1,), (2,), (3,), (1, 2), (2, 3), (1, 3), (1, 2, 3)}),
        ]
        for elements, expected in cases:
            self.assertEqual(set(getallgroups(elements)), expected)
if __name__ == "__main__":
# print(list(getallgroups([1,2,3,4,5,6,7,8,9], 5)))
unittest.main()
| [
"unittest.main",
"numpy.partition",
"numpy.load",
"os.path.join",
"os.makedirs",
"os.path.basename",
"os.path.exists",
"re.match",
"numpy.argsort",
"numpy.savez_compressed",
"marisa_trie.Trie",
"numpy.random.randint",
"numpy.tile",
"numpy.random.choice",
"os.path.expanduser"
] | [((679, 694), 'os.path.expanduser', 'expanduser', (['"""~"""'], {}), "('~')\n", (689, 694), False, 'from os.path import expanduser\n'), ((706, 740), 'os.path.join', 'os.path.join', (['home', '""".pypasswords"""'], {}), "(home, '.pypasswords')\n", (718, 740), False, 'import sys, os\n'), ((12248, 12263), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12261, 12263), False, 'import unittest\n'), ((3063, 3081), 're.match', 're.match', (['regex', 'w'], {}), '(regex, w)\n', (3071, 3081), False, 'import bz2, re\n'), ((7476, 7553), 'numpy.savez_compressed', 'np.savez_compressed', (['self._file_freq'], {'freq': 'self._freq_list', 'fsum': 'self._totalf'}), '(self._file_freq, freq=self._freq_list, fsum=self._totalf)\n', (7495, 7553), True, 'import numpy as np\n'), ((8286, 8304), 'marisa_trie.Trie', 'marisa_trie.Trie', ([], {}), '()\n', (8302, 8304), False, 'import marisa_trie\n'), ((8358, 8382), 'numpy.load', 'np.load', (['self._file_freq'], {}), '(self._file_freq)\n', (8365, 8382), True, 'import numpy as np\n'), ((3485, 3529), 'numpy.random.randint', 'np.random.randint', (['(0)', '((MAX_INT - t) / totalC)'], {}), '(0, (MAX_INT - t) / totalC)\n', (3502, 3529), True, 'import numpy as np\n'), ((5261, 5285), 'os.path.exists', 'os.path.exists', (['_dirname'], {}), '(_dirname)\n', (5275, 5285), False, 'import sys, os\n'), ((5299, 5320), 'os.makedirs', 'os.makedirs', (['_dirname'], {}), '(_dirname)\n', (5310, 5320), False, 'import sys, os\n'), ((5981, 6012), 'os.path.exists', 'os.path.exists', (['self._file_trie'], {}), '(self._file_trie)\n', (5995, 6012), False, 'import sys, os\n'), ((6030, 6061), 'os.path.exists', 'os.path.exists', (['self._file_freq'], {}), '(self._file_freq)\n', (6044, 6061), False, 'import sys, os\n'), ((6811, 6842), 'os.path.exists', 'os.path.exists', (['self._file_trie'], {}), '(self._file_trie)\n', (6825, 6842), False, 'import sys, os\n'), ((6883, 6901), 'marisa_trie.Trie', 'marisa_trie.Trie', ([], {}), '()\n', (6899, 6901), False, 'import 
marisa_trie\n'), ((7999, 8088), 'numpy.random.choice', 'np.random.choice', (['self._freq_list.shape[0]'], {'size': 'n', 'p': '(self._freq_list / self._totalf)'}), '(self._freq_list.shape[0], size=n, p=self._freq_list / self\n ._totalf)\n', (8015, 8088), True, 'import numpy as np\n'), ((11577, 11636), 'os.path.expanduser', 'os.path.expanduser', (['"""~/passwords/rockyou-withcount.txt.bz2"""'], {}), "('~/passwords/rockyou-withcount.txt.bz2')\n", (11595, 11636), False, 'import sys, os\n'), ((1502, 1542), 'numpy.random.randint', 'np.random.randint', (['(0)', 'totalf'], {'size': '(n * 2)'}), '(0, totalf, size=n * 2)\n', (1519, 1542), True, 'import numpy as np\n'), ((9839, 9866), 'numpy.argsort', 'np.argsort', (['self._freq_list'], {}), '(self._freq_list)\n', (9849, 9866), True, 'import numpy as np\n'), ((10972, 10999), 'numpy.argsort', 'np.argsort', (['self._freq_list'], {}), '(self._freq_list)\n', (10982, 10999), True, 'import numpy as np\n'), ((5121, 5148), 'os.path.basename', 'os.path.basename', (['pass_file'], {}), '(pass_file)\n', (5137, 5148), False, 'import sys, os\n'), ((10652, 10689), 'numpy.tile', 'np.tile', (['self._freq_list', 'freqs.shape'], {}), '(self._freq_list, freqs.shape)\n', (10659, 10689), True, 'import numpy as np\n'), ((9450, 9483), 'numpy.partition', 'np.partition', (['(-self._freq_list)', 'q'], {}), '(-self._freq_list, q)\n', (9462, 9483), True, 'import numpy as np\n')] |
import numpy as np
from repeater.results_check.outliers_detection.outliers_detector_decorator import (
OutliersDetectionDecorator
)
class Mad(OutliersDetectionDecorator):
    """Outlier detector based on the Median Absolute Deviation (MAD)."""

    def __init__(self, outlier_detector, parameters):
        super().__init__(outlier_detector, __name__, parameters)

    def _find_outliers(self, inputs):
        """Return (unique outlier values, criterion_used_flag) for *inputs*.

        The criterion is applied only when the sample size lies within the
        [lower_threshold, upper_threshold] range configured on the decorator;
        otherwise an empty list is returned and the flag stays False.
        """
        criterion_used_flag = False
        if self.lower_threshold <= len(inputs) <= self.upper_threshold:
            outlier_mask = self.is_outlier(inputs)
            # BUGFIX: the original compared numpy bools with `is True`, which
            # is always False for np.bool_ instances, so no outlier was ever
            # collected.  Boolean-mask indexing uses plain truthiness instead.
            outliers = np.asarray(inputs)[outlier_mask]
            unique = np.unique(outliers)
            criterion_used_flag = True
            return unique, criterion_used_flag
        else:
            return [], criterion_used_flag

    def is_outlier(self, points, thresh=3.5):
        """Return a boolean mask flagging points whose modified z-score
        exceeds *thresh*.

        References:
        ----------
        <NAME> and <NAME> (1993), "Volume 16: How to Detect and
        Handle Outliers", The ASQC Basic References in Quality Control:
        Statistical Techniques, <NAME>, Ph.D., Editor.
        Iglewicz and Hoaglin suggest using threshold ±3.5 as cut-off value
        but this a matter of choice (±3 is also often used).
        """
        points = np.asarray(points)  # accept plain sequences as well
        if len(points.shape) == 1:
            points = points[:, None]
        # find median value of dataset
        median = np.median(points, axis=0)
        # find (Euclidean) deviations from the median
        diff = np.sqrt(np.sum((points - median)**2, axis=-1))
        # find median of deviations (the MAD); note this is zero for a
        # constant-majority sample, in which case the division below yields
        # inf/nan for the remaining points.
        med_abs_deviation = np.median(diff)
        # 0.6745 is the 0.75th quantile of the standard normal distribution
        # and is used for consistency.
        modified_z_score = 0.6745 * diff / med_abs_deviation
        return modified_z_score > thresh
| [
"numpy.median",
"numpy.sum",
"numpy.unique",
"numpy.sqrt"
] | [((1435, 1460), 'numpy.median', 'np.median', (['points'], {'axis': '(0)'}), '(points, axis=0)\n', (1444, 1460), True, 'import numpy as np\n'), ((1514, 1553), 'numpy.sum', 'np.sum', (['((points - median) ** 2)'], {'axis': '(-1)'}), '((points - median) ** 2, axis=-1)\n', (1520, 1553), True, 'import numpy as np\n'), ((1567, 1580), 'numpy.sqrt', 'np.sqrt', (['diff'], {}), '(diff)\n', (1574, 1580), True, 'import numpy as np\n'), ((1645, 1660), 'numpy.median', 'np.median', (['diff'], {}), '(diff)\n', (1654, 1660), True, 'import numpy as np\n'), ((687, 706), 'numpy.unique', 'np.unique', (['outliers'], {}), '(outliers)\n', (696, 706), True, 'import numpy as np\n')] |
#!/usr/bin/env python
#
# Copyright 2019 DFKI GmbH.
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the
# following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
# NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
# USE OR OTHER DEALINGS IN THE SOFTWARE.
import numpy as np
import math
from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_ERROR, LOG_MODE_INFO
from transformations import quaternion_from_euler, euler_from_quaternion, euler_matrix
from .utils import _transform_point_from_cad_to_opengl_cs, _transform_unconstrained_indices_from_cad_to_opengl_cs
from .constants import *
DISTANCE_WARNING = "Warning: shift second to last control point because it is too close to the last control point"
def _init_active_region(traj_constraint):
if "semanticAnnotation" in list(traj_constraint[0].keys()):
active_region = dict()
active_region["start_point"] = None
active_region["end_point"] = None
return active_region
else:
return None
def _end_active_region(active_region, control_points):
if active_region["start_point"] is None:
active_region["start_point"] = control_points[0]
if active_region["end_point"] is None:
active_region["end_point"] = control_points[-1]
def _update_active_region(active_region, point, new_active):
if new_active and active_region["start_point"] is None:
active_region["start_point"] = point
elif not new_active and active_region["start_point"] is not None and active_region["end_point"] is None:
active_region["end_point"] = point
def _is_active_trajectory_region(control_points, index):
if "semanticAnnotation" in list(control_points[index].keys()):
if "collisionAvoidance" in list(control_points[index]["semanticAnnotation"].keys()):
return control_points[index]["semanticAnnotation"]["collisionAvoidance"]
return True
class TrajectoryConstraintReader(object):
    """Parses trajectory constraint definitions of elementary actions into
    control-point lists, orientations and collision-avoidance active regions."""
    def __init__(self, activate_coordinate_transform=True, scale_factor=1.0):
        # activate_coordinate_transform: convert points from the CAD to the
        # OpenGL coordinate system when True.
        # scale_factor: uniform scale applied to every control-point position.
        self.activate_coordinate_transform = activate_coordinate_transform
        self.scale_factor = scale_factor
    def _filter_control_points_simple(self, control_points, distance_threshold=0.0):
        """Filter the raw control points into a dict of position and
        orientation lists, skipping undefined or too-close points."""
        filtered_control_points = {P_KEY: list(), O_KEY: list()}
        previous_point = None
        n_control_points = len(control_points)
        last_distance = None
        for idx in range(n_control_points):
            result = self._filter_control_point(control_points, n_control_points, idx, previous_point,
                                                last_distance, distance_threshold)
            if result is not None:
                position, orientation, last_distance = result
                #n_points = len(filtered_control_points)
                #if idx == n_control_points - 1:
                #    last_added_point_idx = n_points - 1
                #    delta = filtered_control_points[P_KEY][last_added_point_idx] - position
                #    if np.linalg.norm(delta) < distance_threshold:
                #        filtered_control_points[last_added_point_idx][P_KEY] += delta
                #        write_log(DISTANCE_WARNING)
                filtered_control_points[P_KEY].append(position)
                filtered_control_points[O_KEY].append(orientation)
                previous_point = position
        return filtered_control_points
    def _filter_control_points_ca(self, control_points, distance_threshold=-1):
        """Filter control points into per-active-region point lists; returns
        (list of point lists, list of active-region dicts or None)."""
        filtered_control_points = list()
        active_regions = list()
        previous_point = None
        n_control_points = len(control_points)
        was_active = False
        last_distance = None
        count = -1
        for idx in range(n_control_points):
            is_active = _is_active_trajectory_region(control_points, idx)
            if not is_active:
                was_active = is_active
                continue
            if not was_active and is_active:
                # An inactive->active transition starts a new region.
                active_region = _init_active_region(control_points)
                filtered_control_points.append(list())
                active_regions.append(active_region)
                count += 1
            if count < 0:
                continue
            # Inside an annotated region every point is kept (threshold off).
            tmp_distance_threshold = distance_threshold
            if active_regions[count] is not None:
                tmp_distance_threshold = -1
            result = self._filter_control_point(control_points, n_control_points, idx, previous_point,
                                                last_distance, tmp_distance_threshold)
            if result is None:
                continue
            else:
                point, orientation, last_distance = result
                n_points = len(filtered_control_points[count])
                if idx == n_control_points - 1:
                    # NOTE(review): this shifts the previously added point by
                    # delta (moving it away from the new last point) — confirm
                    # this matches the intended "shift towards" semantics.
                    last_added_point_idx = n_points - 1
                    delta = filtered_control_points[count][last_added_point_idx] - point
                    if np.linalg.norm(delta) < distance_threshold:
                        filtered_control_points[count][last_added_point_idx] += delta
                        # Use the module-level constant instead of duplicating
                        # the identical literal (consistency fix).
                        write_log(DISTANCE_WARNING)
                filtered_control_points[count].append(point)
                if active_regions[count] is not None:
                    _update_active_region(active_regions[count], point, is_active)
                previous_point = point
            was_active = is_active
        # handle invalid region specification
        region_points = list()
        for idx in range(len(filtered_control_points)):
            region_points.append(len(filtered_control_points[idx]))
            if active_regions[idx] is not None:
                if len(filtered_control_points[idx]) < 2:
                    # A region with fewer than two points is unusable.
                    filtered_control_points[idx] = None
                else:
                    _end_active_region(active_regions[idx], filtered_control_points[idx])
        # print "loaded", len(control_point_list),"active regions with",region_points,"points"
        return filtered_control_points, active_regions
    def _filter_control_point(self, control_points, n_control_points, index, previous_point, last_distance,
                              distance_threshold):
        """Validate, scale and transform a single control point.

        Returns (point, orientation, distance) or None when the point is
        undefined, duplicates the previous point, or is closer than the
        distance threshold.
        """
        control_point = control_points[index]
        if P_KEY not in list(control_point.keys()) or control_point[P_KEY] == [None, None, None]:
            write_log("Warning: skip undefined control point")
            return None
        # set components of the position to 0 where they are set to None to allow a 3D spline definition
        position = control_point[P_KEY]
        point = [p * self.scale_factor if p is not None else 0 for p in position]
        point = np.asarray(_transform_point_from_cad_to_opengl_cs(point, self.activate_coordinate_transform))
        if previous_point is not None and np.linalg.norm(point - previous_point) < 0.001:
            # Effectively a duplicate of the previous point.
            return None
        if O_KEY in list(control_point.keys()) and None not in control_point[O_KEY]:
            # Rotate a reference forward vector by the point's Euler angles
            # and project it onto the XZ plane.
            #q = quaternion_from_euler(*np.radians(control_point[O_KEY]))
            ref_vector = [0, 0, 1, 1]
            m = euler_matrix(*np.radians(control_point[O_KEY]))
            orientation = np.dot(m, ref_vector)#[:3]
            #ref_vector = [0, 1]
            #angle = np.radians(control_point[O_KEY][1])
            #sa = math.sin(angle)
            #ca = math.cos(angle)
            #m = np.array([[ca, -sa], [sa, ca]])
            #orientation = np.dot(m, ref_vector)
            orientation /= np.linalg.norm(orientation)
            orientation = np.array([orientation[0],0,orientation[2]])
        else:
            orientation = None
        #orientation = None
        if previous_point is None or index == n_control_points - 1:
            return point, orientation, last_distance
        else:
            # add the point if there is no distance threshold or if it is the first point,
            # or if it is the last point or larger than or equal to the distance threshold
            distance = np.linalg.norm(point - previous_point)
            if distance_threshold > 0.0 and distance < distance_threshold:
                return None
            if last_distance is not None and distance < last_distance / 10.0:  # TODO add toggle of filter to config
                return None
            return point, orientation, distance
    def _extract_control_point_list(self, action_desc, joint_name):
        """Return the trajectory control-point list for @joint_name, or None."""
        control_point_list = None
        for c in action_desc[CONSTRAINTS_KEY]:
            if "joint" in list(c.keys()) and TRAJECTORY_CONSTRAINTS_KEY in list(c.keys()) and joint_name == c["joint"]:
                control_point_list = c[TRAJECTORY_CONSTRAINTS_KEY]
                break  # there should only be one list per joint and elementary action
        return control_point_list
    def _find_semantic_annotation(self, control_points):
        """Return the first non-collision-avoidance semantic annotation."""
        semantic_annotation = None
        for p in control_points:
            if "semanticAnnotation" in list(p.keys()) and not "collisionAvoidance" in list(p["semanticAnnotation"].keys()):
                semantic_annotation = p["semanticAnnotation"]
                break
        return semantic_annotation
    def _find_unconstrained_indices(self, trajectory_constraint_data):
        """extract unconstrained dimensions"""
        unconstrained_indices = list()
        idx = 0
        for p in trajectory_constraint_data:
            # BUGFIX: the original tested `[P_KEY] in list(p.keys())` — a
            # one-element list is never among the (hashable) dict keys, so the
            # condition was always False and no index was ever collected.
            if P_KEY in list(p.keys()):
                for v in p[P_KEY]:
                    if v is None:
                        unconstrained_indices.append(idx)
                    idx += 1
                break  # check only one point
        return _transform_unconstrained_indices_from_cad_to_opengl_cs(unconstrained_indices, self.activate_coordinate_transform)
    def _check_for_collision_avoidance_annotation(self, trajectory_constraint_desc, control_points):
        """ find start and end control point of an active region if there exists one.
        Note this function expects that there is not more than one active region.
        :param trajectory_constraint_desc:
        :param control_points:
        :return: dict containing "start_point" and "end_point" or None
        """
        assert len(trajectory_constraint_desc) == len(control_points), str(len(trajectory_constraint_desc)) +" != " + str( len(control_points))
        active_region = None
        if "semanticAnnotation" in list(trajectory_constraint_desc[0].keys()):
            active_region = dict()
            active_region["start_point"] = None
            active_region["end_point"] = None
            c_index = 0
            for c in trajectory_constraint_desc:
                if "semanticAnnotation" in list(c.keys()):
                    if c["semanticAnnotation"]["collisionAvoidance"]:
                        active_region["start_point"] = control_points[c_index]
                    elif active_region["start_point"] is not None and active_region["end_point"] is None:
                        active_region["end_point"] = control_points[c_index]
                        break
                c_index += 1
        return active_region
    def create_trajectory_from_control_points(self, control_points, distance_threshold=-1):
        """Build a constraint description dict from raw control points."""
        desc = dict()
        desc["control_points_list"] = []
        desc["orientation_list"] = []
        desc["active_regions"] = []
        desc["semantic_annotation"] = self._find_semantic_annotation(control_points)
        desc["unconstrained_indices"] = self._find_unconstrained_indices(control_points)
        desc["control_points_list"] = [self._filter_control_points_simple(control_points, distance_threshold)]
        return desc
    def extract_trajectory_desc(self, elementary_action_list, action_index, joint_name, distance_threshold=-1):
        """ Extract the trajectory information from the constraint list
        Returns:
        -------
        * desc : dict
        \tConstraint definition that contains a list of control points, unconstrained_indices, active_regions and a possible
        annotation.
        """
        control_points = self._extract_control_point_list(elementary_action_list[action_index], joint_name)
        if control_points is not None:
            return self.create_trajectory_from_control_points(control_points, distance_threshold)
        return {"control_points_list": []}
| [
"numpy.radians",
"anim_utils.utilities.log.write_log",
"numpy.array",
"numpy.linalg.norm",
"numpy.dot"
] | [((7359, 7409), 'anim_utils.utilities.log.write_log', 'write_log', (['"""Warning: skip undefined control point"""'], {}), "('Warning: skip undefined control point')\n", (7368, 7409), False, 'from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_ERROR, LOG_MODE_INFO\n'), ((8173, 8194), 'numpy.dot', 'np.dot', (['m', 'ref_vector'], {}), '(m, ref_vector)\n', (8179, 8194), True, 'import numpy as np\n'), ((8485, 8512), 'numpy.linalg.norm', 'np.linalg.norm', (['orientation'], {}), '(orientation)\n', (8499, 8512), True, 'import numpy as np\n'), ((8539, 8584), 'numpy.array', 'np.array', (['[orientation[0], 0, orientation[2]]'], {}), '([orientation[0], 0, orientation[2]])\n', (8547, 8584), True, 'import numpy as np\n'), ((8998, 9036), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - previous_point)'], {}), '(point - previous_point)\n', (9012, 9036), True, 'import numpy as np\n'), ((7813, 7851), 'numpy.linalg.norm', 'np.linalg.norm', (['(point - previous_point)'], {}), '(point - previous_point)\n', (7827, 7851), True, 'import numpy as np\n'), ((8113, 8145), 'numpy.radians', 'np.radians', (['control_point[O_KEY]'], {}), '(control_point[O_KEY])\n', (8123, 8145), True, 'import numpy as np\n'), ((5849, 5870), 'numpy.linalg.norm', 'np.linalg.norm', (['delta'], {}), '(delta)\n', (5863, 5870), True, 'import numpy as np\n'), ((6003, 6119), 'anim_utils.utilities.log.write_log', 'write_log', (['"""Warning: shift second to last control point because it is too close to the last control point"""'], {}), "(\n 'Warning: shift second to last control point because it is too close to the last control point'\n )\n", (6012, 6119), False, 'from anim_utils.utilities.log import write_log, write_message_to_log, LOG_MODE_DEBUG, LOG_MODE_ERROR, LOG_MODE_INFO\n')] |
import matplotlib.pyplot as plt
import numpy as np
import glob
from process_data.uniformization import uniformization, reducePoint
from process_data.B_Spline_Approximation import BS_curve
import math
def plotMap(juncDir, traDir=None, segBegin=0, segEnd=0, tra_begin=0, tra_length=0):
    """
    Plot road segments (centerline and left/right boundaries), optionally
    overlaying a driven trajectory, then show the figure.

    juncDir: directory holding segment_<i>.csv road-node files
    traDir: directory holding tra.csv; the trajectory is skipped when None
    segBegin, segEnd: segment index range; segEnd == 0 means all segments
    tra_begin: index of the first trajectory point to draw
    tra_length: number of trajectory points to draw; 0 means to the end
    """
    # Count the segment CSV files under juncDir to determine the default range.
    path_file_number = glob.glob(pathname='{}*.csv'.format(juncDir))
    if segEnd == 0:
        segEnd = len(path_file_number)
    # Load and draw the trajectory once, outside the segment loop.
    # (The original reloaded and re-plotted the same file for every segment.)
    if traDir:
        tra = np.loadtxt("{}tra.csv".format(traDir), delimiter=",", dtype="double")
        if tra_length == 0:
            plt.plot(tra[tra_begin:, 0], tra[tra_begin:, 1], color='r')
        else:
            tra_end = tra_begin + tra_length
            plt.plot(tra[tra_begin:tra_end, 0], tra[tra_begin:tra_end, 1], color='r')
    for index in range(segBegin, segEnd):
        filename = '{}segment_{}.csv'.format(juncDir, index)
        data = np.loadtxt(filename, delimiter=",", dtype="double")
        xpoint = data[:, 0]
        ypoint = data[:, 1]
        cos = data[:, 2]
        sin = data[:, 3]
        lLength = data[:, 5]   # distance from centerline to the left boundary
        rLength = data[:, 7]   # distance from centerline to the right boundary
        # Offset the centerline along its normal by the boundary distances.
        l_b_x = xpoint - lLength*sin
        l_b_y = ypoint + lLength*cos
        r_b_x = xpoint + rLength*sin
        r_b_y = ypoint - rLength*cos
        plt.plot(xpoint, ypoint, color='g', linestyle='--')  # centerline
        plt.plot(l_b_x, l_b_y, color='y')
        plt.plot(r_b_x, r_b_y, color='y')
    plt.show()
    plt.clf()
def plotLane(dataDir):
    """Plot one road section (centerline plus boundaries) together with its trajectory."""
    tra = np.loadtxt("{}tra.csv".format(dataDir), delimiter=",", dtype="double")
    filename = '{}laneInfo.csv'.format(dataDir)
    lane = np.loadtxt(filename, delimiter=",", dtype="double")
    cx, cy = lane[:, 0], lane[:, 1]
    cos_h, sin_h = lane[:, 2], lane[:, 3]
    left_w, right_w = lane[:, 5], lane[:, 7]
    # Offset the centerline along its normal to obtain the lane boundaries.
    left_x, left_y = cx - left_w * sin_h, cy + left_w * cos_h
    right_x, right_y = cx + right_w * sin_h, cy - right_w * cos_h
    plt.plot(tra[:, 0], tra[:, 1], color='r')
    plt.plot(cx, cy, color='g', linestyle='--')  # centerline
    plt.plot(left_x, left_y, color='b')
    plt.plot(right_x, right_y, color='b')
    plt.show()
    plt.clf()
def calcuBoundary(laneInfo):
    """
    Given one road-section array, compute the boundary trajectories.

    Returns an (N, 6) array whose columns are
    [center_x, center_y, left_x, left_y, right_x, right_y].
    """
    cx = laneInfo[:, 0]
    cy = laneInfo[:, 1]
    cos_h = laneInfo[:, 2]
    sin_h = laneInfo[:, 3]
    left_w = laneInfo[:, 5]
    right_w = laneInfo[:, 7]
    # Offset the centerline along its normal by the boundary distances.
    left_x = cx - left_w * sin_h
    left_y = cy + left_w * cos_h
    right_x = cx + right_w * sin_h
    right_y = cy - right_w * cos_h
    return np.column_stack([cx, cy, left_x, left_y, right_x, right_y])
def bsplineFitting(laneInfo, cpNum, degree, distance, show=False):
    """
    Fit a B-spline to trajectory points.

    laneInfo: (N, 2) array of points to fit
    cpNum: number of control points
    degree: spline degree
    distance: spacing used when thinning the raw points
    show: when True, plot the fitted curve and its control points

    Returns the fitted control points.
    Raises ValueError when the spline configuration check fails.
    """
    tra = laneInfo
    bs = BS_curve(cpNum, degree)
    # Thin the raw points to roughly uniform spacing before fitting.
    boundary = uniformization(tra, distance)
    xx = boundary[:, 0]
    yy = boundary[:, 1]
    paras = bs.estimate_parameters(boundary)
    knots = bs.get_knots()
    # BUG FIX: the original fell through to `return cp` with `cp` unbound
    # (UnboundLocalError) whenever bs.check() failed; fail loudly instead.
    if not bs.check():
        raise ValueError("B-spline configuration check failed; cannot fit control points")
    cp = bs.approximation(boundary)
    uq = np.linspace(0, 1, 101)
    y = bs.bs(uq)
    if show:
        plt.scatter(xx, yy)
        plt.plot(y[:, 0], y[:, 1], 'r')
        plt.plot(cp[:, 0], cp[:, 1], 'y')
        plt.scatter(cp[:, 0], cp[:, 1], c='y')
        plt.show()
    return cp
def polyFitting(laneInfo):
    """
    Fit a cubic polynomial to the thinned left-boundary points and plot the fit.

    Returns the polynomial coefficients, highest degree first.
    """
    # Thin the left-boundary points (columns 2:4) to ~5 m spacing.
    pts = uniformization(laneInfo[:, 2:4], 5)
    coeffs = np.polyfit(pts[:, 0], pts[:, 1], 3)
    plt.scatter(pts[:, 0], pts[:, 1])
    xs = pts[:, 0]
    plt.plot(xs, coeffs[0]*xs**3 + coeffs[1]*xs**2 + coeffs[2]*xs**1 + coeffs[3], 'k--')
    plt.show()
    return coeffs
def showTra(dataDir):
    """Plot the down-sampled, mean-centred trajectory stored in dataDir/tra.csv."""
    tra = np.loadtxt("{}tra.csv".format(dataDir), delimiter=",", dtype="double")
    # Keep x/y only, thin to every 50th point, and transpose to shape (2, N).
    pts = reducePoint(tra[:, :2], step=50).T
    # Centre both coordinate rows on their means.
    pts = pts - np.average(pts, axis=1).reshape(2, 1)
    plt.plot(pts[0, :], pts[1, :], color='r')
    plt.show()
# showTra("./data/bag_2/")
def getTrainData(traDir, juncDir, limit_1, limit_2):
    """
    Data-processing pipeline: build one (features, labels) training pair.

    traDir: directory with the vehicle trajectory (tra.csv)
    juncDir: directory with the road-segment node files
    limit_1: lower x-coordinate bound of the window to keep
    limit_2: upper x-coordinate bound of the window to keep

    Returns (fectures, labels): a (1, 5 + 2*(cpNum+1)) feature row of
    [0, 0, start_speed, end_x, end_y, boundary control points...] and a
    (1, -1) row of B-spline control points of the trajectory.
    """
    # Supervision target: B-spline control points of the trajectory.
    tra = np.loadtxt("{}tra.csv".format(traDir), delimiter=",", dtype="double")
    tra = tra[(limit_1 < tra[:, 0]) & (tra[:, 0] < limit_2) , :]
    temp_x = tra[0, 0] # record the trajectory start point (global coordinates)
    temp_y = tra[0, 1]
    tra[:, 0] -= tra[0, 0]
    tra[:, 1] -= tra[0, 1]
    end_x = tra[-1, 0] # trajectory end point relative to the start point (0, 0)
    end_y = tra[-1, 1]
    # Initial speed magnitude; assumes columns 2/3 are velocity components — TODO confirm.
    start_speed = math.sqrt(tra[0, 2]**2 + tra[0, 3]**2)
    np.save("{}tra".format(traDir), tra)
    traCP = bsplineFitting(tra[:, 0:2], cpNum=8, degree=3, distance=5, show=False)
    # print("trajectory fitting control points: ", traCP)
    # Concatenate the first and third road segments.
    seg_1 = np.loadtxt("{}segment_0.csv".format(juncDir), delimiter=",", dtype="double")
    seg_2 = np.loadtxt("{}segment_2.csv".format(juncDir), delimiter=",", dtype="double")
    laneInfo = np.vstack([seg_1, seg_2])
    # Clip the road info to the same x-window as the trajectory.
    laneInfo = laneInfo[(limit_1 < laneInfo[:, 0]) & (laneInfo[:, 0] < limit_2) , :]
    laneInfo[:, 0] -= temp_x
    laneInfo[:, 1] -= temp_y
    np.save("{}laneInfo".format(traDir), laneInfo)
    # Compute left/right boundary points from the centerline and lane widths.
    laneInfo = calcuBoundary(laneInfo)
    # Fit the left road boundary.
    boundaryCP = bsplineFitting(laneInfo[:, 2:4], cpNum=8, degree=3, distance=5, show=False)
    boundaryCP = np.array(boundaryCP).reshape(1, -1)
    fectures = np.array([0, 0, start_speed, end_x, end_y]).reshape(1, -1)
    fectures = np.hstack([fectures, boundaryCP])
    labels = np.array(traCP).reshape(1, -1)
    return fectures, labels
| [
"process_data.B_Spline_Approximation.BS_curve",
"matplotlib.pyplot.show",
"numpy.average",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.clf",
"numpy.polyfit",
"matplotlib.pyplot.scatter",
"math.sqrt",
"numpy.hstack",
"numpy.array",
"numpy.loadtxt",
"numpy.linspace",
"process_data.uniformizat... | [((1633, 1643), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1641, 1643), True, 'import matplotlib.pyplot as plt\n'), ((1648, 1657), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1655, 1657), True, 'import matplotlib.pyplot as plt\n'), ((1852, 1903), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""', 'dtype': '"""double"""'}), "(filename, delimiter=',', dtype='double')\n", (1862, 1903), True, 'import numpy as np\n'), ((2219, 2260), 'matplotlib.pyplot.plot', 'plt.plot', (['tra[:, 0]', 'tra[:, 1]'], {'color': '"""r"""'}), "(tra[:, 0], tra[:, 1], color='r')\n", (2227, 2260), True, 'import matplotlib.pyplot as plt\n'), ((2266, 2317), 'matplotlib.pyplot.plot', 'plt.plot', (['xpoint', 'ypoint'], {'color': '"""g"""', 'linestyle': '"""--"""'}), "(xpoint, ypoint, color='g', linestyle='--')\n", (2274, 2317), True, 'import matplotlib.pyplot as plt\n'), ((2330, 2363), 'matplotlib.pyplot.plot', 'plt.plot', (['l_b_x', 'l_b_y'], {'color': '"""b"""'}), "(l_b_x, l_b_y, color='b')\n", (2338, 2363), True, 'import matplotlib.pyplot as plt\n'), ((2368, 2401), 'matplotlib.pyplot.plot', 'plt.plot', (['r_b_x', 'r_b_y'], {'color': '"""b"""'}), "(r_b_x, r_b_y, color='b')\n", (2376, 2401), True, 'import matplotlib.pyplot as plt\n'), ((2407, 2417), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2415, 2417), True, 'import matplotlib.pyplot as plt\n'), ((2422, 2431), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2429, 2431), True, 'import matplotlib.pyplot as plt\n'), ((3358, 3381), 'process_data.B_Spline_Approximation.BS_curve', 'BS_curve', (['cpNum', 'degree'], {}), '(cpNum, degree)\n', (3366, 3381), False, 'from process_data.B_Spline_Approximation import BS_curve\n'), ((3420, 3449), 'process_data.uniformization.uniformization', 'uniformization', (['tra', 'distance'], {}), '(tra, distance)\n', (3434, 3449), False, 'from process_data.uniformization import uniformization, reducePoint\n'), ((3685, 3707), 
'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(101)'], {}), '(0, 1, 101)\n', (3696, 3707), True, 'import numpy as np\n'), ((4025, 4060), 'process_data.uniformization.uniformization', 'uniformization', (['laneInfo[:, 2:4]', '(5)'], {}), '(laneInfo[:, 2:4], 5)\n', (4039, 4060), False, 'from process_data.uniformization import uniformization, reducePoint\n'), ((4073, 4118), 'numpy.polyfit', 'np.polyfit', (['boundary[:, 0]', 'boundary[:, 1]', '(3)'], {}), '(boundary[:, 0], boundary[:, 1], 3)\n', (4083, 4118), True, 'import numpy as np\n'), ((4123, 4166), 'matplotlib.pyplot.scatter', 'plt.scatter', (['boundary[:, 0]', 'boundary[:, 1]'], {}), '(boundary[:, 0], boundary[:, 1])\n', (4134, 4166), True, 'import matplotlib.pyplot as plt\n'), ((4194, 4286), 'matplotlib.pyplot.plot', 'plt.plot', (['x', '(param[0] * x ** 3 + param[1] * x ** 2 + param[2] * x ** 1 + param[3])', '"""k--"""'], {}), "(x, param[0] * x ** 3 + param[1] * x ** 2 + param[2] * x ** 1 +\n param[3], 'k--')\n", (4202, 4286), True, 'import matplotlib.pyplot as plt\n'), ((4275, 4285), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4283, 4285), True, 'import matplotlib.pyplot as plt\n'), ((4447, 4474), 'process_data.uniformization.reducePoint', 'reducePoint', (['point'], {'step': '(50)'}), '(point, step=50)\n', (4458, 4474), False, 'from process_data.uniformization import uniformization, reducePoint\n'), ((4611, 4656), 'matplotlib.pyplot.plot', 'plt.plot', (['point[0, :]', 'point[1, :]'], {'color': '"""r"""'}), "(point[0, :], point[1, :], color='r')\n", (4619, 4656), True, 'import matplotlib.pyplot as plt\n'), ((4659, 4669), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4667, 4669), True, 'import matplotlib.pyplot as plt\n'), ((5241, 5283), 'math.sqrt', 'math.sqrt', (['(tra[0, 2] ** 2 + tra[0, 3] ** 2)'], {}), '(tra[0, 2] ** 2 + tra[0, 3] ** 2)\n', (5250, 5283), False, 'import math\n'), ((5648, 5673), 'numpy.vstack', 'np.vstack', (['[seg_1, seg_2]'], {}), '([seg_1, seg_2])\n', (5657, 
5673), True, 'import numpy as np\n'), ((6211, 6244), 'numpy.hstack', 'np.hstack', (['[fectures, boundaryCP]'], {}), '([fectures, boundaryCP])\n', (6220, 6244), True, 'import numpy as np\n'), ((662, 713), 'numpy.loadtxt', 'np.loadtxt', (['filename'], {'delimiter': '""","""', 'dtype': '"""double"""'}), "(filename, delimiter=',', dtype='double')\n", (672, 713), True, 'import numpy as np\n'), ((1484, 1535), 'matplotlib.pyplot.plot', 'plt.plot', (['xpoint', 'ypoint'], {'color': '"""g"""', 'linestyle': '"""--"""'}), "(xpoint, ypoint, color='g', linestyle='--')\n", (1492, 1535), True, 'import matplotlib.pyplot as plt\n'), ((1552, 1585), 'matplotlib.pyplot.plot', 'plt.plot', (['l_b_x', 'l_b_y'], {'color': '"""y"""'}), "(l_b_x, l_b_y, color='y')\n", (1560, 1585), True, 'import matplotlib.pyplot as plt\n'), ((1594, 1627), 'matplotlib.pyplot.plot', 'plt.plot', (['r_b_x', 'r_b_y'], {'color': '"""y"""'}), "(r_b_x, r_b_y, color='y')\n", (1602, 1627), True, 'import matplotlib.pyplot as plt\n'), ((3016, 3071), 'numpy.vstack', 'np.vstack', (['[xpoint, ypoint, l_b_x, l_b_y, r_b_x, r_b_y]'], {}), '([xpoint, ypoint, l_b_x, l_b_y, r_b_x, r_b_y])\n', (3025, 3071), True, 'import numpy as np\n'), ((3745, 3764), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xx', 'yy'], {}), '(xx, yy)\n', (3756, 3764), True, 'import matplotlib.pyplot as plt\n'), ((3773, 3804), 'matplotlib.pyplot.plot', 'plt.plot', (['y[:, 0]', 'y[:, 1]', '"""r"""'], {}), "(y[:, 0], y[:, 1], 'r')\n", (3781, 3804), True, 'import matplotlib.pyplot as plt\n'), ((3809, 3842), 'matplotlib.pyplot.plot', 'plt.plot', (['cp[:, 0]', 'cp[:, 1]', '"""y"""'], {}), "(cp[:, 0], cp[:, 1], 'y')\n", (3817, 3842), True, 'import matplotlib.pyplot as plt\n'), ((3847, 3885), 'matplotlib.pyplot.scatter', 'plt.scatter', (['cp[:, 0]', 'cp[:, 1]'], {'c': '"""y"""'}), "(cp[:, 0], cp[:, 1], c='y')\n", (3858, 3885), True, 'import matplotlib.pyplot as plt\n'), ((3892, 3902), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (3900, 3902), True, 
'import matplotlib.pyplot as plt\n'), ((4527, 4550), 'numpy.average', 'np.average', (['point[0, :]'], {}), '(point[0, :])\n', (4537, 4550), True, 'import numpy as np\n'), ((4583, 4606), 'numpy.average', 'np.average', (['point[1, :]'], {}), '(point[1, :])\n', (4593, 4606), True, 'import numpy as np\n'), ((6085, 6105), 'numpy.array', 'np.array', (['boundaryCP'], {}), '(boundaryCP)\n', (6093, 6105), True, 'import numpy as np\n'), ((6137, 6180), 'numpy.array', 'np.array', (['[0, 0, start_speed, end_x, end_y]'], {}), '([0, 0, start_speed, end_x, end_y])\n', (6145, 6180), True, 'import numpy as np\n'), ((6258, 6273), 'numpy.array', 'np.array', (['traCP'], {}), '(traCP)\n', (6266, 6273), True, 'import numpy as np\n'), ((1251, 1310), 'matplotlib.pyplot.plot', 'plt.plot', (['tra[tra_begin:, 0]', 'tra[tra_begin:, 1]'], {'color': '"""r"""'}), "(tra[tra_begin:, 0], tra[tra_begin:, 1], color='r')\n", (1259, 1310), True, 'import matplotlib.pyplot as plt\n'), ((1401, 1474), 'matplotlib.pyplot.plot', 'plt.plot', (['tra[tra_begin:tra_end, 0]', 'tra[tra_begin:tra_end, 1]'], {'color': '"""r"""'}), "(tra[tra_begin:tra_end, 0], tra[tra_begin:tra_end, 1], color='r')\n", (1409, 1474), True, 'import matplotlib.pyplot as plt\n')] |
def captureRestofRow(s, r, c, header):  # sheet, row, col
    """Collect non-empty values from row r, starting `header` cells right of column c.

    Numeric cells are rendered via str(float(...)); other values stay as-is.
    """
    collected = []
    for col in range(c + header, s.ncols):
        raw = s.cell(r, col).value
        try:
            raw = str(float(raw))  # keep either string or number form
        except (TypeError, ValueError):
            pass
        if raw != "":
            collected.append(raw)
    return collected
def captureRestofCol(s, r, c, header):  # sheet, row, col
    """Collect non-empty values from column c, starting `header` cells below row r.

    Numeric cells are rendered via str(float(...)); other values stay as-is.
    """
    collected = []
    for row in range(r + header, s.nrows):
        raw = s.cell(row, c).value
        try:
            raw = str(float(raw))  # keep either string or number form
        except (TypeError, ValueError):
            pass
        if raw != "":
            collected.append(raw)
    return collected
def captureMiniTable2Anchor(s,r,c, stop):
    """Capture columns to the right of (r, c) until a cell equal to `stop` is found.

    Each captured column is read downward (skipping its header cell).
    Columns with no data are dropped. Returns a list of column value-lists.
    """
    out = []
    for col in range(c,s.ncols):
        x = s.cell(r,col)
        value = x.value
        try: value = str(float(value))
        except: pass
        if value == stop:
            break
        # The original guarded with `if value != stop` after the break (always
        # true) and called captureRestofCol twice; compute the column once.
        thisCol = captureRestofCol(s,r,col,1)
        if thisCol:  # only keep columns that actually contain data
            out.append(thisCol)
    return out
def divide_chunks(l, n):  # generator
    """Yield consecutive n-sized slices of l; the final slice may be shorter."""
    yield from (l[start:start + n] for start in range(0, len(l), n))
def sum_chunks(l, n):  # generator
    """Yield the sum of each consecutive n-sized chunk of l."""
    yield from (sum(l[start:start + n]) for start in range(0, len(l), n))
def avg_list(l, w):
    """Yield, for each sub-list of l, its sum divided by (length * w)."""
    yield from (sum(chunk) / (len(chunk) * w) for chunk in l)
def normalize_chunks(d, l):
    """Yield every value of chunk l[i] divided by the matching divisor d[i]."""
    for idx, chunk in enumerate(l):
        divisor = d[idx]
        for value in chunk:
            yield value / divisor
def captureDay0(s, r, c, reps, numconc):
    """Average the Day-0 replicate columns.

    Reads `reps` adjacent columns starting at column c, sums each block of
    `numconc` rows per column, then averages the blocks across replicates
    (the extra division by numconc happens inside avg_list).
    """
    per_replicate = []
    for col in range(c, c + reps):
        column = [float(v) for v in captureRestofCol(s, r, col, 1)]
        per_replicate.append(list(sum_chunks(column, numconc)))
    grouped = list(zip(*per_replicate))
    return list(avg_list(grouped, numconc))
def captureSolvent(s, r, c, reps, numconc, d0):
    """Average the solvent (vehicle) replicate columns, normalized to Day 0.

    Reads `reps` adjacent columns starting at column c, normalizes each block
    of `numconc` values by the matching Day-0 average, sums each block, then
    averages across replicates.
    """
    per_replicate = []
    for col in range(c, c + reps):
        raw = [float(v) for v in captureRestofCol(s, r, col, 1)]
        blocks = list(divide_chunks(raw, numconc))
        normalized = list(normalize_chunks(d0, blocks))
        per_replicate.append(list(sum_chunks(normalized, numconc)))
    grouped = list(zip(*per_replicate))
    return list(avg_list(grouped, numconc))
def formatKat(s,r,c,d0,con,solv,sr):
    """Build per-concentration record tuples for one drug column.

    s, r, c: sheet and the anchor cell of the drug's data column
    d0: Day-0 averages (one per concentration block)
    con: list of concentrations (its set size gives the block length)
    solv: solvent-normalized averages per block
    sr: raw solvent averages per block

    Returns a list of blocks, each a list of
    (raw, day0-normalized, solvent-normalized, raw-day0, vehicle) tuples.
    """
    nconc = len(set(con))
    n = [float(x) for x in captureRestofCol(s,r,c,1)]
    l = list(divide_chunks(n,nconc))
    nn = list(normalize_chunks(d0,l))       # normalize to Day 0
    ll = list(divide_chunks(nn,nconc))
    nnn = list(normalize_chunks(solv,ll))   # normalize Day 0 values to solvent
    # (The original also built a fourth chunking of nnn that was never used.)
    raw = n
    dnorm = nn
    snorm = nnn
    # Repeat the per-block Day-0/solvent averages so every row carries them.
    redat0 = np.repeat(d0,nconc)
    resolv = np.repeat(sr,nconc)
    o = list(zip(raw,dnorm,snorm,redat0,resolv))
    oo = list(divide_chunks(o,nconc))
    return oo
def printALL(A,p,d,ids,c,sn):
    """Print the collected drug-response data as tab-separated long-format rows.

    A: per drug-column list of blocks of record tuples from formatKat
    p: plate numbers and d: drug names, parallel to A
    ids: row identifiers (presumably tissue/organoid names — confirm with input sheet)
    c: concentrations; sn: sheet name (printed as the DrugSet column)
    """
    assert(len(A) == len(p)) #means I have the same number of arrays in ALL as plates/drugs replicates
    assert(len(p) == len(d)) #headers match
    #NOTE: If more data is needed add column here.
    normalized = ["Raw","Day0","Solvent","RawDay0","Vehicle"]
    dcnt = 0
    for i in range(0,len(p)):
        # dcnt cycles 1..4, tagging replicate columns as variables V1..V4.
        if dcnt == 4:
            dcnt = 0
        dcnt += 1
        plate = p[i]
        drug = d[i]
        for j in range(0,len(A[i])):
            oid = ids[j][0]
            for k in range(0,len(A[i][j])):
                concentration = c[k]
                for m in range(0,len(A[i][j][k])):
                    norm = normalized[m]
                    value = A[i][j][k][m]
                    variable = "V"+str(dcnt) #For completely long format
                    tissue = (oid.replace(" ","_")+"_plate_"+str(plate))
                    out = [tissue,drug,str(concentration),norm,sn,variable,str(value)]
                    print("\t".join(out))
    #sys.exit()
def parseKat(f1):
    """Parse a drug-response Excel workbook and print long-format TSV rows.

    Scans every sheet cell-by-cell, anchoring on marker cells ("Plate",
    "Target", "Identifier", "Day 0", "uM", "DMSO", and the drug names) to
    capture the surrounding tables, then emits one row per measurement
    via printALL.
    """
    out = ["Plate","Drug","Vol","Normalized","DrugSet","Variable","Value"]
    print("\t".join(out))
    wb = xlrd.open_workbook(f1)
    for sheet in wb.sheets():
        # Per-sheet state, reset for every sheet.
        platenums = []
        targets = []
        concentrations = []
        ids = []
        reps = 4  # replicate columns per condition (hard-coded sheet layout)
        numconc = 8 #This will throw an error if concentrations are different
        capturedDay0 = False
        firstDMSO = False
        d0avgs = []
        drugs = []
        solvent = []
        #NOTE: Dec 6 Added for raw value
        solventR = []
        dmsoCnt = 0
        drugCnt = 0
        excelCol = 0
        sheetname = sheet.name
        ALL = []
        for row in range(0,sheet.nrows):
            for col in range(0,sheet.ncols):
                # print(str(row)+":"+str(col))
                x = sheet.cell(row,col)
                value = x.value
                try: value = str(int(value)) #Keep if string or number
                except: pass
                if "plate" in value.lower():
                    #print(row,col)
                    platenums = captureRestofRow(sheet,row,col,1)
                if "target" in value.lower():
                    #print(row,col)
                    targets = captureRestofRow(sheet,row,col,1)
                if "identifier" in value.lower():
                    #print(row,col)
                    o = captureMiniTable2Anchor(sheet,row,col,"Day 0") #Dictionary
                    #print(o)
                    ids = list(zip(*o)) #the * notation is an "UnPACKING" step ;) so cool
                    #print(ids)
                # Only the first "Day 0" anchor defines the Day-0 averages.
                if "Day 0" == value and capturedDay0 == False:
                    d0avgs = captureDay0(sheet, row, col, reps, numconc)
                    capturedDay0 = True
                if "uM" == value:
                    concentrations = captureRestofCol(sheet,row,col,1)
                if "dmso" in value.lower() and firstDMSO == False: #this will finish off the table with the structure in place
                    assert(len(set(concentrations)) == numconc) #This checks the hardcoded data
                    drugs = [x.lower() for x in captureRestofRow(sheet,row,col,0)]
                    excelCol = col
                    firstDMSO = True
                if "dmso" in value.lower():
                    dmsoCnt += 1
                    # Capture solvent normalization data from the first DMSO column only.
                    if dmsoCnt == 1:
                        solvent = captureSolvent(sheet, row, col, reps, numconc, d0avgs)
                        solventR = captureDay0(sheet, row, col, reps, numconc)
                    if dmsoCnt == 4:
                        dmsoCnt = 0
                if value.lower() in drugs:
                    drugCnt += 1
                    pos = col-excelCol
                    o = formatKat(sheet,row,col,d0avgs,concentrations,solvent,solventR)
                    ALL.append(o)
        printALL(ALL,platenums,drugs,ids,concentrations,sheetname) #NOTE makes sure this is the last step
if __name__ == "__main__":
    # Imports live inside the main guard; they still bind module-level globals,
    # so helpers such as formatKat (which uses np) work when run as a script.
    import sys
    import xlrd
    import datetime
    import numpy as np
    #datetime.datetime.strptime('24052010', '%d%m%Y').date()
    # Parse the Excel workbook given on the command line and print TSV rows.
    files = parseKat(sys.argv[1])
| [
"xlrd.open_workbook",
"numpy.repeat"
] | [((2695, 2715), 'numpy.repeat', 'np.repeat', (['d0', 'nconc'], {}), '(d0, nconc)\n', (2704, 2715), True, 'import numpy as np\n'), ((2728, 2748), 'numpy.repeat', 'np.repeat', (['sr', 'nconc'], {}), '(sr, nconc)\n', (2737, 2748), True, 'import numpy as np\n'), ((4003, 4025), 'xlrd.open_workbook', 'xlrd.open_workbook', (['f1'], {}), '(f1)\n', (4021, 4025), False, 'import xlrd\n')] |
import numpy as np
import pickle as pk
import os
import argparse
import json
from collections import OrderedDict
import SimpleITK as sitk
def parse_arguments():
    """Parse the command-line arguments for post-processing nnU-Net softmax outputs.

    Returns an argparse.Namespace with:
      preprocessed -- existing directory holding the .npz / .nii.gz files
      threshold    -- probability cutoff for keeping a predicted label (default 0.5)
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--preprocessed', help='nnunet preprocessed data dir', required=True, type=source_path)
    # BUG FIX: this option previously declared both required=True and a
    # default, which made the default dead code; the default now applies.
    parser.add_argument('-the', '--threshold', default=0.5, help='mask threshold', type=float)
    return parser.parse_args()
def output_path(path):
    """argparse type: ensure `path` is a directory, creating it if needed.

    Returns the path when it exists or was successfully created; raises
    argparse.ArgumentTypeError when it still is not a directory afterwards.
    """
    if os.path.isdir(path):
        return path
    os.mkdir(path)
    if os.path.isdir(path):
        return path
    # BUG FIX: the original error message was copy-pasted from source_path
    # and described the input directory instead of the output directory.
    raise argparse.ArgumentTypeError(f"output 2d slice directory:{path} is not a valid path")
def source_path(path):
    """argparse type: validate that `path` is an existing directory.

    Raises argparse.ArgumentTypeError when the directory does not exist.
    """
    if os.path.isdir(path):
        return path
    # BUG FIX: the message was swapped with output_path's; this function
    # validates the nnU-Net preprocessed input directory (see parse_arguments).
    raise argparse.ArgumentTypeError(f"nnunet preprocessed data dir:{path} is not a valid path")
if __name__ == '__main__':
    parse_args = parse_arguments()
    # Convert every softmax .npz in the preprocessed dir into a thresholded
    # label map saved next to it as <name>_new.nii.gz.
    for file in os.listdir(parse_args.preprocessed):
        if file.endswith(".npz"):
            name, ext = os.path.splitext(file)
            nii_arr = np.load(os.path.join(parse_args.preprocessed, file))
            print(file+":")
            softmax = nii_arr['softmax']
            # Per-voxel class with the highest probability (class axis is 0).
            max_idx = np.argmax(softmax, axis=0)
            print(max_idx.shape)
            print("max_idx", max_idx.shape)
            # Keep the winning class only where its probability clears the
            # threshold; everything below becomes background (0).
            foreground_mask = np.where(np.amax(softmax, axis=0) >= parse_args.threshold, max_idx, 0)
            print("foreground_mask", foreground_mask.shape)
            # BUG FIX: the original computed foreground_mask*max_idx, which
            # squares every label id > 1; the mask already holds the label ids.
            new_label_arr = foreground_mask
            new_label = sitk.GetImageFromArray(new_label_arr)
            # Copy spacing/origin/direction metadata from the matching label.
            org_label = sitk.ReadImage(os.path.join(parse_args.preprocessed, name+'.nii.gz'))
            new_label.CopyInformation(org_label)
            sitk.WriteImage(new_label, os.path.join(parse_args.preprocessed, name+'_new.nii.gz'))
| [
"os.mkdir",
"argparse.ArgumentParser",
"numpy.argmax",
"os.path.isdir",
"numpy.amax",
"os.path.splitext",
"SimpleITK.GetImageFromArray",
"os.path.join",
"os.listdir",
"argparse.ArgumentTypeError"
] | [((176, 201), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (199, 201), False, 'import argparse\n'), ((695, 714), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (708, 714), False, 'import os\n'), ((976, 995), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (989, 995), False, 'import os\n'), ((1205, 1240), 'os.listdir', 'os.listdir', (['parse_args.preprocessed'], {}), '(parse_args.preprocessed)\n', (1215, 1240), False, 'import os\n'), ((754, 768), 'os.mkdir', 'os.mkdir', (['path'], {}), '(path)\n', (762, 768), False, 'import os\n'), ((780, 799), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (793, 799), False, 'import os\n'), ((1041, 1129), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""output 2d slice directory:{path} is not a valid path"""'], {}), "(\n f'output 2d slice directory:{path} is not a valid path')\n", (1067, 1129), False, 'import argparse\n'), ((857, 948), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['f"""nnunet preprocessed data dir:{path} is not a valid path"""'], {}), "(\n f'nnunet preprocessed data dir:{path} is not a valid path')\n", (883, 948), False, 'import argparse\n'), ((1300, 1322), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (1316, 1322), False, 'import os\n'), ((1710, 1747), 'numpy.argmax', 'np.argmax', (["nii_arr['softmax']"], {'axis': '(0)'}), "(nii_arr['softmax'], axis=0)\n", (1719, 1747), True, 'import numpy as np\n'), ((2050, 2087), 'SimpleITK.GetImageFromArray', 'sitk.GetImageFromArray', (['new_label_arr'], {}), '(new_label_arr)\n', (2072, 2087), True, 'import SimpleITK as sitk\n'), ((1353, 1396), 'os.path.join', 'os.path.join', (['parse_args.preprocessed', 'file'], {}), '(parse_args.preprocessed, file)\n', (1365, 1396), False, 'import os\n'), ((2128, 2183), 'os.path.join', 'os.path.join', (['parse_args.preprocessed', "(name + '.nii.gz')"], {}), "(parse_args.preprocessed, name + '.nii.gz')\n", (2140, 
2183), False, 'import os\n'), ((2270, 2329), 'os.path.join', 'os.path.join', (['parse_args.preprocessed', "(name + '_new.nii.gz')"], {}), "(parse_args.preprocessed, name + '_new.nii.gz')\n", (2282, 2329), False, 'import os\n'), ((1644, 1681), 'numpy.argmax', 'np.argmax', (["nii_arr['softmax']"], {'axis': '(0)'}), "(nii_arr['softmax'], axis=0)\n", (1653, 1681), True, 'import numpy as np\n'), ((1830, 1865), 'numpy.amax', 'np.amax', (["nii_arr['softmax']"], {'axis': '(0)'}), "(nii_arr['softmax'], axis=0)\n", (1837, 1865), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn import svm, datasets
from sklearn.metrics import accuracy_score
def my_plot(x, y, name="temp.png"):
    """Plot the tilt and non-tilt series against 1-based indices; save to `name`."""
    plt.figure()
    tilt_ids = range(1, len(x) + 1)
    plt.plot(tilt_ids, x, label='tilt')
    non_tilt_ids = range(1, len(y) + 1)
    plt.plot(non_tilt_ids, y, label='non-tilt')
    plt.xlabel('ids ->')
    plt.ylabel('curvature_diff')
    plt.legend()
    plt.savefig(name)
    plt.close()
# Read survey labels: CSV keyed by id in column 0, tilt label (0/1) in column 8.
file_name = '../data/dr_survey_responses.csv'
labels_dict = {}
f = open(file_name)
for line in f:
    line = line.strip().split(',')
    labels_dict[line[0]] = int(line[8])
f.close()
# Read tilt scores: each "max_steep" line yields a scaled score per sample;
# the sample name is the first two underscore-separated tokens of field 0.
file_name = '../tilt_scores_fine_tuned.txt'
tilt_dict = {}
f = open(file_name)
for line in f:
    if "max_steep" in line:
        # 337.5 appears to convert the raw steepness difference to the score
        # scale used below — TODO confirm the unit.
        line = line.strip().split(' ')
        tilt_score = round(337.5*(float(line[2])-float(line[4])), 2)
        name = line[0].strip().split('_')
        name = name[0]+"_"+name[1]
        if name not in tilt_dict:
            tilt_dict[name] = []
        tilt_dict[name].append(tilt_score)
f.close()
# Pair each sample's tilt scores with its label; skip unlabeled samples.
x, y = [], []
for name in tilt_dict:
    if name not in labels_dict:
        continue
    x.append(tilt_dict[name])
    y.append(labels_dict[name])
x = np.array(x)
y = np.array(y)
#print(x.shape)
# Column 0 and column 1 are plotted separately below as two tilt measures.
print("Tilt", x[y==1][:,0].mean(), x[y==1][:,1].mean())
print("Non-Tilt", x[y==0][:,0].mean(), x[y==0][:,1].mean())
my_plot(x[y==1][:,0], x[y==0][:,0], name="tan_tilt.png")
my_plot(x[y==1][:,1], x[y==0][:,1], name="axial_tilt.png")
# Scatter the two tilt measures, colored by label (red = tilt, blue = non-tilt).
plt.figure()
for idx in range(len(y)):
    if y[idx] == 1:
        plt.scatter(x[idx][0], x[idx][1], color="red", s=1, marker='o', linewidths=2)
    else:
        plt.scatter(x[idx][0], x[idx][1], color="blue", s=1, marker='o', linewidths=2)
plt.xlabel('tan_tilt')
plt.ylabel('axial_tilt')
plt.title('scatter plot for tilts')
plt.savefig('tilt_scatter.png')
plt.close()
# Fit a linear SVM on all samples and draw its decision regions.
X = x.copy()
from mlxtend.plotting import plot_decision_regions
# NOTE(review): this rebinds the name `svm`, shadowing the sklearn `svm`
# module imported at the top of the file.
svm = SVC(kernel='linear')
svm.fit(X, y)
plot_decision_regions(X, y, clf=svm, legend=2)
plt.savefig('decision_boundary.png')
plt.close()
def make_meshgrid(x, y, h=.02):
    """Build a coordinate grid spanning the x/y ranges padded by 1, at step h."""
    x_steps = np.arange(x.min() - 1, x.max() + 1, h)
    y_steps = np.arange(y.min() - 1, y.max() + 1, h)
    return np.meshgrid(x_steps, y_steps)
def plot_contours(ax, clf, xx, yy, **params):
    """Predict the classifier over the grid and draw filled contours on `ax`."""
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = clf.predict(grid_points).reshape(xx.shape)
    return ax.contourf(xx, yy, Z, **params)
# Shuffle indices reproducibly, train a linear SVC on the first 20 samples,
# evaluate on the rest, and plot the resulting decision surface.
import random
random.seed(42)
idx = list(range(0,len(X)))
random.shuffle(idx)
idx = np.array(idx)
X = x.copy()
model = SVC(C=0.5, kernel='linear')
clf = model.fit(X[idx[:20]], y[idx[:20]])
preds = clf.predict(X[idx[20:]])
print("Accuracy", accuracy_score(y[idx[20:]], preds))
fig, ax = plt.subplots()
# title for the plots
title = ('Decision surface of linear SVC ')
# Set-up grid for plotting.
X0, X1 = X[:, 0], X[:, 1]
xx, yy = make_meshgrid(X0, X1)
plot_contours(ax, clf, xx, yy, cmap=plt.cm.coolwarm, alpha=0.8)
ax.scatter(X0, X1, c=y, cmap=plt.cm.coolwarm, s=20, edgecolors='k')
ax.set_xlabel('tan_tilt')
ax.set_ylabel('axial_tilt')
#ax.set_xticks(())
#ax.set_yticks(())
ax.set_title(title)
# NOTE(review): no artists carry labels, so this legend call draws nothing.
ax.legend()
plt.savefig('1_tilt_decision.png')
| [
"matplotlib.pyplot.title",
"mlxtend.plotting.plot_decision_regions",
"matplotlib.pyplot.close",
"random.shuffle",
"matplotlib.pyplot.legend",
"sklearn.metrics.accuracy_score",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"numpy.array",
"random.seed",
"numpy.arange",
"sklearn.svm.SV... | [((1149, 1160), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (1157, 1160), True, 'import numpy as np\n'), ((1166, 1177), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (1174, 1177), True, 'import numpy as np\n'), ((1438, 1450), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1448, 1450), True, 'import matplotlib.pyplot as plt\n'), ((1668, 1690), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""tan_tilt"""'], {}), "('tan_tilt')\n", (1678, 1690), True, 'import matplotlib.pyplot as plt\n'), ((1692, 1716), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""axial_tilt"""'], {}), "('axial_tilt')\n", (1702, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1753), 'matplotlib.pyplot.title', 'plt.title', (['"""scatter plot for tilts"""'], {}), "('scatter plot for tilts')\n", (1727, 1753), True, 'import matplotlib.pyplot as plt\n'), ((1755, 1786), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""tilt_scatter.png"""'], {}), "('tilt_scatter.png')\n", (1766, 1786), True, 'import matplotlib.pyplot as plt\n'), ((1788, 1799), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (1797, 1799), True, 'import matplotlib.pyplot as plt\n'), ((1875, 1895), 'sklearn.svm.SVC', 'SVC', ([], {'kernel': '"""linear"""'}), "(kernel='linear')\n", (1878, 1895), False, 'from sklearn.svm import SVC\n'), ((1897, 1910), 'sklearn.svm.fit', 'svm.fit', (['X', 'y'], {}), '(X, y)\n', (1904, 1910), False, 'from sklearn import svm, datasets\n'), ((1912, 1958), 'mlxtend.plotting.plot_decision_regions', 'plot_decision_regions', (['X', 'y'], {'clf': 'svm', 'legend': '(2)'}), '(X, y, clf=svm, legend=2)\n', (1933, 1958), False, 'from mlxtend.plotting import plot_decision_regions\n'), ((1960, 1996), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""decision_boundary.png"""'], {}), "('decision_boundary.png')\n", (1971, 1996), True, 'import matplotlib.pyplot as plt\n'), ((1998, 2009), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2007, 2009), True, 
'import matplotlib.pyplot as plt\n'), ((2446, 2461), 'random.seed', 'random.seed', (['(42)'], {}), '(42)\n', (2457, 2461), False, 'import random\n'), ((2492, 2511), 'random.shuffle', 'random.shuffle', (['idx'], {}), '(idx)\n', (2506, 2511), False, 'import random\n'), ((2519, 2532), 'numpy.array', 'np.array', (['idx'], {}), '(idx)\n', (2527, 2532), True, 'import numpy as np\n'), ((2558, 2585), 'sklearn.svm.SVC', 'SVC', ([], {'C': '(0.5)', 'kernel': '"""linear"""'}), "(C=0.5, kernel='linear')\n", (2561, 2585), False, 'from sklearn.svm import SVC\n'), ((2733, 2747), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (2745, 2747), True, 'import matplotlib.pyplot as plt\n'), ((3171, 3205), 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""1_tilt_decision.png"""'], {}), "('1_tilt_decision.png')\n", (3182, 3205), True, 'import matplotlib.pyplot as plt\n'), ((203, 215), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (213, 215), True, 'import matplotlib.pyplot as plt\n'), ((318, 338), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""ids ->"""'], {}), "('ids ->')\n", (328, 338), True, 'import matplotlib.pyplot as plt\n'), ((341, 369), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""curvature_diff"""'], {}), "('curvature_diff')\n", (351, 369), True, 'import matplotlib.pyplot as plt\n'), ((372, 384), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (382, 384), True, 'import matplotlib.pyplot as plt\n'), ((387, 404), 'matplotlib.pyplot.savefig', 'plt.savefig', (['name'], {}), '(name)\n', (398, 404), True, 'import matplotlib.pyplot as plt\n'), ((407, 418), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (416, 418), True, 'import matplotlib.pyplot as plt\n'), ((2684, 2718), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y[idx[20:]]', 'preds'], {}), '(y[idx[20:]], preds)\n', (2698, 2718), False, 'from sklearn.metrics import accuracy_score\n'), ((1499, 1576), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[idx][0]', 
'x[idx][1]'], {'color': '"""red"""', 's': '(1)', 'marker': '"""o"""', 'linewidths': '(2)'}), "(x[idx][0], x[idx][1], color='red', s=1, marker='o', linewidths=2)\n", (1510, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1588, 1666), 'matplotlib.pyplot.scatter', 'plt.scatter', (['x[idx][0]', 'x[idx][1]'], {'color': '"""blue"""', 's': '(1)', 'marker': '"""o"""', 'linewidths': '(2)'}), "(x[idx][0], x[idx][1], color='blue', s=1, marker='o', linewidths=2)\n", (1599, 1666), True, 'import matplotlib.pyplot as plt\n'), ((2163, 2189), 'numpy.arange', 'np.arange', (['x_min', 'x_max', 'h'], {}), '(x_min, x_max, h)\n', (2172, 2189), True, 'import numpy as np\n'), ((2191, 2217), 'numpy.arange', 'np.arange', (['y_min', 'y_max', 'h'], {}), '(y_min, y_max, h)\n', (2200, 2217), True, 'import numpy as np\n')] |
import os
import random
import sys
import numpy as np


def _read_paths(path_list_file):
    """Return the stripped lines of the path-list file, one path per entry."""
    with open(path_list_file, 'r') as f:
        return [line.strip() for line in f]


def _group_by_label(paths, path_to_label):
    """Map each label to the list of paths carrying that label."""
    label_to_paths = {}
    for path in paths:
        label_to_paths.setdefault(path_to_label[path], []).append(path)
    return label_to_paths


def main():
    """Print a class-balanced random subsample of file paths.

    Usage: script.py <path_list_file> <path_to_label_file>

    * path_list_file: text file with one file path per line.
    * path_to_label_file: a ``.npy`` file holding a pickled dict that maps
      each path to its label (as produced by ``np.save``).

    The paths are grouped by label, ``min(class sizes)`` paths are drawn
    from every class without replacement, and the resulting balanced list
    is printed one path per line.
    """
    path_list_file = sys.argv[1]
    path_to_label_file = sys.argv[2]
    paths = _read_paths(path_list_file)
    # np.save stores the dict as a 0-d object array; .item() unwraps it.
    path_to_label = np.load(path_to_label_file, allow_pickle=True).item()
    label_to_paths = _group_by_label(paths, path_to_label)
    # Down-sample every class to the size of the smallest one.
    samples_per_class = min(map(len, label_to_paths.values()))
    samples = []
    for class_paths in label_to_paths.values():
        samples.extend(random.sample(class_paths, k=samples_per_class))
    print('\n'.join(samples))


if __name__ == '__main__':
    main()
"random.sample",
"numpy.load"
] | [((225, 271), 'numpy.load', 'np.load', (['path_to_label_file'], {'allow_pickle': '(True)'}), '(path_to_label_file, allow_pickle=True)\n', (232, 271), True, 'import numpy as np\n'), ((625, 666), 'random.sample', 'random.sample', (['paths'], {'k': 'samples_per_class'}), '(paths, k=samples_per_class)\n', (638, 666), False, 'import random\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
包含了一些简单的线性算法的模块,包括线性回归、逻辑回归和线性判别分析算法
"""
from collections import OrderedDict
from typing import Union, Tuple, List
import numpy as np
import scipy.optimize as opt
from numpy import ndarray
import _inner_tools as _t
import data_processing as _dp
import mlfunc as _mf
from base import ISuperviseLearner, IConfidenceLearner, IProbabilityLearner
from exception import StateError, DataNotMatchError
class LinearReg(ISuperviseLearner):
    """
    Linear-regression learner, used for regression tasks.
    """

    def __init__(self, *, lamb: Union[int, float] = 0, max_iter: int = 100, method: str = 'gradient',
                 mean_row: Union[ndarray, None] = None, std_row: Union[ndarray, None] = None):
        """
        Initialize the linear-regression learner.
        :param lamb: regularization parameter, defaults to 0
        :param max_iter: maximum number of training iterations, defaults to 100
        :param method: training method, either 'gradient' (gradient descent)
            or 'normal' (normal equation)
        :param mean_row: row vector with the mean of every feature column
        :param std_row: row vector with the standard deviation of every feature column
        """
        self.lamb = lamb
        self.max_iter = max_iter
        self.method = method
        self.mean_row = mean_row
        self.std_row = std_row
        self.x_mat = None
        self.y_row = None
        self._theta_row = None

    @property
    def method(self) -> str:
        return self._method

    @method.setter
    def method(self, value: str):
        if value != 'gradient' and value != 'normal':
            # BUG FIX: the original message was missing its closing quote.
            raise ValueError('method must be "gradient" or "normal"')
        self._method = value

    @property
    def lamb(self) -> Union[int, float]:
        return self._lamb

    @lamb.setter
    def lamb(self, value: Union[int, float]):
        if value < 0:
            raise ValueError('lamb must be greater than or equal to 0')
        self._lamb = value

    @property
    def max_iter(self) -> int:
        return self._max_iter

    @max_iter.setter
    def max_iter(self, value: int):
        if value < 1:
            raise ValueError('max_iter must be greater than or equal to 1')
        self._max_iter = value

    @property
    def theta_row(self) -> Union[ndarray, None]:
        return self._theta_row

    def train(self, x_mat: ndarray, y_row: ndarray) -> ndarray:
        """
        Train the learner with either gradient descent or the normal equation.
        The normal equation works very well when the number of features is
        moderate (roughly n <= 10000).
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the target of the matching row of x_mat
        :return: the learned parameter row vector
        """
        x_mat, y_row = _t.match_x_y(x_mat, y_row)
        n = x_mat.shape[1]
        if self.method == 'gradient':
            self._theta_row = opt.fmin_cg(f=lambda t, x, y: self.__cost(t, x, y),
                                           x0=np.zeros((n,)), args=(x_mat, y_row), maxiter=self.max_iter,
                                           fprime=lambda t, x, y: self.__gradient(t, x, y))
        elif self.method == 'normal':
            self._theta_row = self.__normal_eqn(x_mat, y_row)
        else:
            # Defensive only: the ``method`` setter already rejects other values.
            # BUG FIX: the original message was missing its closing quote.
            raise ValueError('parameter method must be "gradient" or "normal"')
        self.x_mat = x_mat
        self.y_row = y_row
        return self._theta_row

    def predict(self, x_mat: ndarray):
        """
        Predict with the trained parameters.  When mean/std row vectors were
        supplied, the features are normalized first.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: the predicted values
        """
        if self._theta_row is None:
            raise StateError('not trained yet')
        self._theta_row, x_mat = _t.match_theta_x(self._theta_row, x_mat)
        # The normal-equation method does not need normalized data.
        if self.method == 'gradient' and self.mean_row is not None and self.std_row is not None:
            x_mat = _dp.feature_normalize(x_mat, mean_row=self.mean_row, std_row=self.std_row)[0]
        return _t.ret(x_mat @ self._theta_row)

    def cost(self, x_mat: ndarray, y_row: ndarray):
        """
        Compute the cost on x_mat and y_row.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the target of the matching row of x_mat
        :return: the cost value
        """
        if self._theta_row is None:
            raise StateError('not trained yet')
        self._theta_row, x_mat = _t.match_theta_x(self._theta_row, x_mat)
        x_mat, y_row = _t.match_x_y(x_mat, y_row)
        return self.__cost(self._theta_row, x_mat, y_row)

    def __cost(self, theta_row: ndarray, x_mat: ndarray, y_row: ndarray) -> float:
        """
        Compute the (regularized) linear cost J.
        :param theta_row: parameter row vector; one entry per feature of x_mat
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the target of the matching row of x_mat
        :return: the cost value
        """
        m = x_mat.shape[0]
        thetan = theta_row[1:]  # the bias parameter is never regularized
        return sum((x_mat @ theta_row - y_row) ** 2) / (2 * m) + self.lamb * sum(thetan ** 2) / (2 * m)

    def __gradient(self, theta_row: ndarray, x_mat: ndarray, y_vec: ndarray) -> ndarray:
        """
        Compute the gradient of the cost function.
        :param theta_row: parameter row vector; one entry per feature of x_mat
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_vec: output vector (row or column); one target per row of x_mat
        :return: the gradient as a row vector
        """
        m = x_mat.shape[0]
        thetan = theta_row[1:]
        xn = x_mat[:, 1:]
        hx = x_mat @ theta_row
        # The bias-column gradient carries no regularization term.
        grad0 = ((x_mat[:, :1].T @ (hx - y_vec)) / m).ravel()
        grad1 = ((xn.T @ (hx - y_vec)) / m + self.lamb * thetan / m).ravel()
        return np.hstack((grad0, grad1))

    def __normal_eqn(self, x_mat: ndarray, y_vec: ndarray) -> ndarray:
        """
        Compute the parameter vector with the (regularized) normal equation.
        It works very well when the number of features is moderate (n <= 10000).
        NOTE(review): the original doc claimed x_mat must not carry a bias
        column, yet ``regularization[0, 0] = 0`` assumes column 0 is the bias
        — confirm against the callers.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_vec: output vector (row or column); one target per row of x_mat
        :return: the parameter vector
        """
        regularization = 0
        if self.lamb != 0:
            regularization = np.eye(x_mat.shape[1])
            regularization[0, 0] = 0  # do not regularize the bias parameter
        return np.linalg.pinv(x_mat.T @ x_mat + self.lamb * regularization) @ x_mat.T @ y_vec
class LogisticReg(IProbabilityLearner):
    """
    Logistic-regression learner, used for classification tasks.
    """

    def __init__(self, *, lamb: Union[int, float] = 0, max_iter: int = 100,
                 labels: Union[None, Tuple[int], List[int], ndarray] = None, threshold: float = 0.5,
                 mean_row: Union[ndarray, None] = None, std_row: Union[ndarray, None] = None):
        """
        Initialize the logistic-regression learner.
        :param lamb: regularization parameter, defaults to 0
        :param max_iter: maximum number of training iterations, defaults to 100
        :param labels: array with every class label; None means a binary
            problem whose labels are 1 and 0.  In a binary problem the first
            label is treated as the positive class
        :param threshold: decision threshold for binary problems, defaults to 0.5
        :param mean_row: row vector with the mean of every feature column
        :param std_row: row vector with the standard deviation of every feature column
        """
        self.lamb = lamb
        self.max_iter = max_iter
        self.labels = labels
        self.threshold = threshold
        self.mean_row = mean_row
        self.std_row = std_row
        self.x_mat = None
        self.y_vec = None
        self._theta = None

    @property
    def lamb(self) -> Union[int, float]:
        return self._lamb

    @lamb.setter
    def lamb(self, value: Union[int, float]):
        if value < 0:
            raise ValueError('lamb must be greater than or equal to 0')
        self._lamb = value

    @property
    def max_iter(self) -> int:
        return self._max_iter

    @max_iter.setter
    def max_iter(self, value: int):
        if value < 1:
            raise ValueError('max_iter must be greater than or equal to 1')
        self._max_iter = value

    @property
    def threshold(self) -> float:
        return self._threshold

    @threshold.setter
    def threshold(self, value: float):
        if value < 0 or value > 1:
            raise ValueError('threshold must be between 0 and 1')
        self._threshold = value

    @property
    def labels(self) -> ndarray:
        return self._labels

    @labels.setter
    def labels(self, value: Union[ndarray, Tuple[int], List[int], None]):
        if value is None:
            value = np.array([1, 0])
        if len(value) < 2:
            raise ValueError('labels contains at least two classes')
        if not isinstance(value, ndarray):
            self._labels = np.asarray(value)
        else:
            self._labels = value

    @property
    def theta(self) -> Union[ndarray, None]:
        return self._theta

    # TODO: convert the column-oriented data into row-oriented data
    def train(self, x_mat: ndarray, y_row: ndarray) -> ndarray:
        """
        Train the learner; handles both binary problems and multi-class
        problems (the latter via one-vs-all).
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the parameters.  A row vector for a binary problem; an
            n*num_labels matrix for a multi-class problem, one column per class.
        """
        x_mat, y_row = _t.match_x_y(x_mat, y_row)
        y_row = _t.convert_y(self.labels, y_row)
        n = x_mat.shape[1]
        if len(self.labels) == 2:
            self._theta = opt.fmin_cg(f=lambda t, x, y: self.__cost(t, x, y),
                                       x0=np.zeros((n,)), args=(x_mat, y_row), maxiter=self.max_iter,
                                       fprime=lambda t, x, y: self.__gradient(t, x, y))
        else:
            # One-vs-all: train one binary classifier per class.
            self._theta = np.zeros((n, len(self.labels)))
            for i, label in enumerate(self.labels):
                self._theta[:, i] = opt.fmin_cg(f=lambda t, x, y: self.__cost(t, x, y == label),
                                               x0=np.zeros((n,)), args=(x_mat, y_row), maxiter=self.max_iter,
                                               fprime=lambda t, x, y: self.__gradient(t, x, y == label))
        return self._theta

    def predict(self, x_mat: ndarray) -> Union[ndarray, int]:
        """
        Return the predicted labels for x_mat.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: the predicted labels
        """
        if self._theta is None:
            raise StateError('not trained yet')
        self._theta, x_mat = _t.match_theta_x(self._theta, x_mat)
        prob = x_mat @ self._theta
        if len(self.labels) == 2:
            return _t.ret(_t.convert_y(self.labels, _mf.sigmoid(prob) >= self.threshold, to=False))
        else:
            # Pick the class whose one-vs-all score is the largest.
            return _t.ret(self.labels[np.argmax(prob, axis=1)])

    def cost(self, x_mat: ndarray, y_row: ndarray) -> float:
        """
        Compute the cost on x_mat and y_row.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the cost value
        """
        if self._theta is None:
            raise StateError('not trained yet')
        self._theta, x_mat = _t.match_theta_x(self._theta, x_mat)
        x_mat, y_row = _t.match_x_y(x_mat, y_row)
        if len(self.labels) == 2:
            return self.__cost(self._theta, x_mat, y_row)
        else:
            # Average of the per-class one-vs-all costs, weighted by class size.
            m = x_mat.shape[0]
            cost_sum = 0
            for i, label in enumerate(self.labels):
                y = y_row == label
                cost_sum = cost_sum + np.sum(y) * self.__cost(self._theta[:, i], x_mat, y) / m
            return cost_sum

    def probability(self, x_mat: ndarray) -> Union[ndarray, float]:
        """
        Return the predicted probabilities for x_mat.  A row vector for a
        binary problem; an m*num_labels matrix for a multi-class problem,
        one row of per-class probabilities per sample.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: the predicted probabilities
        """
        if self._theta is None:
            raise StateError('not trained yet')
        self._theta, x_mat = _t.match_theta_x(self._theta, x_mat)
        return _mf.sigmoid(x_mat @ self._theta)

    def __cost(self, theta_row: ndarray, x_mat: ndarray, y_row: ndarray) -> float:
        """
        Compute the (regularized) logistic cost.
        :param theta_row: parameter row vector; one entry per feature of x_mat
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the cost value
        """
        m = x_mat.shape[0]
        hx = _mf.sigmoid(x_mat @ theta_row)
        # BUG FIX (was a FIXME): sigmoid can saturate to exactly 0 or 1, which
        # made np.log emit warnings and abort the optimization; clip the
        # predictions into the open interval (0, 1) first.
        hx = np.clip(hx, 1e-15, 1 - 1e-15)
        thetan = theta_row[1:]  # the bias parameter is never regularized
        cost = -(y_row @ np.log(hx) + (1 - y_row) @ np.log(1 - hx)) / m + self.lamb * sum(thetan ** 2) / (2 * m)
        return cost

    def __gradient(self, theta_row: ndarray, x_mat: ndarray, y_row: ndarray) -> ndarray:
        """
        Compute the gradient of the cost function.
        :param theta_row: parameter row vector; one entry per feature of x_mat
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the gradient as a row vector
        """
        m = x_mat.shape[0]
        thetan = theta_row[1:]
        xn = x_mat[:, 1:]
        hx = _mf.sigmoid(x_mat @ theta_row)
        # ravel() flattens what would otherwise be a one-element 2-D array.
        grad0 = ((x_mat[:, :1].T @ (hx - y_row)) / m).ravel()
        gradn = ((xn.T @ (hx - y_row)) / m + self.lamb * thetan / m).ravel()
        return np.hstack((grad0, gradn))
class LDA(IConfidenceLearner):
    """
    Linear Discriminant Analysis (LDA) learner.  It can classify samples and
    can also reduce the dimensionality of the data.
    """

    def __init__(self, *, labels: Union[None, Tuple[int], List[int], ndarray] = None):
        """
        Initialize the LDA learner.
        :param labels: array with every class label; None means a binary
            problem whose labels are 1 and 0.  In a binary problem the first
            label is treated as the positive class
        """
        self.labels = labels
        self._theta = None
        self._center_projections = {}

    @property
    def labels(self) -> ndarray:
        return self._labels

    @labels.setter
    def labels(self, value: Union[ndarray, Tuple[int], List[int], None]):
        if value is None:
            value = np.array([1, 0])
        if len(value) < 2:
            raise ValueError('labels contains at least two classes')
        if not isinstance(value, ndarray):
            self._labels = np.asarray(value)
        else:
            self._labels = value

    @property
    def theta(self) -> Union[ndarray, None]:
        return self._theta

    def train(self, x_mat: ndarray, y_row: ndarray):
        """
        Train the LDA learner on the given data.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the learned parameters
        """
        x_mat, y_row = _t.match_x_y(x_mat, y_row, add_ones=False)
        self._center_projections.clear()
        if len(self.labels) == 2:
            # samples of the two classes
            x0 = x_mat[y_row == self.labels[0], :]
            x1 = x_mat[y_row == self.labels[1], :]
            # class centers
            u0 = np.mean(x0, axis=0)
            u1 = np.mean(x1, axis=0)
            sw = (x0 - u0).T @ (x0 - u0) + (x1 - u1).T @ (x1 - u1)  # within-class scatter matrix
            self._theta = np.linalg.pinv(sw) @ (u0 - u1)
            self._center_projections[self.labels[0]] = self._theta @ u0
            self._center_projections[self.labels[1]] = self._theta @ u1
        else:
            xn = []
            un = OrderedDict()
            for label in self.labels:
                xn.append(x_mat[y_row == label, :])
                un[label] = np.mean(xn[-1], axis=0)
            # BUG FIX: the global center used by the between-class scatter must
            # be the mean over all samples.  The original summed the unweighted
            # class means and divided by the sample count, which is neither the
            # overall mean nor the mean of the class centers.
            u_mean = np.mean(x_mat, axis=0)
            sw = 0
            sb = 0
            n = u_mean.shape[0]
            for x, u in zip(xn, un.values()):
                sw = sw + (x - u).T @ (x - u)
                sb = sb + x.shape[0] * (u - u_mean).reshape((n, 1)) @ (u - u_mean).reshape((1, n))
            ev, fv = np.linalg.eig(np.linalg.pinv(sw) @ sb)
            # drop the (numerically) zero eigenvalues
            fv = fv[:, np.isclose(ev, 0) == False]
            ev = ev[np.isclose(ev, 0) == False]
            # sort from the largest eigenvalue to the smallest
            # TODO: consider sorting only when the data is being compressed
            indices = np.argsort(ev)[::-1]
            fv = fv[:, indices]
            self._theta = np.real(fv)
            for label in self.labels:
                self._center_projections[label] = self._theta.T @ un[label]
        return self._theta

    def predict(self, x_mat: ndarray) -> Union[int, ndarray]:
        """
        Return the predicted labels for x_mat: every sample is assigned the
        class whose projected center is closest to the sample's projection.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: the predicted labels
        """
        x_mat = self.__match_theta_x(x_mat)
        result_labels = np.empty((x_mat.shape[0],))
        x_mat = x_mat @ self._theta
        for i, x in enumerate(x_mat):
            r = None
            min_dist = None
            for label, center in self._center_projections.items():
                dist = np.sum((x - center) ** 2)
                if min_dist is None or min_dist > dist:
                    min_dist = dist
                    r = label
            result_labels[i] = r
        return _t.ret(result_labels)

    def cost(self, x_mat: ndarray, y_row: ndarray) -> float:
        """
        Compute the cost on x_mat and y_row.  By convention smaller cost is
        better here, so this is the reciprocal form of the usual LDA objective
        (within-class scatter over between-class scatter).
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param y_row: output row vector; each value is the label of the matching row of x_mat
        :return: the cost value
        """
        x_mat = self.__match_theta_x(x_mat)
        x_mat, y_row = _t.match_x_y(x_mat, y_row, add_ones=False)
        if len(self.labels) == 2:
            # samples of the two classes
            x0 = x_mat[y_row == self.labels[0], :]
            x1 = x_mat[y_row == self.labels[1], :]
            # class centers
            u0 = np.mean(x0, axis=0)
            u1 = np.mean(x1, axis=0)
            n = u0.shape[0]
            sw = (x0 - u0).T @ (x0 - u0) + (x1 - u1).T @ (x1 - u1)  # within-class scatter matrix
            sb = (u0 - u1).reshape((n, 1)) @ (u0 - u1).reshape((1, n))  # between-class scatter matrix
            # Inverted relative to the textbook objective because smaller cost
            # values are considered better here.
            return (self._theta @ sw @ self._theta) / (self._theta @ sb @ self._theta)
        else:
            xn = []
            un = OrderedDict()
            for label in self.labels:
                xn.append(x_mat[y_row == label, :])
                un[label] = np.mean(xn[-1], axis=0)
            # BUG FIX: use the true overall sample mean as the global center
            # (same correction as in train()).
            u_mean = np.mean(x_mat, axis=0)
            sw = 0
            sb = 0
            n = u_mean.shape[0]
            for x, u in zip(xn, un.values()):
                sw = sw + (x - u).T @ (x - u)
                sb = sb + x.shape[0] * (u - u_mean).reshape((n, 1)) @ (u - u_mean).reshape((1, n))
            return np.trace(self._theta.T @ sw @ self._theta) / np.trace(self._theta.T @ sb @ self._theta)

    def confidence(self, x_mat: ndarray):
        """
        Return the prediction confidence for x_mat.  A row vector for a binary
        problem; an m*num_labels matrix for a multi-class problem, one row of
        per-class values per sample.
        NOTE(review): the binary branch returns exp(-squared distance) (larger
        means more confident) while the multi-class branch returns the raw
        squared distances (smaller means closer); the two scales are
        inconsistent but kept unchanged to preserve behavior — confirm the
        intended contract with the callers.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: the confidence values
        """
        x_mat = self.__match_theta_x(x_mat)
        x_mat = x_mat @ self._theta
        if len(self._labels) == 2:
            confidences = np.empty((x_mat.shape[0],))
            center = tuple(self._center_projections.values())[0]
            for i, x in enumerate(x_mat):
                confidences[i] = np.exp(-np.sum((x - center) ** 2))
            return _t.ret(confidences)
        else:
            confidences = np.empty((x_mat.shape[0], len(self._labels)))
            for i, x in enumerate(x_mat):
                for j, center in enumerate(self._center_projections.values()):
                    confidences[i][j] = np.sum((x - center) ** 2)
            return confidences

    def reduce(self, x_mat: ndarray, dimension: int) -> ndarray:
        """
        Reduce x_mat to the given dimensionality and return the reduced data.
        Note that the reachable dimensions lie in [1, number of classes - 1].
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :param dimension: the new dimensionality; must be at most the class count minus one
        :return: the reduced matrix
        """
        x_mat = self.__match_theta_x(x_mat)
        if dimension < 1 or dimension > len(self.labels) - 1:
            raise ValueError('dimension range is [1, class number - 1]')
        if len(self.labels) == 2:
            return (x_mat @ self._theta).reshape((x_mat.shape[0], 1))
        else:
            return x_mat @ self._theta[:, :dimension]

    def restore(self, reduced_mat: ndarray) -> ndarray:
        """
        Restore data from a dimensionality reduction.
        :param reduced_mat: the reduced matrix
        :return: the restored matrix
        """
        if self._theta is None:
            raise StateError('not trained yet')
        if len(reduced_mat.shape) != 2:
            raise ValueError('reduced_mat must be a matrix')
        dimension = reduced_mat.shape[1]
        if dimension > len(self.labels) - 1:
            raise ValueError('reduced_mat is not a compressed matrix')
        if len(self.labels) == 2:
            return reduced_mat @ np.linalg.pinv(self._theta.reshape((self._theta.shape[0], 1)))
        else:
            return reduced_mat @ np.linalg.pinv(self._theta[:, :dimension])

    def __match_theta_x(self, x_mat: ndarray) -> ndarray:
        """
        Check whether the input matches the learned parameters.
        :param x_mat: feature matrix; m rows (samples) by n columns (features)
        :return: x_mat converted to a matrix
        """
        if self._theta is None:
            raise StateError('not trained yet')
        x_mat = _t.r2m(x_mat)
        if x_mat.shape[1] != self._theta.shape[0]:
            raise DataNotMatchError('feature quantity mismatch')
        return x_mat
| [
"numpy.trace",
"_inner_tools.match_x_y",
"numpy.sum",
"_inner_tools.convert_y",
"numpy.argmax",
"numpy.empty",
"exception.StateError",
"numpy.argsort",
"_inner_tools.r2m",
"numpy.mean",
"numpy.isclose",
"exception.DataNotMatchError",
"_inner_tools.ret",
"numpy.linalg.pinv",
"numpy.real",... | [((2442, 2468), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {}), '(x_mat, y_row)\n', (2454, 2468), True, 'import _inner_tools as _t\n'), ((3404, 3444), '_inner_tools.match_theta_x', '_t.match_theta_x', (['self._theta_row', 'x_mat'], {}), '(self._theta_row, x_mat)\n', (3420, 3444), True, 'import _inner_tools as _t\n'), ((3680, 3711), '_inner_tools.ret', '_t.ret', (['(x_mat @ self._theta_row)'], {}), '(x_mat @ self._theta_row)\n', (3686, 3711), True, 'import _inner_tools as _t\n'), ((4061, 4101), '_inner_tools.match_theta_x', '_t.match_theta_x', (['self._theta_row', 'x_mat'], {}), '(self._theta_row, x_mat)\n', (4077, 4101), True, 'import _inner_tools as _t\n'), ((4125, 4151), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {}), '(x_mat, y_row)\n', (4137, 4151), True, 'import _inner_tools as _t\n'), ((5276, 5301), 'numpy.hstack', 'np.hstack', (['(grad0, grad1)'], {}), '((grad0, grad1))\n', (5285, 5301), True, 'import numpy as np\n'), ((8570, 8596), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {}), '(x_mat, y_row)\n', (8582, 8596), True, 'import _inner_tools as _t\n'), ((8613, 8645), '_inner_tools.convert_y', '_t.convert_y', (['self.labels', 'y_row'], {}), '(self.labels, y_row)\n', (8625, 8645), True, 'import _inner_tools as _t\n'), ((9742, 9778), '_inner_tools.match_theta_x', '_t.match_theta_x', (['self._theta', 'x_mat'], {}), '(self._theta, x_mat)\n', (9758, 9778), True, 'import _inner_tools as _t\n'), ((10378, 10414), '_inner_tools.match_theta_x', '_t.match_theta_x', (['self._theta', 'x_mat'], {}), '(self._theta, x_mat)\n', (10394, 10414), True, 'import _inner_tools as _t\n'), ((10438, 10464), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {}), '(x_mat, y_row)\n', (10450, 10464), True, 'import _inner_tools as _t\n'), ((11220, 11256), '_inner_tools.match_theta_x', '_t.match_theta_x', (['self._theta', 'x_mat'], {}), '(self._theta, x_mat)\n', (11236, 11256), True, 'import 
_inner_tools as _t\n'), ((11273, 11305), 'mlfunc.sigmoid', '_mf.sigmoid', (['(x_mat @ self._theta)'], {}), '(x_mat @ self._theta)\n', (11284, 11305), True, 'import mlfunc as _mf\n'), ((11620, 11650), 'mlfunc.sigmoid', '_mf.sigmoid', (['(x_mat @ theta_row)'], {}), '(x_mat @ theta_row)\n', (11631, 11650), True, 'import mlfunc as _mf\n'), ((12250, 12280), 'mlfunc.sigmoid', '_mf.sigmoid', (['(x_mat @ theta_row)'], {}), '(x_mat @ theta_row)\n', (12261, 12280), True, 'import mlfunc as _mf\n'), ((12470, 12495), 'numpy.hstack', 'np.hstack', (['(grad0, gradn)'], {}), '((grad0, gradn))\n', (12479, 12495), True, 'import numpy as np\n'), ((13762, 13804), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {'add_ones': '(False)'}), '(x_mat, y_row, add_ones=False)\n', (13774, 13804), True, 'import _inner_tools as _t\n'), ((15715, 15742), 'numpy.empty', 'np.empty', (['(x_mat.shape[0],)'], {}), '((x_mat.shape[0],))\n', (15723, 15742), True, 'import numpy as np\n'), ((16154, 16175), '_inner_tools.ret', '_t.ret', (['result_labels'], {}), '(result_labels)\n', (16160, 16175), True, 'import _inner_tools as _t\n'), ((16484, 16526), '_inner_tools.match_x_y', '_t.match_x_y', (['x_mat', 'y_row'], {'add_ones': '(False)'}), '(x_mat, y_row, add_ones=False)\n', (16496, 16526), True, 'import _inner_tools as _t\n'), ((20347, 20360), '_inner_tools.r2m', '_t.r2m', (['x_mat'], {}), '(x_mat)\n', (20353, 20360), True, 'import _inner_tools as _t\n'), ((3340, 3369), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (3350, 3369), False, 'from exception import StateError, DataNotMatchError\n'), ((3997, 4026), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (4007, 4026), False, 'from exception import StateError, DataNotMatchError\n'), ((5736, 5758), 'numpy.eye', 'np.eye', (['x_mat.shape[1]'], {}), '(x_mat.shape[1])\n', (5742, 5758), True, 'import numpy as np\n'), ((7882, 7898), 'numpy.array', 
'np.array', (['[1, 0]'], {}), '([1, 0])\n', (7890, 7898), True, 'import numpy as np\n'), ((8066, 8083), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (8076, 8083), True, 'import numpy as np\n'), ((9682, 9711), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (9692, 9711), False, 'from exception import StateError, DataNotMatchError\n'), ((10318, 10347), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (10328, 10347), False, 'from exception import StateError, DataNotMatchError\n'), ((11160, 11189), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (11170, 11189), False, 'from exception import StateError, DataNotMatchError\n'), ((13168, 13184), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (13176, 13184), True, 'import numpy as np\n'), ((13352, 13369), 'numpy.asarray', 'np.asarray', (['value'], {}), '(value)\n', (13362, 13369), True, 'import numpy as np\n'), ((14043, 14062), 'numpy.mean', 'np.mean', (['x0'], {'axis': '(0)'}), '(x0, axis=0)\n', (14050, 14062), True, 'import numpy as np\n'), ((14080, 14099), 'numpy.mean', 'np.mean', (['x1'], {'axis': '(0)'}), '(x1, axis=0)\n', (14087, 14099), True, 'import numpy as np\n'), ((14431, 14444), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14442, 14444), False, 'from collections import OrderedDict\n'), ((15302, 15313), 'numpy.real', 'np.real', (['fv'], {}), '(fv)\n', (15309, 15313), True, 'import numpy as np\n'), ((16724, 16743), 'numpy.mean', 'np.mean', (['x0'], {'axis': '(0)'}), '(x0, axis=0)\n', (16731, 16743), True, 'import numpy as np\n'), ((16761, 16780), 'numpy.mean', 'np.mean', (['x1'], {'axis': '(0)'}), '(x1, axis=0)\n', (16768, 16780), True, 'import numpy as np\n'), ((17161, 17174), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17172, 17174), False, 'from collections import OrderedDict\n'), ((18185, 18212), 
'numpy.empty', 'np.empty', (['(x_mat.shape[0],)'], {}), '((x_mat.shape[0],))\n', (18193, 18212), True, 'import numpy as np\n'), ((18408, 18427), '_inner_tools.ret', '_t.ret', (['confidences'], {}), '(confidences)\n', (18414, 18427), True, 'import _inner_tools as _t\n'), ((19566, 19595), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (19576, 19595), False, 'from exception import StateError, DataNotMatchError\n'), ((20300, 20329), 'exception.StateError', 'StateError', (['"""not trained yet"""'], {}), "('not trained yet')\n", (20310, 20329), False, 'from exception import StateError, DataNotMatchError\n'), ((20430, 20476), 'exception.DataNotMatchError', 'DataNotMatchError', (['"""feature quantity mismatch"""'], {}), "('feature quantity mismatch')\n", (20447, 20476), False, 'from exception import StateError, DataNotMatchError\n'), ((3586, 3660), 'data_processing.feature_normalize', '_dp.feature_normalize', (['x_mat'], {'mean_row': 'self.mean_row', 'std_row': 'self.std_row'}), '(x_mat, mean_row=self.mean_row, std_row=self.std_row)\n', (3607, 3660), True, 'import data_processing as _dp\n'), ((5812, 5872), 'numpy.linalg.pinv', 'np.linalg.pinv', (['(x_mat.T @ x_mat + self.lamb * regularization)'], {}), '(x_mat.T @ x_mat + self.lamb * regularization)\n', (5826, 5872), True, 'import numpy as np\n'), ((14205, 14223), 'numpy.linalg.pinv', 'np.linalg.pinv', (['sw'], {}), '(sw)\n', (14219, 14223), True, 'import numpy as np\n'), ((14586, 14609), 'numpy.mean', 'np.mean', (['xn[-1]'], {'axis': '(0)'}), '(xn[-1], axis=0)\n', (14593, 14609), True, 'import numpy as np\n'), ((15222, 15236), 'numpy.argsort', 'np.argsort', (['ev'], {}), '(ev)\n', (15232, 15236), True, 'import numpy as np\n'), ((15957, 15982), 'numpy.sum', 'np.sum', (['((x - center) ** 2)'], {}), '((x - center) ** 2)\n', (15963, 15982), True, 'import numpy as np\n'), ((17316, 17339), 'numpy.mean', 'np.mean', (['xn[-1]'], {'axis': '(0)'}), '(xn[-1], axis=0)\n', (17323, 
17339), True, 'import numpy as np\n'), ((17711, 17753), 'numpy.trace', 'np.trace', (['(self._theta.T @ sw @ self._theta)'], {}), '(self._theta.T @ sw @ self._theta)\n', (17719, 17753), True, 'import numpy as np\n'), ((17756, 17798), 'numpy.trace', 'np.trace', (['(self._theta.T @ sb @ self._theta)'], {}), '(self._theta.T @ sb @ self._theta)\n', (17764, 17798), True, 'import numpy as np\n'), ((20034, 20076), 'numpy.linalg.pinv', 'np.linalg.pinv', (['self._theta[:, :dimension]'], {}), '(self._theta[:, :dimension])\n', (20048, 20076), True, 'import numpy as np\n'), ((2662, 2676), 'numpy.zeros', 'np.zeros', (['(n,)'], {}), '((n,))\n', (2670, 2676), True, 'import numpy as np\n'), ((8827, 8841), 'numpy.zeros', 'np.zeros', (['(n,)'], {}), '((n,))\n', (8835, 8841), True, 'import numpy as np\n'), ((10001, 10024), 'numpy.argmax', 'np.argmax', (['prob'], {'axis': '(1)'}), '(prob, axis=1)\n', (10010, 10024), True, 'import numpy as np\n'), ((14997, 15015), 'numpy.linalg.pinv', 'np.linalg.pinv', (['sw'], {}), '(sw)\n', (15011, 15015), True, 'import numpy as np\n'), ((15115, 15132), 'numpy.isclose', 'np.isclose', (['ev', '(0)'], {}), '(ev, 0)\n', (15125, 15132), True, 'import numpy as np\n'), ((18675, 18700), 'numpy.sum', 'np.sum', (['((x - center) ** 2)'], {}), '((x - center) ** 2)\n', (18681, 18700), True, 'import numpy as np\n'), ((9247, 9261), 'numpy.zeros', 'np.zeros', (['(n,)'], {}), '((n,))\n', (9255, 9261), True, 'import numpy as np\n'), ((9901, 9918), 'mlfunc.sigmoid', '_mf.sigmoid', (['prob'], {}), '(prob)\n', (9912, 9918), True, 'import mlfunc as _mf\n'), ((15067, 15084), 'numpy.isclose', 'np.isclose', (['ev', '(0)'], {}), '(ev, 0)\n', (15077, 15084), True, 'import numpy as np\n'), ((18361, 18386), 'numpy.sum', 'np.sum', (['((x - center) ** 2)'], {}), '((x - center) ** 2)\n', (18367, 18386), True, 'import numpy as np\n'), ((10753, 10762), 'numpy.sum', 'np.sum', (['y'], {}), '(y)\n', (10759, 10762), True, 'import numpy as np\n'), ((11759, 11769), 'numpy.log', 'np.log', 
(['hx'], {}), '(hx)\n', (11765, 11769), True, 'import numpy as np\n'), ((11786, 11800), 'numpy.log', 'np.log', (['(1 - hx)'], {}), '(1 - hx)\n', (11792, 11800), True, 'import numpy as np\n')] |
# This file is generated from image_classification/dataset.md automatically through:
# d2lbook build lib
# Don't edit it directly
#@save_all
#@hide_all
import pathlib
import pandas as pd
from matplotlib import pyplot as plt
from typing import Union, Sequence, Callable, Optional
import fnmatch
import numpy as np
import unittest
from d8 import core
class Dataset(core.BaseDataset):
    """The class of an image classification dataset."""
    def __init__(self, df: pd.DataFrame, reader: core.Reader):
        super().__init__(df, reader, label_name='class_name')

    TYPE = 'image_classification'

    def show(self, layout=(2,8)) -> None:
        """Show several random examples with their labels.

        :param layout: A tuple of (number of rows, number of columns).
        """
        nrows, ncols = layout
        max_width=300
        scale = 14 / ncols
        figsize = (ncols * scale, nrows * scale)
        _, axes = plt.subplots(nrows, ncols, figsize=figsize)
        # A fixed random_state keeps the displayed sample reproducible.
        samples = self.df.sample(n=nrows*ncols, random_state=0)
        for ax, (_, sample) in zip(axes.flatten(), samples.iterrows()):
            ax.set_title(sample['class_name'])
            img = self.reader.read_image(sample['file_path'], max_width=max_width)
            ax.imshow(img)
            ax.axis("off")

    def _summary(self) -> pd.DataFrame:
        """Returns a one-row summary (counts, image sizes, total size) about this dataset."""
        get_mean_std = lambda col: f'{col.mean():.1f} ± {col.std():.1f}'
        img_df = self.reader.get_image_info(self.df['file_path'])
        return pd.DataFrame([{'# images':len(img_df),
                              '# classes':len(self.classes),
                              'image width':get_mean_std(img_df['width']),
                              'image height':get_mean_std(img_df['height']),
                              'size (GB)':img_df['size (KB)'].sum()/2**20,}])

    def __getitem__(self, idx):
        """Return the (image array, class name) pair at position *idx*."""
        # BUG FIX: the original bound check used `idx > len`, so idx == len
        # slipped through and surfaced as a pandas KeyError instead of the
        # intended IndexError.
        if idx < 0 or idx >= self.__len__():
            raise IndexError(f'index {idx} out of range [0, {self.__len__()})')
        file_path = self.df['file_path'][idx]
        img = self.reader.read_image(file_path)
        return np.array(img), self.df['class_name'][idx]

    def to_mxnet(self):
        """Returns a MXNet dataset instance"""
        import mxnet as mx
        class MXDataset(mx.gluon.data.Dataset):
            def __init__(self, dataset):
                self.data = dataset
                # Map class names to contiguous integer labels.
                self.label_to_idx = {n:i for i, n in enumerate(self.data.classes)}
                self.classes = dataset.classes
            def __getitem__(self, idx):
                file_path = self.data.df['file_path'][idx]
                img = self.data.reader.read_image(file_path)
                img = mx.nd.array(img)
                label = self.label_to_idx[self.data.df['class_name'][idx]]
                return img, label
            def __len__(self):
                return len(self.data.df)
        return MXDataset(self)

    @classmethod
    def from_folders(cls, data_path: Union[str, Sequence[str]],
                     folders: Union[str, Sequence[str]]) -> 'Dataset':
        """Create a dataset when images from the same class are stored in the same folder.

        :param data_path: Either a URL or a local path. For the former, data will be downloaded automatically.
        :param folders: The folders containing all example images.
        :return: The created dataset.
        """
        if isinstance(folders, (str, pathlib.Path)): folders = [folders]
        def label_func(file_path):
            # The class name is the name of the image's parent folder, but
            # only when the grandparent matches one of the given patterns.
            for folder in folders:
                if fnmatch.fnmatch(str(file_path.parent.parent), folder):
                    return file_path.parent.name
            return None
        return cls.from_label_func(data_path, label_func)

    @classmethod
    def from_label_func(cls, data_path: Union[str, Sequence[str]],
                        label_func: Callable[[pathlib.Path], str]) -> 'Dataset':
        """Create a dataset from a function that maps a image path to its class name.

        :param data_path: Either a URL or a local path. For the former, data will be downloaded automatically.
        :param label_func: A function takes an image path (an instance :class:`pathlib.Path`) to return a string class name or a None to skip this image.
        :return: The created dataset.
        """
        reader = core.create_reader(data_path)
        entries = []
        for file_path in reader.list_images():
            lbl = label_func(file_path)
            # A None label means "skip this image".
            if lbl: entries.append({'file_path':file_path, 'class_name':lbl})
        df = pd.DataFrame(entries)
        return cls(df, reader)
class TestDataset(unittest.TestCase):
    """Integration tests for the :class:`Dataset` factory methods.

    NOTE(review): both tests fetch real datasets from Kaggle via
    ``Dataset.add``/``Dataset.get``, so they need network access (and
    possibly Kaggle credentials) and can be slow.
    """
    def test_from_folders(self):
        # Class names come from each image's parent-folder name.
        Dataset.add('chessman_test', Dataset.from_folders,
                    ['https://www.kaggle.com/niteshfre/chessman-image-dataset', '*'])
        ds = Dataset.get('chessman_test')
        self.assertEqual(len(ds.df), 552)
        self.assertEqual(ds.classes, ['Bishop', 'King', 'Knight', 'Pawn', 'Queen', 'Rook'])
        # __getitem__ returns a (image array, class name) pair; the image
        # array is expected to have 3 color channels.
        items = ds[10]
        self.assertEqual(len(items), 2)
        self.assertEqual(items[0].shape[2], 3)
    def test_from_label_func(self):
        name = 'test-honey-bee'
        # The class name is the token before the first '_' in the file name.
        Dataset.add(name, Dataset.from_label_func,
                    ['https://www.kaggle.com/jenny18/honey-bee-annotated-images',
                     lambda path: path.name.split('_')[0]])
        ds = Dataset.get(name)
        self.assertEqual(len(ds.df), 5172)
        self.assertEqual(len(ds.classes), 45)
# Run the test suite when executed directly.  The dummy argv keeps unittest
# from parsing the real command line, and exit=False stops unittest from
# calling sys.exit (useful when this file is run inside a notebook).
if __name__ == '__main__':
    unittest.main(argv=['first-arg-is-ignored'], exit=False)
| [
"unittest.main",
"pandas.DataFrame",
"numpy.array",
"mxnet.nd.array",
"matplotlib.pyplot.subplots",
"d8.core.create_reader"
] | [((5605, 5661), 'unittest.main', 'unittest.main', ([], {'argv': "['first-arg-is-ignored']", 'exit': '(False)'}), "(argv=['first-arg-is-ignored'], exit=False)\n", (5618, 5661), False, 'import unittest\n'), ((936, 979), 'matplotlib.pyplot.subplots', 'plt.subplots', (['nrows', 'ncols'], {'figsize': 'figsize'}), '(nrows, ncols, figsize=figsize)\n', (948, 979), True, 'from matplotlib import pyplot as plt\n'), ((4397, 4426), 'd8.core.create_reader', 'core.create_reader', (['data_path'], {}), '(data_path)\n', (4415, 4426), False, 'from d8 import core\n'), ((4626, 4647), 'pandas.DataFrame', 'pd.DataFrame', (['entries'], {}), '(entries)\n', (4638, 4647), True, 'import pandas as pd\n'), ((2155, 2168), 'numpy.array', 'np.array', (['img'], {}), '(img)\n', (2163, 2168), True, 'import numpy as np\n'), ((2735, 2751), 'mxnet.nd.array', 'mx.nd.array', (['img'], {}), '(img)\n', (2746, 2751), True, 'import mxnet as mx\n')] |
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
from base import BaseDetector, DrawFlags
from image import SuspiciousImage
class Duplication(BaseDetector):
    """Detect a region duplicated between two *different* images.

    Keypoint matches between the two images are filtered (kNN ratio test, or
    brute-force cross-check when ``crossCheck`` is set), a homography is
    fitted to the surviving matches with RANSAC, and the pair is flagged
    when a large enough fraction of the matches are RANSAC inliers.

    Parameters
    ----------
    min_kp : int, (default=20)
        Minimum keypoints each image must have; below this detect() returns -1.
    r : float, (default=0.60)
        Ratio-test threshold: keep a match only if its distance is at most
        r times the second-best distance.
    min_match : int, (default=20)
        Minimum number of filtered matches required to fit a homography.
    min_key_ratio : float, (default=0.75)
        Minimum fraction of RANSAC inliers for a positive detection.
    ransacT : float, (default=5.0)
        RANSAC reprojection-error threshold in pixels.
    crossCheck : bool, (default=False)
        Use BFMatcher cross-check matching instead of the kNN ratio test.
    color : Tuple[int, int, int], (default=(0,255,255))
        BGR color used when drawing results.
    flags : int, (default=DrawFlags.SHOW_RESULT)
        See base.DrawFlags
    Attributes
    ----------
    matches_ : list[cv.DMatch],
        Matches that survived filtering.
    mask_ : array,
        RANSAC inlier mask returned by cv.findHomography.
    M_ : array,
        3x3 homography matrix.
    mask_ratio_ : float,
        Fraction of inlier matches; 0 when nothing was detected.
    image_ : array,
        Rendered result image, or None when drawing was not requested.
    """
    def __init__(
            self,
            min_kp=20,
            r=0.60,
            min_match=20,
            min_key_ratio=0.75,
            ransacT=5.0,
            crossCheck=False,
            color=(0, 255, 255),
            flags=DrawFlags.SHOW_RESULT):
        self.min_kp = min_kp
        self.r = r
        self.min_match = min_match
        self.min_key_ratio = min_key_ratio
        self.ransacT = ransacT
        self.crossCheck = crossCheck
        self.color = color
        self.flags = flags
    def detect(self, img1, img2):
        """Run duplication detection on an image pair.

        :param SuspiciousImage img1: SuspiciousImage class instance
        :param SuspiciousImage img2: SuspiciousImage class instance
        :return: Suspect(1) or not(0); -1 when either image has fewer than
            min_kp keypoints (not enough evidence to decide either way)
        :rtype: int
        """
        self.mask_ratio_ = 0
        self.image_ = None
        if len(img1.kp) < self.min_kp or len(img2.kp) < self.min_kp:
            return -1
        if self.crossCheck:
            # Cross-check: keep (i,j) only if i is j's best match and vice versa.
            bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
            self.matches_ = bf.match(img1.des, img2.des)
        else:
            matches = img1.bf.knnMatch(img1.des, img2.des, k=2)
            self.matches_ = self.ratio_test(matches)
        # find_homography sets mask_ratio_ > 0 only on a positive detection.
        self.find_homography(img1, img2)
        result = 1 if self.mask_ratio_ > 0 else 0
        return result
    def ratio_test(self, matches):
        """Lowe's ratio test.

        Keep a match only when the best match's distance is at most
        (r*100)% of the second-best match's distance.

        :param list matches: list of the first and the second cv.DMatch
        :return: list of cv.DMatch
        """
        good_matches = []
        for m, n in matches:
            if m.distance <= self.r * n.distance and n.distance != 0:
                good_matches.append(m)
        return good_matches
    def find_homography(self, img1, img2):
        """Fit a RANSAC homography to the current matches.

        Given the sets of matched points from the two images, compute the
        perspective transform of the object.  ransacReprojThreshold is the
        maximum reprojection error allowed for a point pair to be treated
        as an inlier; values of roughly 1-10 are reasonable.  M_ is the
        resulting 3x3 perspective-transform matrix.

        :param SuspiciousImage img1: SuspiciousImage class instance
        :param SuspiciousImage img2: SuspiciousImage class instance
        """
        if len(self.matches_) > self.min_match:
            src_pts = np.float32([img1.kp[m.queryIdx].pt for m in self.matches_]
                                  ).reshape(-1, 1, 2)  # image1 keypoint coordinates, shape (n,1,2)
            dst_pts = np.float32([img2.kp[m.trainIdx].pt for m in self.matches_]
                                  ).reshape(-1, 1, 2)  # image2 keypoint coordinates, shape (n,1,2)
            self.M_, self.mask_ = cv.findHomography(
                src_pts, dst_pts, cv.RANSAC, ransacReprojThreshold=self.ransacT)
            matches_mask = self.mask_.ravel().tolist()
            # Adopt the mask (= DETECT!) only when at least min_key_ratio of
            # the matched keypoints are RANSAC inliers.
            mask_ratio = matches_mask.count(1) / len(matches_mask)
            if mask_ratio >= self.min_key_ratio:
                self.mask_ratio_ = mask_ratio
                if self.flags != DrawFlags.RETURN_RESULT:
                    self.draw_match(img1, img2, src_pts)
        return self
    @staticmethod
    def padding(img, imgarr):
        """Return imgarr surrounded by a white border of img.gap pixels on each side."""
        H, W, C = imgarr.shape
        gap = img.gap
        imgzeros = np.full((H + gap * 2, W + gap * 2, C), 255)
        for i in range(H):
            for j in range(W):
                imgzeros[i + gap, j + gap, :] = imgarr[i, j, :]
        return imgzeros.astype('uint8')
    def draw_match(self, img1, img2, src_pts):
        """Render the detection into self.image_ (rectangle on img1, projected
        quadrilateral on img2; optionally the full match visualisation).

        :param SuspiciousImage img1: SuspiciousImage class instance
        :param SuspiciousImage img2: SuspiciousImage class instance
        :param np.ndarray src_pts: array of coordinates of keypoints
        """
        gap = img1.gap
        # Bounding box of the inlier keypoints in image1.
        x, y, w, h = cv.boundingRect(
            np.float32(src_pts)[self.mask_.ravel() == 1])
        img1_rect = cv.rectangle(
            img1.mat.copy(), (x - gap, y - gap), (x + w - gap, y + h - gap), self.color, 2)
        # Project the box corners into image2 via the fitted homography.
        pts = np.float32([[x, y], [x, y + h], [x + w, y + h], [x + w, y]]
                         ).reshape(-1, 1, 2)
        dst = cv.perspectiveTransform(pts, self.M_)
        img2_rect = cv.polylines(img2.mat.copy(), [np.int32(dst) - gap],
                                 isClosed=True, color=self.color, thickness=2,
                                 lineType=cv.LINE_AA)  # LINE_AA: anti-aliased
        if self.flags == DrawFlags.SHOW_RESULT:
            img1_name = img1.name.replace('-', '\n-', 1)
            img2_name = img2.name.replace('-', '\n-', 1)
            fig = plt.figure()
            ax1 = fig.add_subplot(121)
            ax1.imshow(cv.cvtColor(img1_rect, cv.COLOR_BGR2RGB))
            ax1.set_title(img1_name)
            ax1.axis('off')
            ax2 = fig.add_subplot(122)
            ax2.imshow(cv.cvtColor(img2_rect, cv.COLOR_BGR2RGB))
            ax2.set_title(img2_name)
            ax2.axis('off')
            fig.tight_layout(pad=0)
            fig.canvas.draw()
            # Render the matplotlib figure to an RGBA byte buffer, then back
            # into an OpenCV image resized to 400px width.
            s, (width, height) = fig.canvas.print_to_buffer()
            X = np.frombuffer(s, np.uint8).reshape((height, width, 4))
            scale = 400 / width
            self.image_ = cv.resize(
                cv.cvtColor(
                    X,
                    cv.COLOR_RGBA2BGRA),
                dsize=None,
                fx=scale,
                fy=scale)
            plt.close()
        elif self.flags == DrawFlags.SHOW_FULL_RESULT:
            draw_params = dict(matchColor=self.color,
                               singlePointColor=None,
                               matchesMask=self.mask_.ravel().tolist(),  # draw only inliers
                               flags=2)
            img1_rect = self.padding(img1, img1_rect)
            img2_rect = self.padding(img2, img2_rect)
            # Pad the shorter image with white so both have equal height
            # before stacking them side by side.
            H1, W1, C = img1_rect.shape
            H2, W2, C = img2_rect.shape
            if H1 < H2:
                img1_rect = np.vstack(
                    [img1_rect, np.full((H2 - H1, W1, C), 255, dtype='uint8')])
            elif H1 > H2:
                img2_rect = np.vstack(
                    [img2_rect, np.full((H1 - H2, W2, C), 255, dtype='uint8')])
            img_original = np.hstack([img1_rect, img2_rect])
            img_drow = cv.drawMatches(
                img1_rect,
                img1.kp,
                img2_rect,
                img2.kp,
                self.matches_,
                None,
                **draw_params)
            self.image_ = np.vstack([img_original, img_drow])
        return self
class CopyMove(Duplication):
    """Detect a copy-move forgery *within a single image*.

    Each keypoint is matched against the same image's own keypoints; after
    removing self-matches, applying the ratio test, and discarding pairs
    closer than min_dis pixels, a RANSAC homography over the remaining
    pairs decides the detection (see Duplication.find_homography).

    Parameters
    ----------
    min_kp : int, (default=20)
        Minimum keypoints the image must have; below this detect() returns -1.
    r : float, (default=0.60)
        Ratio-test threshold.
    min_dis : int, (default=40)
        Minimum pixel distance between the two keypoints of a valid match.
    min_match : int, (default=20)
        Minimum number of filtered matches required to fit a homography.
    min_key_ratio : float, (default=0.75)
        Minimum fraction of RANSAC inliers for a positive detection.
    ransacT : float, (default=5.0)
        RANSAC reprojection-error threshold in pixels.
    crossCheck : bool, (default=False)
        Inherited from Duplication (unused by this class's detect path).
    color : Tuple[int, int, int], (default=(0,255,255))
        BGR color used when drawing results.
    flags : int, (default=DrawFlags.SHOW_RESULT)
        See base.DrawFlags.
    Attributes
    ----------
    matches_ : list[cv.DMatch],
        Matches that survived filtering.
    mask_ : array,
        RANSAC inlier mask.
    M_ : array,
        3x3 homography matrix.
    mask_ratio_ : float,
        Fraction of inlier matches; 0 when nothing was detected.
    image_ : array,
        Rendered result image, or None when drawing was not requested.
    """
    def __init__(
            self,
            min_kp=20,
            r=0.60,
            min_dis=40,
            min_match=20,
            min_key_ratio=0.75,
            ransacT=5.0,
            crossCheck=False,
            color=(0, 255, 255),
            flags=DrawFlags.SHOW_RESULT):
        super(
            CopyMove,
            self).__init__(
            min_kp,
            r,
            min_match,
            min_key_ratio,
            ransacT,
            crossCheck,
            color,
            flags)
        self.min_dis = min_dis
    def detect(self, img1, img2=None):
        """Run copy-move detection on a single image (img2 is ignored).

        :param SuspiciousImage img1: SuspiciousImage class instance
        :return: Suspect(1) or not(0); -1 when the image has fewer than
            min_kp keypoints
        :rtype: int
        """
        self.mask_ratio_ = 0
        self.image_ = None
        if len(img1.kp) < self.min_kp:
            return -1
        # k=3 so that after dropping the trivial self-match two candidates remain.
        matches3 = img1.bf.knnMatch(img1.des, img1.des, k=3)
        # Exclude matches of a keypoint with itself.
        matches2 = []
        for m in matches3:
            if len(m) > 1:
                m = [mi for mi in m if img1.kp[mi.queryIdx]
                     != img1.kp[mi.trainIdx]]
                if len(m) > 1:
                    matches2.append([m[0], m[1]])
            else:
                return 0
        good = self.ratio_test(matches2)
        self.matches_ = self.distance_cutoff(img1.kp, good)
        self.find_homography(img1)
        result = 1 if self.mask_ratio_ > 0 else 0
        return result
    def distance_cutoff(self, kp, matches):
        """Canonicalise mutual matches and drop spatially close pairs.

        Each pair is reoriented so the keypoint with the larger coordinate
        tuple becomes the query (so mutual A->B / B->A matches coincide),
        then matches whose two keypoints are within min_dis pixels of each
        other are removed.
        NOTE(review): pairs are only canonicalised here; coinciding mutual
        matches are not actually deduplicated -- confirm this is intended.

        :param list kp: list of cv.KeyPoint
        :param list matches: list of cv.DMatch
        :return: better list of cv.DMatch
        """
        for m in matches:
            if kp[m.queryIdx].pt < kp[m.trainIdx].pt:
                m.queryIdx, m.trainIdx = m.trainIdx, m.queryIdx
        better = [m for m in matches if (np.linalg.norm(
            np.array(kp[m.queryIdx].pt) - np.array(kp[m.trainIdx].pt)) > self.min_dis)]
        return better
    def find_homography(self, img1, img2=None):
        # Copy-move operates within one image: match the image against itself.
        super(CopyMove, self).find_homography(img1=img1, img2=img1)
    def draw_match(self, img1, img2, src_pts):
        """Render the detection into self.image_: source rectangle plus the
        homography-projected destination quadrilateral on the same image."""
        gap = img1.gap
        # Bounding box of the inlier source keypoints.
        x, y, w, h = cv.boundingRect(
            np.float32(src_pts)[self.mask_.ravel() == 1])
        img_rect = cv.rectangle(
            img1.mat.copy(), (x - gap, y - gap), (x + w - gap, y + h - gap), self.color, 2)
        pts = np.float32([[x, y], [x, y + h], [x + w, y + h], [x + w, y]]
                         ).reshape(-1, 1, 2)
        dst = cv.perspectiveTransform(pts, self.M_)
        self.image_ = cv.polylines(img_rect,
                                  [np.int32(dst) - gap],
                                  isClosed=True,
                                  color=self.color,
                                  thickness=2,
                                  lineType=cv.LINE_AA)
        if self.flags == DrawFlags.SHOW_FULL_RESULT:
            # Additionally draw a line and endpoint circles for every inlier match.
            matches_mask = self.mask_.ravel().tolist()
            for i, m in enumerate(self.matches_):
                if matches_mask[i] == 1:
                    cv.line(self.image_,
                            tuple(
                                np.round(np.array(img1.kp[m.queryIdx].pt) - gap).astype('int')),
                            tuple(
                                np.round(np.array(img1.kp[m.trainIdx].pt) - gap).astype('int')),
                            color=self.color)
                    cv.circle(self.image_,
                              tuple(
                                  np.round(np.array(img1.kp[m.queryIdx].pt) - gap).astype('int')),
                              3,
                              color=self.color,
                              thickness=2)
                    cv.circle(self.image_, tuple(np.round(
                        np.array(img1.kp[m.trainIdx].pt) - gap).astype('int')), 2,
                        color=self.color, thickness=2)
        return self
| [
"numpy.full",
"cv2.drawMatches",
"cv2.cvtColor",
"matplotlib.pyplot.close",
"numpy.float32",
"cv2.BFMatcher",
"numpy.frombuffer",
"numpy.hstack",
"matplotlib.pyplot.figure",
"numpy.array",
"numpy.int32",
"cv2.perspectiveTransform",
"cv2.findHomography",
"numpy.vstack"
] | [((3784, 3827), 'numpy.full', 'np.full', (['(H + gap * 2, W + gap * 2, C)', '(255)'], {}), '((H + gap * 2, W + gap * 2, C), 255)\n', (3791, 3827), True, 'import numpy as np\n'), ((4647, 4684), 'cv2.perspectiveTransform', 'cv.perspectiveTransform', (['pts', 'self.M_'], {}), '(pts, self.M_)\n', (4670, 4684), True, 'import cv2 as cv\n'), ((10210, 10247), 'cv2.perspectiveTransform', 'cv.perspectiveTransform', (['pts', 'self.M_'], {}), '(pts, self.M_)\n', (10233, 10247), True, 'import cv2 as cv\n'), ((1634, 1680), 'cv2.BFMatcher', 'cv.BFMatcher', (['cv.NORM_HAMMING'], {'crossCheck': '(True)'}), '(cv.NORM_HAMMING, crossCheck=True)\n', (1646, 1680), True, 'import cv2 as cv\n'), ((3157, 3244), 'cv2.findHomography', 'cv.findHomography', (['src_pts', 'dst_pts', 'cv.RANSAC'], {'ransacReprojThreshold': 'self.ransacT'}), '(src_pts, dst_pts, cv.RANSAC, ransacReprojThreshold=self.\n ransacT)\n', (3174, 3244), True, 'import cv2 as cv\n'), ((5094, 5106), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (5104, 5106), True, 'import matplotlib.pyplot as plt\n'), ((5901, 5912), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (5910, 5912), True, 'import matplotlib.pyplot as plt\n'), ((4341, 4360), 'numpy.float32', 'np.float32', (['src_pts'], {}), '(src_pts)\n', (4351, 4360), True, 'import numpy as np\n'), ((4528, 4588), 'numpy.float32', 'np.float32', (['[[x, y], [x, y + h], [x + w, y + h], [x + w, y]]'], {}), '([[x, y], [x, y + h], [x + w, y + h], [x + w, y]])\n', (4538, 4588), True, 'import numpy as np\n'), ((5169, 5209), 'cv2.cvtColor', 'cv.cvtColor', (['img1_rect', 'cv.COLOR_BGR2RGB'], {}), '(img1_rect, cv.COLOR_BGR2RGB)\n', (5180, 5209), True, 'import cv2 as cv\n'), ((5339, 5379), 'cv2.cvtColor', 'cv.cvtColor', (['img2_rect', 'cv.COLOR_BGR2RGB'], {}), '(img2_rect, cv.COLOR_BGR2RGB)\n', (5350, 5379), True, 'import cv2 as cv\n'), ((5731, 5765), 'cv2.cvtColor', 'cv.cvtColor', (['X', 'cv.COLOR_RGBA2BGRA'], {}), '(X, cv.COLOR_RGBA2BGRA)\n', (5742, 5765), True, 
'import cv2 as cv\n'), ((6713, 6746), 'numpy.hstack', 'np.hstack', (['[img1_rect, img2_rect]'], {}), '([img1_rect, img2_rect])\n', (6722, 6746), True, 'import numpy as np\n'), ((6770, 6864), 'cv2.drawMatches', 'cv.drawMatches', (['img1_rect', 'img1.kp', 'img2_rect', 'img2.kp', 'self.matches_', 'None'], {}), '(img1_rect, img1.kp, img2_rect, img2.kp, self.matches_, None,\n **draw_params)\n', (6784, 6864), True, 'import cv2 as cv\n'), ((7000, 7035), 'numpy.vstack', 'np.vstack', (['[img_original, img_drow]'], {}), '([img_original, img_drow])\n', (7009, 7035), True, 'import numpy as np\n'), ((9905, 9924), 'numpy.float32', 'np.float32', (['src_pts'], {}), '(src_pts)\n', (9915, 9924), True, 'import numpy as np\n'), ((10091, 10151), 'numpy.float32', 'np.float32', (['[[x, y], [x, y + h], [x + w, y + h], [x + w, y]]'], {}), '([[x, y], [x, y + h], [x + w, y + h], [x + w, y]])\n', (10101, 10151), True, 'import numpy as np\n'), ((2816, 2875), 'numpy.float32', 'np.float32', (['[img1.kp[m.queryIdx].pt for m in self.matches_]'], {}), '([img1.kp[m.queryIdx].pt for m in self.matches_])\n', (2826, 2875), True, 'import numpy as np\n'), ((2980, 3039), 'numpy.float32', 'np.float32', (['[img2.kp[m.trainIdx].pt for m in self.matches_]'], {}), '([img2.kp[m.trainIdx].pt for m in self.matches_])\n', (2990, 3039), True, 'import numpy as np\n'), ((4737, 4750), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (4745, 4750), True, 'import numpy as np\n'), ((5591, 5617), 'numpy.frombuffer', 'np.frombuffer', (['s', 'np.uint8'], {}), '(s, np.uint8)\n', (5604, 5617), True, 'import numpy as np\n'), ((10330, 10343), 'numpy.int32', 'np.int32', (['dst'], {}), '(dst)\n', (10338, 10343), True, 'import numpy as np\n'), ((6493, 6538), 'numpy.full', 'np.full', (['(H2 - H1, W1, C)', '(255)'], {'dtype': '"""uint8"""'}), "((H2 - H1, W1, C), 255, dtype='uint8')\n", (6500, 6538), True, 'import numpy as np\n'), ((9569, 9596), 'numpy.array', 'np.array', (['kp[m.queryIdx].pt'], {}), '(kp[m.queryIdx].pt)\n', 
(9577, 9596), True, 'import numpy as np\n'), ((9599, 9626), 'numpy.array', 'np.array', (['kp[m.trainIdx].pt'], {}), '(kp[m.trainIdx].pt)\n', (9607, 9626), True, 'import numpy as np\n'), ((6638, 6683), 'numpy.full', 'np.full', (['(H1 - H2, W2, C)', '(255)'], {'dtype': '"""uint8"""'}), "((H1 - H2, W2, C), 255, dtype='uint8')\n", (6645, 6683), True, 'import numpy as np\n'), ((10876, 10908), 'numpy.array', 'np.array', (['img1.kp[m.queryIdx].pt'], {}), '(img1.kp[m.queryIdx].pt)\n', (10884, 10908), True, 'import numpy as np\n'), ((11008, 11040), 'numpy.array', 'np.array', (['img1.kp[m.trainIdx].pt'], {}), '(img1.kp[m.trainIdx].pt)\n', (11016, 11040), True, 'import numpy as np\n'), ((11233, 11265), 'numpy.array', 'np.array', (['img1.kp[m.queryIdx].pt'], {}), '(img1.kp[m.queryIdx].pt)\n', (11241, 11265), True, 'import numpy as np\n'), ((11496, 11528), 'numpy.array', 'np.array', (['img1.kp[m.trainIdx].pt'], {}), '(img1.kp[m.trainIdx].pt)\n', (11504, 11528), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Aug 11 11:10:17 2021
@author: nmei
"""
import os,gc,torch
import numpy as np
import pandas as pd
from utils_deep import (data_loader,
candidates,
define_augmentations,
createLossAndOptimizer,
train_and_validation,
resample_ttest_2sample,
noise_fuc,
make_decoder,
decode_hidden_layer,
resample_ttest,
resample_behavioral_estimate,
simple_augmentations,
behavioral_evaluate
)
from joblib import Parallel,delayed
from sklearn.decomposition import PCA
# For each backbone: fine-tune a linear read-out on top of a frozen
# ImageNet-pretrained CNN, then -- across increasing image-noise levels --
# compare the CNN's behavioral performance against a linear SVM decoding the
# *first* convolutional layer's features (PCA-reduced), with permutation
# p-values for both.
for _model_name in ['vgg19_bn','resnet50','alexnet','densenet169','mobilenet']:
    # experiment control
    model_dir = '../models'
    results_dir = '../results/first_layer_only'
    train_folder = 'greyscaled'
    valid_folder = 'experiment_images_greyscaled'
    train_root = f'../data/{train_folder}/'
    valid_root = f'../data/{valid_folder}'
    image_resize = 128
    batch_size = 8
    lr = 1e-4
    n_epochs = int(1e3)
    device = 'cpu'
    pretrain_model_name = _model_name
    output_activation = 'softmax'
    output_units = 2
    categorical = True
    n_experiment_runs = 20
    n_noise_levels = 50
    n_permutations = int(1e4)
    # Noise variances: a clean (0) condition plus n_noise_levels values
    # log-spaced between 0.1 and 1000.
    noise_levels = np.concatenate([[0],[item for item in np.logspace(-1,3,n_noise_levels)]])
    if not os.path.exists(results_dir,):
        os.mkdir(results_dir)
    class FCNN_model(torch.nn.Module):
        """Frozen pretrained backbone + trainable 2-unit linear read-out.

        forward() returns both the read-out logits and the (flattened)
        activations of the backbone's first convolutional layer.
        """
        def __init__(self,
                     model_name,
                     in_shape = (1,3,128,128),
                     ):
            super(FCNN_model,self).__init__()
            torch.manual_seed(12345)
            self.pretrained_model = candidates(model_name,pretrained=True)
            self.in_shape = in_shape
            # resnet50 names its first conv layer differently from the
            # torchvision models that expose a .features sequential.
            if model_name == 'resnet50':
                self.first_layer_func = self.pretrained_model.conv1
            else:
                self.first_layer_func = self.pretrained_model.features[0]
            # freeze the weights
            for params in self.pretrained_model.parameters():
                params.requires_grad = False
            # add a last layer (backbone outputs 1000-d ImageNet logits)
            self.linear_layer = torch.nn.Linear(1000,2,)
        def forward(self,x):
            out = self.pretrained_model(x)
            pred = self.linear_layer(out)
            # First-layer feature maps flattened to (batch, -1) for decoding.
            features = self.first_layer_func(x).view(x.shape[0],-1)
            return pred,features
    print('set up random seeds')
    torch.manual_seed(12345)
    if torch.cuda.is_available():torch.cuda.empty_cache();torch.cuda.manual_seed(12345);
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f'device:{device}')
    model_to_train = FCNN_model(pretrain_model_name,)
    model_to_train.to(device)
    # Count only the trainable (read-out) parameters.
    model_parameters = filter(lambda p: p.requires_grad, model_to_train.parameters())
    params = sum([np.prod(p.size()) for p in model_parameters])
    print(pretrain_model_name,
          # model_to_train(next(iter(train_loader))[0]),
          f'total params = {params}')
    f_name = os.path.join(model_dir,f'{pretrain_model_name}_first_layer.pth')
    # train the model
    loss_func,optimizer = createLossAndOptimizer(model_to_train,learning_rate = lr)
    model_to_train = train_and_validation(
            model_to_train,
            f_name,
            output_activation,
            loss_func,
            optimizer,
            image_resize = image_resize,
            device = device,
            batch_size = batch_size,
            n_epochs = n_epochs,
            print_train = True,
            patience = 5,
            n_noise = 1,
            train_root = train_root,
            valid_root = valid_root,)
    # Reload the best checkpoint saved by train_and_validation and evaluate on CPU.
    del model_to_train
    model_to_train = torch.load(f_name)
    model_to_train.to('cpu')
    np.random.seed(12345)
    torch.manual_seed(12345)
    to_round = 9
    # NOTE(review): csv_saving_name is never used below; results are written
    # to 'decodings.csv' at the end of each noise iteration instead.
    csv_saving_name = os.path.join(results_dir,pretrain_model_name,'performance_results.csv')
    if not os.path.exists(os.path.join(results_dir,pretrain_model_name)):
        os.mkdir(os.path.join(results_dir,pretrain_model_name))
    # One row per noise level is accumulated into these columns.
    results = dict(model_name = [],
                   noise_level = [],
                   cnn_score = [],
                   cnn_pval = [],
                   first_score_mean = [],
                   first_score_std = [],
                   svm_first_pval = [],
                   )
    model_to_train.eval()
    for param in model_to_train.parameters():
        param.requires_grad = False
    loss_func,optimizer = createLossAndOptimizer(model_to_train,learning_rate = lr)
    for ii_var,var in enumerate(noise_levels):
        np.random.seed(12345)
        torch.manual_seed(12345)
        var = round(var,to_round)
        valid_loader = data_loader(
            valid_root,
            augmentations = simple_augmentations(image_resize,var),
            batch_size = batch_size,
            # here I turn on the shuffle like it is in a real experiment
            )
        # evaluate the model
        y_trues,y_preds,features,labels = behavioral_evaluate(
            model_to_train,
            n_experiment_runs,
            loss_func,
            valid_loader,
            device,
            categorical = categorical,
            output_activation = output_activation,
            )
        behavioral_scores = resample_behavioral_estimate(y_trues,y_preds,int(1e3),shuffle = False)
        # print(var,np.mean(behavioral_scores))
        # del features,labels
        gc.collect()
        # Empirical chance distribution: re-estimate the score with shuffled
        # labels n_permutations times (in parallel).
        chance_level = Parallel(n_jobs = -1,verbose = 1)(delayed(resample_behavioral_estimate)(**{
            'y_true':y_trues,
            'y_pred':y_preds,
            'n_sampling':int(1e1),
            'shuffle':True,}) for _ in range(n_permutations))
        gc.collect()
        # One-sided permutation p-value with the standard +1 correction.
        cnn_pval = (np.sum(np.array(chance_level).mean(1) >= np.mean(behavioral_scores)) + 1) / (n_permutations + 1)
        decoder = make_decoder('linear-SVM',n_jobs = 1)
        # Concatenate the per-run batches of first-layer features / labels
        # into flat numpy arrays for the SVM decoder.
        decode_features = torch.cat([torch.cat(item).detach().cpu() for item in features]).numpy()
        decode_labels = torch.cat([torch.cat(item).detach().cpu() for item in labels ]).numpy()
        del features,labels
        if len(decode_labels.shape) > 1:
            decode_labels = decode_labels[:,-1]
        print('fitting PCA...')
        # Keep enough components to explain 90% of the variance.
        pca_features = PCA(n_components = .9,random_state = 12345).fit_transform(decode_features)
        gc.collect()
        res,_,svm_first_pval = decode_hidden_layer(decoder,pca_features,decode_labels,
                                            n_splits = 50,
                                            test_size = 0.2,)
        gc.collect()
        svm_first_scores = res['test_score']
        print(var,np.mean(behavioral_scores),np.mean(svm_first_scores))
        results['model_name'].append(pretrain_model_name)
        results['noise_level'].append(var)
        results['cnn_score'].append(np.mean(behavioral_scores))
        results['cnn_pval'].append(cnn_pval)
        results['first_score_mean'].append(np.mean(svm_first_scores))
        results['first_score_std'].append(np.std(svm_first_scores))
        results['svm_first_pval'].append(svm_first_pval)
        gc.collect()
        # Write after every noise level so partial results survive a crash.
        results_to_save = pd.DataFrame(results)
        results_to_save.to_csv(os.path.join(results_dir,pretrain_model_name,'decodings.csv'),index = False)
| [
"os.mkdir",
"numpy.random.seed",
"numpy.logspace",
"utils_deep.simple_augmentations",
"torch.cat",
"gc.collect",
"numpy.mean",
"utils_deep.createLossAndOptimizer",
"os.path.join",
"pandas.DataFrame",
"utils_deep.decode_hidden_layer",
"numpy.std",
"torch.load",
"os.path.exists",
"torch.nn... | [((2943, 2967), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (2960, 2967), False, 'import os, gc, torch\n'), ((2975, 3000), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2998, 3000), False, 'import os, gc, torch\n'), ((3607, 3672), 'os.path.join', 'os.path.join', (['model_dir', 'f"""{pretrain_model_name}_first_layer.pth"""'], {}), "(model_dir, f'{pretrain_model_name}_first_layer.pth')\n", (3619, 3672), False, 'import os, gc, torch\n'), ((3749, 3805), 'utils_deep.createLossAndOptimizer', 'createLossAndOptimizer', (['model_to_train'], {'learning_rate': 'lr'}), '(model_to_train, learning_rate=lr)\n', (3771, 3805), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((3857, 4126), 'utils_deep.train_and_validation', 'train_and_validation', (['model_to_train', 'f_name', 'output_activation', 'loss_func', 'optimizer'], {'image_resize': 'image_resize', 'device': 'device', 'batch_size': 'batch_size', 'n_epochs': 'n_epochs', 'print_train': '(True)', 'patience': '(5)', 'n_noise': '(1)', 'train_root': 'train_root', 'valid_root': 'valid_root'}), '(model_to_train, f_name, output_activation, loss_func,\n optimizer, image_resize=image_resize, device=device, batch_size=\n batch_size, n_epochs=n_epochs, print_train=True, patience=5, n_noise=1,\n train_root=train_root, valid_root=valid_root)\n', (3877, 4126), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((4404, 4422), 'torch.load', 'torch.load', (['f_name'], {}), '(f_name)\n', (4414, 4422), False, 
'import os, gc, torch\n'), ((4461, 4482), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (4475, 4482), True, 'import numpy as np\n'), ((4487, 4511), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (4504, 4511), False, 'import os, gc, torch\n'), ((4560, 4633), 'os.path.join', 'os.path.join', (['results_dir', 'pretrain_model_name', '"""performance_results.csv"""'], {}), "(results_dir, pretrain_model_name, 'performance_results.csv')\n", (4572, 4633), False, 'import os, gc, torch\n'), ((5321, 5377), 'utils_deep.createLossAndOptimizer', 'createLossAndOptimizer', (['model_to_train'], {'learning_rate': 'lr'}), '(model_to_train, learning_rate=lr)\n', (5343, 5377), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((1801, 1828), 'os.path.exists', 'os.path.exists', (['results_dir'], {}), '(results_dir)\n', (1815, 1828), False, 'import os, gc, torch\n'), ((1839, 1860), 'os.mkdir', 'os.mkdir', (['results_dir'], {}), '(results_dir)\n', (1847, 1860), False, 'import os, gc, torch\n'), ((3001, 3025), 'torch.cuda.empty_cache', 'torch.cuda.empty_cache', ([], {}), '()\n', (3023, 3025), False, 'import os, gc, torch\n'), ((3026, 3055), 'torch.cuda.manual_seed', 'torch.cuda.manual_seed', (['(12345)'], {}), '(12345)\n', (3048, 3055), False, 'import os, gc, torch\n'), ((5439, 5460), 'numpy.random.seed', 'np.random.seed', (['(12345)'], {}), '(12345)\n', (5453, 5460), True, 'import numpy as np\n'), ((5469, 5493), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (5486, 5493), False, 'import os, gc, torch\n'), ((5894, 6052), 'utils_deep.behavioral_evaluate', 'behavioral_evaluate', (['model_to_train', 'n_experiment_runs', 'loss_func', 'valid_loader', 'device'], {'categorical': 
'categorical', 'output_activation': 'output_activation'}), '(model_to_train, n_experiment_runs, loss_func,\n valid_loader, device, categorical=categorical, output_activation=\n output_activation)\n', (5913, 6052), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((6460, 6472), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6470, 6472), False, 'import os, gc, torch\n'), ((6737, 6749), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6747, 6749), False, 'import os, gc, torch\n'), ((6894, 6930), 'utils_deep.make_decoder', 'make_decoder', (['"""linear-SVM"""'], {'n_jobs': '(1)'}), "('linear-SVM', n_jobs=1)\n", (6906, 6930), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((7385, 7397), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7395, 7397), False, 'import os, gc, torch\n'), ((7429, 7518), 'utils_deep.decode_hidden_layer', 'decode_hidden_layer', (['decoder', 'pca_features', 'decode_labels'], {'n_splits': '(50)', 'test_size': '(0.2)'}), '(decoder, pca_features, decode_labels, n_splits=50,\n test_size=0.2)\n', (7448, 7518), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((7594, 7606), 'gc.collect', 'gc.collect', ([], {}), '()\n', (7604, 7606), False, 'import os, gc, torch\n'), ((8137, 8149), 'gc.collect', 'gc.collect', ([], {}), '()\n', (8147, 8149), False, 'import 
os, gc, torch\n'), ((8176, 8197), 'pandas.DataFrame', 'pd.DataFrame', (['results'], {}), '(results)\n', (8188, 8197), True, 'import pandas as pd\n'), ((2094, 2118), 'torch.manual_seed', 'torch.manual_seed', (['(12345)'], {}), '(12345)\n', (2111, 2118), False, 'import os, gc, torch\n'), ((2155, 2194), 'utils_deep.candidates', 'candidates', (['model_name'], {'pretrained': '(True)'}), '(model_name, pretrained=True)\n', (2165, 2194), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((2661, 2685), 'torch.nn.Linear', 'torch.nn.Linear', (['(1000)', '(2)'], {}), '(1000, 2)\n', (2676, 2685), False, 'import os, gc, torch\n'), ((3093, 3118), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3116, 3118), False, 'import os, gc, torch\n'), ((4658, 4704), 'os.path.join', 'os.path.join', (['results_dir', 'pretrain_model_name'], {}), '(results_dir, pretrain_model_name)\n', (4670, 4704), False, 'import os, gc, torch\n'), ((4723, 4769), 'os.path.join', 'os.path.join', (['results_dir', 'pretrain_model_name'], {}), '(results_dir, pretrain_model_name)\n', (4735, 4769), False, 'import os, gc, torch\n'), ((6496, 6526), 'joblib.Parallel', 'Parallel', ([], {'n_jobs': '(-1)', 'verbose': '(1)'}), '(n_jobs=-1, verbose=1)\n', (6504, 6526), False, 'from joblib import Parallel, delayed\n'), ((7670, 7696), 'numpy.mean', 'np.mean', (['behavioral_scores'], {}), '(behavioral_scores)\n', (7677, 7696), True, 'import numpy as np\n'), ((7697, 7722), 'numpy.mean', 'np.mean', (['svm_first_scores'], {}), '(svm_first_scores)\n', (7704, 7722), True, 'import numpy as np\n'), ((7861, 7887), 'numpy.mean', 'np.mean', (['behavioral_scores'], {}), '(behavioral_scores)\n', (7868, 7887), True, 'import numpy as np\n'), ((7977, 8002), 'numpy.mean', 'np.mean', 
(['svm_first_scores'], {}), '(svm_first_scores)\n', (7984, 8002), True, 'import numpy as np\n'), ((8046, 8070), 'numpy.std', 'np.std', (['svm_first_scores'], {}), '(svm_first_scores)\n', (8052, 8070), True, 'import numpy as np\n'), ((8229, 8292), 'os.path.join', 'os.path.join', (['results_dir', 'pretrain_model_name', '"""decodings.csv"""'], {}), "(results_dir, pretrain_model_name, 'decodings.csv')\n", (8241, 8292), False, 'import os, gc, torch\n'), ((5633, 5672), 'utils_deep.simple_augmentations', 'simple_augmentations', (['image_resize', 'var'], {}), '(image_resize, var)\n', (5653, 5672), False, 'from utils_deep import data_loader, candidates, define_augmentations, createLossAndOptimizer, train_and_validation, resample_ttest_2sample, noise_fuc, make_decoder, decode_hidden_layer, resample_ttest, resample_behavioral_estimate, simple_augmentations, behavioral_evaluate\n'), ((7302, 7343), 'sklearn.decomposition.PCA', 'PCA', ([], {'n_components': '(0.9)', 'random_state': '(12345)'}), '(n_components=0.9, random_state=12345)\n', (7305, 7343), False, 'from sklearn.decomposition import PCA\n'), ((1749, 1783), 'numpy.logspace', 'np.logspace', (['(-1)', '(3)', 'n_noise_levels'], {}), '(-1, 3, n_noise_levels)\n', (1760, 1783), True, 'import numpy as np\n'), ((6530, 6567), 'joblib.delayed', 'delayed', (['resample_behavioral_estimate'], {}), '(resample_behavioral_estimate)\n', (6537, 6567), False, 'from joblib import Parallel, delayed\n'), ((6811, 6837), 'numpy.mean', 'np.mean', (['behavioral_scores'], {}), '(behavioral_scores)\n', (6818, 6837), True, 'import numpy as np\n'), ((6777, 6799), 'numpy.array', 'np.array', (['chance_level'], {}), '(chance_level)\n', (6785, 6799), True, 'import numpy as np\n'), ((6969, 6984), 'torch.cat', 'torch.cat', (['item'], {}), '(item)\n', (6978, 6984), False, 'import os, gc, torch\n'), ((7068, 7083), 'torch.cat', 'torch.cat', (['item'], {}), '(item)\n', (7077, 7083), False, 'import os, gc, torch\n')] |
import numpy as np
from scipy.optimize import least_squares
class LineFromPlanes:
    """Fit a 3D line as the intersection of two robustly fitted planes.

    It is favorable to single out `z` as the parametrising variable.
    x, y behave equivalently, while z is anyways somewhat different:
    - z is not continuously read out, but per layer with gaps.
    - The range of z parameters is different.
    - The cosmics that we are looking for do not penetrate the detector uniformly.
      Instead, they favor the z-axis direction.

    The fitted line is x = m_x*z + c_x, y = m_y*z + c_y.
    """

    def __init__(self, dim, init=(1, 1, 0), print_info=True):
        # Residuals of the plane model against the target coordinate.
        def residuals(params, z, other_coord, target_coord):
            return self.plane_form(params, z, other_coord) - target_coord

        x_pts, y_pts, z_pts = (dim[axis] for axis in ("x", "y", "z"))
        # Robust (soft-L1) fits of x = a*z + b*y + c and y = a*z + b*x + c.
        self.res_x = least_squares(residuals, init, args=(z_pts, y_pts, x_pts), loss="soft_l1")
        self.res_y = least_squares(residuals, init, args=(z_pts, x_pts, y_pts), loss="soft_l1")
        p_x, p_y = self.res_x.x, self.res_y.x

        # Intersect the two planes: substituting one plane equation into the
        # other yields a line parametrised by z.
        def slope(p1, p2):
            return (p1[0] + p1[1] * p2[0]) / (1 - p1[1] * p2[1])

        def intercept(p1, p2):
            return (p1[1] * p2[2] + p1[2]) / (1 - p1[1] * p2[1])

        self.m_x = slope(p_x, p_y)
        self.c_x = intercept(p_x, p_y)
        self.m_y = slope(p_y, p_x)
        self.c_y = intercept(p_y, p_x)
        if print_info:
            print("Plane fit status:", self.res_x.status, self.res_y.status)
            print(f"x = {self.m_x:.2f}*z + {self.c_x:.2f}")
            print(f"y = {self.m_y:.2f}*z + {self.c_y:.2f}")

    @classmethod
    def plane_form(cls, p, z, x_or_y):
        """Plane model p[0]*z + p[1]*x_or_y + p[2]."""
        return p[0] * z + p[1] * x_or_y + p[2]

    def __call__(self, z):
        """Evaluate the fitted line at z; returns (x, y)."""
        x_vals = self.m_x * z + self.c_x
        y_vals = self.m_y * z + self.c_y
        return x_vals, y_vals

    def get_line_points(self, pos, n_points=100):
        """Sample n_points along the line over pos['z'] and keep only those
        whose x/y fall strictly inside the pos['x'] / pos['y'] ranges."""
        z_vals = np.linspace(pos["z"][0], pos["z"][-1], n_points)
        x_vals, y_vals = self(z_vals)
        inside_x = (x_vals > pos["x"][0]) & (x_vals < pos["x"][-1])
        inside_y = (y_vals > pos["y"][0]) & (y_vals < pos["y"][-1])
        keep = inside_x & inside_y
        return {"z": z_vals[keep], "x": x_vals[keep], "y": y_vals[keep]}
| [
"scipy.optimize.least_squares",
"numpy.linspace"
] | [((707, 772), 'scipy.optimize.least_squares', 'least_squares', (['plane_lsq', 'init'], {'args': '(zi, yi, xi)', 'loss': '"""soft_l1"""'}), "(plane_lsq, init, args=(zi, yi, xi), loss='soft_l1')\n", (720, 772), False, 'from scipy.optimize import least_squares\n'), ((789, 854), 'scipy.optimize.least_squares', 'least_squares', (['plane_lsq', 'init'], {'args': '(zi, xi, yi)', 'loss': '"""soft_l1"""'}), "(plane_lsq, init, args=(zi, xi, yi), loss='soft_l1')\n", (802, 854), False, 'from scipy.optimize import least_squares\n'), ((1711, 1755), 'numpy.linspace', 'np.linspace', (['pos[k][0]', 'pos[k][-1]', 'n_points'], {}), '(pos[k][0], pos[k][-1], n_points)\n', (1722, 1755), True, 'import numpy as np\n')] |
## Code to extract college names from video description of videos on nptel youtube channel.
import json
import numpy as np
import statistics
# Canonical college codes: every textual variant that may appear in a video
# description maps to one short code.
college_map = {
    "IITK": "IITK",
    "IIT Kanpur": "IITK",
    "Indian Institute of Technology Kanpur": "IITK",
    "IITB": "IITB",
    "IIT Bombay": "IITB",
    "Indian Institute of Technology Bombay": "IITB",
    "IITM": "IITM",
    "IIT Madras": "IITM",
    "Indian Institute of Technology Madras": "IITM",
    "IITD": "IITD",
    "IIT Delhi": "IITD",
    "Indian Institute of Technology Delhi": "IITD",
    "IITKGP": "IITKGP",
    "IIT Kharagpur": "IITKGP",
    "Indian Institute of Technology Kharagpur": "IITKGP",
}
# Canonical department codes, same idea as college_map.
department_map = {
    "Computer Science": "CSE",
    "Computer": "CSE",
    "computer": "CSE",
    "CSE": "CSE",
    "Electrical Engineering": "EE",
    "electrical": "EE",
    "Electrical": "EE",
    "EE": "EE",
    "Mechanical Engineering": "ME",
    "Mechanical": "ME",
    "ME": "ME",
    "Chemical": "CHE",
    "CHE": "CHE"
}
# Channel ids we are willing to scan (currently only the NPTEL channel).
considered_channels_id = ['UC640y4UvDAlya_WOj5U4pfA']


def get_college_name():
    """Scan data/all_videos.json and bucket videos by college and department.

    Only videos whose channel id appears in considered_channels_id are
    examined; colleges/departments are detected by substring match of the
    video description against college_map / department_map.

    Returns a 3-tuple:
      * map_vide_college: video id -> canonical college code
      * college_to_videos: college code -> list of video ids
      * college_to_dep_video: college -> department -> aggregate dict with
        a "videos" list, a per-video "viewCount" list, and summed
        "likeCount" / "dislikeCount" integers
    """
    with open('data/all_videos.json', 'r') as f:
        vids = json.load(f)
    map_vide_college = {}
    college_to_videos = {}
    college_to_dep_video = {}
    for vid in vids:
        if vids[vid]['snippet']['channelId'] not in considered_channels_id:
            continue
        video_description = vids[vid]['snippet'].get('description', None)
        if not video_description:
            continue
        stats = vids[vid]["statistics"]
        for key in college_map:
            if key not in video_description:
                continue
            college = college_map[key]
            print(video_description, college)
            map_vide_college[vid] = college
            college_to_videos.setdefault(college, []).append(vid)
            for dep in department_map:
                if dep not in video_description:
                    continue
                dep_code = department_map[dep]
                deps = college_to_dep_video.setdefault(college, {})
                if dep_code not in deps:
                    # First video seen for this (college, department) pair.
                    deps[dep_code] = {
                        "videos": [vid],
                        "viewCount": [int(stats["viewCount"])],
                        "likeCount": int(stats["likeCount"]),
                        "dislikeCount": int(stats["dislikeCount"]),
                    }
                else:
                    entry = deps[dep_code]
                    entry["videos"].append(vid)
                    entry["viewCount"].append(int(stats["viewCount"]))
                    entry["likeCount"] += int(stats["likeCount"])
                    entry["dislikeCount"] += int(stats["dislikeCount"])
    return map_vide_college, college_to_videos, college_to_dep_video
# Build the lookup tables once, then report per-college video counts.
map_videos,college_to_videos,college_dep_to_videos = get_college_name()
for college in college_to_videos:
    print(college, len(college_to_videos[college]))
# li = []
# Accumulate [total views, total videos] per college across its departments.
college_views = {}
for college in college_dep_to_videos:
    if college not in college_views:
        college_views[college] = [0,0]
    for dep in college_dep_to_videos[college]:
        count_videos = len(college_dep_to_videos[college][dep]["videos"])
        # Median is more robust than the mean against a single viral video.
        median_views = np.median(college_dep_to_videos[college][dep]["viewCount"])
        college_views[college][0]+= sum(college_dep_to_videos[college][dep]["viewCount"])
        college_views[college][1] +=len(college_dep_to_videos[college][dep]["videos"])
        # li.append([college, dep, count_videos, college_dep_to_videos[college][dep]["viewCount"]/count_videos, college_dep_to_videos[college][dep]["likeCount"]/count_videos, college_dep_to_videos[college][dep]["likeCount"]/(college_dep_to_videos[college][dep]["likeCount"] + college_dep_to_videos[college][dep]["dislikeCount"]))])
        # Per-department row: (college, dept, #videos, median views,
        # likes per video, like ratio).
        print("(%s, %s, %d, %0.3f, %0.3f, %0.3f)" %(college, dep, count_videos, median_views, college_dep_to_videos[college][dep]["likeCount"]/count_videos, college_dep_to_videos[college][dep]["likeCount"]/(college_dep_to_videos[college][dep]["likeCount"] + college_dep_to_videos[college][dep]["dislikeCount"])))
# Mean views per video for each college.
for college in college_views:
    print(college, college_views[college][0]/college_views[college][1])
# NOTE(review): the output files are never closed; a with-block would
# guarantee the JSON is flushed to disk.
outfile = open("data/college_to_videos.json", "w")
json.dump(college_to_videos, outfile , indent = 4)
outfile = open("data/college_dep_to_videos.json", "w")
json.dump(college_dep_to_videos, outfile , indent = 4)
"numpy.median",
"json.dump",
"json.load"
] | [((6194, 6241), 'json.dump', 'json.dump', (['college_to_videos', 'outfile'], {'indent': '(4)'}), '(college_to_videos, outfile, indent=4)\n', (6203, 6241), False, 'import json\n'), ((6301, 6352), 'json.dump', 'json.dump', (['college_dep_to_videos', 'outfile'], {'indent': '(4)'}), '(college_dep_to_videos, outfile, indent=4)\n', (6310, 6352), False, 'import json\n'), ((1233, 1245), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1242, 1245), False, 'import json\n'), ((5158, 5217), 'numpy.median', 'np.median', (["college_dep_to_videos[college][dep]['viewCount']"], {}), "(college_dep_to_videos[college][dep]['viewCount'])\n", (5167, 5217), True, 'import numpy as np\n')] |
import csv
import math
import os
import sys

import numpy as np
#returns array of file names
def getAllFolderFiles(folderName):
    """Distance-sorted file names from the given piPACT data sub-folder."""
    # NOTE(review): the data root is hard-coded to the author's machine.
    data_root = os.path.join(r'C:\Users\jzhan\Desktop\piPACT', folderName)
    return sortDistance(os.listdir(data_root))
def _read_csv_column(folderName, file, column):
    """Return the given column (as strings) from every non-header row.

    Shared helper: the four public readers below differed only in the
    column index, so their quadruplicated bodies were collapsed here.
    """
    fileName = os.path.join(folderName, file)
    values = []
    with open(fileName) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        for lineCount, row in enumerate(csv_reader):
            if lineCount != 0:  # line 0 is the header
                values.append(row[column])
    return values


def getAllFileUUIDData(folderName, file):
    """All beacon UUID values (column 3) in the file."""
    return _read_csv_column(folderName, file, 3)


#Gets all the Major values
def getAllFileMajorData(folderName, file):
    """All beacon Major values (column 4) in the file."""
    return _read_csv_column(folderName, file, 4)


#Gets all the Minor values
def getAllFileMinorData(folderName, file):
    """All beacon Minor values (column 5) in the file."""
    return _read_csv_column(folderName, file, 5)


#Gets all of the RSSI values in a file
def getAllFileRSSIData(folderName, file):
    """All RSSI values (column 7) in the file, still as strings."""
    return _read_csv_column(folderName, file, 7)
#returns array of first 100 RSSI values
#in file, real data starts on line 2, so 100th RSSI value is on line 101
#TODO -> fix how folderName//file -> folerName/file
def getRSSIData(folderName, file):
    """First 100 RSSI readings (column 7) as ints; unfilled slots stay 0."""
    path = os.path.join(folderName, file)
    readings = [0] * 100
    with open(path) as csv_file:
        rows = csv.reader(csv_file, delimiter=',')
        for row_number, row in enumerate(rows):
            if row_number == 0:  # header
                continue
            if row_number > 100:
                break
            readings[row_number - 1] = int(row[7])
    return readings


def avgRSSI(allData):
    """Arithmetic mean of a list of RSSI readings."""
    return sum(allData) / len(allData)


def getAllRSSI(folderName):
    """Average RSSI per file in the folder, distance-ordered.

    The first entry is dropped for the 43-distance log, mirroring the
    baseline handling in getAllDistances.
    """
    averages = [avgRSSI(getRSSIData(folderName, name))
                for name in getAllFolderFiles(folderName)]
    if len(getAllDistances(folderName)) == 43:
        del averages[0]
    return averages
#returns distance of a file in inches
def getDistance(fileName):
    """Parse the distance (inches) embedded between the first two underscores."""
    sep = fileName.find('_')
    trimmed = fileName.replace('_', '', 1)
    return np.float32(trimmed[sep:trimmed.find('_')])


#Gets all distances in a folder
def getAllDistances(folderName):
    """Distances for every file in the folder, distance-ordered."""
    distances = [getDistance(name) for name in getAllFolderFiles(folderName)]
    # Only the first log contains a 0.0 baseline entry to discard.
    if distances[0] == 0.0:
        distances.remove(0.0)
    return distances


#Creates an array of all distances in folder, sorted
def sortDistance(fileNames):
    """Return fileNames ordered by their encoded distance (ascending).

    Fix: the previous insertion routine mutated (and emptied) the caller's
    list through the `copy = fileNames` alias, and silently dropped any
    file whose distance duplicated one already placed, because no strict
    "between two neighbours" slot exists for equal keys. A stable sort
    keeps every file and leaves the input untouched.
    """
    return sorted(fileNames, key=getDistance)
#returns temperature in fahrenheit
def getTemp(fileName):
    """Temperature encoded as two digits 25 and 24 chars before the name's end."""
    n = len(fileName)
    return int(fileName[n - 25]) * 10 + int(fileName[n - 24])


def getAllTemp(folderName):
    """Temperature reading per file; the first (baseline) entry is dropped."""
    readings = [getTemp(name) for name in getAllFolderFiles(folderName)]
    del readings[0]
    return readings


#returns "relative humidity" in percent form
def getHumidity(fileName):
    """Relative humidity encoded as two digits 22 and 21 chars before the end."""
    n = len(fileName)
    return int(fileName[n - 22]) * 10 + int(fileName[n - 21])


def getAllHumidity(folderName):
    """Humidity reading per file; the first (baseline) entry is dropped."""
    readings = [getHumidity(name) for name in getAllFolderFiles(folderName)]
    del readings[0]
    return readings


#Gets date from file
def getDate(fileName):
    """Eight digits immediately before the 'T' separator (presumably YYYYMMDD)."""
    t_pos = fileName.index("T")
    return int(fileName[t_pos - 8: t_pos])


def getAllDate(folderName):
    """Capture date per file; the first (baseline) entry is dropped."""
    dates = [getDate(name) for name in getAllFolderFiles(folderName)]
    del dates[0]
    return dates


#Gets time from file
#fileName ends in .csv
def getTime(fileName):
    """Time digits between 'T' and the trailing '.csv'."""
    t_pos = fileName.index("T")
    return int(fileName[t_pos + 1: len(fileName) - 4])


def getAllTime(folderName):
    """Capture time per file; the first (baseline) entry is dropped."""
    times = [getTime(name) for name in getAllFolderFiles(folderName)]
    del times[0]
    return times
#Math functions
#Absolute value all values in a list
def Abs(list):
    """Element-wise absolute value of a list of numbers."""
    # NOTE: the parameter name 'list' shadows the builtin; kept for
    # backward compatibility with keyword callers.
    return [abs(num) for num in list]


#e to the power of all values in a list
def ePower(list):
    """Element-wise e**x of a list of numbers.

    Fix: the original referenced an undefined name `e` and raised
    NameError on every call; math.exp supplies the intended exponential.
    """
    return [math.exp(num) for num in list]


#Do log base 10 on all values of a list
def log10(list):
    """Element-wise base-10 logarithm of a list of numbers."""
    return [math.log10(num) for num in list]
#Diagnostic Functions
# NOTE(review): at exactly 72.0 inches a pair can count as both a true
# positive and a true negative (<= vs >= boundaries) — confirm intended.
#Say sick when really they are sick
def truePositive(actual, prediction):
    """Both real and predicted distances are within the 72-inch threshold."""
    if actual > 72.0 or prediction > 72.0:
        return False
    print(prediction, "is a True Positive when distance really is ...", actual)
    return True


#Say sick when really not sick
def falsePositive(actual, prediction):
    """Predicted 'close' although the true distance was beyond 72 inches."""
    if actual <= 72.0 or prediction > 72.0:
        return False
    print("***", end = " ")
    print(prediction, "is a False Positive when distance really is ...", actual)
    return True


#Say not sick when not sick
def trueNegative(actual, prediction):
    """Both real and predicted distances are beyond the 72-inch threshold."""
    if actual < 72.0 or prediction <= 72.0:
        return False
    print(prediction, "is a True Negative when distance really is ...", actual)
    return True


#Say not sick when really they are sick
def falseNegative(actual, prediction):
    """Predicted 'far' although the true distance was within 72 inches."""
    if actual >= 72.0 or prediction <= 72.0:
        return False
    print("***", end = " ")
    print(prediction, "is a False Negative when distance really is ...", actual)
    return True


def checkALL(actual, prediction):
    """Run all four diagnostics on one (actual, prediction) pair, print tallies."""
    outcomes = [
        (truePositive(actual, prediction), "True Positives"),
        (falsePositive(actual, prediction), "False Positives"),
        (trueNegative(actual, prediction), "True Negatives"),
        (falseNegative(actual, prediction), "False Negatives"),
    ]
    print("\n")
    for hit, label in outcomes:
        print(int(hit), label)
#Check how long two devices have been together
def tooLong(fileName1, fileName2):
    """Were the two capture files recorded close enough in time to matter?

    Same 'hour block' (time // 10000): within 1000 time units counts;
    adjacent hour blocks: within 5000 units counts; otherwise False.
    """
    t1 = getTime(fileName1)
    t2 = getTime(fileName2)
    hour1, hour2 = int(t1 / 10000), int(t2 / 10000)
    gap = abs(t1 - t2)
    if hour1 == hour2:
        return gap <= 1000
    if abs(hour1 - hour2) == 1:
        return gap <= 5000
    return False
"""
***************************************************************************************/
* Title: Writing CSV files in Python Source Code
* Author: Programiz
* Date: Unknown
* Code version: Modified
* Availability: https://www.programiz.com/python-programming/writing-csv-files
*
***************************************************************************************/
"""
#Create a new csv file for all data for report
def writeCSV():
    """Write newD.csv: one row per distance, one column per test condition."""
    headers = ['Distance', 'No_Obstructions', 'Short_A', 'Short_S']
    headers += ['2_Shorts', 'Jean_A', 'Jean_S', '2_Jeans', 'Shelf_M', 'Human_A']
    # Fix: newline='' is required by the csv module; without it every
    # record is followed by a blank line in the output file on Windows
    # (the platform this script targets).
    with open('newD.csv', 'w', newline='') as csvfile:
        writer = csv.DictWriter(csvfile, fieldnames=headers)
        writer.writeheader()
        for index in range(44):
            writer.writerow(csvRow(index))


def csvRow(index):
    """Build one CSV row: distance plus the average RSSI of each folder.

    NOTE(review): every call re-reads every CSV in every folder through
    getAllRSSI, so writeCSV is O(rows * folders * files); caching
    getAllRSSI per folder would be dramatically faster, same output.
    """
    content = {}
    content['Distance'] = str(getAllDistances('No_Obstructions')[index])
    for folder in ['No_Obstructions', 'Short_A', 'Short_S', '2_Shorts',
                   'Jean_A', 'Jean_S', '2_Jeans']:
        content[folder] = str(getAllRSSI(folder)[index])
    # These two folders were measured at fewer distances.
    if index < 39:
        content['Shelf_M'] = str(getAllRSSI('Shelf_M')[index])
    if index < 42:
        content['Human_A'] = str(getAllRSSI('Human_A')[index])
    return content
#Test Code
"""
print("No Obstructions:", getAllRSSI("No_Obstructions"))
print("\n")
print("2 Jeans:", getAllRSSI("2_Jeans"))
print("\n")
print("2 Shorts:", getAllRSSI("2_Shorts"))
print("\n")
print("Human A:", getAllRSSI("human_A"))
print("\n")
print("Jean A:", getAllRSSI("Jean_A"))
print("\n")
print("Jean S:", getAllRSSI("Jean_S"))
print("\n")
print("Shelf M:", getAllRSSI("Shelf_M"))
print("\n")
print("Short A:", getAllRSSI("Short_A"))
print("\n")
print("Short S:", getAllRSSI("Short_S"))
"""
| [
"csv.reader",
"numpy.float32",
"os.path.join",
"os.listdir",
"csv.DictWriter"
] | [((133, 194), 'os.path.join', 'os.path.join', (['"""C:\\\\Users\\\\jzhan\\\\Desktop\\\\piPACT"""', 'folderName'], {}), "('C:\\\\Users\\\\jzhan\\\\Desktop\\\\piPACT', folderName)\n", (145, 194), False, 'import os\n'), ((208, 230), 'os.listdir', 'os.listdir', (['folderPath'], {}), '(folderPath)\n', (218, 230), False, 'import os\n'), ((324, 354), 'os.path.join', 'os.path.join', (['folderName', 'file'], {}), '(folderName, file)\n', (336, 354), False, 'import os\n'), ((722, 752), 'os.path.join', 'os.path.join', (['folderName', 'file'], {}), '(folderName, file)\n', (734, 752), False, 'import os\n'), ((1120, 1150), 'os.path.join', 'os.path.join', (['folderName', 'file'], {}), '(folderName, file)\n', (1132, 1150), False, 'import os\n'), ((1529, 1559), 'os.path.join', 'os.path.join', (['folderName', 'file'], {}), '(folderName, file)\n', (1541, 1559), False, 'import os\n'), ((2058, 2088), 'os.path.join', 'os.path.join', (['folderName', 'file'], {}), '(folderName, file)\n', (2070, 2088), False, 'import os\n'), ((3049, 3069), 'numpy.float32', 'np.float32', (['distance'], {}), '(distance)\n', (3059, 3069), True, 'import numpy as np\n'), ((413, 448), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (423, 448), False, 'import csv\n'), ((811, 846), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (821, 846), False, 'import csv\n'), ((1209, 1244), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1219, 1244), False, 'import csv\n'), ((1618, 1653), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (1628, 1653), False, 'import csv\n'), ((2147, 2182), 'csv.reader', 'csv.reader', (['csv_file'], {'delimiter': '""","""'}), "(csv_file, delimiter=',')\n", (2157, 2182), False, 'import csv\n'), ((8870, 8916), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 
'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (8884, 8916), False, 'import csv\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.