repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
TTS | TTS-master/tests/test_tacotron2_tf_model.py | import os
import unittest
import numpy as np
import tensorflow as tf
import torch
from tests import get_tests_input_path
from TTS.tts.tf.models.tacotron2 import Tacotron2
from TTS.tts.tf.utils.tflite import (convert_tacotron2_to_tflite,
load_tflite_model)
from TTS.utils.io import load_config
tf.get_logger().setLevel('INFO')
#pylint: disable=unused-variable
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
class TacotronTFTrainTest(unittest.TestCase):
@staticmethod
def generate_dummy_inputs():
chars_seq = torch.randint(0, 24, (8, 128)).long().to(device)
chars_seq_lengths = torch.randint(100, 128, (8, )).long().to(device)
chars_seq_lengths = torch.sort(chars_seq_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
chars_seq = tf.convert_to_tensor(chars_seq.cpu().numpy())
chars_seq_lengths = tf.convert_to_tensor(chars_seq_lengths.cpu().numpy())
mel_spec = tf.convert_to_tensor(mel_spec.cpu().numpy())
return chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
stop_targets, speaker_ids
def test_train_step(self):
''' test forward pass '''
chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
stop_targets, speaker_ids = self.generate_dummy_inputs()
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(chars_seq.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5)
# training pass
output = model(chars_seq, chars_seq_lengths, mel_spec, training=True)
# check model output shapes
assert np.all(output[0].shape == mel_spec.shape)
assert np.all(output[1].shape == mel_spec.shape)
assert output[2].shape[2] == chars_seq.shape[1]
assert output[2].shape[1] == (mel_spec.shape[1] // model.decoder.r)
assert output[3].shape[1] == (mel_spec.shape[1] // model.decoder.r)
# inference pass
output = model(chars_seq, training=False)
def test_forward_attention(self,):
chars_seq, chars_seq_lengths, mel_spec, mel_postnet_spec, mel_lengths,\
stop_targets, speaker_ids = self.generate_dummy_inputs()
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(chars_seq.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, forward_attn=True)
# training pass
output = model(chars_seq, chars_seq_lengths, mel_spec, training=True)
# check model output shapes
assert np.all(output[0].shape == mel_spec.shape)
assert np.all(output[1].shape == mel_spec.shape)
assert output[2].shape[2] == chars_seq.shape[1]
assert output[2].shape[1] == (mel_spec.shape[1] // model.decoder.r)
assert output[3].shape[1] == (mel_spec.shape[1] // model.decoder.r)
# inference pass
output = model(chars_seq, training=False)
def test_tflite_conversion(self, ): #pylint:disable=no-self-use
model = Tacotron2(num_chars=24,
num_speakers=0,
r=3,
postnet_output_dim=80,
decoder_output_dim=80,
attn_type='original',
attn_win=False,
attn_norm='sigmoid',
prenet_type='original',
prenet_dropout=True,
forward_attn=False,
trans_agent=False,
forward_attn_mask=False,
location_attn=True,
attn_K=0,
separate_stopnet=True,
bidirectional_decoder=False,
enable_tflite=True)
model.build_inference()
convert_tacotron2_to_tflite(model, output_path='test_tacotron2.tflite', experimental_converter=True)
# init tflite model
tflite_model = load_tflite_model('test_tacotron2.tflite')
# fake input
inputs = tf.random.uniform([1, 4], maxval=10, dtype=tf.int32) #pylint:disable=unexpected-keyword-arg
# run inference
# get input and output details
input_details = tflite_model.get_input_details()
output_details = tflite_model.get_output_details()
# reshape input tensor for the new input shape
tflite_model.resize_tensor_input(input_details[0]['index'], inputs.shape) #pylint:disable=unexpected-keyword-arg
tflite_model.allocate_tensors()
detail = input_details[0]
input_shape = detail['shape']
tflite_model.set_tensor(detail['index'], inputs)
# run the tflite_model
tflite_model.invoke()
# collect outputs
decoder_output = tflite_model.get_tensor(output_details[0]['index'])
postnet_output = tflite_model.get_tensor(output_details[1]['index'])
# remove tflite binary
os.remove('test_tacotron2.tflite')
| 5,947 | 42.101449 | 121 | py |
TTS | TTS-master/tests/test_vocoder_pqmf.py | import os
import torch
import soundfile as sf
from librosa.core import load
from tests import get_tests_path, get_tests_input_path
from TTS.vocoder.layers.pqmf import PQMF
TESTS_PATH = get_tests_path()
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
def test_pqmf():
w, sr = load(WAV_FILE)
layer = PQMF(N=4, taps=62, cutoff=0.15, beta=9.0)
w, sr = load(WAV_FILE)
w2 = torch.from_numpy(w[None, None, :])
b2 = layer.analysis(w2)
w2_ = layer.synthesis(b2)
print(w2_.max())
print(w2_.min())
print(w2_.mean())
sf.write('pqmf_output.wav', w2_.flatten().detach(), sr)
| 626 | 21.392857 | 64 | py |
TTS | TTS-master/tests/test_vocoder_gan_datasets.py | import os
import numpy as np
from tests import get_tests_path, get_tests_input_path, get_tests_output_path
from torch.utils.data import DataLoader
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.vocoder.datasets.gan_dataset import GANDataset
from TTS.vocoder.datasets.preprocess import load_wav_data
file_path = os.path.dirname(os.path.realpath(__file__))
OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/")
os.makedirs(OUTPATH, exist_ok=True)
C = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
test_data_path = os.path.join(get_tests_path(), "data/ljspeech/")
ok_ljspeech = os.path.exists(test_data_path)
def gan_dataset_case(batch_size, seq_len, hop_len, conv_pad, return_segments, use_noise_augment, use_cache, num_workers):
''' run dataloader with given parameters and check conditions '''
ap = AudioProcessor(**C.audio)
_, train_items = load_wav_data(test_data_path, 10)
dataset = GANDataset(ap,
train_items,
seq_len=seq_len,
hop_len=hop_len,
pad_short=2000,
conv_pad=conv_pad,
return_segments=return_segments,
use_noise_augment=use_noise_augment,
use_cache=use_cache)
loader = DataLoader(dataset=dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
drop_last=True)
max_iter = 10
count_iter = 0
# return random segments or return the whole audio
if return_segments:
for item1, _ in loader:
feat1, wav1 = item1
# feat2, wav2 = item2
expected_feat_shape = (batch_size, ap.num_mels, seq_len // hop_len + conv_pad * 2)
# check shapes
assert np.all(feat1.shape == expected_feat_shape), f" [!] {feat1.shape} vs {expected_feat_shape}"
assert (feat1.shape[2] - conv_pad * 2) * hop_len == wav1.shape[2]
# check feature vs audio match
if not use_noise_augment:
for idx in range(batch_size):
audio = wav1[idx].squeeze()
feat = feat1[idx]
mel = ap.melspectrogram(audio)
# the first 2 and the last 2 frames are skipped due to the padding
# differences in stft
assert (feat - mel[:, :feat1.shape[-1]])[:, 2:-2].sum() <= 0, f' [!] {(feat - mel[:, :feat1.shape[-1]])[:, 2:-2].sum()}'
count_iter += 1
# if count_iter == max_iter:
# break
else:
for item in loader:
feat, wav = item
expected_feat_shape = (batch_size, ap.num_mels, (wav.shape[-1] // hop_len) + (conv_pad * 2))
assert np.all(feat.shape == expected_feat_shape), f" [!] {feat.shape} vs {expected_feat_shape}"
assert (feat.shape[2] - conv_pad * 2) * hop_len == wav.shape[2]
count_iter += 1
if count_iter == max_iter:
break
def test_parametrized_gan_dataset():
''' test dataloader with different parameters '''
params = [
[32, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, True, False, True, 0],
[32, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, True, False, True, 4],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, True, True, True, 0],
[1, C.audio['hop_length'], C.audio['hop_length'], 0, True, True, True, 0],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, True, True, True, 0],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, False, True, True, 0],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, True, False, True, 0],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, True, True, False, 0],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 0, False, False, False, 0],
]
for param in params:
print(param)
gan_dataset_case(*param)
| 4,221 | 42.979167 | 140 | py |
TTS | TTS-master/tests/test_tacotron2_model.py | import copy
import os
import unittest
import torch
from tests import get_tests_input_path
from torch import nn, optim
from TTS.tts.layers.losses import MSELossMasked
from TTS.tts.models.tacotron2 import Tacotron2
from TTS.utils.io import load_config
from TTS.utils.audio import AudioProcessor
#pylint: disable=unused-variable
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
c = load_config(os.path.join(get_tests_input_path(), 'test_config.json'))
ap = AudioProcessor(**c.audio)
WAV_FILE = os.path.join(get_tests_input_path(), "example_1.wav")
class TacotronTrainTest(unittest.TestCase):
def test_train_step(self): # pylint: disable=no-self-use
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for i in range(5):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1
class MultiSpeakeTacotronTrainTest(unittest.TestCase):
@staticmethod
def test_train_step():
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_embeddings = torch.rand(8, 55).to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, speaker_embedding_dim=55).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for i in range(5):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1
class TacotronGSTTrainTest(unittest.TestCase):
#pylint: disable=no-self-use
def test_train_step(self):
# with random gst mel style
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens']).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(), model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for i in range(10):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
name, param = name_param
if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
#print(param.grad)
continue
assert (param != param_ref).any(
), "param {} {} with shape {} not updated!! \n{}\n{}".format(
name, count, param.shape, param, param_ref)
count += 1
# with file gst style
mel_spec = torch.FloatTensor(ap.melspectrogram(ap.load_wav(WAV_FILE)))[:, :30].unsqueeze(0).transpose(1, 2).to(device)
mel_spec = mel_spec.repeat(8, 1, 1)
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_ids = torch.randint(0, 5, (8, )).long().to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens']).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(), model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for i in range(10):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_ids)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for name_param, param_ref in zip(model.named_parameters(), model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
name, param = name_param
if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
#print(param.grad)
continue
assert (param != param_ref).any(
), "param {} {} with shape {} not updated!! \n{}\n{}".format(
name, count, param.shape, param, param_ref)
count += 1
class SCGSTMultiSpeakeTacotronTrainTest(unittest.TestCase):
@staticmethod
def test_train_step():
input_dummy = torch.randint(0, 24, (8, 128)).long().to(device)
input_lengths = torch.randint(100, 128, (8, )).long().to(device)
input_lengths = torch.sort(input_lengths, descending=True)[0]
mel_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_postnet_spec = torch.rand(8, 30, c.audio['num_mels']).to(device)
mel_lengths = torch.randint(20, 30, (8, )).long().to(device)
mel_lengths[0] = 30
stop_targets = torch.zeros(8, 30, 1).float().to(device)
speaker_embeddings = torch.rand(8, 55).to(device)
for idx in mel_lengths:
stop_targets[:, int(idx.item()):, 0] = 1.0
stop_targets = stop_targets.view(input_dummy.shape[0],
stop_targets.size(1) // c.r, -1)
stop_targets = (stop_targets.sum(2) > 0.0).unsqueeze(2).float().squeeze()
criterion = MSELossMasked(seq_len_norm=False).to(device)
criterion_st = nn.BCEWithLogitsLoss().to(device)
model = Tacotron2(num_chars=24, r=c.r, num_speakers=5, speaker_embedding_dim=55, gst=True, gst_embedding_dim=c.gst['gst_embedding_dim'], gst_num_heads=c.gst['gst_num_heads'], gst_style_tokens=c.gst['gst_style_tokens'], gst_use_speaker_embedding=c.gst['gst_use_speaker_embedding']).to(device)
model.train()
model_ref = copy.deepcopy(model)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=c.lr)
for i in range(5):
mel_out, mel_postnet_out, align, stop_tokens = model.forward(
input_dummy, input_lengths, mel_spec, mel_lengths, speaker_embeddings=speaker_embeddings)
assert torch.sigmoid(stop_tokens).data.max() <= 1.0
assert torch.sigmoid(stop_tokens).data.min() >= 0.0
optimizer.zero_grad()
loss = criterion(mel_out, mel_spec, mel_lengths)
stop_loss = criterion_st(stop_tokens, stop_targets)
loss = loss + criterion(mel_postnet_out, mel_postnet_spec, mel_lengths) + stop_loss
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for name_param, param_ref in zip(model.named_parameters(),
model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
name, param = name_param
if name == 'gst_layer.encoder.recurrence.weight_hh_l0':
continue
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1 | 14,763 | 49.047458 | 299 | py |
TTS | TTS-master/tests/test_vocoder_melgan_discriminator.py | import numpy as np
import torch
from TTS.vocoder.models.melgan_discriminator import MelganDiscriminator
from TTS.vocoder.models.melgan_multiscale_discriminator import MelganMultiscaleDiscriminator
def test_melgan_discriminator():
model = MelganDiscriminator()
print(model)
dummy_input = torch.rand((4, 1, 256 * 10))
output, _ = model(dummy_input)
assert np.all(output.shape == (4, 1, 10))
def test_melgan_multi_scale_discriminator():
model = MelganMultiscaleDiscriminator()
print(model)
dummy_input = torch.rand((4, 1, 256 * 16))
scores, feats = model(dummy_input)
assert len(scores) == 3
assert len(scores) == len(feats)
assert np.all(scores[0].shape == (4, 1, 64))
assert np.all(feats[0][0].shape == (4, 16, 4096))
assert np.all(feats[0][1].shape == (4, 64, 1024))
assert np.all(feats[0][2].shape == (4, 256, 256))
| 882 | 31.703704 | 92 | py |
TTS | TTS-master/tests/test_speedy_speech_layers.py | import torch
from TTS.tts.layers.speedy_speech.encoder import Encoder
from TTS.tts.layers.speedy_speech.decoder import Decoder
from TTS.tts.layers.speedy_speech.duration_predictor import DurationPredictor
from TTS.tts.utils.generic_utils import sequence_mask
from TTS.tts.models.speedy_speech import SpeedySpeech
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test_encoder():
input_dummy = torch.rand(8, 14, 37).to(device)
input_lengths = torch.randint(31, 37, (8, )).long().to(device)
input_lengths[-1] = 37
input_mask = torch.unsqueeze(
sequence_mask(input_lengths, input_dummy.size(2)), 1).to(device)
# residual bn conv encoder
layer = Encoder(out_channels=11,
in_hidden_channels=14,
encoder_type='residual_conv_bn').to(device)
output = layer(input_dummy, input_mask)
assert list(output.shape) == [8, 11, 37]
# transformer encoder
layer = Encoder(out_channels=11,
in_hidden_channels=14,
encoder_type='transformer',
encoder_params={
'hidden_channels_ffn': 768,
'num_heads': 2,
"kernel_size": 3,
"dropout_p": 0.1,
"num_layers": 6,
"rel_attn_window_size": 4,
"input_length": None
}).to(device)
output = layer(input_dummy, input_mask)
assert list(output.shape) == [8, 11, 37]
def test_decoder():
input_dummy = torch.rand(8, 128, 37).to(device)
input_lengths = torch.randint(31, 37, (8, )).long().to(device)
input_lengths[-1] = 37
input_mask = torch.unsqueeze(
sequence_mask(input_lengths, input_dummy.size(2)), 1).to(device)
# residual bn conv decoder
layer = Decoder(out_channels=11, in_hidden_channels=128).to(device)
output = layer(input_dummy, input_mask)
assert list(output.shape) == [8, 11, 37]
# transformer decoder
layer = Decoder(out_channels=11,
in_hidden_channels=128,
decoder_type='transformer',
decoder_params={
'hidden_channels_ffn': 128,
'num_heads': 2,
"kernel_size": 3,
"dropout_p": 0.1,
"num_layers": 8,
"rel_attn_window_size": 4,
"input_length": None
}).to(device)
output = layer(input_dummy, input_mask)
assert list(output.shape) == [8, 11, 37]
# wavenet decoder
layer = Decoder(out_channels=11,
in_hidden_channels=128,
decoder_type='wavenet',
decoder_params={
"num_blocks": 12,
"hidden_channels": 192,
"kernel_size": 5,
"dilation_rate": 1,
"num_layers": 4,
"dropout_p": 0.05
}).to(device)
output = layer(input_dummy, input_mask)
assert list(output.shape) == [8, 11, 37]
def test_duration_predictor():
input_dummy = torch.rand(8, 128, 27).to(device)
input_lengths = torch.randint(20, 27, (8, )).long().to(device)
input_lengths[-1] = 27
x_mask = torch.unsqueeze(sequence_mask(input_lengths, input_dummy.size(2)),
1).to(device)
layer = DurationPredictor(hidden_channels=128).to(device)
output = layer(input_dummy, x_mask)
assert list(output.shape) == [8, 1, 27]
def test_speedy_speech():
num_chars = 7
B = 8
T_en = 37
T_de = 74
x_dummy = torch.randint(0, 7, (B, T_en)).long().to(device)
x_lengths = torch.randint(31, T_en, (B, )).long().to(device)
x_lengths[-1] = T_en
# set durations. max total duration should be equal to T_de
durations = torch.randint(1, 4, (B, T_en))
durations = durations * (T_de / durations.sum(1)).unsqueeze(1)
durations = durations.to(torch.long).to(device)
max_dur = durations.sum(1).max()
durations[:, 0] += T_de - max_dur if T_de > max_dur else 0
y_lengths = durations.sum(1)
model = SpeedySpeech(num_chars, out_channels=80, hidden_channels=128)
if use_cuda:
model.cuda()
# forward pass
o_de, o_dr, attn = model(x_dummy, x_lengths, y_lengths, durations)
assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
assert list(attn.shape) == [B, T_de, T_en]
assert list(o_dr.shape) == [B, T_en]
# with speaker embedding
model = SpeedySpeech(num_chars,
out_channels=80,
hidden_channels=128,
num_speakers=10,
c_in_channels=256).to(device)
model.forward(x_dummy,
x_lengths,
y_lengths,
durations,
g=torch.randint(0, 10, (B,)).to(device))
assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
assert list(attn.shape) == [B, T_de, T_en]
assert list(o_dr.shape) == [B, T_en]
# with speaker external embedding
model = SpeedySpeech(num_chars,
out_channels=80,
hidden_channels=128,
num_speakers=10,
external_c=True,
c_in_channels=256).to(device)
model.forward(x_dummy,
x_lengths,
y_lengths,
durations,
g=torch.rand((B,256)).to(device))
assert list(o_de.shape) == [B, 80, T_de], f"{list(o_de.shape)}"
assert list(attn.shape) == [B, T_de, T_en]
assert list(o_dr.shape) == [B, T_en] | 5,880 | 34.005952 | 79 | py |
TTS | TTS-master/tests/test_vocoder_parallel_wavegan_generator.py | import numpy as np
import torch
from TTS.vocoder.models.parallel_wavegan_generator import ParallelWaveganGenerator
def test_pwgan_generator():
model = ParallelWaveganGenerator(
in_channels=1,
out_channels=1,
kernel_size=3,
num_res_blocks=30,
stacks=3,
res_channels=64,
gate_channels=128,
skip_channels=64,
aux_channels=80,
dropout=0.0,
bias=True,
use_weight_norm=True,
upsample_factors=[4, 4, 4, 4])
dummy_c = torch.rand((2, 80, 5))
output = model(dummy_c)
assert np.all(output.shape == (2, 1, 5 * 256)), output.shape
model.remove_weight_norm()
output = model.inference(dummy_c)
assert np.all(output.shape == (2, 1, (5 + 4) * 256))
| 767 | 26.428571 | 82 | py |
TTS | TTS-master/tests/test_wavegrad_train.py | import unittest
import numpy as np
import torch
from torch import optim
from TTS.vocoder.models.wavegrad import Wavegrad
#pylint: disable=unused-variable
torch.manual_seed(1)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class WavegradTrainTest(unittest.TestCase):
def test_train_step(self): # pylint: disable=no-self-use
"""Test if all layers are updated in a basic training cycle"""
input_dummy = torch.rand(8, 1, 20 * 300).to(device)
mel_spec = torch.rand(8, 80, 20).to(device)
criterion = torch.nn.L1Loss().to(device)
model = Wavegrad(in_channels=80,
out_channels=1,
upsample_factors=[5, 5, 3, 2, 2],
upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2],
[1, 2, 4, 8], [1, 2, 4, 8],
[1, 2, 4, 8]])
model_ref = Wavegrad(in_channels=80,
out_channels=1,
upsample_factors=[5, 5, 3, 2, 2],
upsample_dilations=[[1, 2, 1, 2], [1, 2, 1, 2],
[1, 2, 4, 8], [1, 2, 4, 8],
[1, 2, 4, 8]])
model.train()
model.to(device)
betas = np.linspace(1e-6, 1e-2, 1000)
model.compute_noise_level(betas)
model_ref.load_state_dict(model.state_dict())
model_ref.to(device)
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
assert (param - param_ref).sum() == 0, param
count += 1
optimizer = optim.Adam(model.parameters(), lr=0.001)
for i in range(5):
y_hat = model.forward(input_dummy, mel_spec, torch.rand(8).to(device))
optimizer.zero_grad()
loss = criterion(y_hat, input_dummy)
loss.backward()
optimizer.step()
# check parameter changes
count = 0
for param, param_ref in zip(model.parameters(),
model_ref.parameters()):
# ignore pre-higway layer since it works conditional
# if count not in [145, 59]:
assert (param != param_ref).any(
), "param {} with shape {} not updated!! \n{}\n{}".format(
count, param.shape, param, param_ref)
count += 1
| 2,498 | 38.666667 | 82 | py |
TTS | TTS-master/tests/test_vocoder_wavernn.py | import numpy as np
import torch
import random
from TTS.vocoder.models.wavernn import WaveRNN
def test_wavernn():
model = WaveRNN(
rnn_dims=512,
fc_dims=512,
mode=10,
mulaw=False,
pad=2,
use_aux_net=True,
use_upsample_net=True,
upsample_factors=[4, 8, 8],
feat_dims=80,
compute_dims=128,
res_out_dims=128,
num_res_blocks=10,
hop_length=256,
sample_rate=22050,
)
dummy_x = torch.rand((2, 1280))
dummy_m = torch.rand((2, 80, 9))
y_size = random.randrange(20, 60)
dummy_y = torch.rand((80, y_size))
output = model(dummy_x, dummy_m)
assert np.all(output.shape == (2, 1280, 4 * 256)), output.shape
output = model.inference(dummy_y, True, 5500, 550)
assert np.all(output.shape == (256 * (y_size - 1),))
| 850 | 25.59375 | 67 | py |
TTS | TTS-master/tests/test_vocoder_wavernn_datasets.py | import os
import shutil
import numpy as np
from tests import get_tests_path, get_tests_input_path, get_tests_output_path
from torch.utils.data import DataLoader
from TTS.utils.audio import AudioProcessor
from TTS.utils.io import load_config
from TTS.vocoder.datasets.wavernn_dataset import WaveRNNDataset
from TTS.vocoder.datasets.preprocess import load_wav_feat_data, preprocess_wav_files
file_path = os.path.dirname(os.path.realpath(__file__))
OUTPATH = os.path.join(get_tests_output_path(), "loader_tests/")
os.makedirs(OUTPATH, exist_ok=True)
C = load_config(os.path.join(get_tests_input_path(),
"test_vocoder_wavernn_config.json"))
test_data_path = os.path.join(get_tests_path(), "data/ljspeech/")
test_mel_feat_path = os.path.join(test_data_path, "mel")
test_quant_feat_path = os.path.join(test_data_path, "quant")
ok_ljspeech = os.path.exists(test_data_path)
def wavernn_dataset_case(batch_size, seq_len, hop_len, pad, mode, mulaw, num_workers):
""" run dataloader with given parameters and check conditions """
ap = AudioProcessor(**C.audio)
C.batch_size = batch_size
C.mode = mode
C.seq_len = seq_len
C.data_path = test_data_path
preprocess_wav_files(test_data_path, C, ap)
_, train_items = load_wav_feat_data(
test_data_path, test_mel_feat_path, 5)
dataset = WaveRNNDataset(ap=ap,
items=train_items,
seq_len=seq_len,
hop_len=hop_len,
pad=pad,
mode=mode,
mulaw=mulaw
)
# sampler = DistributedSampler(dataset) if num_gpus > 1 else None
loader = DataLoader(dataset,
shuffle=True,
collate_fn=dataset.collate,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=True,
)
max_iter = 10
count_iter = 0
try:
for data in loader:
x_input, mels, _ = data
expected_feat_shape = (ap.num_mels,
(x_input.shape[-1] // hop_len) + (pad * 2))
assert np.all(
mels.shape[1:] == expected_feat_shape), f" [!] {mels.shape} vs {expected_feat_shape}"
assert (mels.shape[2] - pad * 2) * hop_len == x_input.shape[1]
count_iter += 1
if count_iter == max_iter:
break
# except AssertionError:
# shutil.rmtree(test_mel_feat_path)
# shutil.rmtree(test_quant_feat_path)
finally:
shutil.rmtree(test_mel_feat_path)
shutil.rmtree(test_quant_feat_path)
def test_parametrized_wavernn_dataset():
''' test dataloader with different parameters '''
params = [
[16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 10, True, 0],
[16, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, "mold", False, 4],
[1, C.audio['hop_length'] * 10, C.audio['hop_length'], 2, 9, False, 0],
[1, C.audio['hop_length'], C.audio['hop_length'], 2, 10, True, 0],
[1, C.audio['hop_length'], C.audio['hop_length'], 2, "mold", False, 0],
[1, C.audio['hop_length'] * 5, C.audio['hop_length'], 4, 10, False, 2],
[1, C.audio['hop_length'] * 5, C.audio['hop_length'], 2, "mold", False, 0],
]
for param in params:
print(param)
wavernn_dataset_case(*param)
| 3,538 | 37.053763 | 101 | py |
ssqueezepy | ssqueezepy-master/tests/fft_test.py | # -*- coding: utf-8 -*-
"""Fast Fourier Transform, CPU parallelization, and GPU execution tests:
- multi-thread CPU & GPU outputs match that of single-thread CPU
- batched (multi-input) outputs match single for-looped
- `ssqueezepy.FFT` outputs match `scipy`'s
- unified synchrosqueezing pipelines outputs match that of v0.6.0
Note that GPU tests are skipped in CI (Travis), and are instead done locally.
"""
import os
import pytest
import warnings
import numpy as np
from scipy.fft import fft as sfft, rfft as srfft, ifft as sifft, irfft as sirfft
from scipy.fft import ifftshift
import ssqueezepy
from ssqueezepy import TestSignals, Wavelet, ssq_stft, ssq_cwt
from ssqueezepy import fft, rfft, ifft, irfft, cwt
from ssqueezepy.algos import indexed_sum, indexed_sum_onfly, ssqueeze_fast
from ssqueezepy.algos import phase_cwt_cpu, phase_cwt_gpu, replace_under_abs
from ssqueezepy.algos import phase_stft_gpu, phase_stft_cpu
from ssqueezepy.configs import gdefaults
from ssqueezepy.utils import process_scales, buffer
# no visuals here but 1 runs as regular script instead of pytest, for debugging
VIZ = 0
try:
import torch
torch.tensor(1, device='cuda')
CAN_GPU = True
except:
CAN_GPU = False
warnings.warn("SKIPPED TESTS in `fft_test.py`, GPU not found.")
def _wavelet(name='gmw', **kw):
return Wavelet((name, kw))
def test_1D():
os.environ['SSQ_GPU'] = '0'
for N in (128, 129):
x = np.random.randn(N)
xf = x[:N//2 + 1] * (1 + 2j)
souts = dict(fft=sfft(x), rfft=srfft(x), ifft=sifft(xf),
irfft1=sirfft(xf), irfft2=sirfft(xf, n=len(x)))
for patience in (0, (1, 1), (2, 1)):
qouts = dict(
fft=fft(x, patience=patience),
rfft=rfft(x, patience=patience),
ifft=ifft(xf, patience=patience),
irfft1=irfft(xf, patience=patience),
irfft2=irfft(xf, patience=patience, n=len(x)),
)
for name, qout in qouts.items():
assert np.allclose(qout, souts[name]), (
"{}: N={}, patience={}".format(name, N, patience))
def test_2D():
os.environ['SSQ_GPU'] = '0'
for N in (128, 129):
for M in (64, 65):
for axis in (0, 1):
x = np.random.randn(N, M)
if axis == 0:
xf = x[:N//2 + 1] * (1 + 2j)
else:
xf = x[:, :M//2 + 1] * (1 + 2j)
souts = dict(
fft=sfft(x, axis=axis),
rfft=srfft(x, axis=axis),
ifft=sifft(xf, axis=axis),
irfft1=sirfft(xf, axis=axis),
irfft2=sirfft(xf, axis=axis, n=x.shape[axis]),
)
for patience in (0, (1, .5), (2, .5)):
kw = dict(axis=axis, patience=patience)
qouts = dict(
fft=fft(x, **kw),
rfft=rfft(x, **kw),
ifft=ifft(xf, **kw),
irfft1=irfft(xf, **kw),
irfft2=irfft(xf, **kw, n=x.shape[axis]),
)
for name, qout in qouts.items():
assert np.allclose(qout, souts[name]), (
"{}: (N, M)=({}, {}), patience={}".format(
name, N, M, patience))
def test_exhaustive():
"""Ensure exhaustive case works."""
os.environ['SSQ_GPU'] = '0'
fft(np.random.randn(4), patience=(2, None))
def test_indexed_sum():
os.environ['SSQ_GPU'] = '0'
Wx = np.random.randn(1000, 1000).astype('complex64')
k = np.random.randint(0, len(Wx), Wx.shape)
out1 = indexed_sum(Wx, k, parallel=False)
out2 = indexed_sum(Wx, k, parallel=True)
assert np.allclose(out1, out2), "MAE: %s" % np.mean(np.abs(out1 - out2))
def test_parallel_setting():
"""Assert
1. ssqueezepy is parallel by default
2. `configs.ini` includes parallel config
3. os.environ flag overrides `configs.ini`
"""
os.environ['SSQ_GPU'] = '0'
assert ssqueezepy.IS_PARALLEL()
parallel = gdefaults('configs.IS_PARALLEL', parallel=None)
assert parallel is not None
assert parallel == 1
os.environ['SSQ_PARALLEL'] = '0'
try:
assert not ssqueezepy.IS_PARALLEL()
except AssertionError:
raise AssertionError()
finally:
# ensure `os.environ` is cleaned even if assert fails
os.environ.pop('SSQ_PARALLEL')
def _noninf_mean(x):
x[np.isinf(x) | np.isnan(x)] = 0
return x.mean()
def test_phase_cwt():
os.environ['SSQ_GPU'] = '0'
x = TestSignals(N=1000).par_lchirp()[0]
x += x[::-1]
wavelet = Wavelet()
scales = process_scales('log', len(x), wavelet, nv=32)[:240]
Wx, _, dWx = cwt(x, wavelet, scales=scales, derivative=True, cache_wavelet=1)
for dtype in ('complex128', 'complex64'):
# Wx = np.random.randn(100, 8192).astype(dtype) * (1 + 2j)
# dWx = np.random.randn(100, 8192).astype(dtype) * (2 - 1j)
Wx, dWx = Wx.astype(dtype), dWx.astype(dtype)
if CAN_GPU:
Wxt = torch.tensor(Wx, device='cuda')
dWxt = torch.tensor(dWx, device='cuda')
gamma = 1e-2
_out = (dWx / Wx).imag / (2 * np.pi)
_out[np.abs(Wx) < gamma] = np.inf
_out = np.abs(_out)
out0 = phase_cwt_cpu(Wx, dWx, gamma, parallel=False)
out1 = phase_cwt_cpu(Wx, dWx, gamma, parallel=True)
if CAN_GPU:
out2 = phase_cwt_gpu(Wxt, dWxt, gamma).cpu().numpy()
with np.errstate(invalid='ignore'):
mape0_ = _noninf_mean(np.abs(_out - out0) / np.abs(_out))
mape01 = _noninf_mean(np.abs(out0 - out1) / np.abs(out0))
if CAN_GPU:
mape02 = _noninf_mean(np.abs(out0 - out2) / np.abs(out0))
assert np.allclose(out0, _out), ("base", dtype, mape0_)
assert np.allclose(out0, out1), ("parallel", dtype, mape01)
if CAN_GPU:
assert np.allclose(out0, out2), ("gpu", dtype, mape02)
def test_phase_stft():
atol = 1e-7
np.random.seed(0)
for dtype in ('float64', 'float32'):
Wx = np.random.randn(100, 1028).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 1028).astype(dtype) * (2 - 1j)
Sfs = np.linspace(0, .5, len(Wx)).astype(dtype)
if CAN_GPU:
Wxt = torch.as_tensor(Wx, device='cuda')
dWxt = torch.as_tensor(dWx, device='cuda')
Sfst = torch.as_tensor(Sfs, device='cuda')
gamma = 1e-2
_out = Sfs[:, None] - (dWx / Wx).imag / (2*np.pi)
_out[np.abs(Wx) < gamma] = np.inf
_out = np.abs(_out)
out0 = phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=False)
out1 = phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=True)
if CAN_GPU:
out2 = phase_stft_gpu(Wxt, dWxt, Sfst, gamma).cpu().numpy()
with np.errstate(invalid='ignore'):
mape0_ = _noninf_mean(np.abs(_out - out0) / np.abs(_out))
mape01 = _noninf_mean(np.abs(out0 - out1) / np.abs(out0))
if CAN_GPU:
mape02 = _noninf_mean(np.abs(out0 - out2) / np.abs(out0))
assert np.allclose(out0, _out, atol=atol), ("base", dtype, mape0_)
assert np.allclose(out0, out1, atol=atol), ("parallel", dtype, mape01)
if CAN_GPU:
assert np.allclose(out0, out2, atol=atol), ("gpu", dtype, mape02)
def test_replace_under_abs():
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
w0 = np.random.randn(100, 200).astype(dtype)
Wx = np.random.randn(100, 200).astype(dtype) * (2 - 1j)
w1 = w0.copy()
if CAN_GPU:
wt = torch.tensor(w0, device='cuda')
Wxt = torch.tensor(Wx, device='cuda')
replace_under_abs(w0, Wx, gamma, np.inf, parallel=False)
replace_under_abs(w1, Wx, gamma, np.inf, parallel=True)
if CAN_GPU:
replace_under_abs(wt, Wxt, gamma, np.inf)
wt = wt.cpu().numpy()
assert np.allclose(w0, w1), ("parallel", dtype)
if CAN_GPU:
assert np.allclose(w0, wt), ("gpu", dtype)
def _make_ssq_freqs(M, scaletype):
if scaletype == 'log-piecewise':
sf = np.logspace(0, np.log10(M), 2*M)
sf1 = sf[:M//2]
sf2 = sf[M//2 + 3 - 1::3]
ssq_freqs = np.hstack([sf1, sf2])
elif scaletype == 'log':
ssq_freqs = np.logspace(0, np.log10(M), M)
elif scaletype == 'linear':
ssq_freqs = np.linspace(0, M, M)
return ssq_freqs
def test_indexed_sum_onfly():
np.random.seed(0)
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
w = np.abs(np.random.randn(*Wx.shape).astype(dtype))
w *= (2*len(Wx) / w.max())
if CAN_GPU:
Wxt, wt = [torch.tensor(g, device='cuda') for g in (Wx, w)]
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if 1 else
ssq_freqs)
out0 = indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale,
flipud=flipud, parallel=False)
out1 = indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale,
flipud=flipud, parallel=True)
if CAN_GPU:
out2 = indexed_sum_onfly(Wxt, wt, ssq_freqs, const, ssq_logscale,
flipud=flipud).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_cwt():
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
if CAN_GPU:
Wxt, dWxt = [torch.tensor(g, device='cuda') for g in (Wx, dWx)]
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if ssq_logscale else
ssq_freqs)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(flipud=flipud, gamma=gamma)
out0 = ssqueeze_fast(Wx, dWx, *args, **kw, parallel=False)
out1 = ssqueeze_fast(Wx, dWx, *args, **kw, parallel=True)
if CAN_GPU:
out2 = ssqueeze_fast(Wxt, dWxt, *args, **kw).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_stft():
np.random.seed(0)
scaletype = 'linear'
ssq_logscale = False
gamma = 1e-2
const = np.log(2) / 32
for dtype in ('float32', 'float64'):
Sx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dSx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
if CAN_GPU:
Sxt, dSxt = [torch.tensor(g, device='cuda') for g in (Sx, dSx)]
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Sx), scaletype)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(flipud=flipud, gamma=gamma)
out0 = ssqueeze_fast(Sx, dSx, *args, **kw, parallel=False)
out1 = ssqueeze_fast(Sx, dSx, *args, **kw, parallel=True)
if CAN_GPU:
out2 = ssqueeze_fast(Sxt, dSxt, *args, **kw).cpu().numpy()
adiff01 = np.abs(out0 - out1).mean()
if CAN_GPU:
adiff02 = np.abs(out0 - out2).mean()
# this is due to `const` varying rather than 'linear'
th = (1e-16 if dtype == 'float64' else 1e-8)
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
if CAN_GPU:
assert adiff02 < th, (scaletype, dtype, flipud, adiff02)
def test_ssqueeze_vs_indexed_sum():
"""Computing `Tx` in one loop vs. first computing `w` then summing."""
np.random.seed(0)
gamma = 1e-2
for dtype in ('float32', 'float64'):
Wx = np.random.randn(100, 512).astype(dtype) * (1 + 2j)
dWx = np.random.randn(100, 512).astype(dtype) * (2 - 1j)
w = np.abs((dWx / Wx).imag / (2*np.pi))
w[np.abs(Wx) < gamma] = np.inf
for scaletype in ('log-piecewise', 'log', 'linear'):
for flipud in (False, True):
ssq_freqs = _make_ssq_freqs(len(Wx), scaletype)
ssq_logscale = scaletype.startswith('log')
const = (np.log(2) / 32 if ssq_logscale else
ssq_freqs)
args = (ssq_freqs, const, ssq_logscale)
kw = dict(parallel=False, flipud=flipud)
out0 = indexed_sum_onfly(Wx, w, *args, **kw)
out1 = ssqueeze_fast(Wx, dWx, *args, **kw, gamma=gamma)
adiff01 = np.abs(out0 - out1).mean()
# this is due to `const` varying rather than 'linear'
th = ((1e-16 if dtype == 'float64' else 1e-8) if ssq_logscale else
(1e-13 if dtype == 'float64' else 1e-5))
assert adiff01 < th, (scaletype, dtype, flipud, adiff01)
def test_buffer():
"""Test that CPU & GPU outputs match for `modulated=True` & `=False`,
and that `modulated=True` matches `ifftshift(buffer(modulated=False))`.
Also that single- & multi-thread CPU outputs agree.
Test both single and batched input.
"""
N = 128
tsigs = TestSignals(N=N)
for dtype in ('float64', 'float32'):
for ndim in (1, 2):
x = (tsigs.cosine()[0].astype(dtype) if ndim == 1 else
np.random.randn(4, N))
xt = torch.as_tensor(x, device='cuda') if CAN_GPU else 0
for modulated in (False, True):
for seg_len in (N//2, N//2 - 1):
for n_overlap in (N//2 - 1, N//2 - 2, N//2 - 3):
if seg_len == n_overlap:
continue
out0 = buffer(x, seg_len, n_overlap, modulated, parallel=True)
if modulated:
out00 = buffer(x, seg_len, n_overlap, modulated=False,
parallel=False)
out00 = ifftshift(out00, axes=0 if ndim == 1 else 1)
if CAN_GPU:
out1 = buffer(xt, seg_len, n_overlap, modulated).cpu().numpy()
assert_params = (dtype, modulated, seg_len, n_overlap)
if modulated:
adiff000 = np.abs(out0 - out00).mean()
assert adiff000 == 0, (*assert_params, adiff000)
if CAN_GPU:
adiff01 = np.abs(out0 - out1).mean()
assert adiff01 == 0, (*assert_params, adiff01)
def test_ssq_stft():
N = 256
tsigs = TestSignals(N=N)
gpu_atol = 1e-5
for dtype in ('float64', 'float32'):
x = tsigs.par_lchirp()[0].astype(dtype)
kw = dict(modulated=1, n_fft=128, dtype=dtype, astensor=False)
os.environ['SSQ_GPU'] = '0'
Tx00 = ssq_stft(x, **kw, get_w=1)[0]
Tx01 = ssq_stft(x, **kw, get_w=0)[0]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx10 = ssq_stft(x, **kw, get_w=1)[0]
Tx11 = ssq_stft(x, **kw, get_w=0)[0]
adiff0001 = np.abs(Tx00 - Tx01).mean()
assert np.allclose(Tx00, Tx01), (dtype, adiff0001)
if CAN_GPU:
adiff0010 = np.abs(Tx00 - Tx10).mean()
adiff0011 = np.abs(Tx00 - Tx11).mean()
assert np.allclose(Tx00, Tx10, atol=gpu_atol), (dtype, adiff0010)
assert np.allclose(Tx00, Tx11, atol=gpu_atol), (dtype, adiff0011)
def test_ssq_cwt():
N = 256
tsigs = TestSignals(N=N)
for dtype in ('float64', 'float32'):
gpu_atol = 1e-8 if dtype == 'float64' else 6e-3
x = tsigs.par_lchirp()[0].astype(dtype)
kw = dict(astensor=False)
os.environ['SSQ_GPU'] = '0'
Tx00 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=1)[0]
Tx01 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=0)[0]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx10 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=1)[0]
Tx11 = ssq_cwt(x, _wavelet(dtype=dtype), **kw, get_w=0)[0]
adiff0001 = np.abs(Tx00 - Tx01).mean()
if dtype == 'float64':
assert np.allclose(Tx00, Tx01), (dtype, adiff0001)
else:
assert adiff0001 < 4e-5, (dtype, adiff0001)
if CAN_GPU:
adiff0010 = np.abs(Tx00 - Tx10).mean()
adiff0011 = np.abs(Tx00 - Tx11).mean()
assert np.allclose(Tx00, Tx10, atol=gpu_atol), (dtype, adiff0010)
assert np.allclose(Tx00, Tx11, atol=gpu_atol), (dtype, adiff0011)
os.environ['SSQ_GPU'] = '0'
def test_wavelet_dtype_gmw():
"""Ensure `Wavelet.fn` output is of specified `dtype` for GMW wavelet,
and that `.info()` is computable.
"""
for SSQ_GPU in ('0', '1'):
if SSQ_GPU == '1' and not CAN_GPU:
continue
for order in (0, 1):
for norm in ('bandpass', 'energy'):
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = SSQ_GPU
kw = dict(order=order, norm=norm, dtype=dtype)
wavelet = _wavelet('gmw', **kw)
if norm == 'energy':
dtype = 'float64'
assert wavelet.dtype == dtype, (
"GPU={}, order={}, norm={}, dtype={}, wavelet.dtype={}".format(
SSQ_GPU, order, norm, dtype, wavelet.dtype))
wavelet.info()
os.environ['SSQ_GPU'] = '0'
def test_wavelet_dtype():
"""Ensure `Wavelet.fn` output is of specified `dtype` for non-GMW wavelets,
and that `.info()` is computable.
"""
for SSQ_GPU in ('0', '1'):
if SSQ_GPU == '1' and not CAN_GPU:
continue
for name in ('morlet', 'bump', 'cmhat', 'hhhat'):
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = SSQ_GPU
wavelet = _wavelet(name, dtype=dtype)
assert wavelet.dtype == dtype, (
"GPU={}, name={}, dtype={}, wavelet.dtype={}".format(
SSQ_GPU, name, dtype, wavelet.dtype))
wavelet.info()
os.environ['SSQ_GPU'] = '0'
def test_higher_order():
"""`cwt` & `ssq_cwt` CPU & GPU outputs agreement."""
if not CAN_GPU:
return
tsigs = TestSignals(N=256)
x = tsigs.par_lchirp()[0]
x += x[::-1]
kw = dict(order=range(3), astensor=False)
for dtype in ('float32', 'float64'):
os.environ['SSQ_GPU'] = '0'
Tx0, Wx0, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
os.environ['SSQ_GPU'] = '1'
Tx1, Wx1, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
adiff_Tx = np.abs(Tx0 - Tx1).mean()
adiff_Wx = np.abs(Wx0 - Wx1).mean()
# less should be possible for float64, but didn't investigate
th = 2e-7 if dtype == 'float64' else 1e-4
assert adiff_Tx < th, (dtype, adiff_Tx, th)
assert adiff_Wx < th, (dtype, adiff_Wx, th)
os.environ['SSQ_GPU'] = '0'
def test_cwt_for_loop():
"""Ensure `vectorized=False` runs on GPU and outputs match `=True`."""
if not CAN_GPU:
return
np.random.seed(0)
x = np.random.randn(256)
kw = dict(derivative=True, astensor=False)
os.environ['SSQ_GPU'] = '1'
for dtype in ('float64', 'float32'):
Wx0, _, dWx0 = cwt(x, _wavelet(dtype=dtype), vectorized=False, **kw)
Wx1, _, dWx1 = cwt(x, _wavelet(dtype=dtype), vectorized=True, **kw)
adiff_Wx = np.abs(Wx0 - Wx1)
adiff_dWx = np.abs(dWx0 - dWx1)
atol = 1e-12 if dtype == 'float64' else 1e-6
assert np.allclose(Wx0, Wx1, atol=atol), (dtype, adiff_Wx.mean())
assert np.allclose(dWx0, dWx1, atol=atol), (dtype, adiff_dWx.mean())
def test_ssq_cwt_batched():
"""Ensure batched (2D `x`) inputs output same as if samples fed separately,
and agreement between CPU & GPU.
"""
np.random.seed(0)
x = np.random.randn(4, 256)
kw = dict(astensor=False)
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = '0'
Tx0, Wx0, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
Tx00 = np.zeros(Tx0.shape, dtype=Tx0.dtype)
Wx00 = Tx00.copy()
for i, _x in enumerate(x):
out = ssq_cwt(_x, _wavelet(dtype=dtype), **kw)
Tx00[i], Wx00[i] = out[0], out[1]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx1, Wx1, *_ = ssq_cwt(x, _wavelet(dtype=dtype), **kw)
atol = 1e-12 if dtype == 'float64' else 1e-2
adiff_Tx000 = np.abs(Tx00 - Tx0).mean()
adiff_Wx000 = np.abs(Wx00 - Wx0).mean()
assert np.allclose(Wx00, Wx0), (dtype, adiff_Wx000)
assert np.allclose(Tx00, Tx0), (dtype, adiff_Tx000)
if CAN_GPU:
adiff_Tx01 = np.abs(Tx0 - Tx1).mean()
adiff_Wx01 = np.abs(Wx0 - Wx1).mean()
assert np.allclose(Wx0, Wx1, atol=atol), (dtype, adiff_Wx01)
assert np.allclose(Tx0, Tx1, atol=atol), (dtype, adiff_Tx01)
# didn't investigate float32, and `allclose` threshold is pretty bad,
# so check MAE
if dtype == 'float32':
assert adiff_Tx01 < 2.5e-5, (dtype, adiff_Tx01)
def test_ssq_stft_batched():
"""Ensure batched (2D `x`) inputs output same as if samples fed separately,
and agreement between CPU & GPU.
"""
np.random.seed(0)
x = np.random.randn(4, 256)
for dtype in ('float64', 'float32'):
os.environ['SSQ_GPU'] = '0'
kw = dict(astensor=False, dtype=dtype)
Tx0, Sx0, *_ = ssq_stft(x, **kw)
Tx00 = np.zeros(Tx0.shape, dtype=Tx0.dtype)
Sx00 = Tx00.copy()
for i, _x in enumerate(x):
out = ssq_stft(_x, **kw)
Tx00[i], Sx00[i] = out[0], out[1]
if CAN_GPU:
os.environ['SSQ_GPU'] = '1'
Tx1, Sx1, *_ = ssq_stft(x, **kw)
atol = 1e-12 if dtype == 'float64' else 1e-6
adiff_Tx000 = np.abs(Tx00 - Tx0).mean()
adiff_Sx000 = np.abs(Sx00 - Sx0).mean()
assert np.allclose(Sx00, Sx0), (dtype, adiff_Sx000)
assert np.allclose(Tx00, Tx0), (dtype, adiff_Tx000)
if CAN_GPU:
adiff_Tx01 = np.abs(Tx0 - Tx1)
adiff_Sx01 = np.abs(Sx0 - Sx1)
assert np.allclose(Sx0, Sx1, atol=atol), (dtype, adiff_Sx01)
assert np.allclose(Tx0, Tx1, atol=atol), (dtype, adiff_Tx01)
def test_cwt_batched_for_loop():
"""Ensure basic batched cwt works with both `vectorized`."""
os.environ['SSQ_GPU'] = '0'
np.random.seed(0)
x = np.random.randn(4, 256)
for dtype in ('float64', 'float32'):
Wx0, *_ = cwt(x, _wavelet(dtype=dtype), vectorized=True)
Wx1, *_ = cwt(x, _wavelet(dtype=dtype), vectorized=False)
adiff_Wx01 = np.abs(Wx0 - Wx1)
assert np.allclose(Wx0, Wx1), (dtype, adiff_Wx01.mean())
if __name__ == '__main__':
if VIZ:
test_1D()
test_2D()
test_indexed_sum()
test_parallel_setting()
test_phase_cwt()
test_phase_stft()
test_replace_under_abs()
test_indexed_sum_onfly()
test_ssqueeze_cwt()
test_ssqueeze_stft()
test_ssqueeze_vs_indexed_sum()
test_buffer()
test_ssq_stft()
test_ssq_cwt()
test_wavelet_dtype_gmw()
test_wavelet_dtype()
test_higher_order()
test_cwt_for_loop()
test_ssq_cwt_batched()
test_ssq_stft_batched()
test_cwt_batched_for_loop()
else:
pytest.main([__file__, "-s"])
| 24,286 | 35.195231 | 81 | py |
ssqueezepy | ssqueezepy-master/tests/test_signals_test.py | # -*- coding: utf-8 -*-
"""Test ssqueezepy/_test_signals.py"""
import os
import pytest
import warnings
import numpy as np
import scipy.signal as sig
from ssqueezepy import Wavelet, TestSignals
from ssqueezepy.utils import window_resolution
VIZ = 0
os.environ['SSQ_GPU'] = '0' # in case concurrent tests set it to '1'
try:
import torch
torch.tensor(1, device='cuda')
CAN_GPU = True
except:
CAN_GPU = False
warnings.warn("SKIPPED TESTS in `test_signals_test.py`, GPU not found.")
def test_demo():
tsigs = TestSignals(N=256)
dft = (None, 'rows', 'cols')[0]
tsigs.demo(dft=dft)
signals = [
'am-cosine',
('hchirp', dict(fmin=.2)),
('sine:am-cosine', (dict(f=32, phi0=1), dict(amin=.3))),
]
tsigs.demo(signals, N=256)
tsigs.demo(signals, dft='rows')
tsigs.demo(signals, dft='cols')
def test_wavcomp():
os.environ['SSQ_GPU'] = '0'
tsigs = TestSignals(N=256)
wavelets = [Wavelet(('gmw', {'beta': 5})),
Wavelet(('gmw', {'beta': 22})),
]
tsigs.wavcomp(wavelets)
# test name-param pair, and ability to auto-set `N`
N_all = [256, None]
signals_all = [[('#echirp', dict(fmin=.1))],
[('lchirp', dict(fmin=1, fmax=60, tmin=0, tmax=5))]]
for N, signals in zip(N_all, signals_all):
tsigs.wavcomp(wavelets, signals=signals, N=N)
def test_cwt_vs_stft():
os.environ['SSQ_GPU'] = '0'
# (N, beta, NW): (512, 42.5, 255); (256, 21.5, 255)
N = 256#512
signals = 'all'
snr = 5
n_fft = N
win_len = n_fft#//2
tsigs = TestSignals(N=N, snr=snr)
wavelet = Wavelet(('GMW', {'beta': 21.5}))
NW = win_len//2 - 1
window = np.abs(sig.windows.dpss(win_len, NW))
# window = np.pad(window, win_len//2)
window_name = 'DPSS'
config_str = '\nNW=%s' % NW
# ensure `wavelet` and `window` have ~same time & frequency resolutions
# TODO make function to auto-find matching wavelet given window & vice versa
print("std_w, std_t, harea\nwavelet: {:.4f}, {:.4f}, {:.8f}"
"\nwindow: {:.4f}, {:.4f}, {:.8f}".format(
wavelet.std_w, wavelet.std_t, wavelet.harea,
*window_resolution(window)))
tsigs.cwt_vs_stft(wavelet, window, signals=signals, N=N, win_len=win_len,
n_fft=n_fft, window_name=window_name, config_str=config_str)
def test_ridgecomp():
os.environ['SSQ_GPU'] = '0'
N = 256
n_ridges = 3
penalty = 25
signals = 'poly-cubic'
tsigs = TestSignals(N=N)
kw = dict(N=N, signals=signals, n_ridges=n_ridges, penalty=penalty)
tsigs.ridgecomp(transform='cwt', **kw)
tsigs.ridgecomp(transform='stft', **kw)
def test_gpu():
"""Test that TestSignals can run on GPU."""
if not CAN_GPU:
return
N = 256
tsigs = TestSignals(N=N)
window = np.abs(sig.windows.dpss(N, N//2 - 1))
signals = 'par-lchirp'
os.environ['SSQ_GPU'] = '1'
wavelet = Wavelet()
tsigs.cwt_vs_stft(wavelet, window, signals=signals, N=N)
os.environ['SSQ_GPU'] = '0'
if __name__ == '__main__':
if VIZ:
test_demo()
test_wavcomp()
test_cwt_vs_stft()
test_ridgecomp()
test_gpu()
else:
pytest.main([__file__, "-s"])
| 3,282 | 26.358333 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/_cwt.py | # -*- coding: utf-8 -*-
import numpy as np
from .utils import fft, ifft, ifftshift, FFT_GLOBAL
from .utils import WARN, adm_cwt, adm_ssq, _process_fs_and_t
from .utils import padsignal, process_scales, logscale_transition_idx
from .utils import backend as S
from .utils.backend import Q
from .algos import replace_at_inf_or_nan
from .wavelets import Wavelet
def cwt(x, wavelet='gmw', scales='log-piecewise', fs=None, t=None, nv=32,
l1_norm=True, derivative=False, padtype='reflect', rpadded=False,
vectorized=True, astensor=True, cache_wavelet=None, order=0, average=None,
nan_checks=None, patience=0):
"""Continuous Wavelet Transform, discretized, as described in
Sec. 4.3.3 of [1] and Sec. IIIA of [2]. Uses FFT convolution via frequency-
domain wavelets matching (padded) input's length.
Uses `Wavelet.dtype` precision.
# Arguments:
x: np.ndarray / torch.Tensor
Input vector(s), 1D / 2D.
2D: does *not* do 2D CWT. Instead, treats dim0 as separate inputs,
e.g. `(n_channels, time)`, improving speed & memory w.r.t. looping.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. See `ssqueezepy.wavs()`
or `Wavelet.SUPPORTED`.
- tuple: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
See `help(wavelets.Wavelet)`.
scales: str['log', 'log-piecewise', 'linear', 'log:maximal', ...]
/ np.ndarray
CWT scales.
- 'log': exponentially distributed scales, as pow of 2:
`[2^(1/nv), 2^(2/nv), ...]`
- 'log-piecewise': 'log' except very high `scales` are downsampled
to prevent redundancy. This is recommended. See
https://github.com/OverLordGoldDragon/ssqueezepy/issues/
29#issuecomment-776792726
- 'linear': linearly distributed scales.
!!! this scheme is not recommended; use with caution
str assumes default `preset` of `'minimal'` for low scales and
`'maximal'` for high, which can be changed via e.g. 'log:maximal'.
See `preset` in `help(utils.cwt_scalebounds)`.
nv: int
Number of voices (wavelets per octave). Suggested >= 16.
fs: float / None
Sampling frequency of `x`. Defaults to 1, which for
`maprange='maximal'` makes ssq frequencies range from 1/dT to 0.5*fs,
i.e. as fraction of reference sampling rate up to Nyquist limit;
dT = total duration (N/fs).
Used to compute `dt`, which is only used if `derivative=True`.
Overridden by `t`, if provided.
Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Used to compute `dt`, which is only used if `derivative=True`.
Overrides `fs` if not None.
l1_norm: bool (default True)
Whether to L1-normalize the CWT, which yields a more representative
distribution of energies and component amplitudes than L2 (see [3],
[6]). If False (default True), uses L2 norm.
derivative: bool (default False)
Whether to compute and return `dWx`. Requires `fs` or `t`.
padtype: str / None
Pad scheme to apply on input. See `help(utils.padsignal)`.
`None` -> no padding.
rpadded: bool (default False)
Whether to return padded Wx and dWx.
`False` drops the added padding per `padtype` to return Wx and dWx
of .shape[1] == len(x).
vectorized: bool (default True)
Whether to compute quantities for all scales at once, which is
faster but uses more memory.
astensor: bool (default True)
If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
or move them back to CPU & convert to Numpy arrays.
cache_wavelet: bool (default None) / None
If True, will store `wavelet` computations for all `scales` in
`wavelet._Psih` (only if `vectorized`).
- Defaults to True if `wavelet` is passed that's a `Wavelet`,
throws warning if True with non-`Wavelet` `wavelet` and sets self
to False (since the array's discarded at `return` anyway).
- Ignored with `order > 2`, defaults to False.
order: int (default 0) / tuple[int] / range
> 0 computes `cwt` with higher-order GMWs. If tuple, computes
`cwt` at each specified order. See `help(_cwt.cwt_higher_order)`.
average: bool / None
Only used for tuple `order`; see `help(_cwt.cwt_higher_order)`.
nan_checks: bool / None
Checks whether input has `nan` or `inf` values, and zeros them.
`False` saves compute. Doesn't support torch inputs.
Defaults to `True` for NumPy inputs, else `False`.
patience: int / tuple[int, int]
pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.
# Returns:
Wx: [na x n] np.ndarray (na = number of scales; n = len(x))
CWT of `x`. (rows=scales, cols=timeshifts)
scales: [na] np.ndarray
Scales at which CWT was computed.
dWx: [na x n] np.ndarray (if `derivative=True`)
Time-derivative of the CWT of `x`, computed via frequency-domain
differentiation (effectively, derivative of trigonometric
interpolation; see [4]). Implements as described in Sec IIIB of [2].
# References:
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. How to validate a wavelet filterbank (CWT)? John Muradeli.
https://dsp.stackexchange.com/a/86069/50076
4. The Exponential Accuracy of Fourier and Chebyshev Differencing Methods.
E. Tadmor.
http://webhome.auburn.edu/~jzl0097/teaching/math_8970/Tadmor_86.pdf
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
cwt_fw.m
6. Rectification of the Bias in the Wavelet Power Spectrum.
Y. Liu, X. S. Liang, R. H. Weisberg.
http://ocg6.marine.usf.edu/~liu/Papers/Liu_etal_2007_JAOT_wavelet.pdf
"""
def _vectorized(xh, scales, wavelet, derivative, cache_wavelet):
if cache_wavelet:
Psih_xh = wavelet.Psih(scale=scales, nohalf=False) * xh
else:
Psih_xh = wavelet(scale=scales, nohalf=False) * xh
Wx = ifft(Psih_xh, axis=-1, astensor=True)
if derivative:
Psih_xh *= (1j * wavelet.xi / dt)
dWx = ifft(Psih_xh, axis=-1, astensor=True)
return (Wx, dWx) if derivative else (Wx, None)
def _for_loop(xh, scales, wavelet, derivative, is_2D):
cdtype = 'complex128' if S.is_dtype(xh, 'complex128') else 'complex64'
shape = ((len(scales), xh.shape[-1]) if not is_2D else
(len(xh), len(scales), xh.shape[-1]))
Wx = S.zeros(shape, dtype=cdtype)
if derivative:
dWx = (Wx.copy() if isinstance(Wx, np.ndarray) else
Wx.detach().clone())
for i, scale in enumerate(scales):
idx = (slice(i, i + 1) if not is_2D else # Wx[i]
(slice(None), slice(i, i + 1))) # Wx[:, i]
# sample FT of wavelet at scale `a`
psih = wavelet(scale=scale, nohalf=False)
Wx[idx] = ifft(psih * xh, axis=-1, astensor=True)
if derivative:
dpsih = (1j * wavelet.xi / dt) * psih
dWx[idx] = ifft(dpsih * xh, axis=-1, astensor=True)
return (Wx, dWx) if derivative else (Wx, None)
def _process_args(x, scales, nv, fs, t, nan_checks, wavelet, cache_wavelet):
if not hasattr(x, 'ndim'):
raise TypeError("`x` must be a numpy array or torch Tensor "
"(got %s)" % type(x))
elif x.ndim not in (1, 2):
raise ValueError("`x` must be 1D or 2D (got x.ndim == %s)" % x.ndim)
if nan_checks is None:
nan_checks = bool(isinstance(x, np.ndarray))
if nan_checks:
if not isinstance(x, np.ndarray):
raise ValueError("`nan_checks=True` requires NumPy input.")
elif np.isnan(x.max()) or np.isinf(x.max()) or np.isinf(x.min()):
WARN("found NaN or inf values in `x`; will zero")
replace_at_inf_or_nan(x, replacement=0.)
if cache_wavelet:
if isinstance(wavelet, (str, tuple)):
# only check str/tuple since it'll error anyway upon other types
WARN("`cache_wavelet=True` requires a `wavelet` that's instance "
"of `Wavelet`; setting to False.")
cache_wavelet = False
elif not vectorized:
WARN("`cache_wavelet=True` requires `vectorized=True`; "
"setting to False.")
cache_wavelet = False
elif cache_wavelet is None:
cache_wavelet = (not isinstance(wavelet, (str, tuple)) and vectorized)
if not isinstance(scales, str):
nv = None
N = x.shape[-1]
dt, *_ = _process_fs_and_t(fs, t, N=N)
is_2D = (x.ndim == 2)
return N, nv, dt, is_2D, cache_wavelet
if isinstance(order, (tuple, list, range)) or order > 0:
kw = dict(wavelet=wavelet, scales=scales, fs=fs, t=t, nv=nv,
l1_norm=l1_norm, derivative=derivative, padtype=padtype,
rpadded=rpadded, vectorized=vectorized, patience=patience,
cache_wavelet=cache_wavelet)
return cwt_higher_order(x, order=order, average=average,
astensor=astensor, **kw)
(N, nv, dt, is_2D, cache_wavelet
) = _process_args(x, scales, nv, fs, t, nan_checks, wavelet, cache_wavelet)
# process `wavelet`, get its `dtype`
wavelet = _process_gmw_wavelet(wavelet, l1_norm)
wavelet = Wavelet._init_if_not_isinstance(wavelet)
dtype = wavelet.dtype
# cast to torch early if possible (keeps as NumPy if SSQ_GPU=0)
torch_supports_padding = bool(padtype in ('zero', 'reflect', None))
if torch_supports_padding:
x = S.asarray(x, dtype)
x = S.astype(x, dtype)
# pad, ensure correct data type
if padtype is not None:
xp, _, n1, _ = padsignal(x, padtype, get_params=True)
else:
xp = x
if not torch_supports_padding:
xp = S.asarray(xp, dtype)
# take to freq-domain
xh = fft(xp, axis=-1, astensor=True)
if is_2D:
xh = xh[:, None] # insert dim1 to broadcast wavelet `scales` along
# process `scales`
scales = process_scales(scales, N, wavelet, nv=nv)
scales = S.asarray(scales, dtype)
# temporarily adjust `wavelet.N` & `FFT_GLOBAL.patience`
wavelet_N_orig = wavelet.N
wavelet.N = xp.shape[-1]
patience_orig = (FFT_GLOBAL.patience, FFT_GLOBAL.planning_timelimit)
FFT_GLOBAL.patience = patience
# take CWT
if vectorized:
Wx, dWx = _vectorized(xh, scales, wavelet, derivative, cache_wavelet)
else:
Wx, dWx = _for_loop(xh, scales, wavelet, derivative, is_2D)
# restore
wavelet.N = wavelet_N_orig
FFT_GLOBAL.patience = patience_orig
# handle unpadding, normalization
if not rpadded and padtype is not None:
# Wx[:, n1:n1 + N] if 1D else Wx[:, :, n1:n1 + N]
idx = ((slice(None), slice(n1, n1 + N)) if not is_2D else
(slice(None), slice(None), slice(n1, n1 + N)))
# shorten to pre-padded size
Wx = Wx[idx]
if derivative:
dWx = dWx[idx]
if S.is_tensor(Wx):
# ensure indexing (strides) is same, else cupy will mess up
Wx = Wx.contiguous()
if derivative:
dWx = dWx.contiguous()
if not l1_norm:
# normalize energy per L2 wavelet norm, else already L1-normalized
Wx *= S.astype(Q.sqrt(scales), Wx.dtype)
if derivative:
dWx *= S.astype(Q.sqrt(scales), Wx.dtype)
# postprocessing & return
if not astensor and S.is_tensor(Wx):
Wx, scales, dWx = [g.cpu().numpy() if S.is_tensor(g) else g
for g in (Wx, scales, dWx)]
scales = scales.squeeze()
return ((Wx, scales, dWx) if derivative else
(Wx, scales))
def icwt(Wx, wavelet='gmw', scales='log-piecewise', nv=None, one_int=True,
x_len=None, x_mean=0, padtype='reflect', rpadded=False, l1_norm=True):
"""The inverse Continuous Wavelet Transform of `Wx`, via double or
single integral.
# Arguments:
Wx: np.ndarray
CWT computed via `ssqueezepy.cwt`.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
See help(cwt).
nv: int / None
Number of voices. Suggested >= 32. Needed if `scales` isn't array
(will default to `cwt`'s).
one_int: bool (default True)
Whether to use one-integral iCWT or double.
Current one-integral implementation performs best.
- True: Eq 2.6, modified, of [6]. Explained in [1].
- False: Eq 4.67 of [3]. Explained in [2].
x_len: int / None. Length of `x` used in forward CWT, if different
from Wx.shape[1] (default if None).
x_mean: float. mean of original `x` (not picked up in CWT since it's an
infinite scale component). Default 0.
padtype: str
Pad scheme to apply on input, in case of `one_int=False`.
See `help(utils.padsignal)`.
rpadded: bool (default False)
True if Wx is padded (e.g. if used `cwt(, rpadded=True)`).
l1_norm: bool (default True)
True if Wx was obtained via `cwt(, l1_norm=True)`.
# Returns:
x: np.ndarray
The signal, as reconstructed from Wx.
# References:
1. One integral inverse CWT. John Muradeli.
https://dsp.stackexchange.com/a/76239/50076
2. Inverse CWT derivation. John Muradeli.
https://dsp.stackexchange.com/a/71148/50076
3. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
4. Why iCWT may be inexact. John Muradeli.
https://dsp.stackexchange.com/a/87104/50076
5. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
6. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
7. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
#### Prepare for inversion ###############################################
na, n = Wx.shape
x_len = x_len or n
if not isinstance(scales, np.ndarray) and nv is None:
nv = 32 # must match forward's; default to `cwt`'s
wavelet = _process_gmw_wavelet(wavelet, l1_norm)
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# will override `nv` to match `scales`'s
scales, scaletype, _, nv = process_scales(scales, x_len, wavelet, nv=nv,
get_params=True)
assert (len(scales) == na), "%s != %s" % (len(scales), na)
#### Handle piecewise scales case ########################################
# `nv` must be left unspecified so it's inferred automatically from `scales`
# in `process_scales` for each piecewise case
if scaletype == 'log-piecewise':
kw = dict(wavelet=wavelet, one_int=one_int, x_len=x_len, x_mean=x_mean,
padtype=padtype, rpadded=rpadded, l1_norm=l1_norm)
idx = logscale_transition_idx(scales)
x = icwt(Wx[:idx], scales=scales[:idx], **kw)
x += icwt(Wx[idx:], scales=scales[idx:], **kw)
return x
##########################################################################
#### Invert ##############################################################
if one_int:
x = _icwt_1int(Wx, scales, scaletype, l1_norm)
else:
x = _icwt_2int(Wx, scales, scaletype, l1_norm,
wavelet, x_len, padtype, rpadded)
# admissibility coefficient
Cpsi = (adm_ssq(wavelet) if one_int else
adm_cwt(wavelet))
if scaletype == 'log':
# Eq 4.67 in [1]; Theorem 4.5 in [1]; below Eq 14 in [2]
# ln(2**(1/nv)) == ln(2)/nv == diff(ln(scales))[0]
x *= (2 / Cpsi) * np.log(2 ** (1 / nv))
else:
x *= (2 / Cpsi)
x += x_mean # CWT doesn't capture mean (infinite scale)
return x
def _icwt_2int(Wx, scales, scaletype, l1_norm, wavelet, x_len,
padtype='zero', rpadded=False):
"""Double-integral iCWT; works with any(?) wavelet."""
# add CWT padding if it doesn't exist
if not rpadded:
Wx, n_up, n1, _ = padsignal(Wx, padtype=padtype, get_params=True)
# see help(cwt) on `norm` and `pn`
norm = _icwt_norm(scaletype, l1_norm)
pn = (-1)**np.arange(n_up)
x = np.zeros(n_up)
# TODO vectorize?
for scale, Wx_scale in zip(scales, Wx):
# TODO remove `*pn` & `ifftshift`?
psih = wavelet(scale=scale, N=n_up) * pn
xa = ifftshift(ifft(fft(Wx_scale) * psih))
x += xa.real / norm(scale)
x = x[n1:n1 + x_len] # keep the unpadded part
return x
def _icwt_1int(Wx, scales, scaletype, l1_norm):
"""One-integral iCWT; assumes analytic wavelet."""
norm = _icwt_norm(scaletype, l1_norm)
return (Wx.real / norm(scales)).sum(axis=0)
def _icwt_norm(scaletype, l1_norm):
if l1_norm:
norm = ((lambda scale: 1) if scaletype == 'log' else
(lambda scale: scale))
else:
if scaletype == 'log':
norm = lambda scale: scale**.5
elif scaletype == 'linear':
norm = lambda scale: scale**1.5
return norm
def _process_gmw_wavelet(wavelet, l1_norm):
"""Ensure `norm` for GMW is consistent with `l1_norm`."""
norm = 'bandpass' if l1_norm else 'energy'
if isinstance(wavelet, str) and wavelet.lower()[:3] == 'gmw':
wavelet = ('gmw', {'norm': norm})
elif isinstance(wavelet, tuple) and wavelet[0].lower()[:3] == 'gmw':
wavelet, wavopts = wavelet
wavopts['norm'] = wavopts.get('norm', norm)
wavelet = (wavelet, wavopts)
elif isinstance(wavelet, Wavelet):
if wavelet.name == 'GMW L2' and l1_norm:
raise ValueError("using GMW L2 wavelet with `l1_norm=True`")
elif wavelet.name == 'GMW L1' and not l1_norm:
raise ValueError("using GMW L1 wavelet with `l1_norm=False`")
return wavelet
def cwt_higher_order(x, wavelet='gmw', order=1, average=None, astensor=True,
**kw):
"""Compute `cwt` with GMW wavelets of order 0 to `order`. See `help(cwt)`.
Yields lower variance and more noise robust representation. See VI in ref[1].
# Arguments:
x: np.ndarray
Input, 1D/2D. See `help(cwt)`.
wavelet: str / wavelets.Wavelet
CWT wavelet.
order: int / tuple[int] / range
Order of GMW to use for CWT. If tuple, will compute for each
order specified in tuple, subject to `average`.
average: bool (default True if `order` is tuple)
If True, will take arithmetic mean of resulting `Wx` (and `dWx`
if `derivative=True`), else return as list. Note for phase transform,
one should compute derivative of averaged `Wx` rather than take
average of individual `dWx`s.
Ignored with non-tuple `order.
kw: dict / kwargs
Arguments to `cwt`.
If `scales` is string, will reuse zeroth-order's; zeroth order
isn't included in `order`, will set from wavelet at `order=0`.
# References
[1] Generalized Morse Wavelets. S. C. Olhede, A. T. Walden. 2002.
https://spiral.imperial.ac.uk/bitstream/10044/1/1150/1/
OlhedeWaldenGenMorse.pdf
"""
def _process_wavelet(wavelet, order):
wavelet = Wavelet._init_if_not_isinstance(wavelet)
if not wavelet.name.lower().startswith('gmw'):
raise ValueError("`wavelet` must be GMW for higher-order transforms "
"(got %s)" % wavelet.name)
wavopts = wavelet.config.copy()
wavopts.pop('order')
wavelets = [Wavelet(('gmw', dict(order=k, **wavopts))) for k in order]
return wavelets, wavopts
def _process_args(wavelet, order, average, kw):
if isinstance(order, (list, range)):
order = tuple(order)
if not isinstance(order, (list, tuple)):
order = [order]
if len(order) == 1 and average:
WARN("`average` ignored with single `order`")
average = False
wavelets, wavopts = _process_wavelet(wavelet, order)
scales = kw.get('scales', 'log-piecewise')
if isinstance(scales, str):
wav = Wavelet(('gmw', dict(order=0, **wavopts)))
scales = process_scales(scales, len(x), wavelet=wav,
nv=kw.get('nv', 32))
scales = S.asarray(scales, wav.dtype)
kw['scales'] = scales
return wavelets, order, average
wavelets, order, average = _process_args(wavelet, order, average, kw)
Wx_all, dWx_all = [], []
derivative = kw.get('derivative', False)
# take the CWTs
for k in range(len(order)):
out = cwt(x, wavelets[k], order=0, **kw)
Wx_all.append(out[0])
if derivative:
dWx_all.append(out[-1])
# handle averaging; strip `Wx_all` of list container if only one array
if average or (average is None and isinstance(order, tuple)):
Wx_all = Q.mean(S.vstack(Wx_all), axis=0)
if derivative:
dWx_all = Q.mean(S.vstack(dWx_all), axis=0)
elif len(Wx_all) == 1:
Wx_all = Wx_all[0]
if derivative:
dWx_all = dWx_all[0]
scales = kw['scales']
if not astensor and S.is_tensor(Wx_all):
Wx_all, scales, dWx_all = [g.cpu().numpy() if S.is_tensor(g) else g
for g in (Wx_all, scales, dWx_all)]
return ((Wx_all, scales, dWx_all) if derivative else
(Wx_all, scales))
| 23,966 | 39.691002 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/_gmw.py | # -*- coding: utf-8 -*-
"""Generalized Morse Wavelets.
For complete functionality, utility functions have been ported from jLab, and
largely validated to match jLab's behavior. jLab tests not ported.
"""
import numpy as np
from numpy.fft import ifft
from numba import jit
from scipy.special import (gamma as gamma_fn,
gammaln as gammaln_fn)
from .algos import nCk
from .wavelets import _xifn, _process_params_dtype
from .configs import gdefaults, USE_GPU, IS_PARALLEL
from .utils.backend import torch
from .utils import backend as S
pi = np.pi
#### Base wavelets (`K=1`) ###################################################
def gmw(gamma=None, beta=None, norm=None, order=None, centered_scale=None,
dtype=None):
"""Generalized Morse Wavelets. Returns function which computes GMW in the
frequency domain.
Assumes `beta != 0`; for full functionality use `_gmw.morsewave`.
Unlike `morsewave`, works with scales rather than frequencies.
Note that function for `norm='energy'` does *not* rescale freq-domain wavelet
per `sqrt(scale)`, for consistency with `ssqueezepy.wavelets`.
See `_gmw.compute_gmw` for code computing freq- and time-domain wavelets
as arrays with proper scaling in.
An overview: https://overlordgolddragon.github.io/generalized-morse-wavelets/
Interactive: https://www.desmos.com/calculator/4gcaeqidxd (bandpass)
https://www.desmos.com/calculator/zfxnblqh8p (energy)
# Arguments
gamma, beta: float > 0, float > 0
GMW parameters. See `help(_gmw.morsewave)`.
norm: str['energy', 'bandpass']
Normalization to use:
'energy': L2 norm, keeps time-domain wavelet's energy at unity
for all `freqs`, i.e. `sum(abs(psi)**2) == 1`.
'bandpass': L1 norm, keeps freq-domain wavelet's peak value at 2
for all `freqs`, i.e. `max(psih) == 2`, `w[argmax(psih)] == wc`.
Additionally see `help(_gmw.morsewave)`.
order: int (default 1)
Order of the wavelet. `k+1`-th order wavelet is orthogonal to `k`-th.
`k=0` will call a different but equivalent function for simpler code
and compute efficiency.
centered_scale: bool (default False)
Unlike other `ssqueezepy.wavelets`, by default `scale=1` in
`morsewave` (i.e. `freqs=1`) computes the wavelet at (peak) center
frequency. This ensures exact equality between `scale` and
`1 / center_frequency`, by multiplying input radians `w` by peak
center freq.
False by default for consistency with other `ssqueezepy` wavelets.
dtype: str / type (np.dtype) / None
See `help(wavelets.Wavelet)`.
# Returns
psihfn: function
Function that computes GMWs, taking `w` (radian frequency)
as argument.
# Usage
wavelet = gmw(3, 60)
wavelet = Wavelet('gmw')
wavelet = Wavelet(('gmw', {'beta': 60}))
Wx, *_ = cwt(x, 'gmw')
# Correspondence with Morlet
Following pairs yield ~same frequency resolution, which is ~same
time-frequency resolution for `mu > 5`, assuming `gamma=3` for all:
`mu`, `beta`
(1.70, 1.00),
(3.00, 3.00),
(4.00, 5.15),
(6.00, 11.5),
(8.00, 21.5),
(10.0, 33.5),
(12.0, 48.5),
(13.4, 60.0),
The default `beta=12` is hence to closely match Morlet's default `mu=6.`.
# vs Morlet
Differences grow significant when seeking excellent time localization
(low `mu`, <4), where Morlet's approximate analyticity breaks down and
negative frequencies are leaked, whereas GMW remains exactly analytic,
with vanishing moments toward dc bin. Else, the two don't behave
noticeably different for `gamma=3`.
# References
[1] Generalized Morse Wavelets. S. C. Olhede, A. T. Walden. 2002.
https://spiral.imperial.ac.uk/bitstream/10044/1/1150/1/
OlhedeWaldenGenMorse.pdf
[2] Generalized Morse Wavelets as a Superfamily of Analytic Wavelets.
J. M. Lilly, S. C. Olhede. 2012.
https://sci-hub.st/10.1109/TSP.2012.2210890
[3] Higher-Order Properties of Analytic Wavelets.
J. M. Lilly, S. C. Olhede. 2009.
https://sci-hub.st/10.1109/TSP.2008.2007607
[4] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
v1.6.9, http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsewave.m
"""
_check_args(gamma=gamma, beta=beta, norm=norm, order=order)
kw = gdefaults('_gmw.gmw', gamma=gamma, beta=beta, norm=norm, order=order,
centered_scale=centered_scale, dtype=dtype, as_dict=True)
norm, k = kw.pop('norm'), kw.pop('order')
if norm == 'energy'and dtype in ('float32', np.float32):
raise ValueError("`norm='energy'` w/ `dtype='float32'` is unsupported; "
"use 'float64' instead.")
l1_fn, l2_fn = ((gmw_l1, gmw_l2) if k == 0 else
(gmw_l1_k, gmw_l2_k))
if k > 0:
kw['k'] = k
return (l1_fn(**kw) if norm == 'bandpass' else
l2_fn(**kw))
def compute_gmw(N, scale, gamma=3, beta=60, time=False, norm='bandpass',
order=0, centered_scale=False, norm_scale=True, dtype=None):
"""Evaluates GMWs, returning as arrays. See `help(_gmw.gmw)` for full docs.
# Arguments
N: int > 0
Number of samples to compute.
scale: float > 0
Scale at which to sample the freq-domain wavelet: `psih(s * w)`.
gamma, beta, norm, order:
See `help(_gmw.gmw)`.
time: bool (default False)
Whether to compute the time-domain wavelet, `psi`.
centered_scale: bool (default False)
See `help(_gmw.gmw)`.
norm_scale: bool (default True)
Whether to rescale as `sqrt(s) * psih(s * w)` for the `norm='energy'`
case (no effect with `norm='bandpass'`).
# Returns
psih: np.ndarray [N]
Frequency-domain wavelet.
psi: np.ndarray [N]
Time-domain wavelet, returned if `time=True`.
"""
_check_args(gamma=gamma, beta=beta, norm=norm, scale=scale)
gmw_fn = gmw(gamma, beta, norm, order, centered_scale, dtype)
w = _xifn(scale, N)
X = np.zeros(N)
X[:N//2 + 1] = gmw_fn(w[:N//2 + 1])
if norm == 'energy' and norm_scale:
wc = morsefreq(gamma, beta)
X *= (np.sqrt(wc * scale) if centered_scale else
np.sqrt(scale))
X[np.isinf(X) | np.isnan(X)] = 0.
if time:
Xr = X.copy()
if N % 2 == 0:
# https://github.com/jonathanlilly/jLab/issues/13
Xr[N//2] /= 2
x = ifft(Xr * (-1)**np.arange(N))
return (X, x) if time else X
def gmw_l1(gamma=3., beta=60., centered_scale=False, dtype='float64'):
"""Generalized Morse Wavelets, first order, L1(bandpass)-normalized.
See `help(_gmw.gmw)`.
"""
_check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
wc = morsefreq(gamma, beta)
wcl = np.log(wc)
gamma, beta, wc, wcl = _process_params_dtype(gamma, beta, wc, wcl, dtype=dtype)
fn = _gmw_l1_gpu if USE_GPU() else (_gmw_l1_par if IS_PARALLEL() else _gmw_l1)
if centered_scale:
return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc, wcl)
else:
return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, wcl)
@jit(nopython=True, cache=True)
def _gmw_l1(w, gamma, beta, wc, wcl):
# NOTE: numba.jit, unlike numpy & torch, will promote to float64 with
# array float32 and scalar float64
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return 2 * np.exp(- beta * wcl + wc**gamma
+ beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l1_par(w, gamma, beta, wc, wcl):
# NOTE: numba.jit, unlike numpy & torch, will promote to float64 with
# array float32 and scalar float64
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return 2 * np.exp(- beta * wcl + wc**gamma
+ beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l1_gpu(w, gamma, beta, wc, wcl):
w_nonneg = (w >= 0)
w *= w_nonneg
return 2 * torch.exp(- beta * wcl + wc**gamma
+ beta * torch.log(w) - w**gamma) * w_nonneg
def gmw_l2(gamma=3., beta=60., centered_scale=False, dtype='float64'):
"""Generalized Morse Wavelets, first order, L2(energy)-normalized.
See `help(_gmw.gmw)`.
"""
_check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
wc = morsefreq(gamma, beta)
r = (2*beta + 1) / gamma
rgamma = gamma_fn(r)
(gamma, beta, wc, r, rgamma
) = _process_params_dtype(gamma, beta, wc, r, rgamma, dtype=dtype)
fn = _gmw_l2_gpu if USE_GPU() else (_gmw_l2_par if IS_PARALLEL() else _gmw_l2)
if centered_scale:
return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
r, rgamma)
else:
return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, r, rgamma)
@jit(nopython=True, cache=True)
def _gmw_l2(w, gamma, beta, wc, r, rgamma):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return np.sqrt(2.*pi * gamma * 2.**r / rgamma
) * w**beta * np.exp(-w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l2_par(w, gamma, beta, wc, r, rgamma):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return np.sqrt(2.*pi * gamma * 2.**r / rgamma
) * w**beta * np.exp(-w**gamma) * w_nonneg
def _gmw_l2_gpu(w, gamma, beta, wc, r, rgamma):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
return torch.sqrt(2.*pi * gamma * 2.**r / rgamma
) * w**beta * torch.exp(-w**gamma) * w_nonneg
def gmw_l1_k(gamma=3., beta=60., k=1, centered_scale=False, dtype='float64'):
"""Generalized Morse Wavelets, `k`-th order, L1(bandpass)-normalized.
See `help(_gmw.gmw)`.
"""
_check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
wc = morsefreq(gamma, beta)
k_consts = _gmw_k_constants(gamma, beta, k, norm='bandpass', dtype=dtype)
gamma, beta, wc = _process_params_dtype(gamma, beta, wc, dtype=dtype)
fn = (_gmw_l1_k_gpu if USE_GPU() else
(_gmw_l1_k_par if IS_PARALLEL() else _gmw_l1_k))
if centered_scale:
return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
k_consts)
else:
return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, k_consts)
@jit(nopython=True, cache=True)
def _gmw_l1_k(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = np.zeros(w.shape, dtype=w.dtype)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * np.exp(- beta * np.log(wc) + wc**gamma
+ beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l1_k_par(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = np.zeros(w.shape, dtype=w.dtype)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * np.exp(- beta * np.log(wc) + wc**gamma
+ beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l1_k_gpu(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = w.new_zeros(w.shape)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * torch.exp(- beta * torch.log(wc) + wc**gamma
+ beta * torch.log(w) - w**gamma) * w_nonneg
def gmw_l2_k(gamma=3., beta=60., k=1, centered_scale=False, dtype='float64'):
"""Generalized Morse Wavelets, `k`-th order, L2(energy)-normalized.
See `help(_gmw.gmw)`.
"""
_check_args(gamma=gamma, beta=beta, allow_zerobeta=False)
wc = morsefreq(gamma, beta)
k_consts = _gmw_k_constants(gamma, beta, k, norm='energy', dtype=dtype)
gamma, beta, wc = _process_params_dtype(gamma, beta, wc, dtype=dtype)
fn = (_gmw_l2_k_gpu if USE_GPU() else
(_gmw_l2_k_par if IS_PARALLEL() else _gmw_l2_k))
if centered_scale:
return lambda w: fn(S.atleast_1d(w * wc, dtype), gamma, beta, wc,
k_consts)
else:
return lambda w: fn(S.atleast_1d(w, dtype), gamma, beta, wc, k_consts)
@jit(nopython=True, cache=True)
def _gmw_l2_k(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = np.zeros(w.shape, dtype=w.dtype)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * np.exp(beta * np.log(w) - w**gamma) * w_nonneg
@jit(nopython=True, cache=True, parallel=True)
def _gmw_l2_k_par(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = np.zeros(w.shape, dtype=w.dtype)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * np.exp(beta * np.log(w) - w**gamma) * w_nonneg
def _gmw_l2_k_gpu(w, gamma, beta, wc, k_consts):
w_nonneg = (w >= 0)
w *= w_nonneg # zero negative `w` to avoid nans
C = w.new_zeros(w.shape)
for m in range(len(k_consts)):
C += k_consts[m] * (2*w**gamma)**m
return C * torch.exp(beta * torch.log(w) - w**gamma) * w_nonneg
def _gmw_k_constants(gamma, beta, k, norm='bandpass', dtype='float64'):
"""Laguerre polynomial constants & `coeff` term.
Higher-order GMWs are coded such that constants are pre-computed and reused
for any `w` input, since they remain fixed for said order.
"""
r = (2 * beta + 1) / gamma
c = r - 1
# compute `coeff`
if norm == 'bandpass':
coeff = np.sqrt(np.exp(gammaln_fn(r) + gammaln_fn(k + 1) -
gammaln_fn(k + r)))
elif norm == 'energy':
coeff = np.sqrt(2*pi * gamma * (2**r) *
np.exp(gammaln_fn(k + 1) - gammaln_fn(k + r)))
# compute Laguerre polynomial constants
L_consts = np.zeros(k + 1, dtype=dtype)
for m in range(k + 1):
fact = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1) -
gammaln_fn(k - m + 1))
L_consts[m] = (-1)**m * fact / gamma_fn(m + 1)
k_consts = L_consts * coeff
if norm == 'bandpass':
k_consts *= 2
k_consts = k_consts.astype(dtype)
return k_consts
#### General order wavelets (any `K`) ########################################
def morsewave(N, freqs, gamma=3, beta=60, K=1, norm='bandpass'):
"""Generalized Morse wavelets of Olhede and Walden (2002).
# Arguments:
N: int > 0
Number of samples / wavelet length
freqs: float / list / np.ndarray
(peak) center frequencies at which to generate wavelets,
in *radians* (i.e. `w` in `w = 2*pi*f`).
gamma, beta: float, float
GMW parameters; `(gamma, beta) = (3, 60)` yields optimal
time-frequency localization, and a good default for natural signals.
- smaller `beta`: greater time resolution, lower freq resolution.
- `gamma`: structurally alters the wavelet; 2 and 1 provide
superior time localization but poor joint localization.
See refs [2], [3].
K: int > 0
Will compute first `K` orthogonal GMWs, characterized by
orders 0 through `K - 1`.
Note this `K` is 1 greater than in original paper and than `order`
throughout `ssqueezepy`, but is consistent with jLAB.
norm: str['energy', 'bandpass']
Normalization to use. See `help(_gmw.gmw)`, and below.
# Returns:
psih: np.ndarray [N x len(freqs) x K]
Frequency-domain GMW, generated by sampling continuous-time function.
Will collapse dims of length 1 (e.g. if `K=0` or `freqs` is integer).
psi: np.ndarray [N x len(freqs) x K]
Time-domain GMW, centered, generated via inverse DFT of `psih`.
# References
See `help(_gmw.gmw)`.
__________________________________________________________________________
**`beta==0` case**
For BETA equal to zero, the generalized Morse wavelets describe
a non-zero-mean function which is not in fact a wavelet. Only 'bandpass'
normalization is supported for this case.
In this case the frequency speficies the half-power point of the
analytic lowpass filter.
The frequency-domain definition of MORSEWAVE is not necessarily
a good way to compute the zero-beta functions, however. You will
probably need to take a very small DT.
**Multiple orthogonal wavelets**
MORSEWAVE can compute multiple orthogonal versions of the generalized
Morse wavelets, characterized by the order K.
PSI=MORSEWAVE(N,K,GAMMA,BETA,FS) with a fifth numerical argument K
returns an N x LENGTH(FS) x K array PSI which contains time-domain
versions of the first K orthogonal generalized Morse wavelets.
These K different orthogonal wavelets have been employed in
multiwavelet polarization analysis, see Olhede and Walden (2003a,b).
Again either bandpass or energy normalization can be applied. With
bandpass normalization, all wavelets are divided by a constant, setting
the peak value of the first frequency-domain wavelet equal to 2.
"""
_check_args(gamma=gamma, beta=beta, norm=norm)
if not isinstance(freqs, (list, tuple, np.ndarray)):
freqs = [freqs]
psi = np.zeros((N, len(freqs), K), dtype='complex128')
psif = np.zeros((N, len(freqs), K))
for n, f in enumerate(freqs):
psif[:, n:n+1, :], psi[:, n:n+1, :] = _morsewave1(N, abs(f), gamma, beta,
K, norm)
if f < 0:
psi[:, n:n+1, :] = psi[:, n, :].conj()
psif[1:, n:n+1, :] = np.flip(psif[1:, n, :], axis=0)
psi = psi.squeeze()
psif = psif.squeeze()
return psif, psi
def _morsewave1(N, f, gamma, beta, K, norm):
"""See `help(_gmw.morsewave)`."""
fo = morsefreq(gamma, beta)
fact = f / fo
w = 2*pi * np.linspace(0, 1, N, endpoint=False) / fact
w = w.reshape(-1, 1)
with np.errstate(divide='ignore', invalid='ignore'):
if norm == 'energy':
if beta == 0:
psizero = np.exp(-w**gamma)
else:
# w**beta * exp(-w**gamma)
psizero = np.exp(beta * np.log(w) - w**gamma)
else:
if beta == 0:
psizero = 2 * np.exp(-w**gamma)
else:
# Alternate calculation to cancel things that blow up
psizero = 2 * np.exp(- beta * np.log(fo) + fo**gamma
+ beta * np.log(w) - w**gamma)
if beta == 0:
# Ensure nice lowpass filters for beta=0;
# Otherwise, doesn't matter since wavelets vanishes at zero frequency
psizero[0] /= 2 # Due to unit-step function
psizero[np.isnan(psizero) | np.isinf(psizero)] = 0.
X = _morsewave_first_family(fact, N, K, gamma, beta, w, psizero, norm)
X[np.isinf(X)] = 0.
Xr = X.copy()
# center time-domain wavelet
Xr *= (-1)**np.arange(len(Xr)).reshape(-1, 1, 1)
if len(Xr) % 2 == 0:
Xr[len(Xr) // 2] /= 2
x = ifft(Xr, axis=0)
return X, x
def _morsewave_first_family(fact, N, K, gamma, beta, w, psizero, norm):
"""See `help(_gmw.morsewave)`.
See Olhede and Walden, "Noise reduction in directional signals using
multiple Morse wavelets", IEEE Trans. Bio. Eng., v50, 51--57.
The equation at the top right of page 56 is equivalent to the
used expressions. Morse wavelets are defined in the frequency
domain, and so not interpolated in the time domain in the same way
as other continuous wavelets.
"""
r = (2 * beta + 1) / gamma
c = r - 1
L = np.zeros(w.shape)
psif = np.zeros((len(psizero), 1, K))
for k in range(K):
# Log of gamma function much better ... trick from Maltab's ``beta'`
if norm == 'energy':
A = morseafun(gamma, beta, k + 1, norm='energy')
coeff = np.sqrt(1. / fact) * A
elif norm == 'bandpass':
if beta == 0:
coeff = 1.
else:
coeff = np.sqrt(np.exp(gammaln_fn(r) + gammaln_fn(k + 1) -
gammaln_fn(k + r)))
L[:N//2 + 1] = laguerre(2 * w[:N//2 + 1]**gamma, k, c).reshape(-1, 1)
psif[:, :, k] = coeff * psizero * L
return psif
def morseafun(gamma, beta, k=1, norm='bandpass'):
"""GMW amplitude or a-function (evaluated). Used internally by other funcs.
# Arguments
k: int >= 1
Order of the wavelet; see `help(_gmw.morsewave)`.
gamma, beta: float, float
Wavelet parameters. See `help(_gmw.morsewave)`.
norm: str['energy', 'bandpass']
Wavelet normalization. See `help(_gmw.morsewave)`.
# Returns
A: float
GMW amplitude (freq-domain peak value).
______________________________________________________________________
Lilly, J. M. (2021), jLab: A data analysis package for Matlab, v1.6.9,
http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morseafun.m
"""
if norm == 'energy':
r = (2*beta + 1) / gamma
A = np.sqrt(2*pi * gamma * (2**r) *
np.exp(gammaln_fn(k) - gammaln_fn(k + r - 1)))
elif norm == 'bandpass':
if beta == 0:
A = 2.
else:
wc = morsefreq(gamma, beta)
A = 2. / np.exp(beta * np.log(wc) - wc**gamma)
else:
raise ValueError("unsupported `norm`: %s;" % norm
+ "must be one of: 'bandpass', 'energy'.")
return A
def laguerre(x, k, c):
"""Generalized Laguerre polynomials. See `help(_gmw.morsewave)`.
LAGUERRE is used in the computation of the generalized Morse
wavelets and uses the expression given by Olhede and Walden (2002),
"Generalized Morse Wavelets", Section III D.
"""
x = np.atleast_1d(np.asarray(x).squeeze())
assert x.ndim == 1
y = np.zeros(x.shape)
for m in range(k + 1):
# Log of gamma function much better ... trick from Maltab's ``beta''
fact = np.exp(gammaln_fn(k + c + 1) - gammaln_fn(c + m + 1) -
gammaln_fn(k - m + 1))
y += (-1)**m * fact * x**m / gamma_fn(m + 1)
return y
def morsefreq(gamma, beta, n_out=1):
"""Frequency measures for GMWs (with F. Rekibi).
`n_out` controls how many parameters are computed and returned, in the
following order: `wm, we, wi, cwi`, where:
wm: modal / peak frequency
we: energy frequency
wi: instantaneous frequency at time-domain wavelet's center
cwi: curvature of instantaneous frequency at time-domain wavelet's center
All frequency quantities are *radian*, opposed to linear cyclic (i.e. `w`
in `w = 2*pi*f`).
For BETA=0, the "wavelet" becomes an analytic lowpass filter, and `wm`
is not defined in the usual way. Instead, `wm` is defined as the point
at which the filter has decayed to one-half of its peak power.
# References
[1] Higher-Order Properties of Analytic Wavelets.
J. M. Lilly, S. C. Olhede. 2009.
https://sci-hub.st/10.1109/TSP.2008.2007607
[2] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
v1.6.9, http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsefreq.m
"""
wm = (beta / gamma)**(1 / gamma)
if n_out > 1:
we = (1 / 2**(1 / gamma)) * (gamma_fn((2*beta + 2) / gamma) /
gamma_fn((2*beta + 1) / gamma))
if n_out > 2:
wi = (gamma_fn((beta + 2) / gamma) /
gamma_fn((beta + 1) / gamma))
if n_out > 3:
k2 = _morsemom(2, gamma, beta, n_out=3)[-1]
k3 = _morsemom(3, gamma, beta, n_out=3)[-1]
cwi = -(k3 / k2**1.5)
if n_out == 1:
return wm
elif n_out == 2:
return wm, we
elif n_out == 3:
return wm, we, wi
return wm, we, wi, cwi
def _morsemom(p, gamma, beta, n_out=4):
"""Frequency-domain `p`-th order moments of the first order GMW.
Used internally by other funcs.
`n_out` controls how many parameters are coMputed and returned, in the
following order: `Mp, Np, Kp, Lp`, where:
Mp: p-th order moment
Np: p-th order energy moment
Kp: p-th order cumulant
Lp: p-th order energy cumulant
The p-th order moment and energy moment are defined as
Mp = 1/(2 pi) int omegamma^p psi(omegamma) d omegamma
Np = 1/(2 pi) int omegamma^p |psi(omegamma)|.^2 d omegamma
respectively, where omegamma is the radian frequency. These are evaluated
using the 'bandpass' normalization, which has `max(abs(psih(omegamma)))=2`.
# References
[1] Higher-Order Properties of Analytic Wavelets.
J. M. Lilly, S. C. Olhede. 2009.
https://sci-hub.st/10.1109/TSP.2008.2007607
[2] (c) Lilly, J. M. (2021), jLab: A data analysis package for Matlab,
v1.6.9, http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/morsemom.m
"""
def morsemom1(p, gamma, beta):
return morseafun(gamma, beta, k=1) * morsef(gamma, beta + p)
def morsef(gamma, beta):
# normalized first frequency-domain moment "f_{beta, gamma}" of the
# first-order GMW
return (1 / (2*pi * gamma)) * gamma_fn((beta + 1) / gamma)
Mp = morsemom1(p, gamma, beta)
if n_out > 1:
Np = (2 / 2**((1 + p) / gamma)) * morsemom1(p, gamma, 2*beta)
if n_out > 2:
prange = np.arange(p + 1)
moments = morsemom1(prange, gamma, beta)
cumulants = _moments_to_cumulants(moments)
Kp = cumulants[p]
if n_out > 3:
moments = (2 / 2**((1 + prange) / gamma)
) * morsemom1(prange, gamma, 2 * beta)
cumulants = _moments_to_cumulants(moments)
Lp = cumulants[p]
if n_out == 1:
return Mp
elif n_out == 2:
return Mp, Np
elif n_out == 3:
return Mp, Np, Kp
return Mp, Np, Kp, Lp
def _moments_to_cumulants(moments):
"""Convert moments to cumulants. Used internally by other funcs.
Converts the first N moments `moments =[M0,M1,...M{N-1}]`
into the first N cumulants `cumulants=[K0,K1,...K{N-1}]`.
Note for a probability density function, M0=1 and K0=0.
______________________________________________________________________
Lilly, J. M. (2021), jLab: A data analysis package for Matlab, v1.6.9,
http://www.jmlilly.net/jmlsoft.html
https://github.com/jonathanlilly/jLab/blob/master/jWavelet/moms
"""
moments = np.atleast_1d(np.asarray(moments).squeeze())
assert moments.ndim == 1
cumulants = np.zeros(len(moments))
cumulants[0] = np.log(moments[0])
for n in range(1, len(moments)):
coeff = 0
for k in range(1, n):
coeff += nCk(n - 1, k - 1
) * cumulants[k] * (moments[n - k] / moments[0])
cumulants[n] = (moments[n] / moments[0]) - coeff
return cumulants
def _check_args(gamma=None, beta=None, norm=None, order=None, scale=None,
allow_zerobeta=True):
"""Only checks those that are passed in."""
if gamma is not None and gamma <= 0:
raise ValueError(f"`gamma` must be positive (got {gamma})")
if beta is not None:
if beta < 0:
kind = "non-negative" if allow_zerobeta else "positive"
raise ValueError(f"`beta` must be {kind} (got {beta})")
elif beta == 0 and not allow_zerobeta:
raise ValueError(f"`beta` cannot be zero (got {beta}); "
"use `_gmw.morsewave`, which supports it")
if norm is not None and norm not in ('bandpass', 'energy'):
raise ValueError(f"`norm` must be 'energy' or 'bandpass' (got '{norm}')")
if order is not None:
if (not isinstance(order, (int, float)) or
(isinstance(order, float) and not order.is_integer())):
raise TypeError("`order` must be integer (got %s)" % str(order))
elif order < 0:
raise ValueError("`order` must be >=0 (got %s)" % order)
if scale is not None and scale <= 0:
raise ValueError(f"`scale` must be positive (got {scale})")
| 29,146 | 36.657623 | 83 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/_ssq_cwt.py | # -*- coding: utf-8 -*-
import numpy as np
from .utils import EPS32, EPS64, pi, p2up, adm_ssq, process_scales
from .utils import trigdiff, _process_fs_and_t
from .utils import backend as S
from .algos import replace_under_abs, phase_cwt_cpu, phase_cwt_gpu
from .ssqueezing import ssqueeze, _check_ssqueezing_args
from .wavelets import Wavelet
from ._cwt import cwt
def ssq_cwt(x, wavelet='gmw', scales='log-piecewise', nv=None, fs=None, t=None,
ssq_freqs=None, padtype='reflect', squeezing='sum', maprange='peak',
difftype='trig', difforder=None, gamma=None, vectorized=True,
preserve_transform=None, astensor=True, order=0, nan_checks=None,
patience=0, flipud=True, cache_wavelet=None,
get_w=False, get_dWx=False):
"""Synchrosqueezed Continuous Wavelet Transform.
Implements the algorithm described in Sec. III of [1].
Uses `wavelet.dtype` precision.
# Arguments:
x: np.ndarray / torch.Tensor
Input vector(s), 1D or 2D. See `help(cwt)`.
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain. See `help(cwt)`.
scales: str['log', 'linear', 'log:maximal', ...] / np.ndarray
CWT scales. See `help(cwt)`.
nv: int / None
Number of voices (wavelets per octave). Suggested >= 16.
fs, t: float, np.ndarray
See `help(_cwt.cwt)`.
ssq_freqs: str['log', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
padtype: str / None
Pad scheme to apply on input. See `help(utils.padsignal)`.
`None` -> no padding.
squeezing: str['sum', 'lebesgue'] / function
See `help(ssqueezing.ssqueeze)`.
maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
Kind of frequency mapping used, determining the range of frequencies
spanned (fm to fM, min to max).
- 'maximal': fm=1/dT, fM=1/(2*dt), always. Data's fundamental
and Nyquist frequencies, determined from `fs` (or `t`).
Other mappings can never span outside this range.
- ('peak', 'energy'): sets fm and fM based on center frequency
associated with `wavelet` at maximum and minimum scale,
respectively. See `help(wavelets.center_frequency)`.
- 'peak': the frequency-domain trimmed bell will have its peak
at Nyquist, meaning all other frequencies are beneath, so each
scale is still correctly resolved but with downscaled energies.
With sufficiently-spanned `scales`, coincides with 'maximal'.
- 'energy': however, the bell's spectral energy is centered
elsewhere, as right-half of bell is partly or entirely trimmed
(left-half can be trimmed too). Use for energy-centric mapping,
which for sufficiently-spanned `scales` will always have lesser
fM (but ~same fM).
- tuple: sets `ssq_freqrange` directly.
difftype: str['trig', 'phase', 'numeric']
Method by which to differentiate Wx (default='trig') to obtain
instantaneous frequencies:
w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
- 'trig': use `dWx`, obtained via trigonometric (frequency-domain
interpolant) differentiation (see `cwt`, `phase_cwt`).
- 'phase': differentiate by taking forward finite-difference of
unwrapped angle of `Wx` (see `phase_cwt`).
- 'numeric': first-, second-, or fourth-order (set by `difforder`)
numeric differentiation (see `phase_cwt_num`).
difforder: int[1, 2, 4]
Order of differentiation for difftype='numeric' (default=4).
gamma: float / None
CWT phase threshold. Sets `w=inf` for small values of `Wx` where
phase computation is unstable and inaccurate (like in DFT):
w[abs(Wx) < beta] = inf
This is used to zero `Wx` where `w=0` in computing `Tx` to ignore
contributions from points with indeterminate phase.
Default = 10 * (machine epsilon) = 10 * np.finfo(np.float64).eps
(or float32)
It is recommended to standardize the input, or at least not
pass a small-valued input, to avoid false filtering by `gamma`,
especially if input obeys a power scaling law
(e.g. `~1/f` with EEG/MEG, and similar with audio).
# TODO warn user if `x.max()` is small?
vectorized: bool (default True)
Whether to vectorize CWT, i.e. compute quantities for all scales at
once, which is faster but uses more memory.
preserve_transform: bool (default None) / None
Whether to return `Wx` as directly output from `cwt` (it might be
altered by `ssqueeze` or `phase_transform`). Uses more memory
per storing extra copy of `Wx`.
- Defaults to True if `'SSQ_GPU' == '0'`, else False.
astensor: bool (default True)
If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
or move them back to CPU & convert to Numpy arrays.
order: int (default 0) / tuple[int]
`order > 0` computes ssq of `cwt` taken with higher-order GMWs.
If tuple, computes ssq of average of `cwt`s taken at each specified
order. See `help(_cwt.cwt_higher_order)`.
nan_checks: bool / None
Checks whether input has `nan` or `inf` values, and zeros them.
`False` saves compute. Doesn't support torch inputs.
Defaults to `True` for NumPy inputs, else `False`.
patience: int / tuple[int, int]
pyFFTW parameter for faster FFT on CPU; see `help(ssqueezepy.FFT)`.
flipud: bool (default True)
See `help(ssqueeze)`.
cache_wavelet: bool (default None) / None
See `help(cwt)`.
get_w, get_dWx: bool (default False)
`get_w`:
True: will compute phase transform separately, assign it to
array `w` and return it.
False: will compute synchrosqueezing directly from `Wx` and
`dWx` without assigning to intermediate array, which is faster
(by 20-30%) and takes less memory.
`get_dWx`:
True: will return dWx
False: discards dWx after computing `w` or synchrosqueezing.
`get_dWx=True` with `get_w=True` uses most memory.
These options do not affect `Tx`.
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
Wx: np.ndarray [na x n]
Continuous Wavelet Transform of `x`, L1-normed (see `cwt`).
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
scales: np.ndarray [na]
Scales associated with rows of `Wx`.
w: np.ndarray [na x n] (if `get_w=True`)
Phase transform for each element of `Wx`.
dWx: [na x n] np.ndarray (if `get_dWx=True`)
See `help(_cwt.cwt)`.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_fw.m
"""
def _process_args(x, scales, fs, t, nv, difftype, difforder, squeezing,
maprange, wavelet, get_w):
if x.ndim == 2 and get_w:
raise NotImplementedError("`get_w=True` unsupported with batched "
"input.")
difforder = _check_ssqueezing_args(squeezing, maprange, wavelet,
difftype, difforder, get_w,
transform='cwt')
if nv is None and not isinstance(scales, np.ndarray):
nv = 32
N = x.shape[-1]
dt, fs, t = _process_fs_and_t(fs, t, N)
return N, dt, fs, difforder, nv
def _phase_transform(Wx, dWx, N, dt, gamma, difftype, difforder):
if difftype == 'trig':
# calculate instantaneous frequency directly from the
# frequency-domain derivative
w = phase_cwt(Wx, dWx, difftype, gamma)
elif difftype == 'phase':
# !!! bad; yields negatives, and forcing abs(w) doesn't help
# calculate inst. freq. from unwrapped phase of CWT
w = phase_cwt(Wx, None, difftype, gamma)
elif difftype == 'numeric':
# !!! tested to be very inaccurate for small scales
# calculate derivative numerically
_, n1, _ = p2up(N)
Wx = Wx[:, (n1 - 4):(n1 + N + 4)]
w = phase_cwt_num(Wx, dt, difforder, gamma)
return Wx, w
N, dt, fs, difforder, nv = _process_args(x, scales, fs, t, nv, difftype,
difforder, squeezing, maprange,
wavelet, get_w)
wavelet = Wavelet._init_if_not_isinstance(wavelet, N=N)
# CWT with higher-order GMWs
if isinstance(order, (tuple, list, range)) or order > 0:
# keep padding for `trigdiff`
kw = dict(wavelet=wavelet, scales=scales, fs=fs, nv=nv,
l1_norm=True, derivative=False, padtype=padtype, rpadded=True,
vectorized=vectorized, astensor=True,
cache_wavelet=cache_wavelet, nan_checks=nan_checks)
_, n1, _ = p2up(N)
average = isinstance(order, (tuple, list, range))
Wx, scales = cwt(x, order=order, average=average, **kw)
dWx = trigdiff(Wx, fs, rpadded=True, N=N, n1=n1)
Wx = Wx[:, n1:n1 + N]
if S.is_tensor(Wx):
Wx = Wx.contiguous()
scales, cwt_scaletype, *_ = process_scales(scales, N, wavelet, nv=nv,
get_params=True)
# regular CWT
if order == 0:
# l1_norm=True to spare a multiplication; for SSQ_CWT L1 & L2 are exactly
# same anyway since we're inverting CWT over time-frequency plane
rpadded = (difftype == 'numeric')
Wx, scales, dWx = cwt(x, wavelet, scales=scales, fs=fs, nv=nv,
l1_norm=True, derivative=True, padtype=padtype,
rpadded=rpadded, vectorized=vectorized,
astensor=True, patience=patience,
cache_wavelet=cache_wavelet, nan_checks=nan_checks)
# make copy of `Wx` if specified
if preserve_transform is None:
preserve_transform = not S.is_tensor(Wx)
if preserve_transform:
_Wx = (Wx.copy() if not S.is_tensor(Wx) else
Wx.detach().clone())
else:
_Wx = Wx
# gamma
if gamma is None:
gamma = 10 * (EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)
# compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
if get_w:
_Wx, w = _phase_transform(_Wx, dWx, N, dt, gamma, difftype, difforder)
_dWx = None # don't use in `ssqueeze`
if not get_dWx:
dWx = None
else:
w = None
_dWx = dWx
# default to same scheme used by `scales`
if ssq_freqs is None:
ssq_freqs = cwt_scaletype
# affects `maprange` computation if non-tuple
was_padded = bool(padtype is not None)
# synchrosqueeze
Tx, ssq_freqs = ssqueeze(_Wx, w, ssq_freqs, scales, fs=fs,
squeezing=squeezing, maprange=maprange,
wavelet=wavelet, gamma=gamma, was_padded=was_padded,
flipud=flipud, dWx=_dWx, transform='cwt')
# postprocessing & return
if difftype == 'numeric':
Wx = Wx[:, 4:-4]
Tx = Tx[:, 4:-4]
w = w[:, 4:-4] if w is not None else None
if not astensor and S.is_tensor(Tx):
Tx, Wx, w, dWx, scales, ssq_freqs = [
g.cpu().numpy() if S.is_tensor(g) else g
for g in (Tx, Wx, w, dWx, scales, ssq_freqs)]
scales = scales.squeeze()
if get_w and get_dWx:
return Tx, Wx, ssq_freqs, scales, w, dWx
elif get_w:
return Tx, Wx, ssq_freqs, scales, w
elif get_dWx:
return Tx, Wx, ssq_freqs, scales, dWx
else:
return Tx, Wx, ssq_freqs, scales
def issq_cwt(Tx, wavelet='gmw', cc=None, cw=None):
"""Inverse synchrosqueezing transform of `Tx` with associated frequencies
in `fs` and curve bands in time-frequency plane specified by `Cs` and
`freqband`. This implements Eq. 15 of [1].
# Arguments:
Tx: np.ndarray
Synchrosqueezed CWT of `x` (see `ssq_cwt`).
(rows=~frequencies, cols=timeshifts)
wavelet: str / tuple[str, dict] / `wavelets.Wavelet`
Wavelet that was used to compute Tx, sampled in Fourier
frequency domain.
- str: name of builtin wavelet. `ssqueezepy.wavs()`
- tuple[str, dict]: name of builtin wavelet and its configs.
E.g. `('morlet', {'mu': 5})`.
- `wavelets.Wavelet` instance. Can use for custom wavelet.
cc, cw: np.ndarray / None
Curve centerpoints, and curve (vertical) widths (bandwidths),
together defining the portion of Tx to invert over to extract
K "components" per Modulation Model:
x_k(t) = A_k(t) cos(phi_k(t)) + res; k=0,...,K-1
where K=len(c)==len(cw), and `res` is residual error (inversion
over portion leftover/uncovered by cc, cw).
None = full inversion.
# Returns:
x: np.ndarray [K x Tx.shape[1]]
Components of reconstructed signal, and residual error.
If cb & cw are None, x.shape == (Tx.shape[1],). See `cb, cw`.
# Example:
Tx, *_ = ssq_cwt(x, 'gmw') # synchrosqueezed CWT
x = issq_cwt(Tx, 'gmw') # reconstruction
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
3. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_cwt_iw.m
"""
cc, cw, full_inverse = _process_component_inversion_args(cc, cw)
if full_inverse:
# Integration over all frequencies recovers original signal
x = Tx.real.sum(axis=0)
else:
x = _invert_components(Tx, cc, cw)
wavelet = Wavelet._init_if_not_isinstance(wavelet)
Css = adm_ssq(wavelet) # admissibility coefficient
# *2 per analytic wavelet & taking real part; Theorem 4.5 [2]
x *= (2 / Css)
return x
def _invert_components(Tx, cc, cw):
# Invert Tx around curve masks in the time-frequency plane to recover
# individual components; last one is the remaining signal
x = np.zeros((cc.shape[1] + 1, cc.shape[0]))
TxRemainder = Tx.copy()
for n in range(cc.shape[1]):
TxMask = np.zeros(Tx.shape, dtype='complex128')
upper_cc = np.clip(cc[:, n] + cw[:, n], 0, len(Tx))
lower_cc = np.clip(cc[:, n] - cw[:, n], 0, len(Tx))
# cc==-1 denotes no curve at that time,
# removing such points from inversion
upper_cc[np.where(cc[:, n] == -1)] = 0
lower_cc[np.where(cc[:, n] == -1)] = 1
for m in range(Tx.shape[1]):
idxs = slice(lower_cc[m], upper_cc[m] + 1)
TxMask[idxs, m] = Tx[idxs, m]
TxRemainder[idxs, m] = 0
x[n] = TxMask.real.sum(axis=0).T
x[n + 1] = TxRemainder.real.sum(axis=0).T
return x
def _process_component_inversion_args(cc, cw):
if (cc is None) and (cw is None):
full_inverse = True
else:
full_inverse = False
if cc.ndim == 1:
cc = cc.reshape(-1, 1)
if cw.ndim == 1:
cw = cw.reshape(-1, 1)
cc = cc.astype('int32')
cw = cw.astype('int32')
return cc, cw, full_inverse
def phase_cwt(Wx, dWx, difftype='trig', gamma=None, parallel=None):
"""Calculate the phase transform at each (scale, time) pair:
w[a, b] = Im((1/2pi) * d/db (Wx[a,b]) / Wx[a,b])
See above Eq 20.3 in [1], or Eq 13 in [2].
# Arguments:
Wx: np.ndarray
CWT of `x` (see `help(cwt)`).
dWx: np.ndarray.
Time-derivative of `Wx`, computed via frequency-domain differentiation
(effectively, derivative of trigonometric interpolation; see [4]).
difftype: str['trig', 'phase']
Method by which to differentiate Wx (default='trig') to obtain
instantaneous frequencies:
w(a,b) = Im( (1/2pi) * (1/Wx(a,b)) * d/db[Wx(a,b)] )
- 'trig': using `dWx, the time-derivative of the CWT of `x`,
computed via frequency-domain differentiation (effectively,
derivative of trigonometric interpolation; see [4]). Implements
as described in Sec IIIB of [2].
- 'phase': differentiate by taking forward finite-difference of
unwrapped angle of `Wx`. Does not support GPU or multi-threaded
CPU execution.
gamma: float / None
See `help(ssqueezepy.ssq_cwt)`.
parallel: bool (default `ssqueezepy.IS_PARALLEL()`)
Whether to use multiple CPU threads (ignored if input is tensor).
# Returns:
w: np.ndarray
Phase transform for each element of `Wx`. w.shape == Wx.shape.
# References:
1. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
4. The Exponential Accuracy of Fourier and Chebyshev Differencing Methods.
E. Tadmor.
http://webhome.auburn.edu/~jzl0097/teaching/math_8970/Tadmor_86.pdf
5. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
phase_cwt.m
"""
def _process_input(Wx, parallel, gamma):
S.warn_if_tensor_and_par(Wx, parallel)
gpu = S.is_tensor(Wx)
if difftype != 'trig':
if gpu:
raise ValueError("`difftype != 'trig'` unsupported with tensor "
"inputs.")
elif parallel:
raise ValueError("`difftype != 'trig'` unsupported with "
"`parallel`.")
if gamma is None:
gamma = np.sqrt(EPS64 if S.is_dtype(Wx, 'complex128') else EPS32)
return gamma, gpu
gamma, gpu = _process_input(Wx, parallel, gamma)
if difftype == 'trig':
if gpu:
w = phase_cwt_gpu(Wx, dWx, gamma)
else:
w = phase_cwt_cpu(Wx, dWx, gamma, parallel)
elif difftype == 'phase':
# TODO gives bad results; shouldn't we divide by Wx?
u = np.unwrap(np.angle(Wx)).T
w = np.vstack([np.diff(u, axis=0), u[-1] - u[0]]).T / (2*pi)
np.abs(w, out=w)
replace_under_abs(w, ref=Wx, value=gamma, replacement=np.inf)
else:
raise ValueError(f"unsupported `difftype` '{difftype}'; must be one of "
"'trig', 'phase'.")
return w
def phase_cwt_num(Wx, dt, difforder=4, gamma=None):
"""Calculate the phase transform at each (scale, time) pair:
w[a, b] = Im((1/2pi) * d/db (Wx[a,b]) / Wx[a,b])
Uses numeric differentiation (1st, 2nd, or 4th order). See above Eq 20.3
in [1], or Eq 13 in [2].
# Arguments:
Wx: np.ndarray
CWT of `x` (see `cwt`).
dt: float
Sampling period (e.g. t[1] - t[0]).
difforder: int[1, 2, 4]
Order of differentiation (default=4).
gamma: float
See `help(ssqueezepy.ssq_cwt)`.
# Returns:
w: np.ndarray
Phase transform via demodulated FM-estimates. w.shape == Wx.shape.
# References:
1. A Nonlinear squeezing of the CWT Based on Auditory Nerve Models.
I. Daubechies, S. Maes.
https://services.math.duke.edu/%7Eingrid/publications/DM96.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
phase_cwt_num.m
"""
# unreliable; bad results on high freq pure tones
def _differentiate(Wx, dt):
if difforder in (2, 4):
# append for differentiating
Wxr = np.hstack([Wx[:, -2:], Wx, Wx[:, :2]])
if difforder == 1:
w = np.hstack([Wx[:, 1:] - Wx[:, :-1],
Wx[:, :1] - Wx[:, -1:]])
w /= dt
elif difforder == 2:
# calculate 2nd-order forward difference
w = -Wxr[:, 4:] + 4 * Wxr[:, 3:-1] - 3 * Wxr[:, 2:-2]
w /= (2 * dt)
elif difforder == 4:
# calculate 4th-order central difference
w = -Wxr[:, 4:]
w += Wxr[:, 3:-1] * 8
w -= Wxr[:, 1:-3] * 8
w += Wxr[:, 0:-4]
w /= (12 * dt)
return w
if difforder not in (1, 2, 4):
raise ValueError("`difforder` must be one of: 1, 2, 4 "
"(got %s)" % difforder)
w = _differentiate(Wx, dt)
# calculate inst. freq for each scale
# 2*pi norm per discretized inverse FT rather than inverse DFT
w = np.real(-1j * w / Wx) / (2*pi)
# epsilon from Daubechies, H-T Wu, et al.
# gamma from Brevdo, H-T Wu, et al.
gamma = gamma or 10 * (EPS64 if Wx.dtype == np.cfloat else EPS32)
w[np.abs(Wx) < gamma] = np.inf
# see `phase_cwt`, though negatives may no longer be in minority
w = np.abs(w)
return w
| 24,265 | 40.059222 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/wavelets.py | # -*- coding: utf-8 -*-
import numpy as np
import gc
from numba import jit
from types import FunctionType
from scipy import integrate
from .algos import find_maximum
from .configs import gdefaults, USE_GPU, IS_PARALLEL
from .utils import backend as S
from .utils.fft_utils import ifft, fftshift, ifftshift
from .utils.backend import torch, Q, atleast_1d
class Wavelet():
"""Central wavelet class. `__call__` computes Fourier frequency-domain
wavelet, `psih`, `.psifn` computes time-domain wavelet, `psi`.
`Wavelet.SUPPORTED` for names of built-in wavelets passable to `__init__()`;
`Wavelet.VISUALS` for names of visualizations passable to `viz()`.
`viz()` to run visuals, `info()` to print relevant wavelet info.
# Arguments:
wavelet: str / tuple[str, dict] /FunctionType
Name of supported wavelet (must be one of `Wavelet.SUPPORTED`)
or custom function. Or tuple, name of wavelet and its configs,
e.g. `('morlet', {'mu': 5})`.
N: int
Default length of wavelet.
dtype: str / type (np.dtype) / None
dtype at which wavelets are generated; can't change after __init__.
Must be one of `Wavelet.DTYPES`. If None, uses value from
`configs.ini`, global (if set) or wavelet-specific.
'float32' is unsupported for GMW's `norm='energy'` and will be
overridden by 'float64' (with a warning if it was passed to __init__).
# Example:
wavelet = Wavelet(('morlet', {'mu': 7}), N=1024)
plt.plot(wavelet(scale=8))
"""
SUPPORTED = {'gmw', 'morlet', 'bump', 'cmhat', 'hhhat'}
VISUALS = {'time-frequency', 'heatmap', 'waveforms', 'filterbank',
'harea', 'std_t', 'std_w', 'anim:time-frequency'}
DTYPES = {'float32', 'float64'}
# TODO ensure everything is accounted
# Attributes whose data is stored on GPU (if env flag 'SSQ_GPU' == '1')
ON_GPU = {'xi', '_Psih', '_Psih_scale'}
# Time-frequency attributes
TF_PROPS = {'wc', 'wc_ct', 'scalec_ct', 'std_t', 'std_w',
'std_t_d', 'std_w_d'}
def __init__(self, wavelet='gmw', N=1024, dtype=None):
self._dtype = self._process_dtype(dtype, as_str=True
) if dtype is not None else None
self._validate_and_set_wavelet(wavelet)
self.N = N # also sets _xi
#### Main methods / properties ###########################################
def __call__(self, w=None, *, scale=None, N=None, nohalf=True, imag_th=1e-8):
"""wavelet(w) if called with positional argument, w = float or array, else
wavelet(scale * xi), where `xi` is recomputed if `N` is not None.
`nohalf=False` (default=True) halves the Nyquist bin for even-length
psih to ensure proper time-domain wavelet decay and analyticity:
https://github.com/jonathanlilly/jLab/issues/13
If evaluated wavelet's imaginary component is less than `imag_th`*(sum of
real), will drop it; set to None to disable.
"""
if w is not None:
psih = self.fn(S.asarray(w, self.dtype))
else:
psih = self.fn(self.xifn(scale, N))
if not nohalf:
psih = self._halve_nyquist(psih)
if (S.is_dtype(psih, ('complex64', 'complex128')) and
(imag_th is not None) and
(psih.imag.sum() / psih.real.sum() < imag_th)):
psih = psih.real
return psih
@staticmethod
def _halve_nyquist(psih):
"""https://github.com/jonathanlilly/jLab/issues/13"""
N = len(psih) if psih.ndim == 1 else psih.shape[1]
if N % 2 == 0:
if psih.ndim == 1:
psih[N//2] /= 2
else:
psih[:, N//2] /= 2
return psih
def psifn(self, w=None, *, scale=None, N=None):
"""Compute time-domain wavelet; simply `ifft(psih)` with appropriate
extra steps.
"""
psih = self(w, scale=scale, N=N, nohalf=False)
if psih.ndim in (1, 2):
pn = (-1)**S.arange(psih.shape[-1], dtype=self.dtype)
else:
raise ValueError("`psih` must yield to 1D or 2D (got %s)" % psih.ndim)
# * pn = freq-domain spectral reversal to center time-domain wavelet
psi = ifft(psih * pn, axis=-1)
return psi
def xifn(self, scale=None, N=None):
"""Computes `xi`, radian frequencies at which `wavelet` is sampled,
as fraction of sampling frequency: 0 to pi & -pi to 0, scaled by
`scale` - or more precisely:
N=128: [0, 1, 2, ..., 64, -63, -62, ..., -1] * (2*pi / N) * scale
N=129: [0, 1, 2, ..., 64, -64, -63, ..., -1] * (2*pi / N) * scale
"""
if isinstance(scale, (np.ndarray, torch.Tensor)) and len(scale) > 1:
if scale.squeeze().ndim > 1:
raise ValueError("2D `scale` unsupported")
elif scale.ndim == 1:
scale = scale.reshape(-1, 1) # add dim for proper broadcast
elif scale is None:
scale = 1.
scale = S.asarray(scale, dtype=self.dtype)
if N is None:
xi = scale * self.xi
else:
xi = scale * S.asarray(_xifn(scale=1., N=N,
dtype=getattr(np, self.dtype)))
return xi
def Psih(self, scale=None, N=None, nohalf=True):
"""Return pre-computed `psih` at scale(s) `scale` of length `N` if
same `scale` & `N` were passed previously, else compute anew.
`dtype` will override `self.dtype` if not None.
If both `scale` & `N` are None, will return previously computed `Psih`.
"""
pN = getattr(self, '_Psih_N', S.array([-1]))
ps = getattr(self, '_Psih_scale', S.array([-1]))
N_is_None = N is None
N = N or self.N
if ((scale is None and N_is_None) or
(N == pN and (len(scale) == len(ps) and S.allclose(scale, ps)))):
return self._Psih
# first empty existing to free memory
if getattr(self, '_Psih', None) is not None:
self._Psih = None
gc.collect()
self._Psih = self(scale=scale, N=N, nohalf=nohalf)
self._Psih_N = N
self._Psih_scale = scale
return self._Psih
@property
def N(self):
"""Default value used when `N` is not passed to a `Wavelet` method."""
return self._N
@N.setter
def N(self, value):
"""Ensure `xi` always matches `N`."""
self._N = value
self._xi = S.asarray(_xifn(scale=1, N=value,
dtype=getattr(np, self.dtype)))
@property
def xi(self):
"""`xi` computed at `scale=1` and `N=self.N`. See `help(Wavelet.xifn)`."""
return self._xi
@property
def dtype(self):
"""dtype at which psih and psi are generated; can't change post-init."""
return self._dtype
#### Properties ##########################################################
@property
def name(self):
"""Name of underlying freq-domain function, processed by
`wavelets._fn_to_name`.
"""
return _fn_to_name(self.fn)
@property
def config_str(self):
"""`self.config` formatted into a nice string."""
if self.config:
cfg = ""
for k, v in self.config.items():
if k in ('norm', 'centered_scale', 'dtype'):
# too long, no real need
continue
elif k == 'order' and v == 0:
# no need to include base wavelet's order
continue
elif isinstance(v, float) and v.is_integer():
v = int(v)
cfg += "{}={}, ".format(k, v)
cfg = cfg.rstrip(', ')
else:
cfg = "Default configs"
return cfg
@property
def wc(self):
"""Energy center frequency at scale=scalec_ct [(radians*cycles)/samples]
Ideally we'd compute at `scale=1`, but that's trouble for 'energy' center
frequency; see `help(wavelets.center_frequency)`. Away from scale
extrema, 'energy' and 'peak' are same for bell-like |wavelet(w)|^2.
Reported as "dimensional" in `info()` since it's tied to same `scale`
used for computing `std_t_d` & `std_t_w`
"""
if getattr(self, '_wc', None) is None:
self._wc = center_frequency(self, scale=self.scalec_ct, N=self.N,
kind='energy')
return self._wc
@property
def wc_ct(self):
"""'True' radian peak center frequency, i.e. `w` which maximizes the
underlying continuous-time function. Can be used to find `scale`
that centers the wavelet anywhere from 0 to pi in discrete space.
Reported as "nondimensional" in `info()` since it's scale-decoupled.
"""
if getattr(self, '_wc_ct', None) is None:
self._wc_ct = center_frequency(self, kind='peak-ct', N=self.N)
return self._wc_ct
@property
def scalec_ct(self):
"""'Center scale' in sense of `wc_ct`, making wavelet peak at pi/4.
See `help(Wavelet.wc_ct)`.
"""
if getattr(self, '_scalec_ct', None) is None:
self._scalec_ct = (4/pi) * self.wc_ct
return self._scalec_ct
@property
def std_t(self):
"""Non-dimensional time resolution"""
if getattr(self, '_std_t', None) is None:
# scale=10 arbitrarily chosen to yield good compute-accurary
self._std_t = time_resolution(self, scale=self.scalec_ct, N=self.N,
nondim=True)
return self._std_t
@property
def std_w(self):
"""Non-dimensional frequency resolution (radian)"""
if getattr(self, '_std_w', None) is None:
self._std_w = freq_resolution(self, scale=self.scalec_ct, N=self.N,
nondim=True)
return self._std_w
@property
def std_f(self):
"""Non-dimensional frequency resolution (cyclic)"""
return self.std_w / (2*pi)
@property
def harea(self):
"""Heisenberg area: std_t * std_w >= 0.5"""
return self.std_t * self.std_w
@property
def std_t_d(self):
"""Dimensional time resolution [samples/(cycles*radians)]"""
if getattr(self, '_std_t_d', None) is None:
self._std_t_d = time_resolution(self, scale=self.scalec_ct, N=self.N,
nondim=False)
return self._std_t_d
@property
def std_w_d(self):
"""Dimensional frequency resolution [(cycles*radians)/samples]"""
if getattr(self, '_std_w_d', None) is None:
self._std_w_d = freq_resolution(self, scale=self.scalec_ct, N=self.N,
nondim=False)
return self._std_w_d
@property
def std_f_d(self):
"""Dimensional frequency resolution [cycles/samples]"""
return self.std_w_d / (2*pi)
#### Misc ################################################################
def info(self, nondim=True, reset=False):
"""Prints time & frequency resolution quantities. Refer to pertinent
methods' docstrings on how each quantity is computed, and to
tests/props_test.py on various dependences (e.g. `std_t` on `N`).
If `reset`, will recompute all quantities (can be used with e.g. new `N`).
See `help(Wavelet.x)`, x: `std_t, std_w, wc, wc_ct, scalec_ct`.
Detailed overview: https://dsp.stackexchange.com/q/72042/50076
"""
if reset:
self.reset_properties()
if nondim:
cfg = self.config_str
dim_t = dim_w = "non-dimensional"
std_t, std_w = self.std_t, self.std_w
wc_txt = "wc_ct, (cycles*radians)"
wc = self.wc_ct
else:
cfg = self.config_str + " -- scale=%.2f" % self.scalec_ct
dim_t = "samples/(cycles*radians)"
dim_w = "(cycles*radians)/samples"
std_t, std_w = self.std_t_d, self.std_w_d
wc_txt = "wc, (cycles*radians)/samples; %.2f" % self.scalec_ct
wc = self.wc
harea = std_t * std_w
print(("{} wavelet\n"
"\t{}\n"
"\tCenter frequency: {:<10.6f} [{}]\n"
"\tTime resolution: {:<10.6f} [std_t, {}]\n"
"\tFreq resolution: {:<10.6f} [std_w, {}]\n"
"\tHeisenberg area: {:.12f}"
).format(self.name, cfg, wc, wc_txt,
std_t, dim_t, std_w, dim_w, harea))
def reset_properties(self):
"""Reset time-frequency properties (`Wavelet.TF_PROPS`), i.e.
recompute for current `self.N`.
"""
for name in self.TF_PROPS:
setattr(self, f'_{name}', None)
getattr(self, name) # trigger recomputation
def viz(self, name='overview', **kw):
"""`Wavelet.VISUALS` for list of supported `name`s."""
if name == 'overview':
for name in ('heatmap', 'harea', 'filterbank', 'time-frequency'):
kw['N'] = kw.get('N', self.N)
self._viz(name, **kw)
elif name not in Wavelet.VISUALS:
raise ValueError(f"visual '{name}' not supported; must be one of: "
+ ', '.join(Wavelet.VISUALS))
else:
self._viz(name, **kw)
def _viz(self, name, **kw):
kw['wavelet'] = kw.get('wavelet', self)
kw['N'] = kw.get('N', self.N)
{
'heatmap': visuals.wavelet_heatmap,
'waveforms': visuals.wavelet_waveforms,
'filterbank': visuals.wavelet_filterbank,
'harea': visuals.sweep_harea,
'std_t': visuals.sweep_std_t,
'std_w': visuals.sweep_std_w,
'time-frequency': visuals.wavelet_tf,
'anim:time-frequency': visuals.wavelet_tf_anim,
}[name](**kw)
def _desc(self, N=None, scale=None, show_N=True):
"""Nicely-formatted parameter summary, used in other methods"""
if self.config_str != "Default configs":
ptxt = self.config_str.rstrip(', ') + ', '
else:
ptxt = ""
N = N or self.N
if scale is None:
title = "{} wavelet | {}N={}".format(self.name, ptxt, N)
else:
title = "{} wavelet | {}scale={:.2f}, N={}".format(
self.name, ptxt, scale, N)
if not show_N:
title = title[:title.find(f"N={N}")].rstrip(', ')
return title
@classmethod
def _process_dtype(self, dtype, as_str=None):
"""Ensures `dtype` is supported, and converts per `as_str` (if True,
numpy/torch -> str, else vice versa; if None, returns as-is).
"""
if isinstance(dtype, str):
assert_is_one_of(dtype, 'dtype', Wavelet.DTYPES)
if not as_str:
return getattr(Q, dtype)
elif not isinstance(dtype, (type, np.dtype, torch.dtype)):
raise TypeError("`dtype` must be string or type (np./torch.dtype) "
"(got %s)" % dtype)
return dtype if not as_str else str(dtype).split('.')[-1]
#### Init ################################################################
@classmethod
def _init_if_not_isinstance(self, wavelet, **kw):
"""Circumvents type change from IPython's super-/auto-reload,
but first checks with usual isinstance."""
if isinstance_by_name(wavelet, Wavelet):
return wavelet
return Wavelet(wavelet, **kw)
def _validate_and_set_wavelet(self, wavelet):
def process_dtype(wavopts, user_passed_float32):
"""Handles GMW's `norm='energy'` w/ dtype='float32'."""
if wavopts.get('norm', 'bandpass') == 'energy':
if user_passed_float32:
WARN("`norm='energy'` w/ `dtype='float32'` is unsupported; "
"will use 'float64' instead.")
wavopts['dtype'] = 'float64'
self._dtype = 'float64'
elif self.dtype is not None:
wavopts['dtype'] = self.dtype
def set_dtype_from_out():
# 32 will promote to 64 if other params are 64
out_dtype = self.fn(S.asarray([1.], dtype='float32')).dtype
if any(tp in str(out_dtype) for tp in ('complex64', 'complex128')):
# 'bump' wavelet case
out_dtype = ('float32' if 'complex64' in str(out_dtype) else
'float64')
self._dtype = self._process_dtype(out_dtype, as_str=True)
if isinstance(wavelet, FunctionType):
self.fn = wavelet
set_dtype_from_out()
self.config = {}
return
errmsg = ("`wavelet` must be one of: (1) string name of supported "
"wavelet; (2) tuple of (1) and dict of wavelet parameters "
"(e.g. {'mu': 5}); (3) custom function taking `scale * xi` "
"as input. (got: %s)" % str(wavelet))
if not isinstance(wavelet, (tuple, str)):
raise TypeError(errmsg)
elif isinstance(wavelet, tuple):
if not (len(wavelet) == 2 and isinstance(wavelet[1], dict)):
raise TypeError(errmsg)
wavelet, wavopts = wavelet
elif isinstance(wavelet, str):
wavopts = {}
user_passed_float32 = any('float32' in str(t)
for t in (self.dtype, wavopts.get('dtype', 0)))
if isinstance(wavelet, str):
wavelet = wavelet.lower()
module = 'wavelets' if wavelet != 'gmw' else '_gmw'
wavopts = gdefaults(f"{module}.{wavelet}", get_all=True,
as_dict=True, default_order=True, **wavopts)
process_dtype(wavopts, user_passed_float32)
assert_is_one_of(wavelet, 'wavelet', Wavelet.SUPPORTED)
self.fn = {
'gmw': gmw,
'morlet': morlet,
'bump': bump,
'cmhat': cmhat,
'hhhat': hhhat,
}[wavelet](**wavopts)
if self.dtype is None:
set_dtype_from_out()
self.config = wavopts
@jit(nopython=True, cache=True)
def _xifn(scale, N, dtype=np.float64):
"""N=128: [0, 1, 2, ..., 64, -63, -62, ..., -1] * (2*pi / N) * scale
N=129: [0, 1, 2, ..., 64, -64, -63, ..., -1] * (2*pi / N) * scale
"""
xi = np.zeros(N, dtype=dtype)
h = scale * (2 * pi) / N
for i in range(N // 2 + 1):
xi[i] = i * h
for i in range(N // 2 + 1, N):
xi[i] = (i - N) * h
return xi
def _process_params_dtype(*params, dtype, auto_gpu=True):
if dtype is None:
dtype = S.asarray(params[0]).dtype
if auto_gpu:
dtype = Wavelet._process_dtype(dtype, as_str=True)
params = [S.astype(S.asarray(p), dtype) for p in params]
else:
dtype = Wavelet._process_dtype(dtype, as_str=True)
params = [np.asarray(p).astype(dtype) for p in params]
return params if len(params) > 1 else params[0]
#### Wavelet functions ######################################################
def morlet(mu=None, dtype=None):
"""Higher `mu` -> greater frequency, lesser time resolution.
Recommended range: 4 to 16. For `mu > 6` the wavelet is almsot exactly
Gaussian for most scales, providing maximum joint resolution.
`mu=13.4` matches Generalized Morse Wavelets' `(beta, gamma) = (3, 60)`.
For full correspondence see `help(_gmw.gmw)`.
https://en.wikipedia.org/wiki/Morlet_wavelet#Definition
https://www.desmos.com/calculator/0nslu0qivv
"""
mu, dtype = gdefaults('wavelets.morlet', mu=mu, dtype=dtype)
cs = (1 + np.exp(-mu**2) - 2 * np.exp(-3/4 * mu**2)) ** (-.5)
ks = np.exp(-.5 * mu**2)
mu, cs, ks = _process_params_dtype(mu, cs, ks, dtype=dtype)
# all other consts go to `C`; needed for numba.jit to not type promote to
# float64 due to Python floats (e.g. `2.`)
C = S.asarray([-.5, np.sqrt(2) * cs * pi**.25], dtype=dtype)
fn = _morlet_gpu if USE_GPU() else (_morlet_par if IS_PARALLEL() else _morlet)
return lambda w: fn(atleast_1d(w, dtype), mu, ks, C)
@jit(nopython=True, cache=True)
def _morlet(w, mu, ks, C):
return C[1]* (np.exp(C[0] * (w - mu)**2) - ks * np.exp(C[0] * w**2))
@jit(nopython=True, cache=True, parallel=True)
def _morlet_par(w, mu, ks, C):
return C[1]* (np.exp(C[0] * (w - mu)**2) - ks * np.exp(C[0] * w**2))
def _morlet_gpu(w, mu, ks, C):
return C[1] * (torch.exp(C[0] * (w - mu)**2) - ks * torch.exp(C[0] * w**2))
def bump(mu=None, s=None, om=None, dtype=None):
"""Bump wavelet.
https://www.mathworks.com/help/wavelet/gs/choose-a-wavelet.html
"""
mu, s, om, dtype = gdefaults('wavelets.bump', mu=mu, s=s, om=om, dtype=dtype)
if 'float' in dtype:
dtype = 'complex' + str(2 * int(dtype.strip('float')))
mu, s, om = [S.asarray(g, dtype) for g in (mu, s, om)]
C = S.asarray([2 * pi * 1j * om, .443993816053287], dtype=dtype)
C0 = S.asarray(.999, dtype='float' + str(int(dtype.strip('complex'))//2))
fn = _bump_gpu if USE_GPU() else (_bump_par if IS_PARALLEL() else _bump)
return lambda w: fn(atleast_1d(w, dtype), (atleast_1d(w, dtype) - mu) / s,
s, C, C0)
@jit(nopython=True, cache=True)
def _bump(w, _w, s, C, C0):
return np.exp(C[0] * w) / s * (
np.abs(_w) < C0) * np.exp(
-1 / (1 - (_w * (np.abs(_w) < C0))**2)) / C[1]
@jit(nopython=True, cache=True, parallel=True)
def _bump_par(w, _w, s, C, C0):
return np.exp(C[0] * w) / s * (
np.abs(_w) < C0) * np.exp(
-1 / (1 - (_w * (np.abs(_w) < C0))**2)) / C[1]
def _bump_gpu(w, _w, s, C, C0):
return torch.exp(C[0] * w) / s * (
torch.abs(_w) < C0) * torch.exp(
-1 / (1 - (_w * (torch.abs(_w) < C0))**2)) / C[1]
def cmhat(mu=None, s=None, dtype=None):
"""Complex Mexican Hat wavelet.
https://en.wikipedia.org/wiki/Complex_mexican_hat_wavelet
"""
mu, s, dtype = gdefaults('wavelets.cmhat', mu=mu, s=s, dtype=dtype)
mu, s = _process_params_dtype(mu, s, dtype=dtype)
C = S.asarray([5/2, 2 * np.sqrt(2/3) * pi**(-1/4)], dtype=dtype)
fn = _cmhat_gpu if USE_GPU() else (_cmhat_par if IS_PARALLEL() else _cmhat)
return lambda w: fn(atleast_1d(w, dtype) - mu, s, C)
@jit(nopython=True, cache=True)
def _cmhat(_w, s, C):
return C[1] * (s**C[0] * _w**2 * np.exp(-s**2 * _w**2 / 2) * (_w >= 0))
@jit(nopython=True, cache=True, parallel=True)
def _cmhat_par(_w, s, C):
return C[1] * (s**C[0] * _w**2 * np.exp(-s**2 * _w**2 / 2) * (_w >= 0))
def _cmhat_gpu(_w, s, C):
return C[1] * (s**C[0] * _w**2 * torch.exp(-s**2 * _w**2 / 2) * (_w >= 0))
def hhhat(mu=None, dtype=None):
"""Hilbert analytic function of Hermitian Hat."""
mu, dtype = gdefaults('wavelets.hhhat', mu=mu, dtype=dtype)
mu = _process_params_dtype(mu, dtype=dtype)
C = S.asarray([-1/2, 2 / np.sqrt(5) * pi**(-1/4)], dtype=dtype)
fn = _hhhat_gpu if USE_GPU() else (_hhhat_par if IS_PARALLEL() else _hhhat)
return lambda w: fn(atleast_1d(w, dtype) - mu, C)
@jit(nopython=True, cache=True)
def _hhhat(_w, C):
return C[1] * (_w * (1 + _w) * np.exp(C[0] * _w**2)) * (1 + np.sign(_w))
@jit(nopython=True, cache=True, parallel=True)
def _hhhat_par(_w, C):
return C[1] * (_w * (1 + _w) * np.exp(C[0] * _w**2)) * (1 + np.sign(_w))
def _hhhat_gpu(_w, C):
return C[1] * (_w * (1 + _w) * torch.exp(C[0] * _w**2)) * (1 + torch.sign(_w))
#### Wavelet properties ######################################################
def center_frequency(wavelet, scale=None, N=1024, kind='energy', force_int=None,
viz=False):
"""Center frequency (radian) of `wavelet`, either 'energy', 'peak',
or 'peak-ct'.
Detailed overviews:
(1) https://dsp.stackexchange.com/a/76371/50076
(2) https://dsp.stackexchange.com/q/72042/50076
**Note**: implementations of `center_frequency`, `time_resolution`, and
`freq_resolution` are discretized approximations of underlying
continuous-time parameters. This is a flawed approach (see (1)).
- Caution is advised for scales near minimum and maximim (obtained via
`cwt_scalebounds(..., preset='maximal')`), where inaccuracies may be
significant.
- For intermediate scales and sufficiently large N (>=1024), the methods
are reliable. May improve in the future
# Arguments
wavelet: wavelets.Wavelet
scale: float / None
Scale at which to compute `wc`; ignored if `kind='peak-ct'`.
N: int
Length of wavelet.
kind: str['energy', 'peak', 'peak-ct']
- 'energy': weighted mean of wavelet energy, or energy expectation;
Eq 4.52 of [1]:
wc_1 = int w |wavelet(w)|^2 dw 0..inf
wc_scale = int (scale*w) |wavelet(scale*w)|^2 dw 0..inf
= wc_1 / scale
- 'peak': value of `w` at which `wavelet` at `scale` peaks
(is maximum) in discrete time, i.e. constrained 0 to pi.
- 'peak-ct': value of `w` at which `wavelet` peaks (without `scale`,
i.e. `scale=1`), i.e. peak location of the continuous-time function.
Can be used to find `scale` at which `wavelet` is most well-behaved,
e.g. at eighth of sampling frequency (centered between 0 and fs/4).
- 'energy' == 'peak' for wavelets exactly even-symmetric about mode
(peak location)
force_int: bool / None
Relevant only if `kind='energy'`, then defaulting to True. Set to
False to compute via formula - i.e. first integrate at a
"well-behaved" scale, then rescale. For intermediate scales, this
won't yield much difference. For extremes, it matches the
continuous-time results closer - but this isn't recommended, as it
overlooks limitations imposed by discretization (trimmed/undersampled
freq-domain bell).
viz: bool (default False)
Whether to visualize obtained center frequency.
**Misc**
For very high scales, 'energy' w/ `force_int=True` will match 'peak'; for
very low scales, 'energy' will always be less than 'peak'.
To convert to Hz:
wc [(cycles*radians)/samples] / (2pi [radians]) * fs [samples/second]
= fc [cycles/second]
See tests/props_test.py for further info.
# References
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
"""
def _viz(wc, params):
w, psih, apsih2 = params
_w = w[N//2-1:]; _psih = psih[N//2-1:]; _apsih2 = apsih2[N//2-1:]
wc = wc if (kind != 'peak-ct') else pi/4
vline = (wc, dict(color='tab:red', linestyle='--'))
plot(_w, _psih, show=1, vlines=vline,
title="psih(w)+ (frequency-domain wavelet, pos half)")
plot(_w, _w * _apsih2, show=1,
title="w^2 |psih(w)+|^2 (used to compute wc)")
print("wc={}".format(wc))
def _params(wavelet, scale, N):
w = S.asarray(aifftshift(_xifn(1, N)))
psih = asnumpy(wavelet(S.asarray(scale) * w))
apsih2 = np.abs(psih)**2
w = asnumpy(w)
return w, psih, apsih2
def _energy_wc(wavelet, scale, N, force_int):
use_formula = not force_int
if use_formula:
scale_orig = scale
wc_ct = _peak_ct_wc(wavelet, N)[0]
scale = (4/pi) * wc_ct
w, psih, apsih2 = _params(wavelet, scale, N)
wc = (integrate.trapz(apsih2 * w) /
integrate.trapz(apsih2))
if use_formula:
wc *= (scale / scale_orig)
return float(wc), (w, psih, apsih2)
def _peak_wc(wavelet, scale, N):
w, psih, apsih2 = _params(wavelet, scale, N)
wc = w[np.argmax(apsih2)]
return float(wc), (w, psih, apsih2)
def _peak_ct_wc(wavelet, N):
wc, _ = find_maximum(wavelet.fn)
# need `scale` such that `wavelet` peaks at `scale * xi.max()/4`
# thus: `wc = scale * (pi/2)` --> `scale = (4/pi)*wc`
scale = S.asarray((4/pi) * wc)
w, psih, apsih2 = _params(wavelet, scale, N)
return float(wc), (w, psih, apsih2)
if force_int and 'peak' in kind:
NOTE("`force_int` ignored with 'peak' in `kind`")
assert_is_one_of(kind, 'kind', ('energy', 'peak', 'peak-ct'))
if kind == 'peak-ct' and scale is not None:
NOTE("`scale` ignored with `peak = 'peak-ct'`")
if scale is None and kind != 'peak-ct':
# see _peak_ct_wc
wc, _ = find_maximum(wavelet.fn)
scale = (4/pi) * wc
wavelet = Wavelet._init_if_not_isinstance(wavelet)
if kind == 'energy':
force_int = force_int or True
wc, params = _energy_wc(wavelet, scale, N, force_int)
elif kind == 'peak':
wc, params = _peak_wc(wavelet, scale, N)
elif kind == 'peak-ct':
wc, params = _peak_ct_wc(wavelet, N)
if viz:
_viz(wc, params)
return wc
def freq_resolution(wavelet, scale=10, N=1024, nondim=True, force_int=True,
viz=False):
"""Compute wavelet frequency width (std_w) for a given scale and N; larger N
-> less discretization error, but same N as in application works best
(larger will be "too accurate" and misrepresent true discretized values).
`nondim` will divide by peak center frequency and return unitless quantity.
Eq 22 in [1], Sec 4.3.2 in [2].
Detailed overview: https://dsp.stackexchange.com/q/72042/50076
See tests/props_test.py for further info.
# References
1. Higher-Order Properties of Analytic Wavelets.
J. M. Lilly, S. C. Olhede.
https://sci-hub.st/10.1109/TSP.2008.2007607
2. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
"""
def _viz():
_w = w[N//2-1:]; _psih = psih[N//2-1:]; _apsih2 = apsih2[N//2-1:]
plot(_w, _psih, show=1,
title="psih(w)+ (frequency-domain wavelet, pos half)")
plot(_w, (_w-wce)**2 * _apsih2, show=1,
title="(w-wc)^2 |psih(w)+|^2 (used to compute var_w)")
print("std_w={}".format(std_w))
if use_formula:
NOTE(f"integrated at scale={scale} then used formula; "
"see help(freq_resolution) and try force_int=True")
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# formula criterion not optimal; thresholds will vary by wavelet config
use_formula = ((scale < 4 or scale > N / 5) and not force_int)
if use_formula:
scale_orig = scale
scale = (4/pi) * wavelet.wc_ct
w = aifftshift(_xifn(1, N))
psih = asnumpy(wavelet(scale * w))
wce = center_frequency(wavelet, scale, force_int=force_int, kind='energy')
apsih2 = np.abs(psih)**2
var_w = (integrate.trapz((w - wce)**2 * apsih2, w) /
integrate.trapz(apsih2, w))
std_w = np.sqrt(var_w)
if use_formula:
std_w *= (scale / scale_orig)
scale = scale_orig
if nondim:
wcp = center_frequency(wavelet, scale, kind='peak')
std_w /= wcp
if viz:
_viz()
return std_w
def time_resolution(wavelet, scale=10, N=1024, min_decay=1e3, max_mult=2,
min_mult=2, force_int=True, nondim=True, viz=False):
"""Compute wavelet time resolution for a given scale and N; larger N
-> less discretization error, but same N as in application should suffice.
Eq 21 in [1], Sec 4.3.2 in [2].
Detailed overview: https://dsp.stackexchange.com/q/72042/50076
`nondim` will multiply by peak center frequency and return unitless quantity.
______________________________________________________________________________
**Interpretation**
Measures time-span of 68% of wavelet's energy (1 stdev for Gauss-shaped
|psi(t)|^2). Inversely-proportional with `N`, i.e. same `scale` spans half
the fraction of sequence that's twice long. Is actually *half* the span
per unilateral (radius) std.
std_t ~ scale (T / N)
______________________________________________________________________________
**Implementation details**
`t` may be defined from `min_mult` up to `max_mult` times the original span
for computing stdev since wavelet may not decay to zero within target frame.
For any mult > 1, this is biased if we are convolving by sliding windows of
length `N` in CWT, but we're not (see `cwt`); our scheme captures full wavelet
characteristics, i.e. as if conv/full decayed length (but only up to mult=2).
`min_decay` controls decay criterion of time-wavelet domain in integrating,
i.e. ratio of max to endpoints of |psi(t)|^2 must exceed this. Will search
up to `max_mult * N`-long `t`.
For small `scale` (<~3) results are harder to interpret and defy expected
behavior per discretization complications (call with `viz=True`). Workaround
via computing at stable scale and calculating via formula shouldn't work as
both-domain behaviors deviate from continuous, complete counterparts.
______________________________________________________________________________
See tests/props_test.py for further info.
# References
1. Higher-Order Properties of Analytic Wavelets.
J. M. Lilly, S. C. Olhede.
https://sci-hub.st/10.1109/TSP.2008.2007607
2. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
"""
def _viz():
_w = aifftshift(xi)[Nt//2-1:]
_psih = aifftshift(psih)[Nt//2-1:]
plot(_w, _psih, show=1,
title="psih(w)+ (frequency-domain wavelet, pos half)")
plot(t, t**2 * apsi2, title="t^2 |psi(t)|^2 (used to compute var_t)",
show=1)
_viz_cwt_scalebounds(wavelet, N, max_scale=scale, std_t=std_t, Nt=Nt)
print("std_t={}\nlen(t), len(t)/N, t_min, t_max = {}, {}, {}, {}".format(
std_t, len(t), len(t)/N, t.min(), t.max()))
if use_formula:
NOTE(f"integrated at scale={scale} then used formula; "
"see help(time_resolution) and try force_int=True")
def _make_integration_t(wavelet, scale, N, min_decay, max_mult, min_mult):
"""Ensure `psi` decays sufficiently at integration bounds"""
for mult in np.arange(min_mult, max_mult + 1):
Nt = int(mult * N)
apsi2 = np.abs(asnumpy(wavelet.psifn(scale=scale, N=Nt)))**2
# ensure sufficient decay at endpoints (assumes ~symmetric decay)
if apsi2.max() / apsi2[:max(10, Nt//100)].mean() > min_decay:
break
else:
raise Exception(("Couldn't find decay timespan satisfying "
"`(min_decay, max_mult) = ({}, {})` for `scale={}`; "
"decrease former or increase latter or check "
"`wavelet`".format(min_decay, max_mult, scale)))
# len(t) == mult*N (independent of T)
# `t` doesn't have zero-mean but that's correct for psi's peak & symmetry
T = N
t = np.arange(-mult * T/2, mult * T/2, step=T/N)
return t
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# formula criterion not optimal; thresholds will vary by wavelet config
use_formula = ((scale < 4 or scale > N / 5) and not force_int)
if use_formula:
scale_orig = scale
scale = (4/pi) * wavelet.wc_ct
t = _make_integration_t(wavelet, scale, N, min_decay, max_mult, min_mult)
Nt = len(t)
xi = _xifn(1, Nt)
psih = asnumpy(wavelet(scale * xi, nohalf=False))
psi = asnumpy(ifft(psih * (-1)**np.arange(Nt)))
apsi2 = np.abs(psi)**2
var_t = (integrate.trapz(t**2 * apsi2, t) /
integrate.trapz(apsi2, t))
std_t = np.sqrt(var_t)
if use_formula:
std_t *= (scale_orig / scale)
scale = scale_orig
if nondim:
# 'energy' yields values closer to continuous-time counterparts,
# but we seek accuracy relative to discretized values
wc = center_frequency(wavelet, scale, N=N, kind='peak')
std_t *= wc
if viz:
_viz()
return std_t
#### Misc ####################################################################
def afftshift(xh):
"""Needed since analytic wavelets keep Nyquist bin at N//2 positive bin
whereas FFT convention is to file it under negative (see `_xi`).
Moves right N//2 + 1 bins to left.
"""
if len(xh) % 2 == 0:
return _afftshift_even(xh, np.zeros(len(xh), dtype=xh.dtype))
return fftshift(xh)
@jit(nopython=True, cache=True)
def _afftshift_even(xh, xhs):
N = len(xh)
for i in range(N // 2 + 1):
xhs[i] = xh[i + N // 2 - 1]
for i in range(N // 2 + 1, N):
xhs[i] = xh[i - N // 2 - 1]
return xhs
def aifftshift(xh):
"""Inversion also different; moves left N//2+1 bins to right."""
if len(xh) % 2 == 0:
return _aifftshift_even(xh, np.zeros(len(xh), dtype=xh.dtype))
return ifftshift(xh)
@jit(nopython=True, cache=True)
def _aifftshift_even(xh, xhs):
N = len(xh)
for i in range(N // 2 + 1):
xhs[i + N//2 - 1] = xh[i]
for i in range(N // 2 + 1, N):
xhs[i - N//2 - 1] = xh[i]
return xhs
def _fn_to_name(fn):
"""`_` to ` `, removes `<lambda>` & `.`, handles `SPECIALS`."""
SPECIALS = {'Gmw ': 'GMW '}
name = fn.__qualname__.replace('_', ' ').replace('<locals>', '').replace(
'<lambda>', '').replace('.', '').title()
for k, v in SPECIALS.items():
name = name.replace(k, v)
return name
def isinstance_by_name(obj, ref):
"""IPython reload can make isinstance(Obj(), Obj) fail; won't work if
Obj has __str__ overridden."""
def _class_name(obj):
name = getattr(obj, '__qualname__', getattr(obj, '__name__', ''))
return (getattr(obj, '__module__', '') + '.' + name).lstrip('.')
return _class_name(type(obj)) == _class_name(ref)
##############################################################################
from ._gmw import gmw
from . import visuals
from .visuals import plot, _viz_cwt_scalebounds
from .utils.common import WARN, NOTE, pi, assert_is_one_of
from .utils.backend import asnumpy
| 38,840 | 38.154234 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/_stft.py | # -*- coding: utf-8 -*-
import numpy as np
import scipy.signal as sig
from .utils import WARN, padsignal, buffer, unbuffer, window_norm
from .utils import _process_fs_and_t
from .utils.fft_utils import fft, ifft, rfft, irfft, fftshift, ifftshift
from .utils.backend import torch, is_tensor
from .algos import zero_denormals
from .wavelets import _xifn, _process_params_dtype
from .configs import gdefaults, USE_GPU
def stft(x, window=None, n_fft=None, win_len=None, hop_len=1, fs=None, t=None,
padtype='reflect', modulated=True, derivative=False, dtype=None):
"""Short-Time Fourier Transform.
`modulated=True` computes "modified" variant from [1] which is advantageous
to reconstruction & synchrosqueezing (see "Modulation" below).
# Arguments:
x: np.ndarray
Input vector(s), 1D or 2D. See `help(cwt)`.
window: str / np.ndarray / None
STFT windowing kernel. If string, will fetch per
`scipy.signal.get_window(window, win_len, fftbins=True)`.
Defaults to `scipy.signal.windows.dpss(win_len, win_len//8)`;
the DPSS window provides the best time-frequency resolution.
Always padded to `n_fft`, so for accurate filter characteristics
(side lobe decay, etc), best to pass in pre-designed `window`
with `win_len == n_fft`.
n_fft: int >= 0 / None
FFT length, or `(STFT column length) // 2 + 1`.
If `win_len < n_fft`, will pad `window`. Every STFT column is
`fft(window * x_slice)`.
Defaults to `len(x)//hop_len`, up to 512.
win_len: int >= 0 / None
Length of `window` to use. Used to generate a window if `window`
is string, and ignored if it's np.ndarray.
Defaults to `n_fft//8` or `len(window)` (if `window` is np.ndarray).
hop_len: int > 0
STFT stride, or number of samples to skip/hop over between subsequent
windowings. Relates to 'overlap' as `overlap = n_fft - hop_len`.
Must be 1 for invertible synchrosqueezed STFT.
fs: float / None
Sampling frequency of `x`. Defaults to 1, which makes ssq frequencies
range from 0 to 0.5*fs, i.e. as fraction of reference sampling rate
up to Nyquist limit. Used to compute `dSx` and `ssq_freqs`.
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Overrides `fs` if not None.
padtype: str
Pad scheme to apply on input. See `help(utils.padsignal)`.
modulated: bool (default True)
Whether to use "modified" variant as in [1], which centers DFT
cisoids at the window for each shift `u`. `False` will not invert
once synchrosqueezed.
Recommended to use `True`; see "Modulation" below.
derivative: bool (default False)
Whether to compute and return `dSx`. Uses `fs`.
dtype: str['float32', 'float64'] / None
Compute precision; use 'float32` for speed & memory at expense of
accuracy (negligible for most purposes).
If None, uses value from `configs.ini`.
To be safe with `'float32'`, time-localized `window`, and large
`hop_len`, use
from ssqueezepy._stft import _check_NOLA
_check_NOLA(window, hop_len, 'float32', imprecision_strict=True)
**Modulation**
`True` will center DFT cisoids at the window for each shift `u`:
Sm[u, k] = sum_{0}^{N-1} f[n] * g[n - u] * exp(-j*2pi*k*(n - u)/N)
as opposed to usual STFT:
S[u, k] = sum_{0}^{N-1} f[n] * g[n - u] * exp(-j*2pi*k*n/N)
Most implementations (including `scipy`, `librosa`) compute *neither*,
but rather center the window for each slice, thus shifting DFT bases
relative to n=0 (t=0). These create spectra that, viewed as signals, are
of high frequency, making inversion and synchrosqueezing very unstable.
Details & visuals: https://dsp.stackexchange.com/a/72590/50076
# Returns:
Sx: [(n_fft//2 + 1) x n_hops] np.ndarray
STFT of `x`. Positive frequencies only (+dc), via `rfft`.
(n_hops = (len(x) - 1)//hop_len + 1)
(rows=scales, cols=timeshifts)
dWx: [(n_fft//2 + 1) x n_hops] np.ndarray
Returned only if `derivative=True`.
Time-derivative of the STFT of `x`, computed via STFT done with
time-differentiated `window`, as in [1]. This differs from CWT's,
where its (and Sx's) DFTs are taken along columns rather than rows.
d/dt(window) obtained via freq-domain differentiation (help(cwt)).
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
"""
def _stft(xp, window, diff_window, n_fft, hop_len, fs, modulated, derivative):
Sx = buffer(xp, n_fft, n_fft - hop_len, modulated)
if derivative:
dSx = buffer(xp, n_fft, n_fft - hop_len, modulated)
if modulated:
window = ifftshift(window, astensor=True)
if derivative:
diff_window = ifftshift(diff_window, astensor=True) * fs
reshape = (-1, 1) if xp.ndim == 1 else (1, -1, 1)
Sx *= window.reshape(*reshape)
if derivative:
dSx *= (diff_window.reshape(*reshape))
# keep only positive frequencies (Hermitian symmetry assuming real `x`)
axis = 0 if xp.ndim == 1 else 1
Sx = rfft(Sx, axis=axis, astensor=True)
if derivative:
dSx = rfft(dSx, axis=axis, astensor=True)
return (Sx, dSx) if derivative else (Sx, None)
# process args
assert x.ndim in (1, 2)
N = x.shape[-1]
_, fs, _ = _process_fs_and_t(fs, t, N)
n_fft = n_fft or min(N//hop_len, 512)
# process `window`, make `diff_window`, check NOLA, enforce `dtype`
if win_len is None:
win_len = (len(window) if isinstance(window, np.ndarray) else
n_fft)
dtype = gdefaults('_stft.stft', dtype=dtype)
window, diff_window = get_window(window, win_len, n_fft, derivative=True,
dtype=dtype)
_check_NOLA(window, hop_len, dtype)
x = _process_params_dtype(x, dtype=dtype, auto_gpu=False)
# pad `x` to length `padlength`
padlength = N + n_fft - 1
xp = padsignal(x, padtype, padlength=padlength)
# arrays -> tensors if using GPU
if USE_GPU():
xp, window, diff_window = [torch.as_tensor(g, device='cuda') for g in
(xp, window, diff_window)]
# take STFT
Sx, dSx = _stft(xp, window, diff_window, n_fft, hop_len, fs, modulated,
derivative)
# ensure indexing works as expected downstream (cupy)
Sx = Sx.contiguous() if is_tensor(Sx) else Sx
dSx = dSx.contiguous() if is_tensor(dSx) else dSx
return (Sx, dSx) if derivative else Sx
def istft(Sx, window=None, n_fft=None, win_len=None, hop_len=1, N=None,
modulated=True, win_exp=1):
"""Inverse Short-Time Fourier transform. Computed with least-squares
estimate for `win_exp`=1 per Griffin-Lim [1], recommended for STFT with
modifications, else simple inversion with `win_exp`=0:
x[n] = sum(y_t[n] * w^a[n - tH]) / sum(w^{a+1}[n - tH]),
y_t = ifft(Sx), H = hop_len, a = win_exp, t = hop index, n = sample index
Warns if `window` NOLA constraint isn't met (see [2]), invalidating inversion.
Nice visuals and explanations on istft:
https://www.mathworks.com/help/signal/ref/istft.html
# Arguments:
Sx: np.ndarray
STFT of 1D `x`.
window, n_fft, win_len, hop_len, modulated
Should be same as used in forward STFT. See `help(stft)`.
N: int > 0 / None
`len(x)` of original `x`, used in inversion padding and windowing.
If None, assumes longest possible `x` given `hop_len`, `Sx.shape[1]`.
win_exp: int >= 0
Window power used in inversion (see [1], [2], or equation above).
# Returns:
x: np.ndarray, 1D
Signal as reconstructed from `Sx`.
# References:
1. Signal Estimation from Modified Short-Time Fourier Transform.
D. W. Griffin, J. S. Lim.
https://citeseerx.ist.psu.edu/viewdoc/
download?doi=10.1.1.306.7858&rep=rep1&type=pdf
2. Invertibility of overlap-add processing. B. Sharpe.
https://gauss256.github.io/blog/cola.html
"""
### process args #####################################
n_fft = n_fft or (Sx.shape[0] - 1) * 2
win_len = win_len or n_fft
N = N or hop_len * Sx.shape[1] # assume largest possible N if not given
dtype = 'float32' if str(Sx.dtype) == 'complex64' else 'float64'
window = get_window(window, win_len, n_fft=n_fft, dtype=dtype)
_check_NOLA(window, hop_len, dtype=dtype)
xbuf = irfft(Sx, n=n_fft, axis=0).real
if modulated:
xbuf = fftshift(xbuf, axes=0)
# overlap-add the columns
x = unbuffer(xbuf, window, hop_len, n_fft, N, win_exp)
# window norm, control for float precision
wn = window_norm(window, hop_len, n_fft, N, win_exp)
th = np.finfo(x.dtype).tiny
if wn.min() < th:
approx_nonzero_idxs = wn > th
x[approx_nonzero_idxs] /= wn[approx_nonzero_idxs]
else:
x /= wn
# unpad
x = x[n_fft//2 : -((n_fft - 1)//2)]
return x
def get_window(window, win_len, n_fft=None, derivative=False, dtype=None):
"""See `window` in `help(stft)`. Will return window of length `n_fft`,
regardless of `win_len` (will pad if needed).
"""
if n_fft is None:
pl, pr = 0, 0
else:
if win_len > n_fft:
raise ValueError("Can't have `win_len > n_fft` ({} > {})".format(
win_len, n_fft))
pl = (n_fft - win_len) // 2
pr = (n_fft - win_len - pl)
if window is not None:
if isinstance(window, str):
# fftbins=True -> 'periodic' window -> narrower main side-lobe and
# closer to zero-phase in left=right padded case
# for windows edging at 0
window = sig.get_window(window, win_len, fftbins=True)
elif isinstance(window, np.ndarray):
if len(window) != win_len:
WARN("len(window) != win_len (%s != %s)" % (len(window), win_len))
else:
raise ValueError("`window` must be string or np.ndarray "
"(got %s)" % window)
else:
# sym=False <-> fftbins=True (see above)
window = sig.windows.dpss(win_len, max(4, win_len//8), sym=False)
if len(window) < (win_len + pl + pr):
window = np.pad(window, [pl, pr])
if derivative:
wf = fft(window)
Nw = len(window)
xi = _xifn(1, Nw)
if Nw % 2 == 0:
xi[Nw // 2] = 0
# frequency-domain differentiation; see `dWx` return docs in `help(cwt)`
diff_window = ifft(wf * 1j * xi).real
# cast `dtype`, zero denormals (extremely small numbers that slow down CPU)
window = _process_params_dtype(window, dtype=dtype, auto_gpu=False)
zero_denormals(window)
if derivative:
diff_window = _process_params_dtype(diff_window, dtype=dtype,
auto_gpu=False)
zero_denormals(diff_window)
return (window, diff_window) if derivative else window
def _check_NOLA(window, hop_len, dtype=None, imprecision_strict=False):
"""https://gauss256.github.io/blog/cola.html"""
# basic NOLA
if hop_len > len(window):
WARN("`hop_len > len(window)`; STFT not invertible")
elif not sig.check_NOLA(window, len(window), len(window) - hop_len):
WARN("`window` fails Non-zero Overlap Add (NOLA) criterion; "
"STFT not invertible")
# handle `dtype`; note this is just a guess, what matters is `Sx.dtype`
if dtype is None:
dtype = str(window.dtype)
# check for right boundary effect: as ssqueezepy's number of output frames
# is critically sampled (not more than needed), it creates an issue with
# float32 and time-localized windows, which struggle to invert the last frame
tol = 0.15 if imprecision_strict else 1e-3
if dtype == 'float32' and not sig.check_NOLA(
window, len(window), len(window) - hop_len, tol=tol):
# 1e-3 can still have imprecision detectable by eye, but only upon few
# samples, so avoid paranoia. Use 1e-2 to be safe, and 0.15 for ~exact
WARN("Imprecision expected at right-most hop of signal, in inversion. "
"Lower `hop_len`, choose wider `window`, or use `dtype='float64'`.")
| 13,033 | 39.858934 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/_ssq_stft.py | # -*- coding: utf-8 -*-
import numpy as np
from ._stft import stft, get_window, _check_NOLA
from ._ssq_cwt import _invert_components, _process_component_inversion_args
from .utils.cwt_utils import _process_fs_and_t, infer_scaletype
from .utils.common import WARN, EPS32, EPS64
from .utils import backend as S
from .utils.backend import torch
from .algos import phase_stft_cpu, phase_stft_gpu
from .ssqueezing import ssqueeze, _check_ssqueezing_args
def ssq_stft(x, window=None, n_fft=None, win_len=None, hop_len=1, fs=None, t=None,
modulated=True, ssq_freqs=None, padtype='reflect', squeezing='sum',
gamma=None, preserve_transform=None, dtype=None, astensor=True,
flipud=False, get_w=False, get_dWx=False):
"""Synchrosqueezed Short-Time Fourier Transform.
Implements the algorithm described in Sec. III of [1].
MATLAB docs: https://www.mathworks.com/help/signal/ref/fsst.html
# Arguments:
x: np.ndarray
Input vector(s), 1D or 2D. See `help(cwt)`.
window, n_fft, win_len, hop_len, fs, t, padtype, modulated
See `help(stft)`.
ssq_freqs, squeezing
See `help(ssqueezing.ssqueeze)`.
`ssq_freqs`, if array, must be linearly distributed.
gamma: float / None
See `help(ssqueezepy.ssq_cwt)`.
preserve_transform: bool (default True)
Whether to return `Sx` as directly output from `stft` (it might be
altered by `ssqueeze` or `phase_transform`). Uses more memory
per storing extra copy of `Sx`.
dtype: str['float32', 'float64'] / None
See `help(stft)`.
astensor: bool (default True)
If `'SSQ_GPU' == '1'`, whether to return arrays as on-GPU tensors
or move them back to CPU & convert to Numpy arrays.
flipud: bool (default False)
See `help(ssqueeze)`.
get_w, get_dWx
See `help(ssq_cwt)`.
(Named `_dWx` instead of `_dSx` for consistency.)
# Returns:
Tx: np.ndarray
Synchrosqueezed STFT of `x`, of same shape as `Sx`.
Sx: np.ndarray
STFT of `x`. See `help(stft)`.
ssq_freqs: np.ndarray
Frequencies associated with rows of `Tx`.
Sfs: np.ndarray
Frequencies associated with rows of `Sx` (by default == `ssq_freqs`).
w: np.ndarray (if `get_w=True`)
Phase transform of STFT of `x`. See `help(phase_stft)`.
dSx: np.ndarray (if `get_dWx=True`)
Time-derivative of STFT of `x`. See `help(stft)`.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
"""
if x.ndim == 2 and get_w:
raise NotImplementedError("`get_w=True` unsupported with batched input.")
_, fs, _ = _process_fs_and_t(fs, t, x.shape[-1])
_check_ssqueezing_args(squeezing)
# assert ssq_freqs, if array, is linear
if (isinstance(ssq_freqs, np.ndarray) and
infer_scaletype(ssq_freqs)[0] != 'linear'):
raise ValueError("`ssq_freqs` must be linearly distributed "
"for `ssq_stft`")
Sx, dSx = stft(x, window, n_fft=n_fft, win_len=win_len, hop_len=hop_len,
fs=fs, padtype=padtype, modulated=modulated, derivative=True,
dtype=dtype)
# preserve original `Sx` or not
if preserve_transform is None:
preserve_transform = not S.is_tensor(Sx)
if preserve_transform:
_Sx = (Sx.copy() if not S.is_tensor(Sx) else
Sx.detach().clone())
else:
_Sx = Sx
# make `Sfs`
Sfs = _make_Sfs(Sx, fs)
# gamma
if gamma is None:
gamma = 10 * (EPS64 if S.is_dtype(Sx, 'complex128') else EPS32)
# compute `w` if `get_w` and free `dWx` from memory if `not get_dWx`
if get_w:
w = phase_stft(_Sx, dSx, Sfs, gamma)
_dSx = None # don't use in `ssqueeze`
if not get_dWx:
dSx = None
else:
w = None
_dSx = dSx
# synchrosqueeze
if ssq_freqs is None:
ssq_freqs = Sfs
Tx, ssq_freqs = ssqueeze(_Sx, w, squeezing=squeezing, ssq_freqs=ssq_freqs,
Sfs=Sfs, flipud=flipud, gamma=gamma, dWx=_dSx,
maprange='maximal', transform='stft')
# return
if not astensor and S.is_tensor(Tx):
Tx, Sx, ssq_freqs, Sfs, w, dSx = [
g.cpu().numpy() if S.is_tensor(g) else g
for g in (Tx, Sx, ssq_freqs, Sfs, w, dSx)]
if get_w and get_dWx:
return Tx, Sx, ssq_freqs, Sfs, w, dSx
elif get_w:
return Tx, Sx, ssq_freqs, Sfs, w
elif get_dWx:
return Tx, Sx, ssq_freqs, Sfs, dSx
else:
return Tx, Sx, ssq_freqs, Sfs
def issq_stft(Tx, window=None, cc=None, cw=None, n_fft=None, win_len=None,
hop_len=1, modulated=True):
"""Inverse synchrosqueezed STFT.
# Arguments:
x: np.ndarray
Input vector, 1D.
window, n_fft, win_len, hop_len, modulated
See `help(stft)`. Must match those used in `ssq_stft`.
cc, cw: np.ndarray
See `help(issq_cwt)`.
# Returns:
x: np.ndarray
Signal as reconstructed from `Tx`.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
2. Fourier synchrosqueezed transform MATLAB docs.
https://www.mathworks.com/help/signal/ref/fsst.html
"""
def _process_args(Tx, window, cc, cw, win_len, hop_len, n_fft, modulated):
if not modulated:
raise ValueError("inversion with `modulated == False` "
"is unsupported.")
if hop_len != 1:
raise ValueError("inversion with `hop_len != 1` is unsupported.")
cc, cw, full_inverse = _process_component_inversion_args(cc, cw)
n_fft = n_fft or (Tx.shape[0] - 1) * 2
win_len = win_len or n_fft
window = get_window(window, win_len, n_fft=n_fft)
_check_NOLA(window, hop_len)
if abs(np.argmax(window) - len(window)//2) > 1:
WARN("`window` maximum not centered; results may be inaccurate.")
return window, cc, cw, win_len, hop_len, n_fft, full_inverse
(window, cc, cw, win_len, hop_len, n_fft, full_inverse
) = _process_args(Tx, window, cc, cw, win_len, hop_len, n_fft, modulated)
if full_inverse:
# Integration over all frequencies recovers original signal
x = Tx.real.sum(axis=0)
else:
x = _invert_components(Tx, cc, cw)
x *= (2 / window[len(window)//2])
return x
def phase_stft(Sx, dSx, Sfs, gamma=None, parallel=None):
"""Phase transform of STFT:
w[u, k] = Im( k - d/dt(Sx[u, k]) / Sx[u, k] / (j*2pi) )
Defined in Sec. 3 of [1]. Additionally explained in:
https://dsp.stackexchange.com/a/72589/50076
# Arguments:
Sx: np.ndarray
STFT of `x`, where `x` is 1D.
dSx: np.ndarray
Time-derivative of STFT of `x`
Sfs: np.ndarray
Associated physical frequencies, according to `dt` used in `stft`.
Spans 0 to fs/2, linearly.
gamma: float / None
See `help(ssqueezepy.ssq_cwt)`.
# Returns:
w: np.ndarray
Phase transform for each element of `Sx`. w.shape == Sx.shape.
# References:
1. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
"""
S.warn_if_tensor_and_par(Sx, parallel)
if gamma is None:
gamma = 10 * (EPS64 if S.is_dtype(Sx, 'complex128') else EPS32)
if S.is_tensor(Sx):
return phase_stft_gpu(Sx, dSx, Sfs, gamma)
return phase_stft_cpu(Sx, dSx, Sfs, gamma, parallel)
def _make_Sfs(Sx, fs):
dtype = 'float32' if 'complex64' in str(Sx.dtype) else 'float64'
n_rows = len(Sx) if Sx.ndim == 2 else Sx.shape[1]
if S.is_tensor(Sx):
Sfs = torch.linspace(0, .5*fs, n_rows, device=Sx.device,
dtype=getattr(torch, dtype))
else:
Sfs = np.linspace(0, .5*fs, n_rows, dtype=dtype)
return Sfs
| 8,660 | 34.207317 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/configs.py | # -*- coding: utf-8 -*-
"""
Contains `GDEFAULTS`, global defaults dictionary, set in `ssqueezepy.configs.ini`.
The .ini is parsed into a dict, then values are retrieved internally by functions
via `gdefaults()`, which sets default values if keyword arguments weren't set
to original functions (or were set to `None`).
E.g. calling `wavelets.morlet()`, the function has `mu=None` signature, so `mu`
will be drawn from `configs.ini`, unless calling like `wavelets.morlet(mu=1)`.
"""
import os
import inspect
import logging
logging.basicConfig(format='')
WARN = lambda msg: logging.warning("WARNING: %s" % msg)
path = os.path.join(os.path.dirname(__file__), 'configs.ini')
try:
import torch
import cupy
except:
torch, cupy = None, None
def gdefaults(module_and_obj=None, get_all=False, as_dict=None,
default_order=False, **kw):
"""Fetches default arguments from `ssqueezepy/configs.ini` and fills them
in `kw` where they're None (or always if `get_all=True`). See code comments.
"""
if as_dict is None:
as_dict = bool(get_all)
if module_and_obj is None:
stack = inspect.stack(0) # `(0)` faster than `()`
obj = stack[1][3]
module = stack[1][1].split(os.path.sep)[-1].rstrip('.py')
else:
# may have e.g. `utils.common.obj`
mos = module_and_obj.split('.')
module, obj = '.'.join(mos[:-1]), mos[-1]
# fetch latest
GDEFAULTS = _get_gdefaults()
# if `module` & `obj` are found in `GDEFAULTS`, proceed to write values
# from `GDEFAULTS` onto `kw` if `kw`'s are `None`
# if `get_all=True`, load values from `GDEFAULTS` even if they're not in
# `kw`, but don't overwrite those that are in `kw`.
# if `default_order=True`, will return `kw` with keys sorted as in
# `configs.ini`, for e.g. plotting purposes
if module not in GDEFAULTS:
WARN(f"module {module} not found in GDEFAULTS (see configs.ini)")
elif obj not in GDEFAULTS[module]:
WARN(f"object {obj} not found in GDEFAULTS['{module}'] "
"(see configs.ini)")
else:
DEFAULTS = GDEFAULTS[module][obj]
for key, value in kw.items():
if value is None:
kw[key] = DEFAULTS.get(key, value)
if get_all:
for key, value in DEFAULTS.items():
if key not in kw:
kw[key] = value
if default_order:
# first make a dict with correct order
# then overwrite its values with `kw`'s, without changing order
# if `kw` has keys that `ordered_kw` doesn't, they're inserted at end
ordered_kw = {}
for key, value in DEFAULTS.items():
if key in kw: # `get_all` already accounted for
ordered_kw[key] = value
ordered_kw.update(**kw)
kw = ordered_kw
if as_dict:
return kw
return (kw.values() if len(kw) != 1 else
list(kw.values())[0])
def _get_gdefaults():
"""Global defaults fetched from configs.ini."""
def float_if_number(s):
"""If float works, so should int."""
if isinstance(s, (bool, type(None))):
return s
try:
return float(s)
except ValueError:
return s
def process_special(s):
return {
'None': None,
'True': True,
'False': False,
}.get(s, s)
def process_value(value):
value = value.strip('"').strip("'")
return float_if_number(process_special(value))
with open(path, 'r') as f:
txt = f.read().split('\n')
txt = txt[:txt.index('#### END')]
txt = [line.strip(' ') for line in txt if line != '']
GDEFAULTS = {}
module, obj = '', ''
for line in txt:
if line.startswith('## '):
module = line[3:]
GDEFAULTS[module] = {}
elif line.startswith('# '):
obj = line[2:]
GDEFAULTS[module][obj] = {}
else:
key, value = [s.strip(' ') for s in line.split('=')]
GDEFAULTS[module][obj][key] = process_value(value)
return GDEFAULTS
def IS_PARALLEL():
"""Returns False if 'SSQ_PARALLEL' environment flag was set to '0', or
if `parallel` in `configs.ini` is set to `0`; former overrides latter.
"""
not_par_env = (os.environ.get('SSQ_PARALLEL', '1') == '0')
if not_par_env:
return False
not_par_config = (gdefaults('configs.IS_PARALLEL', parallel=None) == 0)
if not_par_config:
return False
return True
def USE_GPU():
if os.environ.get('SSQ_GPU', '0') == '1':
if torch is None or cupy is None:
raise ValueError("'SSQ_GPU' requires PyTorch and CuPy installed.")
return True
return False
GDEFAULTS = _get_gdefaults()
| 4,853 | 31.145695 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/ssqueezing.py | # -*- coding: utf-8 -*-
import numpy as np
from types import FunctionType
from .algos import indexed_sum_onfly, ssqueeze_fast
from .utils import p2up, process_scales, infer_scaletype, _process_fs_and_t
from .utils import NOTE, pi, logscale_transition_idx, assert_is_one_of
from .utils.backend import Q
from .utils.common import WARN
from .utils import backend as S
from .wavelets import center_frequency
def ssqueeze(Wx, w=None, ssq_freqs=None, scales=None, Sfs=None, fs=None, t=None,
squeezing='sum', maprange='maximal', wavelet=None, gamma=None,
was_padded=True, flipud=False, dWx=None, transform='cwt'):
"""Synchrosqueezes the CWT or STFT of `x`.
# Arguments:
Wx or Sx: np.ndarray
CWT or STFT of `x`. CWT is assumed L1-normed, and STFT with
`modulated=True`. If 3D, will treat elements along dim0 as independent
inputs, synchrosqueezing one-by-one (but memory-efficiently).
w: np.ndarray / None
Phase transform of `Wx` or `Sx`. Must be >=0.
If None, `gamma` & `dWx` must be supplied (and `Sfs` for SSQ_STFT).
ssq_freqs: str['log', 'log-piecewise', 'linear'] / np.ndarray / None
Frequencies to synchrosqueeze CWT scales onto. Scale-frequency
mapping is only approximate and wavelet-dependent.
If None, will infer from and set to same distribution as `scales`.
See `help(cwt)` on `'log-piecewise'`.
scales: str['log', 'log-piecewise', 'linear', ...] / np.ndarray
See `help(cwt)`.
Sfs: np.ndarray
Needed if `transform='stft'` and `dWx=None`. See `help(ssq_stft)`.
fs: float / None
Sampling frequency of `x`. Defaults to 1, which makes ssq
frequencies range from 1/dT to 0.5*fs, i.e. as fraction of reference
sampling rate up to Nyquist limit; dT = total duration (N/fs).
Overridden by `t`, if provided.
Relevant on `t` and `dT`: https://dsp.stackexchange.com/a/71580/50076
t: np.ndarray / None
Vector of times at which samples are taken (eg np.linspace(0, 1, n)).
Must be uniformly-spaced.
Defaults to `np.linspace(0, len(x)/fs, len(x), endpoint=False)`.
Overrides `fs` if not None.
squeezing: str['sum', 'lebesgue'] / function
- 'sum': summing `Wx` according to `w`. Standard synchrosqueezing.
Invertible.
- 'lebesgue': as in [3], summing `Wx=ones()/len(Wx)`. Effectively,
raw `Wx` phase is synchrosqueezed, independent of `Wx` values. Not
recommended with CWT or STFT with `modulated=True`. Not invertible.
For `modulated=False`, provides a more stable and accurate
representation.
- 'abs': summing `abs(Wx)` according to `w`. Not invertible
(but theoretically possible to get close with least-squares estimate,
so much "more invertible" than 'lebesgue'). Alt to 'lebesgue',
providing same benefits while losing much less information.
Custom function can be used to transform `Wx` arbitrarily for
summation, e.g. `Wx**2` via `lambda x: x**2`. Output shape
must match `Wx.shape`.
maprange: str['maximal', 'peak', 'energy'] / tuple(float, float)
See `help(ssq_cwt)`. Only `'maximal'` supported with STFT.
wavelet: wavelets.Wavelet
Only used if maprange != 'maximal' to compute center frequencies.
See `help(cwt)`.
gamma: float
See `help(ssq_cwt)`.
was_padded: bool (default `rpadded`)
Whether `x` was padded to next power of 2 in `cwt`, in which case
`maprange` is computed differently.
- Used only with `transform=='cwt'`.
- Ignored if `maprange` is tuple.
flipud: bool (default False)
Whether to fill `Tx` equivalently to `flipud(Tx)` (faster & less
memory than calling `Tx = np.flipud(Tx)` afterwards).
dWx: np.ndarray,
Used internally by `ssq_cwt` / `ssq_stft`; must pass when `w` is None.
transform: str['cwt', 'stft']
Whether `Wx` is from CWT or STFT (`Sx`).
# Returns:
Tx: np.ndarray [nf x n]
Synchrosqueezed CWT of `x`. (rows=~frequencies, cols=timeshifts)
(nf = len(ssq_freqs); n = len(x))
`nf = na` by default, where `na = len(scales)`.
ssq_freqs: np.ndarray [nf]
Frequencies associated with rows of `Tx`.
# References:
1. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
2. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
3. Synchrosqueezing-based Recovery of Instantaneous Frequency from
Nonuniform Samples. G. Thakur and H.-T. Wu.
https://arxiv.org/abs/1006.2533
4. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
synsq_squeeze.m
"""
def _ssqueeze(Tx, w, Wx, dWx, nv, ssq_freqs, scales, transform, ssq_scaletype,
cwt_scaletype, flipud, gamma, Sfs):
if transform == 'cwt':
# Eq 14 [2]; Eq 2.3 [1]
if cwt_scaletype[:3] == 'log':
# ln(2)/nv == diff(ln(scales))[0] == ln(2**(1/nv))
const = np.log(2) / nv
elif cwt_scaletype == 'linear':
# omit /dw since it's cancelled by *dw in inversion anyway
const = ((scales[1] - scales[0]) / scales).squeeze()
elif transform == 'stft':
const = (ssq_freqs[1] - ssq_freqs[0]) # 'alpha' from [3]
ssq_logscale = ssq_scaletype.startswith('log')
# do squeezing by finding which frequency bin each phase transform point
# w[a, b] lands in (i.e. to which f in ssq_freqs each w[a, b] is closest)
# equivalent to argmin(abs(w[a, b] - ssq_freqs)) for every a, b
# Tx[k[i, j], j] += Wx[i, j] * norm -- (see below method's docstring)
if w is None:
ssqueeze_fast(Wx, dWx, ssq_freqs, const, ssq_logscale, flipud,
gamma, out=Tx, Sfs=Sfs)
else:
indexed_sum_onfly(Wx, w, ssq_freqs, const, ssq_logscale, flipud,
out=Tx)
def _process_args(Wx, w, fs, t, transform, squeezing, scales, maprange,
wavelet, dWx):
if w is None and (dWx is None or gamma is None):
raise ValueError("if `w` is None, `dWx` and `gamma` must not be.")
elif w is not None and w.min() < 0:
raise ValueError("found negatives in `w`")
_check_ssqueezing_args(squeezing, maprange, transform=transform,
wavelet=wavelet)
if scales is None and transform == 'cwt':
raise ValueError("`scales` can't be None if `transform == 'cwt'`")
N = Wx.shape[-1]
dt, *_ = _process_fs_and_t(fs, t, N)
return N, dt
N, dt = _process_args(Wx, w, fs, t, transform, squeezing, scales,
maprange, wavelet, dWx)
if transform == 'cwt':
scales, cwt_scaletype, _, nv = process_scales(scales, N, get_params=True)
else:
cwt_scaletype, nv = None, None
# handle `ssq_freqs` & `ssq_scaletype`
if not (isinstance(ssq_freqs, np.ndarray) or S.is_tensor(ssq_freqs)):
if isinstance(ssq_freqs, str):
ssq_scaletype = ssq_freqs
else:
# default to same scheme used by `scales`
ssq_scaletype = cwt_scaletype
if ((maprange == 'maximal' or isinstance(maprange, tuple)) and
ssq_scaletype == 'log-piecewise'):
raise ValueError("can't have `ssq_scaletype = log-piecewise` or "
"tuple with `maprange = 'maximal'` "
"(got %s)" % str(maprange))
ssq_freqs = _compute_associated_frequencies(
scales, N, wavelet, ssq_scaletype, maprange, was_padded, dt,
transform)
elif transform == 'stft':
# removes warning per issue with `infer_scaletype`
# future TODO: shouldn't need this
ssq_scaletype = 'linear'
else:
ssq_scaletype, _ = infer_scaletype(ssq_freqs)
# transform `Wx` if needed
if isinstance(squeezing, FunctionType):
Wx = squeezing(Wx)
elif squeezing == 'lebesgue': # from reference [3]
Wx = S.ones(Wx.shape, dtype=Wx.dtype) / len(Wx)
elif squeezing == 'abs':
Wx = Q.abs(Wx)
# synchrosqueeze
Tx = S.zeros(Wx.shape, dtype=Wx.dtype)
args = (nv, ssq_freqs, scales, transform, ssq_scaletype,
cwt_scaletype, flipud, gamma, Sfs)
if Wx.ndim == 2:
_ssqueeze(Tx, w, Wx, dWx, *args)
elif Wx.ndim == 3:
w, dWx = [(g if g is not None else [None]*len(Tx))
for g in (w, dWx)]
for _Tx, _w, _Wx, _dWx in zip(Tx, w, Wx, dWx):
_ssqueeze(_Tx, _w, _Wx, _dWx, *args)
# `scales` go high -> low
if (transform == 'cwt' and not flipud) or flipud:
if not isinstance(ssq_freqs, np.ndarray):
import torch
ssq_freqs = torch.flip(ssq_freqs, (0,))
else:
ssq_freqs = ssq_freqs[::-1]
return Tx, ssq_freqs
#### `ssqueeze` utils ########################################################
def _ssq_freqrange(maprange, dt, N, wavelet, scales, was_padded):
if isinstance(maprange, tuple):
fm, fM = maprange
elif maprange == 'maximal':
dT = dt * N
# normalized frequencies to map discrete-domain to physical:
# f[[cycles/samples]] -> f[[cycles/second]]
# minimum measurable (fundamental) frequency of data
fm = 1 / dT
# maximum measurable (Nyquist) frequency of data
fM = 1 / (2 * dt)
elif maprange in ('peak', 'energy'):
kw = dict(wavelet=wavelet, N=N, maprange=maprange, dt=dt,
was_padded=was_padded)
fm = _get_center_frequency(**kw, scale=scales[-1])
fM = _get_center_frequency(**kw, scale=scales[0])
return fm, fM
def _compute_associated_frequencies(scales, N, wavelet, ssq_scaletype, maprange,
was_padded=True, dt=1, transform='cwt'):
fm, fM = _ssq_freqrange(maprange, dt, N, wavelet, scales, was_padded)
na = len(scales)
# frequency divisions `w_l` to reassign to in Synchrosqueezing
if ssq_scaletype == 'log':
# [fm, ..., fM]
ssq_freqs = fm * np.power(fM / fm, np.arange(na)/(na - 1))
elif ssq_scaletype == 'log-piecewise':
idx = logscale_transition_idx(scales)
if idx is None:
ssq_freqs = fm * np.power(fM / fm, np.arange(na)/(na - 1))
else:
f0, f2 = fm, fM
# note that it's possible for f1 == f0 per discretization limitations,
# in which case `sqf1` will contain the same value repeated
f1 = _get_center_frequency(wavelet, N, maprange, dt, scales[idx],
was_padded)
# here we don't know what the pre-downsampled `len(scales)` was,
# so we take a longer route by piecewising respective center freqs
t1 = np.arange(0, na - idx - 1)/(na - 1)
t2 = np.arange(na - idx - 1, na)/(na - 1)
# simulates effect of "endpoint" since we'd need to know `f2`
# with `endpoint=False`
t1 = np.hstack([t1, t2[0]])
sqf1 = _exp_fm(t1, f0, f1)[:-1]
sqf2 = _exp_fm(t2, f1, f2)
ssq_freqs = np.hstack([sqf1, sqf2])
ssq_idx = logscale_transition_idx(ssq_freqs)
if ssq_idx is None:
raise Exception("couldn't find logscale transition index of "
"generated `ssq_freqs`; something went wrong")
assert (na - ssq_idx) == idx, "{} != {}".format(na - ssq_idx, idx)
else:
if transform == 'cwt':
ssq_freqs = np.linspace(fm, fM, na)
elif transform == 'stft':
ssq_freqs = np.linspace(0, .5, na) / dt
return ssq_freqs
def _exp_fm(t, fmin, fmax):
tmin, tmax = t.min(), t.max()
a = (fmin**tmax / fmax**tmin) ** (1/(tmax - tmin))
b = fmax**(1/tmax) * (1/a)**(1/tmax)
return a*b**t
def _get_center_frequency(wavelet, N, maprange, dt, scale, was_padded):
if was_padded:
N = p2up(N)[0]
kw = dict(wavelet=wavelet, N=N, scale=scale, kind=maprange)
if maprange == 'energy':
kw['force_int'] = True
wc = center_frequency(**kw)
fc = wc / (2*pi) / dt
return fc
#### misc ####################################################################
def _check_ssqueezing_args(squeezing, maprange=None, wavelet=None, difftype=None,
difforder=None, get_w=None, transform='cwt'):
if transform not in ('cwt', 'stft'):
raise ValueError("`transform` must be one of: cwt, stft "
"(got %s)" % squeezing)
if not isinstance(squeezing, (str, FunctionType)):
raise TypeError("`squeezing` must be string or function "
"(got %s)" % type(squeezing))
elif isinstance(squeezing, str):
assert_is_one_of(squeezing, 'squeezing', ('sum', 'lebesgue', 'abs'))
# maprange
if maprange is not None:
if isinstance(maprange, (tuple, list)):
if not all(isinstance(m, (float, int)) for m in maprange):
raise ValueError("all elements of `maprange` must be "
"float or int")
elif isinstance(maprange, str):
assert_is_one_of(maprange, 'maprange', ('maximal', 'peak', 'energy'))
else:
raise TypeError("`maprange` must be str, tuple, or list "
"(got %s)" % type(maprange))
if isinstance(maprange, str) and maprange != 'maximal':
if transform != 'cwt':
NOTE("string `maprange` currently only functional with "
"`transform='cwt'`")
elif wavelet is None:
raise ValueError(f"maprange='{maprange}' requires `wavelet`")
# difftype
if difftype is not None:
if difftype not in ('trig', 'phase', 'numeric'):
raise ValueError("`difftype` must be one of: direct, phase, numeric"
" (got %s)" % difftype)
elif difftype != 'trig':
from .configs import USE_GPU
if USE_GPU():
raise ValueError("GPU computation only supports "
"`difftype = 'trig'`")
elif not get_w:
raise ValueError("`difftype != 'trig'` requires `get_w = True`")
# difforder
if difforder is not None:
if difftype != 'numeric':
WARN("`difforder` is ignored if `difftype != 'numeric'")
elif difforder not in (1, 2, 4):
raise ValueError("`difforder` must be one of: 1, 2, 4 "
"(got %s)" % difforder)
elif difftype == 'numeric':
difforder = 4
return difforder
| 15,556 | 41.159892 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/algos.py | # -*- coding: utf-8 -*-
"""CPU- & GPU-accelerated routines, and few neat algorithms.
"""
import numpy as np
from numba import jit, prange
from functools import reduce
from .utils.backend import asnumpy, cp, torch
from .utils.gpu_utils import _run_on_gpu, _get_kernel_params
from .utils import backend as S
from .configs import IS_PARALLEL
def nCk(n, k):
"""n-Choose-k"""
mul = lambda a, b: a * b
r = min(k, n - k)
numer = reduce(mul, range(n, n - r, -1), 1)
denom = reduce(mul, range(1, r + 1), 1)
return numer / denom
#### `indexed_sum` ###########################################################
def indexed_sum(a, k, parallel=None):
"""Sum `a` into rows of 2D array according to indices given by 2D `k`."""
out = np.zeros(a.shape, dtype=a.dtype)
if parallel or (parallel is None and IS_PARALLEL()):
_indexed_sum_par(a, k, out)
else:
_indexed_sum(a, k, out)
return out
@jit(nopython=True, cache=True)
def _indexed_sum(a, k, out):
for i in range(a.shape[0]):
for j in range(a.shape[1]):
out[k[i, j], j] += a[i, j]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_par(a, k, out):
for j in prange(a.shape[1]):
for i in range(a.shape[0]):
out[k[i, j], j] += a[i, j]
def _process_ssq_params(Wx, w_or_dWx, ssq_freqs, const, logscale, flipud, out,
gamma, parallel, complex_out=True, Sfs=None):
S.warn_if_tensor_and_par(Wx, parallel)
gpu = S.is_tensor(Wx)
parallel = (parallel or IS_PARALLEL()) and not gpu
# process `Wx`, `w_or_dWx`, `out`
if out is None:
out_shape = (*Wx.shape, 2) if (gpu and complex_out) else Wx.shape
if gpu:
out_dtype = (torch.float32 if Wx.dtype == torch.complex64 else
torch.float64)
out = torch.zeros(out_shape, dtype=out_dtype, device=Wx.device)
else:
out = np.zeros(out_shape, dtype=Wx.dtype)
elif complex_out and gpu:
out = torch.view_as_real(out)
if gpu:
Wx = torch.view_as_real(Wx)
if 'complex' in str(w_or_dWx.dtype):
w_or_dWx = torch.view_as_real(w_or_dWx)
# process `const`
len_const = (const.numel() if isinstance(const, torch.Tensor) else
(const.size if isinstance(const, np.ndarray) else 1))
if len_const != len(Wx):
if gpu:
const_arr = torch.full((len(Wx),), fill_value=const,
device=Wx.device, dtype=Wx.dtype)
else:
const_arr = np.full(len(Wx), const, dtype=Wx.dtype)
elif gpu and isinstance(const, np.ndarray):
const_arr = torch.as_tensor(const, dtype=Wx.dtype, device=Wx.device)
else:
const_arr = const
const_arr = const_arr.squeeze()
# process other constants
if logscale:
_, params = _get_params_find_closest_log(ssq_freqs)
else:
dv = float(ssq_freqs[1] - ssq_freqs[0])
dv = _ensure_nonzero_nonnegative('dv', dv)
params = dict(vmin=float(ssq_freqs[0]), dv=dv)
if gpu:
# process kernel params
(blockspergrid, threadsperblock, kernel_kw, str_dtype
) = _get_kernel_params(Wx, dim=1)
M = kernel_kw['M']
kernel_kw.update(dict(f='f' if kernel_kw['dtype'] == 'float' else '',
extra=f"k = {M} - 1 - k;" if flipud else ""))
# collect tensors & constants
if 'idx1' in params:
params['idx1'] = int(params['idx1'])
kernel_args = [Wx.data_ptr(), w_or_dWx.data_ptr(), out.data_ptr(),
const_arr.data_ptr(), *list(params.values())]
if gamma is not None:
kernel_args.insert(4, cp.asarray(gamma, dtype=str_dtype))
if Sfs is not None:
kernel_args.insert(2, Sfs.data_ptr())
ssq_scaletype = (('log_piecewise' if 'idx1' in params else 'log')
if logscale else 'lin')
else:
# cpu function params
params.update(dict(const=const_arr, flipud=flipud, omax=len(out) - 1))
if gamma is not None:
params['gamma'] = gamma
if Sfs is not None:
params['Sfs'] = Sfs
ssq_scaletype = (('log_piecewise' if 'idx1' in params else 'log')
if logscale else 'lin')
ssq_scaletype += '_par' if parallel else ''
if gpu:
args = (blockspergrid, threadsperblock, *kernel_args)
return (out, params, args, kernel_kw, ssq_scaletype)
return (Wx, w_or_dWx, out, params, ssq_scaletype)
def ssqueeze_fast(Wx, dWx, ssq_freqs, const, logscale=False, flipud=False,
gamma=None, out=None, Sfs=None, parallel=None):
"""`indexed_sum`, `find_closest`, and `phase_transform` within same loop,
sparing two arrays and intermediate elementwise conditionals; see
`help(algos.find_closest)` on how `k` is computed.
"""
def fn_name(transform, ssq_scaletype):
return ('ssq_stft' if transform == 'stft' else
f'ssq_cwt_{ssq_scaletype}')
outs = _process_ssq_params(Wx, dWx, ssq_freqs, const, logscale, flipud, out,
gamma, parallel, complex_out=True, Sfs=Sfs)
transform = 'cwt' if Sfs is None else 'stft'
if S.is_tensor(Wx):
out, params, args, kernel_kw, ssq_scaletype = outs
kernel = _kernel_codes[fn_name(transform, ssq_scaletype)]
_run_on_gpu(kernel, *args, **kernel_kw)
out = torch.view_as_complex(out)
else:
Wx, dWx, out, params, ssq_scaletype = outs
fn = _cpu_fns[fn_name(transform, ssq_scaletype)]
args = ([Wx, dWx, out] if transform == 'cwt' else
[Wx, dWx, params.pop('Sfs'), out])
fn(*args, **params)
return out
def indexed_sum_onfly(Wx, w, ssq_freqs, const=1, logscale=False, flipud=False,
out=None, parallel=None):
"""`indexed_sum` and `find_closest` within same loop, sparing an array;
see `help(algos.find_closest)` on how `k` is computed.
"""
outs = _process_ssq_params(Wx, w, ssq_freqs, const, logscale, flipud, out,
gamma=None, parallel=parallel, complex_out=True)
if S.is_tensor(Wx):
out, params, args, kernel_kw, ssq_scaletype = outs
kernel = _kernel_codes[f'indexed_sum_{ssq_scaletype}']
_run_on_gpu(kernel, *args, **kernel_kw)
out = torch.view_as_complex(out)
else:
Wx, w, out, params, ssq_scaletype = outs
fn = _cpu_fns[f'indexed_sum_{ssq_scaletype}']
fn(Wx, w, out, **params)
return out
@jit(nopython=True, cache=True)
def _indexed_sum_log(Wx, w, out, const, vlmin, dvl, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if np.isinf(w[i, j]):
continue
k = int(min(round(max((np.log2(w[i, j]) - vlmin) / dvl, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_log_par(Wx, w, out, const, vlmin, dvl, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if np.isinf(w[i, j]):
continue
k = int(min(round(max((np.log2(w[i, j]) - vlmin) / dvl, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _indexed_sum_log_piecewise(Wx, w, out, const, vlmin0, vlmin1, dvl0, dvl1,
idx1, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if np.isinf(w[i, j]):
continue
wl = np.log2(w[i, j])
if wl > vlmin1:
k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
else:
k = int(round(max((wl - vlmin0) / dvl0, 0)))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_log_piecewise_par(Wx, w, out, const, vlmin0, vlmin1, dvl0, dvl1,
idx1, omax, flipud=False):
# it's also possible to construct the if-else logic in terms of mappables
# of `vlmin`, `dvl`, and `idx`, which generalizes to any number of transitions
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if np.isinf(w[i, j]):
continue
wl = np.log2(w[i, j])
if wl > vlmin1:
k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
else:
k = int(round(max((wl - vlmin0) / dvl0, 0)))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _indexed_sum_lin(Wx, w, out, const, vmin, dv, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if np.isinf(w[i, j]):
continue
k = int(min(round(max((w[i, j] - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _indexed_sum_lin_par(Wx, w, out, const, vmin, dv, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if np.isinf(w[i, j]):
continue
k = int(min(round(max((w[i, j] - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
#### `find_closest` algorithms ###############################################
def find_closest(a, v, logscale=False, parallel=None, smart=None):
"""`argmin(abs(a[i, j] - v)) for all `i, j`; `a` is 2D, `v` is 1D.
# Arguments:
a: np.ndarray
2D array.
v: np.ndarray
1D array.
logscale: bool (default False)
Whether "closest" is taken in linear or logarithmic space.
parallel: bool (default True) / None
Whether to use algorithms with `numba.jit(parallel=True)`
smart: bool (default False) / None
Whether to use a very fast smart algorithm (but still the slowest
for ssqueezing; see usage guide below).
Credit: Divakar -- https://stackoverflow.com/a/64526158/10133797
____________________________________________________________________________
**Default behavior**
If only `a` & `v` are passed, `find_closest_smart` is called.
____________________________________________________________________________
**Usage guide**
If 100% accuracy is desired, or `v` is not linearly or logarithmically
distributed, use `find_closest_smart` (`smart=True`) or `find_closest_brute`
(not callable from here).
`_smart` is faster on single CPU thread, but `_brute` can win
via parallelism.
Else, `find_closest_lin` and `find_closest_log` do the trick (the special
case of log-piecewise is handled), and are much faster.
- Relative to "exact", they differ only by 0% to 0.0001%, purely per
float precision limitations, and never by more than one index in `out`
(where whether e.g. `w=0.500000001` belongs to 0 or 1 isn't statistically
meaningful to begin with).
____________________________________________________________________________
**How it works:** `find_closest_log`, `find_closest_lin`
The root assumption is that `v` is uniformly (in linear or log space)
distributed, and we calculate analytically in which bin `w` will land as:
`(w - bin_min) / bin_step_size`
Above is forced to bound in [0, len(v) - 1].
"""
assert not S.is_tensor(a), "`find_closest` doesn't support GPU execution"
if smart is None and parallel is None:
smart = True
elif parallel and smart:
WARN("find_closest: `smart` overrides `parallel`")
if smart:
if logscale:
out = find_closest_smart(np.log2(a), np.log2(v))
else:
out = find_closest_smart(a, v)
elif logscale:
out = find_closest_log(a, v, parallel=parallel)
else:
out = find_closest_lin(a, v, parallel=parallel)
return out
@jit(nopython=True, cache=True, parallel=True)
def find_closest_brute(a, v):
"""Computes exactly but exhaustively."""
out = np.zeros(a.shape, dtype=np.int32)
for i in prange(a.shape[0]):
for j in prange(a.shape[1]):
out[i, j] = np.argmin(np.abs(a[i, j] - v))
return out
def find_closest_smart(a, v):
"""Equivalent to argmin(abs(a[i, j] - v)) for all i, j; a is 2D, v is 1D.
Credit: Divakar -- https://stackoverflow.com/a/64526158/10133797
"""
sidx = v.argsort()
v_s = v[sidx]
idx = np.searchsorted(v_s, a)
idx[idx == len(v)] = len(v) - 1
idx0 = (idx - 1).clip(min=0)
m = np.abs(a - v_s[idx]) >= np.abs(v_s[idx0] - a)
m[idx == 0] = 0
idx[m] -= 1
out = sidx[idx]
return out
def _ensure_nonzero_nonnegative(name, x, silent=False):
if x < EPS64:
if not silent:
WARN("computed `%s` (%.2e) is below EPS64; will set to " % (name, x)
+ "EPS64. Advised to check `ssq_freqs`.")
x = EPS64
return x
def _get_params_find_closest_log(v):
idx = logscale_transition_idx(v)
vlmin = float(np.log2(v[0]))
if idx is None:
dvl = float(np.log2(v[1]) - np.log2(v[0]))
dvl = _ensure_nonzero_nonnegative('dvl', dvl)
params = dict(vlmin=vlmin, dvl=dvl)
else:
vlmin0, vlmin1 = vlmin, float(np.log2(v[idx - 1]))
dvl0 = float(np.log2(v[1]) - np.log2(v[0]))
dvl1 = float(np.log2(v[idx]) - np.log2(v[idx - 1]))
# see comment above `f1` in `ssqueezing._compute_associated_frequencies`
dvl0 = _ensure_nonzero_nonnegative('dvl0', dvl0, silent=True)
dvl1 = _ensure_nonzero_nonnegative('dvl1', dvl1)
idx1 = np.asarray(idx - 1, dtype=np.int32)
params = dict(vlmin0=vlmin0, vlmin1=vlmin1, dvl0=dvl0, dvl1=dvl1,
idx1=idx1)
return idx, params
def find_closest_log(a, v, parallel=True):
idx, params = _get_params_find_closest_log(v)
out = np.zeros(a.shape, dtype=np.int32)
params['omax'] = len(out) - 1
if idx is None:
fn = _find_closest_log_par if parallel else _find_closest_log
else:
fn = (_find_closest_log_piecewise_par if parallel else
_find_closest_log_piecewise)
fn(a, out, **params)
return out
@jit(nopython=True, cache=True)
def _find_closest_log(a, out, vlmin, dvl, omax):
for i in range(a.shape[0]):
for j in range(a.shape[1]):
out[i, j] = min(round(max((np.log2(a[i, j]) - vlmin) / dvl, 0)), omax)
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_log_par(a, out, vlmin, dvl, omax):
for i in prange(a.shape[0]):
for j in prange(a.shape[1]):
out[i, j] = min(round(max((np.log2(a[i, j]) - vlmin) / dvl, 0)), omax)
@jit(nopython=True, cache=True)
def _find_closest_log_piecewise(a, out, vlmin0, vlmin1, dvl0, dvl1, idx1,
omax):
for i in range(a.shape[0]):
for j in range(a.shape[1]):
al = np.log2(a[i, j])
if al > vlmin1:
out[i, j] = min(round((al - vlmin1) / dvl1) + idx1, omax)
else:
out[i, j] = round(max((al - vlmin0) / dvl0, 0))
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_log_piecewise_par(a, out, vlmin0, vlmin1, dvl0, dvl1, idx1,
omax):
# it's also possible to construct the if-else logic in terms of mappables
# of `vlmin`, `dvl`, and `idx`, which generalizes to any number of transitions
for i in prange(a.shape[0]):
for j in prange(a.shape[1]):
if np.isinf(a[i, j]):
continue
al = np.log2(a[i, j])
if al > vlmin1:
out[i, j] = min(round((al - vlmin1) / dvl1) + idx1, omax)
else:
out[i, j] = round(max((al - vlmin0) / dvl0, 0))
def find_closest_lin(a, v, parallel=True):
vmin = v[0]
dv = v[1] - v[0]
out = np.zeros(a.shape, dtype=np.int32)
fn = _find_closest_lin_par if parallel else _find_closest_lin
fn(a, out, vmin, dv, omax=len(out) - 1)
return out
@jit(nopython=True, cache=True)
def _find_closest_lin(a, out, vmin, dv, omax):
for i in range(a.shape[0]):
for j in range(a.shape[1]):
out[i, j] = min(round(max((a[i, j] - vmin) / dv, 0)), omax)
@jit(nopython=True, cache=True, parallel=True)
def _find_closest_lin_par(a, out, vmin, dv, omax):
for i in prange(a.shape[0]):
for j in prange(a.shape[1]):
out[i, j] = min(round(max((a[i, j] - vmin) / dv, 0)), omax)
#### Replacers ###############################################################
def _process_replace_fn_args(x, ref):
if ref is None:
ref = x
xndim = x.ndim # store original ndim to undo expansion later
if not (isinstance(x, np.ndarray) and isinstance(ref, np.ndarray)):
raise TypeError("inputs must be numpy arrays "
"(got %s, %s)" % (type(x), type(ref)))
while x.ndim < 3:
x = np.expand_dims(x, -1)
while ref.ndim < 3:
ref = np.expand_dims(ref, -1)
if x.ndim > 3 or ref.ndim > 3:
raise ValueError("inputs must be 1D, 2D, or 3D numpy arrays "
"(got x.ndim==%d, ref.ndim==%d)" % (x.ndim, ref.ndim))
return x, ref, xndim
def replace_at_inf_or_nan(x, ref=None, replacement=0.):
x, ref, xndim = _process_replace_fn_args(x, ref)
x = _replace_at_inf_or_nan(x, ref, replacement)
while x.ndim > xndim:
x = x.squeeze(axis=-1)
return x
def replace_at_inf(x, ref=None, replacement=0.):
x, ref, xndim = _process_replace_fn_args(x, ref)
x = _replace_at_inf(x, ref, replacement)
while x.ndim > xndim:
x = x.squeeze(axis=-1)
return x
def replace_at_nan(x, ref=None, replacement=0.):
x, ref, xndim = _process_replace_fn_args(x, ref)
x = _replace_at_nan(x, ref, replacement)
while x.ndim > xndim:
x = x.squeeze(axis=-1)
return x
def replace_at_value(x, ref=None, value=0., replacement=0.):
"""Note: `value=np.nan` won't work (but np.inf will, separate from -np.inf)"""
x, ref, xndim = _process_replace_fn_args(x, ref)
x = _replace_at_value(x, ref, value, replacement)
while x.ndim > xndim:
x = x.squeeze(axis=-1)
return x
def replace_under_abs(x, ref=None, value=0., replacement=0., parallel=None):
if S.is_tensor(x):
_replace_under_abs_gpu(x, ref, value, replacement)
elif parallel or (parallel is None and IS_PARALLEL()):
_replace_under_abs_par(x, ref, value, replacement)
else:
_replace_under_abs(x, ref, value, replacement)
# TODO return None?
@jit(nopython=True, cache=True)
def _replace_at_inf_or_nan(x, ref, replacement=0.):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
if np.isinf(ref[i, j, k]) or np.isnan(ref[i, j, k]):
x[i, j, k] = replacement
return x
@jit(nopython=True, cache=True)
def _replace_at_inf(x, ref, replacement=0.):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
if np.isinf(ref[i, j, k]):
x[i, j, k] = replacement
return x
@jit(nopython=True, cache=True)
def _replace_at_nan(x, ref, replacement=0.):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
if np.isnan(ref[i, j, k]):
x[i, j, k] = replacement
return x
@jit(nopython=True, cache=True)
def _replace_at_value(x, ref, value=0., replacement=0.):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
for k in range(x.shape[2]):
if ref[i, j, k] == value:
x[i, j, k] = replacement
return x
@jit(nopython=True, cache=True)
def _replace_under_abs(x, ref, value=0., replacement=0.):
for i in range(x.shape[0]):
for j in range(x.shape[1]):
if abs(ref[i, j]) < value:
x[i, j] = replacement
@jit(nopython=True, cache=True, parallel=True)
def _replace_under_abs_par(x, ref, value=0., replacement=0.):
for i in prange(x.shape[0]):
for j in prange(x.shape[1]):
if abs(ref[i, j]) < value:
x[i, j] = replacement
def _replace_under_abs_gpu(w, Wx, value=0., replacement=0.):
"""Not as general as CPU variants (namely `w` must be real and `Wx`
must be complex).
"""
kernel = '''
extern "C" __global__
void replace_under_abs(${dtype} w[${M}][${N}],
${dtype} Wx[${M}][${N}][2],
${dtype} *value, ${dtype} *replacement)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= ${M} || j >= ${N})
return;
if (norm${f}(2, Wx[i][j]) < *value)
w[i][j] = *replacement;
}
'''
(blockspergrid, threadsperblock, kernel_kw, str_dtype
) = _get_kernel_params(Wx, dim=2)
kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''
Wx = torch.view_as_real(Wx)
kernel_args = [w.data_ptr(), Wx.data_ptr(),
cp.asarray(value, dtype=str_dtype),
cp.asarray(replacement, dtype=str_dtype)]
_run_on_gpu(kernel, blockspergrid, threadsperblock,
*kernel_args, **kernel_kw)
def zero_denormals(x, parallel=None):
"""Denormals are very small non-zero numbers that can significantly slow CPU
execution (e.g. FFT). See https://github.com/scipy/scipy/issues/13764
"""
# take a little bigger than smallest, seems to improve FFT speed
parallel = parallel if parallel is not None else IS_PARALLEL()
tiny = 1000 * np.finfo(x.dtype).tiny
fn = _zero_denormals_par if parallel else _zero_denormals
fn(x.ravel(), tiny)
@jit(nopython=True, cache=True)
def _zero_denormals(x, tiny):
for i in range(x.size):
if x[i] < tiny and x[i] > -tiny:
x[i] = 0
@jit(nopython=True, cache=True, parallel=True)
def _zero_denormals_par(x, tiny):
for i in prange(x.size):
if x[i] < tiny and x[i] > -tiny:
x[i] = 0
#### misc (short) ############################################################
@jit(nopython=True, cache=True)
def _min_neglect_idx(arr, th=1e-12):
"""Used in utils.integrate_analytic and ._integrate_bounded."""
for i, x in enumerate(arr):
if x < th:
return i
return i
#### misc (long) #############################################################
def find_maximum(fn, step_size=1e-3, steps_per_search=1e4, step_start=0,
step_limit=1000, min_value=-1):
"""Finds max of any function with a single maximum, and input value
at which the maximum occurs. Inputs and outputs must be 1D.
Must be strictly non-decreasing from step_start up to maximum of interest.
Takes absolute value of fn's outputs.
"""
steps_per_search = int(steps_per_search)
largest_max = min_value
increment = int(steps_per_search * step_size)
input_values = np.linspace(step_start, increment)
output_values = -1 * np.ones(steps_per_search)
search_idx = 0
while True:
start = step_start + increment * search_idx
end = start + increment
input_values = np.linspace(start, end, steps_per_search, endpoint=False)
output_values[:] = np.abs(asnumpy(fn(input_values)))
output_max = output_values.max()
if output_max > largest_max:
largest_max = output_max
input_value = input_values[np.argmax(output_values)]
elif output_max < largest_max:
break
search_idx += 1
if input_values.max() > step_limit:
raise ValueError(("could not find function maximum with given "
"(step_size, steps_per_search, step_start, "
"step_limit, min_value)=({}, {}, {}, {}, {})"
).format(step_size, steps_per_search, step_start,
step_limit, min_value))
return input_value, largest_max
def find_first_occurrence(fn, value, step_size=1e-3, steps_per_search=1e4,
step_start=0, step_limit=1000):
"""Finds earliest input value for which `fn(input_value) == value`, searching
from `step_start` to `step_limit` in `step_size` increments.
Takes absolute value of fn's outputs.
"""
steps_per_search = int(steps_per_search)
increment = int(steps_per_search * step_size)
output_values = -1 * np.ones(steps_per_search)
step_limit_exceeded = False
search_idx = 0
while True:
start = step_start + increment * search_idx
end = start + increment
input_values = np.linspace(start, end, steps_per_search, endpoint=False)
if input_values.max() > step_limit:
step_limit_exceeded = True
input_values = np.clip(input_values, None, step_limit)
output_values[:] = np.abs(asnumpy(fn(input_values)))
mxdiff = np.abs(np.diff(output_values)).max()
# more reliable than `argmin not in (0, len - 1)` for smooth `fn`
if np.any(np.abs(output_values - value) <= mxdiff):
idx = np.argmin(np.abs(output_values - value))
break
search_idx += 1
if step_limit_exceeded:
raise ValueError(("could not find input value to yield function "
f"output value={value} with given "
"(step_size, steps_per_search, step_start, "
"step_limit, min_value)=({}, {}, {}, {})"
).format(step_size, steps_per_search,
step_start, step_limit))
input_value = input_values[idx]
output_value = output_values[idx]
return input_value, output_value
def phase_cwt_cpu(Wx, dWx, gamma, parallel=None):
"""Computes only the imaginary part of `dWx / Wx` while dividing by 2*pi
in same operation; doesn't compute division at all if `abs(Wx) < gamma`.
Less memory & less computation than `(dWx / Wx).imag / (2*pi)`, same result.
"""
dtype = 'float32' if Wx.dtype == np.complex64 else 'float64'
out = np.zeros(Wx.shape, dtype=dtype)
gamma = np.asarray(gamma, dtype=dtype)
parallel = parallel or IS_PARALLEL()
fn = _phase_cwt_par if parallel else _phase_cwt
fn(Wx, dWx, out, gamma)
return out
@jit(nopython=True, cache=True)
def _phase_cwt(Wx, dWx, out, gamma):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) < gamma:
out[i, j] = np.inf
else:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
out[i, j] = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
@jit(nopython=True, cache=True, parallel=True)
def _phase_cwt_par(Wx, dWx, out, gamma):
for i in prange(Wx.shape[0]):
for j in prange(Wx.shape[1]):
if abs(Wx[i, j]) < gamma:
out[i, j] = np.inf
else:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
out[i, j] = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
def phase_cwt_gpu(Wx, dWx, gamma):
"""Same as `phase_cwt_cpu`, but on GPU."""
kernel = '''
extern "C" __global__
void phase_cwt(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}],
${dtype} *gamma) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= ${M} || j >= ${N})
return;
if (norm${f}(2, Wx[i][j]) < *gamma){
out[i][j] = 1.0/0.0;
return;
}
${dtype} A = dWx[i][j][0];
${dtype} B = dWx[i][j][1];
${dtype} C = Wx[i][j][0];
${dtype} D = Wx[i][j][1];
out[i][j] = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
}
'''
(blockspergrid, threadsperblock, kernel_kw, str_dtype
) = _get_kernel_params(Wx, dim=2)
kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''
out = torch.zeros(Wx.shape, device=Wx.device, dtype=getattr(torch, str_dtype))
Wx = torch.view_as_real(Wx)
dWx = torch.view_as_real(dWx)
kernel_args = [Wx.data_ptr(), dWx.data_ptr(), out.data_ptr(),
cp.asarray(gamma, dtype=str_dtype)]
_run_on_gpu(kernel, blockspergrid, threadsperblock,
*kernel_args, **kernel_kw)
return out
def phase_stft_cpu(Wx, dWx, Sfs, gamma, parallel=None):
dtype = 'float32' if Wx.dtype == np.complex64 else 'float64'
out = np.zeros(Wx.shape, dtype=dtype)
gamma = np.asarray(gamma, dtype=dtype)
parallel = parallel or IS_PARALLEL()
fn = _phase_stft_par if parallel else _phase_stft
fn(Wx, dWx, Sfs, out, gamma)
return out
@jit(nopython=True, cache=True)
def _phase_stft(Wx, dWx, Sfs, out, gamma):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) < gamma:
out[i, j] = np.inf
else:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
out[i, j] = abs(
Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
@jit(nopython=True, cache=True, parallel=True)
def _phase_stft_par(Wx, dWx, Sfs, out, gamma):
for i in prange(Wx.shape[0]):
for j in prange(Wx.shape[1]):
if abs(Wx[i, j]) < gamma:
out[i, j] = np.inf
else:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
out[i, j] = abs(
Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
def phase_stft_gpu(Wx, dWx, Sfs, gamma):
kernel = '''
extern "C" __global__
void phase_stft(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} Sfs[${M}],
${dtype} out[${M}][${N}],
${dtype} *gamma) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= ${M} || j >= ${N})
return;
if (norm${f}(2, Wx[i][j]) < *gamma){
out[i][j] = 1.0/0.0;
return;
}
${dtype} A = dWx[i][j][0];
${dtype} B = dWx[i][j][1];
${dtype} C = Wx[i][j][0];
${dtype} D = Wx[i][j][1];
out[i][j] = abs(Sfs[i] - (B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
}
'''
(blockspergrid, threadsperblock, kernel_kw, str_dtype
) = _get_kernel_params(Wx, dim=2)
kernel_kw['f'] = 'f' if kernel_kw['dtype'] == 'float' else ''
out = torch.zeros(Wx.shape, device=Wx.device, dtype=getattr(torch, str_dtype))
Wx = torch.view_as_real(Wx)
dWx = torch.view_as_real(dWx)
kernel_args = [Wx.data_ptr(), dWx.data_ptr(), Sfs.data_ptr(), out.data_ptr(),
cp.asarray(gamma, dtype=str_dtype)]
_run_on_gpu(kernel, blockspergrid, threadsperblock,
*kernel_args, **kernel_kw)
return out
@jit(nopython=True, cache=True)
def _ssq_cwt_log_piecewise(Wx, dWx, out, const, gamma, vlmin0, vlmin1,
dvl0, dvl1, idx1, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
wl = np.log2(w_ij)
if wl > vlmin1:
k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
else:
k = int(max(round((wl - vlmin0) / dvl0), 0))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_log_piecewise_par(Wx, dWx, out, const, gamma, vlmin0, vlmin1,
dvl0, dvl1, idx1, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
wl = np.log2(w_ij)
if wl > vlmin1:
k = int(min(round((wl - vlmin1) / dvl1) + idx1, omax))
else:
k = int(max(round((wl - vlmin0) / dvl0), 0))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_cwt_log(Wx, dWx, out, const, gamma, vlmin, dvl, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((np.log2(w_ij) - vlmin) / dvl, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_log_par(Wx, dWx, out, const, gamma, vlmin, dvl, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((np.log2(w_ij) - vlmin) / dvl, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_cwt_lin(Wx, dWx, out, const, gamma, vmin, dv, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_cwt_lin_par(Wx, dWx, out, const, gamma, vmin, dv, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs((B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True)
def _ssq_stft(Wx, dWx, Sfs, out, const, gamma, vmin, dv, omax, flipud=False):
for i in range(Wx.shape[0]):
for j in range(Wx.shape[1]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs(
Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
@jit(nopython=True, cache=True, parallel=True)
def _ssq_stft_par(Wx, dWx, Sfs, out, const, gamma, vmin, dv, omax, flipud=False):
for j in prange(Wx.shape[1]):
for i in range(Wx.shape[0]):
if abs(Wx[i, j]) > gamma:
A, B = dWx[i, j].real, dWx[i, j].imag
C, D = Wx[i, j].real, Wx[i, j].imag
w_ij = abs(
Sfs[i] - (B*C - A*D) / ((C**2 + D**2) * 6.283185307179586))
k = int(min(round(max((w_ij - vmin) / dv, 0)), omax))
if flipud:
k = omax - k
out[k, j] += Wx[i, j] * const[i]
#### CPU funcs & GPU kernel codes ############################################
_cpu_fns = {
'ssq_cwt_log_piecewise': _ssq_cwt_log_piecewise,
'ssq_cwt_log_piecewise_par': _ssq_cwt_log_piecewise_par,
'ssq_cwt_log': _ssq_cwt_log,
'ssq_cwt_log_par': _ssq_cwt_log_par,
'ssq_cwt_lin': _ssq_cwt_lin,
'ssq_cwt_lin_par': _ssq_cwt_lin_par,
'ssq_stft': _ssq_stft,
'ssq_stft_par': _ssq_stft_par,
'indexed_sum_log_piecewise': _indexed_sum_log_piecewise,
'indexed_sum_log_piecewise_par': _indexed_sum_log_piecewise_par,
'indexed_sum_log': _indexed_sum_log,
'indexed_sum_log_par': _indexed_sum_log_par,
'indexed_sum_lin': _indexed_sum_lin,
'indexed_sum_lin_par': _indexed_sum_lin_par,
}
_kernel_codes = dict(
ssq_cwt_log_piecewise='''
extern "C" __global__
void ssq_cwt_log_piecewise(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vlmin0, double vlmin1,
double dvl0, double dvl1,
int idx1) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
double wl;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
wl = log2${f}(w_ij);
if (wl > vlmin1){
k = (int)round((wl - vlmin1) / dvl1) + idx1;
if (k >= ${M})
k = ${M} - 1;
} else {
k = (int)round((wl - vlmin0) / dvl0);
if (k < 0)
k = 0;
}
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_cwt_log='''
extern "C" __global__
void ssq_cwt_log(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vlmin, double dvl) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)log2${f}(w_ij) - vlmin) / dvl);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_cwt_lin='''
extern "C" __global__
void ssq_cwt_lin(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vmin, double dv) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)w_ij - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
ssq_stft='''
extern "C" __global__
void ssq_stft(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} Sfs[${M}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
${dtype} *gamma,
double vmin, double dv) {
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
${dtype} w_ij, A, B, C, D;
for (int i=0; i < ${M}; ++i){
if (norm${f}(2, Wx[i][j]) > *gamma){
A = dWx[i][j][0];
B = dWx[i][j][1];
C = Wx[i][j][0];
D = Wx[i][j][1];
w_ij = abs(Sfs[i] - (B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
k = (int)round(((double)w_ij - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_log_piecewise='''
extern "C" __global__
void indexed_sum_log_piecewise(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vlmin0, double vlmin1,
double dvl0, double dvl1,
int idx1)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
double wl;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
wl = (double)log2${f}(w[i][j]);
if (wl > vlmin1){
k = (int)round((wl - vlmin1) / dvl1) + idx1;
if (k >= ${M})
k = ${M} - 1;
} else {
k = (int)round((wl - vlmin0) / dvl0);
if (k < 0)
k = 0;
}
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_log='''
extern "C" __global__
void indexed_sum_log(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vlmin, double dvl)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
k = (int)round(((double)log2${f}(w[i][j]) - vlmin) / dvl);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
indexed_sum_lin='''
extern "C" __global__
void indexed_sum_lin(${dtype} Wx[${M}][${N}][2],
${dtype} w[${M}][${N}],
${dtype} out[${M}][${N}][2],
${dtype} const_arr[${M}],
double vmin, double dv)
{
int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= ${N})
return;
int k;
for (int i=0; i < ${M}; ++i){
if (!isinf(w[i][j])){
k = (int)round(((double)(w[i][j]) - vmin) / dv);
if (k >= ${M})
k = ${M} - 1;
else if (k < 0)
k = 0;
${extra}
out[k][j][0] += Wx[i][j][0] * const_arr[i];
out[k][j][1] += Wx[i][j][1] * const_arr[i];
}
}
}
''',
phase_cwt='''
extern "C" __global__
void phase_cwt(${dtype} Wx[${M}][${N}][2],
${dtype} dWx[${M}][${N}][2],
${dtype} out[${M}][${N}],
${dtype} *gamma) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= ${M} || j >= ${N})
return;
if (norm${f}(2, Wx[i][j]) < *gamma){
out[i][j] = 1.0/0.0;
return;
}
${dtype} A = dWx[i][j][0];
${dtype} B = dWx[i][j][1];
${dtype} C = Wx[i][j][0];
${dtype} D = Wx[i][j][1];
out[i][j] = abs((B*C - A*D) / ((C*C + D*D) * 6.283185307179586));
}
''',
)
###############################################################################
from .utils.common import WARN, EPS64
from .utils.cwt_utils import logscale_transition_idx
| 46,480 | 34.78214 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/cwt_utils.py | # -*- coding: utf-8 -*-
import numpy as np
from scipy import integrate
from .common import WARN, assert_is_one_of, p2up
from .backend import torch, asnumpy
from ..configs import gdefaults
pi = np.pi
__all__ = [
'adm_ssq',
'adm_cwt',
'cwt_scalebounds',
'process_scales',
'infer_scaletype',
'make_scales',
'logscale_transition_idx',
'nv_from_scales',
'find_min_scale',
'find_max_scale',
'find_downsampling_scale',
'integrate_analytic',
'find_max_scale_alt',
'_process_fs_and_t',
]
def adm_ssq(wavelet):
"""Calculates the synchrosqueezing admissibility constant, the term
R_psi in Eq 15 of [1] (also see Eq 2.5 of [2]). Uses numeric intergration.
integral(conj(wavelet(w)) / w, w=0..inf)
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications. G. Thakur,
E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. Synchrosqueezed Wavelet Transforms: a Tool for Empirical Mode
Decomposition. I. Daubechies, J. Lu, H.T. Wu.
https://arxiv.org/pdf/0912.2437.pdf
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet).fn
Css = integrate_analytic(lambda w: np.conj(asnumpy(wavelet(w))) / w)
Css = Css.real if abs(Css.imag) < 1e-15 else Css
return Css
def adm_cwt(wavelet):
"""Calculates the cwt admissibility constant as per Eq. (4.67) of [1].
Uses numeric integration.
integral(wavelet(w) * conj(wavelet(w)) / w, w=0..inf)
1. Wavelet Tour of Signal Processing, 3rd ed. S. Mallat.
https://www.di.ens.fr/~mallat/papiers/WaveletTourChap1-2-3.pdf
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet).fn
Cpsi = integrate_analytic(lambda w: np.conj(asnumpy(wavelet(w))
) * asnumpy(wavelet(w)) / w)
Cpsi = Cpsi.real if abs(Cpsi.imag) < 1e-15 else Cpsi
return Cpsi
def cwt_scalebounds(wavelet, N, preset=None, min_cutoff=None, max_cutoff=None,
cutoff=None, bin_loc=None, bin_amp=None, use_padded_N=True,
viz=False):
"""Finds range of scales for which `wavelet` is "well-behaved", as
determined by `preset`. Assumes `wavelet` is uni-modal (one peak in freq
domain); may be inaccurate otherwise.
`min_scale`: found such that freq-domain wavelet takes on `cutoff` of its max
value on the greatest bin.
- Lesser `cutoff` -> lesser `min_scale`, always
`max_scale`: search determined by `preset`:
- 'maximal': found such that freq-domain takes `bin_amp` of its max value
on the `bin_loc`-th (non-dc) bin
- Greater `bin_loc` or lesser `bin_amp` -> lesser `max_scale`, always
- 'minimal': found more intricately independent of precise bin location,
but is likely to omit first several bins entirely; see
`help(utils.find_max_scale_alt)`.
- Greater `min_cutoff` -> lesser `max_scale`, generally
`viz==2` for more visuals, `viz==3` for even more.
# Arguments:
wavelet: `wavelets.Wavelet`
Wavelet sampled in Fourier frequency domain. See `help(cwt)`.
N: int
Length of wavelet to use.
min_cutoff, max_cutoff: float > 0 / None
Used to find max scale with `preset='minimal'`.
See `help(utils.find_max_scale_alt)`
cutoff: float / None
Used to find min scale. See `help(utils.find_min_scale)`
preset: str['maximal', 'minimal', 'naive'] / None
- 'maximal': yields a larger max and smaller min.
- 'minimal': strives to keep wavelet in "well-behaved" range of std_t
and std_w, but very high or very low frequencies' energies will be
under-represented. Is closer to MATLAB's default `cwtfreqbounds`.
- 'naive': returns (1, N), which is per original MATLAB Toolbox,
but a poor choice for most wavelet options.
- None: will use `min_cutoff, max_cutoff, cutoff` values, else
override `min_cutoff, max_cutoff` with those of `preset='minimal'`,
and of `cutoff` with that of `preset='maximal'`:
(min_cutoff, max_cutoff, cutoff) = (0.6, 0.8, -.5)
use_padded_N: bool (default True)
Whether to use `N=p2up(N)` in computations. Typically `N == len(x)`,
but CWT pads to next power of 2, which is the actual wavelet length
used, which typically behaves significantly differently at scale
extrema, thus recommended default True. Differs from passing
`N=p2up(N)[0]` and False only for first visual if `viz`, see code.
# Returns:
min_scale, max_scale: float, float
Minimum & maximum scales.
"""
def _process_args(preset, min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp):
defaults = dict(min_cutoff=.6, max_cutoff=.8, cutoff=-.5)
if preset is not None:
if any((min_cutoff, max_cutoff, cutoff)):
WARN("`preset` will override `min_cutoff, max_cutoff, cutoff`")
elif preset == 'minimal' and any((bin_amp, bin_loc)):
WARN("`preset='minimal'` ignores `bin_amp` & `bin_loc`")
assert_is_one_of(preset, 'preset',
('maximal', 'minimal', 'naive'))
if preset in ('naive', 'maximal'):
min_cutoff, max_cutoff = None, None
if preset == 'maximal':
cutoff = -.5
else:
min_cutoff, max_cutoff, cutoff = defaults.values()
else:
if min_cutoff is None:
min_cutoff = defaults['min_cutoff']
elif min_cutoff <= 0:
raise ValueError("`min_cutoff` must be >0 (got %s)" % min_cutoff)
if max_cutoff is None:
max_cutoff = defaults['max_cutoff']
elif max_cutoff < min_cutoff:
raise ValueError("must have `max_cutoff > min_cutoff` "
"(got %s, %s)" % (max_cutoff, min_cutoff))
bin_loc = bin_loc or (2 if preset == 'maximal' else None)
bin_amp = bin_amp or (1 if preset == 'maximal' else None)
cutoff = cutoff if (cutoff is not None) else defaults['cutoff']
return min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp
def _viz():
_viz_cwt_scalebounds(wavelet, N=M, Nt=M, min_scale=min_scale,
max_scale=max_scale, cutoff=cutoff)
if viz >= 2:
wavelet_waveforms(wavelet, M, min_scale)
wavelet_waveforms(wavelet, M, max_scale)
if viz == 3:
scales = make_scales(M, min_scale, max_scale)
sweep_harea(wavelet, M, scales)
min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp = _process_args(
preset, min_cutoff, max_cutoff, cutoff, bin_loc, bin_amp)
if preset == 'naive': # still _process_args for the NOTE
return 1, N
M = p2up(N)[0] if use_padded_N else N
min_scale = find_min_scale(wavelet, cutoff=cutoff)
if preset in ('minimal', None):
max_scale = find_max_scale_alt(wavelet, M, min_cutoff=min_cutoff,
max_cutoff=max_cutoff)
elif preset == 'maximal':
max_scale = find_max_scale(wavelet, M, bin_loc=bin_loc, bin_amp=bin_amp)
if viz:
_viz()
return min_scale, max_scale
def _assert_positive_integer(g, name=''):
if not (g > 0 and float(g).is_integer()):
raise ValueError(f"'{name}' must be a positive integer (got {g})")
def process_scales(scales, N, wavelet=None, nv=None, get_params=False,
use_padded_N=True):
"""Makes scales if `scales` is a string, else validates the array,
and returns relevant parameters if requested.
- Ensures, if array, `scales` is 1D, or 2D with last dim == 1
- Ensures, if string, `scales` is one of ('log', 'linear')
- If `get_params`, also returns (`scaletype`, `nv`, `na`)
- `scaletype`: inferred from `scales` ('linear' or 'log') if array
- `nv`, `na`: computed newly only if not already passed
"""
def _process_args(scales, nv, wavelet):
preset = None
if isinstance(scales, str):
if ':' in scales:
scales, preset = scales.split(':')
elif scales == 'log-piecewise':
preset = 'maximal'
assert_is_one_of(scales, 'scales',
('log', 'log-piecewise', 'linear'))
if nv is None:
nv = 32
if wavelet is None:
raise ValueError("must set `wavelet` if `scales` isn't array")
scaletype = scales
elif isinstance(scales, (np.ndarray, torch.Tensor)):
scales = asnumpy(scales)
if scales.squeeze().ndim != 1:
raise ValueError("`scales`, if array, must be 1D "
"(got shape %s)" % str(scales.shape))
scaletype, _nv = infer_scaletype(scales)
if scaletype == 'log':
if nv is not None and _nv != nv:
raise Exception("`nv` used in `scales` differs from "
"`nv` passed (%s != %s)" % (_nv, nv))
nv = _nv
elif scaletype == 'log-piecewise':
nv = _nv # will be array
scales = scales.reshape(-1, 1) # ensure 2D for broadcast ops later
else:
raise TypeError("`scales` must be a string or Numpy array "
"(got %s)" % type(scales))
if nv is not None and not isinstance(nv, np.ndarray):
_assert_positive_integer(nv, 'nv')
nv = int(nv)
return scaletype, nv, preset
scaletype, nv, preset = _process_args(scales, nv, wavelet)
if isinstance(scales, (np.ndarray, torch.Tensor)):
scales = scales.reshape(-1, 1)
return (scales if not get_params else
(scales, scaletype, len(scales), nv))
#### Compute scales & params #############################################
min_scale, max_scale = cwt_scalebounds(wavelet, N=N, preset=preset,
use_padded_N=use_padded_N)
scales = make_scales(N, min_scale, max_scale, nv=nv, scaletype=scaletype,
wavelet=wavelet)
na = len(scales)
return (scales if not get_params else
(scales, scaletype, na, nv))
def infer_scaletype(scales):
"""Infer whether `scales` is linearly or exponentially distributed (if latter,
also infers `nv`). Used internally on `scales` and `ssq_freqs`.
Returns one of: 'linear', 'log', 'log-piecewise'
"""
scales = asnumpy(scales).reshape(-1, 1)
if not isinstance(scales, np.ndarray):
raise TypeError("`scales` must be a numpy array (got %s)" % type(scales))
elif scales.dtype not in (np.float32, np.float64):
raise TypeError("`scales.dtype` must be np.float32 or np.float64 "
"(got %s)" % scales.dtype)
th_log = 4e-15 if scales.dtype == np.float64 else 8e-7
th_lin = th_log * 1e3 # less accurate for some reason
if np.mean(np.abs(np.diff(np.log(scales), 2, axis=0))) < th_log:
scaletype = 'log'
# ceil to avoid faulty float-int roundoffs
nv = int(np.round(1 / np.diff(np.log2(scales), axis=0)[0]))
elif np.mean(np.abs(np.diff(scales, 2, axis=0))) < th_lin:
scaletype = 'linear'
nv = None
elif logscale_transition_idx(scales) is None:
raise ValueError("could not infer `scaletype` from `scales`; "
"`scales` array must be linear or exponential. "
"(got diff(scales)=%s..." % np.diff(scales, axis=0)[:4])
else:
scaletype = 'log-piecewise'
nv = nv_from_scales(scales)
return scaletype, nv
def make_scales(N, min_scale=None, max_scale=None, nv=32, scaletype='log',
wavelet=None, downsample=None):
"""Recommended to first work out `min_scale` & `max_scale` with
`cwt_scalebounds`.
# Arguments:
N: int
`len(x)` or `len(x_padded)`.
min_scale, max_scale: float, float
Set scale range. Obtained e.g. from `utils.cwt_scalebounds`.
nv: int
Number of voices (wavelets) per octave.
scaletype: str['log', 'log-piecewise', 'linear']
Scaling kind to make.
`'log-piecewise'` uses `utils.find_downsampling_scale`.
wavelet: wavelets.Wavelet
Used only for `scaletype='log-piecewise'`.
downsample: int
Downsampling factor. Used only for `scaletype='log-piecewise'`.
# Returns:
scales: np.ndarray
"""
if scaletype == 'log-piecewise' and wavelet is None:
raise ValueError("must pass `wavelet` for `scaletype == 'log-piecewise'`")
if min_scale is None and max_scale is None and wavelet is not None:
min_scale, max_scale = cwt_scalebounds(wavelet, N, use_padded_N=True)
else:
min_scale = min_scale or 1
max_scale = max_scale or N
downsample = int(gdefaults('utils.cwt_utils.make_scales',
downsample=downsample))
# number of 2^-distributed scales spanning min to max
na = int(np.ceil(nv * np.log2(max_scale / min_scale)))
# floor to keep freq-domain peak at or to right of Nyquist
# min must be more precise, if need integer rounding do on max
mn_pow = int(np.floor(nv * np.log2(min_scale)))
mx_pow = mn_pow + na
if scaletype == 'log':
# TODO discretize per `logspace` instead
scales = 2 ** (np.arange(mn_pow, mx_pow) / nv)
elif scaletype == 'log-piecewise':
scales = 2 ** (np.arange(mn_pow, mx_pow) / nv)
idx = find_downsampling_scale(wavelet, scales)
if idx is not None:
# `+downsample - 1` starts `scales2` as continuing from `scales1`
# at `scales2`'s sampling rate; rest of ops are based on this design,
# such as `/nv` in ssq, which divides `scales2[0]` by `nv`, but if
# `scales2[0]` is one sample away from `scales1[-1]`, seems incorrect
scales1 = scales[:idx]
scales2 = scales[idx + downsample - 1::downsample]
scales = np.hstack([scales1, scales2])
elif scaletype == 'linear':
# TODO poor scheme (but there may not be any good one)
min_scale, max_scale = 2**(mn_pow/nv), 2**(mx_pow/nv)
na = int(np.ceil(max_scale / min_scale))
scales = np.linspace(min_scale, max_scale, na)
else:
raise ValueError("`scaletype` must be 'log' or 'linear'; "
"got: %s" % scaletype)
scales = scales.reshape(-1, 1) # ensure 2D for broadcast ops later
return scales
def logscale_transition_idx(scales):
"""Returns `idx` that splits `scales` as `[scales[:idx], scales[idx:]]`.
"""
scales = asnumpy(scales)
scales_diff2 = np.abs(np.diff(np.log(scales), 2, axis=0))
idx = np.argmax(scales_diff2) + 2
diff2_max = scales_diff2.max()
# every other value must be zero, assert it is so
scales_diff2[idx - 2] = 0
th = 1e-14 if scales.dtype == np.float64 else 1e-6
if not np.any(diff2_max > 100*np.abs(scales_diff2).mean()):
# everything's zero, i.e. no transition detected
return None
elif not np.all(np.abs(scales_diff2) < th):
# other nonzero diffs found, more than one transition point
return None
else:
return idx
def nv_from_scales(scales):
"""Infers `nv` from `scales` assuming `2**` scales; returns array
of length `len(scales)` if `scaletype = 'log-piecewise'`.
"""
scales = asnumpy(scales)
logdiffs = 1 / np.diff(np.log2(scales), axis=0)
nv = np.vstack([logdiffs[:1], logdiffs])
idx = logscale_transition_idx(scales)
if idx is not None:
nv_transition_idx = np.argmax(np.abs(np.diff(nv, axis=0))) + 1
assert nv_transition_idx == idx, "%s != %s" % (nv_transition_idx, idx)
return nv
def find_min_scale(wavelet, cutoff=1):
"""Design the wavelet in frequency domain. `scale` is found to yield
`scale * xi(scale=1)` such that its last (largest) positive value evaluates
`wavelet` to `cutoff * max(psih)`. If cutoff > 0, it lands to right of peak,
else to left (i.e. peak excluded).
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet)
w_peak, peak = find_maximum(wavelet.fn)
if cutoff > 0:
# search to right of peak
step_start, step_limit = w_peak, 10*w_peak
else:
# search to left of peak
step_start, step_limit = 0, w_peak
w_cutoff, _ = find_first_occurrence(wavelet.fn, value=abs(cutoff) * peak,
step_start=step_start,
step_limit=step_limit)
min_scale = w_cutoff / pi
return min_scale
def find_max_scale(wavelet, N, bin_loc=1, bin_amp=1):
"""Finds `scale` such that freq-domain wavelet's amplitude is `bin_amp`
of maximum at `bin_loc` bin. Set `bin_loc=1` to ensure no lower frequencies
are lost, but likewise mind redundancy (see `make_scales`).
"""
wavelet = Wavelet._init_if_not_isinstance(wavelet)
# get scale at which full freq-domain wavelet is likely to fit
wc_ct = center_frequency(wavelet, kind='peak-ct', N=N)
scalec_ct = (4/pi) * wc_ct
# get freq_domain wavelet, positive half (asm. analytic)
psih = asnumpy(wavelet(scale=scalec_ct, N=N)[:N//2 + 1])
# get (radian) frequencies at which it was sampled
xi = asnumpy(wavelet.xifn(scalec_ct, N))
# get index of psih's peak
midx = np.argmax(psih)
# get index where `psih` attains `bin1_amp` of its max value, to left of peak
w_bin = xi[np.where(psih[:midx] < psih.max()*bin_amp)[0][-1]]
# find scale such that wavelet amplitude is `bin_amp` of max at `bin_loc` bin
max_scale = scalec_ct * (w_bin / xi[bin_loc])
return max_scale
def find_downsampling_scale(wavelet, scales, span=5, tol=3, method='sum',
nonzero_th=.02, nonzero_tol=4., N=None, viz=False,
viz_last=False):
"""Find `scale` past which freq-domain wavelets are "excessively redundant",
redundancy determined by `span, tol, method, nonzero_th, nonzero_tol`.
# Arguments
wavelet: np.ndarray / wavelets.Wavelet
CWT wavelet.
scales: np.ndarray
CWT scales.
span: int
Number of wavelets to cross-correlate at each comparison.
tol: int
Tolerance value, works with `method`.
method: str['any', 'all', 'sum']
Condition relating `span` and `tol` to determine whether wavelets
are packed "too densely" at a given cross-correlation, relative
to "joint peak".
'any': at least one of wavelet peaks lie `tol` or more bins away
'all': all wavelet peaks lie `tol` or more bins away
'sum': sum(distances between wavelet peaks and joint peak) > `tol`
nonzero_th: float
Wavelet points as a fraction of respective maxima to consider
nonzero (i.e. `np.where(psih > psih.max()*nonzero_th)`).
nonzero_tol: float
Average number of nonzero points in a `span` group of wavelets above
which testing is exempted. (e.g. if 5 wavelets have 25 nonzero points,
average is 5, so if `nonzero_tol=4`, the `scale` is skipped/passed).
N: int / None
Length of wavelet to use. Defaults to 2048, which generalizes well
along other defaults, since those params (`span`, `tol`, etc) would
need to be scaled alongside `N`.
viz: bool (default False)
Visualize every test for debug purposes.
viz_last: bool (default True)
Visualize the failing scale (recommended if trying by hand);
ignored if `viz=True`.
"""
def check_group(psihs_peaks, joint_peak, method, tol):
too_dense = False
distances = np.abs(psihs_peaks[1] - joint_peak)
if method == 'any':
dist_max = distances.max()
if dist_max < tol:
too_dense = True
elif method == 'all':
dist_satisfied = (distances > tol)
if not np.all(dist_satisfied):
too_dense = True
elif method == 'sum':
dist_sum = distances.sum()
if dist_sum < tol:
too_dense = True
return too_dense
def _viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i):
max_nonzero_idx = np.where(psihs_nonzeros)[1].max()
plot(psihs.T[:max_nonzero_idx + 3], color='tab:blue',
vlines=(joint_peak, {'color': 'tab:red'}))
scat(psihs_peaks[1], psihs[psihs_peaks].T, color='tab:red', show=1)
distances = np.abs(psihs_peaks[1] - joint_peak)
print("(idx, peak distances from joint peak, joint peak) = "
"({}, {}, {})".format(i, distances, joint_peak), flush=True)
assert_is_one_of(method, 'method', ('any', 'all', 'sum'))
if not isinstance(wavelet, np.ndarray):
wavelet = Wavelet._init_if_not_isinstance(wavelet)
N = N or 2048
Psih = (wavelet if isinstance(wavelet, (np.ndarray, torch.Tensor)) else
wavelet(scale=scales, N=N))
Psih = asnumpy(Psih)
if len(Psih) != len(scales):
raise ValueError("len(Psih) != len(scales) "
"(%s != %s)" % (len(Psih), len(scales)))
# analytic, drop right half (all zeros)
Psih = Psih[:, :Psih.shape[1]//2]
n_scales = len(Psih)
n_groups = n_scales - span - 1
psihs_peaks = None
for i in range(n_groups):
psihs = Psih[i:i + span]
psihs_nonzeros = (psihs > nonzero_th*psihs.max(axis=1)[:, None])
avg_nonzeros = psihs_nonzeros.sum() / span
if avg_nonzeros > nonzero_tol:
continue
psihs_peaks = np.where(psihs == psihs.max(axis=1)[:, None])
joint_peak = np.argmax(np.prod(psihs, 0)) # mutually cross-correlate
too_dense = check_group(psihs_peaks, joint_peak, method, tol)
if too_dense:
break
if viz:
_viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i)
if (viz or viz_last) and psihs_peaks is not None:
print(("Failing scale: (idx, scale) = ({}, {:.2f})\n"
"out of max: (idx, scale) = ({}, {:.2f})"
).format(i, float(scales[i]), len(scales) - 1, float(scales[-1])))
_viz(psihs, psihs_peaks, joint_peak, psihs_nonzeros, i)
return i if (i < n_groups - 1) else None
def integrate_analytic(int_fn, nowarn=False):
"""Assumes function that's zero for negative inputs (e.g. analytic wavelet),
decays toward right, and is unimodal: int_fn(t<0)=0, int_fn(t->inf)->0.
Integrates using trapezoidal rule, from 0 to inf (equivalently).
Integrates near zero separately in log space (useful for e.g. 1/x).
"""
def _est_arr(mxlim, N):
t = np.linspace(mxlim, .1, N, endpoint=False)[::-1]
arr = int_fn(t)
max_idx = np.argmax(arr)
min_neglect_idx = _min_neglect_idx(np.abs(arr[max_idx:]),
th=1e-15) + max_idx
return arr, t, min_neglect_idx
def _find_convergent_array():
mxlims = [1, 20, 80, 160]
for m, mxlim in zip([1, 1, 4, 8], mxlims):
arr, t, min_neglect_idx = _est_arr(mxlim, N=10000*m)
# ensure sufficient decay between peak and right endpoint, and
# that `arr` isn't a flatline (contains wavelet peak)
if ((len(t) - min_neglect_idx > 1000 * m) and
np.sum(np.abs(arr)) > 1e-5):
break
else:
if int_nz < 1e-5:
raise Exception("Could not find converging or non-negligibly"
"-valued bounds of integration for `int_fn`")
elif not nowarn:
WARN("Integrated only from 1e-15 to 0.1 in logspace")
return arr[:min_neglect_idx], t[:min_neglect_idx]
def _integrate_near_zero():
# sample `intfn` more finely as it might be extremely narrow near zero.
# this still doesn't work well as float64 zeros the numerator before /w,
# but the true integral will be negligibly small most of the time anyway
# (.001 to .1 may not be negligible, however; better captured by logspace)
t = np.logspace(-15, -1, 1000)
arr = int_fn(t)
return integrate.trapz(arr, t)
int_nz = _integrate_near_zero()
arr, t = _find_convergent_array()
return integrate.trapz(arr, t) + int_nz
def find_max_scale_alt(wavelet, N, min_cutoff=.1, max_cutoff=.8):
"""
Design the wavelet in frequency domain. `scale` is found to yield
`scale * xi(scale=1)` such that two of its consecutive values land
symmetrically about the peak of `psih` (i.e. none *at* peak), while
still yielding `wavelet(w)` to fall between `min_cutoff`* and `max_cutoff`*
`max(psih)`. `scale` is selected such that the symmetry is attained
using smallest possible bins (closest to dc). Steps:
1. Find `w` (input value to `wavelet`) for which `wavelet` is maximized
(i.e. peak of `psih`).
2. Find two `w` such that `wavelet` attains `min_cutoff` and `max_cutoff`
times its maximum value, using `w` in previous step as upper bound.
3. Find `div_size` such that `xi` lands at both points of symmetry;
`div_size` == increment between successive values of
`xi = scale * xi(scale=1)`.
- `xi` begins at zero; along the cutoff bounds, and us selecting
the smallest number of divisions/increments to reach points of
symmetry, we guarantee a unique `scale`.
This yields a max `scale` that'll generally lie in 'nicely-behaved' region
of std_t; value can be used to fine-tune further.
See `visuals.sweep_std_t`.
"""
if max_cutoff <= 0 or min_cutoff <= 0:
raise ValueError("`max_cutoff` and `min_cutoff` must be positive "
"(got %s, %s)" % (max_cutoff, min_cutoff))
elif max_cutoff <= min_cutoff:
raise ValueError("must have `max_cutoff > min_cutoff` "
"(got %s, %s)" % (max_cutoff, min_cutoff))
wavelet = Wavelet._init_if_not_isinstance(wavelet)
w_peak, peak = find_maximum(wavelet.fn)
# we solve the inverse problem; instead of looking for spacing of xi
# that'd land symmetrically about psih's peak, we pick such points
# above a set ratio of peak's value and ensure they divide the line
# from left symmetry point to zero an integer number of times
# define all points of wavelet from cutoff to peak, left half
w_cutoff, _ = find_first_occurrence(wavelet.fn, value=min_cutoff * peak,
step_start=0, step_limit=w_peak)
w_ltp = np.arange(w_cutoff, w_peak, step=1/N) # left-to-peak
# consider every point on wavelet(w_ltp) (except peak) as candidate cutoff
# point, and pick earliest one that yields integer number of increments
# from left point of symmetry to origin
div_size = (w_peak - w_ltp[:-1]) * 2 # doubled so peak is skipped
n_divs = w_ltp[:-1] / div_size
# diff of modulus; first drop in n_divs is like [.98, .99, 0, .01], so at 0
# we've hit an integer, and n_divs grows ~linearly so behavior guaranteed
# -.8 arbitrary to be ~1 but <1
try:
idx = np.where(np.diff(n_divs % 1) < -.8)[0][0]
except:
raise Exception("Failed to find suffciently-integer xi divisions; try "
"widening (min_cutoff, max_cutoff)")
# the div to base the scale on (angular bin spacing of scale*xi)
div_scale = div_size[idx + 1]
# div size of scale=1 (spacing between angular bins at scale=1)
w_1div = pi / (N / 2)
max_scale = div_scale / w_1div
return max_scale
def _process_fs_and_t(fs, t, N):
"""Ensures `t` is uniformly-spaced and of same length as `x` (==N)
and returns `fs` and `dt` based on it, or from defaults if `t` is None.
"""
if fs is not None and t is not None:
WARN("`t` will override `fs` (both were passed)")
if t is not None:
if len(t) != N:
# not explicitly used anywhere but ensures wrong `t` wasn't supplied
raise Exception("`t` must be of same length as `x` "
"(%s != %s)" % (len(t), N))
elif not np.mean(np.abs(np.diff(t, 2, axis=0))) < 1e-7: # float32 thr.
raise Exception("Time vector `t` must be uniformly sampled.")
fs = 1 / (t[1] - t[0])
else:
if fs is None:
fs = 1
elif fs <= 0:
raise ValueError("`fs` must be > 0")
dt = 1 / fs
return dt, fs, t
#############################################################################
from ..algos import _min_neglect_idx, find_maximum, find_first_occurrence
from ..wavelets import Wavelet, center_frequency
from ..visuals import plot, scat, _viz_cwt_scalebounds, wavelet_waveforms
from ..visuals import sweep_harea
| 29,511 | 39.650138 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/common.py | # -*- coding: utf-8 -*-
import numpy as np
import logging
from textwrap import wrap
from .fft_utils import fft, ifft
logging.basicConfig(format='')
WARN = lambda msg: logging.warning("WARNING: %s" % msg)
NOTE = lambda msg: logging.warning("NOTE: %s" % msg) # else it's mostly ignored
pi = np.pi
EPS32 = np.finfo(np.float32).eps # machine epsilon
EPS64 = np.finfo(np.float64).eps
__all__ = [
"WARN",
"NOTE",
"pi",
"EPS32",
"EPS64",
"p2up",
"padsignal",
"trigdiff",
"mad",
"est_riskshrink_thresh",
"find_closest_parallel_is_faster",
"assert_is_one_of",
"_textwrap",
]
def p2up(n):
"""Calculates next power of 2, and left/right padding to center
the original `n` locations.
# Arguments:
n: int
Length of original (unpadded) signal.
# Returns:
n_up: int
Next power of 2.
n1: int
Left pad length.
n2: int
Right pad length.
"""
up = int(2**(1 + np.round(np.log2(n))))
n2 = int((up - n) // 2)
n1 = int(up - n - n2)
return up, n1, n2
def padsignal(x, padtype='reflect', padlength=None, get_params=False):
"""Pads signal and returns trim indices to recover original.
# Arguments:
x: np.ndarray / torch.Tensor
Input vector, 1D or 2D. 2D has time in dim1, e.g. `(n_inputs, time)`.
padtype: str
Pad scheme to apply on input. One of:
('reflect', 'symmetric', 'replicate', 'wrap', 'zero').
'zero' is most naive, while 'reflect' (default) partly mitigates
boundary effects. See [1] & [2].
Torch doesn't support all padding schemes, but `cwt` will still
pad it via NumPy.
padlength: int / None
Number of samples to pad input to (i.e. len(x_padded) == padlength).
Even: left = right, Odd: left = right + 1.
Defaults to next highest power of 2 w.r.t. `len(x)`.
# Returns:
xp: np.ndarray
Padded signal.
n_up: int
Next power of 2, or `padlength` if provided.
n1: int
Left pad length.
n2: int
Right pad length.
# References:
1. Signal extension modes. PyWavelets contributors
https://pywavelets.readthedocs.io/en/latest/ref/
signal-extension-modes.html
2. Wavelet Bases and Lifting Wavelets. H. Xiong.
http://min.sjtu.edu.cn/files/wavelet/
6-lifting%20wavelet%20and%20filterbank.pdf
"""
def _process_args(x, padtype):
is_numpy = bool(isinstance(x, np.ndarray))
supported = (('zero', 'reflect', 'symmetric', 'replicate', 'wrap')
if is_numpy else
('zero', 'reflect'))
assert_is_one_of(padtype, 'padtype', supported)
if not hasattr(x, 'ndim'):
raise TypeError("`x` must be a numpy array or torch Tensor "
"(got %s)" % type(x))
elif x.ndim not in (1, 2):
raise ValueError("`x` must be 1D or 2D (got x.ndim == %s)" % x.ndim)
return is_numpy
is_numpy = _process_args(x, padtype)
N = x.shape[-1]
if padlength is None:
# pad up to the nearest power of 2
n_up, n1, n2 = p2up(N)
else:
n_up = padlength
if abs(padlength - N) % 2 == 0:
n1 = n2 = (n_up - N) // 2
else:
n2 = (n_up - N) // 2
n1 = n2 + 1
n_up, n1, n2 = int(n_up), int(n1), int(n2)
# set functional spec
if x.ndim == 1:
pad_width = (n1, n2)
elif x.ndim == 2:
pad_width = ([(0, 0), (n1, n2)] if is_numpy else
(n1, n2))
# comments use (n=4, n1=4, n2=3) as example, but this combination can't occur
if is_numpy:
if padtype == 'zero':
# [1,2,3,4] -> [0,0,0,0, 1,2,3,4, 0,0,0]
xp = np.pad(x, pad_width)
elif padtype == 'reflect':
# [1,2,3,4] -> [3,4,3,2, 1,2,3,4, 3,2,1]
xp = np.pad(x, pad_width, mode='reflect')
elif padtype == 'replicate':
# [1,2,3,4] -> [1,1,1,1, 1,2,3,4, 4,4,4]
xp = np.pad(x, pad_width, mode='edge')
elif padtype == 'wrap':
# [1,2,3,4] -> [1,2,3,4, 1,2,3,4, 1,2,3]
xp = np.pad(x, pad_width, mode='wrap')
elif padtype == 'symmetric':
# [1,2,3,4] -> [4,3,2,1, 1,2,3,4, 4,3,2]
if x.ndim == 1:
xp = np.hstack([x[::-1][-n1:], x, x[::-1][:n2]])
elif x.ndim == 2:
xp = np.hstack([x[:, ::-1][:, -n1:], x, x[:, ::-1][:, :n2]])
else:
import torch
mode = 'constant' if padtype == 'zero' else 'reflect'
if x.ndim == 1:
xp = torch.nn.functional.pad(x[None], pad_width, mode)[0]
else:
xp = torch.nn.functional.pad(x, pad_width, mode)
return (xp, n_up, n1, n2) if get_params else xp
def trigdiff(A, fs=1., padtype=None, rpadded=None, N=None, n1=None, window=None,
transform='cwt'):
"""Trigonometric / frequency-domain differentiation; see `difftype` in
`help(ssq_cwt)`. Used internally by `ssq_cwt` with `order > 0`.
Un-transforms `A`, then transforms differentiated.
# Arguments:
A: np.ndarray
2D array to differentiate (or 3D, batched).
fs: float
Sampling frequency, used to scale derivative to physical units.
padtype: str / None
Whether to pad `A` (along dim1) before differentiating.
rpadded: bool (default None)
Whether `A` is already padded. Defaults to True if `padtype` is None.
Must pass `N` if True.
N: int
Length of unpadded signal (i.e. `A.shape[1]`).
n1: int
Will trim differentiated array as `A_diff[:, n1:n1+N]` (un-padding).
transform: str['cwt', 'stft']
Whether `A` stems from CWT or STFT, which changes how differentiation
is done. `'stft'` currently not supported.
"""
from ..wavelets import _xifn
from . import backend as S
def _process_args(A, rpadded, padtype, N, transform, window):
if transform == 'stft':
raise NotImplementedError("`transform='stft'` is currently not "
"supported.")
assert isinstance(A, np.ndarray) or S.is_tensor(A), type(A)
assert A.ndim in (2, 3)
if rpadded and N is None:
raise ValueError("must pass `N` if `rpadded`")
if transform == 'stft' and window is None:
raise ValueError("`transform='stft'` requires `window`")
rpadded = rpadded or False
padtype = padtype or ('reflect' if not rpadded else None)
return rpadded, padtype
rpadded, padtype = _process_args(A, rpadded, padtype, N, transform, window)
if padtype is not None:
A, _, n1, *_ = padsignal(A, padtype, get_params=True)
if transform == 'cwt':
xi = S.asarray(_xifn(1, A.shape[-1]), A.dtype)
A_freqdom = fft(A, axis=-1, astensor=True)
A_diff = ifft(A_freqdom * 1j * xi * fs, axis=-1, astensor=True)
else:
# this requires us to first fully invert STFT(x), then `buffer(x)`,
# then compute `diff_window`, which isn't hard to implement;
# last of these is done
# wf = fft(S.asarray(window, A.dtype))
# xi = S.asarray(_xifn(1, len(window))[None], A.dtype)
# if len(window) % 2 == 0:
# xi[len(window) // 2] = 0
# reshape = (-1, 1) if A.ndim == 2 else (1, -1, 1)
# diff_window = ifft(wf * 1j * xi).real.reshape(*reshape)
pass
if rpadded or padtype is not None:
if N is None:
N = A.shape[-1]
if n1 is None:
_, n1, _ = p2up(N)
idx = ((slice(None), slice(n1, n1 + N)) if A.ndim == 2 else
(slice(None), slice(None), slice(n1, n1 + N)))
A_diff = A_diff[idx]
if S.is_tensor(A_diff):
A_diff = A_diff.contiguous()
return A_diff
def est_riskshrink_thresh(Wx, nv):
"""Estimate the RiskShrink hard thresholding level, based on [1].
This has a denoising effect, but risks losing much of the signal; it's larger
the more high-frequency content there is, even if not noise.
# Arguments:
Wx: np.ndarray
CWT of a signal (see `cwt`).
nv: int
Number of voices used in CWT (see `cwt`).
# Returns:
gamma: float
The RiskShrink hard thresholding estimate.
# References:
1. The Synchrosqueezing algorithm for time-varying spectral analysis:
robustness properties and new paleoclimate applications.
G. Thakur, E. Brevdo, N.-S. Fučkar, and H.-T. Wu.
https://arxiv.org/abs/1105.0010
2. Synchrosqueezing Toolbox, (C) 2014--present. E. Brevdo, G. Thakur.
https://github.com/ebrevdo/synchrosqueezing/blob/master/synchrosqueezing/
est_riskshrink_thresh.m
"""
N = Wx.shape[1]
Wx_fine = np.abs(Wx[:nv])
gamma = 1.4826 * np.sqrt(2 * np.log(N)) * mad(Wx_fine)
return gamma
def find_closest_parallel_is_faster(shape, dtype='float32', trials=7, verbose=1):
"""Returns True if `find_closest(, parallel=True)` is faster, as averaged
over `trials` trials on dummy data.
"""
from timeit import timeit
from ..algos import find_closest
a = np.abs(np.random.randn(*shape).astype(dtype))
v = np.random.uniform(0, len(a), len(a)).astype(dtype)
t0 = timeit(lambda: find_closest(a, v, parallel=False), number=trials)
t1 = timeit(lambda: find_closest(a, v, parallel=True), number=trials)
if verbose:
print("Parallel avg.: {} sec\nNon-parallel avg.: {} sec".format(
t1 / trials, t0 / trials))
return t1 > t0
def mad(data, axis=None):
"""Mean absolute deviation"""
return np.mean(np.abs(data - np.mean(data, axis)), axis)
def assert_is_one_of(x, name, supported, e=ValueError):
if x not in supported:
raise e("`{}` must be one of: {} (got {})".format(
name, ', '.join(supported), x))
def _textwrap(txt, wrap_len=50):
"""Preserves line breaks and includes `'\n'.join()` step."""
return '\n'.join(['\n'.join(
wrap(line, wrap_len, break_long_words=False, replace_whitespace=False))
for line in txt.splitlines() if line.strip() != ''])
| 10,463 | 32.43131 | 81 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/gpu_utils.py | # -*- coding: utf-8 -*-
import numpy as np
from collections import namedtuple
from string import Template
from .backend import torch, cp
Stream = namedtuple('Stream', ['ptr'])
def _run_on_gpu(kernel, grid, block, *args, **kwargs):
kernel_name = kernel.split('void ')[1].split('(')[0]
fn = load_kernel(kernel_name, kernel, **kwargs)
fn(grid=grid, block=block, args=args,
stream=Stream(ptr=torch.cuda.current_stream().cuda_stream))
@cp._util.memoize(for_each_device=True)
def load_kernel(kernel_name, code, **kwargs):
code = Template(code).substitute(**kwargs)
kernel_code = cp.cuda.compile_with_cache(code)
return kernel_code.get_function(kernel_name)
def _get_kernel_params(x, dim=1, threadsperblock=None):
M, N = x.shape[:2]
if dim == 1:
threadsperblock = threadsperblock or (1024,)
blockspergrid = (int(np.ceil(M * N / threadsperblock[0])),)
elif dim == 2:
threadsperblock = threadsperblock or (32, 32)
blockspergrid_x = int(np.ceil(M / threadsperblock[0]))
blockspergrid_y = int(np.ceil(N / threadsperblock[1]))
blockspergrid = (blockspergrid_x, blockspergrid_y)
dtype = ('double' if x.dtype in (torch.float64, torch.complex128) else
'float')
kernel_kw = dict(dtype=dtype, M=M, N=N)
str_dtype = 'float32' if dtype == 'float' else 'float64'
return blockspergrid, threadsperblock, kernel_kw, str_dtype
| 1,432 | 33.95122 | 74 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/backend.py | # -*- coding: utf-8 -*-
import numpy as np
# torch & cupy imported at bottom
def allclose(a, b, device='cuda'):
"""`numpy.allclose` or `torch.allclose`, latter if input(s) are Tensor."""
if is_tensor(a, b, mode='any'):
a, b = asarray(a, device=device), asarray(b, device=device)
return torch.allclose(a, b)
return np.allclose(a, b)
def astype(x, dtype, device='cuda'):
if is_tensor(x):
return x.to(dtype=_torch_dtype(dtype))
return x.astype(dtype)
def array(x, dtype=None, device='cuda'):
if USE_GPU():
return torch.tensor(x, dtype=_torch_dtype(dtype), device=device)
return np.array(x)
def asarray(x, dtype=None, device='cuda'):
if USE_GPU():
return torch.as_tensor(x, dtype=_torch_dtype(dtype), device=device)
return np.asarray(x, dtype=dtype)
def zeros(shape, dtype=None, device='cuda'):
if USE_GPU():
return torch.zeros(shape, dtype=_torch_dtype(dtype), device=device)
return np.zeros(shape, dtype=dtype)
def ones(shape, dtype=None, device='cuda'):
if USE_GPU():
return torch.ones(shape, dtype=_torch_dtype(dtype), device=device)
return np.ones(shape, dtype=dtype)
def is_tensor(*args, mode='all'):
cond = all if mode == 'all' else any
return cond(isinstance(x, torch.Tensor) for x in args)
def is_dtype(x, str_dtype):
return (str_dtype in str(x.dtype) if isinstance(str_dtype, str) else
any(sd in str(x.dtype) for sd in str_dtype))
def atleast_1d(x, dtype=None, device='cuda'):
return Q.atleast_1d(asarray(x, dtype=dtype, device=device))
def asnumpy(x):
if is_tensor(x):
return x.cpu().numpy()
return x
def arange(a, b=None, dtype=None, device='cuda'):
if b is None:
a, b = 0, a
if USE_GPU():
if isinstance(dtype, str):
dtype = getattr(torch, dtype)
return torch.arange(a, b, dtype=dtype, device=device)
return np.arange(a, b, dtype=dtype)
def vstack(x):
if is_tensor(x) or (isinstance(x, list) and is_tensor(x[0])):
if isinstance(x, list):
# stack arrays as elements in extended dim0
return torch.vstack([_x[None] for _x in x])
return torch.vstack(x)
return np.vstack([x])
#### misc + dummies ##########################################################
def warn_if_tensor_and_par(x, parallel):
if parallel and is_tensor(x):
from .common import WARN
WARN("`parallel` ignored with tensor input.")
def _torch_dtype(dtype):
if isinstance(dtype, str):
return getattr(torch, dtype)
elif isinstance(dtype, np.dtype):
return getattr(torch, str(dtype).split('.')[-1])
return dtype # assume torch.dtype
class _TensorDummy():
pass
class TorchDummy():
"""Dummy class with dummy attributes."""
def __init__(self):
self.Tensor = _TensorDummy
self.dtype = _TensorDummy
class _Util():
"""For wrapper: `@cp._util.memoize`."""
def memoize(self, *args, **kwargs):
def wrap(fn):
return fn
return wrap
class CupyDummy():
"""Dummy class with dummy attributes."""
def __init__(self):
self._util = _Util()
class _Q():
"""Class for accessing `numpy` or `torch` attributes according to `USE_GPU()`.
"""
def __getattr__(self, name):
if USE_GPU():
return getattr(torch, name)
return getattr(np, name)
##############################################################################
Q = _Q()
try:
import torch
import cupy as cp
except:
torch = TorchDummy()
cp = CupyDummy()
from ..configs import USE_GPU
| 3,656 | 24.573427 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/fft_utils.py | # -*- coding: utf-8 -*-
import numpy as np
import multiprocessing
from scipy.fft import fftshift as sfftshift, ifftshift as sifftshift
from scipy.fft import fft as sfft, rfft as srfft, ifft as sifft, irfft as sirfft
from pathlib import Path
from . import backend as S
from ..configs import IS_PARALLEL
try:
from torch.fft import (fft as tfft, rfft as trfft,
ifft as tifft, irfft as tirfft,
fftshift as tfftshift, ifftshift as tifftshift)
except ImportError:
pass
try:
import pyfftw
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(600)
except ImportError:
pyfftw = None
UTILS_DIR = Path(__file__).parent
__all__ = [
'fft',
'rfft',
'ifft',
'irfft',
'fftshift',
'ifftshift',
'FFT',
'FFT_GLOBAL',
]
#############################################################################
class FFT():
"""Global class for ssqueezepy FFT methods.
Will use GPU via PyTorch if environment flag `'SSQ_GPU'` is set to `'1'`.
Will use `scipy.fft` or `pyfftw` depending on `patience` argument (and
whether `pyfftw` is installed).
Both will use `threads` CPUs to accelerate computing.
In a nutshell, if you plan on re-running FFT on input of same shape and dtype,
prefer `patience=1`, which introduces a lengthy first-time overhead but may
compute significantly faster afterwards.
# Arguments (`fft`, `rfft`, `ifft`, `irfft`):
x: np.ndarray
1D or 2D.
axis: int
FFT axis. One of `0, 1, -1`.
patience: int / tuple[int, int]
If int:
0: will use `scipy.fft`
1: `pyfftw` with flag `'FFTW_PATIENT'`
2: `pyfftw` with flag `'FFTW_EXHAUSTIVE'`
Else, if tuple, second element specifies `planning_timelimit`
passed to `pyfftw.FFTW` (so tuple requires `patience[0] != 0`).
Set `planning_timelimit = None` to allow planning to finish,
but beware; `patience = 1` can take hours for large inputs, and `2`
even longer.
astensor: bool (default False)
If computing on GPU, whether to return as `torch.Tensor` (if False,
will move to CPU and convert to `numpy.ndarray`).
n: int / None
Only for `irfft`; length of original input. If None, will default to
`2*(x.shape[axis] - 1)`.
__________________________________________________________________________
# Arguments (`__init__`):
planning_timelimit: int
Default.
wisdom_dir: str
Where to save wisdom to or load from. Empty string means
`ssqueezepy/utils/`.
threads: int
Number of CPU threads to use. -1 = maximum.
patience: int
Default `patience`.
cache_fft_objects: bool (default False)
If True, `pyfftw` objects generated throughout session are stored in
`FFT._input_history`, and retrieved if all of below match:
`(x.shape, x.dtype, real, patience, n)`
where `patience` includes `planning_timelimit` as a tuple.
Default False since loading from wisdom is very fast anyway.
verbose: bool (default True)
Controls whether a message is printed upon `patience >= 1`.
__________________________________________________________________________
**Wisdom**
`pyfftw` uses "wisdom", basically storing and reusing generated FFT plans
if input attributes match:
(`x.shape`, `x.dtype`, `axis`, `flags`, `planning_timelimit`)
`flags` and `planning_timelimit` are set via `patience`.
With each `pyfftw` use, `save_wisdom()` is called, writing to `wisdom32` and
`wisdom64` bytes files in `ssqueezepy/utils`. Each time ssqueezepy runs in a
new session, `load_wisdom()` is called to load these values, so wisdom is
only expansive.
"""
def __init__(self, planning_timelimit=120, wisdom_dir=UTILS_DIR, threads=None,
patience=0, cache_fft_objects=False, verbose=1):
self.planning_timelimit = planning_timelimit
self.wisdom_dir = wisdom_dir
self._user_threads = threads
self._patience = patience # default patience
self._process_patience(patience) # error if !=0 and pyfftw not installed
self.cache_fft_objects = cache_fft_objects
self.verbose = verbose
if pyfftw is not None:
pyfftw.config.NUM_THREADS = self.threads
self._wisdom32_path = str(Path(self.wisdom_dir, 'wisdom32'))
self._wisdom64_path = str(Path(self.wisdom_dir, 'wisdom64'))
self._wisdom32, self._wisdom64 = b'', b''
self._input_history = {}
self.load_wisdom()
@property
def threads(self):
"""Set dynamically if `threads` wasn't passed in __init__."""
if self._user_threads is None:
return (multiprocessing.cpu_count() if IS_PARALLEL() else 1)
return self._user_threads
@property
def patience(self):
"""Setter will also set `planning_timelimit` if setting to tuple."""
return self._patience
@patience.setter
def patience(self, value):
self._validate_patience(value)
if isinstance(value, tuple):
self._patience, self.planning_timelimit = value
else:
self._patience = value
#### Main methods #########################################################
def fft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('fft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sfft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=False)
return fft_object()
def rfft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('rfft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return srfft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=True)
return fft_object()
def ifft(self, x, axis=-1, patience=None, astensor=False):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('ifft', x, dim=axis, astensor=astensor)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sifft(x, axis=axis, workers=self.threads)
fft_object = self._get_save_fill(x, axis, patience, real=False,
inverse=True)
return fft_object()
def irfft(self, x, axis=-1, patience=None, astensor=False, n=None):
"""See `help(ssqueezepy.utils.FFT)`."""
out = self._maybe_gpu('irfft', x, dim=axis, astensor=astensor, n=n)
if out is not None:
return out
patience = self._process_patience(patience)
if patience == 0:
return sirfft(x, axis=axis, workers=self.threads, n=n)
fft_object = self._get_save_fill(x, axis, patience, real=True,
inverse=True, n=n)
return fft_object()
def fftshift(self, x, axes=-1, astensor=False):
out = self._maybe_gpu('fftshift', x, dim=axes, astensor=astensor)
if out is not None:
return out
return sfftshift(x, axes=axes)
def ifftshift(self, x, axes=-1, astensor=False):
out = self._maybe_gpu('ifftshift', x, dim=axes, astensor=astensor)
if out is not None:
return out
return sifftshift(x, axes=axes)
def _maybe_gpu(self, name, x, astensor=False, **kw):
if S.is_tensor(x):
fn = {'fft': tfft, 'ifft': tifft,
'rfft': trfft, 'irfft': tirfft,
'fftshift': tfftshift, 'ifftshift': tifftshift}[name]
out = fn(S.asarray(x), **kw)
return out if astensor else out.cpu().numpy()
return None
#### FFT makers ###########################################################
def _get_save_fill(self, x, axis, patience, real, inverse=False, n=None):
fft_object = self.get_fft_object(x, axis, patience, real, inverse, n)
self.save_wisdom()
fft_object.input_array[:] = x
return fft_object
def get_fft_object(self, x, axis, patience=1, real=False, inverse=False,
n=None):
combo = (x.shape, x.dtype, axis, real, n)
if self.cache_fft_objects and combo in self._input_history:
fft_object = self._input_history[combo]
else:
fft_object = self._get_fft_object(x, axis, patience, real, inverse, n)
if self.cache_fft_objects:
self._input_history[combo] = fft_object
return fft_object
def _get_fft_object(self, x, axis, patience, real, inverse, n):
(shapes, dtypes, flags, planning_timelimit, direction
) = self._process_input(x, axis, patience, real, inverse, n)
shape_in, shape_out = shapes
dtype_in, dtype_out = dtypes
a = pyfftw.empty_aligned(shape_in, dtype=dtype_in)
b = pyfftw.empty_aligned(shape_out, dtype=dtype_out)
fft_object = pyfftw.FFTW(a, b, axes=(axis,), flags=flags,
planning_timelimit=planning_timelimit,
direction=direction, threads=self.threads)
return fft_object
def _process_input(self, x, axis, patience, real, inverse, n):
self._validate_input(x, axis, real, patience, inverse)
# patience, planning time, forward / inverse
if isinstance(patience, tuple):
patience, planning_timelimit = patience
else:
planning_timelimit = self.planning_timelimit
flags = ['FFTW_PATIENT'] if patience == 1 else ['FFTW_EXHAUSTIVE']
direction = 'FFTW_BACKWARD' if inverse else 'FFTW_FORWARD'
# shapes
shape_in = x.shape
shape_out = self._get_output_shape(x, axis, real, inverse, n)
# dtypes
double = x.dtype in (np.float64, np.cfloat)
cdtype = 'complex128' if double else 'complex64'
rdtype = 'float64' if double else 'float32'
dtype_in = rdtype if (real and not inverse) else cdtype
dtype_out = rdtype if (real and inverse) else cdtype
# notify user of procedure
if self.verbose:
if planning_timelimit is None:
adjective = "very long" if patience == 2 else "long"
print("Planning optimal FFT algorithm; this may "
"take %s..." % adjective)
else:
print("Planning optimal FFT algorithm; this will take up to "
"%s secs" % planning_timelimit)
return ((shape_in, shape_out), (dtype_in, dtype_out), flags,
planning_timelimit, direction)
def _get_output_shape(self, x, axis, real=False, inverse=False, n=None):
if not inverse:
n_fft = x.shape[axis]
fft_out_len = (n_fft//2 + 1) if real else n_fft
else:
if real:
n_fft = n if (n is not None) else 2*(x.shape[axis] - 1)
else:
n_fft = x.shape[axis]
fft_out_len = n_fft
if x.ndim != 1:
shape = list(x.shape)
shape[axis] = fft_out_len
shape = tuple(shape)
else:
shape = (fft_out_len,)
return shape
#### Misc #################################################################
def load_wisdom(self):
for name in ('wisdom32', 'wisdom64'):
path = getattr(self, f"_{name}_path")
if Path(path).is_file():
with open(path, 'rb') as f:
setattr(self, f"_{name}", f.read())
pyfftw.import_wisdom((self._wisdom64, self._wisdom32, b''))
def save_wisdom(self):
"""Will overwrite."""
self._wisdom64, self._wisdom32, _ = pyfftw.export_wisdom()
for name in ('wisdom32', 'wisdom64'):
path = getattr(self, f"_{name}_path")
with open(path, 'wb') as f:
f.write(getattr(self, f"_{name}"))
def _validate_input(self, x, axis, real, patience, inverse):
"""Assert is single/double precision and is 1D/2D."""
supported = ('float32', 'float64', 'complex64', 'complex128')
dtype = str(x.dtype)
if dtype not in supported:
raise TypeError("unsupported `x.dtype`: %s " % dtype
+ "(must be one of: %s)" % ', '.join(supported))
if (real and not inverse) and dtype.startswith('complex'):
raise TypeError("`x` cannot be complex for `rfft`")
if axis not in (0, 1, -1):
raise ValueError("unsupported `axis`: %s " % axis
+ "; must be 0, 1, or -1")
self._validate_patience(patience)
def _validate_patience(self, patience):
if not isinstance(patience, (int, tuple)):
raise TypeError("`patience` must be int or tuple "
"(got %s)" % type(patience))
elif isinstance(patience, int):
from .common import assert_is_one_of
assert_is_one_of(patience, 'patience', (0, 1, 2))
def _process_patience(self, patience):
patience = patience if (patience is not None) else self.patience
if pyfftw is None and patience != 0:
raise ValueError("`patience != 0` requires `pyfftw` installed.")
return patience
FFT_GLOBAL = FFT()
fft = FFT_GLOBAL.fft
rfft = FFT_GLOBAL.rfft
ifft = FFT_GLOBAL.ifft
irfft = FFT_GLOBAL.irfft
fftshift = FFT_GLOBAL.fftshift
ifftshift = FFT_GLOBAL.ifftshift
| 14,188 | 37.142473 | 82 | py |
ssqueezepy | ssqueezepy-master/ssqueezepy/utils/stft_utils.py | # -*- coding: utf-8 -*-
import numpy as np
from numpy.fft import fft, fftshift
from numba import jit, prange
from scipy import integrate
from .gpu_utils import _run_on_gpu, _get_kernel_params
from ..configs import IS_PARALLEL
from .backend import torch
from . import backend as S
__all__ = [
"buffer",
"unbuffer",
"window_norm",
"window_resolution",
"window_area",
]
def buffer(x, seg_len, n_overlap, modulated=False, parallel=None):
"""Build 2D array where each column is a successive slice of `x` of length
`seg_len` and overlapping by `n_overlap` (or equivalently incrementing
starting index of each slice by `hop_len = seg_len - n_overlap`).
Mimics MATLAB's `buffer`, with less functionality.
Supports batched input with samples along dim 0, i.e. `(n_inputs, input_len)`.
See `help(stft)` on `modulated`.
Ex:
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
xb = buffer(x, seg_len=5, n_overlap=3)
xb == [[0, 1, 2, 3, 4],
[2, 3, 4, 5, 6],
[4, 5, 6, 7, 8]].T
"""
S.warn_if_tensor_and_par(x, parallel)
assert x.ndim in (1, 2)
hop_len = seg_len - n_overlap
n_segs = (x.shape[-1] - seg_len) // hop_len + 1
s20 = int(np.ceil(seg_len / 2))
s21 = s20 - 1 if (seg_len % 2 == 1) else s20
args = (seg_len, n_segs, hop_len, s20, s21, modulated)
if S.is_tensor(x):
if x.ndim == 1:
out = _buffer_gpu(x, seg_len, n_segs, hop_len, s20, s21, modulated)
elif x.ndim == 2:
out = x.new_zeros((len(x), seg_len, n_segs))
for _x, _out in zip(x, out):
_buffer_gpu(_x, *args, out=_out)
else:
parallel = parallel or IS_PARALLEL()
fn = _buffer_par if parallel else _buffer
if x.ndim == 1:
out = np.zeros((seg_len, n_segs), dtype=x.dtype, order='F')
fn(x, out, *args)
elif x.ndim == 2:
out = np.zeros((len(x), seg_len, n_segs), dtype=x.dtype, order='F')
for _x, _out in zip(x, out):
fn(_x, _out, *args)
return out
@jit(nopython=True, cache=True)
def _buffer(x, out, seg_len, n_segs, hop_len, s20, s21, modulated=False):
for i in range(n_segs):
if not modulated:
start = hop_len * i
end = start + seg_len
out[:, i] = x[start:end]
else:
start0 = hop_len * i
end0 = start0 + s21
start1 = end0
end1 = start1 + s20
out[:s20, i] = x[start1:end1]
out[s20:, i] = x[start0:end0]
@jit(nopython=True, cache=True, parallel=True)
def _buffer_par(x, out, seg_len, n_segs, hop_len, s20, s21, modulated=False):
for i in prange(n_segs):
if not modulated:
start = hop_len * i
end = start + seg_len
out[:, i] = x[start:end]
else:
start0 = hop_len * i
end0 = start0 + s21
start1 = end0
end1 = start1 + s20
out[:s20, i] = x[start1:end1]
out[s20:, i] = x[start0:end0]
def _buffer_gpu(x, seg_len, n_segs, hop_len, s20, s21, modulated=False, out=None):
kernel = '''
extern "C" __global__
void buffer(${dtype} x[${N}],
${dtype} out[${L}][${W}],
bool modulated,
int hop_len, int seg_len,
int s20, int s21)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= ${W})
return;
int start = hop_len * i;
for (int j=start; j < start + seg_len; ++j){
if (!modulated){
out[j - start][i] = x[j];
} else {
if (j < start + s20){
out[j - start][i] = x[j + s21];
} else{
out[j - start][i] = x[j - s20];
}
}
}
}
'''
if not isinstance(x, torch.Tensor):
x = torch.as_tensor(x, device='cuda')
if out is None:
out = x.new_zeros((seg_len, n_segs))
blockspergrid, threadsperblock, kernel_kw, _ = _get_kernel_params(out, dim=1)
kernel_kw.update(dict(N=len(x), L=len(out), W=out.shape[1]))
kernel_args = [x.data_ptr(), out.data_ptr(), bool(modulated), hop_len,
seg_len, s20, s21]
_run_on_gpu(kernel, blockspergrid, threadsperblock, *kernel_args, **kernel_kw)
return out
def unbuffer(xbuf, window, hop_len, n_fft, N, win_exp=1):
"""Undoes `buffer` (minus unpadding), per padding logic used in `stft`:
(N, n_fft) : logic
even, even: left = right + 1
(N, n_fft, len(xp), pl, pr) -> (128, 120, 247, 60, 59)
odd, odd: left = right
(N, n_fft, len(xp), pl, pr) -> (129, 121, 249, 60, 60)
even, odd: left = right
(N, n_fft, len(xp), pl, pr) -> (128, 121, 248, 60, 60)
odd, even: left = right + 1
(N, n_fft, len(xp), pl, pr) -> (129, 120, 248, 60, 59)
"""
if N is None:
# assume greatest possible len(x) (unpadded)
N = xbuf.shape[1] * hop_len + len(window) - 1
if len(window) != n_fft:
raise ValueError("Must have `len(window) == n_fft` "
"(got %s != %s)" % (len(window), n_fft))
if win_exp == 0:
window = 1
elif win_exp != 1:
window = window ** win_exp
x = np.zeros(N + n_fft - 1, dtype=xbuf.dtype)
_overlap_add(x, xbuf, window, hop_len, n_fft)
return x
def window_norm(window, hop_len, n_fft, N, win_exp=1):
"""Computes window modulation array for use in `stft` and `istft`."""
wn = np.zeros(N + n_fft - 1)
_window_norm(wn, window, hop_len, n_fft, win_exp)
return wn
@jit(nopython=True, cache=True)
def _overlap_add(x, xbuf, window, hop_len, n_fft):
for i in range(xbuf.shape[1]):
n = i * hop_len
x[n:n + n_fft] += xbuf[:, i] * window
@jit(nopython=True, cache=True)
def _window_norm(wn, window, hop_len, n_fft, win_exp=1):
max_hops = (len(wn) - n_fft) // hop_len + 1
wpow = window ** (win_exp + 1)
for i in range(max_hops):
n = i * hop_len
wn[n:n + n_fft] += wpow
def window_resolution(window):
"""Minimal function to compute a window's time & frequency widths, assuming
Fourier spectrum centered about dc (else use `ssqueezepy.wavelets` methods).
Returns std_w, std_t, harea. `window` must be np.ndarray and >=0.
"""
from ..wavelets import _xifn
assert window.min() >= 0, "`window` must be >= 0 (got min=%s)" % window.min()
N = len(window)
t = np.arange(-N/2, N/2, step=1)
ws = fftshift(_xifn(1, N))
psihs = fftshift(fft(window))
apsi2 = np.abs(window)**2
apsih2s = np.abs(psihs)**2
var_w = integrate.trapz(ws**2 * apsih2s, ws) / integrate.trapz(apsih2s, ws)
var_t = integrate.trapz(t**2 * apsi2, t) / integrate.trapz(apsi2, t)
std_w, std_t = np.sqrt(var_w), np.sqrt(var_t)
harea = std_w * std_t
return std_w, std_t, harea
def window_area(window, time=True, frequency=False):
"""Minimal function to compute a window's time or frequency 'area' as area
under curve of `abs(window)**2`. `window` must be np.ndarray.
"""
from ..wavelets import _xifn
if not time and not frequency:
raise ValueError("must compute something")
if time:
t = np.arange(-len(window)/2, len(window)/2, step=1)
at = integrate.trapz(np.abs(window)**2, t)
if frequency:
ws = fftshift(_xifn(1, len(window)))
apsih2s = np.abs(fftshift(fft(window)))**2
aw = integrate.trapz(apsih2s, ws)
if time and frequency:
return at, aw
elif time:
return at
return aw
| 7,677 | 30.991667 | 82 | py |
DAS | DAS-master/code/my_layers.py | import keras.backend as K
from keras.engine.topology import Layer
from keras.layers.convolutional import Conv1D
from keras import initializers
from keras import regularizers
from keras import constraints
import tensorflow as tf
import numpy as np
################################################################################
# Quadratic-time MMD with Gaussian RBF
def _mix_rbf_kernel(X, Y, sigmas=[1.], wts=None):
if wts is None:
wts = [1] * len(sigmas)
XX = tf.matmul(X, X, transpose_b=True)
XY = tf.matmul(X, Y, transpose_b=True)
YY = tf.matmul(Y, Y, transpose_b=True)
X_sqnorms = tf.diag_part(XX)
Y_sqnorms = tf.diag_part(YY)
r = lambda x: tf.expand_dims(x, 0)
c = lambda x: tf.expand_dims(x, 1)
K_XX, K_XY, K_YY = 0, 0, 0
for sigma, wt in zip(sigmas, wts):
gamma = 1 / (2 * sigma**2)
K_XX += wt * tf.exp(-gamma * (-2 * XX + c(X_sqnorms) + r(X_sqnorms)))
K_XY += wt * tf.exp(-gamma * (-2 * XY + c(X_sqnorms) + r(Y_sqnorms)))
K_YY += wt * tf.exp(-gamma * (-2 * YY + c(Y_sqnorms) + r(Y_sqnorms)))
return K_XX, K_XY, K_YY, tf.reduce_sum(wts)
def _mmd2(K_XX, K_XY, K_YY, const_diagonal=False, biased=False):
m = tf.cast(tf.shape(K_XX)[0], tf.float32)
n = tf.cast(tf.shape(K_YY)[0], tf.float32)
if biased:
mmd2 = (tf.reduce_sum(K_XX, keep_dims=True) / (m * m)
+ tf.reduce_sum(K_YY, keep_dims=True) / (n * n)
- 2 * tf.reduce_sum(K_XY, keep_dims=True) / (m * n))
else:
if const_diagonal is not False:
trace_X = m * const_diagonal
trace_Y = n * const_diagonal
else:
trace_X = tf.trace(K_XX)
trace_Y = tf.trace(K_YY)
mmd2 = ((tf.reduce_sum(K_XX) - trace_X) / (m * (m - 1))
+ (tf.reduce_sum(K_YY) - trace_Y) / (n * (n - 1))
- 2 * tf.reduce_sum(K_XY) / (m * n))
return mmd2
def mix_rbf_mmd2(X, Y, sigmas=[1.], wts=None, biased=True):
K_XX, K_XY, K_YY, d = _mix_rbf_kernel(X, Y, sigmas, wts)
return _mmd2(K_XX, K_XY, K_YY, const_diagonal=d, biased=biased)
def rbf_mmd2(X, Y, sigma=1., biased=True):
return mix_rbf_mmd2(X, Y, sigmas=[sigma], biased=biased)
################################################################################
################################################################################
# Customized layers
class Max_over_time(Layer):
def __init__(self, **kwargs):
self.supports_masking = True
super(Max_over_time, self).__init__(**kwargs)
def call(self, x, mask=None):
if mask is not None:
mask = K.cast(mask, K.floatx())
mask = K.expand_dims(mask)
x = x * mask
return K.max(x, axis=1)
def compute_output_shape(self, input_shape):
return (input_shape[0], input_shape[2])
def compute_mask(self, x, mask):
return None
class KL_loss(Layer):
def __init__(self, batch_size, **kwargs):
super(KL_loss, self).__init__(**kwargs)
self.batch_size = batch_size
def call(self, x, mask=None):
a = x[0]
b = x[1]
a = K.mean(a, axis=0, keepdims=True)
b = K.mean(b, axis=0, keepdims=True)
a /= K.sum(a, keepdims=True)
b /= K.sum(b, keepdims=True)
a = K.clip(a, K.epsilon(), 1)
b = K.clip(b, K.epsilon(), 1)
loss = K.sum(a*K.log(a/b), axis=-1, keepdims=True) \
+ K.sum(b*K.log(b/a), axis=-1, keepdims=True)
loss = K.repeat_elements(loss, self.batch_size, axis=0)
return loss
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
class mmd_loss(Layer):
def __init__(self, batch_size, **kwargs):
super(mmd_loss, self).__init__(**kwargs)
self.batch_size = batch_size
def call(self, x, mask=None):
a = x[0]
b = x[1]
mmd = rbf_mmd2(a, b)
mmd = K.repeat_elements(mmd, self.batch_size, axis=0)
return mmd
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
class Ensemble_pred_loss(Layer):
def __init__(self, **kwargs):
super(Ensemble_pred_loss, self).__init__(**kwargs)
def call(self, x, mask=None):
pred = x[0]
target = x[1]
weight = x[2]
error = K.categorical_crossentropy(target, pred)
loss = error * weight
return loss
def compute_output_shape(self, input_shape):
return (input_shape[0][0], 1)
def compute_mask(self, x, mask):
return None
class Conv1DWithMasking(Conv1D):
def __init__(self, **kwargs):
self.supports_masking = True
super(Conv1DWithMasking, self).__init__(**kwargs)
def compute_mask(self, x, mask):
return mask
| 4,951 | 26.359116 | 80 | py |
DAS | DAS-master/code/optimizers.py | import keras.optimizers as opt
def get_optimizer(args):
clipvalue = 0
clipnorm = 10
if args.algorithm == 'rmsprop':
optimizer = opt.RMSprop(lr=0.0005, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'sgd':
optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adagrad':
optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adadelta':
optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adam':
optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
elif args.algorithm == 'adamax':
optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
return optimizer
| 943 | 41.909091 | 115 | py |
DAS | DAS-master/code/train_batch.py | import argparse
import logging
import numpy as np
from time import time
import utils as U
logging.basicConfig(
# filename='out.log',
level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
##############################################################################################################################
# Parse arguments
parser = argparse.ArgumentParser()
# arguments related to datasets and data preprocessing
parser.add_argument("--dataset", dest="dataset", type=str, metavar='<str>', required=True, help="The name of the dataset (small_1|small_2|large|amazon)")
parser.add_argument("--source", dest="source", type=str, metavar='<str>', required=True, help="The name of the source domain")
parser.add_argument("--target", dest="target", type=str, metavar='<str>', required=True, help="The name of the source target")
parser.add_argument("-v", "--vocab-size", dest="vocab_size", type=int, metavar='<int>', default=10000, help="Vocab size. '0' means no limit (default=0)")
parser.add_argument("--n-class", dest="n_class", type=int, metavar='<int>', default=3, help="The number of ouput classes")
parser.add_argument("-t", "--type", dest="model_type", type=str, metavar='<str>', default='DAS', help="Model type (default=DAS)")
parser.add_argument("--emb", dest="emb_path", type=str, metavar='<str>', help="The path to the word embeddings file")
# hyper-parameters related to network training
parser.add_argument("-a", "--algorithm", dest="algorithm", type=str, metavar='<str>', default='rmsprop', help="Optimization algorithm (rmsprop|sgd|adagrad|adadelta|adam|adamax) (default=rmsprop)")
parser.add_argument("--epochs", dest="epochs", type=int, metavar='<int>', default=15, help="Number of epochs (default=15)")
parser.add_argument("-b", "--batch-size", dest="batch_size", type=int, metavar='<int>', default=50, help="Batch size (default=50)")
# hyper-parameters related to network structure
parser.add_argument("-e", "--embdim", dest="emb_dim", type=int, metavar='<int>', default=300, help="Embeddings dimension (default=300)")
parser.add_argument("-c", "--cnndim", dest="cnn_dim", type=int, metavar='<int>', default=300, help="CNN output dimension.(default=300)")
parser.add_argument("-w", "--cnnwin", dest="cnn_window_size", type=int, metavar='<int>', default=3, help="CNN window size. (default=3)")
parser.add_argument("--cnn-activation", dest="cnn_activation", type=str, metavar='<str>', default='relu', help="The activation of CNN")
parser.add_argument("--dropout", dest="dropout_prob", type=float, metavar='<float>', default=0.5, help="The dropout probability. To disable, input 0 (default=0.5)")
parser.add_argument("--discrepancy-obj", dest="minimize_discrepancy_obj", type=str, metavar='<str>', default='kl_loss', help="The loss for minimizing domain discrepancy (default=kl_loss)")
# hyper-parameters related to DAS objectives
# You can play with those hyper-parameters to see the different variants of our model.
# e.g. set weight_uns to 0 denotes DAS-EM; set weight_entropy to 0 denotes DAS-SE;
# set weight_discrepancy, weight_entropy, weight_uns all to 0s denotes NaiveNN.
parser.add_argument("--weight-discrepancy", dest="weight_discrepancy", type=float, metavar='<float>', default=200, help="The weight of the domain discrepancy minimization objective (lamda_1 in the paper)")
parser.add_argument("--weight-entropy", dest="weight_entropy", type=float, metavar='<float>', default=1.0, help="The weight of the target entropy objective (lamda_2 in the paper)")
parser.add_argument("--weight-uns", dest="weight_uns", type=float, metavar='<float>', default=3.0, help="The max value of the ensemble prediction objective weight (lamda_3 in the paper)")
parser.add_argument("--ensemble-prob", dest="ensemble_prob", type=float, metavar='<float>', default=0.5, help="The ensemble momentum (alpha in the paper)")
# random seed that affects data splits and parameter intializations
parser.add_argument("--seed", dest="seed", type=int, metavar='<int>', default=1234, help="Random seed (default=1234)")
args = parser.parse_args()
U.print_args(args)
# small_1 and small_2 denote eperimenal setting 1 and setting 2 on the small-scale dataset respectively.
# large denotes the large-scale dataset. Table 1(b) in the paper
# amazon denotes the amazon benchmark dataset (Blitzer et al., 2007). See appendix A in the paper.
assert args.dataset in {'small_1', 'small_2', 'large', 'amazon'}
assert args.model_type == 'DAS'
# The domains contained in each dataset
if args.dataset in {'small_1', 'small_2'}:
assert args.source in {'book', 'electronics', 'beauty', 'music'}
assert args.target in {'book', 'electronics', 'beauty', 'music'}
elif args.dataset == 'large':
assert args.source in {'imdb', 'yelp2014', 'cell_phone', 'baby'}
assert args.target in {'imdb', 'yelp2014', 'cell_phone', 'baby'}
else:
# note that the book and electronics domains of amazon benchmark are different from those in small_1 and small_2
assert args.source in {'book', 'dvd', 'electronics', 'kitchen'}
assert args.target in {'book', 'dvd', 'electronics', 'kitchen'}
assert args.algorithm in {'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'}
# In DAS, we use kl_loss for minimizing domain discrepancy. (See section 3.2 in paper)
assert args.minimize_discrepancy_obj in {'kl_loss', 'mmd'}
if args.seed > 0:
np.random.seed(args.seed)
##############################################################################################################################
# Prepare data
if args.dataset == 'amazon':
from read_amazon import get_data
else:
from read import get_data
vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un = get_data(
args.dataset, args.source, args.target, args.n_class, args.vocab_size)
print '------------ Traing Sets ------------'
print 'Number of labeled source examples: ', len(source_x)
print 'Number of total source examples (labeled+unlabeled): ', len(source_un)
print 'Number of unlabeled target examples: ', len(target_un)
print '------------ Development Set ------------'
print 'Size of development set: ', len(dev_x)
print '------------ Test Set -------------'
print 'Size of test set: ', len(test_x)
def batch_generator(data_list, batch_size):
num = len(data_list[0])
while True:
excerpt = np.random.choice(num, batch_size)
yield[data[excerpt] for data in data_list]
def batch_generator_large(data_list, batch_size):
#######################################
# Generate balanced labeled source examples.
# Only used on large dataset as
# the training set is quite unbalanced.
#######################################
label_list = np.argmax(data_list[1], axis=-1)
pos_inds = np.where(label_list==0)[0]
neg_inds = np.where(label_list==1)[0]
neu_inds = np.where(label_list==2)[0]
while True:
pos_sample = np.random.choice(pos_inds, batch_size/3)
neg_sample = np.random.choice(neg_inds, batch_size/3)
neu_sample = np.random.choice(neu_inds, batch_size/3+batch_size%3)
excerpt = np.concatenate((pos_sample, neg_sample))
excerpt = np.concatenate((excerpt, neu_sample))
np.random.shuffle(excerpt)
yield[data[excerpt] for data in data_list]
##############################################################################################################################
# Optimizer algorithm
from optimizers import get_optimizer
optimizer = get_optimizer(args)
###############################################################################################################################
# Building model
from models import create_model
import keras.backend as K
logger.info(' Building model')
def entropy(y_true, y_pred):
return K.mean(K.categorical_crossentropy(y_pred, y_pred), axis=-1)
def return_ypred(y_true, y_pred):
return y_pred
model = create_model(args, overall_maxlen, vocab)
model.compile(optimizer=optimizer,
loss={'source_probs': 'categorical_crossentropy', 'target_probs': entropy, 'discrepancy_loss': return_ypred, 'uns_loss': return_ypred},
loss_weights={'source_probs': 1, 'target_probs': args.weight_entropy, 'discrepancy_loss': args.weight_discrepancy, 'uns_loss': 1},
metrics={'source_probs': 'categorical_accuracy'})
###############################################################################################################################
# Training
from keras.utils.np_utils import to_categorical
# weight ramp-up function on the ensemble prediction objective
# w(t) in the paper.
def rampup(epoch):
max_rampup_epochs = 30.0
if epoch == 0:
return 0
elif epoch < args.epochs:
p = min(max_rampup_epochs, float(epoch)) / max_rampup_epochs
p = 1.0 - p
return np.exp(-p*p*5.0)*args.weight_uns
from tqdm import tqdm
logger.info('----------------------------------------- Training Model ---------------------------------------------------------')
if args.dataset == 'large':
source_gen = batch_generator_large([source_x, source_y], batch_size=args.batch_size)
else:
source_gen = batch_generator([source_x, source_y], batch_size=args.batch_size)
source_un_gen = batch_generator([source_un], batch_size=args.batch_size)
target_un_gen = batch_generator([target_un], batch_size=args.batch_size)
overall_x = np.concatenate((source_un, target_un))
samples_per_epoch = len(overall_x)
batches_per_epoch = samples_per_epoch / args.batch_size
# Set the limit of batches_per_epoch to 500
batches_per_epoch = min(batches_per_epoch, 500)
#Initialize targets for unlabeled data. (See algorithm 1 in paper)
ensemble_prediction = np.zeros((len(overall_x), args.n_class))
targets = np.zeros((len(overall_x), args.n_class))
epoch_predictions = np.zeros((len(overall_x), args.n_class))
get_predictions = K.function([model.get_layer('uns_input').input, K.learning_phase()], [model.get_layer('uns_predictions').output])
best_valid_acc = 0
pred_probs = None
for ii in xrange(args.epochs):
t0 = time()
train_loss, source_loss, target_loss, dis_loss, uns_loss, train_metric = 0., 0., 0., 0., 0., 0.
uns_gen = batch_generator([overall_x, targets], batch_size=args.batch_size)
for b in tqdm(xrange(batches_per_epoch)):
batch_source_x, batch_source_y = source_gen.next()
batch_source_un = source_un_gen.next()[0]
batch_target_un = target_un_gen.next()[0]
batch_uns, batch_targets = uns_gen.next()
train_loss_, source_loss_, target_loss_, dis_loss_, uns_loss_, train_metric_ = model.train_on_batch(
[batch_source_x, batch_source_un, batch_target_un, batch_uns, batch_targets, np.full((args.batch_size, 1), rampup(ii))],
{'source_probs': batch_source_y, 'target_probs': batch_source_y, 'discrepancy_loss': np.ones((args.batch_size, 1)) ,
'uns_loss': np.ones((args.batch_size, 1))})
train_loss += train_loss_ / batches_per_epoch
source_loss += source_loss_ / batches_per_epoch
target_loss += target_loss_ / batches_per_epoch
uns_loss += uns_loss_ / batches_per_epoch
dis_loss += dis_loss_ / batches_per_epoch
train_metric += train_metric_ / batches_per_epoch
# after the training of each epoch, compute predictions on unlabeled data
for ind in xrange(0, len(overall_x), args.batch_size):
if ind+args.batch_size > len(overall_x):
batch_inds = range(ind, len(overall_x))
else:
batch_inds = range(ind, ind+args.batch_size)
batch_ = overall_x[batch_inds]
batch_predictions = get_predictions([batch_, 0])[0]
for i, j in enumerate(batch_inds):
epoch_predictions[j] = batch_predictions[i]
# compute ensemble predictions on unlabeled data
ensemble_prediction = args.ensemble_prob*ensemble_prediction + (1-args.ensemble_prob)*epoch_predictions
targets = ensemble_prediction / (1.0-args.ensemble_prob**(ii+1))
targets = to_categorical(np.argmax(ensemble_prediction, axis=-1), args.n_class)
tr_time = time() - t0
valid_loss, valid_source_loss, valid_target_loss, valid_dis_loss, valid_uns_loss, valid_metric = model.evaluate([dev_x, dev_x, dev_x, dev_x, dev_y, np.ones((len(dev_y), 1))],\
{'source_probs': dev_y, 'target_probs': dev_y, 'discrepancy_loss': np.ones((len(dev_x),1)), 'uns_loss': np.ones((len(dev_x),1))}, batch_size=args.batch_size, verbose=1)
logger.info('Epoch %d, train: %is' % (ii, tr_time))
logger.info('[Train] loss: %.4f, [Source Classification] loss: %.4f, [Target Entropy] loss, %.4f, [Ensemble Prediction] loss: %.4f, [Discrepancy] loss: %.4f, metric: %.4f' \
% (train_loss, source_loss, target_loss, uns_loss, dis_loss, train_metric))
logger.info('[Validation] loss: %.4f, [Classification] loss: %.4f, [Entropy] loss, %.4f, [Ensemble Prediction] loss: %.4f, [Discrepancy] loss: %.4f, metric: %.4f' \
% (valid_loss, valid_source_loss, valid_target_loss, valid_uns_loss, valid_dis_loss, valid_metric))
if valid_metric > best_valid_acc:
best_valid_acc = valid_metric
print("------------- Best performance on dev set so far ==> evaluating on test set -------------")
logger.info("------------- Best performance on dev set so far ==> evaluating on test set -------------\n")
if args.dataset == 'large':
#pad test set so that its size is dividible by batch_size
append = args.batch_size-(len(test_y)%args.batch_size)
test_x_ = np.concatenate((test_x, np.zeros((append, test_x.shape[1]))))
test_y_ = np.concatenate((test_y, np.zeros((append, test_y.shape[1]))))
pred_probs = model.predict([test_x_, test_x_, test_x_, test_x_,
test_y_, np.ones((len(test_y_), 1))], batch_size=args.batch_size, verbose=1)[0]
pred_probs = pred_probs[:len(test_y)]
else:
pred_probs = model.predict([test_x, test_x, test_x, test_x,
test_y, np.ones((len(test_y), 1))], batch_size=args.batch_size, verbose=1)[0]
from sklearn.metrics import classification_report, accuracy_score, precision_recall_fscore_support
preds = np.argmax(pred_probs, axis=-1)
true = np.argmax(test_y, axis=-1)
# Compute accuracy on test set
logger.info("accuracy: "+ str(accuracy_score(true, preds)) + "\n")
# Compute macro-f1 on test set
p_macro, r_macro, f_macro, support_macro \
= precision_recall_fscore_support(true, preds, average='macro')
f_macro = 2*p_macro*r_macro/(p_macro+r_macro)
logger.info("macro-f1: "+str(f_macro) + "\n\n")
| 14,860 | 49.037037 | 205 | py |
DAS | DAS-master/code/models.py | import numpy as np
import logging
import codecs
from keras.layers import Dense, Dropout, Activation, Embedding, Input
from keras.models import Model
import keras.backend as K
from my_layers import Conv1DWithMasking, Max_over_time, KL_loss, Ensemble_pred_loss, mmd_loss
from keras.constraints import maxnorm
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
def create_model(args, overal_maxlen, vocab):
##############################################################################################################################
# Custom CNN kernel initializer
# Use the initialization from Kim et al. (2014) for CNN kernel initialization.
def my_init(shape, dtype=K.floatx()):
return 0.01 * np.random.standard_normal(size=shape)
##############################################################################################################################
# Funtion that loads word embeddings from Glove vectors
def init_emb(emb_matrix, vocab, emb_file):
print 'Loading word embeddings ...'
counter = 0.
pretrained_emb = open(emb_file)
for line in pretrained_emb:
tokens = line.split()
if len(tokens) != 301:
continue
word = tokens[0]
vec = tokens[1:]
try:
emb_matrix[0][vocab[word]] = vec
counter += 1
except KeyError:
pass
pretrained_emb.close()
logger.info('%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), 100*counter/len(vocab)))
return emb_matrix
##############################################################################################################################
# Create Model
cnn_padding='same'
vocab_size = len(vocab)
if args.model_type == 'DAS':
print '\n'
logger.info('Building model for DAS')
# labeled source examples
source_input = Input(shape=(overal_maxlen,), dtype='int32', name='source_input')
# unlabeled source examples (this includes all source examples with and without labels)
source_un_input = Input(shape=(overal_maxlen,), dtype='int32', name='source_un_input')
# unlabeled target examples
target_un_input = Input(shape=(overal_maxlen,), dtype='int32', name='target_un_input')
# all examples from both source and target domains
uns_input = Input(shape=(overal_maxlen,), dtype='int32', name='uns_input')
# estimated sentiment labels for all examples
uns_target = Input(shape=(args.n_class,), dtype=K.floatx(), name='uns_target')
# ramp-up weight
uns_weight = Input(shape=(1, ), dtype=K.floatx(), name='uns_weight')
word_emb = Embedding(vocab_size, args.emb_dim, mask_zero=True, name='word_emb')
source_output = word_emb(source_input)
source_un_output = word_emb(source_un_input)
target_un_output = word_emb(target_un_input)
uns_output = word_emb(uns_input)
print 'use a cnn layer'
conv = Conv1DWithMasking(filters=args.cnn_dim, kernel_size=args.cnn_window_size, \
activation=args.cnn_activation, padding=cnn_padding, kernel_initializer=my_init, name='cnn')
source_output = conv(source_output)
source_un_output = conv(source_un_output)
target_un_output = conv(target_un_output)
uns_output = conv(uns_output)
print 'use max_over_time as aggregation function'
source_output = Max_over_time(name='mot')(source_output)
source_un_output = Max_over_time()(source_un_output)
target_un_output = Max_over_time()(target_un_output)
uns_output = Max_over_time()(uns_output)
if args.minimize_discrepancy_obj == 'kl_loss':
dis_loss = KL_loss(args.batch_size, name='discrepancy_loss')([source_un_output, target_un_output])
elif args.minimize_discrepancy_obj == 'mmd':
dis_loss = mmd_loss(args.batch_size, name='discrepancy_loss')([source_un_output, target_un_output])
else:
raise NotImplementedError
if args.weight_discrepancy > 0:
print 'Minimize domain discrepancy between source and target via %s'%(args.minimize_discrepancy_obj)
if args.dropout_prob > 0:
print 'use dropout layer'
source_output = Dropout(args.dropout_prob)(source_output)
target_un_output = Dropout(args.dropout_prob)(target_un_output)
uns_output = Dropout(args.dropout_prob)(uns_output)
clf = Dense(args.n_class, kernel_constraint=maxnorm(3), name='dense')
source_output = clf(source_output)
target_output = clf(target_un_output)
uns_output = clf(uns_output)
source_probs = Activation('softmax', name='source_probs')(source_output)
target_probs = Activation('softmax', name='target_probs')(target_output)
uns_probs = Activation('softmax', name='uns_predictions')(uns_output)
uns_pred_loss = Ensemble_pred_loss(name='uns_loss')([uns_probs, uns_target, uns_weight])
if args.weight_uns > 0:
print 'Use ensemble prediction on unlabeled data for semi-supervised training'
model = Model(inputs=[source_input, source_un_input, target_un_input, uns_input, uns_target, uns_weight],
outputs=[source_probs, target_probs, dis_loss, uns_pred_loss])
else:
raise NotImplementedError
logger.info(' Done')
print '\n'
##############################################################################################################################
# Initialize embeddings if embedding path is given
if args.emb_path:
# It takes around 3 mininutes to load pre-trained word embeddings.
model.get_layer('word_emb').set_weights(init_emb(model.get_layer('word_emb').get_weights(), vocab, args.emb_path))
return model
| 6,134 | 39.629139 | 130 | py |
DAS | DAS-master/code/read.py | import codecs
import operator
import numpy as np
import re
from keras.preprocessing import sequence
from keras.utils.np_utils import to_categorical
num_regex = re.compile('^[+-]?[0-9]+\.?[0-9]*$')
def create_vocab(file_list, vocab_size, skip_len):
print 'Creating vocab ...'
total_words, unique_words = 0, 0
word_freqs = {}
for f in file_list:
fin = codecs.open(f, 'r', 'utf-8')
for line in fin:
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
for w in words:
if not bool(num_regex.match(w)):
try:
word_freqs[w] += 1
except KeyError:
unique_words += 1
word_freqs[w] = 1
total_words += 1
fin.close()
print (' %i total words, %i unique words' % (total_words, unique_words))
sorted_word_freqs = sorted(word_freqs.items(), key=operator.itemgetter(1), reverse=True)
vocab = {'<pad>':0, '<unk>':1, '<num>':2}
index = len(vocab)
for word, _ in sorted_word_freqs:
vocab[word] = index
index += 1
if vocab_size > 0 and index > vocab_size + 2:
break
print (' keep the top %i words' % vocab_size)
return vocab
def create_data(vocab, text_path, label_path, domain, n_class, skip_top, skip_len, replace_non_vocab):
data = []
label = [] # {pos: 0, neg: 1, neu: 2}
f = codecs.open(text_path, 'r', 'utf-8')
f_l = codecs.open(label_path, 'r', 'utf-8')
num_hit, unk_hit, skip_top_hit, total = 0., 0., 0., 0.
pos_count, neg_count, neu_count = 0, 0, 0
max_len = 0
for line, score in zip(f, f_l):
word_indices = []
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
score = float(score.strip())
if domain == 'imdb':
if score < 5:
neg_count += 1
label.append(1)
elif score > 6:
pos_count += 1
label.append(0)
else:
if n_class == 3:
neu_count += 1
label.append(2)
else:
continue
elif domain in {'yelp2014', 'book', 'electronics', 'beauty', 'music', 'cell_phone', 'baby'}:
if score < 3:
neg_count += 1
label.append(1)
elif score > 3:
pos_count += 1
label.append(0)
else:
if n_class == 3:
neu_count += 1
label.append(2)
else:
continue
else:
print 'No such domain!'
break
for word in words:
if bool(num_regex.match(word)):
word_indices.append(vocab['<num>'])
num_hit += 1
elif word in vocab:
word_ind = vocab[word]
if skip_top > 0 and word_ind < skip_top + 3:
skip_top_hit += 1
else:
word_indices.append(word_ind)
else:
if replace_non_vocab:
word_indices.append(vocab['<unk>'])
unk_hit += 1
total += 1
if len(word_indices) > max_len:
max_len = len(word_indices)
data.append(word_indices)
f.close()
f_l.close()
print(' <num> hit rate: %.2f%%, <unk> hit rate: %.2f%%, <skip_top> hit rate: %.2f%%' \
% (100*num_hit/total, 100*unk_hit/total, 100*skip_top_hit/total))
print domain
print 'pos count: ', pos_count
print 'neg count: ', neg_count
print 'neu count: ', neu_count
return np.array(data), np.array(label), max_len
def prepare_data(dataset, source_domain, target_domain, n_class, vocab_size=0, skip_len=0, skip_top=0, replace_non_vocab=1):
if dataset == 'small_1':
text_list = ['../data/small/%s/set1_text.txt'%source_domain,
'../data/small/%s/set1_text.txt'%target_domain]
score_list = ['../data/small/%s/set1_label.txt'%source_domain,
'../data/small/%s/set1_label.txt'%target_domain]
domain_list = [source_domain, target_domain]
elif dataset == 'small_2':
text_list = ['../data/small/%s/set1_text.txt'%source_domain,
'../data/small/%s/set1_text.txt'%target_domain,
'../data/small/%s/set2_text.txt'%source_domain,
'../data/small/%s/set2_text.txt'%target_domain]
score_list = ['../data/small/%s/set1_label.txt'%source_domain,
'../data/small/%s/set1_label.txt'%target_domain,
'../data/small/%s/set2_label.txt'%source_domain,
'../data/small/%s/set2_label.txt'%target_domain]
domain_list = [source_domain, target_domain, source_domain, target_domain]
else:
text_list = ['../data/large/%s/text.txt'%source_domain,
'../data/large/%s/text.txt'%target_domain]
score_list = ['../data/large/%s/label.txt'%source_domain,
'../data/large/%s/label.txt'%target_domain]
domain_list = [source_domain, target_domain]
vocab = create_vocab(text_list, vocab_size, skip_len)
data_list = []
label_list = []
overall_max_len = 0
for f, f_l, domain in zip(text_list, score_list, domain_list):
data, label, max_len = create_data(vocab, f, f_l, domain, n_class, skip_top, skip_len, replace_non_vocab)
data_list.append(data)
label_list.append(label)
if max_len > overall_max_len:
overall_max_len = max_len
return vocab, data_list, label_list, overall_max_len
def get_data(dataset, source_domain, target_domain, n_class, vocab_size=0):
assert dataset in ['small_1', 'small_2', 'large']
vocab, data_list, label_list, overall_maxlen = prepare_data(dataset, source_domain, target_domain, n_class, vocab_size)
data_list = [sequence.pad_sequences(d, maxlen=overall_maxlen) for d in data_list]
label_list = [to_categorical(l, n_class) for l in label_list]
if dataset == 'large':
# when using the large-scale dataset, we need to sample 1k balanced dev set from labeled source data
labels = np.argmax(label_list[0], axis=-1)
pos_inds = np.where(labels==0)[0]
neg_inds = np.where(labels==1)[0]
neu_inds = np.where(labels==2)[0]
np.random.shuffle(pos_inds)
np.random.shuffle(neg_inds)
np.random.shuffle(neu_inds)
dev_inds = np.concatenate((pos_inds[:333], neg_inds[:333]))
dev_inds = np.concatenate((dev_inds, neu_inds[:334]))
train_inds = np.concatenate((pos_inds[333:], neg_inds[333:]))
train_inds = np.concatenate((train_inds, neu_inds[334:]))
source_x, source_y = data_list[0][train_inds], label_list[0][train_inds]
dev_x, dev_y = data_list[0][dev_inds], label_list[0][dev_inds]
else:
#On small-scale dataset, randomly select 1k examples from set1 of source domain as dev set
inds = np.random.permutation(data_list[0].shape[0])
dev_inds, train_inds = inds[:1000], inds[1000:]
source_x, source_y = data_list[0][train_inds], label_list[0][train_inds]
dev_x, dev_y = data_list[0][dev_inds], label_list[0][dev_inds]
test_x, test_y = data_list[1], label_list[1]
if dataset in ['small_1', 'large']:
source_un = data_list[0]
target_un = data_list[1]
else:
source_un = np.concatenate((data_list[0], data_list[2]))
target_un = data_list[3]
return vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un
| 7,918 | 32.273109 | 124 | py |
DAS | DAS-master/code/read_amazon.py | import codecs
import operator
import numpy as np
import re
from keras.preprocessing import sequence
from keras.utils.np_utils import to_categorical
from read import create_vocab
num_regex = re.compile('^[+-]?[0-9]+\.?[0-9]*$')
def create_data(vocab, file_path, skip_top, skip_len, replace_non_vocab):
data = []
f = codecs.open(file_path, 'r', 'utf-8')
num_hit, unk_hit, skip_top_hit, total = 0., 0., 0., 0.
max_len = 0
for line in f:
word_indices = []
words = line.split()
if skip_len > 0 and len(words) > skip_len:
continue
for word in words:
if bool(num_regex.match(word)):
word_indices.append(vocab['<num>'])
num_hit += 1
elif word in vocab:
word_ind = vocab[word]
if skip_top > 0 and word_ind < skip_top + 3:
skip_top_hit += 1
else:
word_indices.append(word_ind)
else:
if replace_non_vocab:
word_indices.append(vocab['<unk>'])
unk_hit += 1
total += 1
if len(word_indices) > max_len:
max_len = len(word_indices)
data.append(word_indices)
print(' <num> hit rate: %.2f%%, <unk> hit rate: %.2f%%, <skip_top> hit rate: %.2f%%' \
% (100*num_hit/total, 100*unk_hit/total, 100*skip_top_hit/total))
return np.array(data), max_len
def prepare_data(source_domain, target_domain, n_class, vocab_size=0, skip_len=0, skip_top=0, replace_non_vocab=1):
file_list = ['../data/amazon/%s/pos.txt'%source_domain,
'../data/amazon/%s/neg.txt'%source_domain,
'../data/amazon/%s/un_pos.txt'%source_domain,
'../data/amazon/%s/un_neg.txt'%source_domain,
'../data/amazon/%s/pos.txt'%target_domain,
'../data/amazon/%s/neg.txt'%target_domain,
'../data/amazon/%s/un_pos.txt'%target_domain,
'../data/amazon/%s/un_neg.txt'%target_domain]
vocab = create_vocab(file_list, vocab_size, skip_len)
data_list = []
overall_max_len = 0
for f in file_list:
data, max_len = create_data(vocab, f, skip_top, skip_len, replace_non_vocab)
data_list.append(data)
if max_len > overall_max_len:
overall_max_len = max_len
return vocab, data_list, overall_max_len
def get_data(dataset, source_domain, target_domain, n_class, vocab_size=0):
vocab, data_list, overall_maxlen = prepare_data(source_domain, target_domain, n_class, vocab_size)
data_list = [sequence.pad_sequences(d, maxlen=overall_maxlen) for d in data_list]
for d in data_list:
np.random.shuffle(d)
source_pos, source_neg, source_un_pos, source_un_neg, target_pos, target_neg, target_un_pos, target_un_neg = data_list
# Each domain has a train set of size 1600, and a test set of size 400 with exactly balanced positive and negative examples
# Only consider binary classification {pos: 1, neg: 0}
source_train_y = np.concatenate((np.ones(800), np.zeros(800))).reshape(1600,1)
source_test_y = np.concatenate((np.ones(200), np.zeros(200))).reshape(400, 1)
target_train_y = np.concatenate((np.ones(800), np.zeros(800))).reshape(1600, 1)
target_test_y = np.concatenate((np.ones(200), np.zeros(200))).reshape(400, 1)
source_train_y = to_categorical(source_train_y, n_class)
source_test_y = to_categorical(source_test_y, n_class)
target_train_y = to_categorical(target_train_y, n_class)
target_test_y = to_categorical(target_test_y, n_class)
source_train_x = np.concatenate((source_pos[0:800], source_neg[0:800]))
source_test_x = np.concatenate((source_pos[800:], source_neg[800:]))
target_train_x = np.concatenate((target_pos[0:800], target_neg[0:800]))
target_test_x = np.concatenate((target_pos[800:], target_neg[800:]))
# Each domain has an additional unlabeled set of size 4000.
source_un = np.concatenate((source_un_pos, source_un_neg))
target_un = np.concatenate((target_un_pos, target_un_neg))
# For each pair of source-target domain, the classifier is trained on the training set of the source domain and
# is evaluated on the test set of the target domain. The test set from source domain is used as development set.
source_x, source_y = source_train_x, source_train_y
dev_x, dev_y = source_test_x, source_test_y
test_x, test_y = target_test_x, target_test_y
source_un = np.concatenate((source_x, source_un))
return vocab, overall_maxlen, source_x, source_y, dev_x, dev_y, test_x, test_y, source_un, target_un
| 4,704 | 40.27193 | 127 | py |
QuantFace | QuantFace-master/train_quantization_synthetic.py | import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from backbones.mobilefacenet import MobileFaceNet
from config.config_Quantization_Synthetic import config as cfg
from utils.dataset import DataLoaderX, FaceDatasetFolder
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, freeze_model, unfreeze_model, iresnet18
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = FaceDatasetFolder(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
logging.info("load backbone!" + cfg.network)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network =="mobilefacenet":
backbone=MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output32, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
if cfg.network == "mobilefacenet":
from backbones.mobilefacenet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
else:
from backbones.iresnet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.eval()
backbone_quant = DistributedDataParallel(
module=backbone_quant, broadcast_buffers=True, device_ids=[local_rank])
backbone_quant.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone_quant.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay,nesterov=True,)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
criterion = torch.nn.MSELoss() #CrossEntropyLoss()
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
backbone_quant=unfreeze_model(backbone_quant)
loss = AverageMeter()
global_step = 0
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
backbone_quant=freeze_model(backbone_quant)
for _, (img, label) in enumerate(train_loader):
global_step += 1
if (global_step<300):
backbone_quant = unfreeze_model(backbone_quant)
img = img.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone_quant(img))
with torch.no_grad():
features_1 = F.normalize(backbone(img))
loss_v=criterion(features,features_1)
loss_v.backward()
clip_grad_norm_(backbone_quant.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_backbone.zero_grad()
loss.update(loss_v.item(), 1)
if (global_step %5000==0):
logging.info(backbone_quant)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone_quant)
backbone_quant = freeze_model(backbone_quant)
scheduler_backbone.step()
callback_checkpoint(global_step, backbone_quant, None, quantiza=True)
callback_verification(cfg.eval_step, backbone_quant)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=1, help="resume training")
args_ = parser.parse_args()
main(args_)
| 5,889 | 37.496732 | 104 | py |
QuantFace | QuantFace-master/train_quantization.py | import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from backbones.mobilefacenet import MobileFaceNet
from config.config_Quantization import config as cfg
from utils.dataset import MXFaceDataset, DataLoaderX
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, freeze_model, unfreeze_model, iresnet18
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
logging.info("load backbone!" + cfg.network)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network =="mobilefacenet":
backbone=MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output32, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
if cfg.network =="mobilefacenet":
from backbones.mobilefacenet import quantize_model
backbone_quant = quantize_model(backbone, cfg.wq, cfg.aq).to(local_rank)
else:
from backbones.iresnet import quantize_model
backbone_quant=quantize_model(backbone,cfg.wq,cfg.aq).to(local_rank)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.eval()
backbone_quant = DistributedDataParallel(
module=backbone_quant, broadcast_buffers=True, device_ids=[local_rank])
backbone_quant.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone_quant.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay,nesterov=True,)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
criterion =torch.nn.MSELoss()
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
backbone_quant=unfreeze_model(backbone_quant)
loss = AverageMeter()
global_step = 0
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
backbone_quant=freeze_model(backbone_quant)
for _, (img, label) in enumerate(train_loader):
global_step += 1
if (global_step < 300):
backbone_quant = unfreeze_model(backbone_quant)
img = img.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone_quant(img))
with torch.no_grad():
features_1 = F.normalize(backbone(img))
loss_v=criterion(features,features_1)
loss_v.backward()
clip_grad_norm_(backbone_quant.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_backbone.zero_grad()
loss.update(loss_v.item(), 1)
if (global_step %5000==0):
logging.info(backbone_quant)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone_quant)
backbone_quant = freeze_model(backbone_quant)
scheduler_backbone.step()
callback_checkpoint(global_step, backbone_quant, None,quantiza=True)
callback_verification(5686, backbone_quant)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=1, help="resume training")
args_ = parser.parse_args()
main(args_)
| 5,795 | 37.64 | 104 | py |
QuantFace | QuantFace-master/train_fp32.py | import argparse
import logging
import os
import time
import torch
import torch.distributed as dist
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
import torch.utils.data.distributed
from torch.nn.utils import clip_grad_norm_
from torch.nn import CrossEntropyLoss
from backbones.mobilefacenet import MobileFaceNet
from utils import losses
from config.config_FP32 import config as cfg
from utils.dataset import MXFaceDataset, DataLoaderX
from utils.utils_callbacks import CallBackVerification, CallBackLogging, CallBackModelCheckpoint
from utils.utils_logging import AverageMeter, init_logging
from backbones.iresnet import iresnet100, iresnet50, iresnet18
torch.backends.cudnn.benchmark = True
def main(args):
dist.init_process_group(backend='nccl', init_method='env://')
local_rank = args.local_rank
torch.cuda.set_device(local_rank)
rank = dist.get_rank()
world_size = dist.get_world_size()
if not os.path.exists(cfg.output) and rank == 0:
os.makedirs(cfg.output)
else:
time.sleep(2)
log_root = logging.getLogger()
init_logging(log_root, rank, cfg.output)
trainset = MXFaceDataset(root_dir=cfg.rec, local_rank=local_rank)
train_sampler = torch.utils.data.distributed.DistributedSampler(
trainset, shuffle=True)
train_loader = DataLoaderX(
local_rank=local_rank, dataset=trainset, batch_size=cfg.batch_size,
sampler=train_sampler, num_workers=0, pin_memory=True, drop_last=True)
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet50":
backbone = iresnet50(dropout=0.4,num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "iresnet18":
backbone = iresnet18(dropout=0.4, num_features=cfg.embedding_size, use_se=cfg.SE).to(local_rank)
elif cfg.network == "mobilefacenet":
backbone = MobileFaceNet().to(local_rank)
else:
backbone = None
logging.info("load backbone failed!")
exit()
if args.resume:
try:
backbone_pth = os.path.join(cfg.output, str(cfg.global_step) + "backbone.pth")
backbone.load_state_dict(torch.load(backbone_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("backbone resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("load backbone resume init, failed!")
for ps in backbone.parameters():
dist.broadcast(ps, 0)
backbone = DistributedDataParallel(
module=backbone, broadcast_buffers=False, device_ids=[local_rank])
backbone.train()
# get header
if cfg.loss == "ElasticArcFace":
header = losses.ElasticArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std).to(local_rank)
elif cfg.loss == "ElasticArcFacePlus":
header = losses.ElasticArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std, plus=True).to(local_rank)
elif cfg.loss == "ElasticCosFace":
header = losses.ElasticCosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,std=cfg.std).to(local_rank)
elif cfg.loss == "ElasticCosFacePlus":
header = losses.ElasticCosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m,
std=cfg.std, plus=True).to(local_rank)
elif cfg.loss == "ArcFace":
header = losses.ArcFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m).to(local_rank)
elif cfg.loss == "CosFace":
header = losses.CosFace(in_features=cfg.embedding_size, out_features=cfg.num_classes, s=cfg.s, m=cfg.m).to(
local_rank)
else:
print("Header not implemented")
if args.resume:
try:
header_pth = os.path.join(cfg.output, str(cfg.global_step) + "header.pth")
header.load_state_dict(torch.load(header_pth, map_location=torch.device(local_rank)))
if rank == 0:
logging.info("header resume loaded successfully!")
except (FileNotFoundError, KeyError, IndexError, RuntimeError):
logging.info("header resume init, failed!")
header = DistributedDataParallel(
module=header, broadcast_buffers=False, device_ids=[local_rank])
header.train()
opt_backbone = torch.optim.SGD(
params=[{'params': backbone.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
opt_header = torch.optim.SGD(
params=[{'params': header.parameters()}],
lr=cfg.lr / 512 * cfg.batch_size * world_size,
momentum=0.9, weight_decay=cfg.weight_decay)
scheduler_backbone = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_backbone, lr_lambda=cfg.lr_func)
scheduler_header = torch.optim.lr_scheduler.LambdaLR(
optimizer=opt_header, lr_lambda=cfg.lr_func)
criterion = CrossEntropyLoss()
start_epoch = 0
total_step = int(len(trainset) / cfg.batch_size / world_size * cfg.num_epoch)
if rank == 0: logging.info("Total Step is: %d" % total_step)
if args.resume:
rem_steps = (total_step - cfg.global_step)
cur_epoch = cfg.num_epoch - int(cfg.num_epoch / total_step * rem_steps)
logging.info("resume from estimated epoch {}".format(cur_epoch))
logging.info("remaining steps {}".format(rem_steps))
start_epoch = cur_epoch
scheduler_backbone.last_epoch = cur_epoch
scheduler_header.last_epoch = cur_epoch
# --------- this could be solved more elegant ----------------
opt_backbone.param_groups[0]['lr'] = scheduler_backbone.get_lr()[0]
opt_header.param_groups[0]['lr'] = scheduler_header.get_lr()[0]
print("last learning rate: {}".format(scheduler_header.get_lr()))
# ------------------------------------------------------------
callback_verification = CallBackVerification(cfg.eval_step, rank, cfg.val_targets, cfg.rec)
callback_logging = CallBackLogging(50, rank, total_step, cfg.batch_size, world_size, writer=None)
callback_checkpoint = CallBackModelCheckpoint(rank, cfg.output)
loss = AverageMeter()
global_step = cfg.global_step
for epoch in range(start_epoch, cfg.num_epoch):
train_sampler.set_epoch(epoch)
for _, (img, label) in enumerate(train_loader):
global_step += 1
img = img.cuda(local_rank, non_blocking=True)
label = label.cuda(local_rank, non_blocking=True)
features = F.normalize(backbone(img))
thetas = header(features, label)
loss_v = criterion(thetas, label)
loss_v.backward()
clip_grad_norm_(backbone.parameters(), max_norm=5, norm_type=2)
opt_backbone.step()
opt_header.step()
opt_backbone.zero_grad()
opt_header.zero_grad()
loss.update(loss_v.item(), 1)
callback_logging(global_step, loss, epoch)
callback_verification(global_step, backbone)
scheduler_backbone.step()
scheduler_header.step()
callback_checkpoint(global_step, backbone, header)
dist.destroy_process_group()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='PyTorch margin penalty loss training')
parser.add_argument('--local_rank', type=int, default=0, help='local_rank')
parser.add_argument('--resume', type=int, default=0, help="resume training")
args_ = parser.parse_args()
main(args_)
| 7,876 | 39.394872 | 156 | py |
QuantFace | QuantFace-master/eval/verification.py | """Helper for evaluation on the Labeled Faces in the Wild dataset
"""
# MIT License
#
# Copyright (c) 2016 David Sandberg
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import datetime
import os
import pickle
import mxnet as mx
import numpy as np
import sklearn
import torch
from mxnet import ndarray as nd
from scipy import interpolate
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
class LFold:
def __init__(self, n_splits=2, shuffle=False):
self.n_splits = n_splits
if self.n_splits > 1:
self.k_fold = KFold(n_splits=n_splits, shuffle=shuffle)
def split(self, indices):
if self.n_splits > 1:
return self.k_fold.split(indices)
else:
return [(indices, indices)]
def calculate_roc(thresholds,
embeddings1,
embeddings2,
actual_issame,
nrof_folds=10,
pca=0):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds, nrof_thresholds))
fprs = np.zeros((nrof_folds, nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
if pca == 0:
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
if pca > 0:
print('doing pca on', fold_idx)
embed1_train = embeddings1[train_set]
embed2_train = embeddings2[train_set]
_embed_train = np.concatenate((embed1_train, embed2_train), axis=0)
pca_model = PCA(n_components=pca)
pca_model.fit(_embed_train)
embed1 = pca_model.transform(embeddings1)
embed2 = pca_model.transform(embeddings2)
embed1 = sklearn.preprocessing.normalize(embed1)
embed2 = sklearn.preprocessing.normalize(embed2)
diff = np.subtract(embed1, embed2)
dist = np.sum(np.square(diff), 1)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = calculate_accuracy(
threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx, threshold_idx], fprs[fold_idx, threshold_idx], _ = calculate_accuracy(
threshold, dist[test_set],
actual_issame[test_set])
_, _, accuracy[fold_idx] = calculate_accuracy(
thresholds[best_threshold_index], dist[test_set],
actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy
def calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame),
np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp + fn == 0) else float(tp) / float(tp + fn)
fpr = 0 if (fp + tn == 0) else float(fp) / float(fp + tn)
acc = float(tp + tn) / dist.size
return tpr, fpr, acc
def calculate_val(thresholds,
embeddings1,
embeddings2,
actual_issame,
far_target,
nrof_folds=10):
assert (embeddings1.shape[0] == embeddings2.shape[0])
assert (embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = LFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
diff = np.subtract(embeddings1, embeddings2)
dist = np.sum(np.square(diff), 1)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = calculate_val_far(
threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train) >= far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = calculate_val_far(
threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(
np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
# print(true_accept, false_accept)
# print(n_same, n_diff)
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def evaluate(embeddings, actual_issame, nrof_folds=10, pca=0):
# Calculate evaluation metrics
thresholds = np.arange(0, 4, 0.01)
embeddings1 = embeddings[0::2]
embeddings2 = embeddings[1::2]
tpr, fpr, accuracy = calculate_roc(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
nrof_folds=nrof_folds,
pca=pca)
thresholds = np.arange(0, 4, 0.001)
val, val_std, far = calculate_val(thresholds,
embeddings1,
embeddings2,
np.asarray(actual_issame),
1e-3,
nrof_folds=nrof_folds)
return tpr, fpr, accuracy, val, val_std, far
@torch.no_grad()
def load_bin(path, image_size):
try:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f) # py2
except UnicodeDecodeError as e:
with open(path, 'rb') as f:
bins, issame_list = pickle.load(f, encoding='bytes') # py3
data_list = []
for flip in [0, 1]:
data = torch.empty((len(issame_list) * 2, 3, image_size[0], image_size[1]))
data_list.append(data)
for idx in range(len(issame_list) * 2):
_bin = bins[idx]
img = mx.image.imdecode(_bin)
if img.shape[1] != image_size[0]:
img = mx.image.resize_short(img, image_size[0])
img = nd.transpose(img, axes=(2, 0, 1))
for flip in [0, 1]:
if flip == 1:
img = mx.ndarray.flip(data=img, axis=2)
data_list[flip][idx][:] = torch.from_numpy(img.asnumpy())
if idx % 1000 == 0:
print('loading bin', idx)
print(data_list[0].shape)
return data_list, issame_list
@torch.no_grad()
def test(data_set, backbone, batch_size, nfolds=10):
print('testing verification..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = data[bb - batch_size: bb]
time0 = datetime.datetime.now()
img = ((_data / 255) - 0.5) / 0.5
net_out: torch.Tensor = backbone(img)
_embeddings = net_out.detach().cpu().numpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
ba = bb
embeddings_list.append(embeddings)
_xnorm = 0.0
_xnorm_cnt = 0
for embed in embeddings_list:
for i in range(embed.shape[0]):
_em = embed[i]
_norm = np.linalg.norm(_em)
_xnorm += _norm
_xnorm_cnt += 1
_xnorm /= _xnorm_cnt
embeddings = embeddings_list[0].copy()
embeddings = sklearn.preprocessing.normalize(embeddings)
acc1 = 0.0
std1 = 0.0
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
print(embeddings.shape)
print('infer time', time_consumed)
_, _, accuracy, val, val_std, far = evaluate(embeddings, issame_list, nrof_folds=nfolds)
acc2, std2 = np.mean(accuracy), np.std(accuracy)
return acc1, std1, acc2, std2, _xnorm, embeddings_list
def dumpR(data_set,
backbone,
batch_size,
name='',
data_extra=None,
label_shape=None):
print('dump verification embedding..')
data_list = data_set[0]
issame_list = data_set[1]
embeddings_list = []
time_consumed = 0.0
for i in range(len(data_list)):
data = data_list[i]
embeddings = None
ba = 0
while ba < data.shape[0]:
bb = min(ba + batch_size, data.shape[0])
count = bb - ba
_data = nd.slice_axis(data, axis=0, begin=bb - batch_size, end=bb)
time0 = datetime.datetime.now()
if data_extra is None:
db = mx.io.DataBatch(data=(_data,), label=(_label,))
else:
db = mx.io.DataBatch(data=(_data, _data_extra),
label=(_label,))
model.forward(db, is_train=False)
net_out = model.get_outputs()
_embeddings = net_out[0].asnumpy()
time_now = datetime.datetime.now()
diff = time_now - time0
time_consumed += diff.total_seconds()
if embeddings is None:
embeddings = np.zeros((data.shape[0], _embeddings.shape[1]))
embeddings[ba:bb, :] = _embeddings[(batch_size - count):, :]
ba = bb
embeddings_list.append(embeddings)
embeddings = embeddings_list[0] + embeddings_list[1]
embeddings = sklearn.preprocessing.normalize(embeddings)
actual_issame = np.asarray(issame_list)
outname = os.path.join('temp.bin')
with open(outname, 'wb') as f:
pickle.dump((embeddings, issame_list),
f,
protocol=pickle.HIGHEST_PROTOCOL)
# if __name__ == '__main__':
#
# parser = argparse.ArgumentParser(description='do verification')
# # general
# parser.add_argument('--data-dir', default='', help='')
# parser.add_argument('--model',
# default='../model/softmax,50',
# help='path to load model.')
# parser.add_argument('--target',
# default='lfw,cfp_ff,cfp_fp,agedb_30',
# help='test targets.')
# parser.add_argument('--gpu', default=0, type=int, help='gpu id')
# parser.add_argument('--batch-size', default=32, type=int, help='')
# parser.add_argument('--max', default='', type=str, help='')
# parser.add_argument('--mode', default=0, type=int, help='')
# parser.add_argument('--nfolds', default=10, type=int, help='')
# args = parser.parse_args()
# image_size = [112, 112]
# print('image_size', image_size)
# ctx = mx.gpu(args.gpu)
# nets = []
# vec = args.model.split(',')
# prefix = args.model.split(',')[0]
# epochs = []
# if len(vec) == 1:
# pdir = os.path.dirname(prefix)
# for fname in os.listdir(pdir):
# if not fname.endswith('.params'):
# continue
# _file = os.path.join(pdir, fname)
# if _file.startswith(prefix):
# epoch = int(fname.split('.')[0].split('-')[1])
# epochs.append(epoch)
# epochs = sorted(epochs, reverse=True)
# if len(args.max) > 0:
# _max = [int(x) for x in args.max.split(',')]
# assert len(_max) == 2
# if len(epochs) > _max[1]:
# epochs = epochs[_max[0]:_max[1]]
#
# else:
# epochs = [int(x) for x in vec[1].split('|')]
# print('model number', len(epochs))
# time0 = datetime.datetime.now()
# for epoch in epochs:
# print('loading', prefix, epoch)
# sym, arg_params, aux_params = mx.model.load_checkpoint(prefix, epoch)
# # arg_params, aux_params = ch_dev(arg_params, aux_params, ctx)
# all_layers = sym.get_internals()
# sym = all_layers['fc1_output']
# model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
# # model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0], image_size[1]))], label_shapes=[('softmax_label', (args.batch_size,))])
# model.bind(data_shapes=[('data', (args.batch_size, 3, image_size[0],
# image_size[1]))])
# model.set_params(arg_params, aux_params)
# nets.append(model)
# time_now = datetime.datetime.now()
# diff = time_now - time0
# print('model loading time', diff.total_seconds())
#
# ver_list = []
# ver_name_list = []
# for name in args.target.split(','):
# path = os.path.join(args.data_dir, name + ".bin")
# if os.path.exists(path):
# print('loading.. ', name)
# data_set = load_bin(path, image_size)
# ver_list.append(data_set)
# ver_name_list.append(name)
#
# if args.mode == 0:
# for i in range(len(ver_list)):
# results = []
# for model in nets:
# acc1, std1, acc2, std2, xnorm, embeddings_list = test(
# ver_list[i], model, args.batch_size, args.nfolds)
# print('[%s]XNorm: %f' % (ver_name_list[i], xnorm))
# print('[%s]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], acc1, std1))
# print('[%s]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], acc2, std2))
# results.append(acc2)
# print('Max of [%s] is %1.5f' % (ver_name_list[i], np.max(results)))
# elif args.mode == 1:
# raise ValueError
# else:
# model = nets[0]
# dumpR(ver_list[0], model, args.batch_size, args.target)
| 16,187 | 38.579462 | 152 | py |
QuantFace | QuantFace-master/quantization_utils/quant_modules.py | # *
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
# https://github.com/amirgholami/ZeroQ
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
# *
import torch
import time
import math
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import Module, Parameter
from .quant_utils import *
import sys
class QuantAct(Module):
"""
Class to quantize given activations
"""
def __init__(self,
activation_bit,
full_precision_flag=False,
running_stat=True,
beta=0.9):
"""
activation_bit: bit-setting for activation
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(QuantAct, self).__init__()
self.activation_bit = activation_bit
self.full_precision_flag = full_precision_flag
self.running_stat = running_stat
self.register_buffer('x_min', torch.zeros(1))
self.register_buffer('x_max', torch.zeros(1))
self.register_buffer('beta', torch.Tensor([beta]))
self.register_buffer('beta_t', torch.ones(1))
self.act_function = AsymmetricQuantFunction.apply
def __repr__(self):
return "{0}(activation_bit={1}, full_precision_flag={2}, running_stat={3}, Act_min: {4:.2f}, Act_max: {5:.2f})".format(
self.__class__.__name__, self.activation_bit,
self.full_precision_flag, self.running_stat, self.x_min.item(),
self.x_max.item())
def fix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = False
def unfix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = True
def forward(self, x):
"""
quantize given activation x
"""
if self.running_stat:
x_min = x.data.min()
x_max = x.data.max()
# in-place operation used on multi-gpus
self.x_min += -self.x_min + min(self.x_min, x_min)
self.x_max += -self.x_max + max(self.x_max, x_max)
#self.beta_t = self.beta_t * self.beta
#self.x_min = (self.x_min * self.beta + x_min * (1 - self.beta))/(1 - self.beta_t)
#self.x_max = (self.x_max * self.beta + x_max * (1 - self.beta)) / (1 - self.beta_t)
#self.x_min += -self.x_min + min(self.x_min, x_min)
#self.x_max += -self.x_max + max(self.x_max, x_max)
if not self.full_precision_flag:
quant_act = self.act_function(x, self.activation_bit, self.x_min,
self.x_max)
return quant_act
else:
return x
class QuantActPreLu(Module):
"""
Class to quantize given activations
"""
def __init__(self,
act_bit,
full_precision_flag=False,
running_stat=True):
"""
activation_bit: bit-setting for activation
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(QuantActPreLu, self).__init__()
self.activation_bit = act_bit
self.full_precision_flag = full_precision_flag
self.running_stat = running_stat
self.act_function = AsymmetricQuantFunction.apply
self.quantAct=QuantAct(activation_bit=act_bit,running_stat=True)
def __repr__(self):
s = super(QuantActPreLu, self).__repr__()
s = "(" + s + " activation_bit={}, full_precision_flag={})".format(
self.activation_bit, self.full_precision_flag)
return s
def set_param(self, prelu):
self.weight = Parameter(prelu.weight.data.clone())
def fix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = False
def unfix(self):
"""
fix the activation range by setting running stat
"""
self.running_stat = True
def forward(self, x):
w = self.weight
x_transform = w.data.detach()
a_min = x_transform.min(dim=0).values
a_max = x_transform.max(dim=0).values
if not self.full_precision_flag:
w = self.act_function(self.weight, self.activation_bit, a_min,
a_max)
else:
w = self.weight
#inputs = max(0, inputs) + alpha * min(0, inputs)
#w_min = torch.mul( F.relu(-x),-w)
#x= F.relu(x) + w_min
#inputs = self.quantized_op.add(torch.relu(x), weight_min_res)
x= F.prelu(x,weight=w)
x=self.quantAct(x)
return x
class Quant_Linear(Module):
"""
Class to quantize given linear layer weights
"""
def __init__(self, weight_bit, full_precision_flag=False):
"""
weight: bit-setting for weight
full_precision_flag: full precision or not
running_stat: determines whether the activation range is updated or froze
"""
super(Quant_Linear, self).__init__()
self.full_precision_flag = full_precision_flag
self.weight_bit = weight_bit
self.weight_function = AsymmetricQuantFunction.apply
def __repr__(self):
s = super(Quant_Linear, self).__repr__()
s = "(" + s + " weight_bit={}, full_precision_flag={})".format(
self.weight_bit, self.full_precision_flag)
return s
def set_param(self, linear):
self.in_features = linear.in_features
self.out_features = linear.out_features
self.weight = Parameter(linear.weight.data.clone())
try:
self.bias = Parameter(linear.bias.data.clone())
except AttributeError:
self.bias = None
def forward(self, x):
"""
using quantized weights to forward activation x
"""
w = self.weight
x_transform = w.data.detach()
w_min = x_transform.min(dim=1).values
w_max = x_transform.max(dim=1).values
if not self.full_precision_flag:
w = self.weight_function(self.weight, self.weight_bit, w_min,w_max)
else:
w = self.weight
return F.linear(x, weight=w, bias=self.bias)
class Quant_Conv2d(Module):
"""
Class to quantize given convolutional layer weights
"""
def __init__(self, weight_bit, full_precision_flag=False):
super(Quant_Conv2d, self).__init__()
self.full_precision_flag = full_precision_flag
self.weight_bit = weight_bit
self.weight_function = AsymmetricQuantFunction.apply
def __repr__(self):
s = super(Quant_Conv2d, self).__repr__()
s = "(" + s + " weight_bit={}, full_precision_flag={})".format(
self.weight_bit, self.full_precision_flag)
return s
def set_param(self, conv):
self.in_channels = conv.in_channels
self.out_channels = conv.out_channels
self.kernel_size = conv.kernel_size
self.stride = conv.stride
self.padding = conv.padding
self.dilation = conv.dilation
self.groups = conv.groups
self.weight = Parameter(conv.weight.data.clone())
try:
self.bias = Parameter(conv.bias.data.clone())
except AttributeError:
self.bias = None
def forward(self, x):
"""
using quantized weights to forward activation x
"""
w = self.weight
x_transform = w.data.contiguous().view(self.out_channels, -1)
w_min = x_transform.min(dim=1).values
w_max = x_transform.max(dim=1).values
if not self.full_precision_flag:
w = self.weight_function(self.weight, self.weight_bit, w_min,
w_max)
else:
w = self.weight
return F.conv2d(x, w, self.bias, self.stride, self.padding,
self.dilation, self.groups)
| 7,593 | 28.320463 | 121 | py |
QuantFace | QuantFace-master/quantization_utils/quant_utils.py | #*
# @file Different utility functions
# Copyright (c) Yaohui Cai, Zhewei Yao, Zhen Dong, Amir Gholami
# All rights reserved.
# This file is part of ZeroQ repository.
# https://github.com/amirgholami/ZeroQ
# ZeroQ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ZeroQ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ZeroQ repository. If not, see <http://www.gnu.org/licenses/>.
#*
import math
import numpy as np
from torch.autograd import Function, Variable
import torch
def clamp(input, min, max, inplace=False):
"""
Clamp tensor input to (min, max).
input: input tensor to be clamped
"""
if inplace:
input.clamp_(min, max)
return input
return torch.clamp(input, min, max)
def linear_quantize(input, scale, zero_point, inplace=False):
"""
Quantize single-precision input tensor to integers with the given scaling factor and zeropoint.
input: single-precision input tensor to be quantized
scale: scaling factor for quantization
zero_pint: shift for quantization
"""
# reshape scale and zeropoint for convolutional weights and activation
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
# mapping single-precision input to integer values with the given scale and zeropoint
if inplace:
input.mul_(scale).sub_(zero_point).round_()
return input
return torch.round(scale * input - zero_point)
def linear_dequantize(input, scale, zero_point, inplace=False):
"""
Map integer input tensor to fixed point float point with given scaling factor and zeropoint.
input: integer input tensor to be mapped
scale: scaling factor for quantization
zero_pint: shift for quantization
"""
# reshape scale and zeropoint for convolutional weights and activation
if len(input.shape) == 4:
scale = scale.view(-1, 1, 1, 1)
zero_point = zero_point.view(-1, 1, 1, 1)
# reshape scale and zeropoint for linear weights
elif len(input.shape) == 2:
scale = scale.view(-1, 1)
zero_point = zero_point.view(-1, 1)
# mapping integer input to fixed point float point value with given scaling factor and zeropoint
if inplace:
input.add_(zero_point).div_(scale)
return input
return (input + zero_point) / scale
def asymmetric_linear_quantization_params(num_bits,
saturation_min,
saturation_max,
integral_zero_point=True,
signed=True):
"""
Compute the scaling factor and zeropoint with the given quantization range.
saturation_min: lower bound for quantization range
saturation_max: upper bound for quantization range
"""
n = 2**num_bits - 1
scale = n / torch.clamp((saturation_max - saturation_min), min=1e-8)
zero_point = scale * saturation_min
if integral_zero_point:
if isinstance(zero_point, torch.Tensor):
zero_point = zero_point.round()
else:
zero_point = float(round(zero_point))
if signed:
zero_point += 2**(num_bits - 1)
return scale, zero_point
class AsymmetricQuantFunction(Function):
"""
Class to quantize the given floating-point values with given range and bit-setting.
Currently only support inference, but not support back-propagation.
"""
@staticmethod
def forward(ctx, x, k, x_min=None, x_max=None):
"""
x: single-precision value to be quantized
k: bit-setting for x
x_min: lower bound for quantization range
x_max=None
"""
# if x_min is None or x_max is None or (sum(x_min == x_max) == 1
# and x_min.numel() == 1):
# x_min, x_max = x.min(), x.max()
scale, zero_point = asymmetric_linear_quantization_params(
k, x_min, x_max)
new_quant_x = linear_quantize(x, scale, zero_point, inplace=False)
n = 2**(k - 1)
new_quant_x = torch.clamp(new_quant_x, -n, n - 1)
quant_x = linear_dequantize(new_quant_x,
scale,
zero_point,
inplace=False)
return torch.autograd.Variable(quant_x)
@staticmethod
def backward(ctx, grad_output):
return grad_output, None, None, None
| 5,114 | 35.535714 | 100 | py |
QuantFace | QuantFace-master/utils/losses.py | import torch
from torch import nn
import math
import numpy as np
import torch.nn.functional as F
def l2_norm(input, axis = 1):
norm = torch.norm(input, 2, axis, True)
output = torch.div(input, norm)
return output
class MLLoss(nn.Module):
def __init__(self, s=64.0):
super(MLLoss, self).__init__()
self.s = s
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
cos_theta.mul_(self.s)
return cos_theta
'''
# from https://github.com/HuangYG123/CurricularFace/blob/master/head/metrics.py
class ElasticArcFace(nn.Module):
r"""Implement of ArcFace (https://arxiv.org/pdf/1801.07698v1.pdf):
Args:
in_features: size of each input sample
out_features: size of each output sample
s: norm of input feature
m: margin
cos(theta+m)
"""
def __init__(self, in_features, out_features, s=64.0, m=0.50, easy_margin=False,std=0.0125):
super(ElasticArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.std=std
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
# nn.init.xavier_uniform_(self.kernel)
nn.init.normal_(self.kernel, std=0.01)
self.easy_margin = easy_margin
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1 + 1e-5, 1 + 1e-5) # for numerical stability
with torch.no_grad():
origin_cos = cos_theta.clone()
target_logit = cos_theta[torch.arange(0, embbedings.size(0)), label].view(-1, 1)
sin_theta = torch.sqrt(1.0 - torch.pow(target_logit, 2))
index = torch.where(label != -1)[0]
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device).clamp(self.m-self.std, self.m+self.std) # Fast converge .clamp(self.m-self.std, self.m+self.std)
with torch.no_grad():
#distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(target_logit, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
cos_m=torch.cos(margin)
sin_m=torch.sin(margin)
th=torch.cos(math.pi-margin)
mm=torch.sin(math.pi-margin)*margin
cos_theta_m = target_logit * cos_m - sin_theta * sin_m # cos(target+margin)
if self.easy_margin:
final_target_logit = torch.where(target_logit > 0, cos_theta_m, target_logit)
else:
final_target_logit = torch.where(target_logit > th, cos_theta_m, target_logit - mm)
cos_theta.scatter_(1, label.view(-1, 1).long(), final_target_logit)
output = cos_theta * self.s
return output # , origin_cos * self.s
'''
class ElasticArcFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.50,std=0.0125, random=True):
super(ElasticArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
self.std=std
self.random=random
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device)#.clamp(self.m-self.std, self.m+self.std) # Fast converge .clamp(self.m-self.std, self.m+self.std)
if not self.random:
with torch.no_grad():
distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(distmat, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
m_hot.scatter_(1, label[index, None], margin[idicate_cosie])
else:
m_hot.scatter_(1, label[index, None], margin)
cos_theta.acos_()
cos_theta[index] += m_hot
cos_theta.cos_().mul_(self.s)
return cos_theta
class ElasticCosFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.35,std=0.0125, random=False):
super(ElasticCosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
self.std=std
self.random=random
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
margin = torch.normal(mean=self.m, std=self.std, size=label[index, None].size(), device=cos_theta.device) # Fast converge .clamp(self.m-self.std, self.m+self.std)
if not self.random:
with torch.no_grad():
distmat = cos_theta[index, label.view(-1)].detach().clone()
_, idicate_cosie = torch.sort(distmat, dim=0, descending=True)
margin, _ = torch.sort(margin, dim=0)
m_hot.scatter_(1, label[index, None], margin[idicate_cosie])
else:
m_hot.scatter_(1, label[index, None], margin)
m_hot.scatter_(1, label[index, None], margin)
cos_theta[index] -= m_hot
ret = cos_theta * self.s
return ret
class CosFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.35):
super(CosFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
m_hot.scatter_(1, label[index, None], self.m)
cos_theta[index] -= m_hot
ret = cos_theta * self.s
return ret
def loss_func(feat1, feat2):
return 1- F.cosine_similarity(feat1, feat2).abs().mean()
class ArcFace(nn.Module):
def __init__(self, in_features, out_features, s=64.0, m=0.50):
super(ArcFace, self).__init__()
self.in_features = in_features
self.out_features = out_features
self.s = s
self.m = m
self.kernel = nn.Parameter(torch.FloatTensor(in_features, out_features))
nn.init.normal_(self.kernel, std=0.01)
def forward(self, embbedings, label):
embbedings = l2_norm(embbedings, axis=1)
kernel_norm = l2_norm(self.kernel, axis=0)
cos_theta = torch.mm(embbedings, kernel_norm)
cos_theta = cos_theta.clamp(-1, 1) # for numerical stability
index = torch.where(label != -1)[0]
m_hot = torch.zeros(index.size()[0], cos_theta.size()[1], device=cos_theta.device)
m_hot.scatter_(1, label[index, None], self.m)
cos_theta.acos_()
cos_theta[index] += m_hot
cos_theta.cos_().mul_(self.s)
return cos_theta
| 8,386 | 40.315271 | 211 | py |
QuantFace | QuantFace-master/utils/countFLOPS.py | from torch.autograd import Variable
import numpy as np
import torch
def count_model_flops(model, input_res=[112, 112], multiply_adds=True):
list_conv = []
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (
2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
if self.bias is not None:
bias_ops = self.bias.nelement() if self.bias.nelement() else 0
flops = batch_size * (weight_ops + bias_ops)
else:
flops = batch_size * weight_ops
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
def pooling_hook_ad(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
input = input[0]
flops = int(np.prod(input.shape))
list_pooling.append(flops)
handles = []
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d) or isinstance(net, torch.nn.ConvTranspose2d):
handles.append(net.register_forward_hook(conv_hook))
elif isinstance(net, torch.nn.Linear):
handles.append(net.register_forward_hook(linear_hook))
elif isinstance(net, torch.nn.BatchNorm2d) or isinstance(net, torch.nn.BatchNorm1d):
handles.append(net.register_forward_hook(bn_hook))
elif isinstance(net, torch.nn.ReLU) or isinstance(net, torch.nn.PReLU):
handles.append(net.register_forward_hook(relu_hook))
elif isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
handles.append(net.register_forward_hook(pooling_hook))
else:
print("warning" + str(net))
return
for c in childrens:
foo(c)
model.eval()
foo(model)
input = Variable(torch.rand(3, input_res[1], input_res[0]).unsqueeze(0), requires_grad=True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
for h in handles:
h.remove()
model.train()
return flops_to_string(total_flops)
def flops_to_string(flops, units='MFLOPS', precision=4):
if units == 'GFLOPS':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPS':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPS':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPS'
def _calc_width(net):
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count | 4,062 | 36.275229 | 112 | py |
QuantFace | QuantFace-master/utils/modelFLOPS.py | import logging
from pytorch_model_summary import summary
import torch
from utils.countFLOPS import count_model_flops
from backbones.iresnet import iresnet100
from config.config_FP32 import config as cfg
if __name__ == "__main__":
# load model
if cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size)
elif cfg.network == "iresnet100":
backbone = iresnet100(num_features=cfg.embedding_size)
else:
backbone = None
logging.info("load backbone failed!")
print(summary(backbone, torch.zeros((1, 3, 112, 112)), show_input=False))
flops = count_model_flops(backbone)
print(flops)
#model.eval()
#tic = time.time()
#model.forward(torch.zeros((1, 3, 112, 112)))
#end = time.time()
#print(end-tic)
| 813 | 21.611111 | 77 | py |
QuantFace | QuantFace-master/utils/dataset.py | import numbers
import os
import queue as Queue
import random
import threading
import mxnet as mx
import numpy as np
import torch
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
import cv2
class BackgroundGenerator(threading.Thread):
def __init__(self, generator, local_rank, max_prefetch=6):
super(BackgroundGenerator, self).__init__()
self.queue = Queue.Queue(max_prefetch)
self.generator = generator
self.local_rank = local_rank
self.daemon = True
self.start()
def run(self):
torch.cuda.set_device(self.local_rank)
for item in self.generator:
self.queue.put(item)
self.queue.put(None)
def next(self):
next_item = self.queue.get()
if next_item is None:
raise StopIteration
return next_item
def __next__(self):
return self.next()
def __iter__(self):
return self
class DataLoaderX(DataLoader):
def __init__(self, local_rank, **kwargs):
super(DataLoaderX, self).__init__(**kwargs)
self.stream = torch.cuda.Stream(local_rank)
self.local_rank = local_rank
def __iter__(self):
self.iter = super(DataLoaderX, self).__iter__()
self.iter = BackgroundGenerator(self.iter, self.local_rank)
self.preload()
return self
def preload(self):
self.batch = next(self.iter, None)
if self.batch is None:
return None
with torch.cuda.stream(self.stream):
for k in range(len(self.batch)):
self.batch[k] = self.batch[k].to(device=self.local_rank,
non_blocking=True)
def __next__(self):
torch.cuda.current_stream().wait_stream(self.stream)
batch = self.batch
if batch is None:
raise StopIteration
self.preload()
return batch
class MXFaceDataset(Dataset):
def __init__(self, root_dir, local_rank):
super(MXFaceDataset, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = root_dir
self.local_rank = local_rank
path_imgrec = os.path.join(root_dir, 'train.rec')
path_imgidx = os.path.join(root_dir, 'train.idx')
self.imgrec = mx.recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r')
s = self.imgrec.read_idx(0)
header, _ = mx.recordio.unpack(s)
if header.flag > 0:
self.header0 = (int(header.label[0]), int(header.label[1]))
self.imgidx = np.array(range(1, int(header.label[0])))
else:
self.imgidx = np.array(list(self.imgrec.keys))
def __getitem__(self, index):
idx = self.imgidx[index]
s = self.imgrec.read_idx(idx)
header, img = mx.recordio.unpack(s)
label = header.label
if not isinstance(label, numbers.Number):
label = label[0]
label = torch.tensor(label, dtype=torch.long)
sample = mx.image.imdecode(img).asnumpy()
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx)
class FaceDatasetFolder(Dataset):
def __init__(self, root_dir, local_rank):
super(FaceDatasetFolder, self).__init__()
self.transform = transforms.Compose(
[transforms.ToPILImage(),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
])
self.root_dir = os.path.join( root_dir)
self.local_rank = local_rank
self.imgidx, self.labels=self.scan(self.root_dir)
def scan(self,root):
imgidex=[]
labels=[]
lb=0
list_dir=os.listdir(root)
#list_dir.sort()
for img in list_dir:
imgidex.append(os.path.join(root,img))
labels.append(lb)
lb = lb+1
return imgidex,labels
def readImage(self,path):
return cv2.imread(os.path.join(self.root_dir,path))
def __getitem__(self, index):
path = self.imgidx[index]
img=self.readImage(path)
label = self.labels[index]
label = torch.tensor(label, dtype=torch.long)
sample = cv2.cvtColor(img,cv2.COLOR_RGB2BGR)
if self.transform is not None:
sample = self.transform(sample)
return sample, label
def __len__(self):
return len(self.imgidx) | 4,790 | 30.728477 | 82 | py |
QuantFace | QuantFace-master/utils/utils_amp.py | from typing import Dict, List
import torch
from torch._six import container_abcs
from torch.cuda.amp import GradScaler
class _MultiDeviceReplicator(object):
"""
Lazily serves copies of a tensor to requested devices. Copies are cached per-device.
"""
def __init__(self, master_tensor: torch.Tensor) -> None:
assert master_tensor.is_cuda
self.master = master_tensor
self._per_device_tensors: Dict[torch.device, torch.Tensor] = {}
def get(self, device) -> torch.Tensor:
retval = self._per_device_tensors.get(device, None)
if retval is None:
retval = self.master.to(device=device, non_blocking=True, copy=True)
self._per_device_tensors[device] = retval
return retval
class MaxClipGradScaler(GradScaler):
def __init__(self, init_scale, max_scale: float, growth_interval=100):
GradScaler.__init__(self, init_scale=init_scale, growth_interval=growth_interval)
self.max_scale = max_scale
def scale_clip(self):
if self.get_scale() == self.max_scale:
self.set_growth_factor(1)
elif self.get_scale() < self.max_scale:
self.set_growth_factor(2)
elif self.get_scale() > self.max_scale:
self._scale.fill_(self.max_scale)
self.set_growth_factor(1)
def scale(self, outputs):
"""
Multiplies ('scales') a tensor or list of tensors by the scale factor.
Returns scaled outputs. If this instance of :class:`GradScaler` is not enabled, outputs are returned
unmodified.
Arguments:
outputs (Tensor or iterable of Tensors): Outputs to scale.
"""
if not self._enabled:
return outputs
self.scale_clip()
# Short-circuit for the common case.
if isinstance(outputs, torch.Tensor):
assert outputs.is_cuda
if self._scale is None:
self._lazy_init_scale_growth_tracker(outputs.device)
assert self._scale is not None
return outputs * self._scale.to(device=outputs.device, non_blocking=True)
# Invoke the more complex machinery only if we're treating multiple outputs.
stash: List[_MultiDeviceReplicator] = [] # holds a reference that can be overwritten by apply_scale
def apply_scale(val):
if isinstance(val, torch.Tensor):
assert val.is_cuda
if len(stash) == 0:
if self._scale is None:
self._lazy_init_scale_growth_tracker(val.device)
assert self._scale is not None
stash.append(_MultiDeviceReplicator(self._scale))
return val * stash[0].get(val.device)
elif isinstance(val, container_abcs.Iterable):
iterable = map(apply_scale, val)
if isinstance(val, list) or isinstance(val, tuple):
return type(val)(iterable)
else:
return iterable
else:
raise ValueError("outputs must be a Tensor or an iterable of Tensors")
return apply_scale(outputs)
| 3,187 | 37.878049 | 109 | py |
QuantFace | QuantFace-master/utils/utils_callbacks.py | import logging
import os
import time
from typing import List
import torch
from eval import verification
from utils.utils_logging import AverageMeter
class CallBackVerification(object):
def __init__(self, frequent, rank, val_targets, rec_prefix, image_size=(112, 112)):
self.frequent: int = frequent
self.rank: int = rank
self.highest_acc: float = 0.0
self.highest_acc_list: List[float] = [0.0] * len(val_targets)
self.ver_list: List[object] = []
self.ver_name_list: List[str] = []
if self.rank == 0:
self.init_dataset(val_targets=val_targets, data_dir=rec_prefix, image_size=image_size)
def ver_test(self, backbone: torch.nn.Module, global_step: int):
results = []
for i in range(len(self.ver_list)):
acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(
self.ver_list[i], backbone, 10, 10)
logging.info('[%s][%d]XNorm: %f' % (self.ver_name_list[i], global_step, xnorm))
logging.info('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (self.ver_name_list[i], global_step, acc2, std2))
if acc2 > self.highest_acc_list[i]:
self.highest_acc_list[i] = acc2
logging.info(
'[%s][%d]Accuracy-Highest: %1.5f' % (self.ver_name_list[i], global_step, self.highest_acc_list[i]))
results.append(acc2)
def init_dataset(self, val_targets, data_dir, image_size):
for name in val_targets:
path = os.path.join(data_dir, name + ".bin")
if os.path.exists(path):
data_set = verification.load_bin(path, image_size)
self.ver_list.append(data_set)
self.ver_name_list.append(name)
def __call__(self, num_update, backbone: torch.nn.Module):
if self.rank == 0 and num_update > 0 and num_update % self.frequent == 0:
backbone.eval()
self.ver_test(backbone, num_update)
backbone.train()
class CallBackLogging(object):
def __init__(self, frequent, rank, total_step, batch_size, world_size, writer=None, resume=0, rem_total_steps=None):
self.frequent: int = frequent
self.rank: int = rank
self.time_start = time.time()
self.total_step: int = total_step
self.batch_size: int = batch_size
self.world_size: int = world_size
self.writer = writer
self.resume = resume
self.rem_total_steps = rem_total_steps
self.init = False
self.tic = 0
def __call__(self, global_step, loss: AverageMeter, epoch: int):
if self.rank == 0 and global_step > 0 and global_step % self.frequent == 0:
if self.init:
try:
speed: float = self.frequent * self.batch_size / (time.time() - self.tic)
speed_total = speed * self.world_size
except ZeroDivisionError:
speed_total = float('inf')
time_now = (time.time() - self.time_start) / 3600
# TODO: resume time_total is not working
if self.resume:
time_total = time_now / ((global_step + 1) / self.rem_total_steps)
else:
time_total = time_now / ((global_step + 1) / self.total_step)
time_for_end = time_total - time_now
if self.writer is not None:
self.writer.add_scalar('time_for_end', time_for_end, global_step)
self.writer.add_scalar('loss', loss.avg, global_step)
msg = "Speed %.2f samples/sec Loss %.4f Epoch: %d Global Step: %d Required: %1.f hours" % (
speed_total, loss.avg, epoch, global_step, time_for_end
)
logging.info(msg)
loss.reset()
self.tic = time.time()
else:
self.init = True
self.tic = time.time()
class CallBackModelCheckpoint(object):
def __init__(self, rank, output="./"):
self.rank: int = rank
self.output: str = output
def __call__(self, global_step, backbone: torch.nn.Module, header: torch.nn.Module = None, quantiza : bool= False):
if quantiza:
if global_step > 100 and self.rank == 0:
torch.save(backbone.module, os.path.join(self.output, str(global_step) + "backbone.pt"))
else:
if global_step > 100 and self.rank == 0:
torch.save(backbone.module.state_dict(), os.path.join(self.output, str(global_step)+ "backbone.pth"))
if global_step > 100 and self.rank == 0 and header is not None:
torch.save(header.module.state_dict(), os.path.join(self.output, str(global_step)+ "header.pth"))
| 4,819 | 43.220183 | 120 | py |
QuantFace | QuantFace-master/backbones/vggface.py | import torch
from torchvision import datasets, transforms, models
from torch import nn, optim
from torch.autograd import Variable
from torch.utils.data.sampler import SubsetRandomSampler
from torch.utils.data import Dataset, DataLoader
from skimage import io, transform
from PIL import Image
import torchvision.transforms.functional as TF
import itertools
import torch.utils.data as data_utils
from backbones.countFLOPS import _calc_width, count_model_flops
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
class VGG_16(nn.Module):
"""
Main Class
"""
def __init__(self):
"""
Constructor
"""
super().__init__()
self.block_size = [2, 2, 3, 3, 3]
self.conv_1_1 = nn.Conv2d(3, 64, 3, stride=1, padding=1)
self.conv_1_2 = nn.Conv2d(64, 64, 3, stride=1, padding=1)
self.conv_2_1 = nn.Conv2d(64, 128, 3, stride=1, padding=1)
self.conv_2_2 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
self.conv_3_1 = nn.Conv2d(128, 256, 3, stride=1, padding=1)
self.conv_3_2 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv_3_3 = nn.Conv2d(256, 256, 3, stride=1, padding=1)
self.conv_4_1 = nn.Conv2d(256, 512, 3, stride=1, padding=1)
self.conv_4_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_4_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_1 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_2 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.conv_5_3 = nn.Conv2d(512, 512, 3, stride=1, padding=1)
self.fc6 = nn.Linear(512 * 7 * 7, 4096)
self.fc7 = nn.Linear(4096, 4096)
self.fc8 = nn.Linear(4096, 2622)
def forward(self, x):
""" Pytorch forward
Args:
x: input image (224x224)
Returns: class logits
"""
x = F.relu(self.conv_1_1(x))
x = F.relu(self.conv_1_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_2_1(x))
x = F.relu(self.conv_2_2(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_3_1(x))
x = F.relu(self.conv_3_2(x))
x = F.relu(self.conv_3_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_4_1(x))
x = F.relu(self.conv_4_2(x))
x = F.relu(self.conv_4_3(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv_5_1(x))
x = F.relu(self.conv_5_2(x))
x = F.relu(self.conv_5_3(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(x.size(0), -1)
x = F.relu(self.fc6(x))
x = F.dropout(x, 0.5, self.training)
x = F.relu(self.fc7(x))
x = F.dropout(x, 0.5, self.training)
return self.fc8(x)
def _test():
import torch
pretrained = False
models = [
VGG_16
]
for model in models:
net = model()
print(net)
# net.train()
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
flops=count_model_flops(net, input_res=[224,224])
print("m={}, {}".format(model.__name__, flops))
net.eval()
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 512))
if __name__ == "__main__":
_test()
| 3,350 | 29.189189 | 67 | py |
QuantFace | QuantFace-master/backbones/activation.py | import torch.nn as nn
import torch.nn.functional as F
import torch
from inspect import isfunction
class Identity(nn.Module):
"""
Identity block.
"""
def __init__(self):
super(Identity, self).__init__()
def forward(self, x):
return x
def __repr__(self):
return '{name}()'.format(name=self.__class__.__name__)
class HSigmoid(nn.Module):
"""
Approximated sigmoid function, so-called hard-version of sigmoid from 'Searching for MobileNetV3,'
https://arxiv.org/abs/1905.02244.
"""
def forward(self, x):
return F.relu6(x + 3.0, inplace=True) / 6.0
class Swish(nn.Module):
"""
Swish activation function from 'Searching for Activation Functions,' https://arxiv.org/abs/1710.05941.
"""
def forward(self, x):
return x * torch.sigmoid(x)
class HSwish(nn.Module):
"""
H-Swish activation function from 'Searching for MobileNetV3,' https://arxiv.org/abs/1905.02244.
Parameters:
----------
inplace : bool
Whether to use inplace version of the module.
"""
def __init__(self, inplace=False):
super(HSwish, self).__init__()
self.inplace = inplace
def forward(self, x):
return x * F.relu6(x + 3.0, inplace=self.inplace) / 6.0
def get_activation_layer(activation,param):
"""
Create activation layer from string/function.
Parameters:
----------
activation : function, or str, or nn.Module
Activation function or name of activation function.
Returns:
-------
nn.Module
Activation layer.
"""
assert (activation is not None)
if isfunction(activation):
return activation()
elif isinstance(activation, str):
if activation == "relu":
return nn.ReLU(inplace=True)
elif activation =="prelu":
return nn.PReLU(param)
elif activation == "relu6":
return nn.ReLU6(inplace=True)
elif activation == "swish":
return Swish()
elif activation == "hswish":
return HSwish(inplace=True)
elif activation == "sigmoid":
return nn.Sigmoid()
elif activation == "hsigmoid":
return HSigmoid()
elif activation == "identity":
return Identity()
else:
raise NotImplementedError()
else:
assert (isinstance(activation, nn.Module))
return activation | 2,439 | 27.045977 | 106 | py |
QuantFace | QuantFace-master/backbones/countFLOPS.py | from torch.autograd import Variable
import numpy as np
import torch
def count_model_flops(model, input_res=[112, 112], multiply_adds=True):
list_conv = []
def conv_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups)
bias_ops = 1 if self.bias is not None else 0
params = output_channels * (kernel_ops + bias_ops)
flops = (kernel_ops * (
2 if multiply_adds else 1) + bias_ops) * output_channels * output_height * output_width * batch_size
list_conv.append(flops)
list_linear = []
def linear_hook(self, input, output):
batch_size = input[0].size(0) if input[0].dim() == 2 else 1
weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
if self.bias is not None:
bias_ops = self.bias.nelement() if self.bias.nelement() else 0
flops = batch_size * (weight_ops + bias_ops)
else:
flops = batch_size * weight_ops
list_linear.append(flops)
list_bn = []
def bn_hook(self, input, output):
list_bn.append(input[0].nelement() * 2)
list_relu = []
def relu_hook(self, input, output):
list_relu.append(input[0].nelement())
list_pooling = []
def pooling_hook(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
output_channels, output_height, output_width = output[0].size()
kernel_ops = self.kernel_size * self.kernel_size
bias_ops = 0
params = 0
flops = (kernel_ops + bias_ops) * output_channels * output_height * output_width * batch_size
list_pooling.append(flops)
def pooling_hook_ad(self, input, output):
batch_size, input_channels, input_height, input_width = input[0].size()
input = input[0]
flops = int(np.prod(input.shape))
list_pooling.append(flops)
handles = []
def foo(net):
childrens = list(net.children())
if not childrens:
if isinstance(net, torch.nn.Conv2d) or isinstance(net, torch.nn.ConvTranspose2d):
handles.append(net.register_forward_hook(conv_hook))
elif isinstance(net, torch.nn.Linear):
handles.append(net.register_forward_hook(linear_hook))
elif isinstance(net, torch.nn.BatchNorm2d) or isinstance(net, torch.nn.BatchNorm1d):
handles.append(net.register_forward_hook(bn_hook))
elif isinstance(net, torch.nn.ReLU) or isinstance(net, torch.nn.PReLU):
handles.append(net.register_forward_hook(relu_hook))
elif isinstance(net, torch.nn.MaxPool2d) or isinstance(net, torch.nn.AvgPool2d):
handles.append(net.register_forward_hook(pooling_hook))
else:
print("warning" + str(net))
return
for c in childrens:
foo(c)
model.eval()
foo(model)
input = Variable(torch.rand(3, input_res[0], input_res[1]).unsqueeze(0), requires_grad=True)
out = model(input)
total_flops = (sum(list_conv) + sum(list_linear) + sum(list_bn) + sum(list_relu) + sum(list_pooling))
for h in handles:
h.remove()
model.train()
return flops_to_string(total_flops)
def flops_to_string(flops, units='MFLOPS', precision=4):
if units == 'GFLOPS':
return str(round(flops / 10.**9, precision)) + ' ' + units
elif units == 'MFLOPS':
return str(round(flops / 10.**6, precision)) + ' ' + units
elif units == 'KFLOPS':
return str(round(flops / 10.**3, precision)) + ' ' + units
else:
return str(flops) + ' FLOPS'
def _calc_width(net):
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count | 4,062 | 36.275229 | 112 | py |
QuantFace | QuantFace-master/backbones/mobilefacenet.py | import copy
from torch.nn import (
Linear,
Conv2d,
BatchNorm1d,
BatchNorm2d,
PReLU,
ReLU,
Sigmoid,
Dropout2d,
Dropout,
AvgPool2d,
MaxPool2d,
AdaptiveAvgPool2d,
Sequential,
Module,
Parameter,
)
import torch.nn.functional as F
import torch
import torch.nn as nn
from collections import namedtuple, OrderedDict
import math
#from .common import ECA_Layer, SEBlock, CbamBlock, Identity, GCT
################################## Original Arcface Model #############################################################
from quantization_utils.quant_modules import Quant_Conv2d, Quant_Linear, QuantAct, QuantActPreLu
class Flatten(Module):
def forward(self, input):
return input.view(input.size(0), -1)
class Conv_block(Module):
def __init__(
self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1
):
super(Conv_block, self).__init__()
self.conv = Conv2d(
in_c,
out_channels=out_c,
kernel_size=kernel,
groups=groups,
stride=stride,
padding=padding,
bias=False,
)
self.bn = BatchNorm2d(out_c)
self.prelu = PReLU(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
x = self.prelu(x)
return x
class Linear_block(Module):
def __init__(
self, in_c, out_c, kernel=(1, 1), stride=(1, 1), padding=(0, 0), groups=1
):
super(Linear_block, self).__init__()
self.conv = Conv2d(
in_c,
out_channels=out_c,
kernel_size=kernel,
groups=groups,
stride=stride,
padding=padding,
bias=False,
)
self.bn = BatchNorm2d(out_c)
def forward(self, x):
x = self.conv(x)
x = self.bn(x)
return x
class Depth_Wise(Module):
def __init__(
self,
in_c,
out_c,
attention,
residual=False,
kernel=(3, 3),
stride=(2, 2),
padding=(1, 1),
groups=1,
):
super(Depth_Wise, self).__init__()
self.conv = Conv_block(
in_c, out_c=groups, kernel=(1, 1), padding=(0, 0), stride=(1, 1)
)
self.conv_dw = Conv_block(
groups, groups, groups=groups, kernel=kernel, padding=padding, stride=stride
)
self.project = Linear_block(
groups, out_c, kernel=(1, 1), padding=(0, 0), stride=(1, 1)
)
self.attention = attention
self.residual = residual
self.attention = attention # se, eca, cbam
def forward(self, x):
if self.residual:
short_cut = x
x = self.conv(x)
x = self.conv_dw(x)
x = self.project(x)
if self.attention != "none":
x = self.attention_layer(x)
if self.residual:
output = short_cut + x
else:
output = x
return output
class Residual(Module):
def __init__(
self,
c,
attention,
num_block,
groups,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
):
super(Residual, self).__init__()
modules = []
for _ in range(num_block):
modules.append(
Depth_Wise(
c,
c,
attention,
residual=True,
kernel=kernel,
padding=padding,
stride=stride,
groups=groups,
)
)
self.model = Sequential(*modules)
def forward(self, x):
return self.model(x)
class GNAP(Module):
def __init__(self, embedding_size):
super(GNAP, self).__init__()
assert embedding_size == 512
self.bn1 = BatchNorm2d(512, affine=False)
self.pool = nn.AdaptiveAvgPool2d((1, 1))
self.bn2 = BatchNorm1d(512, affine=False)
def forward(self, x):
x = self.bn1(x)
x_norm = torch.norm(x, 2, 1, True)
x_norm_mean = torch.mean(x_norm)
weight = x_norm_mean / x_norm
x = x * weight
x = self.pool(x)
x = x.view(x.shape[0], -1)
feature = self.bn2(x)
return feature
class GDC(Module):
def __init__(self, embedding_size):
super(GDC, self).__init__()
self.conv_6_dw = Linear_block(
512, 512, groups=512, kernel=(7, 7), stride=(1, 1), padding=(0, 0)
)
self.conv_6_flatten = Flatten()
self.linear = Linear(512, embedding_size, bias=False)
# self.bn = BatchNorm1d(embedding_size, affine=False)
self.bn = BatchNorm1d(embedding_size)
def forward(self, x):
x = self.conv_6_dw(x)
x = self.conv_6_flatten(x)
x = self.linear(x)
x = self.bn(x)
return x
class MobileFaceNet(Module):
def __init__(
self, input_size=(112,112), embedding_size=128, output_name="GDC", attention="none"
):
super(MobileFaceNet, self).__init__()
assert output_name in ["GNAP", "GDC"]
assert input_size[0] in [112]
self.conv1 = Conv_block(3, 64, kernel=(3, 3), stride=(2, 2), padding=(1, 1))
self.conv2_dw = Conv_block(
64, 64, kernel=(3, 3), stride=(1, 1), padding=(1, 1), groups=64
)
self.conv_23 = Depth_Wise(
64, 64, attention, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=128
)
self.conv_3 = Residual(
64,
attention,
num_block=4,
groups=128,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_34 = Depth_Wise(
64, 128, attention, kernel=(3, 3), stride=(2, 2), padding=(1, 1), groups=256
)
self.conv_4 = Residual(
128,
attention,
num_block=6,
groups=256,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_45 = Depth_Wise(
128,
128,
attention,
kernel=(3, 3),
stride=(2, 2),
padding=(1, 1),
groups=512,
)
self.conv_5 = Residual(
128,
attention,
num_block=2,
groups=256,
kernel=(3, 3),
stride=(1, 1),
padding=(1, 1),
)
self.conv_6_sep = Conv_block(
128, 512, kernel=(1, 1), stride=(1, 1), padding=(0, 0)
)
if output_name == "GNAP":
self.output_layer = GNAP(512)
else:
self.output_layer = GDC(embedding_size)
self._initialize_weights()
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
m.bias.data.zero_()
def forward(self, x):
out = self.conv1(x)
out = self.conv2_dw(out)
out = self.conv_23(out)
out = self.conv_3(out)
out = self.conv_34(out)
out = self.conv_4(out)
out = self.conv_45(out)
out = self.conv_5(out)
conv_features = self.conv_6_sep(out)
out = self.output_layer(conv_features)
return out
def quantize_model(model, weight_bit=None, act_bit=None):
"""
Recursively quantize a pretrained single-precision model to int8 quantized model
model: pretrained single-precision model
"""
# if not (weight_bit) and not (act_bit ):
# weight_bit = self.settings.qw
# act_bit = self.settings.qa
# quantize convolutional and linear layers
if type(model) == nn.Conv2d:
quant_mod = Quant_Conv2d(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.Linear:
quant_mod = Quant_Linear(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.PReLU:
quant_mod = QuantActPreLu(act_bit=act_bit)
quant_mod.set_param(model)
return quant_mod
# quantize all the activation
elif type(model) == nn.ReLU or type(model) == nn.ReLU6 or type(model) == nn.PReLU:
return nn.Sequential(*[model, QuantAct(activation_bit=act_bit)])
# recursively use the quantized module to replace the single-precision module
elif type(model) == nn.Sequential or isinstance(model, nn.Sequential):
mods = OrderedDict()
for n, m in model.named_children():
if isinstance(m, Depth_Wise) and m.residual:
mods[n] = nn.Sequential(
*[quantize_model(m, weight_bit=weight_bit, act_bit=act_bit), QuantAct(activation_bit=act_bit)])
else:
mods[n] = quantize_model(m, weight_bit=weight_bit, act_bit=act_bit)
# mods.append(self.quantize_model(m))
return nn.Sequential(mods)
else:
q_model = copy.deepcopy(model)
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
setattr(q_model, attr, quantize_model(mod, weight_bit=weight_bit, act_bit=act_bit))
return q_model
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
if __name__ == "__main__":
net = MobileFaceNet()
quant=quantize_model(net,8,8)
print(quant)
| 10,285 | 28.13881 | 120 | py |
QuantFace | QuantFace-master/backbones/utils.py | import torch
from torch import nn
import torch.nn.functional as F
from backbones.activation import get_activation_layer
class DropBlock2D(nn.Module):
r"""Randomly zeroes 2D spatial blocks of the input tensor.
As described in the paper
`DropBlock: A regularization method for convolutional networks`_ ,
dropping whole blocks of feature map allows to remove semantic
information as compared to regular dropout.
Args:
drop_prob (float): probability of an element to be dropped.
block_size (int): size of the block to drop
Shape:
- Input: `(N, C, H, W)`
- Output: `(N, C, H, W)`
.. _DropBlock: A regularization method for convolutional networks:
https://arxiv.org/abs/1810.12890
"""
def __init__(self, drop_prob, block_size):
super(DropBlock2D, self).__init__()
self.drop_prob = drop_prob
self.block_size = block_size
def forward(self, x):
# shape: (bsize, channels, height, width)
assert x.dim() == 4, \
"Expected input with 4 dimensions (bsize, channels, height, width)"
if not self.training or self.drop_prob == 0.:
return x
else:
# get gamma value
gamma = self._compute_gamma(x)
# sample mask
mask = (torch.rand(x.shape[0], *x.shape[2:]) < gamma).float()
# place mask on input device
mask = mask.to(x.device)
# compute block mask
block_mask = self._compute_block_mask(mask)
# apply block mask
out = x * block_mask[:, None, :, :]
# scale output
out = out * block_mask.numel() / block_mask.sum()
return out
def _compute_block_mask(self, mask):
block_mask = F.max_pool2d(input=mask[:, None, :, :],
kernel_size=(self.block_size, self.block_size),
stride=(1,1),
padding=self.block_size//2)
if self.block_size % 2 == 0:
block_mask = block_mask[:, :, :-1, :-1]
block_mask = 1 - block_mask.squeeze(1)
return block_mask
def _compute_gamma(self, x):
return self.drop_prob / (self.block_size**2)
def round_channels(channels,
divisor=8):
"""
Round weighted channel number (make divisible operation).
Parameters:
----------
channels : int or float
Original number of channels.
divisor : int, default 8
Alignment value.
Returns:
-------
int
Weighted number of channels.
"""
rounded_channels = max(int(channels + divisor / 2.0) // divisor * divisor, divisor)
if float(rounded_channels) < 0.9 * channels:
rounded_channels += divisor
return rounded_channels
def conv1x1(in_channels,
out_channels,
stride=1,
groups=1, dilation=1,
bias=False):
"""
Convolution 1x1 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
groups=groups, dilation=dilation,
bias=bias)
def conv3x3(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False):
"""
Convolution 3x3 layer.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
class Flatten(nn.Module):
"""
Simple flatten module.
"""
def forward(self, x):
return x.view(x.size(0), -1)
def depthwise_conv3x3(channels,
stride=1,
padding=1,
dilation=1,
bias=False):
"""
Depthwise convolution 3x3 layer.
Parameters:
----------
channels : int
Number of input/output channels.
strides : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int or tuple/list of 2 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
"""
return nn.Conv2d(
in_channels=channels,
out_channels=channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=channels,
bias=bias)
class ConvBlock(nn.Module):
"""
Standard convolution block with Batch normalization and activation.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
super(ConvBlock, self).__init__()
self.activate = (activation is not None)
self.use_bn = use_bn
self.use_pad = (isinstance(padding, (list, tuple)) and (len(padding) == 4))
if self.use_pad:
self.pad = nn.ZeroPad2d(padding=padding)
padding = 0
self.conv = nn.Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias)
if self.use_bn:
self.bn = nn.BatchNorm2d(
num_features=out_channels,
eps=bn_eps)
if self.activate:
self.activ = get_activation_layer(activation,out_channels)
def forward(self, x):
if self.use_pad:
x = self.pad(x)
x = self.conv(x)
if self.use_bn:
x = self.bn(x)
if self.activate:
x = self.activ(x)
return x
def conv1x1_block(in_channels,
out_channels,
stride=1,
padding=0,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
1x1 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 0
Padding value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
stride=stride,
padding=padding,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def conv3x3_block(in_channels,
out_channels,
stride=1,
padding=1,
dilation=1,
groups=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
3x3 version of the standard convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
stride : int or tuple/list of 2 int, default 1
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int, default 1
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
groups : int, default 1
Number of groups.
bias : bool, default False
Whether the layer uses a bias vector.
use_bn : bool, default True
Whether to use BatchNorm layer.
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
activation : function or str or None, default nn.ReLU(inplace=True)
Activation function or name of activation function.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
stride=stride,
padding=padding,
dilation=dilation,
groups=groups,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
class DwsConvBlock(nn.Module):
"""
Depthwise separable convolution block with BatchNorms and activations at each convolution layers.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
stride : int or tuple/list of 2 int
Strides of the convolution.
padding : int, or tuple/list of 2 int, or tuple/list of 4 int
Padding value for convolution layer.
dilation : int or tuple/list of 2 int, default 1
Dilation value for convolution layer.
bias : bool, default False
Whether the layer uses a bias vector.
dw_use_bn : bool, default True
Whether to use BatchNorm layer (depthwise convolution block).
pw_use_bn : bool, default True
Whether to use BatchNorm layer (pointwise convolution block).
bn_eps : float, default 1e-5
Small float added to variance in Batch norm.
dw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the depthwise convolution block.
pw_activation : function or str or None, default nn.ReLU(inplace=True)
Activation function after the pointwise convolution block.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation=1,
bias=False,
dw_use_bn=True,
pw_use_bn=True,
bn_eps=1e-5,
dw_activation=(lambda: nn.ReLU(inplace=True)),
pw_activation=(lambda: nn.ReLU(inplace=True))):
super(DwsConvBlock, self).__init__()
self.dw_conv = dwconv_block(
in_channels=in_channels,
out_channels=in_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias,
use_bn=dw_use_bn,
bn_eps=bn_eps,
activation=dw_activation)
self.pw_conv = conv1x1_block(
in_channels=in_channels,
out_channels=out_channels,
bias=bias,
use_bn=pw_use_bn,
bn_eps=bn_eps,
activation=pw_activation)
def forward(self, x):
x = self.dw_conv(x)
x = self.pw_conv(x)
return x
def dwconv_block(in_channels,
out_channels,
kernel_size,
stride=1,
padding=1,
dilation=1,
bias=False,
use_bn=True,
bn_eps=1e-5,
activation=(lambda: nn.ReLU(inplace=True))):
"""
Depthwise convolution block.
"""
return ConvBlock(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=out_channels,
bias=bias,
use_bn=use_bn,
bn_eps=bn_eps,
activation=activation)
def channel_shuffle2(x,
groups):
"""
Channel shuffle operation from 'ShuffleNet: An Extremely Efficient Convolutional Neural Network for Mobile Devices,'
https://arxiv.org/abs/1707.01083. The alternative version.
Parameters:
----------
x : Tensor
Input tensor.
groups : int
Number of groups.
Returns:
-------
Tensor
Resulted tensor.
"""
batch, channels, height, width = x.size()
assert (channels % groups == 0)
channels_per_group = channels // groups
x = x.view(batch, channels_per_group, groups, height, width)
x = torch.transpose(x, 1, 2).contiguous()
x = x.view(batch, channels, height, width)
return x
def _calc_width(net):
import numpy as np
net_params = filter(lambda p: p.requires_grad, net.parameters())
weight_count = 0
for param in net_params:
weight_count += np.prod(param.size())
return weight_count
| 15,256 | 29.211881 | 120 | py |
QuantFace | QuantFace-master/backbones/senet.py | import torch.nn as nn
import math
import torch.nn.functional as F
__all__ = ['SENet', 'senet50']
from backbones.countFLOPS import count_model_flops
from backbones.utils import _calc_width
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
# This SEModule is not used.
class SEModule(nn.Module):
def __init__(self, planes, compress_rate):
super(SEModule, self).__init__()
self.conv1 = nn.Conv2d(planes, planes // compress_rate, kernel_size=1, stride=1, bias=True)
self.conv2 = nn.Conv2d(planes // compress_rate, planes, kernel_size=1, stride=1, bias=True)
self.relu = nn.ReLU(inplace=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
module_input = x
x = F.avg_pool2d(module_input, kernel_size=module_input.size(2))
x = self.conv1(x)
x = self.relu(x)
x = self.conv2(x)
x = self.sigmoid(x)
return module_input * x
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
# SENet
compress_rate = 16
# self.se_block = SEModule(planes * 4, compress_rate) # this is not used.
self.conv4 = nn.Conv2d(planes * 4, planes * 4 // compress_rate, kernel_size=1, stride=1, bias=True)
self.conv5 = nn.Conv2d(planes * 4 // compress_rate, planes * 4, kernel_size=1, stride=1, bias=True)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
## senet
out2 = F.avg_pool2d(out, kernel_size=out.size(2))
out2 = self.conv4(out2)
out2 = self.relu(out2)
out2 = self.conv5(out2)
out2 = self.sigmoid(out2)
# out2 = self.se_block.forward(out) # not used
if self.downsample is not None:
residual = self.downsample(x)
out = out2 * out + residual
# out = out2 + residual # not used
out = self.relu(out)
return out
class SENet(nn.Module):
def __init__(self, block, layers, num_classes=1000, include_top=True):
self.inplanes = 64
super(SENet, self).__init__()
self.include_top = include_top
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
if not self.include_top:
return x
#x = x.view(x.size(0), -1)
#x = self.fc(x)
return x
class sphere64(nn.Module):
def __init__(self,classnum=10574,feature=False):
super(sphere64, self).__init__()
self.classnum = classnum
self.feature = feature
#input = B*3*112*96
self.conv1_1 = nn.Conv2d(3,64,3,2,1) #=>B*64*56*48
self.relu1_1 = nn.PReLU(64)
self.conv1_2 = nn.Conv2d(64,64,3,1,1)
self.relu1_2 = nn.PReLU(64)
self.conv1_3 = nn.Conv2d(64,64,3,1,1)
self.relu1_3 = nn.PReLU(64)
self.conv1_4 = nn.Conv2d(64,64,3,1,1)
self.relu1_4 = nn.PReLU(64)
self.conv1_5 = nn.Conv2d(64,64,3,1,1)
self.relu1_5 = nn.PReLU(64)
self.conv1_6 = nn.Conv2d(64,64,3,1,1)
self.relu1_6 = nn.PReLU(64)
self.conv1_7 = nn.Conv2d(64,64,3,1,1)
self.relu1_7 = nn.PReLU(64)
self.conv1_8 = nn.Conv2d(64, 64, 3, 1, 1)
self.relu1_8 = nn.PReLU(64)
self.conv1_9 = nn.Conv2d(64, 64, 3, 1, 1)
self.relu1_9 = nn.PReLU(64)
self.conv2_1 = nn.Conv2d(64,128,3,2,1) #=>B*128*28*24
self.relu2_1 = nn.PReLU(128)
self.conv2_2 = nn.Conv2d(128,128,3,1,1)
self.relu2_2 = nn.PReLU(128)
self.conv2_3 = nn.Conv2d(128,128,3,1,1)
self.relu2_3 = nn.PReLU(128)
self.conv2_4 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_4 = nn.PReLU(128)
self.conv2_5 = nn.Conv2d(128,128,3,1,1)
self.relu2_5 = nn.PReLU(128)
self.conv2_6 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_6 = nn.PReLU(128)
self.conv2_7 = nn.Conv2d(128,128,3,1,1)
self.relu2_7 = nn.PReLU(128)
self.conv2_8 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_8 = nn.PReLU(128)
self.conv2_9 = nn.Conv2d(128,128,3,1,1)
self.relu2_9 = nn.PReLU(128)
self.conv2_10 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_10 = nn.PReLU(128)
self.conv2_11 = nn.Conv2d(128,128,3,1,1)
self.relu2_11 = nn.PReLU(128)
self.conv2_12 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_12 = nn.PReLU(128)
self.conv2_13 = nn.Conv2d(128,128,3,1,1)
self.relu2_13 = nn.PReLU(128)
self.conv2_14 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_14 = nn.PReLU(128)
self.conv2_15 = nn.Conv2d(128,128,3,1,1)
self.relu2_15 = nn.PReLU(128)
self.conv2_16 = nn.Conv2d(128,128,3,1,1) #=>B*128*28*24
self.relu2_16 = nn.PReLU(128)
self.conv2_17 = nn.Conv2d(128,128,3,1,1)
self.relu2_17 = nn.PReLU(128)
self.conv3_1 = nn.Conv2d(128,256,3,2,1) #=>B*256*14*12
self.relu3_1 = nn.PReLU(256)
self.conv3_2 = nn.Conv2d(256,256,3,1,1)
self.relu3_2 = nn.PReLU(256)
self.conv3_3 = nn.Conv2d(256,256,3,1,1)
self.relu3_3 = nn.PReLU(256)
self.conv3_4 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_4 = nn.PReLU(256)
self.conv3_5 = nn.Conv2d(256,256,3,1,1)
self.relu3_5 = nn.PReLU(256)
self.conv3_6 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_6 = nn.PReLU(256)
self.conv3_7 = nn.Conv2d(256,256,3,1,1)
self.relu3_7 = nn.PReLU(256)
self.conv3_8 = nn.Conv2d(256,256,3,1,1) #=>B*256*14*12
self.relu3_8 = nn.PReLU(256)
self.conv3_9 = nn.Conv2d(256,256,3,1,1)
self.relu3_9 = nn.PReLU(256)
self.conv3_10 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_10 = nn.PReLU(256)
self.conv3_11 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_11 = nn.PReLU(256)
self.conv3_12 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_12 = nn.PReLU(256)
self.conv3_13 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_13 = nn.PReLU(256)
self.conv3_14 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_14 = nn.PReLU(256)
self.conv3_15 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_15 = nn.PReLU(256)
self.conv3_16 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_16 = nn.PReLU(256)
self.conv3_17 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_17 = nn.PReLU(256)
self.conv3_18 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_18 = nn.PReLU(256)
self.conv3_19 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_19 = nn.PReLU(256)
self.conv3_20 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_20 = nn.PReLU(256)
self.conv3_21 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_21 = nn.PReLU(256)
self.conv3_22 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_22 = nn.PReLU(256)
self.conv3_23 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_23 = nn.PReLU(256)
self.conv3_24 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_24 = nn.PReLU(256)
self.conv3_25 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_25 = nn.PReLU(256)
self.conv3_26 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_26 = nn.PReLU(256)
self.conv3_27 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_27 = nn.PReLU(256)
self.conv3_28 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_28 = nn.PReLU(256)
self.conv3_29 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_29 = nn.PReLU(256)
self.conv3_30 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_30 = nn.PReLU(256)
self.conv3_31 = nn.Conv2d(256, 256, 3, 1, 1) # =>B*256*14*12
self.relu3_31 = nn.PReLU(256)
self.conv3_32 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_32 = nn.PReLU(256)
self.conv3_33 = nn.Conv2d(256, 256, 3, 1, 1)
self.relu3_33 = nn.PReLU(256)
self.conv4_1 = nn.Conv2d(256,512,3,2,1) #=>B*512*7*6
self.relu4_1 = nn.PReLU(512)
self.conv4_2 = nn.Conv2d(512,512,3,1,1)
self.relu4_2 = nn.PReLU(512)
self.conv4_3 = nn.Conv2d(512,512,3,1,1)
self.relu4_3 = nn.PReLU(512)
self.conv4_4 = nn.Conv2d(512,512,3,1,1)
self.relu4_4 = nn.PReLU(512)
self.conv4_5 = nn.Conv2d(512,512,3,1,1)
self.relu4_5 = nn.PReLU(512)
self.conv4_6 = nn.Conv2d(512,512,3,1,1)
self.relu4_6 = nn.PReLU(512)
self.conv4_7 = nn.Conv2d(512,512,3,1,1)
self.relu4_7 = nn.PReLU(512)
self.fc5 = nn.Linear(512*7*6,512)
def forward(self, x):
x = self.relu1_1(self.conv1_1(x))
x = x + self.relu1_3(self.conv1_3(self.relu1_2(self.conv1_2(x))))
x = x + self.relu1_5(self.conv1_5(self.relu1_4(self.conv1_4(x))))
x = x + self.relu1_7(self.conv1_7(self.relu1_6(self.conv1_6(x))))
x = self.relu2_1(self.conv2_1(x))
x = x + self.relu2_3(self.conv2_3(self.relu2_2(self.conv2_2(x))))
x = x + self.relu2_5(self.conv2_5(self.relu2_4(self.conv2_4(x))))
x = x + self.relu2_7(self.conv2_7(self.relu2_6(self.conv2_6(x))))
x = x + self.relu2_9(self.conv2_9(self.relu2_8(self.conv2_8(x))))
x = x + self.relu2_11(self.conv2_11(self.relu2_10(self.conv2_10(x))))
x = x + self.relu2_13(self.conv2_13(self.relu2_12(self.conv2_12(x))))
x = x + self.relu2_15(self.conv2_15(self.relu2_14(self.conv2_14(x))))
x = x + self.relu2_17(self.conv2_17(self.relu2_16(self.conv2_16(x))))
x = self.relu3_1(self.conv3_1(x))
x = x + self.relu3_3(self.conv3_3(self.relu3_2(self.conv3_2(x))))
x = x + self.relu3_5(self.conv3_5(self.relu3_4(self.conv3_4(x))))
x = x + self.relu3_7(self.conv3_7(self.relu3_6(self.conv3_6(x))))
x = x + self.relu3_9(self.conv3_9(self.relu3_8(self.conv3_8(x))))
x = x + self.relu3_11(self.conv3_11(self.relu3_10(self.conv3_10(x))))
x = x + self.relu3_13(self.conv3_13(self.relu3_12(self.conv3_12(x))))
x = x + self.relu3_15(self.conv3_15(self.relu3_14(self.conv3_14(x))))
x = x + self.relu3_17(self.conv3_17(self.relu3_16(self.conv3_16(x))))
x = x + self.relu3_19(self.conv3_19(self.relu3_18(self.conv3_18(x))))
x = x + self.relu3_21(self.conv3_21(self.relu3_20(self.conv3_20(x))))
x = x + self.relu3_23(self.conv3_23(self.relu3_22(self.conv3_22(x))))
x = x + self.relu3_25(self.conv3_25(self.relu3_24(self.conv3_24(x))))
x = x + self.relu3_27(self.conv3_27(self.relu3_26(self.conv3_26(x))))
x = x + self.relu3_29(self.conv3_29(self.relu3_28(self.conv3_28(x))))
x = x + self.relu3_31(self.conv3_31(self.relu3_30(self.conv3_20(x))))
x = x + self.relu3_33(self.conv3_33(self.relu3_32(self.conv3_32(x))))
x = self.relu4_1(self.conv4_1(x))
x = x + self.relu4_3(self.conv4_3(self.relu4_2(self.conv4_2(x))))
x = x + self.relu4_5(self.conv4_5(self.relu4_4(self.conv4_4(x))))
x = x + self.relu4_7(self.conv4_7(self.relu4_7(self.conv4_6(x))))
x = x.view(x.size(0),-1)
x = self.fc5(x)
if self.feature: return x
return x
def senet50(**kwargs):
"""Constructs a SENet-50 model.
"""
model = SENet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def _test():
import torch
pretrained = False
models = [
senet50
]
for model in models:
net = model()
#print(net)
# net.train()
net.eval()
x = torch.randn(1, 3, 224, 224)
y = net(x)
y.sum().backward()
print(y.size())
assert (tuple(y.size()) == (1, 2048,1,1))
weight_count = _calc_width(net)
print("m={}, {}".format(model.__name__, weight_count))
flops = count_model_flops(net,input_res=[224,224])
print("m={}, {}".format(model.__name__, flops))
if __name__ == "__main__":
_test()
| 15,709 | 33.679912 | 107 | py |
QuantFace | QuantFace-master/backbones/iresnet.py | import copy
from collections import OrderedDict
import torch
from torch import nn
__all__ = ['iresnet18', 'iresnet34', 'iresnet50', 'iresnet100']
from backbones.countFLOPS import _calc_width, count_model_flops
from quantization_utils.quant_modules import QuantAct, Quant_Linear, Quant_Conv2d, QuantActPreLu
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes,
out_planes,
kernel_size=1,
stride=stride,
bias=False)
class SEModule(nn.Module):
def __init__(self, channels, reduction):
super(SEModule, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0, bias=False)
self.relu = nn.ReLU(inplace=True)
self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0, bias=False)
self.sigmoid = nn.Sigmoid()
def forward(self, x):
input = x
x = self.avg_pool(x)
x = self.fc1(x)
x = self.relu(x)
x = self.fc2(x)
x = self.sigmoid(x)
return input * x
class IBasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None,
groups=1, base_width=64, dilation=1,use_se=False):
super(IBasicBlock, self).__init__()
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
self.bn1 = nn.BatchNorm2d(inplanes, eps=1e-05,)
self.conv1 = conv3x3(inplanes, planes)
self.bn2 = nn.BatchNorm2d(planes, eps=1e-05,)
self.prelu = nn.PReLU(planes)
self.conv2 = conv3x3(planes, planes, stride)
self.bn3 = nn.BatchNorm2d(planes, eps=1e-05,)
self.downsample = downsample
self.stride = stride
self.use_se=use_se
if (use_se):
self.se_block=SEModule(planes,16)
def forward(self, x):
identity = x
out = self.bn1(x)
out = self.conv1(out)
out = self.bn2(out)
out = self.prelu(out)
out = self.conv2(out)
out = self.bn3(out)
if(self.use_se):
out=self.se_block(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
class IResNet(nn.Module):
fc_scale = 7 * 7
def __init__(self,
block, layers, dropout=0, num_features=512, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None, use_se=False):
super(IResNet, self).__init__()
self.inplanes = 64
self.dilation = 1
self.use_se=use_se
if replace_stride_with_dilation is None:
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(self.inplanes, eps=1e-05)
self.prelu = nn.PReLU(self.inplanes)
self.layer1 = self._make_layer(block, 64, layers[0], stride=2 ,use_se=self.use_se)
self.layer2 = self._make_layer(block,
128,
layers[1],
stride=2,
dilate=replace_stride_with_dilation[0],use_se=self.use_se)
self.layer3 = self._make_layer(block,
256,
layers[2],
stride=2,
dilate=replace_stride_with_dilation[1] ,use_se=self.use_se)
self.layer4 = self._make_layer(block,
512,
layers[3],
stride=2,
dilate=replace_stride_with_dilation[2] ,use_se=self.use_se)
self.bn2 = nn.BatchNorm2d(512 * block.expansion, eps=1e-05,)
self.dropout =nn.Dropout(p=dropout, inplace=True) # 7x7x 512
self.fc = nn.Linear(512 * block.expansion * self.fc_scale, num_features)
self.features = nn.BatchNorm1d(num_features, eps=1e-05)
nn.init.constant_(self.features.weight, 1.0)
self.features.weight.requires_grad = False
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, 0, 0.1)
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
if zero_init_residual:
for m in self.modules():
if isinstance(m, IBasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False,use_se=False):
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion, eps=1e-05, ),
)
layers = []
layers.append(
block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation,use_se=use_se))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,use_se=use_se))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.prelu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.bn2(x)
x = torch.flatten(x, 1)
x = self.dropout(x)
x = self.fc(x)
x = self.features(x)
return x
def _iresnet(arch, block, layers, pretrained, progress, **kwargs):
model = IResNet(block, layers, **kwargs)
if pretrained:
raise ValueError()
return model
def iresnet18(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet18', IBasicBlock, [2, 2, 2, 2], pretrained,
progress, **kwargs)
def iresnet34(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet34', IBasicBlock, [3, 4, 6, 3], pretrained,
progress, **kwargs)
def iresnet50(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet50', IBasicBlock, [3, 4, 14, 3], pretrained,
progress, **kwargs)
def iresnet100(pretrained=False, progress=True, **kwargs):
return _iresnet('iresnet100', IBasicBlock, [3, 13, 30, 3], pretrained,
progress, **kwargs)
def quantize_model( model,weight_bit=None, act_bit=None ):
"""
Recursively quantize a pretrained single-precision model to int8 quantized model
model: pretrained single-precision model
"""
#if not (weight_bit) and not (act_bit ):
# weight_bit = self.settings.qw
# act_bit = self.settings.qa
# quantize convolutional and linear layers
if type(model) == nn.Conv2d:
quant_mod = Quant_Conv2d(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.Linear :
quant_mod = Quant_Linear(weight_bit=weight_bit)
quant_mod.set_param(model)
return quant_mod
elif type(model) == nn.PReLU :
quant_mod = QuantActPreLu(act_bit=act_bit)
quant_mod.set_param(model)
return quant_mod
# quantize all the activation
elif type(model) == nn.ReLU or type(model) == nn.ReLU6 or type(model)==nn.PReLU:
return nn.Sequential(*[model, QuantAct(activation_bit=act_bit)])
# recursively use the quantized module to replace the single-precision module
elif type(model) == nn.Sequential or isinstance(model,nn.Sequential):
mods = OrderedDict()
for n, m in model.named_children():
if isinstance(m,IBasicBlock):
mods[n] = nn.Sequential(*[quantize_model(m,weight_bit=weight_bit, act_bit=act_bit), QuantAct(activation_bit=act_bit)])
else:
mods[n] = quantize_model(m, weight_bit=weight_bit, act_bit=act_bit)
return nn.Sequential(mods)
else:
q_model = copy.deepcopy(model)
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
setattr(q_model, attr, quantize_model(mod,weight_bit=weight_bit, act_bit=act_bit))
return q_model
def freeze_model( model):
"""
freeze the activation range
"""
if type(model) == QuantAct:
model.fix()
elif type(model) == nn.Sequential:
for n, m in model.named_children():
freeze_model(m)
else:
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
freeze_model(mod)
return model
def unfreeze_model( model):
"""
unfreeze the activation range
"""
if type(model) == QuantAct:
model.unfix()
elif type(model) == nn.Sequential:
for n, m in model.named_children():
unfreeze_model(m)
else:
for attr in dir(model):
mod = getattr(model, attr)
if isinstance(mod, nn.Module) and 'norm' not in attr:
unfreeze_model(mod)
return model
def _test():
models = [
iresnet100
]
for model in models:
net=model()
quant=quantize_model(net,8,8)
print(quant)
weight_count = _calc_width(net)
flops = count_model_flops(net)
print("m={}, {}".format(model.__name__, weight_count))
print("m={}, {}".format(model.__name__, flops))
net.eval()
x = torch.randn(1, 3, 112, 112)
y = net(x)
y.sum().backward()
assert (tuple(y.size()) == (1, 512))
if __name__ == "__main__":
_test()
| 11,550 | 36.141479 | 142 | py |
Progressive-Pruning | Progressive-Pruning-main/main_anytime_train.py | import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="../imagenet",
help="location of the imagenet folder",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
if args.scope == "l":
args.scope = "local"
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
if args.seed:
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
whole_trainset, test_set = Setup_RestrictedImageNet(args, args.imagenet_path)
if args.tickets_init:
print("loading init from {}".format(args.tickets_init))
init_file = torch.load(args.tickets_init, map_location="cpu")
if "init_weight" in init_file:
init_file = init_file["init_weight"]
model.load_state_dict(init_file)
else:
torch.save(model.state_dict(), os.path.join(args.save_dir, "randinit.pth.tar"))
# setup initialization and mask
if args.tickets_mask:
print("loading mask from {}".format(args.tickets_mask))
mask_file = torch.load(args.tickets_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
if args.resume:
print("resume from checkpoint {}".format(args.checkpoint))
checkpoint = torch.load(
args.checkpoint, map_location=torch.device("cuda:" + str(args.gpu))
)
best_sa = checkpoint["best_sa"]
start_epoch = checkpoint["epoch"]
all_result = checkpoint["result"]
start_state = checkpoint["state"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
print(
"loading from state: {} epoch: {}, best_sa = {}".format(
start_state, start_epoch, best_sa
)
)
else:
all_result = {}
all_result["gen_gap"] = []
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["lr"] = []
all_result["val_loss"] = []
start_epoch = 0
start_state = 1
# sparsity = [1, 1.5,1.75,2, 2.5,3,3.5,4,4.5,5] # 32.768 remaining_weights=0.8**(sparsity)
if args.scope == "local":
sparsity = [args.sparsity_level for x in range(args.meta_batch_number)]
else:
sparsity = np.linspace(1, args.sparsity_level, args.meta_batch_number)
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
print("Current state = {}".format(current_state))
start_time = time.time()
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
# Generate Mask using SNIP
sparsity_level = sparsity[current_state - 1]
save_mask = (
args.save_dir
+ f"/{current_state}mask_{args.pruner}_{sparsity_level}.pth.tar"
)
if current_state == 1:
model_load_dir = (
args.save_dir + "/randinit.pth.tar"
) # 1st Meta Batch Randomly initialized model
else:
model_load_dir = args.save_dir + f"/{current_state-1}model_SA_best.pth.tar"
generate_mask_(
args,
train_snip_set,
args.pruner,
model_load_dir,
save=save_mask,
state=sparsity_level,
)
model.cpu()
# Load the Model by applying above mask
print("loading mask from {}".format(save_mask))
mask_file = torch.load(save_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
# evaluate on test set
# test_tacc = validate(test_loader, model, criterion)
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
# report result
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff: {}".format(diff))
# Reset LR to 0.1 after each state
for g in optimizer.param_groups:
g["lr"] = 0.1
print("LR reset to 0.1")
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
print("Test Acc = {}".format(test_tacc))
print("CER = {}".format(sum(CER)))
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("Final Test Accuracy: ")
print(test_tacc)
print("CER")
print(CER)
print("Anytime Relative Error")
print(CER_diff)
print("Total time")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.cuda()
target = target.cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.cuda()
target = target.cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 19,993 | 31.777049 | 107 | py |
Progressive-Pruning | Progressive-Pruning-main/main_anytime_baseline.py | import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="../imagenet",
help="location of the imagenet folder",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
if args.seed:
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
whole_trainset, test_set = Setup_RestrictedImageNet(args.imagenet_path)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
all_result = {}
all_result["gen_gap"] = []
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
start_epoch = 0
start_state = 1
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
print("Current state = {}".format(current_state))
start_time = time.time()
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10 and args.few_shot:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
_,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff = {}".format(diff))
# Reset LR to 0.1 after each state
for g in optimizer.param_groups:
g["lr"] = 0.1
print("LR reset to 0.1")
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("Test Acc = {}".format(test_tacc))
print("CER = {}".format(sum(CER)))
print("CER")
print(CER)
print("Final Test Accuracy: ")
print(test_tacc)
print("Anytime Relative Error")
print(CER_diff)
print("Total time")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 16,789 | 31.041985 | 107 | py |
Progressive-Pruning | Progressive-Pruning-main/utils.py | """
setup model and datasets
"""
import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
# from advertorch.utils import NormalizeByChannelMeanStd
from torch.autograd.variable import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
from dataset import *
from models import *
__all__ = ["setup_model_dataset", "setup_model"]
def evaluate_cer(net, args, loader_=None):
criterion = nn.CrossEntropyLoss()
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
if args.dataset == "cifar10":
test_set = CIFAR10(
"../data", train=False, transform=test_transform, download=True
)
test_loader = DataLoader(
test_set,
batch_size=128,
shuffle=False,
num_workers=2,
pin_memory=True,
)
elif args.dataset == "cifar100":
test_set = CIFAR100(
"../data", train=False, transform=test_transform, download=True
)
test_loader = DataLoader(
test_set,
batch_size=128,
shuffle=False,
num_workers=2,
pin_memory=True,
)
elif args.dataset == "restricted_imagenet":
test_loader = loader_
correct = 0
total_loss = 0
total = 0 # number of samples
num_batch = len(test_loader)
use_cuda = True
net.cuda()
net.eval()
with torch.no_grad():
if isinstance(criterion, nn.CrossEntropyLoss):
for batch_idx, (inputs, targets) in enumerate(test_loader):
# print(inputs.size(0))
batch_size = inputs.size(0)
total += batch_size
inputs = Variable(inputs)
targets = Variable(targets)
if use_cuda:
inputs, targets = inputs.cuda(), targets.cuda()
outputs = net(inputs)
loss = criterion(outputs, targets)
total_loss += loss.item() * batch_size
_, predicted = torch.max(outputs.data, 1)
correct += predicted.eq(targets).sum().item()
print("Correct %")
print(100 * correct / total)
misclassified = total - correct
print("Total Loss")
print(total_loss * 100 / total)
print(f"misclassified samples from {total}")
print(misclassified)
return misclassified
def setup_model(args):
if args.dataset == "cifar10":
classes = 10
normalization = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
elif args.dataset == "cifar100":
classes = 100
normalization = NormalizeByChannelMeanStd(
mean=[0.5071, 0.4866, 0.4409], std=[0.2673, 0.2564, 0.2762]
)
elif args.dataset == "restricted_imagenet":
classes = 14
if args.imagenet_arch:
if args.dataset == "restricted_imagenet":
classes = 14
model = model_dict[args.arch](num_classes=classes, imagenet=True)
else:
model = model_dict[args.arch](num_classes=classes)
if args.dataset != "restricted_imagenet":
model.normalize = normalization
return model
| 3,323 | 27.904348 | 75 | py |
Progressive-Pruning | Progressive-Pruning-main/dataset.py | """
function for loading datasets
contains:
CIFAR-10
CIFAR-100
"""
import os
import random
import numpy as np
import torch
import torchvision
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
__all__ = [
"cifar10_dataloaders",
"cifar100_dataloaders",
"generate_anytime_cifar10_dataloader",
]
from robustness.datasets import RestrictedImageNetBalanced
def to_few_shot(dataset, n_shots=10):
"""
Transforms torchvision dataset to a few-shot dataset.
:param dataset: torchvision dataset
:param n_shots: number of samples per class
:return: few-shot torchvision dataset
"""
try:
targets = dataset.targets # targets or labels depending on the dataset
is_targets = True
except:
targets = dataset.labels
is_targets = False
assert min(targets) == 0, "labels should start from 0, not from {}".format(
min(targets)
)
# Find n_shots samples for each class
labels_dict = {}
imgs = dataset.imgs
for i, lbl in enumerate(imgs):
if lbl[1] not in labels_dict:
labels_dict[lbl[1]] = []
if len(labels_dict[lbl[1]]) < n_shots:
labels_dict[lbl[1]].append(i)
idx = sorted(
torch.cat([torch.tensor(v) for k, v in labels_dict.items()])
) # sort according to the original order in the full dataset
dataset.imgs = (
[dataset.imgs[i] for i in idx]
if isinstance(dataset.imgs, list)
else dataset.imgs[idx]
)
targets = [imgs[i][1] for i in idx]
if is_targets:
dataset.targets = targets
else:
dataset.labels = targets
return dataset
def Setup_RestrictedImageNet(args, path):
ds = RestrictedImageNetBalanced(path)
train_set, test_set = ds.make_loaders(batch_size=128, workers=8)
if args.few_shot:
print("Few Shot Regime Train Data Loading ")
train_set = to_few_shot(train_set, n_shots=args.n_shots)
return train_set, test_set
def generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, sample_len, state=1
):
meta_train_size = int(args.meta_batch_size * 0.9) # 29839#
meta_val_size = args.meta_batch_size - meta_train_size # 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
else:
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 2) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.buffer_replay:
k = args.buffer_size_train
l = args.buffer_size_val
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
else:
train_list = list(range(0, state * meta_train_size)) # 0 45000
val_list = list(
range(sample_len, sample_len + state * meta_val_size)
) # 45000 500
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
def generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, sample_len, state=1
):
meta_train_size = int(args.meta_batch_size * 0.9) # 29839#
meta_val_size = args.meta_batch_size - meta_train_size # 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
else: # 0-1, 1-2,2-3,3-4,4-5
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
sample_len + (state - 2) * meta_val_size,
sample_len + state * meta_val_size,
)
)
elif args.buffer_replay:
k = args.buffer_size_train
l = args.buffer_size_val
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(
sample_len + (state - 1) * meta_val_size,
sample_len + state * meta_val_size,
)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
else:
train_list = list(range(0, state * meta_train_size)) # 0 45000
val_list = list(
range(sample_len, sample_len + state * meta_val_size)
) # 45000 500
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
def cifar10_dataloaders(batch_size=128, data_dir="datasets/cifar10", num_workers=2):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
print(
"Dataset information: CIFAR-10\t 45000 images for training \t 500 images for validation\t"
)
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
train_set = Subset(
CIFAR10(data_dir, train=True, transform=train_transform, download=True),
list(range(45000)),
)
val_set = Subset(
CIFAR10(data_dir, train=True, transform=test_transform, download=True),
list(range(45000, 50000)),
)
test_set = CIFAR10(data_dir, train=False, transform=test_transform, download=True)
train_loader = DataLoader(
train_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader
def cifar100_dataloaders(batch_size=128, data_dir="datasets/cifar100", num_workers=2):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
print(
"Dataset information: CIFAR-100\t 45000 images for training \t 500 images for validation\t"
)
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
train_set = Subset(
CIFAR100(data_dir, train=True, transform=train_transform, download=True),
list(range(45000)),
)
val_set = Subset(
CIFAR100(data_dir, train=True, transform=test_transform, download=True),
list(range(45000, 50000)),
)
test_set = CIFAR100(data_dir, train=False, transform=test_transform, download=True)
train_loader = DataLoader(
train_set,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=batch_size,
shuffle=False,
num_workers=num_workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader
def setup__cifar10_dataset(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
whole_trainset = CIFAR10(
args.data, train=True, transform=train_transform, download=True
)
return whole_trainset
def setup__cifar10_dataset_end(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
whole_trainset = CIFAR10(
args.data, train=True, transform=train_transform, download=True
)
# 50,000 -200 = 49800
end_list = list(range(49800, 50000))
sub_whole_trainset = Subset(whole_trainset, list(range(49800)))
end_trainset = Subset(whole_trainset, end_list)
return sub_whole_trainset, end_trainset
def generate_anytime_cifar10_dataloader_end(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
# 45000-200 = 49800 , 49800-623
meta_train_size = int(args.meta_batch_size * 0.9) # #5602
meta_val_size = args.meta_batch_size - meta_train_size # 623
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(44816 + (state - 1) * meta_val_size, 44816 + state * meta_val_size)
)
else:
train_list = list(range(0, state * meta_train_size)) # 0 44816
val_list = list(range(44816, 44816 + state * meta_val_size)) # 45000 500
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR10(args.data, train=False, transform=test_transform, download=True)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
buffer_train_set = []
buffer_val_set = []
def generate_anytime_cifar10_dataloader(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
meta_train_size = int(args.meta_batch_size * 0.9) # 4500 #
meta_val_size = args.meta_batch_size - meta_train_size # 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
elif args.one_replay:
if state == 1:
train_list = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size
)
)
else: # 0-1, 1-2,2-3,3-4,4-5
train_list = list(
range((state - 2) * meta_train_size, state * meta_train_size)
)
val_list = list(
range(
45000 + (state - 2) * meta_val_size, 45000 + state * meta_val_size
)
)
elif args.buffer_replay:
k = args.buffer_size_train
l = args.buffer_size_val
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
train_list.extend(buffer_train_set)
val_list.extend(buffer_val_set)
# Populating Buffer
train_sampled_set = random.sample(train_list, k)
valid_sampled_set = random.sample(val_list, l)
buffer_train_set.extend(train_sampled_set)
buffer_val_set.extend(valid_sampled_set)
else:
train_list = list(range(0, state * meta_train_size)) # 0 45000
val_list = list(range(45000, 45000 + state * meta_val_size)) # 45000 500
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR10(args.data, train=False, transform=test_transform, download=True)
if args.snip_no_replay:
train_list_norep = list(
range((state - 1) * meta_train_size, state * meta_train_size)
)
train_set_norep = Subset(whole_trainset, train_list_norep)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set_norep, list(range(snip_set)))
else:
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
def setup__cifar100_dataset(args):
train_transform = transforms.Compose(
[
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
]
)
print(
"Dataset information: CIFAR-100\t 45000 images for training \t 500 images for validation\t"
)
print("10000 images for testing\t no normalize applied in data_transform")
print("Data augmentation = randomcrop(32,4) + randomhorizontalflip")
whole_trainset = CIFAR100(
args.data, train=True, transform=train_transform, download=True
)
return whole_trainset
def generate_anytime_cifar100_dataloader(args, whole_trainset, state=1):
test_transform = transforms.Compose(
[
transforms.ToTensor(),
]
)
meta_train_size = int(args.meta_batch_size * 0.9) # 4500
meta_val_size = args.meta_batch_size - meta_train_size # 500
if args.no_replay:
train_list = list(range((state - 1) * meta_train_size, state * meta_train_size))
val_list = list(
range(45000 + (state - 1) * meta_val_size, 45000 + state * meta_val_size)
)
else:
train_list = list(range(0, state * meta_train_size)) # 0 45000
val_list = list(range(45000, 45000 + state * meta_val_size)) # 45000 500
print(
"Current: Trainset size = {}, Valset size = {}".format(
len(train_list), len(val_list)
)
)
train_set = Subset(whole_trainset, train_list)
val_set = Subset(whole_trainset, val_list)
test_set = CIFAR100(args.data, train=False, transform=test_transform, download=True)
snip_set = int(args.meta_batch_size * args.snip_size)
train_snip_set = Subset(train_set, list(range(snip_set)))
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
num_workers=args.workers,
pin_memory=True,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
test_loader = DataLoader(
test_set,
batch_size=args.batch_size,
shuffle=False,
num_workers=args.workers,
pin_memory=True,
)
return train_loader, val_loader, test_loader, train_snip_set
| 21,039 | 28.928876 | 99 | py |
Progressive-Pruning | Progressive-Pruning-main/main_anytime_one.py | import argparse
import os
import pdb
import pickle
import random
import shutil
import time
from copy import deepcopy
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.backends.cudnn as cudnn
import torch.multiprocessing
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data.sampler import SubsetRandomSampler
import utils
torch.multiprocessing.set_sharing_strategy("file_system")
from dataset import (Setup_RestrictedImageNet,
generate_anytime_cifar10_dataloader,
generate_anytime_cifar100_dataloader,
generate_anytime_res_img_dataloader,
generate_anytime_res_img_dataloader_few,
setup__cifar10_dataset, setup__cifar100_dataset)
from generate_mask import generate_mask_
from pruner import *
from utils import evaluate_cer, setup_model
from wb import WandBLogger
parser = argparse.ArgumentParser(description="PyTorch Anytime Training")
##################################### Dataset #################################################
parser.add_argument(
"--data", type=str, default="../data", help="location of the data corpus"
)
parser.add_argument("--dataset", type=str, default="cifar10", help="dataset")
parser.add_argument(
"--meta_batch_size",
type=int,
default=5000,
help="data number in each meta batch_size",
)
parser.add_argument("--meta_batch_number", type=int, default=10)
##################################### Architecture ############################################
parser.add_argument("--arch", type=str, default="resnet20s", help="model architecture")
parser.add_argument(
"--imagenet_arch",
action="store_true",
help="architecture for imagenet size samples",
)
parser.add_argument(
"--imagenet_path",
type=str,
default="/home/mila/i/irina.rish/scratch/imagenet",
help="location of the data corpus",
)
##################################### General setting ############################################
parser.add_argument("--seed", default=None, type=int, help="random seed")
parser.add_argument("--gpu", type=int, default=0, help="gpu device id")
parser.add_argument(
"--workers", type=int, default=2, help="number of workers in dataloader"
)
parser.add_argument("--resume", action="store_true", help="resume from checkpoint")
parser.add_argument("--checkpoint", type=str, default=None, help="checkpoint file")
parser.add_argument(
"--save_dir",
help="The directory used to save the trained models",
default=None,
type=str,
)
parser.add_argument("-no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-one_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-buffer_replay", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--buffer_size_train",
default=182,
type=int,
help="number of Random Train examples to add in buffer",
)
parser.add_argument(
"--buffer_size_valid",
default=182,
type=int,
help="number of Random Valid examples to add in buffer",
)
parser.add_argument("-snip_no_replay", action="store_true", help="Flag for No Replay")
parser.add_argument("-few_shot", action="store_true", help="Flag for No Replay")
parser.add_argument(
"--n_shots",
default=100,
type=int,
help="number of Random Valid examples to add in buffer",
)
##################################### Training setting #################################################
parser.add_argument("--batch_size", type=int, default=128, help="batch size")
parser.add_argument("--lr", default=0.1, type=float, help="initial learning rate")
parser.add_argument("--momentum", default=0.9, type=float, help="momentum")
parser.add_argument("--weight_decay", default=1e-4, type=float, help="weight decay")
parser.add_argument(
"--epochs", default=182, type=int, help="number of total epochs to run"
)
parser.add_argument("--warmup", default=0, type=int, help="warm up epochs")
parser.add_argument("--print_freq", default=50, type=int, help="print frequency")
parser.add_argument("--decreasing_lr", default="91,136", help="decreasing strategy")
##################################### Pruning setting #################################################
parser.add_argument(
"--tickets_mask", default=None, type=str, help="mask for subnetworks"
)
parser.add_argument(
"--tickets_init", default=None, type=str, help="initilization for subnetworks"
)
parser.add_argument(
"--snip_size", default=0.20, type=float, help="the size for the snip"
)
parser.add_argument("--sparsity_level", default=0, type=float, help="sparsity level")
parser.add_argument(
"--pruner", default="snip", type=str, help="Pruner Type[mag,snip,GraSP,SynFlow]"
)
parser.add_argument(
"--scope", default="global", type=str, help="Scope of Pruner[local,global]"
)
##################################### W&B Logging setting #################################################
parser.add_argument("-wb", action="store_true", help="Flag for using W&B logging")
parser.add_argument(
"--project_name", default="APP", type=str, help="Name of the W&B project"
)
parser.add_argument(
"--run", default="Anytime_fixed", type=str, help="Name for the W&B run"
)
best_sa = 0
args = parser.parse_args()
print(args)
os.makedirs(args.save_dir, exist_ok=True)
if args.scope == "l":
args.scope = "local"
def main():
global args, best_sa
args = parser.parse_args()
print(args)
torch.cuda.set_device(int(args.gpu))
os.makedirs(args.save_dir, exist_ok=True)
if args.seed:
setup_seed(args.seed)
model = setup_model(args)
if args.dataset == "cifar10":
whole_trainset = setup__cifar10_dataset(args)
elif args.dataset == "cifar100":
whole_trainset = setup__cifar100_dataset(args)
elif args.dataset == "restricted_imagenet":
whole_trainset, test_set = Setup_RestrictedImageNet(args.imagenet_path)
model.cpu()
# print(model)
if args.tickets_init:
print("loading init from {}".format(args.tickets_init))
init_file = torch.load(args.tickets_init, map_location="cpu")
if "init_weight" in init_file:
init_file = init_file["init_weight"]
model.load_state_dict(init_file)
else:
torch.save(model.state_dict(), os.path.join(args.save_dir, "randinit.pth.tar"))
# setup initialization and mask
if args.tickets_mask:
print("loading mask from {}".format(args.tickets_mask))
mask_file = torch.load(args.tickets_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
criterion = nn.CrossEntropyLoss()
decreasing_lr = list(map(int, args.decreasing_lr.split(",")))
optimizer = torch.optim.SGD(
model.parameters(),
args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
if args.wb:
wandb_logger = WandBLogger(
project_name=args.project_name,
run_name=args.run,
dir=args.save_dir,
config=vars(args),
model=model,
params={"resume": args.resume},
)
else:
wandb_logger = None
if args.resume:
print("resume from checkpoint {}".format(args.checkpoint))
checkpoint = torch.load(
args.checkpoint, map_location=torch.device("cuda:" + str(args.gpu))
)
best_sa = checkpoint["best_sa"]
start_epoch = checkpoint["epoch"]
all_result = checkpoint["result"]
start_state = checkpoint["state"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
scheduler.load_state_dict(checkpoint["scheduler"])
print(
"loading from state: {} epoch: {}, best_sa = {}".format(
start_state, start_epoch, best_sa
)
)
else:
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
start_epoch = 0
start_state = 1
time_list = []
CER = []
CER_diff = []
for current_state in range(start_state, args.meta_batch_number + 1):
scheduler = torch.optim.lr_scheduler.MultiStepLR(
optimizer, milestones=decreasing_lr, gamma=0.1
)
start_time = time.time()
print("Current state = {}".format(current_state))
if args.dataset == "cifar10":
print("Loading cifar10 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar10_dataloader(args, whole_trainset, current_state)
elif args.dataset == "cifar100":
print("Loading cifar100 dataset in anytime setting")
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_cifar100_dataloader(
args, whole_trainset, current_state
)
elif args.dataset == "restricted_imagenet":
print("Loading Restricted Imagenet dataset in anytime setting")
if args.meta_batch_number == 3:
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader(
args, whole_trainset, test_set, 80565, current_state
)
elif args.meta_batch_number == 10 and args.few_shot:
# Few Shot Dataloader Example
(
train_loader,
val_loader,
test_loader,
train_snip_set,
) = generate_anytime_res_img_dataloader_few(
args, whole_trainset, test_set, 6800, current_state
)
# Generate Mask using SNIP
if current_state == 1:
sparsity_level = (
args.sparsity_level
) # 0.8**sparsity_level 80% Remaining Weights
save_mask = (
args.save_dir
+ f"/{current_state}mask_{args.pruner}_{sparsity_level}.pth.tar"
)
model_load_dir = (
args.save_dir + "/randinit.pth.tar"
) # 1st Meta Batch Randomly initialized model
generate_mask_(
args,
train_snip_set,
args.pruner,
model_load_dir,
save=save_mask,
state=sparsity_level,
)
model.cpu()
# Load the Model by applying above mask
print("loading mask from {}".format(save_mask))
mask_file = torch.load(save_mask, map_location="cpu")
if "state_dict" in mask_file:
mask_file = mask_file["state_dict"]
mask_file = extract_mask(mask_file)
print("pruning with {} masks".format(len(mask_file)))
prune_model_custom(model, mask_file)
model.cuda()
for epoch in range(start_epoch, args.epochs):
print(optimizer.state_dict()["param_groups"][0]["lr"])
acc, loss = train(train_loader, model, criterion, optimizer, epoch)
# evaluate on validation set
tacc, vloss = validate(val_loader, model, criterion)
# evaluate on test set
scheduler.step()
# remember best prec@1 and save checkpoint
is_best_sa = tacc > best_sa
best_sa = max(tacc, best_sa)
gen_gap = acc - tacc
all_result["gen_gap"].append(gen_gap)
all_result["train_ta"].append(acc)
all_result["val_ta"].append(tacc)
all_result["best_sa"].append(best_sa)
all_result["train_loss"].append(loss)
all_result["val_loss"].append(vloss)
all_result["lr"].append(optimizer.state_dict()["param_groups"][0]["lr"])
save_checkpoint(
{
"state": current_state,
"result": all_result,
"epoch": epoch + 1,
"state_dict": model.state_dict(),
"best_sa": best_sa,
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
},
is_SA_best=is_best_sa,
data_state=current_state,
save_path=args.save_dir,
)
if wandb_logger:
wandb_logger.log_metrics(all_result)
# report result
val_pick_best_epoch = np.argmax(np.array(all_result["val_ta"]))
print(
"* State = {} best SA = {} Epoch = {}".format(
current_state,
all_result["val_ta"][val_pick_best_epoch],
val_pick_best_epoch + 1,
)
)
all_result = {}
all_result["train_ta"] = []
all_result["val_ta"] = []
all_result["best_sa"] = []
all_result["gen_gap"] = []
all_result["train_loss"] = []
all_result["val_loss"] = []
all_result["lr"] = []
best_sa = 0
start_epoch = 0
best_checkpoint = torch.load(
os.path.join(args.save_dir, "{}model_SA_best.pth.tar".format(current_state))
)
print("Loading Best Weight")
model.load_state_dict(best_checkpoint["state_dict"])
end_time = time.time() - start_time
print("Total time elapsed: {:.4f}s".format(end_time))
time_list.append(end_time)
if args.dataset == "restricted_imagenet":
CER.append(evaluate_cer(model, args, test_loader))
else:
CER.append(evaluate_cer(model, args))
if current_state != 1:
diff = (CER[current_state - 1] - CER[current_state - 2]) / 10000
CER_diff.append(diff)
print("CER diff = {}".format(diff))
# Reset LR to 0.1 after each state
for g in optimizer.param_groups:
g["lr"] = 0.1
print("LR reset to 0.1")
print(optimizer.state_dict()["param_groups"][0]["lr"])
test_tacc, _ = validate(test_loader, model, criterion)
print("Test Acc = {}".format(test_tacc))
wandb_logger.log_metrics({"Test/test_acc": test_tacc})
wandb_logger.log_metrics({"Test/CER": sum(CER)})
print("CER = {}".format(sum(CER)))
print("CER")
print(CER)
print("Final Test Accuracy: ")
print(test_tacc)
print("Anytime Relative Error")
print(CER_diff)
print("Time Elapsed")
print(time_list)
def train(train_loader, model, criterion, optimizer, epoch):
losses = AverageMeter()
top1 = AverageMeter()
# switch to train mode
model.train()
start = time.time()
for i, (image, target) in enumerate(train_loader):
if epoch < args.warmup:
warmup_lr(epoch, i + 1, optimizer, one_epoch_step=len(train_loader))
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
output_clean = model(image)
loss = criterion(output_clean, target)
optimizer.zero_grad()
loss.backward()
optimizer.step()
output = output_clean.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
end = time.time()
print(
"Epoch: [{0}][{1}/{2}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})\t"
"Time {3:.2f}".format(
epoch, i, len(train_loader), end - start, loss=losses, top1=top1
)
)
start = time.time()
print("train_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def validate(val_loader, model, criterion):
"""
Run evaluation
"""
losses = AverageMeter()
top1 = AverageMeter()
# switch to evaluate mode
model.eval()
for i, (image, target) in enumerate(val_loader):
image = image.clone().cuda()
target = target.clone().cuda()
# compute output
with torch.no_grad():
output = model(image)
loss = criterion(output, target)
output = output.float()
loss = loss.float()
# measure accuracy and record loss
prec1 = accuracy(output.data, target)[0]
losses.update(loss.item(), image.size(0))
top1.update(prec1.item(), image.size(0))
if i % args.print_freq == 0:
print(
"Test: [{0}/{1}]\t"
"Loss {loss.val:.4f} ({loss.avg:.4f})\t"
"Accuracy {top1.val:.3f} ({top1.avg:.3f})".format(
i, len(val_loader), loss=losses, top1=top1
)
)
print("valid_accuracy {top1.avg:.3f}".format(top1=top1))
return top1.avg, losses.avg
def save_checkpoint(
state, is_SA_best, data_state, save_path, filename="checkpoint.pth.tar"
):
filepath = os.path.join(save_path, str(data_state) + filename)
torch.save(state, filepath)
if is_SA_best:
shutil.copyfile(
filepath,
os.path.join(save_path, "{}model_SA_best.pth.tar".format(data_state)),
)
def warmup_lr(epoch, step, optimizer, one_epoch_step):
overall_steps = args.warmup * one_epoch_step
current_steps = epoch * one_epoch_step + step
lr = args.lr * current_steps / overall_steps
lr = min(lr, args.lr)
for p in optimizer.param_groups:
p["lr"] = lr
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def setup_seed(seed):
print("setup random seed = {}".format(seed))
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
if __name__ == "__main__":
main()
| 19,758 | 31.767828 | 107 | py |
Progressive-Pruning | Progressive-Pruning-main/generate_mask.py | import argparse
import os
import numpy as np
import torch
import torch.nn as nn
import torchvision
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.data import DataLoader, Subset
from torchvision import transforms
from torchvision.datasets import CIFAR10, CIFAR100
from models.ResNets import resnet20s
from tools.pruning_utils import *
from utils import setup_model
def generate_mask_(args, data, pruner, model_dir, save, state, gpu=0):
def prune_loop(
model,
loss,
pruner,
dataloader,
device,
sparsity,
scope,
epochs,
train_mode=False,
):
# Set model to train or eval mode
model.train()
if not train_mode:
model.eval()
# Prune model
for epoch in range(epochs):
pruner.score(model, loss, dataloader, device)
sparse = sparsity ** ((epoch + 1) / epochs)
pruner.mask(sparse, scope)
torch.cuda.set_device(int(gpu))
model = setup_model(args)
prune_conv(model)
print("loading model from {}".format(model_dir))
checkpoint = torch.load(model_dir, map_location="cpu")
if "state_dict" in checkpoint.keys():
checkpoint = checkpoint["state_dict"]
model.load_state_dict(checkpoint, strict=False)
model.cuda()
remain_weight = 0.8 ** state
if pruner == "mag":
print("Pruning with Magnitude")
pruner = Mag(masked_parameters(model))
prune_loop(
model,
None,
pruner,
None,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=10,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "snip":
print("Pruning with SNIP")
criterion = nn.CrossEntropyLoss()
data_loader = DataLoader(
data, batch_size=100, shuffle=False, num_workers=2, pin_memory=True
)
pruner = SNIP(masked_parameters(model))
prune_loop(
model,
criterion,
pruner,
data_loader,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "random":
print("Pruning with Magnitude")
pruner = Rand(masked_parameters(model))
prune_loop(
model,
None,
pruner,
None,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope=args.scope,
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
elif pruner == "GraSP":
print("Pruning with GraSP")
criterion = nn.CrossEntropyLoss()
trainset = torchvision.datasets.CIFAR10(
args.data, train=True, download=True, transform=transforms.ToTensor()
)
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2
)
pruner = GraSP(masked_parameters(model))
prune_loop(
model,
criterion,
pruner,
trainloader,
torch.device("cuda:{}".format(gpu)),
remain_weight,
scope="global",
epochs=1,
train_mode=True,
)
current_mask = extract_mask(model.state_dict())
check_sparsity_dict(current_mask)
torch.save(current_mask, save)
| 3,926 | 26.270833 | 81 | py |
Progressive-Pruning | Progressive-Pruning-main/tools/layers.py | import math
import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch.nn.parameter import Parameter
class Linear(nn.Linear):
def __init__(self, in_features, out_features, bias=True):
super(Linear, self).__init__(in_features, out_features, bias)
self.register_buffer("weight_mask", torch.ones(self.weight.shape))
def forward(self, input):
W = self.weight_mask * self.weight
b = self.bias
return F.linear(input, W, b)
class Conv2d(nn.Conv2d):
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
padding_mode="zeros",
):
super(Conv2d, self).__init__(
in_channels,
out_channels,
kernel_size,
stride,
padding,
dilation,
groups,
bias,
padding_mode,
)
self.register_buffer("weight_mask", torch.ones(self.weight.shape))
def _conv_forward(self, input, weight, bias):
if self.padding_mode != "zeros":
return F.conv2d(
F.pad(input, self._padding_repeated_twice, mode=self.padding_mode),
weight,
bias,
self.stride,
_pair(0),
self.dilation,
self.groups,
)
return F.conv2d(
input, weight, bias, self.stride, self.padding, self.dilation, self.groups
)
def forward(self, input):
W = self.weight_mask * self.weight
b = self.bias
return self._conv_forward(input, W, b)
| 1,785 | 25.656716 | 86 | py |
Progressive-Pruning | Progressive-Pruning-main/tools/pruning_utils.py | import copy
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
from tools.layers import Conv2d, Linear
__all__ = [
"masked_parameters",
"SynFlow",
"Mag",
"Taylor1ScorerAbs",
"Rand",
"SNIP",
"GraSP",
"check_sparsity_dict",
"extract_mask",
"prune_conv",
]
def masks(module):
r"""Returns an iterator over modules masks, yielding the mask."""
for name, buf in module.named_buffers():
if "mask" in name:
yield buf
def masked_parameters(model):
r"""Returns an iterator over models prunable parameters, yielding both the
mask and parameter tensors.
"""
for module in model.modules():
if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
for mask, param in zip(masks(module), module.parameters(recurse=False)):
if param is not module.bias:
yield mask, param
class Pruner:
def __init__(self, masked_parameters):
self.masked_parameters = list(masked_parameters)
self.scores = {}
def score(self, model, loss, dataloader, device):
raise NotImplementedError
def _global_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level globally."""
# # Set score for masked parameters to -inf
# for mask, param in self.masked_parameters:
# score = self.scores[id(param)]
# score[mask == 0.0] = -np.inf
# Threshold scores
global_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
k = int((1.0 - sparsity) * global_scores.numel())
if not k < 1:
threshold, _ = torch.kthvalue(global_scores, k)
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
def _local_mask(self, sparsity):
r"""Updates masks of model with scores by sparsity level parameter-wise."""
for mask, param in self.masked_parameters:
score = self.scores[id(param)]
k = int((1.0 - sparsity) * score.numel())
if not k < 1:
threshold, _ = torch.kthvalue(torch.flatten(score), k)
zero = torch.tensor([0.0]).to(mask.device)
one = torch.tensor([1.0]).to(mask.device)
mask.copy_(torch.where(score <= threshold, zero, one))
def mask(self, sparsity, scope):
r"""Updates masks of model with scores by sparsity according to scope."""
if scope == "global":
self._global_mask(sparsity)
if scope == "local":
self._local_mask(sparsity)
@torch.no_grad()
def apply_mask(self):
r"""Applies mask to prunable parameters."""
for mask, param in self.masked_parameters:
param.mul_(mask)
def alpha_mask(self, alpha):
r"""Set all masks to alpha in model."""
for mask, _ in self.masked_parameters:
mask.fill_(alpha)
# Based on https://github.com/facebookresearch/open_lth/blob/master/utils/tensor_utils.py#L43
def shuffle(self):
for mask, param in self.masked_parameters:
shape = mask.shape
perm = torch.randperm(mask.nelement())
mask = mask.reshape(-1)[perm].reshape(shape)
def invert(self):
for v in self.scores.values():
v.div_(v ** 2)
def stats(self):
r"""Returns remaining and total number of prunable parameters."""
remaining_params, total_params = 0, 0
for mask, _ in self.masked_parameters:
remaining_params += mask.detach().cpu().numpy().sum()
total_params += mask.numel()
return remaining_params, total_params
class SynFlow(Pruner):
def __init__(self, masked_parameters):
super(SynFlow, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
@torch.no_grad()
def linearize(model):
# model.double()
signs = {}
for name, param in model.state_dict().items():
signs[name] = torch.sign(param)
param.abs_()
return signs
@torch.no_grad()
def nonlinearize(model, signs):
# model.float()
for name, param in model.state_dict().items():
param.mul_(signs[name])
signs = linearize(model)
(data, _) = next(iter(dataloader))
input_dim = list(data[0, :].shape)
input = torch.ones([1] + input_dim).to(
device
) # , dtype=torch.float64).to(device)
output = model(input)
torch.sum(output).backward()
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p).detach().abs_()
p.grad.data.zero_()
nonlinearize(model, signs)
class Mag(Pruner):
def __init__(self, masked_parameters):
super(Mag, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.data).detach().abs_()
class Rand(Pruner):
def __init__(self, masked_parameters):
super(Rand, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.randn_like(p)
# Based on https://github.com/mi-lad/snip/blob/master/snip.py#L18
class SNIP(Pruner):
def __init__(self, masked_parameters):
super(SNIP, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
# allow masks to have gradient
for m, _ in self.masked_parameters:
m.requires_grad = True
# compute gradient
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
loss(output, target).backward()
# calculate score |g * theta|
for m, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(m.grad).detach().abs_()
p.grad.data.zero_()
m.grad.data.zero_()
m.requires_grad = False
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.sum(all_scores)
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
def GraSP_fetch_data(dataloader, num_classes, samples_per_class):
datas = [[] for _ in range(num_classes)]
labels = [[] for _ in range(num_classes)]
mark = dict()
dataloader_iter = iter(dataloader)
while True:
inputs, targets = next(dataloader_iter)
for idx in range(inputs.shape[0]):
x, y = inputs[idx : idx + 1], targets[idx : idx + 1]
category = y.item()
if len(datas[category]) == samples_per_class:
mark[category] = True
continue
datas[category].append(x)
labels[category].append(y)
if len(mark) == num_classes:
break
X, y = torch.cat([torch.cat(_, 0) for _ in datas]), torch.cat(
[torch.cat(_) for _ in labels]
).view(-1)
return X, y
# Based on https://github.com/alecwangcq/GraSP/blob/master/pruner/GraSP.py#L49
class GraSP(Pruner):
def __init__(self, masked_parameters):
super(GraSP, self).__init__(masked_parameters)
self.temp = 200
self.eps = 1e-10
def score(self, model, loss, dataloader, device):
# first gradient vector without computational graph
stopped_grads = 0
data, target = GraSP_fetch_data(dataloader, 10, 10)
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=False
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
stopped_grads += flatten_grads
# second gradient vector with computational graph
data, target = GraSP_fetch_data(dataloader, 10, 10)
data, target = data.to(device), target.to(device)
output = model(data) / self.temp
L = loss(output, target)
grads = torch.autograd.grad(
L, [p for (_, p) in self.masked_parameters], create_graph=True
)
flatten_grads = torch.cat([g.reshape(-1) for g in grads if g is not None])
gnorm = (stopped_grads * flatten_grads).sum()
gnorm.backward()
# calculate score Hg * theta (negate to remove top percent)
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p.data).detach()
p.grad.data.zero_()
# normalize score
all_scores = torch.cat([torch.flatten(v) for v in self.scores.values()])
norm = torch.abs(torch.sum(all_scores)) + self.eps
for _, p in self.masked_parameters:
self.scores[id(p)].div_(norm)
class Taylor1ScorerAbs(Pruner):
def __init__(self, masked_parameters):
super(Taylor1ScorerAbs, self).__init__(masked_parameters)
def score(self, model, loss, dataloader, device):
for batch_idx, (data, target) in enumerate(dataloader):
data, target = data.to(device), target.to(device)
output = model(data)
loss(output, target).backward()
for _, p in self.masked_parameters:
self.scores[id(p)] = torch.clone(p.grad * p).detach().abs_()
p.grad.data.zero_()
def check_sparsity_dict(model_dict):
sum_list = 0
zero_sum = 0
for key in model_dict.keys():
if "mask" in key:
sum_list = sum_list + float(model_dict[key].nelement())
zero_sum = zero_sum + float(torch.sum(model_dict[key] == 0))
print("* remain weight = ", 100 * (1 - zero_sum / sum_list), "%")
return 100 * (1 - zero_sum / sum_list)
def extract_mask(model_dict):
new_dict = {}
for key in model_dict.keys():
if "mask" in key:
new_dict[key] = copy.deepcopy(model_dict[key])
return new_dict
def prune_conv(model):
for name, module in reversed(model._modules.items()):
if len(list(module.children())) > 0:
model._modules[name] = prune_conv(model=module)
if isinstance(module, nn.Conv2d):
bias = True
if module.bias == None:
bias = False
layer_new = Conv2d(
module.in_channels,
module.out_channels,
module.kernel_size,
module.stride,
padding=module.padding,
dilation=module.dilation,
groups=module.groups,
bias=bias,
)
model._modules[name] = layer_new
return model
| 11,187 | 31.618076 | 97 | py |
Progressive-Pruning | Progressive-Pruning-main/pruner/pruner.py | import copy
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune
__all__ = [
"pruning_model",
"pruning_model_random",
"prune_model_custom",
"remove_prune",
"extract_mask",
"reverse_mask",
"check_sparsity",
"check_sparsity_dict",
]
# Pruning operation
def pruning_model(model, px):
print("Apply Unstructured L1 Pruning Globally (all conv layers)")
parameters_to_prune = []
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m, "weight"))
parameters_to_prune = tuple(parameters_to_prune)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.L1Unstructured,
amount=px,
)
def pruning_model_random(model, px):
print("Apply Unstructured Random Pruning Globally (all conv layers)")
parameters_to_prune = []
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
parameters_to_prune.append((m, "weight"))
parameters_to_prune = tuple(parameters_to_prune)
prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.RandomUnstructured,
amount=px,
)
def prune_model_custom(model, mask_dict):
print("Pruning with custom mask (all conv layers)")
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
mask_name = name + ".weight_mask"
if mask_name in mask_dict.keys():
prune.CustomFromMask.apply(
m, "weight", mask=mask_dict[name + ".weight_mask"]
)
else:
print("Can not fing [{}] in mask_dict".format(mask_name))
def remove_prune(model):
print("Remove hooks for multiplying masks (all conv layers)")
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
prune.remove(m, "weight")
# Mask operation function
def extract_mask(model_dict):
new_dict = {}
for key in model_dict.keys():
if "mask" in key:
new_dict[key] = copy.deepcopy(model_dict[key])
return new_dict
def reverse_mask(mask_dict):
new_dict = {}
for key in mask_dict.keys():
new_dict[key] = 1 - mask_dict[key]
return new_dict
# Mask statistic function
def check_sparsity(model):
sum_list = 0
zero_sum = 0
for name, m in model.named_modules():
if isinstance(m, nn.Conv2d):
sum_list = sum_list + float(m.weight.nelement())
zero_sum = zero_sum + float(torch.sum(m.weight == 0))
if zero_sum:
remain_weight_ratie = 100 * (1 - zero_sum / sum_list)
print("* remain weight ratio = ", 100 * (1 - zero_sum / sum_list), "%")
else:
print("no weight for calculating sparsity")
remain_weight_ratie = None
return remain_weight_ratie
def check_sparsity_dict(state_dict):
sum_list = 0
zero_sum = 0
for key in state_dict.keys():
if "mask" in key:
sum_list += float(state_dict[key].nelement())
zero_sum += float(torch.sum(state_dict[key] == 0))
if zero_sum:
remain_weight_ratie = 100 * (1 - zero_sum / sum_list)
print("* remain weight ratio = ", 100 * (1 - zero_sum / sum_list), "%")
else:
print("no weight for calculating sparsity")
remain_weight_ratie = None
return remain_weight_ratie
| 3,421 | 24.537313 | 79 | py |
Progressive-Pruning | Progressive-Pruning-main/models/ResNet.py | import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = [
"ResNet",
"resnet18",
"resnet34",
"resnet50",
"resnet101",
"resnet152",
"resnext50_32x4d",
"resnext101_32x8d",
"wide_resnet50_2",
"wide_resnet101_2",
]
model_urls = {
"resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
"resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
"resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
"resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
"resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
"resnext50_32x4d": "https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth",
"resnext101_32x8d": "https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth",
"wide_resnet50_2": "https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth",
"wide_resnet101_2": "https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth",
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(
in_planes,
out_planes,
kernel_size=3,
stride=stride,
padding=dilation,
groups=groups,
bias=False,
dilation=dilation,
)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError("BasicBlock only supports groups=1 and base_width=64")
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
__constants__ = ["downsample"]
def __init__(
self,
inplanes,
planes,
stride=1,
downsample=None,
groups=1,
base_width=64,
dilation=1,
norm_layer=None,
):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.0)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(
self,
block,
layers,
num_classes=1000,
zero_init_residual=False,
groups=1,
width_per_group=64,
replace_stride_with_dilation=None,
norm_layer=None,
imagenet=False,
):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError(
"replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation)
)
self.groups = groups
self.base_width = width_per_group
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
if not imagenet:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.Identity()
else:
self.conv1 = nn.Conv2d(
3, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False
)
self.bn1 = nn.BatchNorm2d(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(
block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0]
)
self.layer3 = self._make_layer(
block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1]
)
self.layer4 = self._make_layer(
block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2]
)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(
block(
self.inplanes,
planes,
stride,
downsample,
self.groups,
self.base_width,
previous_dilation,
norm_layer,
)
)
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(
block(
self.inplanes,
planes,
groups=self.groups,
base_width=self.base_width,
dilation=self.dilation,
norm_layer=norm_layer,
)
)
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.normalize(x)
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet18", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet34", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet("resnet50", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet101", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet(
"resnet152", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs
)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 4
return _resnet(
"resnext50_32x4d", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["groups"] = 32
kwargs["width_per_group"] = 8
return _resnet(
"resnext101_32x8d", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet50_2", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs
)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs["width_per_group"] = 64 * 2
return _resnet(
"wide_resnet101_2", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs
)
| 14,716 | 32.754587 | 107 | py |
Progressive-Pruning | Progressive-Pruning-main/models/VGG.py | import torch
import torch.nn as nn
from advertorch.utils import NormalizeByChannelMeanStd
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = [
"VGG",
"vgg11",
"vgg11_bn",
"vgg13",
"vgg13_bn",
"vgg16",
"vgg16_bn",
"vgg19_bn",
"vgg19",
]
model_urls = {
"vgg11": "https://download.pytorch.org/models/vgg11-bbd30ac9.pth",
"vgg13": "https://download.pytorch.org/models/vgg13-c768596a.pth",
"vgg16": "https://download.pytorch.org/models/vgg16-397923af.pth",
"vgg19": "https://download.pytorch.org/models/vgg19-dcbb9e9d.pth",
"vgg11_bn": "https://download.pytorch.org/models/vgg11_bn-6002323d.pth",
"vgg13_bn": "https://download.pytorch.org/models/vgg13_bn-abd245e5.pth",
"vgg16_bn": "https://download.pytorch.org/models/vgg16_bn-6c64b313.pth",
"vgg19_bn": "https://download.pytorch.org/models/vgg19_bn-c79401a0.pth",
}
class VGG(nn.Module):
def __init__(self, features, num_classes=10, init_weights=True):
super(VGG, self).__init__()
self.features = features
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.classifier = nn.Linear(512, num_classes)
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
if init_weights:
self._initialize_weights()
def forward(self, x):
x = self.normalize(x)
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.constant_(m.bias, 0)
def make_layers(cfg, batch_norm=False):
layers = []
in_channels = 3
for v in cfg:
if v == "M":
layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
else:
conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1)
if batch_norm:
layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)]
else:
layers += [conv2d, nn.ReLU(inplace=True)]
in_channels = v
return nn.Sequential(*layers)
cfgs = {
"A": [64, "M", 128, "M", 256, 256, "M", 512, 512, "M", 512, 512],
"B": [64, 64, "M", 128, 128, "M", 256, 256, "M", 512, 512, "M", 512, 512],
"D": [
64,
64,
"M",
128,
128,
"M",
256,
256,
256,
"M",
512,
512,
512,
"M",
512,
512,
512,
],
"E": [
64,
64,
"M",
128,
128,
"M",
256,
256,
256,
256,
"M",
512,
512,
512,
512,
"M",
512,
512,
512,
512,
],
}
def _vgg(arch, cfg, batch_norm, pretrained, progress, **kwargs):
if pretrained:
kwargs["init_weights"] = False
model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
model.load_state_dict(state_dict)
return model
def vgg11(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") from
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg11", "A", False, pretrained, progress, **kwargs)
def vgg11_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 11-layer model (configuration "A") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg11_bn", "A", True, pretrained, progress, **kwargs)
def vgg13(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg13", "B", False, pretrained, progress, **kwargs)
def vgg13_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 13-layer model (configuration "B") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg13_bn", "B", True, pretrained, progress, **kwargs)
def vgg16(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg16", "D", False, pretrained, progress, **kwargs)
def vgg16_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 16-layer model (configuration "D") with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg16_bn", "D", True, pretrained, progress, **kwargs)
def vgg19(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration "E")
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg19", "E", False, pretrained, progress, **kwargs)
def vgg19_bn(pretrained=False, progress=True, **kwargs):
r"""VGG 19-layer model (configuration 'E') with batch normalization
`"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _vgg("vgg19_bn", "E", True, pretrained, progress, **kwargs)
| 7,591 | 32.59292 | 113 | py |
Progressive-Pruning | Progressive-Pruning-main/models/ResNets.py | """
Properly implemented ResNet-s for CIFAR10 as described in paper [1].
The implementation and structure of this file is hugely influenced by [2]
which is implemented for ImageNet and doesn't have option A for identity.
Moreover, most of the implementations on the web is copy-paste from
torchvision's resnet and has wrong number of params.
Proper ResNet-s for CIFAR10 (for fair comparision and etc.) has following
number of layers and parameters:
name | layers | params
ResNet20 | 20 | 0.27M
ResNet32 | 32 | 0.46M
ResNet44 | 44 | 0.66M
ResNet56 | 56 | 0.85M
ResNet110 | 110 | 1.7M
ResNet1202| 1202 | 19.4m
which this implementation indeed has.
Reference:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
[2] https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py
If you use this implementation in you work, please don't forget to mention the
author, Yerlan Idelbayev.
Borrow from : https://github.com/akamaster/pytorch_resnet_cifar10.git
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
from advertorch.utils import NormalizeByChannelMeanStd
from torch.autograd import Variable
__all__ = [
"ResNets",
"resnet20s",
"resnet32s",
"resnet44s",
"resnet56s",
"resnet110s",
"resnet1202s",
]
def _weights_init(m):
classname = m.__class__.__name__
# print(classname)
if isinstance(m, nn.Linear) or isinstance(m, nn.Conv2d):
init.kaiming_normal_(m.weight)
class LambdaLayer(nn.Module):
def __init__(self, lambd):
super(LambdaLayer, self).__init__()
self.lambd = lambd
def forward(self, x):
return self.lambd(x)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1, option="A"):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(
in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(
planes, planes, kernel_size=3, stride=1, padding=1, bias=False
)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != planes:
if option == "A":
"""
For CIFAR10 ResNet paper uses option A.
"""
self.shortcut = LambdaLayer(
lambda x: F.pad(
x[:, :, ::2, ::2],
(0, 0, 0, 0, planes // 4, planes // 4),
"constant",
0,
)
)
elif option == "B":
self.shortcut = nn.Sequential(
nn.Conv2d(
in_planes,
self.expansion * planes,
kernel_size=1,
stride=stride,
bias=False,
),
nn.BatchNorm2d(self.expansion * planes),
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNets(nn.Module):
def __init__(self, block, num_blocks, num_classes=10):
super(ResNets, self).__init__()
self.in_planes = 16
print("The normalize layer is contained in the network")
self.normalize = NormalizeByChannelMeanStd(
mean=[0.4914, 0.4822, 0.4465], std=[0.2470, 0.2435, 0.2616]
)
self.conv1 = nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(16)
self.layer1 = self._make_layer(block, 16, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 32, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 64, num_blocks[2], stride=2)
self.fc = nn.Linear(64, num_classes)
self.apply(_weights_init)
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1] * (num_blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward(self, x):
x = self.normalize(x)
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = F.avg_pool2d(out, out.size()[3])
out = out.view(out.size(0), -1)
out = self.fc(out)
return out
def resnet20s(num_classes=10):
return ResNets(BasicBlock, [3, 3, 3], num_classes=num_classes)
def resnet32s(num_classes=10):
return ResNets(BasicBlock, [5, 5, 5], num_classes=num_classes)
def resnet44s(num_classes=10):
return ResNets(BasicBlock, [7, 7, 7], num_classes=num_classes)
def resnet56s(num_classes=10):
return ResNets(BasicBlock, [9, 9, 9], num_classes=num_classes)
def resnet110s(num_classes=10):
return ResNets(BasicBlock, [18, 18, 18], num_classes=num_classes)
def resnet1202s(num_classes=10):
return ResNets(BasicBlock, [200, 200, 200], num_classes=num_classes)
| 5,404 | 30.794118 | 85 | py |
Progressive-Pruning | Progressive-Pruning-main/wb/wandb_logger.py | """
Utilities for Weights & Biases logging.
"""
from pathlib import Path
from typing import Union
import PIL
from matplotlib.pyplot import Figure
from PIL.Image import Image
from torch import Tensor
__all__ = ["WandBLogger"]
class WandBLogger:
"""
The `WandBLogger` provides an easy integration with
Weights & Biases logging. Each monitored metric is automatically
logged to a dedicated Weights & Biases project dashboard.
.. note::
The wandb log files are placed by default in "./wandb/" unless specified.
"""
def __init__(
self,
project_name: str = "APP",
run_name: str = "Prune1",
save_code: bool = True,
config: object = None,
dir: Union[str, Path] = None,
model: object = None,
params: dict = None,
) -> None:
"""
Creates an instance of the `WandBLogger`.
:param project_name: Name of the W&B project.
:param run_name: Name of the W&B run.
:param save_code: Saves the main training script to W&B.
:param dir: Path to the local log directory for W&B logs to be saved at.
:param config: Syncs hyper-parameters and config values used to W&B.
:param params: All arguments for wandb.init() function call.
Visit https://docs.wandb.ai/ref/python/init to learn about all
wand.init() parameters.
"""
self.project_name = project_name
self.run_name = run_name
self.save_code = save_code
self.dir = dir
self.config = config
self.model = model
self.params = params
self._import_wandb()
self._args_parse()
self._before_job()
def _import_wandb(self):
try:
import wandb
assert hasattr(wandb, "__version__")
except (ImportError, AssertionError):
raise ImportError('Please run "pip install wandb" to install wandb')
self.wandb = wandb
def _args_parse(self):
self.init_kwargs = {
"project": self.project_name,
"name": self.run_name,
"save_code": self.save_code,
"dir": self.dir,
"config": self.config,
}
if self.params:
self.init_kwargs.update(self.params)
def _before_job(self):
if self.wandb is None:
self.import_wandb()
if self.init_kwargs:
self.wandb.init(**self.init_kwargs)
else:
self.wandb.init()
if self.model is not None:
self.wandb.watch(self.model)
def log_metrics(
self,
log_dict: dict = None,
img: Union[Image, Figure, str, Path] = None,
curve: object = None,
) -> None:
for key, value in log_dict.items():
if isinstance(value, (int, float, Tensor)):
self.wandb.log({key: value})
else:
if "ARE" in key:
curr_val = value
else:
curr_val = value[-1]
if isinstance(curr_val, (int, float, Tensor)):
if "train" in key.lower():
key = "Train/" + key
self.wandb.log({key: curr_val})
if "val" in key.lower():
key = "Val/" + key
self.wandb.log({key: curr_val})
if "test" in key.lower():
key = "Test/" + key
self.wandb.log({key: curr_val})
self.wandb.log({key: curr_val})
else:
return
if img is not None:
if isinstance(img, (Image, Figure)):
self.wandb.log({"Media/Training Curve": self.wandb.Image(img)})
if isinstance(img, (str, Path)):
img_pil = PIL.Image.open(img)
self.wandb.log({"Media/Training Curve": self.wandb.Image(img_pil)})
if curve is not None:
if isinstance(curve, (object)):
self.wandb.log({"Training Curves": curve})
| 4,146 | 30.416667 | 87 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/baseline_convex_fair_regression.py | import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import matplotlib.pyplot as plt
from tqdm import tqdm
import time
import fairness_metrics
import data_loader
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://arxiv.org/pdf/1706.02409.pdf
An example usage:
python .\baseline_convex_fair_regression.py --seed {} --fairness {} --dataset {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# get data
X0, Y0 = ds.get_data_for_A(0)
X0 = X0.numpy()
Y0 = Y0.numpy()
X1, Y1 = ds.get_data_for_A(1)
X1 = X1.numpy()
Y1 = Y1.numpy()
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
# run the test for various lambdas
for lambda_ in lambda_candidates:
start_time = time.time()
if args.fairness == 'group':
D = np.exp(-(Y1-Y0.T)**2)
n1n0 = D.shape[0]*D.shape[1]
theta = cp.Variable((X1.shape[1],1))
theta0 = cp.Variable()
objective = cp.Minimize(cp.sum((Y0-theta0-X0@theta)**2)/Y0.shape[0]+\
cp.sum((Y1-theta0-X1@theta)**2)/Y1.shape[0]+\
lambda_*(cp.sum(cp.multiply(D, (X1@theta - (X0@theta).T)))/n1n0)**2 +\
args.gamma*(theta0**2 + cp.sum_squares(theta)))
problem = cp.Problem(objective, [])
problem.solve(solver = cp.GUROBI, verbose=False)
else:
D = np.exp(-(Y1-Y0.T)**2)
n1n0 = D.shape[0]*D.shape[1]
theta = cp.Variable((X1.shape[1],1))
theta0 = cp.Variable()
objective = cp.Minimize(cp.sum((Y0-theta0-X0@theta)**2)/Y0.shape[0]+\
cp.sum((Y1-theta0-X1@theta)**2)/Y1.shape[0]+\
lambda_*(cp.sum(cp.multiply(D, (X1@theta - (X0@theta).T)**2))/n1n0) +\
args.gamma*(theta0**2 + cp.sum_squares(theta)))
problem = cp.Problem(objective, [])
problem.solve(solver = cp.GUROBI, verbose=False)
duration = time.time()-start_time
theta = torch.tensor(theta.value).float()
theta0 = torch.tensor(theta0.value).float()
predict = lambda X: theta0 + X@theta
# metrics on train set
y_hat = predict(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
train_results['time'] = duration
test_results['time'] = duration
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results_regression/cvx_regression_baseline/{}_cvx-bl-{}_{}_train_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
df_test.to_csv('results_regression/cvx_regression_baseline/{}_cvx-bl-{}_{}_test_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
def run_sgd(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrime':
ds = data_loader.CommunitiesCrime()
if args.dataset == 'BarPass':
ds = data_loader.BarPass()
if args.dataset == 'StudentsMath':
ds = data_loader.StudentPerformance(subject='Math')
if args.dataset == 'StudentsPortugese':
ds = data_loader.StudentPerformance(subject='Portugese')
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'R2' : fairness_metrics.R2
}
# storage of results
results_train = []
results_test = []
# get data
X0, Y0 = ds.get_data_for_A(0)
X1, Y1 = ds.get_data_for_A(1)
X, Y, A = ds.get_data()
k = X.shape[1]
X_test, Y_test, A_test = ds.get_test_data()
# run the test for various lambdas
for lambda_ in lambda_candidates:
D = torch.exp(-(Y1-Y0.T)**2)
objective_group = lambda theta0, theta: (torch.sum((Y0-theta0-X0@theta)**2)+torch.sum((Y1-theta0-X1@theta)**2))/(D.shape[0]+D.shape[1])+\
lambda_*(torch.mean(D*(X1@theta - (X0@theta).T)))**2
objective_individual = lambda theta0, theta: (torch.sum((Y0-theta0-X0@theta)**2)+\
torch.sum((Y1-theta0-X1@theta)**2))/(D.shape[0]+D.shape[1])+\
lambda_*(torch.mean(D*(X1@theta - (X0@theta).T)**2))
objective = objective_group if args.fairness=='group' else objective_individual
theta_0 = torch.rand(1)
theta = torch.rand([k, 1])
theta_0.requires_grad = True
theta.requires_grad = True
optimizer = torch.optim.Adam([theta_0, theta])
losses = []
for epoch in tqdm(range(5000)):
optimizer.zero_grad()
loss = objective(theta_0, theta)
losses.append(loss.item())
loss.backward()
optimizer.step()
predict = lambda X: theta_0 + X@theta
# metrics on train set
y_hat = predict(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/cvx_regression_baseline/{}_cvx-bl-{}_{}_train_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
df_test.to_csv('results/cvx_regression_baseline/{}_cvx-bl-{}_{}_test_{}.csv'.format(args.dataset, \
args.gamma, \
args.fairness, args.seed))
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-2, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=5, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrime', 'BarPass', 'StudentsMath', 'StudentsPortugese'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
parser.add_argument('--gamma', help='Weight of L2 regularizer', type=float, default=0)
parser.add_argument('--fairness', help='Fairness Type to use', choices=['group', 'individual'])
args = parser.parse_args()
run(args) | 10,008 | 39.522267 | 145 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/fair_training.py | # fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of stochastic gradient descent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm 2, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss | 5,881 | 45.314961 | 189 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/fairness_metrics.py | import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
Compute type Sinkhorn divergence between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
def rbf(diff, diagzero=True):
sigma = 0.1
if diagzero:
return torch.exp(((diff*(1-torch.eye(diff.shape[0])))**2)/(2*sigma**2))
else:
return torch.exp((diff**2)/(2*sigma**2))
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
R2 (torch.Tensor): mean squared error
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
| 9,848 | 34.428058 | 133 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/benchmark.py | # benchmark.py
# file with functions for running experiment
import fair_training
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
import time
def convergence_plotter(regloss, fairloss, lambda_):
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.plot(regloss)
plt.title('Regression Loss')
plt.xlabel('Iteration')
plt.ylabel('Regression Loss')
plt.subplot(132)
plt.plot(fairloss)
plt.title('Fairness Loss')
plt.xlabel('Iteration')
plt.ylabel('Fairness Loss')
plt.subplot(133)
plt.plot(lambda_*np.array(fairloss)+np.array(regloss))
plt.title('Overall Loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementaion train and test function for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def train_test_fair_learning(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
y_hat = torch.round(torch.sigmoid(model(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
def train_test_fair_learning_regression(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning_regression(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
y_hat = model(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = model(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
| 7,129 | 40.213873 | 206 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/data_loader.py | # data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functinality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
if X_test is not None:
self.X_test = to_tensor(X_test, device) if use_tensor else X_test
self.A_test = to_tensor(A_test, device) if use_tensor else A_test
self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
self.info = info
self.min_max_scaler = None
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get propoertions of protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
| 16,841 | 39.681159 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/zafar_classification.py | # Baseline 1: https://arxiv.org/pdf/1706.02409.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
from zafar_method import funcs_disp_mist
from zafar_method.utils import *
import fairness_metrics
import data_loader
from zafar_method import utils
import numpy as np
from tqdm import tqdm
import cvxpy as cp
from collections import namedtuple
from sklearn.metrics import log_loss
from zafar_method import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import pickle
from copy import deepcopy
import os, sys
# from generate_synthetic_data import *
from zafar_method import utils as ut
from zafar_method import funcs_disp_mist as fdm
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides implementation of http://proceedings.mlr.press/v54/zafar17a/zafar17a.pdf.
gamma parameter is the accuracy fairness tradeoff of the model.
An example usage is python zafar_classification.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
gamma_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=0)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
x_train = X.cpu().detach().numpy()
Y_train = Y.cpu().detach().numpy().flatten()
a_train = A.cpu().detach().numpy().flatten()
x_test = X_test.cpu().detach().numpy()
y_test = Y_test.cpu().detach().numpy().flatten()
a_test = A_test.cpu().detach().numpy().flatten()
loss_function = "logreg" # perform the experiments with logistic regression
Y_test_ = y_test.copy()
Y_train_ = Y_train.copy()
Y_test_[y_test == 0] = -1
Y_train_[Y_train_ == 0] = -1
# run the test for various lambdas
y_train = Y_train_
y_test = Y_test_
x_control_train = {"s1": a_train}
x_control_test = {"s1": a_test}
cons_params = None # constraint parameters, will use them later
EPS = 1e-6
for gamma in gamma_candidates:
print('Training Zafar method, for gamma: {}/{}, seed:{}'.format(gamma, args.nlambda, args.seed))
start_time = time.time()
# mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
apply_fairness_constraints = 0 # set this flag to one since we want to optimize accuracy subject to fairness constraints
apply_accuracy_constraint = 1
sep_constraint = 0
# for m in mult_range:
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
# for s_attr in sensitive_attrs_to_cov_thresh.keys():
# for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
# for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
# sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
sensitive_attrs_to_cov_thresh = {"s1":0}
w = train_model(x_train, y_train, x_control_train, lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], sensitive_attrs_to_cov_thresh, gamma)
# y_test_predicted = np.sign(np.dot(x_test, w))
# correct_answers = (y_test_predicted == y_test).astype(int) # will have 1 when the prediction and the actual label match
# accuracy = float(sum(correct_answers)) / float(len(correct_answers))
# y_test_predict[y_test_predict == -1] = 0
# w = torch.tensor(w).float()
# theta0 = torch.tensor(w).float()
stop_time = time.time()
predict = lambda X: torch.tensor(np.maximum(np.sign(np.dot(X.cpu().detach().numpy(), w)), 0)).float()
# metrics on train set
y_hat = predict(X).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = gamma
train_results['time'] = stop_time - start_time
test_results['lambda_'] = gamma
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/zafar/{}_zafar_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/zafar/{}_zafar_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'method':'zafar',
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'a_inside_x': False
}
with open('results/zafar/{}_zafar_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=25)
args = parser.parse_args()
run(args) | 8,066 | 42.139037 | 195 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/MMD_fair.py | # fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
import time
from tqdm import tqdm
# +---------------------------------+
# | Algorithm 1: Gradient Descent |
# +---------------------------------+
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def mmd_gradient_descent(X, Y, A, model, predict, reg_loss, fair_loss, params, lr, N_iterates, lambda_, verbose=False, log=False, logfairloss=None, lr_decay=1, **kwargs):
'''
Train model using Algorithm 1, which uses simple gradient descent.
Args:
X (torch.Tensor): X data
Y (torch.Tensor): Y data
A (torch.Tensor): A data
predict (fct handle): Prediction function handle, maps X-->Y_hat
reg_loss (fct handle): Regression Loss function handle, maps Y_hat, Y-->L_reg
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lr (float): SGD Learning Rate
N_iterates (int): Number of iterates for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
psi (fct handle, optional):Transformation function maps from Y_hat, Y --> score, fair_loss is computed on score
verbose (bool, optional): Verbosity
log (bool, optional): Return training path
logfairloss (optional): Sinkhorn divergence
Returns:
Trainig Loss over Training if log=True, but changes params
'''
optimizer = optim.SGD(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
# optimizer = optim.Adam(params)
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
epoch_reg_loss = []
epoch_fair_loss = []
for iterate in tqdm(range(N_iterates)):
# zero grad accumulator
optimizer.zero_grad()
# predict
y_hat = predict(X)
y_hat_first_layer = model.first_layer(X)
L_reg = criterion(y_hat, Y)
# y_hat = torch.sigmoid(y_hat)
# compute regression and fairness loss
y_hat_1 = y_hat_first_layer[A.squeeze()==1]
y_hat_0 = y_hat_first_layer[A.squeeze()==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# all_linear1_params = torch.cat([x.view(-1) for x in model.linear1.parameters()])
# all_linear2_params = torch.cat([x.view(-1) for x in model.linear2.parameters()])
# W_froben = torch.norm(all_linear1_params, 2) ** 2
# V_froben = torch.norm(all_linear2_params, 2) ** 2
# overall loss
# reg_weight = 0.1
loss = L_reg + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L_reg.data.item(), L_fair.data.item()))
if log:
epoch_fair_loss.append(L_fair.data.item())
epoch_reg_loss.append(L_reg.data.item())
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
#scheduler.step()
return epoch_reg_loss, epoch_fair_loss
def mmd_fair_traintest(ds, model, reg_loss, fair_loss, lr, n_iterates, lambda_, metrics, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, lr_decay=1, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
regloss, fairloss = mmd_gradient_descent(X, Y, A, model, model.forward,
reg_loss,
fair_loss,
model.parameters(), lr, n_iterates,
lambda_,
logdata = ds.get_log_data() if plot_convergence else None, logfairloss=logfairloss, lr_decay=lr_decay, **kwargs)
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training seed
stop_time = time.time()
y_hat = torch.round(torch.sigmoid(model.forward(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = torch.round(torch.sigmoid(model.forward(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return train_results, test_results
| 6,629 | 44.102041 | 187 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/models.py | # models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneta et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of Oneta et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.sigmoid_ = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean() | 2,012 | 29.5 | 97 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/fair_KDE.py | # Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import pickle
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from Fair_KDE.models import Classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from Fair_KDE.dataloader import CustomDataset
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of https://proceedings.neurips.cc/paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
An example usage python fair_KDE.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
# act on experiment parameters:
def run(args):
# act on experiment parameters:
seed = args.seed
data_loader.set_seed(args.seed)
##### Other training hyperparameters #####
lr = 2e-4
n_epochs = 200
lr_decay = 1.0
batch_size = 2048
n_epochs = 500
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
batch_size = 128
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
lr = 5e-4
batch_size = 2048
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
lr = 2e-4
batch_size = 2048
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
lr = 5e-4
n_batch = 2048
if args.dataset == 'Adult':
ds = data_loader.Adult(0)
train_test_split_fin = 1
batch_size = 2048
lr = 1e-1
lr_decay - 0.98
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
batch_size = 128
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 16 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cpu')
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
if args.dataset == 'Adult':
X_train, Y_train, Z_train, X_test, Y_test, Z_test = ds.get_adult_data()
else:
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
time_track = []
for lambda_ in lambda_candidates:
print('Training FKDE method, for lambda: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
start_time = time.time()
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
# if (i + 1) % 10 == 0 or (i + 1) == len(generator):
# print('Lambda:{}, Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(lambda_, epoch+1, n_epochs,
# i+1, len(generator),
# cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
stop_time = time.time()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
print(train_results)
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/FKDE/{}_FKDE_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/FKDE/{}_FKDE_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'method':'FKDE',
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'algorihtm':'adam',
'L':'BCE_cross_entropy',
'lr_decay':lr_decay,
'a_inside_x': True
}
with open('results/FKDE/{}_FKDE_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'LawSchool', 'Compas', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
args = parser.parse_args()
run(args) | 14,239 | 40.037464 | 153 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/fairness_metrics.py | import torch
import cvxpy as cp
import numpy as np
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).mean()
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).mean())
def energy_distance_forloop(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
d11 = torch.tensor(0.)
d12 = torch.tensor(0.)
d22 = torch.tensor(0.)
for y_ in y1:
d11 += (y_-y1).abs().mean()
d12 += (y_-y2).abs().mean()
d11 = d11/(y1.shape[0])
d12 = d12/(y1.shape[0])
for y_ in y2:
d22 += (y_-y2).abs().mean()
d22 = d22/(y2.shape[0])
return 2*d12-d11-d22
# +------------------------------------------+
# | Metric 2: Wasserstein Distance |
# +------------------------------------------+
def W1dist(y1,y2):
'''
Compute type 1 Wasserstein distance between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1))
C_np = C.data.numpy()
# solve OT problem
T = cp.Variable(C_np.shape)
ones_1 = np.ones((C_np.shape[0], 1))
ones_2 = np.ones((C_np.shape[1], 1))
objective = cp.Minimize(cp.sum(cp.multiply(C_np,T)))
constraints = [
T >=0,
T@ones_2==ones_1/len(ones_1),
T.T@ones_1==ones_2/len(ones_2)
]
problem = cp.Problem(objective, constraints)
problem.solve(solver=cp.GUROBI)
# objective value for gradient computation
return (torch.Tensor(T.value)*C).sum()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Regression Metric 1: MSE |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
# print('Accuracy of the network on the 10000 test images: %d %%' % (
# 100 * correct / total))
return torch.tensor(correct / total * 100)
| 8,196 | 30.771318 | 100 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/algorithm.py | import random
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
def train_fair_classifier(dataset, net, optimizer, lr_scheduler, fairness, lambda_, h, delta, device, n_epochs=200, batch_size=2048, seed=0):
# Retrieve train/test splitted pytorch tensors for index=split
train_tensors, test_tensors = dataset.get_dataset_in_tensor()
X_train, Y_train, Z_train, XZ_train = train_tensors
X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
data_loader = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# An empty dataframe for logging experimental results
df = pd.DataFrame()
df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(data_loader):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.detach().reshape(-1))
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch)
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(),h,tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(data_loader):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(data_loader),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
Yhat_train = net(XZ_train).squeeze().detach().cpu().numpy()
df_temp = measures_from_Yhat(Y_train_np, Z_train_np, Yhat=Yhat_train, threshold=tau)
df_temp['epoch'] = epoch * len(data_loader) + i + 1
df_ckpt = df_ckpt.append(df_temp)
# Plot (cost, train accuracies, fairness measures) curves per 50 epochs
if (epoch + 1) % 50 == 0:
IPython.display.clear_output()
print('Currently working on - seed: {}'.format(seed))
plt.figure(figsize=(15,5), dpi=100)
plt.subplot(1,3,1)
plt.plot(costs)
plt.xlabel('x10 iterations')
plt.title('cost')
plt.subplot(1,3,2)
plt.plot(df_ckpt['acc'].to_numpy())
plt.xlabel('epoch')
plt.title('Accuracy')
plt.subplot(1,3,3)
if fairness == 'DP':
plt.plot(df_ckpt['DDP'].to_numpy())
plt.title('DDP')
elif fairness == 'EO':
plt.plot(df_ckpt['DEO'].to_numpy())
plt.title('DEO')
plt.xlabel('epoch')
plt.show()
Yhat_test = net(XZ_test).squeeze().detach().cpu().numpy()
df_test = measures_from_Yhat(Y_test_np, Z_test_np, Yhat=Yhat_test, threshold=tau)
return df_test | 7,375 | 41.390805 | 141 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/dataloader.py | import os
import copy
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import data_loader
from tempeh.configurations import datasets
from sklearn.datasets import make_moons
from sklearn.preprocessing import LabelEncoder, StandardScaler
def arrays_to_tensor(X, Y, Z, XZ, device):
return torch.FloatTensor(X).to(device), torch.FloatTensor(Y).to(device), torch.FloatTensor(Z).to(device), torch.FloatTensor(XZ).to(device)
def adult(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("./data/compas/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
class CustomDataset():
def __init__(self, X, Y, Z):
self.X = X
self.Y = Y
self.Z = Z
def __len__(self):
return len(self.Y)
def __getitem__(self, index):
x, y, z = self.X[index], self.Y[index], self.Z[index]
return x, y, z
class FairnessDataset():
def __init__(self, dataset, device=torch.device('cuda')):
self.dataset = dataset
self.device = device
np.random.seed(12345678)
if self.dataset == 'AdultCensus':
self.get_adult_data()
elif self.dataset == 'COMPAS':
self.get_compas_data()
elif self.dataset == 'CreditDefault':
self.get_credit_default_data()
elif self.dataset == 'Lawschool':
self.get_lawschool_data()
elif self.dataset == 'Moon':
self.get_moon_data()
else:
raise ValueError('Your argument {} for dataset name is invalid.'.format(self.dataset))
self.prepare_ndarray()
def get_adult_data(self):
X_train, Y_train, X_test, Y_test = adult('./data/adult/')
self.Z_train_ = X_train['Sex']
self.Z_test_ = X_test['Sex']
self.X_train_ = X_train.drop(labels=['Sex'], axis=1)
self.X_train_ = pd.get_dummies(self.X_train_)
self.X_test_ = X_test.drop(labels=['Sex'], axis=1)
self.X_test_ = pd.get_dummies(self.X_test_)
le = LabelEncoder()
self.Y_train_ = le.fit_transform(Y_train)
self.Y_train_ = pd.Series(self.Y_train_, name='>50k')
self.Y_test_ = le.fit_transform(Y_test)
self.Y_test_ = pd.Series(self.Y_test_, name='>50k')
# def get_compas_data(self):
# dataset = datasets['compas']()
# # dataset = compas_data_loader()
# X_train, X_test = dataset.get_X(format=pd.DataFrame)
# Y_train, Y_test = dataset.get_y(format=pd.Series)
# Z_train, Z_test = dataset.get_sensitive_features('race', format=pd.Series)
# self.X_train_ = X_train
# self.Y_train_ = Y_train
# self.Z_train_ = (Z_train != 'African-American').astype(float)
# self.X_test_ = X_test
# self.Y_test_ = Y_test
# self.Z_test_ = (Z_test != 'African-American').astype(float)
def get_compas_data(self):
dataset = datasets['compas']()
ds = data_loader.Compas()
ds.split_test()
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
self.X_train_ = X
self.Y_train_ = Y
self.Z_train_ = A
self.X_test_ = X_test
self.Y_test_ = Y_test
self.Z_test_ = A_test
def get_credit_default_data(self):
rawdata = pd.read_excel('./data/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
Z = 2 - preproc_data['SEX']
self.X_train_ = X.loc[list(range(24000)), :]
self.Y_train_ = Y.loc[list(range(24000))]
self.Z_train_ = Z.loc[list(range(24000))]
self.X_test_ = X.loc[list(range(24000,30000)), :]
self.Y_test_ = Y.loc[list(range(24000,30000))]
self.Z_test_ = Z.loc[list(range(24000,30000))]
def get_lawschool_data(self):
rawdata = pd.read_sas('./data/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
Z = rawdata['White']
self.X_train_ = X.loc[list(range(77267)), :]
self.Y_train_ = Y.loc[list(range(77267))]
self.Z_train_ = Z.loc[list(range(77267))]
self.X_test_ = X.loc[list(range(77267,96584)), :]
self.Y_test_ = Y.loc[list(range(77267,96584))]
self.Z_test_ = Z.loc[list(range(77267,96584))]
def get_moon_data(self):
n_train = 10000
n_test = 5000
X, Y = make_moons(n_samples=n_train+n_test, noise=0.2, random_state=0)
Z = np.zeros_like(Y)
np.random.seed(0)
for i in range(n_train + n_test):
if Y[i] == 0:
if -0.734 < X[i][0] < 0.734:
Z[i] = np.random.binomial(1, 0.90)
else:
Z[i] = np.random.binomial(1, 0.35)
elif Y[i] == 1:
if 0.262 < X[i][0] < 1.734:
Z[i] = np.random.binomial(1, 0.55)
else:
Z[i] = np.random.binomial(1, 0.10)
X = pd.DataFrame(X, columns=['x_1', 'x_2'])
Y = pd.Series(Y, name='label')
Z = pd.Series(Z, name='sensitive attribute')
self.X_train_ = X.loc[list(range(10000)), :]
self.Y_train_ = Y.loc[list(range(10000))]
self.Z_train_ = Z.loc[list(range(10000))]
self.X_test_ = X.loc[list(range(10000,15000)), :]
self.Y_test_ = Y.loc[list(range(10000,15000))]
self.Z_test_ = Z.loc[list(range(10000,15000))]
def prepare_ndarray(self):
self.normalized = False
self.X_train = self.X_train_.to_numpy(dtype=np.float64)
self.Y_train = self.Y_train_.to_numpy(dtype=np.float64)
self.Z_train = self.Z_train_.to_numpy(dtype=np.float64)
self.XZ_train = np.concatenate([self.X_train, self.Z_train.reshape(-1,1)], axis=1)
self.X_test = self.X_test_.to_numpy(dtype=np.float64)
self.Y_test = self.Y_test_.to_numpy(dtype=np.float64)
self.Z_test = self.Z_test_.to_numpy(dtype=np.float64)
self.XZ_test = np.concatenate([self.X_test, self.Z_test.reshape(-1,1)], axis=1)
self.sensitive_attrs = sorted(list(set(self.Z_train)))
return None
def normalize(self):
self.normalized = True
scaler_XZ = StandardScaler()
self.XZ_train = scaler_XZ.fit_transform(self.XZ_train)
self.XZ_test = scaler_XZ.transform(self.XZ_test)
scaler_X = StandardScaler()
self.X_train = scaler_X.fit_transform(self.X_train)
self.X_test = scaler_X.transform(self.X_test)
return None
def get_dataset_in_ndarray(self):
return (self.X_train, self.Y_train, self.Z_train, self.XZ_train),\
(self.X_test, self.Y_test, self.Z_test, self.XZ_test)
def get_dataset_in_tensor(self, validation=False, val_portion=.0):
X_train_, Y_train_, Z_train_, XZ_train_ = arrays_to_tensor(
self.X_train, self.Y_train, self.Z_train, self.XZ_train, self.device)
X_test_, Y_test_, Z_test_, XZ_test_ = arrays_to_tensor(
self.X_test, self.Y_test, self.Z_test, self.XZ_test, self.device)
return (X_train_, Y_train_, Z_train_, XZ_train_),\
(X_test_, Y_test_, Z_test_, XZ_test_) | 11,649 | 40.459075 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/data_loader_or.py | # data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
# TODO: possibly some form of (cross) validation
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, use_tensor=True, device='cpu', info='No Info Available'):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
self.X_test = None
self.A_test = None
self.Y_test = None
self.info = info
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
# def stratified_batch_generator(self, n_samples, n_iterates):
# # get propoertions of protected attribute
# p_A1 = self.A.mean()
# p_A0 = 1-p_A1
# # build index set of protected and unprotected attribute
# ind_A1 = (self.A==1).nonzero()[:,0]
# ind_A0 = (self.A==0).nonzero()[:,0]
# # number of samples to sample from each distribution
# n_batch_1 = int(p_A1*n_samples)
# n_batch_0 = int(p_A0*n_samples)
# replacement = False
# for _ in range(n_iterates):
# # sample indexes for protected and unprotected class
# batch_idx1 = ind_A1[(torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
# num_samples=n_batch_1,
# replacement=replacement)]
# batch_idx0 = ind_A0[(torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
# num_samples=n_batch_0,
# replacement=replacement)]
# yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
# torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
# torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get propoertions of protected attribute
# n_epochs = 100
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
# print(p_A0)
total_samples = self.A.shape[0]
# batch_size = 32
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
# print(epoch)
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
# print(ind_A1.shape)
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(10000, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self):
X, Y, A = load_compas_data('data/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
| 11,586 | 38.546075 | 121 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/models.py | import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, n_layers, n_inputs, n_hidden_units):
super(Classifier, self).__init__()
layers = []
if n_layers == 1: # Logistic Regression
layers.append(nn.Linear(n_inputs, 1))
layers.append(nn.Sigmoid())
else:
layers.append(nn.Linear(n_inputs, n_hidden_units))
layers.append(nn.ReLU())
for i in range(n_layers-2):
layers.append(nn.Linear(n_hidden_units, n_hidden_units))
layers.append(nn.ReLU())
layers.append(nn.Linear(n_hidden_units,1))
layers.append(nn.Sigmoid())
self.layers = nn.Sequential(*layers)
def forward(self, x):
x = self.layers(x)
return x | 829 | 33.583333 | 72 | py |
Metrizing-Fairness | Metrizing-Fairness-main/offline_experiments/src/Fair_KDE/fair_KDE_.py | # Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from models import Classifier
from dataloader import FairnessDataset
from algorithm import train_fair_classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
# act on experiment parameters:
data_loader.set_seed(0)
gamma_candidates = np.logspace(-2, 2, num=10)
ds = data_loader.Compas()
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
dataset_name = 'COMPAS' # ['Moon', 'Lawschool', 'AdultCensus', 'CreditDefault', 'COMPAS']
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 20 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
##### Other training hyperparameters #####
batch_size = 2048
lr = 2e-4
lr_decay = 1.0 # Exponential decay factor of LR scheduler
n_seeds = 5 # Number of random seeds to try
n_epochs = 200
seed = 5
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cpu')
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
results_test = []
results_train = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(generator):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(generator),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0)) | 9,960 | 34.830935 | 136 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_classification/fair_training.py | # fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of stochastic gradient descent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm 2, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss | 5,881 | 45.314961 | 189 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_classification/fairness_metrics.py | import torch
#import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
def energy_distance_biased(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*n1)
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*n2))
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
Compute type Sinkhorn divergence between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
def rbf(diff, diagzero=True):
sigma = 0.1
if diagzero:
return torch.exp(((diff*(1-torch.eye(diff.shape[0])))**2)/(2*sigma**2))
else:
return torch.exp((diff**2)/(2*sigma**2))
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
R2 (torch.Tensor): mean squared error
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
| 10,438 | 34.266892 | 133 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_classification/data_loader.py | # data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functinality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
if X_test is not None:
self.X_test = to_tensor(X_test, device) if use_tensor else X_test
self.A_test = to_tensor(A_test, device) if use_tensor else A_test
self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
self.info = info
self.min_max_scaler = None
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get propoertions of protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
| 16,842 | 39.585542 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_classification/bias_eval.py | import torch
import data_loader
import models
import fairness_metrics
from sklearn.utils import shuffle
import matplotlib.pyplot as plt
def find_batchsize(N_target, A):
candidate_1 = torch.argmax((A.flatten().cumsum(0)==2).int()).item() + 1
candidate_0 = torch.argmax(((1-A).flatten().cumsum(0)==2).int()).item() + 1
if candidate_0<2 or candidate_1<0:
return -1
return max(N_target, candidate_1, candidate_0)
def accuracy_1(y_hat_1, y_hat_0, y_1, y_0):
return torch.Tensor((y_hat_1==y_1).float().mean())
def accuracy_0(y_hat_1, y_hat_0, y_1, y_0):
return torch.Tensor((y_hat_0==y_0).float().mean())
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy,
'accuracy1' : accuracy_1,
'accuracy0' : accuracy_0
}
def train_metrics_fullbias(target_batchsize, seed=0, plot=False, lambda_=1):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance_biased
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = find_batchsize(target_batchsize, A[batchstart:])
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
L = criterion(pred, Y_batch)
y_after_sig = torch.sigmoid(pred)
y_after_sig = y_after_sig[:, None]
y_hat_1 = pred[A_batch.flatten()==1]
y_hat_0 = pred[A_batch.flatten()==0]
L_fair = fairloss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
pred = model(X_test)
y_hat_1 = pred[A_test.flatten()==1]
y_hat_0 = pred[A_test.flatten()==0]
testloss = criterion(model(X_test), Y_test) + fairloss(y_hat_1, y_hat_0)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses
def train_metrics_debiased(target_batchsize, seed=0, plot=False,lambda_ = 1):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = find_batchsize(target_batchsize, A[batchstart:])
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
#L = criterion(pred, Y_batch)
y_after_sig = torch.sigmoid(pred)
y_after_sig = y_after_sig[:, None]
y_hat_1 = pred[A_batch.flatten()==1]
y_hat_0 = pred[A_batch.flatten()==0]
L_fair = fairloss(y_hat_1, y_hat_0)
# overall loss
y_1 = Y_batch[A_batch.flatten()==1]
y_0 = Y_batch[A_batch.flatten()==0]
delta_1, delta_0 = 1, 1
N = len(A_batch)
N_1 = A_batch.sum()
N_0 = N-N_1
if N >= target_batchsize:
if N_1 == 2:
delta_1 = N/(2*(N-1))
delta_0 = N/((N-1))
else:
delta_1 = N/((N-1))
delta_0 = N/(2*(N-1))
weight_1 = (delta_1) * N_1/N
weight_0 = (delta_0) * N_0/N
accloss1 = criterion(y_hat_1, y_1)
accloss0 = criterion(y_hat_0, y_0)
L = (weight_0 * accloss0 + weight_1 * accloss1)
loss = L + lambda_ * L_fair
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
pred = model(X_test)
y_hat_1 = pred[A_test.flatten()==1]
y_hat_0 = pred[A_test.flatten()==0]
testloss = criterion(model(X_test), Y_test) + fairloss(y_hat_1, y_hat_0)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses
def train_metrics_noreg(target_batchsize, seed=0, plot=False):
lr = 5e-4
drug = data_loader.Drug(True)
model = models.NeuralNetworkClassification(drug.get_k())
optimizer = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=5e-3)
criterion = torch.nn.BCEWithLogitsLoss()
fairloss = fairness_metrics.energy_distance_biased
data_loader.set_seed(seed)
drug.split_test()
X, Y, A = drug.get_data()
X_test, Y_test, A_test = drug.get_test_data()
N_epochs = 500
losses = []
test_losses = []
for epoch in range(N_epochs):
X,Y,A = shuffle(X,Y,A)
sumloss = 0
batchstart = 0
while batchstart<len(A):
optimizer.zero_grad()
batchsize = min(target_batchsize, len(A[batchstart:]))
if batchsize>0:
X_batch, Y_batch, A_batch = X[batchstart:batchstart+batchsize], Y[batchstart:batchstart+batchsize], A[batchstart:batchstart+batchsize]
batchstart = batchstart+batchsize
pred = model(X_batch)
L = criterion(pred, Y_batch)
# overall loss
loss = L
loss.backward()
sumloss += loss.detach().item()
optimizer.step()
else:
batchstart = len(A)+1
losses.append(sumloss)
with torch.no_grad():
testloss = criterion(model(X_test), Y_test)
test_losses.append(testloss)
if plot:
plt.plot(losses)
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test.flatten()==1].flatten()
y_hat_0 = y_hat[A_test.flatten()==0].flatten()
y_1 = Y_test[A_test.flatten()==1].flatten()
y_0 = Y_test[A_test.flatten()==0].flatten()
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return test_results, losses, test_losses | 9,003 | 36.991561 | 150 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_classification/models.py | # models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneta et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of Oneta et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.sigmoid_ = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean() | 2,012 | 29.5 | 97 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/fair_training.py | # fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of stochastic gradient descent
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Classification |
# +--------------------------------------------------+
def fair_learning(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm 2, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
y_hat_1_log = y_hat_log[(logdata[2]==1) & (logdata[1]==1)]
y_hat_0_log = y_hat_log[(logdata[2]==0) & (logdata[1]==1)]
# END CHANGE
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_after_sig = torch.sigmoid(y_hat)
y_after_sig = y_after_sig[:, None]
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
y_hat_1 = y_hat[(A==1) & (Y==1)]
y_hat_0 = y_hat[(A==0) & (Y==1)]
if (len(y_hat_0)<=1) or (len(y_hat_0)<=1):
nit = len(batch_fair_loss)
print(f'Iterate {nit}: Error for number of samples')
# END CHANGE
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss
# +--------------------------------------------------+
# | Algorithm: Stratified SGD - Regression |
# +--------------------------------------------------+
def fair_learning_regression(generator, predict, fair_loss, params, lambda_, psi=None, verbose = False, logdata=None, gamma_scheduler=None, lr_decay=1, lr=1e-3, logfairloss=None, **kwargs):
'''
Train model using Algorithm, which uses mini-batch SGD to train accuracy_loss + lambda_ * fairness_loss.
Args:
generator (generator): Generator which yields (X,Y,A)
predict (fct handle): Prediction function handle, maps X-->Y_hat
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lambda_ (numeric): Hyperparameter controlling influence of L_fair
verbose (bool, optional): Verbosity
logdata (None or tuple of X,Y,A, all torch.Tensor, optional): data for keeping track of training process
gamma_scheduler (optional): Learning Rate Scheduler
logfairloss (optional): Fairness log function to use for logging instead of fairloss
Returns:
Trainig Loss over Training if logdata is provided, but changes params
'''
raise NotImplementedError
if logfairloss==None:
logfairloss = fair_loss
optimizer = optim.Adam(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.MSELoss()
batch_reg_loss = []
batch_fair_loss = []
for iterate, (X,Y,A) in enumerate(generator):
if logdata:
with torch.no_grad():
y_hat_log = predict(logdata[0])
batch_reg_loss.append(criterion(y_hat_log, logdata[1]))
y_hat_1_log = y_hat_log[logdata[2]==1]
y_hat_0_log = y_hat_log[logdata[2]==0]
batch_fair_loss.append(logfairloss(y_hat_1_log, y_hat_0_log))
optimizer.zero_grad()
# predict
y_hat = predict(X)
# compute regression and fairness loss
L = criterion(y_hat, Y)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
L_fair = fair_loss(y_hat_1, y_hat_0)
# overall loss
loss = L + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L.data.item(), L_fair.data.item()))
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
return batch_reg_loss, batch_fair_loss | 6,302 | 45.688889 | 189 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/fairness_metrics.py | import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
Compute type Sinkhorn divergence between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
def rbf(diff, diagzero=True):
sigma = 0.1
if diagzero:
return torch.exp(((diff*(1-torch.eye(diff.shape[0])))**2)/(2*sigma**2))
else:
return torch.exp((diff**2)/(2*sigma**2))
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +-----------------------------------------+
# | Evaluation Metric 2: Equal Opportunity |
# +-----------------------------------------+
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
def equal_opportunity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute equal opportunity metric
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
y1_hat_sharp = (y1_hat>=0.5).float()[y1==1]
y2_hat_sharp = (y2_hat>=0.5).float()[y2==1]
return (y1_hat_sharp.mean()-y2_hat_sharp.mean()).abs()
# END CHANGE
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
R2 (torch.Tensor): mean squared error
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
| 10,671 | 34.221122 | 133 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/benchmark.py | # benchmark.py
# file with functions for running experiment
import fair_training
import numpy as np
import torch
import matplotlib.pyplot as plt
from scipy.spatial import ConvexHull
import time
def convergence_plotter(regloss, fairloss, lambda_):
plt.figure(figsize=(16,5))
plt.subplot(131)
plt.plot(regloss)
plt.title('Regression Loss')
plt.xlabel('Iteration')
plt.ylabel('Regression Loss')
plt.subplot(132)
plt.plot(fairloss)
plt.title('Fairness Loss')
plt.xlabel('Iteration')
plt.ylabel('Fairness Loss')
plt.subplot(133)
plt.plot(lambda_*np.array(fairloss)+np.array(regloss))
plt.title('Overall Loss')
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.show()
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementaion train and test function for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def train_test_fair_learning(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
y_hat = torch.round(torch.sigmoid(model(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = torch.round(torch.sigmoid(model(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
def train_test_fair_learning_regression(ds, model, fair_loss, lr, batch_size, N_epochs, lambda_, metrics, lr_decay = 1, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
raise NotImplementedError
# train the model
start_time = time.time()
regloss, fairloss = fair_training.fair_learning_regression(generator=ds.stratified_batch_generator_worep(batch_size, N_epochs),
predict=model.forward,
fair_loss=fair_loss,
params=model.parameters(),
lambda_=lambda_,
lr_decay=lr_decay,
logdata = ds.get_log_data() if plot_convergence else None,
psi=psi, lr=lr, logfairloss=logfairloss, **kwargs)
stop_time = time.time()
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training set
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
y_hat = model(X)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = model(X_test)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
return train_results, test_results
| 7,159 | 40.149425 | 206 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/data_loader.py | # data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
import copy
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn import preprocessing
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides data loading functinality for MFL.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, X_test=None, Y_test=None, A_test=None, use_tensor=True, device='cpu', info='No Info Available', min_max_scaler=None):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
if X_test is not None:
self.X_test = to_tensor(X_test, device) if use_tensor else X_test
self.A_test = to_tensor(A_test, device) if use_tensor else A_test
self.Y_test = to_tensor(Y_test, device) if use_tensor else Y_test
self.info = info
self.min_max_scaler = None
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_adult_data(self):
return (self.X, self.Y, self.A, self.X_test, self.Y_test, self.A_test)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get propoertions of protected attribute
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
total_samples = self.A.shape[0]
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
if self.min_max_scaler is not None:
x_vals = self.X_test.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
self.X_test = pd.DataFrame(x_scaled)
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class LawSchool(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_sas('./data/classification/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
A = rawdata['White']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict admission,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Drug(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x):
X, Y, A = load_drug_data('data/classification/drug/drug_consumption.data.txt')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Credit(DataLoader):
"""This is a classification dataset"""
def __init__(self, a_inside_x, **kwargs):
rawdata = pd.read_excel('./data/classification/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
A = 2 - preproc_data['SEX']
if a_inside_x:
X = pd.concat((X, A), axis=1)
info = '''Credit data'''
self.min_max_scaler = preprocessing.MinMaxScaler()
x_vals = X.values #returns a numpy array
x_scaled = self.min_max_scaler.fit_transform(x_vals)
X = pd.DataFrame(x_scaled)
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], info=info, **kwargs)
class Adult(DataLoader):
def __init__(self, a_inside_x, **kwargs):
X_train_, Y_train_, X_test_, Y_test_ = adult_data_read('./data/classification/adult/')
A = X_train_['Sex']
A_test = X_test_['Sex']
le = LabelEncoder()
Y = le.fit_transform(Y_train_)
Y = pd.Series(Y, name='>50k')
Y_test = le.fit_transform(Y_test_)
Y_test = pd.Series(Y_test, name='>50k')
if not a_inside_x:
X = X_train_.drop(labels=['Sex'], axis=1)
X = pd.get_dummies(X)
X_test = X_test_.drop(labels=['Sex'], axis=1)
X_test = pd.get_dummies(X_test)
else:
X = pd.get_dummies(X_train_)
X_test = pd.get_dummies(X_test_)
info = """Adult dataset for classification. Train Test split is already provided"""
super().__init__(X, np.array(Y)[:, None], np.array(A)[:, None], X_test, np.array(Y_test)[:, None], np.array(A_test)[:, None], info=info, **kwargs)
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, a_inside_x, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
if a_inside_x:
X = np.concatenate((np.array(X), np.array(A)), axis=1)
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(500, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
def get_log_data(self):
return self.get_data()
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self, a_inside_x):
X, Y, A = load_compas_data('data/classification/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
if a_inside_x:
X = np.concatenate((X, A[:, None]), axis=1)
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
def adult_data_read(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
| 16,841 | 39.681159 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/zafar_classification.py | # Baseline 1: https://arxiv.org/pdf/1706.02409.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
from zafar_method import funcs_disp_mist
from zafar_method.utils import *
import fairness_metrics
import data_loader
from zafar_method import utils
import numpy as np
from tqdm import tqdm
import cvxpy as cp
from collections import namedtuple
from sklearn.metrics import log_loss
from zafar_method import loss_funcs as lf # loss funcs that can be optimized subject to various constraints
import pickle
from copy import deepcopy
import os, sys
# from generate_synthetic_data import *
from zafar_method import utils as ut
from zafar_method import funcs_disp_mist as fdm
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides implementation of http://proceedings.mlr.press/v54/zafar17a/zafar17a.pdf.
gamma parameter is the accuracy fairness tradeoff of the model.
An example usage is python zafar_classification.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def run(args):
# act on experiment parameters:
data_loader.set_seed(args.seed)
gamma_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
if args.dataset == 'Adult':
ds = data_loader.Adult(a_inside_x=0)
train_test_split_fin = 1
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
X, Y, A = ds.get_data()
X_test, Y_test, A_test = ds.get_test_data()
x_train = X.cpu().detach().numpy()
Y_train = Y.cpu().detach().numpy().flatten()
a_train = A.cpu().detach().numpy().flatten()
x_test = X_test.cpu().detach().numpy()
y_test = Y_test.cpu().detach().numpy().flatten()
a_test = A_test.cpu().detach().numpy().flatten()
loss_function = "logreg" # perform the experiments with logistic regression
Y_test_ = y_test.copy()
Y_train_ = Y_train.copy()
Y_test_[y_test == 0] = -1
Y_train_[Y_train_ == 0] = -1
# run the test for various lambdas
y_train = Y_train_
y_test = Y_test_
x_control_train = {"s1": a_train}
x_control_test = {"s1": a_test}
cons_params = None # constraint parameters, will use them later
EPS = 1e-6
for gamma in gamma_candidates:
print('Training Zafar method, for gamma: {}/{}, seed:{}'.format(gamma, args.nlambda, args.seed))
start_time = time.time()
# mult_range = np.arange(1.0, 0.0 - it, -it).tolist()
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
apply_fairness_constraints = 0 # set this flag to one since we want to optimize accuracy subject to fairness constraints
apply_accuracy_constraint = 1
sep_constraint = 0
# for m in mult_range:
# sensitive_attrs_to_cov_thresh = deepcopy(cov_all_train_uncons)
# for s_attr in sensitive_attrs_to_cov_thresh.keys():
# for cov_type in sensitive_attrs_to_cov_thresh[s_attr].keys():
# for s_val in sensitive_attrs_to_cov_thresh[s_attr][cov_type]:
# sensitive_attrs_to_cov_thresh[s_attr][cov_type][s_val] *= m
sensitive_attrs_to_cov_thresh = {"s1":0}
w = train_model(x_train, y_train, x_control_train, lf._logistic_loss, apply_fairness_constraints, apply_accuracy_constraint, sep_constraint, ['s1'], sensitive_attrs_to_cov_thresh, gamma)
# y_test_predicted = np.sign(np.dot(x_test, w))
# correct_answers = (y_test_predicted == y_test).astype(int) # will have 1 when the prediction and the actual label match
# accuracy = float(sum(correct_answers)) / float(len(correct_answers))
# y_test_predict[y_test_predict == -1] = 0
# w = torch.tensor(w).float()
# theta0 = torch.tensor(w).float()
stop_time = time.time()
predict = lambda X: torch.tensor(np.maximum(np.sign(np.dot(X.cpu().detach().numpy(), w)), 0)).float()
# metrics on train set
y_hat = predict(X).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(X_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = gamma
train_results['time'] = stop_time - start_time
test_results['lambda_'] = gamma
results_train.append(train_results)
results_test.append(test_results)
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/zafar/{}_zafar_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/zafar/{}_zafar_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'method':'zafar',
'seed':args.seed,
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'a_inside_x': False
}
with open('results/zafar/{}_zafar_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=1, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'Compas', 'LawSchool', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=25)
args = parser.parse_args()
run(args) | 8,147 | 42.340426 | 195 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/MMD_fair.py | # fair_training.py
# training methods for fair regression
import torch
from torch.autograd import Variable
import torch.optim as optim
import time
from tqdm import tqdm
# +---------------------------------+
# | Algorithm 1: Gradient Descent |
# +---------------------------------+
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides an implementation of the paper in
https://papers.nips.cc/paper/2020/file/af9c0e0c1dee63e5acad8b7ed1a5be96-Paper.pdf
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
def mmd_gradient_descent(X, Y, A, model, predict, reg_loss, fair_loss, params, lr, N_iterates, lambda_, verbose=False, log=False, logfairloss=None, lr_decay=1, **kwargs):
'''
Train model using Algorithm 1, which uses simple gradient descent.
Args:
X (torch.Tensor): X data
Y (torch.Tensor): Y data
A (torch.Tensor): A data
predict (fct handle): Prediction function handle, maps X-->Y_hat
reg_loss (fct handle): Regression Loss function handle, maps Y_hat, Y-->L_reg
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
params (list of params): List of learnable parameters, such as returned by "nn.parameters()" or list of torch Variables
lr (float): SGD Learning Rate
N_iterates (int): Number of iterates for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
psi (fct handle, optional):Transformation function maps from Y_hat, Y --> score, fair_loss is computed on score
verbose (bool, optional): Verbosity
log (bool, optional): Return training path
logfairloss (optional): Sinkhorn divergence
Returns:
Trainig Loss over Training if log=True, but changes params
'''
optimizer = optim.SGD(params, lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay)
criterion = torch.nn.BCEWithLogitsLoss()
# optimizer = optim.Adam(params)
#scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=100, gamma=0.1)
epoch_reg_loss = []
epoch_fair_loss = []
for iterate in tqdm(range(N_iterates)):
# zero grad accumulator
optimizer.zero_grad()
# predict
y_hat = predict(X)
y_hat_first_layer = model.first_layer(X)
L_reg = criterion(y_hat, Y)
# y_hat = torch.sigmoid(y_hat)
# compute regression and fairness loss
y_hat_1 = y_hat_first_layer[(A.squeeze()==1) & (Y.squeeze()==1)]
y_hat_0 = y_hat_first_layer[(A.squeeze()==0) & (Y.squeeze()==1)]
L_fair = fair_loss(y_hat_1, y_hat_0)
# all_linear1_params = torch.cat([x.view(-1) for x in model.linear1.parameters()])
# all_linear2_params = torch.cat([x.view(-1) for x in model.linear2.parameters()])
# W_froben = torch.norm(all_linear1_params, 2) ** 2
# V_froben = torch.norm(all_linear2_params, 2) ** 2
# overall loss
# reg_weight = 0.1
loss = L_reg + lambda_ * L_fair
# logging
if verbose:
print('Iterate {}: L_reg={}, L_fair={}'.format(iterate, L_reg.data.item(), L_fair.data.item()))
if log:
epoch_fair_loss.append(L_fair.data.item())
epoch_reg_loss.append(L_reg.data.item())
# gradient comuptation and optimizer step
loss.backward()
optimizer.step()
#scheduler.step()
return epoch_reg_loss, epoch_fair_loss
def mmd_fair_traintest(ds, model, reg_loss, fair_loss, lr, n_iterates, lambda_, metrics, psi=None, plot_convergence=False, logfairloss=None, train_test_split_fin=0, lr_decay=1, **kwargs):
'''
Train a model using algorithm 2 and test it on metrics
Args:
ds (data_loader.DataLoader): Data loader to use
model (torch.nn.Module): Pytorch module
fair_loss (fct handle): Fairness Loss function handle, maps Y_hat_prot, Y_hat_unprot-->L_fair
lr (float): SGD Learning Rate
batch_size (int): Batch-Size of SGD
N_epochs (int): Number of epochs for SGD
lambda_ (numeric): Hyperparameter controlling influence of L_fair
metrics (dict with fctn handles): Metrics to use in evaluation. Will return a dict with same keys
plot_convergence (bool, optional): If convergence plot of training should be shown
logfairloss (fctn handle, optional): Fairness function used for logging instead of fair_loss
train_test_split_fin (bool, adult data) : Train-Test split already performed on adultdata
Returns:
train_results, test_results: dicts of results
'''
# train the model
start_time = time.time()
if train_test_split_fin:
X, Y, A, X_test, Y_test, A_test = ds.get_adult_data()
else:
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test= ds.get_test_data()
regloss, fairloss = mmd_gradient_descent(X, Y, A, model, model.forward,
reg_loss,
fair_loss,
model.parameters(), lr, n_iterates,
lambda_,
logdata = ds.get_log_data() if plot_convergence else None, logfairloss=logfairloss, lr_decay=lr_decay, **kwargs)
# plot convergence if desired
if plot_convergence:
convergence_plotter(regloss, fairloss, lambda_)
# compute metrics
model.eval()
# metrics on training seed
stop_time = time.time()
y_hat = torch.round(torch.sigmoid(model.forward(X)))
y_hat_1 = y_hat[A==1]
y_hat_0 = y_hat[A==0]
y_1 = Y[A==1]
y_0 = Y[A==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = torch.round(torch.sigmoid(model.forward(X_test)))
y_hat_1 = y_hat[A_test==1]
y_hat_0 = y_hat[A_test==0]
y_1 = Y_test[A_test==1]
y_0 = Y_test[A_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
return train_results, test_results
| 6,671 | 44.387755 | 187 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/models.py | # models.py
# models for regression
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% This script provides models for MFL and Oneta et al.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
class LinearRegression(nn.Module):
def __init__(self, k):
super(LinearRegression, self).__init__()
self.linear = torch.nn.Linear(k, 1, bias=True)
def forward(self, x):
return self.linear(x)
class NeuralNetwork(nn.Module):
def __init__(self, k):
super(NeuralNetwork, self).__init__()
self.linear1 = torch.nn.Linear(k, 20, bias=True)
self.linear2 = torch.nn.Linear(20, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of MFL"""
class NeuralNetworkClassification(nn.Module):
def __init__(self, k):
super(NeuralNetworkClassification, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def forward(self, x):
x = F.relu(self.linear1(x))
self.output = self.linear2(x)
return self.output
"""Model of Oneta et al."""
class NeuralNetwork_MMD(nn.Module):
def __init__(self, k):
super(NeuralNetwork_MMD, self).__init__()
self.linear1 = torch.nn.Linear(k, 16, bias=True)
self.sigmoid_ = torch.nn.ReLU()
self.linear2 = torch.nn.Linear(16, 1, bias=True)
def first_layer(self, x):
return self.sigmoid_((self.linear1(x)))
def forward(self, x):
self.output = self.linear2(self.sigmoid_((self.linear1(x))))
return self.output
# loss_functions: MAE and MSE
def MSE(y_pred, y):
return ((y_pred - y) ** 2).mean()
def MAE(y_pred, y):
return (y_pred - y).abs().mean() | 2,012 | 29.5 | 97 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/fair_KDE.py | # Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import pickle
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from Fair_KDE.models import Classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from Fair_KDE.dataloader import CustomDataset
import time
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementation of https://proceedings.neurips.cc/paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
An example usage python fair_KDE.py --dataset {} --seed {} --nlambda {}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
# act on experiment parameters:
def run(args):
# act on experiment parameters:
seed = args.seed
data_loader.set_seed(args.seed)
##### Other training hyperparameters #####
lr = 2e-4
n_epochs = 200
lr_decay = 1.0
batch_size = 2048
n_epochs = 500
lambda_candidates = np.logspace(args.lambda_min, args.lambda_max, num=args.nlambda)
if args.dataset == 'CommunitiesCrimeClassification':
ds = data_loader.CommunitiesCrimeClassification(a_inside_x=0)
batch_size = 128
if args.dataset == 'Compas':
ds = data_loader.Compas(a_inside_x=0)
lr = 5e-4
batch_size = 2048
if args.dataset == 'LawSchool':
ds = data_loader.LawSchool(a_inside_x=0)
lr = 2e-4
batch_size = 2048
if args.dataset == 'Credit':
ds = data_loader.Credit(a_inside_x=0)
lr = 5e-4
n_batch = 2048
if args.dataset == 'Adult':
ds = data_loader.Adult(0)
train_test_split_fin = 1
batch_size = 2048
lr = 1e-1
lr_decay - 0.98
if args.dataset == 'Drug':
ds = data_loader.Drug(a_inside_x=0)
batch_size = 128
if args.dataset != 'Adult':
ds.split_test()
k = ds.get_k()
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'equal_opportunity' : fairness_metrics.equal_opportunity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# END CHANGE
# storage of results
results_train = []
results_test = []
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 16 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cpu')
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
if args.dataset == 'Adult':
X_train, Y_train, Z_train, X_test, Y_test, Z_test = ds.get_adult_data()
else:
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
time_track = []
for lambda_ in lambda_candidates:
print('Training FKDE method, for lambda: {}/{}, seed:{}'.format(lambda_, args.nlambda, args.seed))
start_time = time.time()
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
# CHANGE: CHANGE TO EQUAL OPPORTUNITY FORMULATION
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[(z_batch==z) & (y_batch==1)],h,tau)
m_z = z_batch[(z_batch==z) & (y_batch==1)].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[(z_batch==z) & (y_batch==1)])/h).view(-1),
Yhat[(z_batch==z) & (y_batch==1)].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
# END CHANGE
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
# if (i + 1) % 10 == 0 or (i + 1) == len(generator):
# print('Lambda:{}, Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(lambda_, epoch+1, n_epochs,
# i+1, len(generator),
# cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
stop_time = time.time()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['time'] = stop_time - start_time
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
print(train_results)
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0))
df_train = pd.DataFrame(data=results_train)
df_test = pd.DataFrame(data=results_test)
df_train.to_csv('results/FKDE/{}_FKDE_{}_train.csv'.format(args.dataset, args.seed))
df_test.to_csv('results/FKDE/{}_FKDE_{}_test.csv'.format(args.dataset, args.seed))
PARAMS = {'dataset':args.dataset,
'batch_size':batch_size,
'lr':lr, 'epochs':n_epochs,
'seed':args.seed,
'method':'FKDE',
'nlambda': args.nlambda,
'lambda_min':args.lambda_min,
'lambda_max':args.lambda_max,
'algorihtm':'adam',
'L':'BCE_cross_entropy',
'lr_decay':lr_decay,
'a_inside_x': True
}
with open('results/FKDE/{}_FKDE_{}.pkl'.format(args.dataset, args.seed), 'wb') as f:
pickle.dump({**PARAMS}, f, protocol=pickle.HIGHEST_PROTOCOL)
if __name__=='__main__':
parser = argparse.ArgumentParser(description='Experiment Inputs')
parser.add_argument('--seed', default=0, help='Randomness seed', type=int)
parser.add_argument('--lambda_min', default=-5, type=int, help='Minimum value of lambda: 10^x')
parser.add_argument('--lambda_max', default=2, type=int, help='Maximum value of lambda: 10^x')
parser.add_argument('--dataset', help='Dataset to use', choices=['CommunitiesCrimeClassification', 'LawSchool', 'Compas', 'Adult', 'Credit', 'Drug'])
parser.add_argument('--nlambda', help='Number of lambda candidates', type=int, default=50)
args = parser.parse_args()
run(args) | 14,571 | 40.280453 | 153 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/fairness_metrics.py | import torch
import cvxpy as cp
import numpy as np
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).mean()
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).mean())
def energy_distance_forloop(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
d11 = torch.tensor(0.)
d12 = torch.tensor(0.)
d22 = torch.tensor(0.)
for y_ in y1:
d11 += (y_-y1).abs().mean()
d12 += (y_-y2).abs().mean()
d11 = d11/(y1.shape[0])
d12 = d12/(y1.shape[0])
for y_ in y2:
d22 += (y_-y2).abs().mean()
d22 = d22/(y2.shape[0])
return 2*d12-d11-d22
# +------------------------------------------+
# | Metric 2: Wasserstein Distance |
# +------------------------------------------+
def W1dist(y1,y2):
'''
Compute type 1 Wasserstein distance between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1))
C_np = C.data.numpy()
# solve OT problem
T = cp.Variable(C_np.shape)
ones_1 = np.ones((C_np.shape[0], 1))
ones_2 = np.ones((C_np.shape[1], 1))
objective = cp.Minimize(cp.sum(cp.multiply(C_np,T)))
constraints = [
T >=0,
T@ones_2==ones_1/len(ones_1),
T.T@ones_1==ones_2/len(ones_2)
]
problem = cp.Problem(objective, constraints)
problem.solve(solver=cp.GUROBI)
# objective value for gradient computation
return (torch.Tensor(T.value)*C).sum()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Regression Metric 1: MSE |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
# print('Accuracy of the network on the 10000 test images: %d %%' % (
# 100 * correct / total))
return torch.tensor(correct / total * 100)
| 8,196 | 30.771318 | 100 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/algorithm.py | import random
import IPython
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
def train_fair_classifier(dataset, net, optimizer, lr_scheduler, fairness, lambda_, h, delta, device, n_epochs=200, batch_size=2048, seed=0):
# Retrieve train/test splitted pytorch tensors for index=split
train_tensors, test_tensors = dataset.get_dataset_in_tensor()
X_train, Y_train, Z_train, XZ_train = train_tensors
X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
data_loader = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# An empty dataframe for logging experimental results
df = pd.DataFrame()
df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(data_loader):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.detach().reshape(-1))
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch)
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(),h,tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(data_loader):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(data_loader),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
Yhat_train = net(XZ_train).squeeze().detach().cpu().numpy()
df_temp = measures_from_Yhat(Y_train_np, Z_train_np, Yhat=Yhat_train, threshold=tau)
df_temp['epoch'] = epoch * len(data_loader) + i + 1
df_ckpt = df_ckpt.append(df_temp)
# Plot (cost, train accuracies, fairness measures) curves per 50 epochs
if (epoch + 1) % 50 == 0:
IPython.display.clear_output()
print('Currently working on - seed: {}'.format(seed))
plt.figure(figsize=(15,5), dpi=100)
plt.subplot(1,3,1)
plt.plot(costs)
plt.xlabel('x10 iterations')
plt.title('cost')
plt.subplot(1,3,2)
plt.plot(df_ckpt['acc'].to_numpy())
plt.xlabel('epoch')
plt.title('Accuracy')
plt.subplot(1,3,3)
if fairness == 'DP':
plt.plot(df_ckpt['DDP'].to_numpy())
plt.title('DDP')
elif fairness == 'EO':
plt.plot(df_ckpt['DEO'].to_numpy())
plt.title('DEO')
plt.xlabel('epoch')
plt.show()
Yhat_test = net(XZ_test).squeeze().detach().cpu().numpy()
df_test = measures_from_Yhat(Y_test_np, Z_test_np, Yhat=Yhat_test, threshold=tau)
return df_test | 7,375 | 41.390805 | 141 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/dataloader.py | import os
import copy
import torch
import random
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import data_loader
#from tempeh.configurations import datasets
from sklearn.datasets import make_moons
from sklearn.preprocessing import LabelEncoder, StandardScaler
def arrays_to_tensor(X, Y, Z, XZ, device):
return torch.FloatTensor(X).to(device), torch.FloatTensor(Y).to(device), torch.FloatTensor(Z).to(device), torch.FloatTensor(XZ).to(device)
def adult(data_root, display=False):
""" Return the Adult census data in a nice package. """
dtypes = [
("Age", "float32"), ("Workclass", "category"), ("fnlwgt", "float32"),
("Education", "category"), ("Education-Num", "float32"), ("Marital Status", "category"),
("Occupation", "category"), ("Relationship", "category"), ("Race", "category"),
("Sex", "category"), ("Capital Gain", "float32"), ("Capital Loss", "float32"),
("Hours per week", "float32"), ("Country", "category"), ("Target", "category")
]
raw_train_data = pd.read_csv(
data_root+'adult.data',
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
raw_test_data = pd.read_csv(
data_root+'adult.test',
skiprows=1,
names=[d[0] for d in dtypes],
na_values="?",
dtype=dict(dtypes)
)
train_data = raw_train_data.drop(["Education"], axis=1) # redundant with Education-Num
test_data = raw_test_data.drop(["Education"], axis=1) # redundant with Education-Num
filt_dtypes = list(filter(lambda x: not (x[0] in ["Target", "Education"]), dtypes))
train_data["Target"] = train_data["Target"] == " >50K"
test_data["Target"] = test_data["Target"] == " >50K."
rcode = {
"Not-in-family": 0,
"Unmarried": 1,
"Other-relative": 2,
"Own-child": 3,
"Husband": 4,
"Wife": 5
}
for k, dtype in filt_dtypes:
if dtype == "category":
if k == "Relationship":
train_data[k] = np.array([rcode[v.strip()] for v in train_data[k]])
test_data[k] = np.array([rcode[v.strip()] for v in test_data[k]])
else:
train_data[k] = train_data[k].cat.codes
test_data[k] = test_data[k].cat.codes
return train_data.drop(["Target", "fnlwgt"], axis=1), train_data["Target"].values, test_data.drop(["Target", "fnlwgt"], axis=1), test_data["Target"].values
def compas_data_loader():
""" Downloads COMPAS data from the propublica GitHub repository.
:return: pandas.DataFrame with columns 'sex', 'age', 'juv_fel_count', 'juv_misd_count',
'juv_other_count', 'priors_count', 'two_year_recid', 'age_cat_25 - 45',
'age_cat_Greater than 45', 'age_cat_Less than 25', 'race_African-American',
'race_Caucasian', 'c_charge_degree_F', 'c_charge_degree_M'
"""
data = pd.read_csv("./data/compas/compas-scores-two-years.csv") # noqa: E501
# filter similar to
# https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb
data = data[(data['days_b_screening_arrest'] <= 30) &
(data['days_b_screening_arrest'] >= -30) &
(data['is_recid'] != -1) &
(data['c_charge_degree'] != "O") &
(data['score_text'] != "N/A")]
# filter out all records except the ones with the most common two races
data = data[(data['race'] == 'African-American') | (data['race'] == 'Caucasian')]
# Select relevant columns for machine learning.
# We explicitly leave in age_cat to allow linear classifiers to be non-linear in age
data = data[["sex", "age", "age_cat", "race", "juv_fel_count", "juv_misd_count",
"juv_other_count", "priors_count", "c_charge_degree", "two_year_recid"]]
# map string representation of feature "sex" to 0 for Female and 1 for Male
data = data.assign(sex=(data["sex"] == "Male") * 1)
data = pd.get_dummies(data)
return data
class CustomDataset():
def __init__(self, X, Y, Z):
self.X = X
self.Y = Y
self.Z = Z
def __len__(self):
return len(self.Y)
def __getitem__(self, index):
x, y, z = self.X[index], self.Y[index], self.Z[index]
return x, y, z
class FairnessDataset():
def __init__(self, dataset, device=torch.device('cuda')):
self.dataset = dataset
self.device = device
np.random.seed(12345678)
if self.dataset == 'AdultCensus':
self.get_adult_data()
elif self.dataset == 'COMPAS':
self.get_compas_data()
elif self.dataset == 'CreditDefault':
self.get_credit_default_data()
elif self.dataset == 'Lawschool':
self.get_lawschool_data()
elif self.dataset == 'Moon':
self.get_moon_data()
else:
raise ValueError('Your argument {} for dataset name is invalid.'.format(self.dataset))
self.prepare_ndarray()
def get_adult_data(self):
X_train, Y_train, X_test, Y_test = adult('./data/adult/')
self.Z_train_ = X_train['Sex']
self.Z_test_ = X_test['Sex']
self.X_train_ = X_train.drop(labels=['Sex'], axis=1)
self.X_train_ = pd.get_dummies(self.X_train_)
self.X_test_ = X_test.drop(labels=['Sex'], axis=1)
self.X_test_ = pd.get_dummies(self.X_test_)
le = LabelEncoder()
self.Y_train_ = le.fit_transform(Y_train)
self.Y_train_ = pd.Series(self.Y_train_, name='>50k')
self.Y_test_ = le.fit_transform(Y_test)
self.Y_test_ = pd.Series(self.Y_test_, name='>50k')
# def get_compas_data(self):
# dataset = datasets['compas']()
# # dataset = compas_data_loader()
# X_train, X_test = dataset.get_X(format=pd.DataFrame)
# Y_train, Y_test = dataset.get_y(format=pd.Series)
# Z_train, Z_test = dataset.get_sensitive_features('race', format=pd.Series)
# self.X_train_ = X_train
# self.Y_train_ = Y_train
# self.Z_train_ = (Z_train != 'African-American').astype(float)
# self.X_test_ = X_test
# self.Y_test_ = Y_test
# self.Z_test_ = (Z_test != 'African-American').astype(float)
def get_compas_data(self):
dataset = datasets['compas']()
ds = data_loader.Compas()
ds.split_test()
X, Y, A = ds.get_log_data()
X_test, Y_test, A_test = ds.get_test_data()
self.X_train_ = X
self.Y_train_ = Y
self.Z_train_ = A
self.X_test_ = X_test
self.Y_test_ = Y_test
self.Z_test_ = A_test
def get_credit_default_data(self):
rawdata = pd.read_excel('./data/credit_card/default_clients.xls', header=1)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
columns = list(rawdata.columns)
categ_cols = []
for column in columns:
if 2 < len(set(rawdata[column])) < 10:
categ_cols.append((column, len(set(rawdata[column]))))
preproc_data = copy.deepcopy(rawdata)
for categ_col, n_items in categ_cols:
for i in range(n_items):
preproc_data[categ_col + str(i)] = (preproc_data[categ_col] == i).astype(float)
preproc_data = preproc_data.drop(['EDUCATION', 'MARRIAGE'], axis=1)
X = preproc_data.drop(['ID', 'SEX', 'default payment next month'], axis=1)
Y = preproc_data['default payment next month']
Z = 2 - preproc_data['SEX']
self.X_train_ = X.loc[list(range(24000)), :]
self.Y_train_ = Y.loc[list(range(24000))]
self.Z_train_ = Z.loc[list(range(24000))]
self.X_test_ = X.loc[list(range(24000,30000)), :]
self.Y_test_ = Y.loc[list(range(24000,30000))]
self.Z_test_ = Z.loc[list(range(24000,30000))]
def get_lawschool_data(self):
rawdata = pd.read_sas('./data/lawschool/lawschs1_1.sas7bdat')
rawdata = rawdata.drop(['college', 'Year', 'URM', 'enroll'], axis=1)
rawdata = rawdata.dropna(axis=0)
rawdata = rawdata.sample(frac=1.0, random_state=12345678).reset_index(drop=True)
X = rawdata[['LSAT', 'GPA', 'Gender', 'resident']]
Y = rawdata['admit']
Z = rawdata['White']
self.X_train_ = X.loc[list(range(77267)), :]
self.Y_train_ = Y.loc[list(range(77267))]
self.Z_train_ = Z.loc[list(range(77267))]
self.X_test_ = X.loc[list(range(77267,96584)), :]
self.Y_test_ = Y.loc[list(range(77267,96584))]
self.Z_test_ = Z.loc[list(range(77267,96584))]
def get_moon_data(self):
n_train = 10000
n_test = 5000
X, Y = make_moons(n_samples=n_train+n_test, noise=0.2, random_state=0)
Z = np.zeros_like(Y)
np.random.seed(0)
for i in range(n_train + n_test):
if Y[i] == 0:
if -0.734 < X[i][0] < 0.734:
Z[i] = np.random.binomial(1, 0.90)
else:
Z[i] = np.random.binomial(1, 0.35)
elif Y[i] == 1:
if 0.262 < X[i][0] < 1.734:
Z[i] = np.random.binomial(1, 0.55)
else:
Z[i] = np.random.binomial(1, 0.10)
X = pd.DataFrame(X, columns=['x_1', 'x_2'])
Y = pd.Series(Y, name='label')
Z = pd.Series(Z, name='sensitive attribute')
self.X_train_ = X.loc[list(range(10000)), :]
self.Y_train_ = Y.loc[list(range(10000))]
self.Z_train_ = Z.loc[list(range(10000))]
self.X_test_ = X.loc[list(range(10000,15000)), :]
self.Y_test_ = Y.loc[list(range(10000,15000))]
self.Z_test_ = Z.loc[list(range(10000,15000))]
def prepare_ndarray(self):
self.normalized = False
self.X_train = self.X_train_.to_numpy(dtype=np.float64)
self.Y_train = self.Y_train_.to_numpy(dtype=np.float64)
self.Z_train = self.Z_train_.to_numpy(dtype=np.float64)
self.XZ_train = np.concatenate([self.X_train, self.Z_train.reshape(-1,1)], axis=1)
self.X_test = self.X_test_.to_numpy(dtype=np.float64)
self.Y_test = self.Y_test_.to_numpy(dtype=np.float64)
self.Z_test = self.Z_test_.to_numpy(dtype=np.float64)
self.XZ_test = np.concatenate([self.X_test, self.Z_test.reshape(-1,1)], axis=1)
self.sensitive_attrs = sorted(list(set(self.Z_train)))
return None
def normalize(self):
self.normalized = True
scaler_XZ = StandardScaler()
self.XZ_train = scaler_XZ.fit_transform(self.XZ_train)
self.XZ_test = scaler_XZ.transform(self.XZ_test)
scaler_X = StandardScaler()
self.X_train = scaler_X.fit_transform(self.X_train)
self.X_test = scaler_X.transform(self.X_test)
return None
def get_dataset_in_ndarray(self):
return (self.X_train, self.Y_train, self.Z_train, self.XZ_train),\
(self.X_test, self.Y_test, self.Z_test, self.XZ_test)
def get_dataset_in_tensor(self, validation=False, val_portion=.0):
X_train_, Y_train_, Z_train_, XZ_train_ = arrays_to_tensor(
self.X_train, self.Y_train, self.Z_train, self.XZ_train, self.device)
X_test_, Y_test_, Z_test_, XZ_test_ = arrays_to_tensor(
self.X_test, self.Y_test, self.Z_test, self.XZ_test, self.device)
return (X_train_, Y_train_, Z_train_, XZ_train_),\
(X_test_, Y_test_, Z_test_, XZ_test_) | 11,650 | 40.462633 | 159 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/data_loader_or.py | # data_loader.py
# utilities for loading data
import torch
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm import tqdm
from load_data import *
# TODO: possibly some form of (cross) validation
def to_tensor(data, device):
D = data
if type(data) == pd.core.frame.DataFrame:
D = data.to_numpy()
if type(D) == np.ndarray:
return torch.tensor(D, device=device).float()
elif type(D) == torch.Tensor:
return D.to(device).float()
else:
raise NotImplementedError('Currently only Torch Tensors, Numpy NDArrays and Pandas Dataframes are supported')
class DataLoader:
def __init__(self, X, Y, A, use_tensor=True, device='cpu', info='No Info Available'):
self.device = device
self.use_tensor = use_tensor
self.X = to_tensor(X, device) if use_tensor else X
self.A = to_tensor(A, device) if use_tensor else A
self.Y = to_tensor(Y, device) if use_tensor else Y
self.X_test = None
self.A_test = None
self.Y_test = None
self.info = info
def get_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_data_for_A(self, a):
# get dataset but only for samples with attribute a
X_a = self.X[(self.A==a).squeeze()]
Y_a = self.Y[(self.A==a).squeeze()]
return (X_a, Y_a)
# def stratified_batch_generator(self, n_samples, n_iterates):
# # get propoertions of protected attribute
# p_A1 = self.A.mean()
# p_A0 = 1-p_A1
# # build index set of protected and unprotected attribute
# ind_A1 = (self.A==1).nonzero()[:,0]
# ind_A0 = (self.A==0).nonzero()[:,0]
# # number of samples to sample from each distribution
# n_batch_1 = int(p_A1*n_samples)
# n_batch_0 = int(p_A0*n_samples)
# replacement = False
# for _ in range(n_iterates):
# # sample indexes for protected and unprotected class
# batch_idx1 = ind_A1[(torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
# num_samples=n_batch_1,
# replacement=replacement)]
# batch_idx0 = ind_A0[(torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
# num_samples=n_batch_0,
# replacement=replacement)]
# yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
# torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
# torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def stratified_batch_generator_worep(self, batch_size=32, n_epochs=100):
# get propoertions of protected attribute
# n_epochs = 100
p_A1 = self.A.mean()
p_A0 = 1 - p_A1
# print(p_A0)
total_samples = self.A.shape[0]
# batch_size = 32
# build index set of protected and unprotected attribute
# number of samples to sample from each distribution
n_batch_1 = int(p_A1*batch_size)
n_batch_0 = int(p_A0*batch_size)
for epoch in tqdm(range(n_epochs)):
# print(epoch)
ind_A1 = (self.A==1).nonzero()[:,0]
ind_A0 = (self.A==0).nonzero()[:,0]
for _ in range(0, total_samples - batch_size + 1, batch_size):
# sample indexes for protected and unprotected class
sampled_indices_A1 = (torch.ones(ind_A1.shape[0]) / (ind_A1.shape[0])).multinomial(
num_samples=n_batch_1,
replacement=False)
batch_idx1 = ind_A1[sampled_indices_A1]
mask = torch.ones(ind_A1.numel(), dtype=torch.bool)
mask[sampled_indices_A1] = False
ind_A1 = ind_A1[mask]
# print(ind_A1.shape)
sampled_indices_A0 = (torch.ones(ind_A0.shape[0]) / (ind_A0.shape[0])).multinomial(
num_samples=n_batch_0,
replacement=False)
batch_idx0 = ind_A0[sampled_indices_A0]
mask = torch.ones(ind_A0.numel(), dtype=torch.bool)
mask[sampled_indices_A0] = False
ind_A0 = ind_A0[mask]
yield (torch.vstack((self.X[batch_idx0], self.X[batch_idx1])),
torch.vstack((self.Y[batch_idx0], self.Y[batch_idx1])),
torch.vstack((self.A[batch_idx0], self.A[batch_idx1])))
def get_info(self):
return self.info
def split_test(self, **kwargs):
# perform train test split, kwargs for sklearn train-test-split
X_train, X_test, Y_train, Y_test, A_train, A_test = train_test_split(self.X, self.Y, self.A, **kwargs)
self.X = X_train
self.X_test = X_test
self.Y = Y_train
self.Y_test = Y_test
self.A = A_train
self.A_test = A_test
def get_test_data(self):
# get the test dataset
if self.X_test is None:
raise ValueError('Train-Test split has not yet been performed')
return (self.X_test, self.Y_test, self.A_test)
def get_log_data(self):
# get the dataset
return (self.X, self.Y, self.A)
def get_k(self):
return self.X.shape[1]
class CommunitiesCrime(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class CommunitiesCrimeClassification(DataLoader):
# http://archive.ics.uci.edu/ml/datasets/Communities+and+Crime
def __init__(self, **kwargs):
yvar = 'ViolentCrimesPerPop'
avar = 'racepctblack'
# load the data
with open('data/communities.names') as file:
info = file.read()
colnames = [line.split(' ')[1] for line in info.split('\n') if line and line.startswith('@attribute')]
df = pd.read_csv('data/communities.data',
header=None,
names=colnames,
na_values='?')
# process the data
Y = df[[yvar]]
bin_thr = Y.mean()
Y = (Y>= bin_thr).astype(int)
A = (df[[avar]] > df[[avar]].median()).astype(int)
nasum = df.isna().sum()
names = [name for name in nasum[nasum==0].index if name not in [yvar, avar, 'state', 'communityname', 'fold']]
X = df[names]
# init super
super().__init__(X, Y, A, info=info, **kwargs)
class BarPass(DataLoader):
# http://www.seaphe.org/databases.php
def __init__(self, **kwargs):
df = pd.read_sas('data/lawschs1_1.sas7bdat')
drop_cols = ['enroll', 'college', 'Year', 'Race']
df = df[[col for col in df.columns if col not in drop_cols]]
df = df.dropna()
Y = df[['GPA']]
A = df[['White']]
X = df.drop('GPA', axis=1)
info = '''Law School Admissions Data collected by Project SEAPHE, predict GPA,
don\'t discriminate White vs. Non-White\nhttp://www.seaphe.org/databases.php'''
self.first_call = True
super().__init__(X, Y, A, info=info, **kwargs)
def get_data(self):
if self.first_call:
self.Xs, self.Ys, self.As = next(self.stratified_batch_generator_worep(10000, 1))
self.first_call = False
return (self.Xs, self.Ys, self.As)
class StudentPerformance(DataLoader):
# https://archive.ics.uci.edu/ml/datasets/student+performance
def __init__(self, subject = 'Math', **kwargs):
# load data
df = pd.read_csv('data/student/student-{}.csv'.format(subject.lower()[:3]), sep=';')\
# convert the categorical values
categoricals = df.dtypes[df.dtypes==object].index
for attribute in categoricals:
options = df[attribute].unique()
options.sort()
options = options[:-1]
for option in options:
df['{}_{}'.format(attribute, option)] = (df[attribute]==option).astype(int)
df = df.drop(attribute, axis=1)
# extract X A Y
A = df[['sex_F']]
Y = df[['G3']]
X = df.drop(['sex_F', 'G3'], axis=1)
info = '''
Student Performance dataset. Predict Final Grade based on Attributes, don't discriminate against female students.
https://archive.ics.uci.edu/ml/datasets/student+performance
'''
super().__init__(X, Y, A, info=info, **kwargs)
class Compas(DataLoader):
def __init__(self):
X, Y, A = load_compas_data('data/compas/compas-scores-two-years.csv')
info = '''
https://www.kaggle.com/danofer/compass
'''
super().__init__(X, Y[:, None], A[:, None], info=info)
class Synthetic1(DataLoader):
# synthetic data: bias offset
def __init__(self, N, k, delta_intercept = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = delta_intercept+ X_0@theta
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
class Synthetic2(DataLoader):
# synthetic data: bias slope
def __init__(self, N, k, delta_slope = 0.5, **kwargs):
X_0 = torch.normal(mean=0.0, std=torch.ones(int(N/2),k))
X_1 = X_0
theta = torch.normal(mean=2, std=torch.ones(k,1))
Y_0 = X_0@(theta+delta_slope)
Y_1 = X_1@theta
A_0 = torch.zeros(int(N/2),1)
A_1 = torch.ones(N-int(N/2),1)
info = 'Synthetic Data'
X = torch.vstack((X_0, X_1))
Y = torch.vstack((Y_0, Y_1))
A = torch.vstack((A_0, A_1))
super().__init__(np.hstack((X,A)),
Y,
A,
info=info, **kwargs)
def set_seed(seed=0):
torch.manual_seed(seed)
np.random.seed(seed)
| 11,586 | 38.546075 | 121 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/models.py | import torch
import torch.nn as nn
class Classifier(nn.Module):
def __init__(self, n_layers, n_inputs, n_hidden_units):
super(Classifier, self).__init__()
layers = []
if n_layers == 1: # Logistic Regression
layers.append(nn.Linear(n_inputs, 1))
layers.append(nn.Sigmoid())
else:
layers.append(nn.Linear(n_inputs, n_hidden_units))
layers.append(nn.ReLU())
for i in range(n_layers-2):
layers.append(nn.Linear(n_hidden_units, n_hidden_units))
layers.append(nn.ReLU())
layers.append(nn.Linear(n_hidden_units,1))
layers.append(nn.Sigmoid())
self.layers = nn.Sequential(*layers)
def forward(self, x):
x = self.layers(x)
return x | 829 | 33.583333 | 72 | py |
Metrizing-Fairness | Metrizing-Fairness-main/equal_opportunity/Fair_KDE/fair_KDE_.py | # Baseline Fair KDE : https://proceedings.neurips.cc//paper/2020/file/ac3870fcad1cfc367825cda0101eee62-Paper.pdf
import cvxpy as cp
import numpy as np
import argparse
import pandas as pd
import torch
import fairness_metrics
import data_loader
from tqdm import tqdm
from collections import namedtuple
from sklearn.metrics import log_loss
from copy import deepcopy
import os, sys
import time
import random
import matplotlib.pyplot as plt
import torch.optim as optim
from models import Classifier
from dataloader import FairnessDataset
from algorithm import train_fair_classifier
import matplotlib.pyplot as plt
import torch.nn as nn
from torch.utils.data import DataLoader
from dataloader import CustomDataset
from utils import measures_from_Yhat
tau = 0.5
# Approximation of Q-function given by López-Benítez & Casadevall (2011) based on a second-order exponential function & Q(x) = 1- Q(-x):
a = 0.4920
b = 0.2887
c = 1.1893
Q_function = lambda x: torch.exp(-a*x**2 - b*x - c)
def CDF_tau(Yhat, h=0.01, tau=0.5):
m = len(Yhat)
Y_tilde = (tau-Yhat)/h
sum_ = torch.sum(Q_function(Y_tilde[Y_tilde>0])) \
+ torch.sum(1-Q_function(torch.abs(Y_tilde[Y_tilde<0]))) \
+ 0.5*(len(Y_tilde[Y_tilde==0]))
return sum_/m
def Huber_loss(x, delta):
if x.abs() < delta:
return (x ** 2) / 2
return delta * (x.abs() - delta / 2)
# act on experiment parameters:
data_loader.set_seed(0)
gamma_candidates = np.logspace(-2, 2, num=10)
ds = data_loader.Compas()
ds.split_test()
k = ds.get_k()
metrics = {
'statistical_parity' : fairness_metrics.statistical_parity,
'statistical_parity_classification' : fairness_metrics.statistical_parity_classification,
'bounded_group_loss_L1' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L1'),
'bounded_group_loss_L2' : fairness_metrics.bounded_group_loss,
'group_fair_expect' : fairness_metrics.group_fair_expect,
'l1_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=1),
'l2_dist' : lambda y1_hat, y2_hat, y1, y2: fairness_metrics.lp_dist(y1_hat, y2_hat, y1, y2, p=2),
'MSE' : fairness_metrics.MSE,
'MAE' : fairness_metrics.MAE,
'accuracy' : fairness_metrics.accuracy
}
# storage of results
results_train = []
results_test = []
dataset_name = 'COMPAS' # ['Moon', 'Lawschool', 'AdultCensus', 'CreditDefault', 'COMPAS']
##### Which fairness notion to consider (Demographic Parity / Equalized Odds) #####
fairness = 'DP' # ['DP', 'EO']
##### Model specifications #####
n_layers = 2 # [positive integers]
n_hidden_units = 20 # [positive integers]
##### Our algorithm hyperparameters #####
h = 0.1 # Bandwidth hyperparameter in KDE [positive real numbers]
delta = 1.0 # Delta parameter in Huber loss [positive real numbers]
lambda_ = 0.05 # regularization factor of DDP/DEO; Positive real numbers \in [0.0, 1.0]
##### Other training hyperparameters #####
batch_size = 2048
lr = 2e-4
lr_decay = 1.0 # Exponential decay factor of LR scheduler
n_seeds = 5 # Number of random seeds to try
n_epochs = 200
seed = 5
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
##### Whether to enable GPU training or not
device = torch.device('cpu') # or torch.device('cpu')
# Import dataset
# dataset = FairnessDataset(dataset=dataset_name, device=device)
# dataset.normalize()
input_dim = k + 1
net = Classifier(n_layers=n_layers, n_inputs=input_dim, n_hidden_units=n_hidden_units)
net = net.to(device)
# Set an optimizer
optimizer = optim.Adam(net.parameters(), lr=lr)
lr_scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=lr_decay) # None
# X, Y, A = ds.get_data()
# X_test, Y_test, A_test = ds.get_test_data()
# x_train = X.cpu().detach().numpy()
# Y_train = Y.cpu().detach().numpy().flatten()
# a_train = A.cpu().detach().numpy().flatten()
# x_test = X_test.cpu().detach().numpy()
# y_test = Y_test.cpu().detach().numpy().flatten()
# a_test = A_test.cpu().detach().numpy().flatten()
# train_tensors, test_tensors = dataset.get_dataset_in_tensor()
# X_train, Y_train, Z_train, XZ_train = train_tensors
# X_test, Y_test, Z_test, XZ_test = test_tensors
# Retrieve train/test splitted numpy arrays for index=split
# train_arrays, test_arrays = dataset.get_dataset_in_ndarray()
# X_train_np, Y_train_np, Z_train_np, XZ_train_np = train_arrays
# X_test_np, Y_test_np, Z_test_np, XZ_test_np = test_arrays
X_train, Y_train, Z_train = ds.get_data()
X_test, Y_test, Z_test = ds.get_test_data()
XZ_test = torch.cat([X_test, Z_test], 1)
XZ_train = torch.cat([X_train, Z_train], 1)
custom_dataset = CustomDataset(XZ_train, Y_train, Z_train)
if batch_size == 'full':
batch_size_ = XZ_train.shape[0]
elif isinstance(batch_size, int):
batch_size_ = batch_size
generator = DataLoader(custom_dataset, batch_size=batch_size_, shuffle=True)
pi = torch.tensor(np.pi).to(device)
phi = lambda x: torch.exp(-0.5*x**2)/torch.sqrt(2*pi) #normal distribution
# # An empty dataframe for logging experimental results
# df = pd.DataFrame()
# df_ckpt = pd.DataFrame()
loss_function = nn.BCELoss()
costs = []
results_test = []
results_train = []
for epoch in range(n_epochs):
for i, (xz_batch, y_batch, z_batch) in enumerate(generator):
xz_batch, y_batch, z_batch = xz_batch.to(device), y_batch.to(device), z_batch.to(device)
Yhat = net(xz_batch)
Ytilde = torch.round(Yhat.squeeze())
cost = 0
dtheta = 0
m = z_batch.shape[0]
# prediction loss
p_loss = loss_function(Yhat.squeeze(), y_batch.squeeze())
cost += (1 - lambda_) * p_loss
# DP_Constraint
if fairness == 'DP':
Pr_Ytilde1 = CDF_tau(Yhat.detach(), h, tau)
for z in range(1):
Pr_Ytilde1_Z = CDF_tau(Yhat.detach()[z_batch==z],h,tau)
m_z = z_batch[z_batch==z].shape[0]
Delta_z = Pr_Ytilde1_Z-Pr_Ytilde1
Delta_z_grad = torch.dot(phi((tau-Yhat.detach()[z_batch==z])/h).view(-1),
Yhat[z_batch==z].view(-1))/h/m_z
Delta_z_grad -= torch.dot(phi((tau-Yhat.detach())/h).view(-1),
Yhat.view(-1))/h/m
if Delta_z.abs() >= delta:
if Delta_z > 0:
Delta_z_grad *= lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= -lambda_*delta
cost += Delta_z_grad
else:
Delta_z_grad *= lambda_*Delta_z
cost += Delta_z_grad
# EO_Constraint
elif fairness == 'EO':
for y in [0,1]:
Pr_Ytilde1_Y = CDF_tau(Yhat[y_batch==y].detach(),h,tau)
m_y = y_batch[y_batch==y].shape[0]
for z in range(1):
Pr_Ytilde1_ZY = CDF_tau(Yhat[(y_batch==y) & (z_batch==z)].detach(),h,tau)
m_zy = z_batch[(y_batch==y) & (z_batch==z)].shape[0]
Delta_zy = Pr_Ytilde1_ZY-Pr_Ytilde1_Y
Delta_zy_grad = torch.dot(
phi((tau-Yhat[(y_batch==y) & (z_batch==z)].detach())/h).view(-1),
Yhat[(y_batch==y) & (z_batch==z)].view(-1)
)/h/m_zy
Delta_zy_grad -= torch.dot(
phi((tau-Yhat[y_batch==y].detach())/h).view(-1),
Yhat[y_batch==y].view(-1)
)/h/m_y
if Delta_zy.abs() >= delta:
if Delta_zy > 0:
Delta_zy_grad *= lambda_*delta
cost += Delta_zy_grad
else:
Delta_zy_grad *= lambda_*delta
cost += -lambda_*delta*Delta_zy_grad
else:
Delta_zy_grad *= lambda_*Delta_zy
cost += Delta_zy_grad
optimizer.zero_grad()
if (torch.isnan(cost)).any():
continue
cost.backward()
optimizer.step()
costs.append(cost.item())
# Print the cost per 10 batches
if (i + 1) % 10 == 0 or (i + 1) == len(generator):
print('Epoch [{}/{}], Batch [{}/{}], Cost: {:.4f}'.format(epoch+1, n_epochs,
i+1, len(generator),
cost.item()), end='\r')
if lr_scheduler is not None:
lr_scheduler.step()
def predict(XZ):
Y_hat_ = net(XZ)
Y_hat_[Y_hat_>=0.5] = 1
Y_hat_[Y_hat_ < 0.5] = 0
return Y_hat_
# metrics on train set
y_hat = predict(XZ_train).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_train==1]
y_hat_0 = y_hat[Z_train==0]
y_1 = Y_train[Z_train==1]
y_0 = Y_train[Z_train==0]
train_results = {}
for key in metrics.keys():
train_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
# metrics on test set
y_hat = predict(XZ_test).flatten()
y_hat = y_hat.unsqueeze(1)
y_hat_1 = y_hat[Z_test==1]
y_hat_0 = y_hat[Z_test==0]
y_1 = Y_test[Z_test==1]
y_0 = Y_test[Z_test==0]
test_results = {}
for key in metrics.keys():
test_results[key] = metrics[key](y_hat_1, y_hat_0, y_1, y_0).data.item()
train_results['lambda_'] = lambda_
test_results['lambda_'] = lambda_
results_train.append(train_results)
results_test.append(test_results)
# df_train = pd.DataFrame(data=results_train)
# df_test = pd.DataFrame(data=results_test)
# df_train.to_csv('results/{}_zafar_{}_train.csv'.format(args.dataset, 0))
# df_test.to_csv('results/{}_zafar_{}_test.csv'.format(args.dataset, 0)) | 9,960 | 34.830935 | 136 | py |
Metrizing-Fairness | Metrizing-Fairness-main/online_regression/fairness_metrics.py | import torch
import ot
import cvxpy as cp
import numpy as np
"""
% Metrizing Fairness
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
This script provides implementations of the fairness metrics (e.g. energy distance, Sinkhorn divergence, statistical parity)
as well as performance metrics (e.g. MSE, accuracy) of a model mentioned in the paper.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
"""
# +------------------------------------------+
# | Metric 1: Energy Distance |
# +------------------------------------------+
def energy_distance(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*(n1-1))
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*(n2-1)))
def energy_distance_biased(y1, y2):
'''
Compute energy distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Energy distance
'''
n1 = torch.numel(y1)
n2 = torch.numel(y2)
return (2*torch.abs(y1.unsqueeze(0)-y2.unsqueeze(1)).mean()
-torch.abs(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n1*n1)
-torch.abs(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(n2*n2))
def wasserstein_distance(y1, y2):
'''
Compute wasserstein distance between empirical distance y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
wd = torch.multiply(torch.Tensor(ot.emd(ones_1.flatten(), ones_2.flatten(), C_np)), C).sum()
return wd
# +------------------------------------------+
# | Metric 2: Sinkhorn Divergence |
# +------------------------------------------+
def sinkhorn_diver(y1, y2):
'''
Compute type Sinkhorn divergence between empirical distribution y1 and y2, each 1 dimensional
Args:
y1 (torch.Tensor): Samples from Distribution 1
y2 (torch.Tensor): Samples from Distribution 2
Returns:
dist (torch.Tensor): The computed Wasserstein distance
'''
# compute cost matrix
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink12 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y1.unsqueeze(0)-y1.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink11 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
C = torch.sqrt(torch.norm(y2.unsqueeze(0)-y2.unsqueeze(1), dim=2)**2)
C_np = C.data.numpy()
# solve OT problem
ones_1 = np.ones((C_np.shape[0], 1)) / C_np.shape[0]
ones_2 = np.ones((C_np.shape[1], 1)) / C_np.shape[1]
sink22 = torch.multiply(torch.Tensor(ot.sinkhorn(ones_1.flatten(), ones_2.flatten(), C_np, .1)), C).sum()
return (sink12 - 1/2 * sink11 - 1/2 * sink22).sum()
# +------------------------------------------+
# | Metric 3: MMD with RBF Kernel |
# +------------------------------------------+
def MMD_RBF(y1, y2):
n = y1.flatten().shape[0]
m = y2.flatten().shape[0]
def rbf(diff, diagzero=True):
sigma = 0.1
if diagzero:
return torch.exp(((diff*(1-torch.eye(diff.shape[0])))**2)/(2*sigma**2))
else:
return torch.exp((diff**2)/(2*sigma**2))
return (-2*rbf(y1.unsqueeze(0)-y2.unsqueeze(1), diagzero=False).sum()/(n*m)
+rbf(y1.unsqueeze(0)-y1.unsqueeze(1)).sum()/(n*(n-1))
+rbf(y2.unsqueeze(0)-y2.unsqueeze(1)).sum()/(m*(m-1)))
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
diff = torch.tensor(0)
for y_test in torch.hstack((y1_hat,y2_hat)).flatten():
cdf1_y = (y1_hat<=y_test).float().mean()
cdf2_y = (y2_hat<=y_test).float().mean()
if (cdf1_y-cdf2_y).abs()>diff:
diff = (cdf1_y-cdf2_y).abs()
return diff
# +------------------------------------------+
# | Evaluation Metric 2: Bounded Group Loss |
# +------------------------------------------+
def bounded_group_loss(y1_hat, y2_hat, y1, y2, loss='L2'):
'''
Compute fraction in group loss between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between group loss
'''
r1 = y1_hat-y1
r2 = y2_hat-y2
if loss=='L2':
lossf = lambda ra,rb: (ra**2).mean() / (rb**2).mean()
if loss=='L1':
lossf = lambda ra,rb: ra.abs().mean() / rb.abs().mean()
l = lossf(r1,r2)
return l if l<1 else 1/l
# +------------------------------------------+
# | Evaluation Metric 3: |
# | Group Fairness in Expectation |
# +------------------------------------------+
def group_fair_expect(y1_hat, y2_hat, y1, y2):
'''
Compute Group Fairness in Expectation between prediction for different
classes
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): The difference between means
'''
return (y1_hat.mean()-y2_hat.mean()).abs()
# +------------------------------------------+
# | Evaluation Metric 1: Statistical Parity |
# +------------------------------------------+
def statistical_parity_classification(y1_hat, y2_hat, y1, y2):
'''
Compute max statistical imparity. This is equivalent to max
difference in cdf
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): max statistical imparity
'''
return ((y1_hat).sum() / y1_hat.shape[0] - (y2_hat).sum() / y2_hat.shape[0]).abs()
# +------------------------------------------+
# | Evaluation Metric 4: lp distance |
# +------------------------------------------+
def lp_dist(y1_hat, y2_hat, y1, y2, p=1):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
epsilon (torch.Tensor): lp distance
'''
dist = torch.tensor(0.)
ys, idx = torch.hstack((y1_hat,y2_hat)).flatten().sort()
for i in range(ys.shape[0]-1):
cdf1_y = (y1_hat <= ys[i]).float().mean()
cdf2_y = (y2_hat <= ys[i]).float().mean()
dist += ((cdf1_y - cdf2_y).abs() ** p) * (ys[i+1] - ys[i])
return dist**(1/p)
# +------------------------------------------+
# | Reg/Clf Metrics: MSE, MAE, Accuracy |
# +------------------------------------------+
def MSE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MSE (torch.Tensor): mean squared error
'''
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
ys = torch.hstack((y1, y2)).flatten()
return ((ys-yhats)**2).mean()
def MAE(y1_hat, y2_hat, y1, y2):
'''
Compute lp distance.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
MAE (torch.Tensor): mean absolute error
'''
yhats = torch.hstack((y1_hat,y2_hat)).flatten()
ys = torch.hstack((y1,y2)).flatten()
return (ys-yhats).abs().mean()
def accuracy(y1_hat, y2_hat, y1, y2):
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
total = ys.size(0)
correct = (yhats == ys).sum().item()
return torch.tensor(correct / total * 100)
def R2(y1_hat, y2_hat, y1, y2):
'''
Compute regression R2.
Args:
y1_hat (torch.Tensor): Predictions for protected class 1
y2_hat (torch.Tensor): Predictions for protected class 2
y1 (torch.Tensor): True Value for protected class 1
y2 (torch.Tensor): True Value for protected class 2
Returns:
R2 (torch.Tensor): mean squared error
'''
ys = torch.hstack((y1,y2)).flatten()
yhats = torch.hstack((y1_hat, y2_hat)).flatten()
var_y = torch.var(ys, unbiased=False)
return 1.0 - torch.nn.MSELoss(reduction="mean")(yhats, ys) / var_y
| 11,133 | 34.012579 | 133 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.