| repo (stringlengths 2-99) | file (stringlengths 13-225) | code (stringlengths 0-18.3M) | file_length (int64, 0-18.3M) | avg_line_length (float64, 0-1.36M) | max_line_length (int64, 0-4.26M) | extension_type (stringclasses, 1 value) |
|---|---|---|---|---|---|---|
| Tim-TSENet | Tim-TSENet-main/TSENET/create_scp_light.py |
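# create_scp_light.py: builds .scp index files (one "<wav filename> <full path>" entry
# per line) for a reduced "light" subset of the ESC mixtures. Per directory, a random
# 10% of the mixture files is kept; the matching *_lab.wav (target) and *_re.wav
# (reference) files are written to the s1 and re lists alongside the mixture list.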
import os
from random import shuffle
train_mix_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tr_mix.scp'
train_s1_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tr_s1.scp'
train_re_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tr_re.scp'
test_mix_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tt_mix.scp'
test_s1_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tt_s1.scp'
test_re_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tt_re.scp'
val_mix_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/val_mix.scp'
val_s1_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/val_s1.scp'
val_re_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/val_re.scp'
train_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/train'
test_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/test'
vl_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/val'
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_re = open(train_re_scp,'w')
for root, dirs, files in os.walk(train_mix):
    bags = []
    for file in files:
        if 'lab.wav' not in file and 're.wav' not in file:
            bags.append(file)
    leng = len(bags) // 10
    shuffle(bags)
    bags = bags[:leng]
    bags.sort()
    for file in bags:
        tr_s1.write(file.split('.wav')[0] + "_lab.wav " + root + '/' + file.split('.wav')[0] + "_lab.wav")
        tr_s1.write('\n')
        tr_re.write(file.split('.wav')[0] + "_re.wav " + root + '/' + file.split('.wav')[0] + "_re.wav")
        tr_re.write('\n')
        tr_mix.write(file + " " + root + '/' + file)
        tr_mix.write('\n')
tr_mix.close()
tr_s1.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_re = open(test_re_scp,'w')
for root, dirs, files in os.walk(test_mix):
    files.sort()
    for file in files:
        if 'lab.wav' in file:
            tt_s1.write(file+" "+root+'/'+file)
            tt_s1.write('\n')
        elif 're.wav' in file:
            tt_re.write(file + " " + root + '/' + file)
            tt_re.write('\n')
        else:
            tt_mix.write(file + " " + root + '/' + file)
            tt_mix.write('\n')
tt_mix.close()
tt_s1.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_re = open(val_re_scp,'w')
for root, dirs, files in os.walk(vl_mix):
    files.sort()
    for file in files:
        if 'lab.wav' in file:
            val_s1.write(file+" "+root+'/'+file)
            val_s1.write('\n')
        elif 're.wav' in file:
            val_re.write(file + " " + root + '/' + file)
            val_re.write('\n')
        else:
            val_mix.write(file + " " + root + '/' + file)
            val_mix.write('\n')
val_mix.close()
val_s1.close()
val_re.close()
| 2,844 | 34.123457 | 106 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/create_class_dict.py |
#
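# create_class_dict.py: scans the tab-separated train/test/val annotation files,
# collects the event-class labels they contain, and compares them with the hard-coded
# event_ls_train list below. Note the bare `assert 1==2` after the test-split loop,
# which halts the script there (debugging scaffold left in place).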
event_ls_train = ['Snare_drum,Drum,Percussion,Musical_instrument,Music', 'Thump_and_thud',
'Writing,Domestic_sounds_and_home_sounds', 'Keys_jangling,Domestic_sounds_and_home_sounds',
'Scratching_(performance_technique),Musical_instrument,Music', 'Organ,Keyboard_(musical),Musical_instrument,Music', 'Liquid',
'Bass_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music',
'Bowed_string_instrument,Musical_instrument,Music', 'Tools', 'Breathing,Respiratory_sounds', 'Laughter,Human_voice', 'Fire', 'Harmonica,Musical_instrument,Music', 'Bird,Wild_animals,Animal',
'Bell', 'Cutlery_and_silverware,Domestic_sounds_and_home_sounds', 'Burping_and_eructation', 'Whoosh_and_swoosh_and_swish', 'Shatter,Glass', 'Cat,Domestic_animals_and_pets,Animal', 'Vehicle', 'Wind_instrument_and_woodwind_instrument,Musical_instrument,Music', 'Hi-hat,Cymbal,Percussion,Musical_instrument,Music',
'Cowbell,Bell', 'Engine', 'Bicycle,Vehicle', 'Camera,Mechanisms', 'Marimba_and_xylophone,Mallet_percussion,Percussion,Musical_instrument,Music',
'Dog,Domestic_animals_and_pets,Animal', 'Subway_and_metro_and_underground,Rail_transport,Vehicle',
'Trumpet,Brass_instrument,Musical_instrument,Music', 'Explosion',
'Vehicle_horn_and_car_horn_and_honking,Car,Motor_vehicle_(road),Vehicle,Alarm',
'Fireworks,Explosion', 'Speech,Human_voice', 'Door,Domestic_sounds_and_home_sounds',
'Meow,Cat,Domestic_animals_and_pets,Animal', 'Guitar,Plucked_string_instrument,Musical_instrument,Music',
'Chuckle_and_chortle,Laughter,Human_voice', 'Livestock_and_farm_animals_and_working_animals,Animal',
'Mechanisms', 'Crushing', 'Clapping,Hands', 'Toilet_flush,Domestic_sounds_and_home_sounds',
'Telephone,Alarm', 'Glockenspiel,Mallet_percussion,Percussion,Musical_instrument,Music',
'Splash_and_splatter,Liquid', 'Human_voice', 'Computer_keyboard,Typing,Domestic_sounds_and_home_sounds',
'Slam,Door,Domestic_sounds_and_home_sounds', 'Piano,Keyboard_(musical),Musical_instrument,Music',
'Brass_instrument,Musical_instrument,Music', 'Sink_(filling_or_washing),Domestic_sounds_and_home_sounds',
'Drip,Liquid', 'Sawing,Tools', 'Tearing', 'Wind', 'Drum,Percussion,Musical_instrument,Music',
'Drum_kit,Percussion,Musical_instrument,Music', 'Singing,Human_voice', 'Scissors,Domestic_sounds_and_home_sounds', 'Microwave_oven,Domestic_sounds_and_home_sounds', 'Zipper_(clothing),Domestic_sounds_and_home_sounds', 'Squeak', 'Typing,Domestic_sounds_and_home_sounds', 'Gasp,Breathing,Respiratory_sounds', 'Bass_drum,Drum,Percussion,Musical_instrument,Music', 'Musical_instrument,Music', 'Accelerating_and_revving_and_vroom,Engine', 'Giggle,Laughter,Human_voice', 'Boom,Explosion', 'Boiling,Liquid', 'Gunshot_and_gunfire,Explosion',
'Walk_and_footsteps', 'Crying_and_sobbing,Human_voice', 'Alarm', 'Screaming,Human_voice',
'Keyboard_(musical),Musical_instrument,Music', 'Fart', 'Yell,Shout,Human_voice', 'Doorbell,Door,Domestic_sounds_and_home_sounds,Alarm',
'Coin_(dropping),Domestic_sounds_and_home_sounds', 'Tambourine,Percussion,Musical_instrument,Music',
'Chewing_and_mastication', 'Bird_vocalization_and_bird_call_and_bird_song,Bird,Wild_animals,Animal',
'Thunder,Thunderstorm', 'Crack', 'Cough,Respiratory_sounds', 'Crackle', 'Mallet_percussion,Percussion,Musical_instrument,Music',
'Sneeze,Respiratory_sounds', 'Run', 'Male_speech_and_man_speaking,Speech,Human_voice', 'Finger_snapping,Hands', 'Tap', 'Drill,Power_tool,Tools', 'Chirp_and_tweet,Bird_vocalization_and_bird_call_and_bird_song,Bird,Wild_animals,Animal', 'Rattle_(instrument),Percussion,Musical_instrument,Music', 'Crowd,Human_group_actions', 'Ringtone,Telephone,Alarm', 'Printer,Mechanisms', 'Chime,Bell', 'Insect,Wild_animals,Animal', 'Harp,Musical_instrument,Music', 'Electric_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music', 'Cricket,Insect,Wild_animals,Animal', 'Crash_cymbal,Cymbal,Percussion,Musical_instrument,Music', 'Cupboard_open_or_close,Domestic_sounds_and_home_sounds', 'Glass', 'Chink_and_clink,Glass', 'Fill_(with_liquid),Liquid', 'Knock,Door,Domestic_sounds_and_home_sounds', 'Motor_vehicle_(road),Siren,Vehicle,Alarm', 'Car,Motor_vehicle_(road),Vehicle', 'Hiss', 'Acoustic_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music', 'Crumpling_and_crinkling', 'Dishes_and_pots_and_pans,Domestic_sounds_and_home_sounds', 'Chicken_and_rooster,Fowl,Livestock_and_farm_animals_and_working_animals,Animal',
'Domestic_sounds_and_home_sounds', 'Drawer_open_or_close,Domestic_sounds_and_home_sounds', 'Sliding_door,Door,Domestic_sounds_and_home_sounds', 'Water_tap_and_faucet,Domestic_sounds_and_home_sounds',
'Plucked_string_instrument,Musical_instrument,Music', 'Packing_tape_and_duct_tape,Domestic_sounds_and_home_sounds', 'Car_passing_by,Car,Motor_vehicle_(road),Vehicle',
'Gong,Percussion,Musical_instrument,Music', 'Wind_chime,Chime,Bell', 'Fowl,Livestock_and_farm_animals_and_working_animals,Animal', 'Bark,Dog,Domestic_animals_and_pets,Animal',
'Percussion,Musical_instrument,Music', 'Female_speech_and_woman_speaking,Speech,Human_voice', 'Motorcycle,Motor_vehicle_(road),Vehicle', 'Frying_(food),Domestic_sounds_and_home_sounds',
'Animal', 'Respiratory_sounds', 'Sigh,Human_voice', 'Train,Rail_transport,Vehicle', 'Clock,Mechanisms', 'Screech', 'Tick', 'Hammer,Tools', 'Stream,Water', 'Water', 'Rain,Water', 'Shout,Human_voice', 'Applause,Human_group_actions',
'Crow,Bird,Wild_animals,Animal', 'Cheering,Human_group_actions', 'Engine_starting,Engine', 'Human_group_actions,Shout,Human_voice', 'Female_singing,Singing,Human_voice',
'Gull_and_seagull,Bird,Wild_animals,Animal', 'Hands', 'Rattle', 'Boat_and_Water_vehicle,Vehicle', 'Frog,Wild_animals,Animal', 'Singing,Musical_instrument,Human_voice,Music',
'Idling,Engine', 'Aircraft,Vehicle', 'Bathtub_(filling_or_washing),Domestic_sounds_and_home_sounds',
'Wood', 'Skateboard,Vehicle', 'Growling,Animal', 'Whispering,Human_voice', 'Fixed-wing_aircraft_and_airplane,Aircraft,Vehicle', 'Bicycle_bell,Alarm,Bicycle,Vehicle,Bell', 'Motor_vehicle_(road),Vehicle', 'Truck,Motor_vehicle_(road),Vehicle', 'Raindrop,Rain,Water', 'Waves_and_surf,Ocean,Water', 'Tick-tock,Clock,Mechanisms', 'Purr,Cat,Domestic_animals_and_pets,Animal', 'Wild_animals,Animal', 'Church_bell,Bell', 'Siren,Alarm']
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_data/train.txt'
test_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_data/test.txt'
val_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_data/val.txt'
class_dict_train = []
with open(train_txt, 'r') as f:
    data = f.readlines()
    data.sort()
    for i in range(len(data)):
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        if cls not in class_dict_train:
            class_dict_train.append(cls)
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
    print(len(class_dict_train))
# print(class_dict_train)
# assert 1==2
with open(test_txt, 'r') as f:
    data = f.readlines()
    data.sort()
    for i in range(len(data)):
        #print(data[i])
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        #print(cls)
        # assert 1==2
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
        if cls not in event_ls_train:
            event_ls_train.append(cls)
    print(len(event_ls_train))
    assert 1==2
with open(val_txt, 'r') as f:
    data = f.readlines()
    data.sort()
    for i in range(len(data)):
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
        if cls not in event_ls_train:
            print(cls)
| 8,183 | 89.933333 | 1,143 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/create_scp_debug.py |
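# create_scp_debug.py: writes small debug .scp lists (at most 30 entries per category)
# for the train/test/val and test_offset mixture folders of the Dual-Path-RNN-Pytorch
# setup: *a.wav files feed the s1 (and, for train/test/val, the re) lists, *b.wav files
# the s2 lists, and the remaining wavs the mixture lists.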
import os
train_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_mix.scp'
train_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_s1.scp'
train_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_s2.scp'
train_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tr_re.scp'
test_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_mix.scp'
test_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_s1.scp'
test_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_s2.scp'
test_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tt_re.scp'
val_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_mix.scp'
val_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_s1.scp'
val_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_s2.scp'
val_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/val_re.scp'
test_offset_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_mix.scp'
test_offset_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s1.scp'
test_offset_s2_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s2.scp'
test_offset_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_re.scp'
train_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train'
test_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test'
vl_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val'
test_offset_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test_offset'
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_s2 = open(train_s2_scp,'w')
tr_re = open(train_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(train_mix):
    files.sort()
    for file in files:
        if 'a.wav' in file and num1 < 30:
            tr_s1.write(file+" "+root+'/'+file)
            tr_s1.write('\n')
            num1 += 1
            tr_re.write(file + " " + root + '/' + file)
            tr_re.write('\n')
            num2 += 1
        elif 'b.wav' in file and num3 < 30:
            tr_s2.write(file + " " + root + '/' + file)
            tr_s2.write('\n')
            num3 += 1
        elif num4 < 30:
            tr_mix.write(file + " " + root + '/' + file)
            tr_mix.write('\n')
            num4 += 1
tr_mix.close()
tr_s1.close()
tr_s2.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_s2 = open(test_s2_scp,'w')
tt_re = open(test_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(test_mix):
    files.sort()
    for file in files:
        if 'a.wav' in file and num1 < 30:
            tt_s1.write(file+" "+root+'/'+file)
            tt_s1.write('\n')
            num1 += 1
            tt_re.write(file + " " + root + '/' + file)
            tt_re.write('\n')
            num2 += 1
        elif 'b.wav' in file and num3 < 30:
            tt_s2.write(file + " " + root + '/' + file)
            tt_s2.write('\n')
            num3 += 1
        elif num4 < 30:
            tt_mix.write(file + " " + root + '/' + file)
            tt_mix.write('\n')
            num4 += 1
tt_mix.close()
tt_s1.close()
tt_s2.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_s2 = open(val_s2_scp,'w')
val_re = open(val_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(vl_mix):
    files.sort()
    for file in files:
        if 'a.wav' in file and num1 < 30:
            val_s1.write(file+" "+root+'/'+file)
            val_s1.write('\n')
            num1 += 1
            val_re.write(file + " " + root + '/' + file)
            val_re.write('\n')
            num2 += 1
        elif 'b.wav' in file and num3 < 30:
            val_s2.write(file + " " + root + '/' + file)
            val_s2.write('\n')
            num3 += 1
        elif num4 < 30:
            val_mix.write(file + " " + root + '/' + file)
            val_mix.write('\n')
            num4 += 1
val_mix.close()
val_s1.close()
val_s2.close()
val_re.close()
tto_mix = open(test_offset_mix_scp,'w')
tto_s1 = open(test_offset_s1_scp,'w')
tto_s2 = open(test_offset_s2_scp,'w')
tto_re = open(test_offset_re_scp,'w')
num1 = 0
num2 = 0
num3 = 0
num4 = 0
for root, dirs, files in os.walk(test_offset_mix):
    files.sort()
    for file in files:
        if 'a.wav' in file and num1 < 30:
            tto_s1.write(file+" "+root+'/'+file)
            tto_s1.write('\n')
            num1 += 1
        elif 'b.wav' in file and num2 < 30:
            tto_s2.write(file + " " + root + '/' + file)
            tto_s2.write('\n')
            num2 += 1
        elif 're.wav' in file and num3 < 30:
            tto_re.write(file + " " + root + '/' + file)
            tto_re.write('\n')
            num3 += 1
        elif num4 < 30:
            tto_mix.write(file + " " + root + '/' + file)
            tto_mix.write('\n')
            num4 += 1
tto_mix.close()
tto_s1.close()
tto_s2.close()
tto_re.close()
| 5,313 | 33.506494 | 102 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/create_scp_inf.py |
#
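# create_scp_inf.py: writes "<file> <class> <onset> <offset>" info lists for the urban
# dataset splits from the tab-separated train/test/val.txt annotations. The
# event_ls_train list below enumerates the event-class label strings; it is only
# referenced by the commented-out event_to_id mapping further down.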
event_ls_train = ['Snare_drum,Drum,Percussion,Musical_instrument,Music', 'Thump_and_thud',
'Writing,Domestic_sounds_and_home_sounds', 'Keys_jangling,Domestic_sounds_and_home_sounds',
'Scratching_(performance_technique),Musical_instrument,Music', 'Organ,Keyboard_(musical),Musical_instrument,Music', 'Liquid',
'Bass_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music',
'Bowed_string_instrument,Musical_instrument,Music', 'Tools', 'Breathing,Respiratory_sounds', 'Laughter,Human_voice', 'Fire', 'Harmonica,Musical_instrument,Music', 'Bird,Wild_animals,Animal',
'Bell', 'Cutlery_and_silverware,Domestic_sounds_and_home_sounds', 'Burping_and_eructation', 'Whoosh_and_swoosh_and_swish', 'Shatter,Glass', 'Cat,Domestic_animals_and_pets,Animal', 'Vehicle', 'Wind_instrument_and_woodwind_instrument,Musical_instrument,Music', 'Hi-hat,Cymbal,Percussion,Musical_instrument,Music',
'Cowbell,Bell', 'Engine', 'Bicycle,Vehicle', 'Camera,Mechanisms', 'Marimba_and_xylophone,Mallet_percussion,Percussion,Musical_instrument,Music',
'Dog,Domestic_animals_and_pets,Animal', 'Subway_and_metro_and_underground,Rail_transport,Vehicle',
'Trumpet,Brass_instrument,Musical_instrument,Music', 'Explosion',
'Vehicle_horn_and_car_horn_and_honking,Car,Motor_vehicle_(road),Vehicle,Alarm',
'Fireworks,Explosion', 'Speech,Human_voice', 'Door,Domestic_sounds_and_home_sounds',
'Meow,Cat,Domestic_animals_and_pets,Animal', 'Guitar,Plucked_string_instrument,Musical_instrument,Music',
'Chuckle_and_chortle,Laughter,Human_voice', 'Livestock_and_farm_animals_and_working_animals,Animal',
'Mechanisms', 'Crushing', 'Clapping,Hands', 'Toilet_flush,Domestic_sounds_and_home_sounds',
'Telephone,Alarm', 'Glockenspiel,Mallet_percussion,Percussion,Musical_instrument,Music',
'Splash_and_splatter,Liquid', 'Human_voice', 'Computer_keyboard,Typing,Domestic_sounds_and_home_sounds',
'Slam,Door,Domestic_sounds_and_home_sounds', 'Piano,Keyboard_(musical),Musical_instrument,Music',
'Brass_instrument,Musical_instrument,Music', 'Sink_(filling_or_washing),Domestic_sounds_and_home_sounds',
'Drip,Liquid', 'Sawing,Tools', 'Tearing', 'Wind', 'Drum,Percussion,Musical_instrument,Music',
'Drum_kit,Percussion,Musical_instrument,Music', 'Singing,Human_voice', 'Scissors,Domestic_sounds_and_home_sounds', 'Microwave_oven,Domestic_sounds_and_home_sounds', 'Zipper_(clothing),Domestic_sounds_and_home_sounds', 'Squeak', 'Typing,Domestic_sounds_and_home_sounds', 'Gasp,Breathing,Respiratory_sounds', 'Bass_drum,Drum,Percussion,Musical_instrument,Music', 'Musical_instrument,Music', 'Accelerating_and_revving_and_vroom,Engine', 'Giggle,Laughter,Human_voice', 'Boom,Explosion', 'Boiling,Liquid', 'Gunshot_and_gunfire,Explosion',
'Walk_and_footsteps', 'Crying_and_sobbing,Human_voice', 'Alarm', 'Screaming,Human_voice',
'Keyboard_(musical),Musical_instrument,Music', 'Fart', 'Yell,Shout,Human_voice', 'Doorbell,Door,Domestic_sounds_and_home_sounds,Alarm',
'Coin_(dropping),Domestic_sounds_and_home_sounds', 'Tambourine,Percussion,Musical_instrument,Music',
'Chewing_and_mastication', 'Bird_vocalization_and_bird_call_and_bird_song,Bird,Wild_animals,Animal',
'Thunder,Thunderstorm', 'Crack', 'Cough,Respiratory_sounds', 'Crackle', 'Mallet_percussion,Percussion,Musical_instrument,Music',
'Sneeze,Respiratory_sounds', 'Run', 'Male_speech_and_man_speaking,Speech,Human_voice', 'Finger_snapping,Hands', 'Tap', 'Drill,Power_tool,Tools', 'Chirp_and_tweet,Bird_vocalization_and_bird_call_and_bird_song,Bird,Wild_animals,Animal', 'Rattle_(instrument),Percussion,Musical_instrument,Music', 'Crowd,Human_group_actions', 'Ringtone,Telephone,Alarm', 'Printer,Mechanisms', 'Chime,Bell', 'Insect,Wild_animals,Animal', 'Harp,Musical_instrument,Music', 'Electric_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music', 'Cricket,Insect,Wild_animals,Animal', 'Crash_cymbal,Cymbal,Percussion,Musical_instrument,Music', 'Cupboard_open_or_close,Domestic_sounds_and_home_sounds', 'Glass', 'Chink_and_clink,Glass', 'Fill_(with_liquid),Liquid', 'Knock,Door,Domestic_sounds_and_home_sounds', 'Motor_vehicle_(road),Siren,Vehicle,Alarm', 'Car,Motor_vehicle_(road),Vehicle', 'Hiss', 'Acoustic_guitar,Guitar,Plucked_string_instrument,Musical_instrument,Music', 'Crumpling_and_crinkling', 'Dishes_and_pots_and_pans,Domestic_sounds_and_home_sounds', 'Chicken_and_rooster,Fowl,Livestock_and_farm_animals_and_working_animals,Animal',
'Domestic_sounds_and_home_sounds', 'Drawer_open_or_close,Domestic_sounds_and_home_sounds', 'Sliding_door,Door,Domestic_sounds_and_home_sounds', 'Water_tap_and_faucet,Domestic_sounds_and_home_sounds',
'Plucked_string_instrument,Musical_instrument,Music', 'Packing_tape_and_duct_tape,Domestic_sounds_and_home_sounds', 'Car_passing_by,Car,Motor_vehicle_(road),Vehicle',
'Gong,Percussion,Musical_instrument,Music', 'Wind_chime,Chime,Bell', 'Fowl,Livestock_and_farm_animals_and_working_animals,Animal', 'Bark,Dog,Domestic_animals_and_pets,Animal',
'Percussion,Musical_instrument,Music', 'Female_speech_and_woman_speaking,Speech,Human_voice', 'Motorcycle,Motor_vehicle_(road),Vehicle', 'Frying_(food),Domestic_sounds_and_home_sounds',
'Animal', 'Respiratory_sounds', 'Sigh,Human_voice', 'Train,Rail_transport,Vehicle', 'Clock,Mechanisms', 'Screech', 'Tick', 'Hammer,Tools', 'Stream,Water', 'Water', 'Rain,Water', 'Shout,Human_voice', 'Applause,Human_group_actions',
'Crow,Bird,Wild_animals,Animal', 'Cheering,Human_group_actions', 'Engine_starting,Engine', 'Human_group_actions,Shout,Human_voice', 'Female_singing,Singing,Human_voice',
'Gull_and_seagull,Bird,Wild_animals,Animal', 'Hands', 'Rattle', 'Boat_and_Water_vehicle,Vehicle', 'Frog,Wild_animals,Animal', 'Singing,Musical_instrument,Human_voice,Music',
'Idling,Engine', 'Aircraft,Vehicle', 'Bathtub_(filling_or_washing),Domestic_sounds_and_home_sounds',
'Wood', 'Skateboard,Vehicle', 'Growling,Animal', 'Whispering,Human_voice', 'Fixed-wing_aircraft_and_airplane,Aircraft,Vehicle', 'Bicycle_bell,Alarm,Bicycle,Vehicle,Bell', 'Motor_vehicle_(road),Vehicle', 'Truck,Motor_vehicle_(road),Vehicle', 'Raindrop,Rain,Water', 'Waves_and_surf,Ocean,Water', 'Tick-tock,Clock,Mechanisms', 'Purr,Cat,Domestic_animals_and_pets,Animal', 'Wild_animals,Animal', 'Church_bell,Bell', 'Siren,Alarm']
# print(len(event_ls_train))
# assert 1==2
# event_to_id = {label : i for i, label in enumerate(event_ls_train)}
train_inf_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tr_inf.scp'
test_inf_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tt_inf.scp'
val_inf_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/val_inf.scp'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/train.txt'
test_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/test.txt'
val_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/val.txt'
with open(train_txt, 'r') as f:
    data = f.readlines()
    tr_inf = open(train_inf_scp,'w')
    data.sort()
    for i in range(len(data)):
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        # cls_id = event_to_id[cls]
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
        tr_inf.write(file + " " + cls + " " + onset + " " + offset)
        tr_inf.write('\n')
    tr_inf.close()
with open(test_txt, 'r') as f:
    data = f.readlines()
    tt_inf = open(test_inf_scp,'w')
    data.sort()
    for i in range(len(data)):
        #print(data[i])
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        # cls_id = event_to_id[cls]
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
        tt_inf.write(file + " " + cls + " " + onset + " " + offset)
        tt_inf.write('\n')
    tt_inf.close()
with open(val_txt, 'r') as f:
    data = f.readlines()
    val_inf = open(val_inf_scp,'w')
    data.sort()
    for i in range(len(data)):
        file = data[i].split('\t')[0]
        cls = data[i].split('\t')[7]
        # cls_id = event_to_id[cls]
        onset = data[i].split('\t')[5]
        offset = data[i].split('\t')[6]
        val_inf.write(file + " " + cls + " " + onset + " " + offset)
        val_inf.write('\n')
    val_inf.close()
| 8,702 | 90.610526 | 1,143 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/create_scp_inf_light.py |
#
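# create_scp_inf_light.py: writes "<file> <class> <onset> <offset>" info lists for the
# light ESC subset, keeping only annotation entries whose filename appears in the
# corresponding *_mix.scp produced by create_scp_light.py.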
train_inf_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tr_inf.scp'
test_inf_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tt_inf.scp'
val_inf_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/val_inf.scp'
train_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/train.txt'
test_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/test.txt'
val_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data_new2/val.txt'
train_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tr_mix.scp'
test_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/tt_mix.scp'
val_scp = '/apdcephfs/private_helinwang/tsss/TSENet/scps_light/val_mix.scp'
with open(train_txt, 'r') as f:
    data = f.readlines()
    tr_inf = open(train_inf_scp,'w')
    file_list = []
    line = 0
    lines = open(train_scp, 'r').readlines()
    for l in lines:
        scp_parts = l.strip().split()
        line += 1
        key, value = scp_parts
        file_list.append(key)
    for f in file_list:
        for i in range(len(data)):
            file = data[i].split('\t')[0]
            if f == file:
                cls = data[i].split('\t')[7]
                onset = data[i].split('\t')[5]
                offset = data[i].split('\t')[6]
                tr_inf.write(file + " " + cls + " " + onset + " " + offset)
                tr_inf.write('\n')
    tr_inf.close()
with open(test_txt, 'r') as f:
    data = f.readlines()
    tt_inf = open(test_inf_scp,'w')
    file_list = []
    line = 0
    lines = open(test_scp, 'r').readlines()
    for l in lines:
        scp_parts = l.strip().split()
        line += 1
        key, value = scp_parts
        file_list.append(key)
    for f in file_list:
        for i in range(len(data)):
            file = data[i].split('\t')[0]
            if f == file:
                cls = data[i].split('\t')[7]
                onset = data[i].split('\t')[5]
                offset = data[i].split('\t')[6]
                tt_inf.write(file + " " + cls + " " + onset + " " + offset)
                tt_inf.write('\n')
    tt_inf.close()
with open(val_txt, 'r') as f:
    data = f.readlines()
    val_inf = open(val_inf_scp,'w')
    file_list = []
    line = 0
    lines = open(val_scp, 'r').readlines()
    for l in lines:
        scp_parts = l.strip().split()
        line += 1
        key, value = scp_parts
        file_list.append(key)
    for f in file_list:
        for i in range(len(data)):
            file = data[i].split('\t')[0]
            if f == file:
                cls = data[i].split('\t')[7]
                onset = data[i].split('\t')[5]
                offset = data[i].split('\t')[6]
                val_inf.write(file + " " + cls + " " + onset + " " + offset)
                val_inf.write('\n')
    val_inf.close()
| 2,610 | 29.011494 | 82 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/create_scp.py |
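# create_scp.py: builds train/test/val .scp index files for the urban mixtures;
# *lab.wav files go to the s1 lists, *re.wav files to the reference lists, and all
# remaining wavs to the mixture lists (one "<wav filename> <full path>" entry per line).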
import os
# assert 1==2
train_mix_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tr_mix.scp'
train_s1_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tr_s1.scp'
train_re_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tr_re.scp'
test_mix_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tt_mix.scp'
test_s1_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tt_s1.scp'
test_re_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/tt_re.scp'
val_mix_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/val_mix.scp'
val_s1_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/val_s1.scp'
val_re_scp = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/scps/val_re.scp'
train_mix = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/train'
test_mix = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/test'
vl_mix = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/urban/val'
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_re = open(train_re_scp,'w')
for root, dirs, files in os.walk(train_mix):
    files.sort()
    for file in files:
        if 'lab.wav' in file:
            tr_s1.write(file+" "+root+'/'+file)
            tr_s1.write('\n')
        elif 're.wav' in file:
            tr_re.write(file + " " + root + '/' + file)
            tr_re.write('\n')
        else:
            tr_mix.write(file + " " + root + '/' + file)
            tr_mix.write('\n')
tr_mix.close()
tr_s1.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_re = open(test_re_scp,'w')
for root, dirs, files in os.walk(test_mix):
    files.sort()
    for file in files:
        if 'lab.wav' in file:
            tt_s1.write(file+" "+root+'/'+file)
            tt_s1.write('\n')
        elif 're.wav' in file:
            tt_re.write(file + " " + root + '/' + file)
            tt_re.write('\n')
        else:
            tt_mix.write(file + " " + root + '/' + file)
            tt_mix.write('\n')
tt_mix.close()
tt_s1.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_re = open(val_re_scp,'w')
for root, dirs, files in os.walk(vl_mix):
    files.sort()
    for file in files:
        if 'lab.wav' in file:
            val_s1.write(file+" "+root+'/'+file)
            val_s1.write('\n')
        elif 're.wav' in file:
            val_re.write(file + " " + root + '/' + file)
            val_re.write('\n')
        else:
            val_mix.write(file + " " + root + '/' + file)
            val_mix.write('\n')
val_mix.close()
val_s1.close()
val_re.close()
| 2,717 | 34.763158 | 91 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/draw.py |
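# draw.py: loads a single wav file with torchaudio and saves its raw waveform as a PNG
# with the axes hidden.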
import torchaudio
import matplotlib
import matplotlib.pyplot as plt
[width, height] = matplotlib.rcParams['figure.figsize']
if width < 10:
    matplotlib.rcParams['figure.figsize'] = [width * 2.5, height]
if __name__ == "__main__":
    # filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/train/train_1.wav"
    filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/test_offset/test_offset_10_re.wav"
    waveform, sample_rate = torchaudio.load(filename)
    print("Shape of waveform: {}".format(waveform.size()))
    print("Sample rate of waveform: {}".format(sample_rate))
    plt.figure()
    plt.plot(waveform.t().numpy())
    # plt.title('test_offset_100_mix')
    plt.xticks([])
    plt.yticks([])
    plt.axis('off')
    plt.savefig('test_offset_10_re.png')
| 775 | 32.73913 | 95 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/trainer/trainer_Tasnet.py |
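# trainer_Tasnet.py: training/validation/test driver for the TSENet (Conv-TasNet style)
# target sound extraction model. Each epoch it accumulates the combined SI-SNR /
# spectral / MSE / classification losses from model.loss.get_loss, tracks a weighted
# sisnrI-based metric for model selection and early stopping, saves best/last
# checkpoints, and finally plots the loss/metric curves to PNG files.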
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
import numpy as np
import math
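# time_to_frame maps a time in seconds onto a 624-frame grid spanning the 10 s clips
# (floor for onsets, ceil for offsets), clamped to [0, 623]. get_mask expands per-item
# (onset, offset) pairs into a binary (batch, 257, 624) time-frequency mask, used as
# the ground-truth activity mask passed to the network during test().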
def time_to_frame(tm,st=True):
    radio = 10.0/624
    if st:
        n_fame = tm//radio
    else:
        n_fame = math.ceil(tm/radio)
    if n_fame >= 624:
        n_fame = 623
    if n_fame < 0:
        n_fame = 0
    return n_fame
def get_mask(onset,offset):
    out_mask = np.zeros((onset.shape[0],257,624))
    for i in range(onset.shape[0]):
        st_frame = time_to_frame(onset[i])
        ed_frame = time_to_frame(offset[i])
        st_frame = st_frame.numpy()
        ed_frame = ed_frame.numpy()
        # print('st_t,ed_t ',onset[i],offset[i])
        # print('st_frame,ed_frame',st_frame,ed_frame)
        # assert 1==2
        out_mask[i,:,int(st_frame):int(ed_frame)+1] = 1
    out_mask = torch.from_numpy(out_mask)
    return out_mask
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, TSENet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path'] # training path
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift'] # hop_length?
self.audio_length = opt['datasets']['audio_setting']['audio_length'] # 10
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.weighting_ratio = opt['train']['weighting_ratio'] # 0.3
self.metric_ratio = opt['train']['metirc_ratio'] # 0.5
self.loss_type = opt['train']['loss'] # 15?
if opt['train']['gpuid']:
self.logger.info('Load Nvida GPU .....')
self.device = torch.device('cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.net = TSENet.to(self.device)
self.logger.info('Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
if opt['resume']['state']: # load pre-train?
print(opt['resume']['path'])
# assert 1==2
ckp = torch.load(opt['resume']['path']+'/'+'best.pt', map_location='cpu')
print(ckp.keys())
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.net = TSENet.to(self.device)
self.net.load_state_dict(ckp['model_state_dict'])
self.optimizer = optimizer
self.optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.net = TSENet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.train()
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_loss_spec_all = 0.0
total_loss_mse_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_loss_spec_w = 0.0
total_loss_mse_w = 0.0
total_sisnrI_w = 0.0
total_loss_cls = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, framelab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
self.optimizer.zero_grad()
out, lps, lab, est_cls = self.net(mix, ref, s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_loss_mse_all += loss_mse_all.item()
total_loss_spec_all += loss_spec_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_loss_mse_w += loss_mse_w.item()
total_loss_spec_w += loss_spec_w.item()
total_sisnrI_w += sisnrI_w.item()
total_loss_cls += loss_cls.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.net.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_sisnr_all / num_index,
total_loss_mse_all / num_index,
total_loss_spec_all / num_index,
total_sisnrI_all / num_index,
total_loss_sisnr_w / num_index,
total_loss_mse_w / num_index,
total_loss_spec_w / num_index,
total_sisnrI_w / num_index,
total_loss_cls / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_loss_mse_all = total_loss_mse_all / num_index
total_loss_spec_all = total_loss_spec_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_loss_mse_w = total_loss_mse_w / num_index
total_loss_spec_w = total_loss_spec_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
total_loss_cls = total_loss_cls / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.6e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}, Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_loss_mse_all,
total_loss_spec_all, total_sisnrI_all, total_loss_sisnr_w, total_loss_mse_w,
total_loss_spec_w, total_sisnrI_w, total_loss_cls, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def validation(self, epoch):
self.logger.info(
'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, ' \
'loss_sisnr:{:.3f}, sisnrI:{:.3f}, loss_sisnr_w:{:.3f}, sisnrI_w:{:.3f}' \
', Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
out_mask = get_mask(onset, offset)
out_mask = out_mask.to(self.device).float()
onset = onset.to(self.device)
offset = offset.to(self.device)
# print('onset ',onset)
# print('offset ',offset)
out, lps, lab, est_cls = self.net(mix, ref, s1[0],out_mask) # add true mask
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, ' \
'loss_sisnr:{:.3f}, sisnrI:{:.3f}, loss_sisnr_w:{:.3f}, sisnrI_w:{:.3f}, ' \
'Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def run(self):
train_loss = []
val_loss = []
test_loss = []
train_sisnrI = [] # sisnrI ?
val_sisnrI = []
test_sisnrI = []
train_sisnr = []
val_sisnr = []
test_sisnr = []
train_sisnrI_w = []
val_sisnrI_w= []
test_sisnrI_w = []
train_sisnr_w = []
val_sisnr_w = []
test_sisnr_w = []
train_metric = []
val_metric = []
test_metric = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,v_sisnr,v_sisnr_w,v_sisnrI,v_sisnrI_w= self.validation(self.cur_epoch)
best_loss = v_loss
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
best_metric = self.metric_ratio * best_sisnrI_w + (1. - self.metric_ratio) * best_sisnrI
self.logger.info("Starting epoch from {:d}, metric = {:.4f}, loss = {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}".format(self.cur_epoch, best_metric, best_loss, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss, t_sisnr, t_sisnr_w, t_sisnrI, t_sisnrI_w = self.train(self.cur_epoch)
v_loss, v_sisnr, v_sisnr_w, v_sisnrI, v_sisnrI_w = self.validation(self.cur_epoch)
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.test(self.cur_epoch)
t_metric = self.metric_ratio * t_sisnrI_w + (1. - self.metric_ratio) * t_sisnrI
v_metric = self.metric_ratio * v_sisnrI_w + (1. - self.metric_ratio) * v_sisnrI
tt_metric = self.metric_ratio * tt_sisnrI_w + (1. - self.metric_ratio) * tt_sisnrI
train_metric.append(t_metric)
val_metric.append(v_metric)
test_metric.append(tt_metric)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
train_sisnrI.append(t_sisnrI)
val_sisnrI.append(v_sisnrI)
test_sisnrI.append(tt_sisnrI)
train_sisnr.append(t_sisnr)
val_sisnr.append(v_sisnr)
test_sisnr.append(tt_sisnr)
train_sisnrI_w.append(t_sisnrI_w)
val_sisnrI_w.append(v_sisnrI_w)
test_sisnrI_w.append(tt_sisnrI_w)
train_sisnr_w.append(t_sisnr_w)
val_sisnr_w.append(v_sisnr_w)
test_sisnr_w.append(tt_sisnr_w)
# schedule here
self.scheduler.step()
if v_metric <= best_metric:
no_improve += 1
self.logger.info(
'No improvement, Best metric: {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
else:
best_loss = v_loss
best_metric = v_metric
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Metric Change: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
self.logger.info('Epoch: {:d}, Best Metirc Test: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w: {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, tt_metric, tt_sisnrI, tt_sisnr, tt_sisnrI_w, tt_sisnr_w))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
plt.plot(x, train_sisnrI, 'b-', label=u'train_sisnrI', linewidth=0.8)
plt.plot(x, val_sisnrI, 'c-', label=u'val_sisnrI', linewidth=0.8)
plt.plot(x, test_sisnrI, 'g', label=u'test_sisnrI', linewidth=0.8)
plt.legend()
plt.ylabel('sisnrI')
plt.xlabel('epoch')
plt.savefig('sisnrI.png')
plt.plot(x, train_sisnr, 'b-', label=u'train_sisnr', linewidth=0.8)
plt.plot(x, val_sisnr, 'c-', label=u'val_sisnr', linewidth=0.8)
plt.plot(x, test_sisnr, 'g', label=u'test_sisnr', linewidth=0.8)
plt.legend()
plt.ylabel('sisnr')
plt.xlabel('epoch')
plt.savefig('sisnr.png')
plt.plot(x, train_sisnrI_w, 'b-', label=u'train_sisnrI_w', linewidth=0.8)
plt.plot(x, val_sisnrI_w, 'c-', label=u'val_sisnrI_w', linewidth=0.8)
plt.plot(x, test_sisnrI_w, 'g', label=u'test_sisnrI_w', linewidth=0.8)
plt.legend()
plt.ylabel('sisnrI_w')
plt.xlabel('epoch')
plt.savefig('sisnrI_w.png')
plt.plot(x, train_sisnr_w, 'b-', label=u'train_sisnr_w', linewidth=0.8)
plt.plot(x, val_sisnr_w, 'c-', label=u'val_sisnr_w', linewidth=0.8)
plt.plot(x, test_sisnr_w, 'g', label=u'test_sisnr_w', linewidth=0.8)
plt.legend()
plt.ylabel('sisnr_w')
plt.xlabel('epoch')
plt.savefig('sisnr_w.png')
plt.plot(x, train_metric, 'b-', label=u'train_metric', linewidth=0.8)
plt.plot(x, val_metric, 'c-', label=u'val_metric', linewidth=0.8)
plt.plot(x, test_metric, 'g', label=u'test_metric', linewidth=0.8)
plt.legend()
plt.ylabel('metric')
plt.xlabel('epoch')
plt.savefig('metric.png')
def only_test(self):
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.validation(0)
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.net.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 21,031 | 46.58371 | 259 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/trainer/__init__.py |
from .trainer_Tasnet import *
| 30 | 14.5 | 29 | py |
| Tim-TSENet | Tim-TSENet-main/TSENET/trainer/trainer_Tasnet_one_hot.py |
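# trainer_Tasnet_one_hot.py: variant of trainer_Tasnet.py in which the target class is
# additionally fed to the network as a class index (cls.argmax(1)) and the losses are
# computed with model.loss.get_loss_one_hot; the training/validation/test loops and
# checkpointing logic otherwise mirror trainer_Tasnet.py.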
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from model.loss import get_loss,get_loss_one_hot
import torch
import os
import matplotlib.pyplot as plt
import numpy as np
import math  # needed by time_to_frame (math.ceil); missing from the original imports
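# Same helpers as in trainer_Tasnet.py: time_to_frame maps seconds to a 624-frame grid
# (clamped to [0, 623]) and get_mask builds a binary (batch, 257, 624) mask from the
# (onset, offset) pairs for use as the ground-truth mask at test time.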
def time_to_frame(tm,st=True):
    radio = 10.0/624
    if st:
        n_fame = tm//radio
    else:
        n_fame = math.ceil(tm/radio)
    if n_fame >= 624:
        n_fame = 623
    if n_fame < 0:
        n_fame = 0
    return n_fame
def get_mask(onset,offset):
    out_mask = np.zeros((onset.shape[0],257,624))
    for i in range(onset.shape[0]):
        st_frame = time_to_frame(onset[i])
        ed_frame = time_to_frame(offset[i])
        st_frame = st_frame.numpy()
        ed_frame = ed_frame.numpy()
        # print('st_t,ed_t ',onset[i],offset[i])
        # print('st_frame,ed_frame',st_frame,ed_frame)
        # assert 1==2
        out_mask[i,:,int(st_frame):int(ed_frame)+1] = 1
    out_mask = torch.from_numpy(out_mask)
    return out_mask
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, TSENet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path'] # training path
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift'] # hop_length?
self.audio_length = opt['datasets']['audio_setting']['audio_length'] # 10
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.weighting_ratio = opt['train']['weighting_ratio'] # 0.3
self.metric_ratio = opt['train']['metirc_ratio'] # 0.5
self.loss_type = opt['train']['loss'] # 15?
if opt['train']['gpuid']:
self.logger.info('Load Nvida GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.net = TSENet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.net)))
if opt['resume']['state']: # load pre-train?
ckp = torch.load(opt['resume']['path']+'/'+'best.pt', map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.net = TSENet.to(self.device)
self.net.load_state_dict(ckp['model_state_dict'])
self.optimizer = optimizer
self.optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.net = TSENet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info(
'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.train()
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_loss_spec_all = 0.0
total_loss_mse_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_loss_spec_w = 0.0
total_loss_mse_w = 0.0
total_sisnrI_w = 0.0
total_loss_cls = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, framelab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
self.optimizer.zero_grad()
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_loss_mse_all += loss_mse_all.item()
total_loss_spec_all += loss_spec_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_loss_mse_w += loss_mse_w.item()
total_loss_spec_w += loss_spec_w.item()
total_sisnrI_w += sisnrI_w.item()
total_loss_cls += loss_cls.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.net.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_sisnr_all / num_index,
total_loss_mse_all / num_index,
total_loss_spec_all / num_index,
total_sisnrI_all / num_index,
total_loss_sisnr_w / num_index,
total_loss_mse_w / num_index,
total_loss_spec_w / num_index,
total_sisnrI_w / num_index,
total_loss_cls / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_loss_mse_all = total_loss_mse_all / num_index
total_loss_spec_all = total_loss_spec_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_loss_mse_w = total_loss_mse_w / num_index
total_loss_spec_w = total_loss_spec_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
total_loss_cls = total_loss_cls / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, loss_mse:{:.6f}, loss_spec:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, loss_mse_w:{:.6f}, loss_spec_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'loss_cls:{:.6f}, Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_loss_mse_all,
total_loss_spec_all, total_sisnrI_all, total_loss_sisnr_w, total_loss_mse_w,
total_loss_spec_w, total_sisnrI_w, total_loss_cls, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def validation(self, epoch):
self.logger.info(
'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0])
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, sisnrI_w:{:.6f}' \
', Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.net.eval()
num_index = 1
total_loss = 0.0
total_loss_sisnr_all = 0.0
total_sisnrI_all = 0.0
total_loss_sisnr_w = 0.0
total_sisnrI_w = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, framelab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
out_mask = get_mask(onset, offset)
out_mask = out_mask.to(self.device).float()
onset = onset.to(self.device)
offset = offset.to(self.device)
framelab = framelab.to(self.device)
out, lps, lab, est_cls = self.net(mix, ref, cls_index.long(), s1[0], out_mask)
epoch_loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, \
loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, \
loss_cls = get_loss_one_hot(self.loss_type, out[0], s1[0], mix, lps, lab, est_cls,
cls, onset, offset,
self.nFrameShift, self.sr, self.audio_length, self.weighting_ratio)
total_loss += epoch_loss.item()
total_loss_sisnr_all += loss_sisnr_all.item()
total_sisnrI_all += sisnrI_all.item()
total_loss_sisnr_w += loss_sisnr_w.item()
total_sisnrI_w += sisnrI_w.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_sisnr_all = total_loss_sisnr_all / num_index
total_sisnrI_all = total_sisnrI_all / num_index
total_loss_sisnr_w = total_loss_sisnr_w / num_index
total_sisnrI_w = total_sisnrI_w / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.6f}, ' \
'loss_sisnr:{:.6f}, sisnrI:{:.6f}, loss_sisnr_w:{:.6f}, sisnrI_w:{:.6f}, ' \
'Total time:{:.6f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_sisnr_all, total_sisnrI_all,
total_loss_sisnr_w, total_sisnrI_w, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, -total_loss_sisnr_all, -total_loss_sisnr_w, total_sisnrI_all, total_sisnrI_w
def only_test(self):
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.validation(0)
def run(self):
train_loss = []
val_loss = []
test_loss = []
train_sisnrI = [] # sisnrI ?
val_sisnrI = []
test_sisnrI = []
train_sisnr = []
val_sisnr = []
test_sisnr = []
train_sisnrI_w = []
val_sisnrI_w= []
test_sisnrI_w = []
train_sisnr_w = []
val_sisnr_w = []
test_sisnr_w = []
train_metric = []
val_metric = []
test_metric = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,v_sisnr,v_sisnr_w,v_sisnrI,v_sisnrI_w= self.validation(self.cur_epoch)
best_loss = v_loss
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
best_metric = self.metric_ratio * best_sisnrI_w + (1. - self.metric_ratio) * best_sisnrI
self.logger.info("Starting epoch from {:d}, metric = {:.4f}, loss = {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}".format(self.cur_epoch, best_metric, best_loss, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss, t_sisnr, t_sisnr_w, t_sisnrI, t_sisnrI_w = self.train(self.cur_epoch)
v_loss, v_sisnr, v_sisnr_w, v_sisnrI, v_sisnrI_w = self.validation(self.cur_epoch)
tt_loss, tt_sisnr, tt_sisnr_w, tt_sisnrI, tt_sisnrI_w = self.test(self.cur_epoch)
t_metric = self.metric_ratio * t_sisnrI_w + (1. - self.metric_ratio) * t_sisnrI
v_metric = self.metric_ratio * v_sisnrI_w + (1. - self.metric_ratio) * v_sisnrI
tt_metric = self.metric_ratio * tt_sisnrI_w + (1. - self.metric_ratio) * tt_sisnrI
train_metric.append(t_metric)
val_metric.append(v_metric)
test_metric.append(tt_metric)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
train_sisnrI.append(t_sisnrI)
val_sisnrI.append(v_sisnrI)
test_sisnrI.append(tt_sisnrI)
train_sisnr.append(t_sisnr)
val_sisnr.append(v_sisnr)
test_sisnr.append(tt_sisnr)
train_sisnrI_w.append(t_sisnrI_w)
val_sisnrI_w.append(v_sisnrI_w)
test_sisnrI_w.append(tt_sisnrI_w)
train_sisnr_w.append(t_sisnr_w)
val_sisnr_w.append(v_sisnr_w)
test_sisnr_w.append(tt_sisnr_w)
# schedule here
self.scheduler.step()
if v_metric <= best_metric:
no_improve += 1
self.logger.info(
'No improvement, Best metric: {:.4f}, sisnrI = {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
else:
best_loss = v_loss
best_metric = v_metric
best_sisnrI = v_sisnrI
best_sisnr = v_sisnr
best_sisnrI_w = v_sisnrI_w
best_sisnr_w = v_sisnr_w
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Metric Change: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w = {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, best_metric, best_sisnrI, best_sisnr, best_sisnrI_w, best_sisnr_w))
self.logger.info('Epoch: {:d}, Best Metric Test: {:.4f}, sisnrI: {:.4f}, sisnr = {:.4f}, sisnrI_w: {:.4f}, sisnr_w = {:.4f}'.format(
self.cur_epoch, tt_metric, tt_sisnrI, tt_sisnr, tt_sisnrI_w, tt_sisnr_w))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(len(train_loss))] # one point per epoch completed in this run
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
plt.clf()
plt.plot(x, train_sisnrI, 'b-', label=u'train_sisnrI', linewidth=0.8)
plt.plot(x, val_sisnrI, 'c-', label=u'val_sisnrI', linewidth=0.8)
plt.plot(x, test_sisnrI, 'g', label=u'test_sisnrI', linewidth=0.8)
plt.legend()
plt.ylabel('sisnrI')
plt.xlabel('epoch')
plt.savefig('sisnrI.png')
plt.clf()
plt.plot(x, train_sisnr, 'b-', label=u'train_sisnr', linewidth=0.8)
plt.plot(x, val_sisnr, 'c-', label=u'val_sisnr', linewidth=0.8)
plt.plot(x, test_sisnr, 'g', label=u'test_sisnr', linewidth=0.8)
plt.legend()
plt.ylabel('sisnr')
plt.xlabel('epoch')
plt.savefig('sisnr.png')
plt.clf()
plt.plot(x, train_sisnrI_w, 'b-', label=u'train_sisnrI_w', linewidth=0.8)
plt.plot(x, val_sisnrI_w, 'c-', label=u'val_sisnrI_w', linewidth=0.8)
plt.plot(x, test_sisnrI_w, 'g', label=u'test_sisnrI_w', linewidth=0.8)
plt.legend()
plt.ylabel('sisnrI_w')
plt.xlabel('epoch')
plt.savefig('sisnrI_w.png')
plt.clf()
plt.plot(x, train_sisnr_w, 'b-', label=u'train_sisnr_w', linewidth=0.8)
plt.plot(x, val_sisnr_w, 'c-', label=u'val_sisnr_w', linewidth=0.8)
plt.plot(x, test_sisnr_w, 'g', label=u'test_sisnr_w', linewidth=0.8)
plt.legend()
plt.ylabel('sisnr_w')
plt.xlabel('epoch')
plt.savefig('sisnr_w.png')
plt.clf()
plt.plot(x, train_metric, 'b-', label=u'train_metric', linewidth=0.8)
plt.plot(x, val_metric, 'c-', label=u'val_metric', linewidth=0.8)
plt.plot(x, test_metric, 'g', label=u'test_metric', linewidth=0.8)
plt.legend()
plt.ylabel('metric')
plt.xlabel('epoch')
plt.savefig('metric.png')
def save_checkpoint(self, epoch, best=True):
'''
Save a training checkpoint.
best: if True, save as the best model so far; otherwise save as the last one.
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.net.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
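# A minimal sketch (not part of the original trainer) of how a checkpoint written by
# save_checkpoint() above could be restored; the argument names here are illustrative only.
def load_checkpoint_sketch(net, optimizer, checkpoint_dir, name, best=True):
    cpt_path = os.path.join(checkpoint_dir, name, '{0}.pt'.format('best' if best else 'last'))
    cpt = torch.load(cpt_path, map_location='cpu')
    net.load_state_dict(cpt['model_state_dict'])
    optimizer.load_state_dict(cpt['optim_state_dict'])
    return cpt['epoch']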
| 21,117
| 47.104784
| 259
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/data_loader/AudioData.py
|
import torch.nn.functional as F
from utils import util
import torch
import torchaudio
import sys
sys.path.append('../')
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads Wav format files
Input:
scp_path (str): path of the scp file that lists the audio files
sample_rate (int, optional): sample rate (default: 8000)
chunk_size (int, optional): split audio size (default: 32000(4 s))
least_size (int, optional): Minimum split size (default: 16000(2 s))
Output:
split audio (list)
'''
def __init__(self, scp_path, sample_rate=8000, chunk_size=32000, least_size=16000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = util.handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
self.audio = []
self.chunk_size = chunk_size
self.least_size = least_size
self.split()
def split(self):
'''
split audio with chunk_size and least_size
'''
for key in self.keys:
utt = read_wav(self.index_dict[key])
if utt.shape[0] < self.least_size:
continue
if utt.shape[0] > self.least_size and utt.shape[0] < self.chunk_size:
gap = self.chunk_size-utt.shape[0]
self.audio.append(F.pad(utt, (0, gap), mode='constant'))
if utt.shape[0] >= self.chunk_size:
start = 0
while True:
if start + self.chunk_size > utt.shape[0]:
break
self.audio.append(utt[start:start+self.chunk_size])
start += self.least_size
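# A minimal sketch (not part of the loader) that mirrors the windowing rule of split()
# on a dummy length, to make the chunking concrete: windows of chunk_size samples are
# taken every least_size samples, and any shorter tail is dropped.
def _chunking_example(total_len=50000, chunk_size=32000, least_size=16000):
    starts = []
    start = 0
    while start + chunk_size <= total_len:
        starts.append(start)
        start += least_size
    return starts # [0, 16000] for the default arguments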
if __name__ == "__main__":
a = AudioReader("/home/likai/data1/create_scp/cv_mix.scp")
audio = a.audio
print(len(audio))
| 2,751
| 30.272727
| 87
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/data_loader/Dataset.py
|
import sys
sys.path.append('../')
import torch
from torch.utils.data import DataLoader, Dataset
import torchaudio
from utils.util import handle_scp, handle_scp_inf
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Datasets(Dataset):
'''
Load audio data
mix_scp: file path of mix audio (type: str)
ref_scp: file path of ground truth audio (type: list[spk1,spk2])
inf_scp: file path of the scp with class, onset and offset information (type: str)
'''
def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, sr=16000, cls_num=50, audio_length=10, nFrameShift=256):
super(Datasets, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.s1_audio = handle_scp(s1_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
self.sr = sr
self.cls_num = cls_num # number of sound event classes
self.audio_length = audio_length # audio length in seconds
self.nFrameShift = nFrameShift # frame shift in samples
self.key = list(self.mix_audio.keys()) # mixture audio names
def __len__(self):
return len(self.key)
def __getitem__(self, index):
index = self.key[index] # mixture utterance name
s1_index = index.replace('.wav', '_lab.wav') # clean (target) wav name
ref_index = index.replace('.wav', '_re.wav') # reference wav name
mix = read_wav(self.mix_audio[index])
s1 = read_wav(self.s1_audio[s1_index])
ref = read_wav(self.ref_audio[ref_index])
cls = torch.zeros(self.cls_num) # ready for one-hot
cls[self.clss[index]] = 1. #
onset = self.onsets[index] # get onset time
offset = self.offsets[index] # get offset time
max_frame = self.sr * self.audio_length // self.nFrameShift - 2 # maximum frame index
onset_frame = round(onset * (self.sr // self.nFrameShift - 1)) if round(onset * (self.sr // self.nFrameShift - 1)) >= 0 else 0
# convert onset/offset times (s) to frame indices
offset_frame = round(offset * (self.sr // self.nFrameShift - 1)) if round(
offset * (self.sr // self.nFrameShift - 1)) < max_frame else max_frame
framelab = torch.zeros(max_frame + 1) # frame-level label
for i in range(onset_frame, offset_frame + 1):
framelab[i] = 1.
return mix, s1, ref, cls, onset, offset, framelab
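# A small sketch (illustrative only) of the onset/offset-to-frame mapping used in
# __getitem__ above, assuming sr=16000, nFrameShift=256 and audio_length=10 s.
def _frame_label_example(onset=1.5, offset=3.0, sr=16000, nFrameShift=256, audio_length=10):
    max_frame = sr * audio_length // nFrameShift - 2 # 623 for the assumed settings
    fps = sr // nFrameShift - 1 # 61 frame positions per second
    onset_frame = max(round(onset * fps), 0)
    offset_frame = min(round(offset * fps), max_frame)
    return onset_frame, offset_frame # (92, 183): frames 92..183 get label 1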
if __name__ == "__main__":
datasets = Datasets("/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp",
"/apdcephfs/share_1316500/donchaoyang/tsss/Dual-Path-RNN-Pytorch/scps/tr_inf.scp",
16000,
50,
8,
256)
print(datasets.key)
| 3,439
| 39.952381
| 134
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/data_loader/__init__.py
|
from .AudioData import *
from .Dataset import *
| 47
| 23
| 24
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/data_loader/AudioReader.py
|
import sys
sys.path.append('../')
import torchaudio
import torch
from utils.util import handle_scp
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads wav format files.
Input: the path of an scp file.
Output: iterates over (key, waveform) pairs for every entry in the scp file.
'''
def __init__(self, scp_path, sample_rate=8000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
def _load(self, key):
src, sr = read_wav(self.index_dict[key], return_rate=True)
if self.sample_rate is not None and sr != self.sample_rate:
raise RuntimeError('SampleRate mismatch: {:d} vs {:d}'.format(
sr, self.sample_rate))
return src
def __len__(self):
return len(self.keys)
def __iter__(self):
for key in self.keys:
yield key, self._load(key)
def __getitem__(self, index):
if type(index) not in [int, str]:
raise IndexError('Unsupported index type: {}'.format(type(index)))
if type(index) == int:
num_uttrs = len(self.keys)
if index >= num_uttrs or index < 0:
raise KeyError('Integer index out of range, {:d} vs {:d}'.format(
index, num_uttrs))
index = self.keys[index]
if index not in self.index_dict:
raise KeyError("Missing utterance {}!".format(index))
return self._load(index)
if __name__ == "__main__":
r = AudioReader('/home/likai/data1/create_scp/cv_s2.scp')
index = 0
print(r[1])
| 2,556
| 28.732558
| 82
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/config/option.py
|
import yaml
def parse(opt_path):
with open(opt_path, mode='r') as f:
opt = yaml.load(f,Loader=yaml.FullLoader)
opt['resume']['path'] = opt['resume']['path']+'/'+opt['name']
opt['logger']['path'] = opt['logger']['path']+'/'+opt['name']
return opt
if __name__ == "__main__":
parse('train.yml')
| 327
| 24.230769
| 65
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/config/__init__.py
|
from .option import *
| 21
| 21
| 21
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/logger/__init__.py
|
from .set_logger import *
| 25
| 25
| 25
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/logger/set_logger.py
|
import logging
from datetime import datetime
import os
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def setup_logger(logger_name, root, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s [%(pathname)s:%(lineno)s - %(levelname)s ] %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
os.makedirs(root,exist_ok=True)
if tofile:
log_file = os.path.join(root, '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
if __name__ == "__main__":
setup_logger('base','root',level=logging.INFO,screen=True, tofile=False)
logger = logging.getLogger('base')
logger.info('hello')
| 990
| 32.033333
| 103
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/utils/util.py
|
import torch
import torch.nn as nn
def handle_scp(scp_path):
'''
Read scp file script
input:
scp_path: .scp file's file path
output:
scp_dict: {'key':'wave file path'}
'''
scp_dict = dict()
line = 0
lines = open(scp_path, 'r').readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
if len(scp_parts) != 2:
raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
scp_path, line, scp_parts))
if len(scp_parts) == 2:
key, value = scp_parts
if key in scp_dict:
raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
key, scp_path))
scp_dict[key] = value
return scp_dict
def handle_scp_inf(scp_path):
'''
Read information scp file script
input:
scp_path: .scp file's file path
output:
scp_dict: {'key':'wave file path'}
'''
scp_dict_cls = dict()
scp_dict_onset = dict()
scp_dict_offset = dict()
line = 0
lines = open(scp_path, 'r').readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
if len(scp_parts) != 4:
raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
scp_path, line, scp_parts))
if len(scp_parts) == 4:
key, cls, onset, offset = scp_parts
if key in scp_dict_cls:
raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
key, scp_path))
scp_dict_cls[key] = int(cls)
scp_dict_onset[key] = float(onset)
scp_dict_offset[key] = float(offset)
return scp_dict_cls, scp_dict_onset, scp_dict_offset
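# Example lines for the two scp formats parsed above (paths and values are placeholders):
# handle_scp expects "<key> <wav path>" per line, e.g.
# mix_0001.wav /data/tsss/train/mix_0001.wav
# handle_scp_inf expects "<key> <class id> <onset s> <offset s>" per line, e.g.
# mix_0001.wav 12 1.50 3.75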
def check_parameters(net):
'''
Returns the number of module parameters, in millions.
'''
parameters = sum(param.numel() for param in net.parameters())
return parameters / 10**6
| 1,932
| 26.225352
| 79
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/utils/__init__.py
|
from .util import *
| 19
| 19
| 19
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/model_t.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
input shape from an expected input of size
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__class__.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x-mean)**2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight*(x-mean)/torch.sqrt(var+self.eps)+self.bias
else:
x = (x-mean)/torch.sqrt(var+self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
if norm not in ['gln', 'cln', 'bn']:
raise RuntimeError("Unsupported normalization type: {}".format(norm))
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
else:
return nn.BatchNorm1d(dim)
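# A quick usage sketch (assumed shapes, not part of the model): every norm choice
# returned by select_norm keeps the (N, C, L) shape of its input.
def _norm_shape_example():
    x = torch.randn(2, 64, 100) # N x C x L
    return select_norm('gln', 64)(x).shape # torch.Size([2, 64, 100])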
class Conv1D_e(nn.Module):
'''
Build the Conv1D structure
causal: if True is causal setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D_e, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Conv1D(nn.Conv1d):
'''
Applies a 1D convolution over an input signal composed of several input planes.
'''
def __init__(self, *args, **kwargs):
super(Conv1D, self).__init__(*args, **kwargs)
def forward(self, x, squeeze=False):
# x: N x C x L
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 2/3D tensor as input".format(
self.__class__.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if squeeze:
x = torch.squeeze(x)
return x
class ConvTrans1D(nn.ConvTranspose1d):
'''
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution
or a deconvolution (although it is not an actual deconvolution operation).
'''
def __init__(self, *args, **kwargs):
super(ConvTrans1D, self).__init__(*args, **kwargs)
def forward(self, x, squeeze=False):
"""
x: N x L or N x C x L
"""
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 2/3D tensor as input".format(
self.__class__.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if squeeze:
x = torch.squeeze(x)
return x
class Conv1D_Block(nn.Module):
'''
Consider only residual links
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D_Block, self).__init__()
# conv 1 x 1
self.conv1x1 = Conv1D(in_channels, out_channels, 1)
self.PReLU_1 = nn.PReLU()
self.norm_1 = select_norm(norm, out_channels)
# not causal don't need to padding, causal need to pad+1 = kernel_size
self.pad = (dilation * (kernel_size - 1)) // 2 if not causal else (
dilation * (kernel_size - 1))
# depthwise convolution
self.dwconv = Conv1D(out_channels, out_channels, kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLU_2 = nn.PReLU()
self.norm_2 = select_norm(norm, out_channels)
self.Sc_conv = nn.Conv1d(out_channels, in_channels, 1, bias=True)
self.causal = causal
def forward(self, x):
# x: N x C x L
# N x O_C x L
c = self.conv1x1(x)
# N x O_C x L
c = self.PReLU_1(c)
c = self.norm_1(c)
# causal: N x O_C x (L+pad)
# noncausal: N x O_C x L
c = self.dwconv(c)
# N x O_C x L
if self.causal:
c = c[:, :, :-self.pad]
c = self.PReLU_2(c)
c = self.norm_2(c)
c = self.Sc_conv(c)
return x+c
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
causal: if True is causal setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.usingTsd = usingTsd
self.usingEmb = usingEmb
self.fusion = fusion # concat, add, multiply
if usingEmb:
if fusion == 'concat':
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
elif fusion == 'add':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
elif fusion == 'multiply':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
tsd: [B x 1 x T]
Returns:
x: [B, C, T]
"""
T = x.shape[-1]
emb = torch.unsqueeze(emb, -1)
# B x C' X T
emb = emb.repeat(1, 1, T)
# B x (C + C') X T
if self.usingEmb:
if self.fusion == 'concat':
if not self.usingTsd:
x_ = torch.cat([x, emb], 1)
else:
x_ = torch.cat([x, emb, tsd], 1)
elif self.fusion == 'add':
x_ = self.PReLu1(self.preCNN(emb)) + x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
elif self.fusion == 'multiply':
x_ = self.PReLu1(self.preCNN(emb)) * x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
else:
x_ = x
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
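# A small usage sketch (shapes are assumptions, not part of the model): the class
# embedding is broadcast along time and fused with the feature map ('concat' here),
# and the residual output keeps the B x C x T shape of the input.
def _conv1d_emb_example():
    block = Conv1D_emb(in_channels=128, emb_channels=128, out_channels=512, fusion='concat')
    x = torch.randn(2, 128, 100) # B x C x T
    emb = torch.randn(2, 128) # B x C'
    return block(x, emb).shape # torch.Size([2, 128, 100])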
class ExtractionNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
super(ExtractionNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D_e(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C'], conditioning embedding of the target sound class
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
x = self.PReLu(x)
# B x C x T
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_1_front(x, emb, tsd)
x = self.conv_block_1_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_2_front(x, emb, tsd)
x = self.conv_block_2_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_3_front(x, emb, tsd)
x = self.conv_block_3_back(x)
x = F.dropout(x, p=0.2, training=self.training)
# B x N x T
# print('x ',x.shape)
x = self.PReLu(x)
x = self.end_conv1x1(x)
# print('x ', x.shape)
# assert 1==2
x = self.activation(x)
return x
class ConvTasNet(nn.Module):
'''
ConvTasNet module
N Number of filters in autoencoder
L Length of the filters (in samples)
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
Sc Number of channels in skip-connection paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=128,
L=20,
B=128,
H=256,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
activate="relu",
causal=False,
cls_num=41,
nFrameLen=512,
nFrameShift=256, #
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(ConvTasNet, self).__init__()
# n x 1 x T => n x N x T
self.encoder = Conv1D(1, N, L, stride=L // 2, padding=0)
# n x N x T Layer Normalization of Separation
self.LayerN_S = select_norm('cln', N)
# n x B x T Conv 1 x 1 of Separation
self.BottleN_S = Conv1D(N, B, 1)
# Separation block
# n x B x T => n x B x T
# self.separation = self._Sequential_repeat(
# R, X, in_channels=B, out_channels=H, kernel_size=P, norm=norm, causal=causal)
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=N, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.conditioner_one_hot = nn.Embedding(cls_num, 128)
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
# self.init_conditioner() # init conditioner modual
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
# if usingTsd[0] or usingTsd[1] or usingTsd[2]: # if we decide to use tsdNet
# self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
# self.init_TSDNet() # init it
self.epsilon = 1e-20
# n x B x T => n x 2*N x T
self.gen_masks = Conv1D(B, num_spks*N, 1)
# n x N x T => n x 1 x L
self.decoder = ConvTrans1D(N, 1, L, stride=L//2)
# activation function
active_f = {
'relu': nn.ReLU(),
'sigmoid': nn.Sigmoid(),
'softmax': nn.Softmax(dim=0)
}
self.activation_type = activate
self.activation = active_f[activate]
self.num_spks = num_spks
def init_conditioner(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
if self.fixCNN10: # if fix it
for p in self.conditioner.parameters():
p.requires_grad = False
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_Block_lists = [Conv1D_Block(
**block_kwargs, dilation=(2**i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_Block_lists)
def _Sequential_repeat(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, one_hot):
if x.dim() >= 3:
raise RuntimeError(
"{} accept 1/2D tensor as input, but got {:d}".format(
self.__class__.__name__, x.dim()))
if x.dim() == 1:
x = torch.unsqueeze(x, 0)
# x: n x 1 x L => n x N x T
tsdMask = None
# print('x ',x.shape)
w = self.encoder(x)
# print('w ',w.shape)
# n x N x L => n x B x L
e = self.LayerN_S(w)
# print('e ', e.shape)
e = self.BottleN_S(e)
# print('e1 ', e.shape)
# conditional part
x_cls = None
# print('one_hot ',one_hot.shape)
emb_one_hot = self.conditioner_one_hot(one_hot)
# print('emb_one_hot ',emb_one_hot.shape)
# n x B x L => n x B x L
m = self.extractor(e, emb_one_hot, tsdMask)
# print('m ',m.shape)
# assert 1==2
# n x B x L => n x num_spk*N x L
# m = self.gen_masks(e)
# n x N x L x num_spks
m = torch.chunk(m, chunks=self.num_spks, dim=1)
# num_spks x n x N x L
# m = self.activation(torch.stack(m, dim=0))
gt = None
d = [w*m[i] for i in range(self.num_spks)]
# print('d ',d[0].shape)
#d = w*m
# decoder part num_spks x n x L
# audio_encoder = self.istft(x_ex, x_phase) # reconstruct predict audio
# audio = [audio_encoder[:, 0]]
s = [self.decoder(d[i], squeeze=True) for i in range(self.num_spks)]
# print('s ',s[0].shape)
return s, m, gt, x_cls
def check_parameters(net):
'''
Returns the number of module parameters, in millions.
'''
parameters = sum(param.numel() for param in net.parameters())
return parameters / 10**6
def test_convtasnet():
x = torch.randn(2, 160000)
one_hot = torch.zeros(2)
nnet = ConvTasNet()
s, m, gt, x_cls = nnet(x, one_hot.long())
print(str(check_parameters(nnet)) + ' Mb')
print(s[0].shape)
if __name__ == "__main__":
test_convtasnet()
| 21,609
| 37.451957
| 165
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/PANNS.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class CNN10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(CNN10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.relu_(self.fc1(x))
embedding = x
return embedding
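# A minimal usage sketch (the settings below are assumptions matching the CNN10_settings
# defaults used elsewhere in this repo): CNN10 maps a raw waveform batch to a
# 512-dimensional reference embedding.
def _cnn10_example():
    model = CNN10(sample_rate=16000, window_size=1024, hop_size=320, mel_bins=64,
                  fmin=50, fmax=8000, classes_num=527)
    wav = torch.randn(2, 16000) # (batch_size, samples): 1 s of audio at 16 kHz
    return model(wav).shape # torch.Size([2, 512])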
| 4,628
| 38.905172
| 107
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/loss.py
|
import torch
import numpy as np
def nll_loss(output, target):
'''Negative likelihood loss. The output should be obtained using F.log_softmax(x).
Args:
output: (N, classes_num)
target: (N, classes_num)
'''
loss = - torch.mean(target * output)
return loss
def sisnr_loss(x, s, eps=1e-8):
"""
calculate training loss
input:
x: separated signal, N x S tensor, estimate value
s: reference signal, N x S tensor, True value
Return:
sisnr: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
def l2norm(mat, keepdim=False):
return torch.norm(mat, dim=-1, keepdim=keepdim)
if x.shape != s.shape:
raise RuntimeError(
"Dimention mismatch when calculate si-snr, {} vs {}".format(
x.shape, s.shape))
x_zm = x - torch.mean(x, dim=-1, keepdim=True)
s_zm = s - torch.mean(s, dim=-1, keepdim=True)
t = torch.sum(
x_zm * s_zm, dim=-1,
keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps)
loss = -20. * torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps))
return torch.sum(loss) / x.shape[0]
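# A quick sanity-check sketch (not part of the training code): sisnr_loss returns the
# negative SI-SNR, so a perfect estimate gives a very negative value and a noisier
# estimate moves the value towards (and past) zero.
def _sisnr_example():
    s = torch.randn(4, 16000)
    perfect = sisnr_loss(s, s) # very negative with the default eps
    noisy = sisnr_loss(s + 0.5 * torch.randn(4, 16000), s) # roughly -6 dB on average
    return perfect, noisy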
def sisnri(x, s, m): # sisnr improvement
"""
Arguments:
x: separated signal, BS x S predicted sound
s: reference signal, BS x S target sound
m: mixture signal, BS x S mixture sound
Return:
sisnri: N tensor
"""
sisnr = sisnr_loss(x, s)
sisnr_ori = sisnr_loss(m, s)
return sisnr_ori - sisnr # SI-SNR improvement: SI-SNR(est) - SI-SNR(mix), since sisnr_loss returns negative SI-SNR
def lfb_mse_loss(x, s):
"""
est_spec, ref_spec: BS x F x T
return: log fbank MSE: BS tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :, :s.shape[-1]]
else:
s = s[:, :, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1]*x.shape[2])
return t
def mse_loss(x, s):
"""
calculate training loss
input:
x: separated signal, N x S tensor
s: reference signal, N x S tensor
Return:
return: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1])
return t
def get_loss(loss_type, est_wav, lab_wav, mix_wav, est_mask, lab_mask, est_cls, lab_cls, onset, offset, nFrameShift, sr, audio_length, ratio):
"""
loss type:
1: enrollment: spec mse loss
2: enrollment: wave mse loss
3: enrollment: wave sisnrI loss
4: enrollment: spec mse loss + wave mse loss
5: enrollment: spec mse loss + wave sisnrI loss
6: enrollment: wave mse loss + wave sisnrI loss
7: enrollment: spec mse loss + wave mse loss + wave sisnrI loss
8: enrollment: spec mse loss (w)
9: enrollment: wave mse loss (w)
10: enrollment: wave sisnrI loss (w)
11: enrollment: spec mse loss (w) + wave mse loss (w)
12: enrollment: spec mse loss (w) + wave sisnrI loss (w)
13: enrollment: wave mse loss (w) + wave sisnrI loss (w)
14: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w)
15: enrollment: spec mse loss + wave mse loss + wave sisnrI loss + cls1 loss
16: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w) + cls1 loss
"""
loss_sisnr_w = 0.0
loss_mse_w = 0.0
loss_spec_w = 0.0
sisnrI_w = 0.0
onset = onset.cpu().numpy()
offset = offset.cpu().numpy()
sample_num = onset.shape[0] # batch_size
for i in range(sample_num):
assert onset[i] < offset[i]
max_wav = audio_length * sr - 1
# print('max_wav ',max_wav)
max_frame = sr * audio_length // nFrameShift - 2
# print('max_frame ',max_frame)
onset_wav = round(sr * onset[i]) if round(sr * onset[i]) >= 0 else 0 # target sound begin sample
# print('onset[i], onset_wav ',onset[i],onset_wav)
offset_wav = round(sr * offset[i]) if round(sr * offset[i]) < max_wav else max_wav # end
# print('offset[i], offset_wav ',offset[i],offset_wav)
onset_frame = round(onset[i] * (sr // nFrameShift - 1)) if round(onset[i] * (sr // nFrameShift - 1)) >= 0 else 0
# print('onset_frame ',onset_frame)
offset_frame = round(offset[i] * (sr // nFrameShift - 1)) if round(offset[i] * (sr // nFrameShift - 1)) < max_frame else max_frame
# print('offset_frame ',offset_frame)
est_wav_w = est_wav[i, onset_wav:offset_wav] # est_wav
est_wav_w = est_wav_w[None, :] # (1,N)
lab_wav_w = lab_wav[i, onset_wav:offset_wav] # lab_wav
lab_wav_w = lab_wav_w[None, :]
est_mask_w = est_mask[i, :, onset_frame:offset_frame]
est_mask_w = est_mask_w[None, :]
lab_mask_w = lab_mask[i, :, onset_frame:offset_frame]
lab_mask_w = lab_mask_w[None, :]
loss_sisnr_w += sisnr_loss(est_wav_w, lab_wav_w) # weighted sisnr
# print('loss_sisnr_w ',loss_sisnr_w)
loss_mse_w += mse_loss(est_wav_w, lab_wav_w) # weighted mse
loss_spec_w += lfb_mse_loss(est_mask_w, lab_mask_w) # mask loss
# assert loss_mse_w is nan
# print('loss_mse_w ',loss_mse_w)
# print('loss_spec_w ',loss_spec_w)
# assert 1==2
mix_wav_w = mix_wav[i, onset_wav:offset_wav] # mix wav
mix_wav_w = mix_wav_w[None, :]
sisnrI_w += sisnri(est_wav_w, lab_wav_w, mix_wav_w) # improvement over the mixture on the event region
loss_sisnr_w = loss_sisnr_w / sample_num
loss_mse_w = loss_mse_w / sample_num
loss_spec_w = loss_spec_w / sample_num
sisnrI_w = sisnrI_w / sample_num
loss_sisnr_all = sisnr_loss(est_wav, lab_wav) # loss over the whole audio
loss_mse_all = mse_loss(est_wav, lab_wav)
# print(est_wav[0])
# print(lab_wav[0])
# assert 1==2
# print('loss_mse_all ',loss_mse_all)
# assert 1==2
loss_spec_all = lfb_mse_loss(est_mask, lab_mask)
sisnrI_all = sisnri(est_wav, lab_wav, mix_wav)
loss_cls = nll_loss(est_cls, lab_cls) # classification loss
# loss_emb = torch.cosine_similarity(emb, emb2, dim=-1)
# loss_emb = 1.-torch.mean(loss_emb)
if loss_type == 1:
loss = loss_spec_all * 100.
elif loss_type == 2:
loss = loss_mse_all * 1000.
elif loss_type == 3:
loss = - sisnrI_all
elif loss_type == 4:
loss = loss_spec_all * 100. + loss_mse_all * 1000.
elif loss_type == 5:
loss = loss_spec_all * 100. - sisnrI_all
elif loss_type == 6:
loss = loss_mse_all * 1000. - sisnrI_all
elif loss_type == 7:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 8:
loss = (loss_spec_all + ratio * loss_spec_w) * 100.
elif loss_type == 9:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 10:
loss = - sisnrI_all - ratio * sisnrI_w
elif loss_type == 11:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 12:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 13:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 14:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 15:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all + loss_cls * 1000.
elif loss_type == 16:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w + loss_cls * 1000.
elif loss_type == 17:
loss = (loss_spec_all + ratio * loss_spec_w) * 10. + (loss_mse_all + ratio * loss_mse_w) * 100000. - sisnrI_all - ratio * sisnrI_w + loss_cls * 100.
# print('loss_spec_all ',loss_spec_all)
# print('loss_spec_w ',loss_spec_w)
# print('loss_mse_all ',loss_mse_all)
# print('loss_mse_w ',loss_mse_w)
# print('sisnrI_all ',sisnrI_all)
# print('sisnrI_w ',sisnrI_w)
# print('loss_cls ',loss_cls)
# assert 1==2
return loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, loss_cls
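# Illustrative note (no extra functionality): with loss_type=14, for example, the total above is
# 100 * (loss_spec_all + ratio * loss_spec_w) + 1000 * (loss_mse_all + ratio * loss_mse_w)
# - sisnrI_all - ratio * sisnrI_w, where the *_w terms are evaluated only on the
# [onset, offset] region of each sample and ratio is the weighting ratio passed in by the trainer.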
def get_loss_one_hot(loss_type, est_wav, lab_wav, mix_wav, est_mask, lab_mask, est_cls, lab_cls, onset, offset, nFrameShift, sr, audio_length, ratio):
"""
loss type:
1: enrollment: spec mse loss
2: enrollment: wave mse loss
3: enrollment: wave sisnrI loss
4: enrollment: spec mse loss + wave mse loss
5: enrollment: spec mse loss + wave sisnrI loss
6: enrollment: wave mse loss + wave sisnrI loss
7: enrollment: spec mse loss + wave mse loss + wave sisnrI loss
8: enrollment: spec mse loss (w)
9: enrollment: wave mse loss (w)
10: enrollment: wave sisnrI loss (w)
11: enrollment: spec mse loss (w) + wave mse loss (w)
12: enrollment: spec mse loss (w) + wave sisnrI loss (w)
13: enrollment: wave mse loss (w) + wave sisnrI loss (w)
14: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w)
15: enrollment: spec mse loss + wave mse loss + wave sisnrI loss + cls1 loss
16: enrollment: spec mse loss (w) + wave mse loss (w) + wave sisnrI loss (w) + cls1 loss
"""
loss_sisnr_w = 0.0
loss_mse_w = 0.0
loss_spec_w = 0.0
sisnrI_w = 0.0
onset = onset.cpu().numpy()
offset = offset.cpu().numpy()
sample_num = onset.shape[0] # batch_size
for i in range(sample_num):
assert onset[i] < offset[i]
max_wav = audio_length * sr - 1
# print('max_wav ',max_wav)
max_frame = sr * audio_length // nFrameShift - 2
# print('max_frame ',max_frame)
onset_wav = round(sr * onset[i]) if round(sr * onset[i]) >= 0 else 0 # target sound begin sample
# print('onset[i], onset_wav ',onset[i],onset_wav)
offset_wav = round(sr * offset[i]) if round(sr * offset[i]) < max_wav else max_wav # end
# print('offset[i], offset_wav ',offset[i],offset_wav)
onset_frame = round(onset[i] * (sr // nFrameShift - 1)) if round(onset[i] * (sr // nFrameShift - 1)) >= 0 else 0
# print('onset_frame ',onset_frame)
offset_frame = round(offset[i] * (sr // nFrameShift - 1)) if round(offset[i] * (sr // nFrameShift - 1)) < max_frame else max_frame
# print('offset_frame ',offset_frame)
est_wav_w = est_wav[i, onset_wav:offset_wav] # est_wav
est_wav_w = est_wav_w[None, :] # (1,N)
lab_wav_w = lab_wav[i, onset_wav:offset_wav] # lab_wav
lab_wav_w = lab_wav_w[None, :]
est_mask_w = est_mask[i, :, onset_frame:offset_frame]
est_mask_w = est_mask_w[None, :]
lab_mask_w = lab_mask[i, :, onset_frame:offset_frame]
lab_mask_w = lab_mask_w[None, :]
loss_sisnr_w += sisnr_loss(est_wav_w, lab_wav_w) # weighted sisnr
# print('loss_sisnr_w ',loss_sisnr_w)
loss_mse_w += mse_loss(est_wav_w, lab_wav_w) # weighted mse
loss_spec_w += lfb_mse_loss(est_mask_w, lab_mask_w) # mask loss
# assert loss_mse_w is nan
# print('loss_mse_w ',loss_mse_w)
# print('loss_spec_w ',loss_spec_w)
# assert 1==2
mix_wav_w = mix_wav[i, onset_wav:offset_wav] # mix wav
mix_wav_w = mix_wav_w[None, :]
sisnrI_w += sisnri(est_wav_w, lab_wav_w, mix_wav_w) # improvement over the mixture on the event region
loss_sisnr_w = loss_sisnr_w / sample_num
loss_mse_w = loss_mse_w / sample_num
loss_spec_w = loss_spec_w / sample_num
sisnrI_w = sisnrI_w / sample_num
loss_sisnr_all = sisnr_loss(est_wav, lab_wav) # loss over the whole audio
loss_mse_all = mse_loss(est_wav, lab_wav)
# print(est_wav[0])
# print(lab_wav[0])
# assert 1==2
# print('loss_mse_all ',loss_mse_all)
# assert 1==2
loss_spec_all = lfb_mse_loss(est_mask, lab_mask)
sisnrI_all = sisnri(est_wav, lab_wav, mix_wav)
# loss_cls = nll_loss(est_cls, lab_cls) # classification loss (not used in the one-hot variant)
loss_cls = loss_spec_all # placeholder so the return signature matches get_loss
# loss_emb = torch.cosine_similarity(emb, emb2, dim=-1)
# loss_emb = 1.-torch.mean(loss_emb)
if loss_type == 1:
loss = loss_spec_all * 100.
elif loss_type == 2:
loss = loss_mse_all * 1000.
elif loss_type == 3:
loss = - sisnrI_all
elif loss_type == 4:
loss = loss_spec_all * 100. + loss_mse_all * 1000.
elif loss_type == 5:
loss = loss_spec_all * 100. - sisnrI_all
elif loss_type == 6:
loss = loss_mse_all * 1000. - sisnrI_all
elif loss_type == 7:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 8:
loss = (loss_spec_all + ratio * loss_spec_w) * 100.
elif loss_type == 9:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 10:
loss = - sisnrI_all - ratio * sisnrI_w
elif loss_type == 11:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000.
elif loss_type == 12:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 13:
loss = (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 14:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 15:
loss = loss_spec_all * 100. + loss_mse_all * 1000. - sisnrI_all
elif loss_type == 16:
loss = (loss_spec_all + ratio * loss_spec_w) * 100. + (loss_mse_all + ratio * loss_mse_w) * 1000. - sisnrI_all - ratio * sisnrI_w
elif loss_type == 17:
loss = (loss_spec_all + ratio * loss_spec_w) * 10. + (loss_mse_all + ratio * loss_mse_w) * 100000. - sisnrI_all - ratio * sisnrI_w
# print('loss_spec_all ',loss_spec_all)
# print('loss_spec_w ',loss_spec_w)
# print('loss_mse_all ',loss_mse_all)
# print('loss_mse_w ',loss_mse_w)
# print('sisnrI_all ',sisnrI_all)
# print('sisnrI_w ',sisnrI_w)
# print('loss_cls ',loss_cls)
# assert 1==2
return loss, loss_sisnr_all, loss_spec_all, loss_mse_all, sisnrI_all, loss_sisnr_w, loss_spec_w, loss_mse_w, sisnrI_w, loss_cls
| 14,345
| 42.34139
| 156
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/model.py
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import CNN10
from model.tsd import TSD
import math
def init_kernel(frame_len,
frame_hop,
num_fft=None,
window="sqrt_hann"):
if window != "sqrt_hann":
raise RuntimeError("Now only support sqrt hanning window in order "
"to make signal perfectly reconstructed")
if not num_fft:
# FFT points
fft_size = 2 ** math.ceil(math.log2(frame_len))
else:
fft_size = num_fft
# window [window_length]
window = torch.hann_window(frame_len) ** 0.5
S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
# window_length, F, 2 (real+imag)
kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
# 2, F, window_length
kernel = torch.transpose(kernel, 0, 2) * window
# 2F, 1, window_length
kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
return kernel
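# Note (an assumption about newer environments): torch.rfft was removed in recent PyTorch
# releases; on such versions an equivalent kernel can be built with the torch.fft module:
# kernel = torch.view_as_real(torch.fft.rfft(torch.eye(fft_size) / S_, dim=-1))[:frame_len]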
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
NOTE:
1) Recommend sqrt_hann window with 2**N frame length, because it
could achieve perfect reconstruction after overlap-add
2) Now haven't consider padding problems yet
"""
def __init__(self,
frame_len,
frame_hop,
window="sqrt_hann",
num_fft=None):
super(STFTBase, self).__init__()
K = init_kernel(
frame_len,
frame_hop,
num_fft=num_fft,
window=window)
self.K = nn.Parameter(K, requires_grad=False)
self.stride = frame_hop
self.window = window
def freeze(self):
self.K.requires_grad = False
def unfreeze(self):
self.K.requires_grad = True
def check_nan(self):
num_nan = torch.sum(torch.isnan(self.K))
if num_nan:
raise RuntimeError(
"detect nan in STFT kernels: {:d}".format(num_nan))
def extra_repr(self):
return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, **kwargs)
def forward(self, x):
"""
Accept raw waveform and output magnitude and phase
x: input signal, N x 1 x S or N x S
m: magnitude, N x F x T
p: phase, N x F x T
"""
if x.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
x.dim()))
self.check_nan()
# if N x S, reshape N x 1 x S
if x.dim() == 2:
x = torch.unsqueeze(x, 1)
# N x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# N x F x T
r, i = torch.chunk(c, 2, dim=1)
m = (r ** 2 + i ** 2) ** 0.5
p = torch.atan2(i, r)
return m, p
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, **kwargs)
def forward(self, m, p, squeeze=False):
"""
Accept phase & magnitude and output raw waveform
m, p: N x F x T
s: N x C x S
"""
if p.dim() != m.dim() or p.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
p.dim()))
self.check_nan()
# if F x T, reshape 1 x F x T
if p.dim() == 2:
p = torch.unsqueeze(p, 0)
m = torch.unsqueeze(m, 0)
r = m * torch.cos(p)
i = m * torch.sin(p)
# N x 2F x T
c = torch.cat([r, i], dim=1)
# N x 2F x T
s = F.conv_transpose1d(c, self.K, stride=self.stride, padding=0)
if squeeze:
s = torch.squeeze(s)
return s
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
input shape from an expected input of size
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__class__.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
if norm == 'ln':
return nn.GroupNorm(1, dim)
else:
return nn.BatchNorm1d(dim)
class Conv1D(nn.Module):
'''
Build the Conv1D structure
causal: if True is causal setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
causal: if True is causal setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.usingTsd = usingTsd
self.usingEmb = usingEmb
self.fusion = fusion # concat, add, multiply
if usingEmb:
if fusion == 'concat':
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
elif fusion == 'add':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
elif fusion == 'multiply':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
tsd: [B x 1 x T]
Returns:
x: [B, C, T]
"""
T = x.shape[-1]
emb = torch.unsqueeze(emb, -1)
# B x C' X T
emb = emb.repeat(1, 1, T)
# B x (C + C') X T
if self.usingEmb:
if self.fusion == 'concat':
if not self.usingTsd:
x_ = torch.cat([x, emb], 1)
else:
x_ = torch.cat([x, emb, tsd], 1)
elif self.fusion == 'add':
x_ = self.PReLu1(self.preCNN(emb)) + x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
elif self.fusion == 'multiply':
x_ = self.PReLu1(self.preCNN(emb)) * x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
else:
x_ = x
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class ExtractionNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
super(ExtractionNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_blocks: how many blocks in each repeat
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of blocks in each repeat
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is the number of time frames
emb: [B x C'], clip-level conditioning embedding
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
x = self.PReLu(x)
# B x C x T
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_1_front(x, emb, tsd)
x = self.conv_block_1_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_2_front(x, emb, tsd)
x = self.conv_block_2_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_3_front(x, emb, tsd)
x = self.conv_block_3_back(x)
x = F.dropout(x, p=0.2, training=self.training)
# B x N x T
# print('x ',x.shape)
x = self.PReLu(x)
x = self.end_conv1x1(x)
# print('x ', x.shape)
# assert 1==2
x = self.activation(x)
return x
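# --- Hedged usage sketch (added for illustration, not in the original file) ---
# Shape check for ExtractionNet: the mask has final_channels frequency bins and the
# same number of frames as the input. The reduced block count and widths below are
# assumptions chosen only to keep the check light.
def _demo_extraction_net():
    net = ExtractionNet(conv1d_block=2, in_channels=64, out_channels=128,
                        emb_channels=128, final_channels=257, out_sp_channels=256)
    x = torch.randn(2, 64, 50)   # B x N x T encoded mixture
    emb = torch.randn(2, 128)    # clip-level embedding from the conditioner
    mask = net(x, emb)           # B x final_channels x T, values in [0, 1]
    assert mask.shape == (2, 257, 50)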
class TSENet(nn.Module):
'''
TSENet module
N Number of filters in autoencoder
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=512,
B=128,
H=512,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
causal=False,
cls_num=50,
nFrameLen=512,
nFrameShift=256, #
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(TSENet, self).__init__()
self.device = torch.device('cuda')
self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1) # 1x1 conv over the frequency axis; nFrameShift+1 = nFFT//2 + 1 frequency bins because the hop size is half the FFT size
self.PReLu = nn.PReLU()
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.num_spks = num_spks
self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
classes_num=CNN10_settings[6])
self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
self.cls2 = nn.Linear(CNN10_settings[8], cls_num) # classifier head
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
self.init_conditioner() # init conditioner module
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
if usingTsd[0] or usingTsd[1] or usingTsd[2]: # if we decide to use tsdNet
self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
self.init_TSDNet() # init it
self.epsilon = 1e-20
def init_conditioner(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
if self.fixCNN10: # freeze the conditioner if requested
for p in self.conditioner.parameters():
p.requires_grad = False
def init_TSDNet(self):
if self.pretrainedTSDNet:
device = torch.device('cuda')
dicts = torch.load(self.pretrainedTSDNet, map_location=device)
self.tsdnet.load_state_dict(dicts["model_state_dict"])
if self.fixTSDNet:
for p in self.tsdnet.parameters():
p.requires_grad = False
def forward(self, x, ref, label=None, true_mask=None, inf=False):
"""
Input:
x: [B, T], B is batch size, T is the number of samples
ref: [B, T], reference audio of the target class
Returns:
audio: [B, T]
"""
# B x T -> B x C x T
x_magnitude, x_phase = self.stft(x) # get mag spectrum
x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
if not inf:
label_magnitude, label_phase = self.stft(label) # ground-truth spectrum, only available when not in inference mode
# print('label_magnitude ',label_magnitude.shape)
if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]: # if the TSD branch is enabled
_, _, out_tsd_up = self.tsdnet(x, ref) # produce tsd results
tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda() # generate mask
tsdMask[out_tsd_up > self.threshold] = 1. # set to 1 where the detection score exceeds the threshold
tsdMask = tsdMask[:, None, :]
else:
tsdMask = None
# B x T -> B x C -> B x C x T
out_enc = self.conditioner(ref) # encode ref audio
emb = self.emb_fc(out_enc) # get embedding
emb = self.PReLu(emb)
x_cls = self.PReLu(self.cls1(out_enc))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1) # produce classification results
x_encoder = self.PReLu(self.front_CNN(x_encoder))
mask = self.extractor(x_encoder, emb, tsdMask) # generate mask
# print('mask ',mask.shape)
# assert 1==2
if true_mask is not None:
mask = true_mask*mask
x_ex = x_magnitude * mask
gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase) # PSM
gt = torch.clamp(gt, min=0., max=1.) # Truncated to [0,1]
audio_encoder = self.istft(x_ex, x_phase) # reconstruct predict audio
audio = [audio_encoder[:, 0]]
return audio, mask, gt, x_cls
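# --- Hedged sketch (added for illustration, not in the original file) ---
# The training target built in forward() above is a phase-sensitive mask (PSM),
# |S| / |X| * cos(phase_S - phase_X), truncated to [0, 1]; reproduced here with
# synthetic magnitudes and phases.
def _demo_psm_target(epsilon=1e-20):
    x_mag, x_phase = torch.rand(2, 257, 100) + 0.1, torch.rand(2, 257, 100)
    s_mag, s_phase = torch.rand(2, 257, 100), torch.rand(2, 257, 100)
    gt = s_mag / (x_mag + epsilon) * torch.cos(s_phase - x_phase)
    gt = torch.clamp(gt, min=0., max=1.)
    assert gt.min() >= 0. and gt.max() <= 1.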
class TSENet_one_hot(nn.Module):
'''
TSENet module
N Number of filters in autoencoder
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=512,
B=128,
H=512,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
causal=False,
cls_num=50,
nFrameLen=512,
nFrameShift=256, #
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(TSENet_one_hot, self).__init__()
self.device = torch.device('cuda')
self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1) # 1x1 conv over the frequency axis; nFrameShift+1 = nFFT//2 + 1 frequency bins because the hop size is half the FFT size
self.PReLu = nn.PReLU()
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.num_spks = num_spks
# self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
# hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
# classes_num=CNN10_settings[6])
self.conditioner_one_hot = nn.Embedding(cls_num,128)
# self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
# self.cls2 = nn.Linear(CNN10_settings[8], cls_num) # classifier head
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
# self.init_conditioner() # init conditioner modual
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8]) # produce embedding
if usingTsd[0] or usingTsd[1] or usingTsd[2]: # if we decide to use tsdNet
self.tsdnet = TSDNet_one_hot(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
self.init_TSDNet() # init it
self.epsilon = 1e-20
# def init_conditioner(self):
# if self.pretrainedCNN10:
# device = torch.device('cuda')
# checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
# self.conditioner.load_state_dict(checkpoint['model'])
# if self.fixCNN10: # if fix it
# for p in self.conditioner.parameters():
# p.requires_grad = False
def init_TSDNet(self):
if self.pretrainedTSDNet:
device = torch.device('cuda')
dicts = torch.load(self.pretrainedTSDNet, map_location=device)
self.tsdnet.load_state_dict(dicts["model_state_dict"])
if self.fixTSDNet:
for p in self.tsdnet.parameters():
p.requires_grad = False
def forward(self, x, ref, one_hot, label=None, true_mask=None, inf=False):
"""
Input:
x: [B, T], B is batch size, T is the number of samples
ref: [B, T], reference audio of the target class
Returns:
audio: [B, T]
"""
# B x T -> B x C x T
x_magnitude, x_phase = self.stft(x) # get mag spectrum
x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
if not inf:
label_magnitude, label_phase = self.stft(label) # inf ?
if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]: # if use tsd
_, _, out_tsd_up = self.tsdnet(x, ref) # produce tsd results
tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda() # generate mask
tsdMask[out_tsd_up > self.threshold] = 1. # set to 1 where the detection score exceeds the threshold
tsdMask = tsdMask[:, None, :]
else:
tsdMask = None
# tsdMask = tsdMask[:, None, :]
# B x T -> B x C -> B x C x T
# out_enc = self.conditioner(ref) # encode ref audio
# emb = self.emb_fc(out_enc) # get embedding
# emb = self.PReLu(emb)
# # print('emb ',emb.shape)
# x_cls = self.PReLu(self.cls1(out_enc))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1) # produce classification results
x_cls = None
# print('one_hot ',one_hot.shape)
emb_one_hot = self.conditioner_one_hot(one_hot)
# print('emb_one_hot ',emb_one_hot.shape)
x_encoder = self.PReLu(self.front_CNN(x_encoder))
mask = self.extractor(x_encoder, emb_one_hot, tsdMask) # generate mask
# print('mask ',mask.shape)
# print('true_mask ',true_mask.shape)
# true_mask = true_mask.unsqueeze(2)
# true_mask = true_mask.repeat(1,1,257)
# true_mask = true_mask.transpose(1,2)
# if true_mask != None:
# mask = true_mask*mask
x_ex = x_magnitude * mask
gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase) # PSM
gt = torch.clamp(gt, min=0., max=1.) # Truncated to [0,1]
audio_encoder = self.istft(x_ex, x_phase) # reconstruct predict audio
audio = [audio_encoder[:, 0]]
return audio, mask, gt, x_cls
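# --- Hedged sketch (added for illustration, not in the original file) ---
# In the one-hot variant the audio conditioner is replaced by a learned lookup
# table, so the "reference" is just a class index. cls_num=50 and the 128-d
# embedding below mirror the defaults; the indices are arbitrary.
def _demo_one_hot_conditioning():
    table = nn.Embedding(50, 128)
    class_idx = torch.tensor([3, 17])   # B target-class indices
    emb = table(class_idx)              # B x 128, plays the role of the emb_fc output
    assert emb.shape == (2, 128)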
class TSDNet_one_hot(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet_one_hot, self).__init__()
self.PReLu = nn.PReLU()
# self.conditioner = CNN10(sample_rate=16000, window_size=1024,
# hop_size=320, mel_bins=64, fmin=50, fmax=8000,
# classes_num=527)
# self.cls1 = nn.Linear(128, 128)
# self.cls2 = nn.Linear(128, cls_num)
# self.pretrainedCNN10 = pretrainedCNN10
# self.init_ref()
# self.emb_fc = nn.Linear(512, 128)
self.conditioner_one_hot = nn.Embedding(cls_num,128)
# print(CNN10_settings)
self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
# print('self.tsd ',self.tsd)
# assert 1==2
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def forward(self, x, ref, onehot=None):
"""
Input:
x: [B, T], B is batch size, T is the number of samples
Returns:
"""
# out_enc = self.conditioner(ref)
# emb = self.emb_fc(out_enc)
# emb = self.PReLu(emb)
emb_onehot = self.conditioner_one_hot(onehot)
# print('emb_onehot ',emb_onehot.shape)
# assert 1==2
out_tsd_up, out_tsd_time, sim_cos = self.tsd(x, emb_onehot)
# x_cls = self.PReLu(self.cls1(emb))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1)
x_cls = torch.zeros(1).cuda()
return x_cls, out_tsd_time, out_tsd_up,sim_cos
class TSDNet(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=50, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet, self).__init__()
self.PReLu = nn.PReLU()
self.conditioner = CNN10(sample_rate=16000, window_size=1024,
hop_size=320, mel_bins=64, fmin=50, fmax=8000,
classes_num=527)
self.cls1 = nn.Linear(128, 128)
self.cls2 = nn.Linear(128, cls_num)
self.pretrainedCNN10 = pretrainedCNN10
self.init_ref()
self.emb_fc = nn.Linear(512, 128)
self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def forward(self, x, ref):
"""
Input:
x: [B, T], B is batch size, T is the number of samples
Returns:
"""
out_enc = self.conditioner(ref)
emb = self.emb_fc(out_enc)
emb = self.PReLu(emb)
out_tsd_up, out_tsd_time = self.tsd(x, emb)
x_cls = self.PReLu(self.cls1(emb))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
return x_cls, out_tsd_time, out_tsd_up
if __name__ == "__main__":
conv = TSENet().cuda()
# encoder = Encoder(16, 512)
x = torch.randn(4, 64000).cuda()
label = torch.randn(4, 64000).cuda()
ref = torch.randn(4, 64000).cuda()
audio, mask, gt, x_cls = conv(x, ref, label)
print(audio[0].shape)
# print("{:.3f}".format(check_parameters(conv)))
| 32,316
| 39.497494
| 165
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/__init__.py
|
from .loss import *
from .model import *
| 40
| 19.5
| 20
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSENET/model/tsd.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
def init_weights(m):
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
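# --- Hedged shape check (added for illustration, not in the original file) ---
# ConvBlock halves both the time and frequency axes with the default (2, 2)
# average pooling; the sizes below are arbitrary.
def _demo_conv_block():
    block = ConvBlock(in_channels=1, out_channels=16)
    x = torch.randn(2, 1, 64, 64)   # B x C x T x F
    y = block(x, pool_size=(2, 2), pool_type='avg')
    assert y.shape == (2, 16, 32, 32)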
class Block2D(nn.Module):
def __init__(self, cin, cout, kernel_size=3, padding=1):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(cin),
nn.Conv2d(cin,
cout,
kernel_size=kernel_size,
padding=padding,
bias=False),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
def forward(self, x):
return self.block(x)
class TSD(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512, 256)
self.outputlayer = nn.Linear(256, 2)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length) waveform; emb: (batch_size, emb_dim) conditioning embedding"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
    self.gru.flatten_parameters()
    self._flattened = True
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_up[:,:,0], decision_time[:,:,0]
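# --- Hedged sketch (added for illustration, not in the original file) ---
# How the frame-level detection curve is upsampled back to the spectrogram time
# axis in forward() above; the 156 -> 624 frame counts are assumptions taken from
# the comments in the code.
def _demo_interpolate_decisions():
    decision_time = torch.softmax(torch.randn(4, 156, 2), dim=2)   # B x T' x 2
    time = 624                                                     # target frame count
    decision_up = torch.nn.functional.interpolate(
        decision_time.transpose(1, 2), time,
        mode='linear', align_corners=False).transpose(1, 2)        # B x 624 x 2
    assert decision_up.shape == (4, 624, 2)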
| 5,798
| 33.517857
| 109
|
py
|
Tim-TSENet
|
Tim-TSENet-main/generate_dataset/generate_data_fsd_kaggle2.py
|
import os
import sys
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
import random
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
import soundfile as sf
import math
import pandas as pd
from pathlib import Path
event_ls = ["Acoustic_guitar", "Applause", "Bark", "Bass_drum",
"Burping_or_eructation", "Bus", "Cello", "Chime",
"Clarinet", "Computer_keyboard", "Cough", "Cowbell",
"Double_bass", "Drawer_open_or_close", "Electric_piano",
"Fart", "Finger_snapping", "Fireworks", "Flute", "Glockenspiel",
"Gong", "Gunshot_or_gunfire", "Harmonica", "Hi-hat", "Keys_jangling",
"Knock", "Laughter", "Meow", "Microwave_oven", "Oboe", "Saxophone",
"Scissors", "Shatter", "Snare_drum", "Squeak", "Tambourine", "Tearing",
"Telephone", "Trumpet", "Violin_or_fiddle", "Writing"]
event_to_id = {label : i for i, label in enumerate(event_ls)}
print(event_to_id)
def get_file_label_dict(csv_path):
#strong_csv = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSD50K.ground_truth/dev.csv'
print('strong_csv ',csv_path)
DF_strong = pd.read_csv(csv_path,sep=',',usecols=[0,1])
file_id = DF_strong['fname']
labels = DF_strong['label']
filename_ls = []
label_ls = []
for fname in file_id:
filename_ls.append(fname)
for label in labels:
label_ls.append(label)
dict_ls = {}
for i in range(len(filename_ls)):
dict_ls[filename_ls[i]] = str(event_to_id[label_ls[i]])
return dict_ls
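# --- Hedged sketch (added for illustration, not in the original script) ---
# get_file_label_dict simply maps fname -> str(class_id); shown here with an
# in-memory DataFrame instead of the real FSDKaggle2018 csv.
def _demo_label_dict():
    df = pd.DataFrame({'fname': ['a.wav', 'b.wav'], 'label': ['Bark', 'Flute']})
    mapping = {row.fname: str(event_to_id[row.label]) for row in df.itertuples()}
    assert mapping == {'a.wav': '2', 'b.wav': '18'}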
def region_selection(top_result_mat):
region_index = np.zeros((top_result_mat.shape[-1], 2), dtype=np.int32)
max_value = np.zeros(top_result_mat.shape[-1])
for i in range(top_result_mat.shape[-1]):
max_index = np.argmax(top_result_mat[:, i])
max_value[i] = np.max(top_result_mat[:, i])
if max_index < 100:
max_index = 100
elif max_index > 900:
max_index = 900
l_index = max_index - 100
r_index = max_index + 100
region_index[i, 0] = l_index
region_index[i, 1] = r_index
return region_index, max_value
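# --- Hedged sketch (added for illustration, not in the original script) ---
# region_selection clips the per-class argmax frame to [100, 900] and returns a
# fixed 200-frame window around it; the toy matrix below has one peak per class.
def _demo_region_selection():
    mat = np.zeros((1000, 3))
    mat[50, 0] = mat[500, 1] = mat[950, 2] = 1.0
    region, peak_values = region_selection(mat)
    assert region[0].tolist() == [0, 200] and region[2].tolist() == [800, 1000]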
def check_files():
train_pth = '/apdcephfs/private_helinwang/tsss/ft_local/balanced_train_segments'
eval_pth = '/apdcephfs/private_helinwang/tsss/ft_local/eval_segments'
train_lst = []
eval_lst = []
for root, dirs, files in os.walk(train_pth):
for name in files:
train_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(eval_pth):
for name in files:
eval_lst.append(os.path.join(root, name))
for file in train_lst + eval_lst:
try:
(waveform, sr) = librosa.core.load(file, mono=True)
except:
print('{} Read Error'.format(file))
os.system('rm -rf '+ file)
else:
if waveform.shape[0] != int(sr * 10):
print('{} Wave Length Error: {} samples. Fix it.'.format(file, waveform.shape[0]))
if waveform.shape[0] > int(sr * 10):
waveform = waveform[:int(sr * 10)]
else:
waveform = np.concatenate((waveform, [0.] * (int(sr * 10) - waveform.shape[0])),0)
sf.write(file, waveform, sr, subtype='PCM_24')
else:
print('{} No Error.'.format(file))
print('Finished Checkout!')
def generate_mixed_data(args):
sample_rate = args.sample_rate
duration = args.duration
sample_num = int(sample_rate*duration)
num1 = int(sample_rate*2)
train_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train'
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test'
val_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val'
train_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/train.txt'
test_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/test.txt'
val_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/tsss_mixed/val.txt'
train_data_pth = '/apdcephfs/private_helinwang/tsss/tsss_data/train'
test_data_pth = '/apdcephfs/private_helinwang/tsss/tsss_data/test'
train_lst = []
test_lst = []
for root, dirs, files in os.walk(train_data_pth):
for name in files:
train_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(test_data_pth):
for name in files:
test_lst.append(os.path.join(root, name))
train_data_num = len(train_lst)
test_data_num = len(test_lst)
# generate train mixed data
rs1 = random.sample(train_lst, train_data_num)
rs1 = rs1 * 2
rs2 = random.sample(train_lst, train_data_num) + random.sample(train_lst, train_data_num)
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1
wave2[index2:(index2 + num1)] = waveform2
wave2 = wave2*np.sum(wave1**2)/np.sum(wave2**2)
wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'train_' + str(flag_num) + '.wav'
file_name_a = 'train_' + str(flag_num) + '_a.wav'
file_name_b = 'train_' + str(flag_num) + '_b.wav'
file_name_re = 'train_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_a = train_dir + '/' + file_name_a
file_pth_b = train_dir + '/' + file_name_b
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(train_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
# generate val mixed data
rs1 = random.sample(train_lst, int(0.25 * train_data_num))
rs2 = random.sample(train_lst, int(0.25 * train_data_num))
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1
wave2[index2:(index2 + num1)] = waveform2
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'val_' + str(flag_num) + '.wav'
file_name_a = 'val_' + str(flag_num) + '_a.wav'
file_name_b = 'val_' + str(flag_num) + '_b.wav'
file_name_re = 'val_' + str(flag_num) + '_re.wav'
file_pth = val_dir + '/' + file_name
file_pth_a = val_dir + '/' + file_name_a
file_pth_b = val_dir + '/' + file_name_b
file_pth_re = val_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(val_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
# generate test mixed data
rs1 = random.sample(test_lst, int(0.25 * test_data_num))
rs2 = random.sample(test_lst, int(0.25 * test_data_num))
flag_num = 1
for i in range(len(rs1)):
# resample
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[i], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# check and fix length
if waveform1.shape[0] > num1:
waveform1 = waveform1[:num1]
elif waveform1.shape[0] < num1:
waveform1 = np.concatenate((waveform1, [0.] * (num1 - waveform1.shape[0])), 0)
if waveform2.shape[0] > num1:
waveform2 = waveform2[:num1]
elif waveform2.shape[0] < num1:
waveform2 = np.concatenate((waveform2, [0.] * (num1 - waveform2.shape[0])), 0)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1
wave2[index2:(index2 + num1)] = waveform2
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform2
file_name = 'test_' + str(flag_num) + '.wav'
file_name_a = 'test_' + str(flag_num) + '_a.wav'
file_name_b = 'test_' + str(flag_num) + '_b.wav'
file_name_re = 'test_' + str(flag_num) + '_re.wav'
file_pth = test_dir + '/' + file_name
file_pth_a = test_dir + '/' + file_name_a
file_pth_b = test_dir + '/' + file_name_b
file_pth_re = test_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform1, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(test_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[i].split('/')[-1] + '\t' + rs1[i].split('/')[-1] + '\n')
flag_num += 1
print('Finished Mixed Data!')
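# --- Hedged sketch (added for illustration, not in the original script) ---
# The placement logic above puts one 2 s event in the first half and one in the
# second half of the clip; this reproduces just the index drawing, with 16 kHz
# and 4 s clips assumed to match the parser defaults.
def _demo_event_placement(sample_rate=16000, duration=4):
    sample_num = int(sample_rate * duration)
    num1 = int(sample_rate * 2)
    index1 = random.randint(0, (sample_num - num1 - 1) // 2)
    index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
    assert 0 <= index1 and index2 + num1 <= sample_num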
def generate_mixed_offset_data(args):
sample_rate = args.sample_rate
sample_num = int(sample_rate * 5)
num1 = int(sample_rate * 3)
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data'
test_txt = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/test.txt'
test_data_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/audio'
test_lst = []
for root, dirs, files in os.walk(test_data_pth):
for name in files:
test_lst.append(os.path.join(root, name))
flag_num = 1
test_data_num = len(test_lst)
rs1 = random.sample(test_lst, test_data_num) + random.sample(test_lst, test_data_num) + random.sample(test_lst, test_data_num) +random.sample(test_lst, test_data_num)
for i in range(len(rs1)):
cls1 = rs1[i].split('-')[-1]
while True:
rs2 = random.sample(test_lst, 1)
cls2 = rs2[0].split('-')[-1]
if cls2 == cls1:
break
while True:
rs3 = random.sample(test_lst, 1)
cls3 = rs3[0].split('-')[-1]
if cls3 != cls1:
break
(waveform1, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
# get random index
temp = random.random()
if temp > 0.5:
index1 = random.randint(0, (sample_num - num1 - 1) // 2)
index2 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
else:
index2 = random.randint(0, (sample_num - num1 - 1) // 2)
index1 = random.randint((sample_num - num1 - 1) // 2, sample_num - num1 - 1)
wave = np.zeros(sample_num)
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
# energy normalization
wave1[index1:(index1 + num1)] = waveform1[index1:(index1 + num1)]
wave2[index2:(index2 + num1)] = waveform3[index2:(index2 + num1)]
wave2 = wave2 * np.sum(wave1 ** 2) / np.sum(wave2 ** 2)
wave = wave1 + wave2
# waveform = waveform1 + waveform3
file_name = 'test_offset_' + str(flag_num) + '.wav'
file_name_a = 'test_offset_' + str(flag_num) + '_a.wav'
file_name_b = 'test_offset_' + str(flag_num) + '_b.wav'
file_name_re = 'test_offset_' + str(flag_num) + '_re.wav'
file_pth = test_dir + '/' + file_name
file_pth_a = test_dir + '/' + file_name_a
file_pth_b = test_dir + '/' + file_name_b
file_pth_re = test_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_24')
sf.write(file_pth_a, wave1, sample_rate, subtype='PCM_24')
sf.write(file_pth_b, wave2, sample_rate, subtype='PCM_24')
sf.write(file_pth_re, waveform2, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(file_pth))
with open(test_txt, "a+") as f:
f.write(file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs3[0].split('/')[-1]+ '\t' + rs2[0].split('/')[-1] + '\n')
flag_num += 1
print('Finished Mixed Offset Data!')
def data_pre(audio, audio_length, fs, audio_skip):
stride = round(audio_skip * fs / 2)
loop = round((audio_length * fs) // stride - 1)
i = 0
out = audio
while i < loop:
win_data = out[i*stride: (i+2)*stride]
maxamp = np.max(np.abs(win_data))
if maxamp < 0.0005:
loop = loop - 2
out[i*stride: (loop+1)*stride] = out[(i+2)*stride: (loop+3)*stride]
else:
i = i + 1
length = (audio_length * fs) // stride - loop - 1
if length == 0:
return out
else:
return out[:(loop + 1) * stride]
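# --- Hedged example (added for illustration, not in the original script) ---
# data_pre drops 0.2 s analysis windows whose peak amplitude is below 5e-4,
# i.e. near-silent regions. A 2 s tone padded with 3 s of silence at 16 kHz is
# trimmed back to roughly the tone; all numbers here are demo assumptions.
def _demo_data_pre():
    fs = 16000
    tone = 0.1 * np.sin(2 * np.pi * 440 * np.arange(2 * fs) / fs)
    clip = np.concatenate([tone, np.zeros(3 * fs)])
    trimmed = data_pre(clip.copy(), audio_length=5, fs=fs, audio_skip=0.2)
    assert trimmed.shape[0] <= clip.shape[0]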
# out of domain
def generate_data_train(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/train_post_competition.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/train'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/train.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_train'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)# get dict {filename: class_name}
for root, dirs, files in os.walk(data_pth): # collect all audio files under the directory
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
print(data_num)
mix_lst = [1,2,3]
rs1 = random.sample(data_lst, data_num)+\
random.sample(data_lst, data_num)+\
random.sample(data_lst, data_num)+\
random.sample(data_lst, data_num)+\
random.sample(data_lst, data_num) # each clip gets 5 chances to serve as the target sound
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+\
# random.sample(data_lst, data_num)+ \
# random.sample(data_lst, data_num) + \
# random.sample(data_lst, data_num) + \
# random.sample(data_lst, data_num) + \
# random.sample(data_lst, data_num) + \
# random.sample(data_lst, data_num)
# print(len(rs1))
# assert 1==2
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
while True:
rs2 = random.sample(data_lst, 1) # randomly pick one audio clip
rs2_real = Path(rs2[0]).name
cls2 = name_dict[rs2_real]
if cls2 == cls1 and rs2_real != real_name: # same class as the target, but never the identical file
break
while True:
rs3 = random.sample(data_lst, 1)
rs3_real = Path(rs3[0]).name
cls3 = name_dict[rs3_real]
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
rs4_real = Path(rs4[0]).name
cls4 = name_dict[rs4_real]
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
rs5_real = Path(rs5[0]).name
cls5 = name_dict[rs5_real]
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True) # target sound
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
index1 = random.randint(0, sample_num - num1 - 1) # randomly choose an onset within the 10 s clip to place the sound
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
snr = 10. ** (float(random.uniform(-5, 10)) / 20.) # draw a random SNR in [-5, 10] dB (as an amplitude ratio)
waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr) # add snr
waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
wave1[index1:index1 + num1] = waveform1
if num1 == 0:
continue
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(background) ** 2)) * snr2) # the background is also mixed at a random SNR
if mix_num == 1: # one interfering sound
    wave = background + wave1 + wave3
elif mix_num == 2: # two interfering sounds
    wave = background + wave1 + wave3 + wave4
else: # three interfering sounds
wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'train_' + str(flag_num) + '.wav'
file_name_lab = 'train_' + str(flag_num) + '_lab.wav'
file_name_re = 'train_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16') # save mixture
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16') # save clean audio
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16') # save reference audio
print('Save to: {}'.format(file_pth))
if mix_num == 1: # write the metadata line for this mixture
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Training Data Generation!')
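# --- Hedged sketch (added for illustration, not in the original script) ---
# Each interferer above is rescaled so that it sits at the drawn SNR relative to
# the target (snr is the corresponding amplitude ratio); this helper reproduces
# that rescaling on synthetic signals.
def _demo_snr_scaling():
    target = np.random.randn(16000)
    interferer = np.random.randn(16000)
    snr = 10. ** (float(random.uniform(-5, 10)) / 20.)   # dB -> amplitude ratio
    interferer = interferer * math.sqrt(np.mean(np.abs(target) ** 2)) / (
        math.sqrt(np.mean(np.abs(interferer) ** 2)) * snr)
    return target, interferer   # rms(target) / rms(interferer) now equals snr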
def generate_data_val(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/test_post_competition_scoring_clips.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/val'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/val.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_test'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)
# print(name_dict)
# assert 1==2
for root, dirs, files in os.walk(data_pth):
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
mix_lst = [1,2,3]
rs1 = random.sample(data_lst, data_num)
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
while True:
rs2 = random.sample(data_lst, 1)
real_name2 = Path(rs2[0]).name
cls2 = name_dict[real_name2] # get class label
if cls2 == cls1 and real_name2 != real_name:
break
while True:
rs3 = random.sample(data_lst, 1)
real_name3 = Path(rs3[0]).name
cls3 = name_dict[real_name3] # get class label
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
real_name4 = Path(rs4[0]).name
cls4 = name_dict[real_name4] # get class label
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
real_name5 = Path(rs5[0]).name
cls5 = name_dict[real_name5] # get class label
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
index1 = random.randint(0, sample_num - num1 - 1)
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
snr = 10. ** (float(random.uniform(-5, 10)) / 20.)
waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr)
waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
wave1[index1:index1 + num1] = waveform1
if num1 == 0:
continue
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(background) ** 2)) * snr2)
if mix_num == 1:
wave = background + wave1 + wave3
elif mix_num == 2:
wave = background + wave1 + wave3 + wave4
else:
wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'val_' + str(flag_num) + '.wav'
file_name_lab = 'val_' + str(flag_num) + '_lab.wav'
file_name_re = 'val_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16')
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16')
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16')
print('Save to: {}'.format(file_pth))
if mix_num == 1:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Val Data Generation!')
def generate_data_test(args):
sample_rate = args.sample_rate
sample_num = round(sample_rate * 10)
csv_path = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.meta/test_post_competition_scoring_clips.csv'
train_dir = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/test'
train_txt = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018_all_n/test.txt'
data_pth = '/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/ft_local/FSDKaggle2018.audio_test'
back_pth = '/apdcephfs/private_donchaoyang/tsss/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
data_lst = []
back_lst = []
name_dict = get_file_label_dict(csv_path)
# print(name_dict)
# assert 1==2
for root, dirs, files in os.walk(data_pth):
for name in files:
data_lst.append(os.path.join(root, name))
for root, dirs, files in os.walk(back_pth):
for name in files:
back_lst.append(os.path.join(root, name))
flag_num = 1
data_num = len(data_lst)
mix_lst = [1,2,3]
rs1 = random.sample(data_lst, data_num)
for i in range(len(rs1)):
real_name = Path(rs1[i]).name
cls1 = name_dict[real_name] # get class label
while True:
rs2 = random.sample(data_lst, 1)
real_name2 = Path(rs2[0]).name
cls2 = name_dict[real_name2] # get class label
if cls2 == cls1 and real_name2 != real_name:
break
while True:
rs3 = random.sample(data_lst, 1)
real_name3 = Path(rs3[0]).name
cls3 = name_dict[real_name3] # get class label
if cls3 != cls1:
break
while True:
rs4 = random.sample(data_lst, 1)
real_name4 = Path(rs4[0]).name
cls4 = name_dict[real_name4] # get class label
if cls4 != cls1:
break
while True:
rs5 = random.sample(data_lst, 1)
real_name5 = Path(rs5[0]).name
cls5 = name_dict[real_name5] # get class label
if cls5 != cls1:
break
rs6 = random.sample(back_lst, 1)
rs7 = random.sample(mix_lst, 1)
mix_num = rs7[0]
(waveform1_, _) = librosa.core.load(rs1[i], sr=sample_rate, mono=True)
(waveform2, _) = librosa.core.load(rs2[0], sr=sample_rate, mono=True)
(waveform3_, _) = librosa.core.load(rs3[0], sr=sample_rate, mono=True)
(waveform4_, _) = librosa.core.load(rs4[0], sr=sample_rate, mono=True)
(waveform5_, _) = librosa.core.load(rs5[0], sr=sample_rate, mono=True)
(background, _) = librosa.core.load(rs6[0], sr=sample_rate, mono=True)
if waveform1_.shape[0] >= sample_num:
waveform1_ = waveform1_[:sample_num-100]
if waveform2.shape[0] > sample_num:
waveform2 = waveform2[:sample_num]
if waveform3_.shape[0] >= sample_num:
waveform3_ = waveform3_[:sample_num-100]
if waveform4_.shape[0] >= sample_num:
waveform4_ = waveform4_[:sample_num-100]
if waveform5_.shape[0] >= sample_num:
waveform5_ = waveform5_[:sample_num-100]
if background.shape[0] > sample_num:
background = background[:sample_num]
elif background.shape[0] < sample_num:
background = np.concatenate((background, [0.] * (sample_num - background.shape[0])), 0)
waveform1 = data_pre(waveform1_, audio_length=int(waveform1_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform3 = data_pre(waveform3_, audio_length=int(waveform3_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform4 = data_pre(waveform4_, audio_length=int(waveform4_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
waveform5 = data_pre(waveform5_, audio_length=int(waveform5_.shape[0]//16000), fs=sample_rate, audio_skip=0.2)
num1 = waveform1.shape[0]
num3 = waveform3.shape[0]
num4 = waveform4.shape[0]
num5 = waveform5.shape[0]
index1 = random.randint(0, sample_num - num1 - 1)
index3 = random.randint(0, sample_num - num3 - 1)
index4 = random.randint(0, sample_num - num4 - 1)
index5 = random.randint(0, sample_num - num5 - 1)
onset = 10.0 * index1 / sample_num
offset = 10.0 * (index1 + num1) / sample_num
onset3 = 10.0 * index3 / sample_num
offset3 = 10.0 * (index3 + num3) / sample_num
onset4 = 10.0 * index4 / sample_num
offset4 = 10.0 * (index4 + num4) / sample_num
onset5 = 10.0 * index5 / sample_num
offset5 = 10.0 * (index5 + num5) / sample_num
wave1 = np.zeros(sample_num)
wave2 = np.zeros(sample_num)
wave3 = np.zeros(sample_num)
wave4 = np.zeros(sample_num)
wave5 = np.zeros(sample_num)
snr = 10. ** (float(random.uniform(-5, 10)) / 20.)
waveform3 = waveform3 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform3) ** 2)) * snr)
waveform4 = waveform4 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform4) ** 2)) * snr)
waveform5 = waveform5 * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(waveform5) ** 2)) * snr)
wave1[index1:index1 + num1] = waveform1
if num1 == 0:
continue
wave3[index3:index3 + num3] = waveform3
wave4[index4:index4 + num4] = waveform4
wave5[index5:index5 + num5] = waveform5
wave2[:waveform2.shape[0]] = waveform2
snr2 = 10. ** (float(random.uniform(5, 20)) / 20.)
background = background * math.sqrt(np.mean(np.abs(waveform1) ** 2)) / (
math.sqrt(np.mean(np.abs(background) ** 2)) * snr2)
if mix_num == 1:
wave = background + wave1 + wave3
elif mix_num == 2:
wave = background + wave1 + wave3 + wave4
else:
wave = background + wave1 + wave3 + wave4 + wave5
file_name = 'test_' + str(flag_num) + '.wav'
file_name_lab = 'test_' + str(flag_num) + '_lab.wav'
file_name_re = 'test_' + str(flag_num) + '_re.wav'
file_pth = train_dir + '/' + file_name
file_pth_lab = train_dir + '/' + file_name_lab
file_pth_re = train_dir + '/' + file_name_re
sf.write(file_pth, wave, sample_rate, subtype='PCM_16')
sf.write(file_pth_lab, wave1, sample_rate, subtype='PCM_16')
sf.write(file_pth_re, wave2, sample_rate, subtype='PCM_16')
print('Save to: {}'.format(file_pth))
if mix_num == 1:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\n')
elif mix_num == 2:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\n')
else:
with open(train_txt, "a+") as f:
f.write(
file_name + '\t' + rs1[i].split('/')[-1] + '\t' + rs2[0].split('/')[-1] + '\t' + rs6[0].split('/')[-1] + '\t' +
rs5[0].split('/')[-1] + '\t' + str(onset) + '\t' + str(offset) + '\t' + cls1 + '\t' + str(mix_num) + '\t' +
str(onset3) + '\t' + str(offset3) + '\t' + cls3 + '\t' +
str(onset4) + '\t' + str(offset4) + '\t' + cls4 + '\t' +
str(onset5) + '\t' + str(offset5) + '\t' + cls5 + '\n')
flag_num += 1
print('Finished Test Data Generation!')
def save_data(args):
train_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/train'
test_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/test'
noise_dir = '/apdcephfs/share_1316500/helinwang/data/tsss/data/noise'
train_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/train'
test_pth = '/apdcephfs/private_helinwang/tsss/ft_local/ESC-50-master/test/audio'
noise_pth = '/apdcephfs/share_1316500/helinwang/data/ft_local/TAU-urban-acoustic-scenes-2019-development/audio'
sample_rate = args.sample_rate
train_dict = {}
test_dict = {}
noise_dict = {}
for root, dirs, files in os.walk(train_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(train_dir, name)
train_dict[path_] = path_2
for root, dirs, files in os.walk(test_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(test_dir, name)
test_dict[path_] = path_2
for root, dirs, files in os.walk(noise_pth):
for name in files:
path_ = os.path.join(root, name)
path_2 = os.path.join(noise_dir, name)
noise_dict[path_] = path_2
for i in train_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
wave = data_pre(wave, audio_length=5, fs=sample_rate, audio_skip=0.2)
sf.write(train_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(train_dict[i]))
for i in test_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
wave = data_pre(wave, audio_length=5, fs=sample_rate, audio_skip=0.2)
sf.write(test_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(test_dict[i]))
for i in noise_dict.keys():
(wave, _) = librosa.core.load(i, sr=sample_rate, mono=True)
if wave.shape[0] > round(sample_rate*10):
wave = wave[:round(sample_rate*10)]
elif wave.shape[0] < round(sample_rate*10):
wave = np.concatenate((wave, [0.] * (round(sample_rate*10) - wave.shape[0])), 0)
sf.write(noise_dict[i], wave, sample_rate, subtype='PCM_24')
print('Save to: {}'.format(noise_dict[i]))
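    # For reference: with the default sample_rate of 16000 this loop fixes every scene clip to
    # 160000 samples (10 s) -- longer recordings are truncated, shorter ones are zero-padded.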
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Example of parser. ')
subparsers = parser.add_subparsers(dest='mode')
parser_at = subparsers.add_parser('audio_tagging')
parser_at.add_argument('--sample_rate', type=int, default=16000)
parser_at.add_argument('--duration', type=int, default=4)
parser_at.add_argument('--window_size', type=int, default=1024)
parser_at.add_argument('--hop_size', type=int, default=320)
parser_at.add_argument('--mel_bins', type=int, default=64)
parser_at.add_argument('--fmin', type=int, default=50)
parser_at.add_argument('--fmax', type=int, default=14000)
# parser_at.add_argument('--model_type', type=str, required=True)
# parser_at.add_argument('--checkpoint_path', type=str, required=True)
# parser_at.add_argument('--audio_path', type=str, required=True)
# parser_at.add_argument('--cuda', action='store_true', default=False)
parser_sed = subparsers.add_parser('sound_event_detection')
parser_sed.add_argument('--sample_rate', type=int, default=16000)
parser_sed.add_argument('--duration', type=int, default=4)
parser_sed.add_argument('--window_size', type=int, default=1024)
parser_sed.add_argument('--hop_size', type=int, default=320)
parser_sed.add_argument('--mel_bins', type=int, default=64)
parser_sed.add_argument('--fmin', type=int, default=50)
parser_sed.add_argument('--fmax', type=int, default=14000)
parser_sed.add_argument('--model_type', type=str, required=True)
parser_sed.add_argument('--checkpoint_path', type=str, required=True)
parser_sed.add_argument('--audio_path', type=str, required=True)
parser_sed.add_argument('--cuda', action='store_true', default=False)
args = parser.parse_args()
# check_files()
# if args.mode == 'audio_tagging':
# audio_tagging(args)
#
# elif args.mode == 'sound_event_detection':
# sound_event_detection(args)
#
# else:
# raise Exception('Error argument!')
# generate_mixed_data(args)
# generate_mixed_offset_data(args)
generate_data_train(args)
generate_data_val(args)
generate_data_test(args)
#save_data(args)
| 46,868
| 46.874362
| 170
|
py
|
Tim-TSENet
|
Tim-TSENet-main/generate_dataset/create_scp.py
|
import os
train_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp'
train_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp'
train_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp'
test_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_mix.scp'
test_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_s1.scp'
test_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tt_re.scp'
val_mix_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_mix.scp'
val_s1_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_s1.scp'
val_re_scp = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/val_re.scp'
train_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/train'
test_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/test'
vl_mix = '/apdcephfs/share_1316500/helinwang/data/tsss/esc_data/val'
tr_mix = open(train_mix_scp,'w')
tr_s1 = open(train_s1_scp,'w')
tr_re = open(train_re_scp,'w')
for root, dirs, files in os.walk(train_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
tr_s1.write(file+" "+root+'/'+file)
tr_s1.write('\n')
elif 're.wav' in file:
tr_re.write(file + " " + root + '/' + file)
tr_re.write('\n')
else:
tr_mix.write(file + " " + root + '/' + file)
tr_mix.write('\n')
tr_mix.close()
tr_s1.close()
tr_re.close()
tt_mix = open(test_mix_scp,'w')
tt_s1 = open(test_s1_scp,'w')
tt_re = open(test_re_scp,'w')
for root, dirs, files in os.walk(test_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
tt_s1.write(file+" "+root+'/'+file)
tt_s1.write('\n')
elif 're.wav' in file:
tt_re.write(file + " " + root + '/' + file)
tt_re.write('\n')
else:
tt_mix.write(file + " " + root + '/' + file)
tt_mix.write('\n')
tt_mix.close()
tt_s1.close()
tt_re.close()
val_mix = open(val_mix_scp,'w')
val_s1 = open(val_s1_scp,'w')
val_re = open(val_re_scp,'w')
for root, dirs, files in os.walk(vl_mix):
files.sort()
for file in files:
if 'lab.wav' in file:
val_s1.write(file+" "+root+'/'+file)
val_s1.write('\n')
elif 're.wav' in file:
val_re.write(file + " " + root + '/' + file)
val_re.write('\n')
else:
val_mix.write(file + " " + root + '/' + file)
val_mix.write('\n')
val_mix.close()
val_s1.close()
val_re.close()
| 2,673
| 34.653333
| 89
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/test_tasnet_one_hot.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot,TSDNet_plus_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
def time_to_frame(tm):
return int(tm/(10.0/312))
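# Worked example (assuming the 10 s / 312-frame resolution implied by the constant above):
# one frame covers roughly 32 ms, so time_to_frame(1.0) == 31 and time_to_frame(3.3) == 102.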
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
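# Usage sketch: `mix = read_wav('mix.wav')` yields a 1-D tensor of length L for mono input;
# `read_wav('mix.wav', return_rate=True)` additionally returns the sample rate.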
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onset, self.offset = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet_one_hot()
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = opt['save_tsv_path']
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
cls = str(self.clss[index])
onset = self.onset[index]
offset = self.offset[index]
onset_frame = time_to_frame(onset)
offset_frame = time_to_frame(offset)
cls_vec = torch.zeros(41)
cls_vec[self.clss[index]] = 1.
cls_vec = cls_vec.unsqueeze(0)
cls_index = cls_vec.argmax(1)
# cls_index = torch.from_numpy(cls_index)
print('i ',i)
# print('cls_index ',cls_index)
# assert 1==2
cls_index = cls_index.to(self.device)
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
# cls_index = cls_index[None,:]
# print('cls_index ',cls_index.shape)
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.tsdnet(mix, ref,cls_index)
# print('onset_frame, offset_frame',onset_frame, offset_frame)
# assert 1==2
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
pred = est_tsd_time_up.detach().cpu().numpy() # transpose to numpy
# pred = pred[:,:,0]
# print(pred.shape)
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
decoded_pred = [] #
decoded_pred_ = utils.decode_with_timestamps(str(cls),filtered_pred[0,:])
if len(decoded_pred_) == 0: # neg deal
decoded_pred_.append((str(cls),0,0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # it store the happen event and its time information
time_ratio = 10.0/pred.shape[1] # calculate time
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # transform the number of frame to real time
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # it name is hard_predictions...
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # get
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(
self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path,
event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path, segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_new/TSDNet_one_hot_scale156/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=str, default=10000, help="Max number for testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 9,953
| 48.034483
| 171
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/test_tasnet_wav.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav, read_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import Conv_TasNet
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
def __init__(self, mix_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = read_wav(mix_path)
opt = parse(yaml_path, is_tain=False)
        net = Conv_TasNet(**opt['Conv_Tasnet'])  # use the class name as imported above
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
        self.logger = logging.getLogger(__name__)  # the logging module is already imported
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.gpuid=tuple(gpuid)
def inference(self, file_path):
with torch.no_grad():
egs=self.mix.to(self.device)
norm = torch.norm(egs,float('inf'))
if len(self.gpuid) != 0:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
else:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach()) for s in ests]
index=0
for s in spks:
s = s[:egs.shape[0]]
#norm
s = s*norm/torch.max(torch.abs(s))
s = s.unsqueeze(0)
index += 1
os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
                filename = file_path+'/spk'+str(index)+'/'+'test.wav'  # single-utterance output, named as in dualrnn_test_wav.py
write_wav(filename, s, 8000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='../create_scp/tt_mix.scp', help='Path to mix scp file.')
parser.add_argument(
'-yaml', type=str, default='./config/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='./checkpoint/Conv_Tasnet_skip/best.pt', help="Path to model file.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/conv_tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.yaml, args.model, gpuid)
separation.inference(args.save_path)
if __name__ == "__main__":
main()
| 2,864
| 37.2
| 104
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/test_tasnet.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
import datetime
import uuid
from pathlib import Path
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss,_,_ = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet()
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = os.path.join(opt['save_tsv_path'], opt['name'],
"{}_{}".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'), uuid.uuid1().hex))
Path(self.save_tsv_path).mkdir(exist_ok=True, parents=True) # make dir
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
cls = str(self.clss[index])
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
x_cls, out_tsd_time, out_tsd_up = self.tsdnet(mix, ref)
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
pred = out_tsd_up.detach().cpu().numpy() # transpose to numpy
# pred = pred[:,:,0]
# print(pred.shape)
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
decoded_pred = [] #
decoded_pred_ = utils.decode_with_timestamps(str(cls),filtered_pred[0,:])
if len(decoded_pred_) == 0: # neg deal
decoded_pred_.append((str(cls),0,0))
decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # it store the happen event and its time information
time_ratio = 10.0/pred.shape[1] # calculate time
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # transform the number of frame to real time
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # it name is hard_predictions...
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # get
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path, event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path,
segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_audio/TSDNet_audio_2gru/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=str, default=10000, help="Max number for testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix_new.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re_new.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf_new.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 9,282
| 48.116402
| 167
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/train_Tasnet.py
|
import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model.model import TSDNet,TSDNet_one_hot, TSDNet_plus_one_hot
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot,trainer_Tasnet_one_hot_regresion
import torch.optim.lr_scheduler as lr_scheduler
import random
import torch.backends.cudnn as cudnn
DEVICE = 'cpu'
if torch.cuda.is_available():
DEVICE = 'cuda'
torch.backends.cudnn.deterministic = True
DEVICE = torch.device(DEVICE)
seed = 19980228
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1],
opt['datasets']['train']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
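# Example (hypothetical config values): with opt['optim'] = {'name': 'Adam', 'lr': 1e-3,
# 'weight_decay': 1e-5} the getattr lookup resolves to torch.optim.Adam and the call above
# becomes torch.optim.Adam(params, lr=1e-3, weight_decay=1e-5).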
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Conv-TasNet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Conv-Tasnet")
logger.info(opt['logger']['experimental_description'])
print(opt['model_name'])
if opt['model_name'] == 'TSDNet_one_hot':
net = TSDNet_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] == 'TSDNet_plus_one_hot':
net = TSDNet_plus_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet':
net = TSDNet(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
else:
assert 1==2
# build optimizer
logger.info("Building the optimizer of Conv-Tasnet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Conv-Tasnet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
# build scheduler
# scheduler = ReduceLROnPlateau(
# optimizer, mode='min',
# factor=opt['scheduler']['factor'],
# patience=opt['scheduler']['patience'],
# verbose=True, min_lr=opt['scheduler']['min_lr'])
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of Conv-Tasnet')
if opt['one_hot']:
if opt['reg']:
trainer = trainer_Tasnet_one_hot_regresion.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet_one_hot.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
else:
trainer = trainer_Tasnet.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 7,358
| 46.477419
| 145
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/tsd_utils.py
|
import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
import augment
#import dataset
from scipy.interpolate import interp1d
def parse_config_or_kwargs(config_file, **kwargs):
"""parse_config_or_kwargs
:param config_file: Config file that has parameters, yaml format
:param **kwargs: Other alternative parameters or overwrites for config
"""
with open(config_file) as con_read:
yaml_config = yaml.load(con_read, Loader=yaml.FullLoader)
arguments = dict(yaml_config, **kwargs)
return arguments
def find_contiguous_regions(activity_array):  # if the boolean/XOR indexing below is hard to follow, an equivalent O(n) linear scan works as well
"""Find contiguous regions from bool valued numpy.array.
Copy of https://dcase-repo.github.io/dcase_util/_modules/dcase_util/data/decisions.html#DecisionEncoder
Reason is:
1. This does not belong to a class necessarily
2. Import DecisionEncoder requires sndfile over some other imports..which causes some problems on clusters
"""
change_indices = np.logical_xor(activity_array[1:], activity_array[:-1]).nonzero()[0]
change_indices += 1
if activity_array[0]:
# If the first element of activity_array is True add 0 at the beginning
change_indices = np.r_[0, change_indices]
if activity_array[-1]:
# If the last element of activity_array is True, add the length of the array
change_indices = np.r_[change_indices, activity_array.size]
# print(change_indices.reshape((-1, 2)))
# Reshape the result into two columns
return change_indices.reshape((-1, 2))
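# Worked example: find_contiguous_regions(np.array([False, True, True, False, True]))
# returns [[1, 3], [4, 5]], i.e. activity spans [1, 3) and [4, 5) in frame indices.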
def split_train_cv(
data_frame: pd.DataFrame,
frac: float = 0.9,
y=None, # Only for stratified, computes necessary split
**kwargs):
"""split_train_cv
:param data_frame:
:type data_frame: pd.DataFrame
:param frac:
:type frac: float
"""
if kwargs.get('mode',
None) == 'urbansed': # Filenames are DATA_-1 DATA_-2 etc
data_frame.loc[:, 'id'] = data_frame.groupby(
data_frame['filename'].str.split('_').apply(
lambda x: '_'.join(x[:-1]))).ngroup()
sampler = np.random.permutation(data_frame['id'].nunique())
num_train = int(frac * len(sampler))
train_indexes = sampler[:num_train]
cv_indexes = sampler[num_train:]
train_data = data_frame[data_frame['id'].isin(train_indexes)]
cv_data = data_frame[data_frame['id'].isin(cv_indexes)]
del train_data['id']
del cv_data['id']
    elif kwargs.get('mode', None) == 'stratified':  # stratified sampling
# Use statified sampling
from skmultilearn.model_selection import iterative_train_test_split
index_train, _, index_cv, _ = iterative_train_test_split(
data_frame.index.values.reshape(-1, 1), y, test_size=1. - frac)
train_data = data_frame[data_frame.index.isin(index_train.squeeze())]
cv_data = data_frame[data_frame.index.isin(index_cv.squeeze())] # cv --> cross validation
else:
# Simply split train_test
train_data = data_frame.sample(frac=frac, random_state=10)
cv_data = data_frame[~data_frame.index.isin(train_data.index)]
return train_data, cv_data
def parse_transforms(transform_list):
"""parse_transforms
    parses the config file's transformation strings to the corresponding methods
:param transform_list: String list
"""
transforms = []
for trans in transform_list:
if trans == 'shift':
transforms.append(augment.TimeShift(0, 50))
elif trans == 'freqmask':
transforms.append(augment.FreqMask(2, 8))
elif trans == 'timemask':
transforms.append(augment.TimeMask(2, 60))
return torch.nn.Sequential(*transforms)
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'): # print yaml file
"""pprint_dict
:param outputfun: function to use, defaults to sys.stdout
:param in_dict: dict to print
"""
if formatter == 'yaml':
format_fun = yaml.dump
elif formatter == 'pretty':
format_fun = pformat
for line in format_fun(in_dict).split('\n'):
outputfun(line)
def getfile_outlogger(outputfile):
log_format = "[<green>{time:YYYY-MM-DD HH:mm:ss}</green>] {message}"
logger.configure(handlers=[{"sink": sys.stderr, "format": log_format}])
if outputfile:
logger.add(outputfile, enqueue=True, format=log_format)
return logger
# according label, get encoder
def train_labelencoder(labels: pd.Series, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
if isinstance(labels[0], six.string_types):
        # In case of using non-processed strings, e.g., Vacuum, Speech
label_array = labels.str.split(',').values.tolist() # split label according to ','
elif isinstance(labels[0], np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
elif isinstance(labels[0], collections.Iterable):
label_array = labels
encoder = pre.MultiLabelBinarizer(sparse_output=sparse)
encoder.fit(label_array)
return encoder
def encode_labels(labels: pd.Series, encoder=None, sparse=True):
"""encode_labels
Encodes labels
:param labels: pd.Series representing the raw labels e.g., Speech, Water
:param encoder (optional): Encoder already fitted
returns encoded labels (many hot) and the encoder
"""
assert isinstance(labels, pd.Series), "Labels need to be series"
instance = labels.iloc[0]
if isinstance(instance, six.string_types):
        # In case of using non-processed strings, e.g., Vacuum, Speech
label_array = labels.str.split(',').values.tolist()
elif isinstance(instance, np.ndarray):
# Encoder does not like to see numpy array
label_array = [lab.tolist() for lab in labels]
elif isinstance(instance, collections.Iterable):
label_array = labels
# get label_array, it is a list ,contain a lot of label, this label are string type
if not encoder:
encoder = pre.MultiLabelBinarizer(sparse_output=sparse) # if we encoder is None, we should init a encoder firstly.
encoder.fit(label_array)
labels_encoded = encoder.transform(label_array) # transform string to digit
return labels_encoded, encoder
# return pd.arrays.SparseArray(
# [row.toarray().ravel() for row in labels_encoded]), encoder
def decode_with_timestamps(events,labels: np.array):
"""decode_with_timestamps
Decodes the predicted label array (2d) into a list of
[(Labelname, onset, offset), ...]
:param encoder: Encoder during training
:type encoder: pre.MultiLabelBinarizer
:param labels: n-dim array
:type labels: np.array
"""
# print('labels ',labels.shape)
# assert 1==2
if labels.ndim == 3:
return [_decode_with_timestamps(events,lab) for lab in labels]
else:
return _decode_with_timestamps(events,labels)
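# Example: decode_with_timestamps('class_3', np.array([0, 1, 1, 0, 1]))
# -> [('class_3', 1, 3), ('class_3', 4, 5)]  (label, onset frame, offset frame)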
def median_filter(x, window_size, threshold=0.5):
"""median_filter
:param x: input prediction array of shape (B, T, C) or (B, T).
Input is a sequence of probabilities 0 <= x <= 1
:param window_size: An integer to use
:param threshold: Binary thresholding threshold
"""
x = binarize(x, threshold=threshold) # transfer to 0 or 1
if x.ndim == 3:
size = (1, window_size, 1)
elif x.ndim == 2 and x.shape[0] == 1:
# Assume input is class-specific median filtering
# E.g, Batch x Time [1, 501]
size = (1, window_size)
elif x.ndim == 2 and x.shape[0] > 1:
# Assume input is standard median pooling, class-independent
# E.g., Time x Class [501, 10]
size = (window_size, 1)
return scipy.ndimage.median_filter(x, size=size)
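# Example: median_filter(np.array([[0.1, 0.9, 0.1, 0.1, 0.1]]), window_size=3, threshold=0.5)
# first binarizes to [[0, 1, 0, 0, 0]] and the length-3 median then removes the isolated
# spike, giving [[0, 0, 0, 0, 0]]; with window_size=1 it is plain thresholding.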
def _decode_with_timestamps(events,labels):
result_labels = []
#print('labels ',labels.shape)
# print(labels)
change_indices = find_contiguous_regions(labels)
# print(change_indices)
# assert 1==2
for row in change_indices:
result_labels.append((events,row[0], row[1]))
return result_labels
def inverse_transform_labels(encoder, pred):
if pred.ndim == 3:
return [encoder.inverse_transform(x) for x in pred]
else:
return encoder.inverse_transform(pred)
def binarize(pred, threshold=0.5):
# Batch_wise
if pred.ndim == 3:
return np.array(
[pre.binarize(sub, threshold=threshold) for sub in pred])
else:
return pre.binarize(pred, threshold=threshold)
def double_threshold(x, high_thres, low_thres, n_connect=1):
"""double_threshold
Helper function to calculate double threshold for n-dim arrays
:param x: input array
:param high_thres: high threshold value
:param low_thres: Low threshold value
:param n_connect: Distance of <= n clusters will be merged
"""
assert x.ndim <= 3, "Whoops something went wrong with the input ({}), check if its <= 3 dims".format(
x.shape)
if x.ndim == 3:
apply_dim = 1
elif x.ndim < 3:
apply_dim = 0
# x is assumed to be 3d: (batch, time, dim)
# Assumed to be 2d : (time, dim)
# Assumed to be 1d : (time)
# time axis is therefore at 1 for 3d and 0 for 2d (
return np.apply_along_axis(lambda x: _double_threshold(
x, high_thres, low_thres, n_connect=n_connect),
axis=apply_dim,
arr=x)
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True): # in nature, double_threshold considers boundary question
"""_double_threshold
Computes a double threshold over the input array
:param x: input array, needs to be 1d
:param high_thres: High threshold over the array
:param low_thres: Low threshold over the array
:param n_connect: Postprocessing, maximal distance between clusters to connect
    :param return_arr: If True (the default) return a zero-one array of the same size as x; if False return the filtered (start, end) index pairs.
"""
assert x.ndim == 1, "Input needs to be 1d"
high_locations = np.where(x > high_thres)[0] # return the index, where value is greater than high_thres
locations = x > low_thres # return true of false
encoded_pairs = find_contiguous_regions(locations)
# print('encoded_pairs ',encoded_pairs)
filtered_list = list(
filter(
lambda pair:
((pair[0] <= high_locations) & (high_locations <= pair[1])).any(),
            encoded_pairs))  # keep the pairs that include at least one high_locations index
#print('filtered_list ',filtered_list)
filtered_list = connect_(filtered_list, n_connect) # if the distance of two pair is less than n_connect, we can merge them
if return_arr:
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in filtered_list:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
return filtered_list
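# Worked example (the commented test case at the bottom of this file):
# x = [0.1, 0.2, 0.3, 0.8, 0.4, 0.1, 0.3, 0.9, 0.4], high_thres=0.7, low_thres=0.2
# -> low-threshold regions [2, 5) and [6, 9) each contain a value above 0.7, and with
# n_connect=1 the one-frame gap between them is bridged, so the returned array is
# [0, 0, 1, 1, 1, 1, 1, 1, 1].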
def connect_clusters(x, n=1):
if x.ndim == 1:
return connect_clusters_(x, n)
if x.ndim >= 2:
return np.apply_along_axis(lambda a: connect_clusters_(a, n=n), -2, x)
def connect_clusters_(x, n=1):
"""connect_clusters_
Connects clustered predictions (0,1) in x with range n
:param x: Input array. zero-one format
:param n: Number of frames to skip until connection can be made
"""
assert x.ndim == 1, "input needs to be 1d"
reg = find_contiguous_regions(x)
start_end = connect_(reg, n=n)
zero_one_arr = np.zeros_like(x, dtype=int)
for sl in start_end:
zero_one_arr[sl[0]:sl[1]] = 1
return zero_one_arr
def connect_(pairs, n=1):
"""connect_
Connects two adjacent clusters if their distance is <= n
:param pairs: Clusters of iterateables e.g., [(1,5),(7,10)]
:param n: distance between two clusters
"""
if len(pairs) == 0:
return []
start_, end_ = pairs[0]
new_pairs = []
for i, (next_item, cur_item) in enumerate(zip(pairs[1:], pairs[0:])):
end_ = next_item[1]
if next_item[0] - cur_item[1] <= n:
pass
else:
new_pairs.append((start_, cur_item[1]))
start_ = next_item[0]
new_pairs.append((start_, end_))
return new_pairs
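# Example: connect_([(1, 5), (7, 10)], n=2) -> [(1, 10)] (the gap of 2 frames is bridged),
# while connect_([(1, 5), (7, 10)], n=1) -> [(1, 5), (7, 10)].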
def predictions_to_time(df, ratio):
df.onset = df.onset * ratio
df.offset = df.offset * ratio
return df
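# Example (assuming 10 s clips): with 312 prediction frames the caller passes
# ratio = 10.0 / 312, so a frame onset of 78 becomes 2.5 s in the output DataFrame.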
def upgrade_resolution(arr, scale):
print('arr ',arr.shape)
x = np.arange(0, arr.shape[0])
f = interp1d(x, arr, kind='linear', axis=0, fill_value='extrapolate')
scale_x = np.arange(0, arr.shape[0], 1 / scale)
up_scale = f(scale_x)
return up_scale
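# Example: upgrade_resolution(np.array([0., 1., 0., 1., 0.]), scale=2) linearly interpolates
# to twice the length: [0., 0.5, 1., 0.5, 0., 0.5, 1., 0.5, 0., -0.5] (the final value is
# extrapolated beyond the original range).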
# a = [0.1,0.2,0.3,0.8,0.4,0.1,0.3,0.9,0.4]
# a = np.array(a)
# b = a>0.2
# _double_threshold(a,0.7,0.2)
| 13,313
| 35.377049
| 169
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/train_rnn.py
|
import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset import Datasets
from model import model_rnn
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Dual_RNN
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets(
opt['datasets']['train']['dataroot_mix'],
[opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1]],
**opt['datasets']['audio_setting'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets(
opt['datasets']['val']['dataroot_mix'],
[opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1]],
**opt['datasets']['audio_setting'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=False)
return train_dataloader, val_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Dual-Path-RNN')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Dual-Path-RNN")
Dual_Path_RNN = model_rnn.Dual_RNN_model(**opt['Dual_Path_RNN'])
# build optimizer
logger.info("Building the optimizer of Dual-Path-RNN")
optimizer = make_optimizer(Dual_Path_RNN.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Dual-Path-RNN')
train_dataloader, val_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader)))
# build scheduler
scheduler = ReduceLROnPlateau(
optimizer, mode='min',
factor=opt['scheduler']['factor'],
patience=opt['scheduler']['patience'],
verbose=True, min_lr=opt['scheduler']['min_lr'])
# build trainer
logger.info('Building the Trainer of Dual-Path-RNN')
trainer = trainer_Dual_RNN.Trainer(train_dataloader, val_dataloader, Dual_Path_RNN, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 3,546
| 37.554348
| 114
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/train_Tasnet_tse.py
|
import sys
sys.path.append('./')
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets_tse # using add tse results
from model.model import TSDNet, TSDNet_tse, TSDNet_one_hot, TSDNet_plus_one_hot  # TSDNet is also needed for the 'TSDNet' branch below
from logger import set_logger
import logging
from config import option
import argparse
import torch
from trainer import trainer_Tasnet,trainer_Tasnet_one_hot,trainer_Tasnet_one_hot_regresion, trainer_Tasnet_tse
import torch.optim.lr_scheduler as lr_scheduler
import random
import torch.backends.cudnn as cudnn
DEVICE = 'cpu'
if torch.cuda.is_available():
DEVICE = 'cuda'
torch.backends.cudnn.deterministic = True
DEVICE = torch.device(DEVICE)
seed = 1508758
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
cudnn.deterministic = True
def make_dataloader(opt):
# make train's dataloader
train_dataset = Datasets_tse(
opt['datasets']['train']['dataroot_mix'],
opt['datasets']['train']['dataroot_targets'][0],
opt['datasets']['train']['dataroot_targets'][1],
opt['datasets']['train']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tr_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
train_dataloader = Loader(train_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make validation dataloader
val_dataset = Datasets_tse(
opt['datasets']['val']['dataroot_mix'],
opt['datasets']['val']['dataroot_targets'][0],
opt['datasets']['val']['dataroot_targets'][1],
opt['datasets']['val']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
val_dataloader = Loader(val_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
# make test dataloader
test_dataset = Datasets_tse(
opt['datasets']['test']['dataroot_mix'],
opt['datasets']['test']['dataroot_targets'][0],
opt['datasets']['test']['dataroot_targets'][1],
opt['datasets']['test']['dataroot_targets'][2],
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_tse.scp',
opt['datasets']['audio_setting']['sample_rate'],
opt['datasets']['audio_setting']['class_num'],
opt['datasets']['audio_setting']['audio_length'])
test_dataloader = Loader(test_dataset,
batch_size=opt['datasets']['dataloader_setting']['batch_size'],
num_workers=opt['datasets']['dataloader_setting']['num_workers'],
shuffle=opt['datasets']['dataloader_setting']['shuffle'])
return train_dataloader, val_dataloader, test_dataloader
def make_optimizer(params, opt):
optimizer = getattr(torch.optim, opt['optim']['name'])
if opt['optim']['name'] == 'Adam':
optimizer = optimizer(
params, lr=opt['optim']['lr'], weight_decay=opt['optim']['weight_decay'])
else:
optimizer = optimizer(params, lr=opt['optim']['lr'], weight_decay=opt['optim']
['weight_decay'], momentum=opt['optim']['momentum'])
return optimizer
def train():
parser = argparse.ArgumentParser(
description='Parameters for training Conv-TasNet')
parser.add_argument('--opt', type=str, help='Path to option YAML file.')
args = parser.parse_args()
opt = option.parse(args.opt)
set_logger.setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
logger = logging.getLogger(opt['logger']['name'])
# build model
logger.info("Building the model of Conv-Tasnet")
logger.info(opt['logger']['experimental_description'])
print(opt['model_name'])
if opt['model_name'] == 'TSDNet_one_hot':
net = TSDNet_one_hot(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet':
net = TSDNet(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth'
)
elif opt['model_name'] =='TSDNet_tse':
net = TSDNet_tse(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth',
use_frame = opt['use_frame'],
only_ref=opt['only_ref']
)
else:
assert 1==2
# build optimizer
logger.info("Building the optimizer of Conv-Tasnet")
optimizer = make_optimizer(net.parameters(), opt)
# build dataloader
logger.info('Building the dataloader of Conv-Tasnet')
train_dataloader, val_dataloader, test_dataloader = make_dataloader(opt)
logger.info('Train Datasets Length: {}, Val Datasets Length: {}, Test Datasets Length: {}'.format(
len(train_dataloader), len(val_dataloader), len(test_dataloader)))
# build scheduler
scheduler = lr_scheduler.CosineAnnealingLR(optimizer, opt['train']['epoch'])
# build trainer
logger.info('Building the Trainer of Conv-Tasnet')
if opt['one_hot']:
assert 1==2
else:
trainer = trainer_Tasnet_tse.Trainer(train_dataloader, val_dataloader, test_dataloader, net, optimizer, scheduler, opt)
trainer.run()
if __name__ == "__main__":
train()
| 7,255
| 47.373333
| 131
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/dualrnn_test_wav.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav, read_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model_rnn import Dual_RNN_model
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
def __init__(self, mix_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = read_wav(mix_path)
opt = parse(yaml_path)
net = Dual_RNN_model(**opt['Dual_Path_RNN'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net
self.gpuid = gpuid
def inference(self, file_path):
self.net.eval()
with torch.no_grad():
egs=self.mix
norm = torch.norm(egs,float('inf'))
if len(self.gpuid) != 0:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
else:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
print(ests[0].shape)
spks=[torch.squeeze(s.detach()) for s in ests]
index=0
for s in spks:
#norm
s = s - torch.mean(s)
s = s*norm/torch.max(torch.abs(s))
index += 1
os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
filename=file_path+'/spk'+str(index)+'/'+'test.wav'
write_wav(filename, s, 16000)
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='1_mix.wav', help='Path to mix scp file.')
parser.add_argument(
'-yaml', type=str, default='./config/train_rnn_opt.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='./checkpoint/Dual_Path_RNN_opt/best.pt', help="Path to model file.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./test', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.yaml, args.model, [])
separation.inference(args.save_path)
if __name__ == "__main__":
main()
| 2,918
| 38.986301
| 105
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/augment.py
|
import torch
import logging
import torch.nn as nn
import numpy as np
class TimeShift(nn.Module):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
if self.training:
shift = torch.empty(1).normal_(self.mean, self.std).int().item()
x = torch.roll(x, shift, dims=0)
return x
class TimeMask(nn.Module):
def __init__(self, n=1, p=50):
super().__init__()
self.p = p
self.n = n
def forward(self, x):
time, freq = x.shape
if self.training:
for i in range(self.n):
t = torch.empty(1, dtype=int).random_(self.p).item()
to_sample = max(time - t, 1)
t0 = torch.empty(1, dtype=int).random_(to_sample).item()
x[t0:t0 + t, :] = 0
return x
class FreqMask(nn.Module):
def __init__(self, n=1, p=12):
super().__init__()
self.p = p
self.n = n
def forward(self, x):
time, freq = x.shape
if self.training:
for i in range(self.n):
f = torch.empty(1, dtype=int).random_(self.p).item()
f0 = torch.empty(1, dtype=int).random_(freq - f).item()
x[:, f0:f0 + f] = 0.
return x
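# Usage sketch (mirroring parse_transforms in tsd_utils.py): the three modules are chained
# with torch.nn.Sequential and applied to a (time, freq) spectrogram while in training mode,
# e.g. aug = torch.nn.Sequential(TimeShift(0, 50), FreqMask(2, 8), TimeMask(2, 60)); spec = aug(spec)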
| 1,333
| 23.703704
| 76
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/utils.py
| 0
| 0
| 0
|
py
|
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/test_tasnet_tse.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot, TSDNet_tse
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
import datetime
import uuid
from pathlib import Path
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, tse_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.tse_audio = handle_scp(tse_scp)
self.clss,_,_ = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet_tse(nFrameLen=opt['datasets']['audio_setting']['nFrameLen'],
nFrameShift=opt['datasets']['audio_setting']['nFrameShift'],
cls_num = opt['datasets']['audio_setting']['class_num'],
CNN10_settings=opt['Conv_Tasnet']['CNN10_settings'],
pretrainedCNN10='/apdcephfs/private_donchaoyang/tsss/Dual-Path-RNN-Pytorch2/model/Cnn10_mAP=0.380.pth',
use_frame = opt['use_frame'],
only_ref = opt['only_ref']
)
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = os.path.join(opt['save_tsv_path'], opt['name'],
"{}_{}".format(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%m'), uuid.uuid1().hex))
Path(self.save_tsv_path).mkdir(exist_ok=True, parents=True) # make dir
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
tse_index = index.replace('.wav','_tse.wav')
cls = str(self.clss[index])
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
tse_audio = read_wav(self.tse_audio[tse_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
tse_audio = tse_audio.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
tse_audio = tse_audio[None,:]
x_cls, out_tsd_time, out_tsd_up = self.tsdnet(mix, ref, tse_audio)
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
pred = out_tsd_up.detach().cpu().numpy() # transpose to numpy
# pred = pred[:,:,0]
# print(pred.shape)
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
                decoded_pred = []
                decoded_pred_ = utils.decode_with_timestamps(str(cls), filtered_pred[0, :])
                if len(decoded_pred_) == 0:
                    # negative case: nothing was detected, so emit a dummy (label, 0, 0) segment
                    decoded_pred_.append((str(cls), 0, 0))
                decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
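        # Collect every predicted (filename, onset, offset, event_label) row and convert
        # frame indices to seconds: each output frame spans 10 s / pred.shape[1] seconds.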
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # it store the happen event and its time information
time_ratio = 10.0/pred.shape[1] # calculate time
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # transform the number of frame to real time
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # it name is hard_predictions...
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # get
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path, event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path,
segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train_tse.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_audio/TSDNet_audio_2gru_tse_ML2_fix_random_kaiming_norm_w_clip_w_frame/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=str, default=10000, help="Max number for testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_mix.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_re.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_tse_7_1.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 10,323
| 50.108911
| 214
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/metrics.py
|
import sed_eval
import tsd_utils as utils
import pandas as pd
from sklearn.preprocessing import binarize, MultiLabelBinarizer
import sklearn.metrics as skmetrics
import numpy as np
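# get_audio_tagging_df collapses the event-level rows into one row per file holding the
# set of unique event labels, i.e. clip-level (audio tagging) targets or predictions.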
def get_audio_tagging_df(df):
return df.groupby('filename')['event_label'].unique().reset_index()
def audio_tagging_results(reference, estimated):
"""audio_tagging_results. Returns clip-level F1 Scores
:param reference: The ground truth dataframe as pd.DataFrame
:param estimated: Predicted labels by the model ( thresholded )
"""
if "event_label" in reference.columns:
classes = reference.event_label.dropna().unique().tolist(
) + estimated.event_label.dropna().unique().tolist()
encoder = MultiLabelBinarizer().fit([classes])
reference = get_audio_tagging_df(reference)
estimated = get_audio_tagging_df(estimated)
ref_labels, _ = utils.encode_labels(reference['event_label'],
encoder=encoder)
reference['event_label'] = ref_labels.tolist()
est_labels, _ = utils.encode_labels(estimated['event_label'],
encoder=encoder)
estimated['event_label'] = est_labels.tolist()
matching = reference.merge(estimated,
how='outer',
on="filename",
suffixes=["_ref", "_pred"])
def na_values(val):
if type(val) is np.ndarray:
return val
elif isinstance(val, list):
return np.array(val)
if pd.isna(val):
return np.zeros(len(encoder.classes_))
return val
ret_df = pd.DataFrame(columns=['label', 'f1', 'precision', 'recall'])
if not estimated.empty:
matching['event_label_pred'] = matching.event_label_pred.apply(
na_values)
matching['event_label_ref'] = matching.event_label_ref.apply(na_values)
y_true = np.vstack(matching['event_label_ref'].values)
y_pred = np.vstack(matching['event_label_pred'].values)
ret_df.loc[:, 'label'] = encoder.classes_
for avg in [None, 'macro', 'micro']:
avg_f1 = skmetrics.f1_score(y_true, y_pred, average=avg)
avg_pre = skmetrics.precision_score(y_true, y_pred, average=avg)
avg_rec = skmetrics.recall_score(y_true, y_pred, average=avg)
# avg_auc = skmetrics.roc_auc_score(y_true, y_pred, average=avg)
if avg == None:
# Add for each label non pooled stats
ret_df.loc[:, 'precision'] = avg_pre
ret_df.loc[:, 'recall'] = avg_rec
ret_df.loc[:, 'f1'] = avg_f1
# ret_df.loc[:, 'AUC'] = avg_auc
            else:
                # Append the macro and micro averages as the last rows
                # (DataFrame.append was removed in pandas 2.0, so build the row with pd.concat)
                ret_df = pd.concat(
                    [ret_df,
                     pd.DataFrame([{
                         'label': avg,
                         'precision': avg_pre,
                         'recall': avg_rec,
                         'f1': avg_f1,
                         # 'AUC': avg_auc
                     }])],
                    ignore_index=True)
return ret_df
def get_event_list_current_file(df, fname):
"""
Get list of events for a given filename
:param df: pd.DataFrame, the dataframe to search on
:param fname: the filename to extract the value from the dataframe
:return: list of events (dictionaries) for the given filename
"""
event_file = df[df["filename"] == fname]
if len(event_file) == 1:
if pd.isna(event_file["event_label"].iloc[0]):
event_list_for_current_file = [{"filename": fname}]
else:
event_list_for_current_file = event_file.to_dict('records')
else:
event_list_for_current_file = event_file.to_dict('records')
return event_list_for_current_file
def event_based_evaluation_df(reference,
estimated,
t_collar=0.200,
percentage_of_length=0.2):
"""
Calculate EventBasedMetric given a reference and estimated dataframe
:param reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
reference events
:param estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
estimated events to be compared with reference
:return: sed_eval.sound_event.EventBasedMetrics with the scores
"""
evaluated_files = reference["filename"].unique()
classes = []
classes.extend(reference.event_label.dropna().unique())
classes.extend(estimated.event_label.dropna().unique())
classes = list(set(classes))
event_based_metric = sed_eval.sound_event.EventBasedMetrics(
event_label_list=classes,
t_collar=t_collar,
percentage_of_length=percentage_of_length,
empty_system_output_handling='zero_score')
for fname in evaluated_files:
reference_event_list_for_current_file = get_event_list_current_file(
reference, fname)
estimated_event_list_for_current_file = get_event_list_current_file(
estimated, fname)
event_based_metric.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file,
)
return event_based_metric
def segment_based_evaluation_df(reference, estimated, time_resolution=1.):
evaluated_files = reference["filename"].unique() # get filename
classes = []
classes.extend(reference.event_label.dropna().unique())
classes.extend(estimated.event_label.dropna().unique())
classes = list(set(classes))
segment_based_metric = sed_eval.sound_event.SegmentBasedMetrics(
event_label_list=classes, time_resolution=time_resolution)
for fname in evaluated_files:
reference_event_list_for_current_file = get_event_list_current_file(
reference, fname)
estimated_event_list_for_current_file = get_event_list_current_file(
estimated, fname)
# if len(estimated_event_list_for_current_file) !=0:
# print('fname ',fname)
# print('reference_event_list_for_current_file ',reference_event_list_for_current_file)
# print('estimated_event_list_for_current_file ',estimated_event_list_for_current_file)
# assert 1==2
segment_based_metric.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file)
return segment_based_metric
def compute_metrics(valid_df, pred_df, time_resolution=1.):
metric_event = event_based_evaluation_df(valid_df,
pred_df,
t_collar=0.200,
percentage_of_length=0.2)
metric_segment = segment_based_evaluation_df(
valid_df, pred_df, time_resolution=time_resolution)
return metric_event, metric_segment
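# Minimal usage sketch (hypothetical dataframes): both inputs need 'filename', 'onset',
# 'offset' and 'event_label' columns, e.g.
# event_metric, segment_metric = compute_metrics(ref_df, pred_df, time_resolution=0.2)
# print(event_metric)  # sed_eval event-based report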
def roc(y_true, y_pred, average=None):
return skmetrics.roc_auc_score(y_true, y_pred, average=average)
def mAP(y_true, y_pred, average=None):
return skmetrics.average_precision_score(y_true, y_pred, average=average)
| 7,386
| 38.715054
| 118
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/draw.py
|
import torchaudio
import matplotlib
import matplotlib.pyplot as plt
[width, height] = matplotlib.rcParams['figure.figsize']
if width < 10:
matplotlib.rcParams['figure.figsize'] = [width * 2.5, height]
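# Widen the default figure (2.5x) so a full 10 s waveform stays readable when plotted.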
if __name__ == "__main__":
# filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/train/train_1.wav"
filename = "/apdcephfs/private_helinwang/tsss/tsss_mixed/test_offset/test_offset_10_re.wav"
waveform, sample_rate = torchaudio.load(filename)
print("Shape of waveform: {}".format(waveform.size()))
print("Sample rate of waveform: {}".format(sample_rate))
plt.figure()
plt.plot(waveform.t().numpy())
# plt.title('test_offset_100_mix')
plt.xticks([])
plt.yticks([])
plt.axis('off')
plt.savefig('test_offset_10_re.png')
| 775
| 32.73913
| 95
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/test_tasnet_one_hot_reg.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model import TSDNet,TSDNet_one_hot,TSDNet_plus_one_hot
from logger.set_logger import setup_logger
import logging
from config.option import parse
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from torch.utils.data import DataLoader as Loader
from data_loader.Dataset_light import Datasets
from model import model
from logger import set_logger
from config import option
import argparse
import torch
import time
import soundfile as sf
import metrics # import metrics.py file
import tsd_utils as utils # import utils.py
import pandas as pd
import numpy as np
from tabulate import tabulate
import math  # needed by time_to_frame for math.ceil
def time_to_frame(tm, st=True):
    # a 10 s clip is mapped to 624 frames; floor for onsets (st=True), ceil for offsets
    ratio = 10.0 / 624
    if st:
        n_frame = tm // ratio
    else:
        n_frame = math.ceil(tm / ratio)
    if n_frame >= 624:
        n_frame = 623
    if n_frame < 0:
        n_frame = 0
    return int(n_frame)
# def time_to_frame(tm):
# return int(tm/(10.0/312))
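# Worked example: with 10 s mapped to 624 frames one frame lasts about 16 ms, so
# time_to_frame(2.43) == 151 (floored, for onsets) while time_to_frame(2.43, st=False) == 152
# (rounded up, for offsets).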
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Separation():
def __init__(self, mix_scp, ref_scp, inf_scp, yaml_path, model, gpuid, pred_file='./tsd_result.tsv'):
super(Separation, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onset, self.offset = handle_scp_inf(inf_scp)
self.key = list(self.mix_audio.keys())
opt = parse(yaml_path)
tsdnet = TSDNet_one_hot()
dicts = torch.load(model, map_location='cpu')
tsdnet.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.tsdnet=tsdnet.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.pred_file = pred_file
self.label_path = opt['label_path']
self.save_tsv_path = opt['save_tsv_path']
def test(self):
self.tsdnet.eval()
time_predictions = []
class_result_file = 'class_result_{}.txt'
event_file = 'event_{}.txt'
segment_file = 'segment_{}.txt'
with torch.no_grad():
for i in range(len(self.key)):
index = self.key[i]
ref_index = index.replace('.wav', '_re.wav')
cls = str(self.clss[index])
onset = self.onset[index]
offset = self.offset[index]
onset_frame = time_to_frame(onset)
offset_frame = time_to_frame(offset)
cls_vec = torch.zeros(41)
cls_vec[self.clss[index]] = 1.
cls_vec = cls_vec.unsqueeze(0)
cls_index = cls_vec.argmax(1)
# cls_index = torch.from_numpy(cls_index)
# print('cls_index ',cls_index)
# assert 1==2
cls_index = cls_index.to(self.device)
cls = 'class_' + cls
mix = read_wav(self.mix_audio[index])
ref = read_wav(self.ref_audio[ref_index])
mix = mix.to(self.device)
ref = ref.to(self.device)
mix = mix[None,:]
ref = ref[None,:]
# cls_index = cls_index[None,:]
# print('cls_index ',cls_index.shape)
x_cls, out_tsd_time, out_tsd_up,_ = self.tsdnet(mix, ref,cls_index)
# print('onset_frame, offset_frame',onset_frame, offset_frame)
# assert 1==2
# x_cls: <bs,50>
# out_tsd_time: <bs,t/2>
# out_tsd_up: <bs,t>
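                # Decode the two regression heads with argmax, clip the resulting values to
                # the 0-10 s clip length, map them to frame indices via time_to_frame, and
                # build a binary 624-frame mask over [start_frame, end_frame) for scoring.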
out_tsd_time = out_tsd_time.detach().cpu().numpy()
out_tsd_up = out_tsd_up.detach().cpu().numpy()
# print('out_tsd_time ',out_tsd_time)
# print('onset ',onset)
# print('offset ',offset)
# print('out_tsd_time ',out_tsd_time.shape)
# print('out_tsd_up ',out_tsd_up.shape)
print('out_tsd_time ',out_tsd_time)
print('out_tsd_up ',out_tsd_up)
st = out_tsd_time.argmax(1)
ed = out_tsd_up.argmax(1)
# print('st ',st)
# print('ed ',ed)
# assert 1==2
# assert 1==2
st_time = st
if st_time < 0:
st_time = 0
ed_time = ed
if ed_time > 10:
ed_time= 10
# print('st_time ',st_time)
# print('ed_time ',ed_time)
start_frame = time_to_frame(st_time)
end_frame = time_to_frame(ed_time)
# print('start_frame ',start_frame)
# print('end_frame ',end_frame)
# assert 1==2
pred = np.zeros(624)
pred[start_frame:end_frame] = 1.0
pred = pred[None,:]
# pred = out_tsd_up.detach().cpu().numpy() # transpose to numpy
# pred = pred[:,:,0]
# print(pred.shape)
thres = 0.5
window_size = 1
filtered_pred = utils.median_filter(pred, window_size=window_size, threshold=thres)
                decoded_pred = []
                decoded_pred_ = utils.decode_with_timestamps(str(cls), filtered_pred[0, :])
                if len(decoded_pred_) == 0:
                    # negative case: nothing was detected, so emit a dummy (label, 0, 0) segment
                    decoded_pred_.append((str(cls), 0, 0))
                decoded_pred.append(decoded_pred_)
for num_batch in range(len(decoded_pred)): # when we test our model,the batch_size is 1
#print('len(decoded_pred) ',len(decoded_pred))
filename = index.split('/')[-1]
# Save each frame output, for later visualization
label_prediction = decoded_pred[num_batch] # frame predict
for event_label, onset, offset in label_prediction:
time_predictions.append({
'filename': filename,
'onset': onset,
'offset': offset,
'event_label': str(event_label)}) # get real predict results,including event_label,onset,offset
assert len(time_predictions) > 0, "No outputs, lower threshold?"
pred_df = pd.DataFrame(time_predictions, columns=['filename', 'onset', 'offset','event_label']) # it store the happen event and its time information
time_ratio = 10.0/pred.shape[1] # calculate time
pred_df = utils.predictions_to_time(pred_df, ratio=time_ratio) # transform the number of frame to real time
label_path = self.label_path
test_data_filename = os.path.splitext(os.path.basename(label_path))[0]
print('test_data_filename ',test_data_filename)
pred_file = 'hard_predictions_{}.txt'
if pred_file: # it name is hard_predictions...
pred_df.to_csv(os.path.join(self.save_tsv_path, pred_file.format(test_data_filename)),
index=False, sep="\t")
strong_labels_df = pd.read_csv(self.label_path, sep='\t') # get
if not np.issubdtype(strong_labels_df['filename'].dtype, np.number):
strong_labels_df['filename'] = strong_labels_df['filename'].apply(os.path.basename)
sed_eval = True
if sed_eval:
event_result, segment_result = metrics.compute_metrics(
strong_labels_df, pred_df, time_resolution=0.2) # calculate f1
print("Event Based Results:\n{}".format(event_result))
event_results_dict = event_result.results_class_wise_metrics()
class_wise_results_df = pd.DataFrame().from_dict({
f: event_results_dict[f]['f_measure']
for f in event_results_dict.keys()}).T
class_wise_results_df.to_csv(os.path.join(
self.save_tsv_path, class_result_file.format(test_data_filename)), sep='\t')
print("Class wise F1-Macro:\n{}".format(
tabulate(class_wise_results_df, headers='keys', tablefmt='github')))
if event_file:
with open(os.path.join(self.save_tsv_path,
event_file.format(test_data_filename)), 'w') as wp:
wp.write(event_result.__str__())
print("=" * 100)
print(segment_result)
if segment_file:
with open(os.path.join(self.save_tsv_path, segment_file.format(test_data_filename)), 'w') as wp:
wp.write(segment_result.__str__())
event_based_results = pd.DataFrame(
event_result.results_class_wise_average_metrics()['f_measure'], index=['event_based'])
segment_based_results = pd.DataFrame(
segment_result.results_class_wise_average_metrics()
['f_measure'], index=['segment_based'])
result_quick_report = pd.concat((event_based_results, segment_based_results))
# Add two columns
with open(os.path.join(self.save_tsv_path, 'quick_report_{}.md'.format(test_data_filename)), 'w') as wp:
print(tabulate(result_quick_report, headers='keys', tablefmt='github'), file=wp)
print("Quick Report: \n{}".format(tabulate(result_quick_report, headers='keys', tablefmt='github')))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-yaml', type=str, default='./config/Conv_Tasnet/train.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='/apdcephfs/share_1316500/donchaoyang/tsss/TSD_exp/checkpoint_fsd2018_new/TSDNet_one_hot_reg_new/best.pt', help="Path to model file.")
parser.add_argument(
'-max_num', type=str, default=10000, help="Max number for testing samples.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/Conv_Tasnet/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_mix.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_re.scp',
'/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/tt_inf.scp',
args.yaml, args.model, gpuid)
separation.test()
if __name__ == "__main__":
main()
| 11,496
| 45.358871
| 170
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/dualrnn_test.py
|
import os
import torch
from data_loader.AudioReader import AudioReader, write_wav
import argparse
from torch.nn.parallel import data_parallel
from model.model_rnn import Dual_RNN_model
from logger.set_logger import setup_logger
import logging
from config.option import parse
import tqdm
class Separation():
def __init__(self, mix_path, yaml_path, model, gpuid):
super(Separation, self).__init__()
self.mix = AudioReader(mix_path, sample_rate=8000)
opt = parse(yaml_path)
net = Dual_RNN_model(**opt['Dual_Path_RNN'])
dicts = torch.load(model, map_location='cpu')
net.load_state_dict(dicts["model_state_dict"])
setup_logger(opt['logger']['name'], opt['logger']['path'],
screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.logger.info('Load checkpoint from {}, epoch {: d}'.format(model, dicts["epoch"]))
self.net=net.cuda()
self.device=torch.device('cuda:{}'.format(
gpuid[0]) if len(gpuid) > 0 else 'cpu')
self.gpuid=tuple(gpuid)
def inference(self, file_path):
with torch.no_grad():
for key, egs in tqdm.tqdm(self.mix):
#self.logger.info("Compute on utterance {}...".format(key))
egs=egs.to(self.device)
norm = torch.norm(egs,float('inf'))
if len(self.gpuid) != 0:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach().cpu()) for s in ests]
else:
if egs.dim() == 1:
egs = torch.unsqueeze(egs, 0)
ests=self.net(egs)
spks=[torch.squeeze(s.detach()) for s in ests]
index=0
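                # Each separated source is trimmed to the input length, mean-removed,
                # peak-normalised and written to <save_path>/spk<i>/<utterance_key>.wav.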
for s in spks:
s = s[:egs.shape[1]]
s = s - torch.mean(s)
s = s/torch.max(torch.abs(s))
#norm
#s = s*norm/torch.max(torch.abs(s))
s = s.unsqueeze(0)
index += 1
os.makedirs(file_path+'/spk'+str(index), exist_ok=True)
filename=file_path+'/spk'+str(index)+'/'+key
write_wav(filename, s, 8000)
break
self.logger.info("Compute over {:d} utterances".format(len(self.mix)))
def main():
parser=argparse.ArgumentParser()
parser.add_argument(
'-mix_scp', type=str, default='../create_scp/tt_mix.scp', help='Path to mix scp file.')
parser.add_argument(
'-yaml', type=str, default='./config/train_rnn_opt.yml', help='Path to yaml file.')
parser.add_argument(
'-model', type=str, default='./checkpoint/Dual_Path_RNN_opt/best.pt', help="Path to model file.")
parser.add_argument(
'-gpuid', type=str, default='0', help='Enter GPU id number')
parser.add_argument(
'-save_path', type=str, default='./result/dual-rnn/', help='save result path')
args=parser.parse_args()
gpuid=[int(i) for i in args.gpuid.split(',')]
separation=Separation(args.mix_scp, args.yaml, args.model, gpuid)
separation.inference(args.save_path)
if __name__ == "__main__":
main()
| 3,384
| 40.790123
| 105
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_tse.py
|
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.ratio = 0.3
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
if opt['resume']['state']:
ckp = torch.load(opt['resume']['path'], map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
            # load_state_dict() does not return the module/optimizer, so restore the
            # states first and then move the model to the target device
            Conv_Tasnet.load_state_dict(ckp['model_state_dict'])
            self.convtasnet = Conv_Tasnet.to(self.device)
            optimizer.load_state_dict(ckp['optim_state_dict'])
            self.optimizer = optimizer
else:
self.convtasnet = Conv_Tasnet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
tse = tse.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix, ref)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
# l = Loss(out, s1)
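            # get_loss returns the total loss together with its clip-level classification
            # (est_cls vs. cls) and frame-level detection (est_tsd vs. tsd_lab) components,
            # which are accumulated and logged separately.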
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.convtasnet.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_cls / num_index,
total_loss_tsd / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
tse = tse.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.convtasnet)
#out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
# l = Loss(out, s1)
# print('est_tsd ',est_tsd.shape)
# print('tsd_lab ',tsd_lab.shape)
# assert 1==2
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.test_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
tse = tse.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref, tse)
# l = Loss(out, s1)
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def run(self):
train_loss = []
val_loss = []
test_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,_,_ = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
no_improve = 0
# starting training part
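            # Early stopping: training halts once the validation loss has not improved for
            # `early_stop` consecutive epochs; the best model so far is kept as best.pt.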
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss,_,_ = self.train(self.cur_epoch)
v_loss,_,_ = self.validation(self.cur_epoch)
tt_loss,_,_ = self.test(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
# schedule here
self.scheduler.step()
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
self.cur_epoch, tt_loss))
if no_improve == self.early_stop:
                    self.logger.info(
                        "Stop training because there was no improvement for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.convtasnet.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 13,121
| 44.404844
| 146
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_one_hot_regresion.py
|
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss, get_loss_one_hot, get_loss_one_hot_focal, get_loss_one_hot_focal_sim,get_loss_one_hot_reg,get_loss_one_hot_reg_two
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.opt = opt
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.ratio = 0.3
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
if opt['resume']['state']:
ckp = torch.load(opt['resume']['path'], map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
            # load_state_dict() does not return the module/optimizer, so restore the
            # states first and then move the model to the target device
            Conv_Tasnet.load_state_dict(ckp['model_state_dict'])
            self.convtasnet = Conv_Tasnet.to(self.device)
            optimizer.load_state_dict(ckp['optim_state_dict'])
            self.optimizer = optimizer
else:
self.convtasnet = Conv_Tasnet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, real_time in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
# print('onset ', onset)
# print('offset ', offset)
# print('real_time ',real_time)
# assert 1==2
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix, ref)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref,cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref,cls_index.long())
# l = Loss(out, s1)
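            # opt['two'] selects the loss: with it both regression heads (est_tsd, est_tsd2)
            # are supervised against the ground-truth times, otherwise only the first head is.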
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.convtasnet.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_cls / num_index,
total_loss_tsd / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, real_time in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.convtasnet)
#out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
# l = Loss(out, s1)
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.test_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab,sim_lab,real_time in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
real_time = real_time.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd, est_tsd2, sim_cos = self.convtasnet(mix, ref, cls_index.long())
if self.opt['two']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg_two(est_tsd, est_tsd2, real_time)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_reg(est_tsd,real_time)
# l = Loss(out, s1)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def run(self):
train_loss = []
val_loss = []
test_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,_,_ = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss,_,_ = self.train(self.cur_epoch)
v_loss,_,_ = self.validation(self.cur_epoch)
tt_loss,_,_ = self.test(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
# schedule here
self.scheduler.step()
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
self.cur_epoch, tt_loss))
if no_improve == self.early_stop:
                    self.logger.info(
                        "Stop training because there was no improvement for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.convtasnet.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 14,648
| 47.506623
| 147
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/trainer_Dual_RNN.py
|
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import Loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, Dual_RNN, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.dualrnn = Dual_RNN.to(self.device)
self.logger.info(
'Loading Dual-Path-RNN parameters: {:.3f} Mb'.format(check_parameters(self.dualrnn)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.dualrnn = Dual_RNN.to(self.device)
self.logger.info(
'Loading Dual-Path-RNN parameters: {:.3f} Mb'.format(check_parameters(self.dualrnn)))
if opt['resume']['state']:
ckp = torch.load(os.path.join(
opt['resume']['path'], 'best.pt'), map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
Dual_RNN.load_state_dict(
ckp['model_state_dict'])
self.dualrnn = Dual_RNN.to(self.device)
optimizer.load_state_dict(ckp['optim_state_dict'])
self.optimizer = optimizer
lr = self.optimizer.param_groups[0]['lr']
self.adjust_learning_rate(self.optimizer, lr*0.5)
else:
self.dualrnn = Dual_RNN.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
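    # Note: adjust_learning_rate is called in the resume branch above but is not defined
    # anywhere else in this class; a minimal implementation is sketched here.
    def adjust_learning_rate(self, optimizer, lr):
        # overwrite the learning rate of every parameter group
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr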
def train(self, epoch):
self.logger.info(
'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.dualrnn.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
num_index = 1
start_time = time.time()
for mix, ref in self.train_dataloader:
mix = mix.to(self.device)
ref = [ref[i].to(self.device) for i in range(self.num_spks)]
self.optimizer.zero_grad()
if self.gpuid:
out = torch.nn.parallel.data_parallel(self.dualrnn,mix,device_ids=self.gpuid)
#out = self.dualrnn(mix)
else:
out = self.dualrnn(mix)
l = Loss(out, ref)
epoch_loss = l
total_loss += epoch_loss.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.dualrnn.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss/num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss/num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, (end_time-start_time)/60)
self.logger.info(message)
return total_loss
def validation(self, epoch):
self.logger.info(
'Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.dualrnn.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
start_time = time.time()
with torch.no_grad():
for mix, ref in self.val_dataloader:
mix = mix.to(self.device)
ref = [ref[i].to(self.device) for i in range(self.num_spks)]
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.dualrnn)
#out = model(mix)
out = torch.nn.parallel.data_parallel(self.dualrnn,mix,device_ids=self.gpuid)
else:
out = self.dualrnn(mix)
l = Loss(out, ref)
epoch_loss = l
total_loss += epoch_loss.item()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss/num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss/num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, (end_time-start_time)/60)
self.logger.info(message)
return total_loss
def run(self):
train_loss = []
val_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(
self.cur_epoch, best_loss))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss = self.train(self.cur_epoch)
v_loss = self.validation(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
# schedule here
self.scheduler.step(v_loss)
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
if no_improve == self.early_stop:
                    self.logger.info(
                        "Stop training because there was no improvement for {:d} epochs".format(no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.legend()
#plt.xticks(l, lx)
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
save model
best: the best model
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.dualrnn.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 8,747
| 39.5
| 112
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet.py
|
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.ratio = 0.3
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
if opt['resume']['state']:
ckp = torch.load(opt['resume']['path'], map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
            # load_state_dict() does not return the module/optimizer, so restore the
            # states first and then move the model to the target device
            Conv_Tasnet.load_state_dict(ckp['model_state_dict'])
            self.convtasnet = Conv_Tasnet.to(self.device)
            optimizer.load_state_dict(ckp['optim_state_dict'])
            self.optimizer = optimizer
else:
self.convtasnet = Conv_Tasnet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info('Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix, ref)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
# l = Loss(out, s1)
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.convtasnet.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_cls / num_index,
total_loss_tsd / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.convtasnet)
#out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
# l = Loss(out, s1)
# print('est_tsd ',est_tsd.shape)
# print('tsd_lab ',tsd_lab.shape)
# assert 1==2
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.test_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
else:
est_cls, est_tsd, est_tsd_up = self.convtasnet(mix, ref)
# l = Loss(out, s1)
epoch_loss, loss_cls, loss_tsd = get_loss(est_cls, cls, est_tsd, tsd_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def run(self):
train_loss = []
val_loss = []
test_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,_,_ = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss,_,_ = self.train(self.cur_epoch)
v_loss,_,_ = self.validation(self.cur_epoch)
tt_loss,_,_ = self.test(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
# schedule here
self.scheduler.step()
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
self.cur_epoch, tt_loss))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(
no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
        Save a checkpoint of the model and optimizer.
        best: if True, save it as best.pt, otherwise as last.pt
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.convtasnet.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
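# Hedged usage sketch (not part of the original trainer): how a checkpoint written with
# the keys above ('epoch', 'model_state_dict', 'optim_state_dict') can be restored. The
# tiny stand-in model, optimizer and temporary path are illustrative assumptions only.
if __name__ == "__main__":
    import tempfile
    _net = torch.nn.Linear(4, 2)
    _optim = torch.optim.Adam(_net.parameters(), lr=1e-3)
    _path = os.path.join(tempfile.gettempdir(), 'best.pt')
    torch.save({'epoch': 3,
                'model_state_dict': _net.state_dict(),
                'optim_state_dict': _optim.state_dict()}, _path)
    _ckp = torch.load(_path, map_location='cpu')
    _net.load_state_dict(_ckp['model_state_dict'])
    _optim.load_state_dict(_ckp['optim_state_dict'])
    print('resumed from epoch', _ckp['epoch'])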
| 12,954
| 44.297203
| 146
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/__init__.py
|
from .trainer_Tasnet import *
| 30
| 14.5
| 29
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/trainer/trainer_Tasnet_one_hot.py
|
import sys
sys.path.append('../')
from utils.util import check_parameters
import time
import logging
from logger.set_logger import setup_logger
from model.loss import get_loss, get_loss_one_hot, get_loss_one_hot_focal, get_loss_one_hot_focal_sim
import torch
import os
import matplotlib.pyplot as plt
from torch.nn.parallel import data_parallel
class Trainer(object):
def __init__(self, train_dataloader, val_dataloader, test_dataloader, Conv_Tasnet, optimizer, scheduler, opt):
super(Trainer).__init__()
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
self.test_dataloader = test_dataloader
self.scheduler = scheduler
self.num_spks = opt['num_spks']
self.cur_epoch = 0
self.total_epoch = opt['train']['epoch']
self.early_stop = opt['train']['early_stop']
self.opt = opt
self.print_freq = opt['logger']['print_freq']
# setup_logger(opt['logger']['name'], opt['logger']['path'],
# screen=opt['logger']['screen'], tofile=opt['logger']['tofile'])
self.logger = logging.getLogger(opt['logger']['name'])
self.checkpoint = opt['train']['path']
self.name = opt['name']
self.nFrameShift = opt['datasets']['audio_setting']['nFrameShift']
self.audio_length = opt['datasets']['audio_setting']['audio_length']
self.sr = opt['datasets']['audio_setting']['sample_rate']
self.ratio = 0.3
if opt['train']['gpuid']:
            self.logger.info('Load Nvidia GPU .....')
self.device = torch.device(
'cuda:{}'.format(opt['train']['gpuid'][0]))
self.gpuid = opt['train']['gpuid']
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
else:
self.logger.info('Load CPU ...........')
self.device = torch.device('cpu')
self.convtasnet = Conv_Tasnet.to(self.device)
self.logger.info(
'Loading Conv-TasNet parameters: {:.3f} Mb'.format(check_parameters(self.convtasnet)))
if opt['resume']['state']:
ckp = torch.load(opt['resume']['path'], map_location='cpu')
self.cur_epoch = ckp['epoch']
self.logger.info("Resume from checkpoint {}: epoch {:.3f}".format(
opt['resume']['path'], self.cur_epoch))
self.convtasnet = Conv_Tasnet.load_state_dict(
ckp['model_state_dict']).to(self.device)
self.optimizer = optimizer.load_state_dict(ckp['optim_state_dict'])
else:
self.convtasnet = Conv_Tasnet.to(self.device)
self.optimizer = optimizer
if opt['optim']['clip_norm']:
self.clip_norm = opt['optim']['clip_norm']
self.logger.info(
"Gradient clipping by {}, default L2".format(self.clip_norm))
else:
self.clip_norm = 0
def train(self, epoch):
self.logger.info(
'Start training from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.train()
num_batchs = len(self.train_dataloader)
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
num_index = 1
start_time = time.time()
for mix, s1, ref, cls, onset, offset, tsd_lab,sim_lab,L_lab in self.train_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
L_lab = L_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix, ref)
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
# l = Loss(out, s1)
if self.opt['sim']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
elif self.opt['focal_loss']:
                epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
epoch_loss.backward()
if self.clip_norm:
torch.nn.utils.clip_grad_norm_(
self.convtasnet.parameters(), self.clip_norm)
self.optimizer.step()
if num_index % self.print_freq == 0:
message = '<epoch:{:d}, iter:{:d}, lr:{:.3e}, total_loss:{:.3f}, total_loss_cls:{:.3f}, total_loss_tsd:{:.3f}>'.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss / num_index,
total_loss_cls / num_index,
total_loss_tsd / num_index)
self.logger.info(message)
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def validation(self, epoch):
self.logger.info('Start Validation from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.val_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.val_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
L_lab = L_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
#model = torch.nn.DataParallel(self.convtasnet)
#out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet,mix,device_ids=self.gpuid)
est_cls, est_tsd_time, est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
# l = Loss(out, s1)
if self.opt['sim']:
                    epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
elif self.opt['focal_loss']:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def test(self, epoch):
self.logger.info(
'Start Test from epoch: {:d}, iter: {:d}'.format(epoch, 0))
self.convtasnet.eval()
num_batchs = len(self.test_dataloader)
num_index = 1
total_loss = 0.0
total_loss_cls = 0.0
total_loss_tsd = 0.0
start_time = time.time()
with torch.no_grad():
for mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab in self.test_dataloader:
mix = mix.to(self.device)
ref = ref.to(self.device)
s1 = [s1.to(self.device) for i in range(self.num_spks)]
cls = cls.to(self.device)
cls_index = cls.argmax(1)
onset = onset.to(self.device)
offset = offset.to(self.device)
tsd_lab = tsd_lab.to(self.device)
sim_lab = sim_lab.to(self.device)
L_lab = L_lab.to(self.device)
self.optimizer.zero_grad()
if self.gpuid:
# model = torch.nn.DataParallel(self.convtasnet)
# out = model(mix)
# out = torch.nn.parallel.data_parallel(self.convtasnet, mix, device_ids=self.gpuid)
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
else:
est_cls, est_tsd_time,est_tsd_time_up, sim_cos = self.convtasnet(mix, ref, cls_index.long())
if self.opt['sim']:
                    epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal_sim(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
elif self.opt['focal_loss']:
                    epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd_time, tsd_lab, sim_cos, sim_lab)
else:
epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd_time, L_lab, sim_cos, sim_lab)
# l = Loss(out, s1)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot_focal(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
#epoch_loss, loss_cls, loss_tsd = get_loss_one_hot(est_cls, cls, est_tsd, tsd_lab, sim_cos, sim_lab)
total_loss += epoch_loss.item()
total_loss_cls += loss_cls.item()
total_loss_tsd += loss_tsd.item()
num_index += 1
end_time = time.time()
total_loss = total_loss / num_index
total_loss_cls = total_loss_cls / num_index
total_loss_tsd = total_loss_tsd / num_index
message = 'Finished *** <epoch:{:d}, iter:{:d}, lr:{:.3e}, loss:{:.3f}, loss_cls:{:.3f}, loss_tsd:{:.3f}, Total time:{:.3f} min> '.format(
epoch, num_index, self.optimizer.param_groups[0]['lr'], total_loss, total_loss_cls, total_loss_tsd, (end_time - start_time) / 60)
self.logger.info(message)
return total_loss, total_loss_cls, total_loss_tsd
def run(self):
train_loss = []
val_loss = []
test_loss = []
with torch.cuda.device(self.gpuid[0]):
self.save_checkpoint(self.cur_epoch, best=False)
v_loss,_,_ = self.validation(self.cur_epoch)
best_loss = v_loss
self.logger.info("Starting epoch from {:d}, loss = {:.4f}".format(self.cur_epoch, best_loss))
no_improve = 0
# starting training part
while self.cur_epoch < self.total_epoch:
self.cur_epoch += 1
t_loss,_,_ = self.train(self.cur_epoch)
v_loss,_,_ = self.validation(self.cur_epoch)
tt_loss,_,_ = self.test(self.cur_epoch)
train_loss.append(t_loss)
val_loss.append(v_loss)
test_loss.append(tt_loss)
# schedule here
self.scheduler.step()
if v_loss >= best_loss:
no_improve += 1
self.logger.info(
'No improvement, Best Loss: {:.4f}'.format(best_loss))
else:
best_loss = v_loss
no_improve = 0
self.save_checkpoint(self.cur_epoch, best=True)
self.logger.info('Epoch: {:d}, Now Best Loss Change: {:.4f}'.format(
self.cur_epoch, best_loss))
self.logger.info('Epoch: {:d}, Best Loss Test: {:.4f}'.format(
self.cur_epoch, tt_loss))
if no_improve == self.early_stop:
self.logger.info(
"Stop training cause no impr for {:d} epochs".format(
no_improve))
break
self.save_checkpoint(self.cur_epoch, best=False)
self.logger.info("Training for {:d}/{:d} epoches done!".format(
self.cur_epoch, self.total_epoch))
# draw loss image
plt.title("Loss of train, val and test")
x = [i for i in range(self.cur_epoch)]
plt.plot(x, train_loss, 'b-', label=u'train_loss', linewidth=0.8)
plt.plot(x, val_loss, 'c-', label=u'val_loss', linewidth=0.8)
plt.plot(x, test_loss, 'g', label=u'test_loss', linewidth=0.8)
plt.legend()
plt.ylabel('loss')
plt.xlabel('epoch')
plt.savefig('loss.png')
def save_checkpoint(self, epoch, best=True):
'''
        Save a checkpoint of the model and optimizer.
        best: if True, save it as best.pt, otherwise as last.pt
'''
os.makedirs(os.path.join(self.checkpoint, self.name), exist_ok=True)
torch.save({
'epoch': epoch,
'model_state_dict': self.convtasnet.state_dict(),
'optim_state_dict': self.optimizer.state_dict()
},
os.path.join(self.checkpoint, self.name, '{0}.pt'.format('best' if best else 'last')))
| 15,168
| 48.734426
| 146
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/data_loader/AudioData.py
|
import torch.nn.functional as F
from utils import util
import torch
import torchaudio
import sys
sys.path.append('../')
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads Wav format files
Input:
        scp_path (str): path of the scp file to read
sample_rate (int, optional): sample rate (default: 8000)
chunk_size (int, optional): split audio size (default: 32000(4 s))
least_size (int, optional): Minimum split size (default: 16000(2 s))
Output:
split audio (list)
'''
def __init__(self, scp_path, sample_rate=8000, chunk_size=32000, least_size=16000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = util.handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
self.audio = []
self.chunk_size = chunk_size
self.least_size = least_size
self.split()
def split(self):
'''
split audio with chunk_size and least_size
'''
for key in self.keys:
utt = read_wav(self.index_dict[key])
if utt.shape[0] < self.least_size:
continue
            if utt.shape[0] >= self.least_size and utt.shape[0] < self.chunk_size:
gap = self.chunk_size-utt.shape[0]
self.audio.append(F.pad(utt, (0, gap), mode='constant'))
if utt.shape[0] >= self.chunk_size:
start = 0
while True:
if start + self.chunk_size > utt.shape[0]:
break
self.audio.append(utt[start:start+self.chunk_size])
start += self.least_size
if __name__ == "__main__":
a = AudioReader("/home/likai/data1/create_scp/cv_mix.scp")
audio = a.audio
print(len(audio))
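# Hedged arithmetic sketch (illustrative only): with chunk_size=32000 and least_size=16000,
# split() slides a 32000-sample window with a 16000-sample hop, so a 70000-sample
# utterance yields chunks starting at 0, 16000 and 32000 (48000 + 32000 > 70000 stops it).
def _num_chunks(n_samples, chunk_size=32000, least_size=16000):
    # mirrors the while-loop in AudioReader.split() for utterances >= chunk_size
    count, start = 0, 0
    while start + chunk_size <= n_samples:
        count += 1
        start += least_size
    return count
if __name__ == "__main__":
    print(_num_chunks(70000))  # -> 3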
| 2,751
| 30.272727
| 87
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/data_loader/Dataset.py
|
import sys
sys.path.append('../')
from data_loader.AudioData import AudioReader
import torch
from torch.utils.data import Dataset
import numpy as np
class Datasets(Dataset):
'''
Load audio data
mix_scp: file path of mix audio (type: str)
ref_scp: file path of ground truth audio (type: list[spk1,spk2])
chunk_size (int, optional): split audio size (default: 32000(4 s))
least_size (int, optional): Minimum split size (default: 16000(2 s))
'''
def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, sample_rate=32000, chunk_size=64000, least_size=64000):
super(Datasets, self).__init__()
self.mix_audio = AudioReader(
mix_scp, sample_rate=sample_rate, chunk_size=chunk_size, least_size=least_size).audio
self.ref_audio = AudioReader(
ref_scp, sample_rate=sample_rate, chunk_size=chunk_size, least_size=least_size).audio
self.s1_audio = AudioReader(
s1_scp, sample_rate=sample_rate, chunk_size=chunk_size, least_size=least_size).audio
def __len__(self):
return len(self.mix_audio)
def __getitem__(self, index):
return self.mix_audio[index], self.s1_audio[index], self.ref_audio[index]
if __name__ == "__main__":
dataset = Datasets("/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_mix.scp",
"/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_s1.scp", "/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps_debug/tto_re.scp")
for i in dataset.mix_audio:
print(i.shape)
if i.shape[0] != 64000:
print('fail')
| 1,666
| 36.886364
| 183
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/data_loader/__init__.py
|
from .AudioData import *
from .Dataset import *
| 47
| 23
| 24
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/data_loader/AudioReader.py
|
import sys
sys.path.append('../')
import torchaudio
import torch
from utils.util import handle_scp
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
def write_wav(fname, src, sample_rate):
'''
Write wav file
input:
fname: wav file path
src: frames of audio
sample_rate: An integer which is the sample rate of the audio
output:
None
'''
torchaudio.save(fname, src, sample_rate)
class AudioReader(object):
'''
Class that reads Wav format files
    Input: path of an scp file
    Output: the waveform of each wav file listed in the scp file
'''
def __init__(self, scp_path, sample_rate=8000):
super(AudioReader, self).__init__()
self.sample_rate = sample_rate
self.index_dict = handle_scp(scp_path)
self.keys = list(self.index_dict.keys())
def _load(self, key):
src, sr = read_wav(self.index_dict[key], return_rate=True)
if self.sample_rate is not None and sr != self.sample_rate:
raise RuntimeError('SampleRate mismatch: {:d} vs {:d}'.format(
sr, self.sample_rate))
return src
def __len__(self):
return len(self.keys)
def __iter__(self):
for key in self.keys:
yield key, self._load(key)
def __getitem__(self, index):
if type(index) not in [int, str]:
raise IndexError('Unsupported index type: {}'.format(type(index)))
if type(index) == int:
num_uttrs = len(self.keys)
            if index >= num_uttrs or index < -num_uttrs:
                raise KeyError('Integer index out of range, {:d} vs {:d}'.format(
index, num_uttrs))
index = self.keys[index]
if index not in self.index_dict:
raise KeyError("Missing utterance {}!".format(index))
return self._load(index)
if __name__ == "__main__":
r = AudioReader('/home/likai/data1/create_scp/cv_s2.scp')
index = 0
print(r[1])
| 2,556
| 28.732558
| 82
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/data_loader/Dataset_light.py
|
import sys
sys.path.append('../')
import torch
from torch.utils.data import DataLoader, Dataset
import torch.nn.functional as F
import random
import numpy as np
import soundfile as sf
import torchaudio
from utils.util import handle_scp, handle_scp_inf
from model.model import STFT
import os
import pickle
import math
nFrameLen = 512
nFrameShift = 256
nFFT = 512
stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
def time_to_frame(tm,M):
ans = int(tm/(10.0/M))
if ans < 0:
ans = 0
if ans > M:
ans = M
return ans
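# Hedged worked example (illustrative only): with M=156 frames covering a 10 s clip,
# each frame spans 10/156 ~= 0.064 s, so an event from 1.3 s to 4.9 s maps to
# frames int(1.3 / (10/156)) = 20 through int(4.9 / (10/156)) = 76.
if __name__ == "__main__":
    print(time_to_frame(1.3, 156), time_to_frame(4.9, 156))  # -> 20 76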
def read_wav(fname, return_rate=False):
'''
Read wavfile using Pytorch audio
input:
fname: wav file path
return_rate: Whether to return the sampling rate
output:
src: output tensor of size C x L
L is the number of audio frames
C is the number of channels.
sr: sample rate
'''
src, sr = torchaudio.load(fname, channels_first=True)
if return_rate:
return src.squeeze(), sr
else:
return src.squeeze()
class Datasets(Dataset):
'''
Load audio data
mix_scp: file path of mix audio (type: str)
ref_scp: file path of ground truth audio (type: list[spk1,spk2])
'''
def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, sr=16000, cls_num=50, audio_length=10, hop_size=256):
super(Datasets, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.s1_audio = handle_scp(s1_scp)
self.ref_audio = handle_scp(ref_scp)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
self.sr = sr
self.cls_num = cls_num
self.audio_length = audio_length
self.samples = sr * audio_length
self.max_frame = (self.samples // hop_size - 1) // 2
self.key = list(self.mix_audio.keys())
def __len__(self):
return len(self.key)
def __getitem__(self, index):
index = self.key[index]
s1_index = index.replace('.wav', '_lab.wav')
ref_index = index.replace('.wav', '_re.wav')
mix = read_wav(self.mix_audio[index])
s1 = read_wav(self.s1_audio[s1_index])
ref = read_wav(self.ref_audio[ref_index])
cls = torch.zeros(self.cls_num)
cls[self.clss[index]] = 1.
tsd_lab = torch.zeros(self.max_frame)
sim_lab = torch.zeros(self.max_frame)
real_time = torch.zeros(2)
tmp_st = math.floor(self.onsets[index])
if tmp_st < 0:
tmp_st = 0
tmp_ed = math.ceil(self.offsets[index])
if tmp_ed > 10:
tmp_ed = 10
real_time[0] = tmp_st
real_time[1] = tmp_ed
M = 156
start_frame = round(self.max_frame * self.onsets[index] / self.audio_length) if round(self.max_frame * self.onsets[index] / self.audio_length) >= 0 else 0
end_frame = round(self.max_frame * self.offsets[index] / self.audio_length) if round(self.max_frame * self.offsets[index] / self.audio_length) < self.max_frame else self.max_frame - 1
L_st = time_to_frame(self.onsets[index], M)
L_ed = time_to_frame(self.offsets[index], M)
L_lab = torch.zeros(M)
L_lab[L_st:L_ed] = 1.0
tsd_lab[start_frame:end_frame] = 1.
sim_lab[start_frame:end_frame] = 1.0
if start_frame>0:
sim_lab[0:start_frame] = -1.0
if end_frame < self.max_frame:
sim_lab[end_frame:] = -1.0
onset = self.onsets[index]
offset = self.offsets[index]
return mix, s1, ref, cls, onset, offset, tsd_lab, sim_lab, L_lab
def get_mean_std(self):
preNormFile = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/norm.info'
# if os.path.exists(preNormFile):
# print('normfile exists, just load it!')
# return
fea_norm_cal = []
lab_norm_cal = []
print('Calculate mean and std.')
num = 0
for i in self.key:
mix = read_wav(self.mix_audio[i])
print(self.mix_audio[i])
s1 = read_wav(self.mix_audio[i].replace('.wav', '_lab.wav'))
fea, _= stft(mix[None, :]) #1,f,t
lab, _= stft(s1[None, :]) #1,f,t
fea = torch.log(fea ** 2 + 1e-20)
lab = torch.log(lab ** 2 + 1e-20)
fea = fea[0].numpy()
lab = lab[0].numpy()
if num == 0:
fea_norm_cal = fea
lab_norm_cal = lab
else:
fea_norm_cal = np.concatenate((fea_norm_cal,fea), -1)
lab_norm_cal = np.concatenate((lab_norm_cal,lab), -1)
num += 1
if num > 5000:
break
n_frame = np.shape(fea_norm_cal)[-1]
self.fea_mean = np.mean(fea_norm_cal, axis=-1)
self.lab_mean = np.mean(lab_norm_cal, axis=-1)
print(n_frame)
print('fea_mean and fea_std size: {}'.format(np.shape(self.fea_mean)[0]))
print('lab_mean and lab_std size: {}'.format(np.shape(self.lab_mean)[0]))
for i in range(n_frame):
if i == 0:
self.fea_std = np.square(fea_norm_cal[:,i] - self.fea_mean)
self.lab_std = np.square(lab_norm_cal[:,i] - self.lab_mean)
else:
self.fea_std += np.square(fea_norm_cal[:,i] - self.fea_mean)
self.lab_std += np.square(lab_norm_cal[:,i] - self.lab_mean)
self.fea_std = np.sqrt(self.fea_std / n_frame)
self.lab_std = np.sqrt(self.lab_std / n_frame)
print(f'restore mean and std in {preNormFile}')
export_dict = {
'feaMean': self.fea_mean.astype(np.float32),
'feaStd': self.fea_std.astype(np.float32),
'labMean': self.lab_mean.astype(np.float32),
'labStd': self.lab_std.astype(np.float32)
}
with open(preNormFile, 'wb') as fid:
pickle.dump(export_dict, fid)
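# Hedged arithmetic sketch (illustrative only): with sr=16000, audio_length=10 and
# hop_size=256, max_frame = (160000 // 256 - 1) // 2 = 312, so an event with
# onset=2.0 s and offset=5.5 s is marked active from frame round(312*2.0/10) = 62
# to frame round(312*5.5/10) = 172 in tsd_lab/sim_lab, mirroring __getitem__ above.
if __name__ == "__main__":
    _max_frame = (16000 * 10 // 256 - 1) // 2
    print(_max_frame, round(_max_frame * 2.0 / 10), round(_max_frame * 5.5 / 10))  # -> 312 62 172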
class Datasets_tse(Dataset):
'''
Load audio data
mix_scp: file path of mix audio (type: str)
ref_scp: file path of ground truth audio (type: list[spk1,spk2])
'''
def __init__(self, mix_scp=None, s1_scp=None, ref_scp=None, inf_scp=None, tse_scp=None, sr=16000, cls_num=50, audio_length=10, hop_size=256):
super(Datasets_tse, self).__init__()
self.mix_audio = handle_scp(mix_scp)
self.s1_audio = handle_scp(s1_scp)
self.ref_audio = handle_scp(ref_scp)
self.tse_audio = handle_scp(tse_scp)
self.clss, self.onsets, self.offsets = handle_scp_inf(inf_scp)
self.sr = sr
self.cls_num = cls_num
self.audio_length = audio_length
self.samples = sr * audio_length
self.max_frame = (self.samples // hop_size - 1) // 2
self.key = list(self.mix_audio.keys())
def __len__(self):
return len(self.key)
def __getitem__(self, index):
index = self.key[index]
s1_index = index.replace('.wav', '_lab.wav')
ref_index = index.replace('.wav', '_re.wav')
tse_index = index.replace('.wav','_tse.wav')
mix = read_wav(self.mix_audio[index])
s1 = read_wav(self.s1_audio[s1_index])
ref = read_wav(self.ref_audio[ref_index])
tse = read_wav(self.tse_audio[tse_index])
cls = torch.zeros(self.cls_num)
cls[self.clss[index]] = 1.
tsd_lab = torch.zeros(self.max_frame)
sim_lab = torch.zeros(self.max_frame)
real_time = torch.zeros(2)
tmp_st = math.floor(self.onsets[index])
if tmp_st < 0:
tmp_st = 0
tmp_ed = math.ceil(self.offsets[index])
if tmp_ed > 10:
tmp_ed = 10
real_time[0] = tmp_st
real_time[1] = tmp_ed
M = 156
start_frame = round(self.max_frame * self.onsets[index] / self.audio_length) if round(self.max_frame * self.onsets[index] / self.audio_length) >= 0 else 0
end_frame = round(self.max_frame * self.offsets[index] / self.audio_length) if round(self.max_frame * self.offsets[index] / self.audio_length) < self.max_frame else self.max_frame - 1
L_st = time_to_frame(self.onsets[index], M)
L_ed = time_to_frame(self.offsets[index], M)
L_lab = torch.zeros(M)
L_lab[L_st:L_ed] = 1.0
tsd_lab[start_frame:end_frame] = 1.
sim_lab[start_frame:end_frame] = 1.0
if start_frame>0:
sim_lab[0:start_frame] = -1.0
if end_frame < self.max_frame:
sim_lab[end_frame:] = -1.0
onset = self.onsets[index]
offset = self.offsets[index]
return mix, s1, ref, tse, cls, onset, offset, tsd_lab, sim_lab, L_lab
if __name__ == "__main__":
datasets = Datasets("/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_mix.scp",
"/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_s1.scp",
"/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_re.scp",
"/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_inf.scp",
"/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/scps/tr_tse.scp",
16000,
50,
10)
print(datasets.key)
# datasets.get_mean_std()
| 9,413
| 37.740741
| 191
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/config/option.py
|
import yaml
def parse(opt_path):
with open(opt_path, mode='r') as f:
opt = yaml.load(f,Loader=yaml.FullLoader)
opt['resume']['path'] = opt['resume']['path']+'/'+opt['name']
opt['logger']['path'] = opt['logger']['path']+'/'+opt['name']
return opt
if __name__ == "__main__":
parse('train.yml')
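# Hedged sketch (illustrative only): parse() above only requires that the YAML defines
# 'name', 'resume.path' and 'logger.path'; all other keys are consumed elsewhere in the
# repo. The minimal config below is an assumption used purely to exercise parse().
if __name__ == "__main__":
    import tempfile, os
    _cfg = {'name': 'demo', 'resume': {'path': '/tmp/ckpt'}, 'logger': {'path': '/tmp/log'}}
    _p = os.path.join(tempfile.gettempdir(), 'demo.yml')
    with open(_p, 'w') as f:
        yaml.dump(_cfg, f)
    print(parse(_p)['resume']['path'])  # -> /tmp/ckpt/demo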
| 327
| 24.230769
| 65
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/config/__init__.py
|
from .option import *
| 21
| 21
| 21
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/logger/__init__.py
|
from .set_logger import *
| 25
| 25
| 25
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/logger/set_logger.py
|
import logging
from datetime import datetime
import os
def get_timestamp():
return datetime.now().strftime('%y%m%d-%H%M%S')
def setup_logger(logger_name, root, level=logging.INFO, screen=False, tofile=False):
'''set up logger'''
lg = logging.getLogger(logger_name)
formatter = logging.Formatter('%(asctime)s [%(pathname)s:%(lineno)s - %(levelname)s ] %(message)s',
datefmt='%y-%m-%d %H:%M:%S')
lg.setLevel(level)
os.makedirs(root,exist_ok=True)
if tofile:
log_file = os.path.join(root, '_{}.log'.format(get_timestamp()))
fh = logging.FileHandler(log_file, mode='w')
fh.setFormatter(formatter)
lg.addHandler(fh)
if screen:
sh = logging.StreamHandler()
sh.setFormatter(formatter)
lg.addHandler(sh)
if __name__ == "__main__":
setup_logger('base','root',level=logging.INFO,screen=True, tofile=False)
logger = logging.getLogger('base')
logger.info('hello')
| 990
| 32.033333
| 103
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/utils/util.py
|
import torch
import torch.nn as nn
def handle_scp(scp_path):
'''
Read scp file script
input:
scp_path: .scp file's file path
output:
scp_dict: {'key':'wave file path'}
'''
scp_dict = dict()
line = 0
lines = open(scp_path, 'r').readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
if len(scp_parts) != 2:
raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
scp_path, line, scp_parts))
if len(scp_parts) == 2:
key, value = scp_parts
if key in scp_dict:
raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
key, scp_path))
scp_dict[key] = value
return scp_dict
def handle_scp_inf(scp_path):
'''
Read information scp file script
input:
scp_path: .scp file's file path
output:
scp_dict: {'key':'wave file path'}
'''
scp_dict_cls = dict()
scp_dict_onset = dict()
scp_dict_offset = dict()
line = 0
lines = open(scp_path, 'r').readlines()
for l in lines:
scp_parts = l.strip().split()
line += 1
if len(scp_parts) != 4:
raise RuntimeError("For {}, format error in line[{:d}]: {}".format(
scp_path, line, scp_parts))
if len(scp_parts) == 4:
key, cls, onset, offset = scp_parts
if key in scp_dict_cls:
raise ValueError("Duplicated key \'{0}\' exists in {1}".format(
key, scp_path))
scp_dict_cls[key] = int(cls)
scp_dict_onset[key] = float(onset)
scp_dict_offset[key] = float(offset)
return scp_dict_cls, scp_dict_onset, scp_dict_offset
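# Hedged usage sketch (illustrative only): the scp formats assumed by the two parsers
# above are "key path" for handle_scp and "key cls onset offset" for handle_scp_inf.
# The file names and values below are made up for demonstration.
if __name__ == "__main__":
    import tempfile, os
    _d = tempfile.gettempdir()
    with open(os.path.join(_d, 'demo_mix.scp'), 'w') as f:
        f.write('utt1.wav /data/mix/utt1.wav\n')
    with open(os.path.join(_d, 'demo_inf.scp'), 'w') as f:
        f.write('utt1.wav 12 1.25 4.80\n')
    print(handle_scp(os.path.join(_d, 'demo_mix.scp')))         # {'utt1.wav': '/data/mix/utt1.wav'}
    print(handle_scp_inf(os.path.join(_d, 'demo_inf.scp'))[0])  # {'utt1.wav': 12}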
def check_parameters(net):
'''
        Returns the number of module parameters (in millions).
'''
parameters = sum(param.numel() for param in net.parameters())
return parameters / 10**6
| 1,932
| 26.225352
| 79
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/utils/get_sed_label.py
|
import pandas as pd
save_name = []
save_onset = []
save_offset = []
save_event = []
with open('/apdcephfs/share_1316500/donchaoyang/tsss/tsss_data/fsd_2018/scps/val_inf_new.scp', 'r') as file:
for line in file:
split_line = line.strip().split(' ')
save_name.append(split_line[0])
#target_audio = split_line[1]
class_event = split_line[1]
save_event.append('class_'+class_event)
save_onset.append(split_line[2])
save_offset.append(split_line[3])
#print(class_event)
# print('split_line ',split_line)
# assert 1==2
data = {'filename': save_name, 'onset': save_onset, 'offset': save_offset, 'event_label': save_event}
df = pd.DataFrame(data)
df.to_csv('strong_label_fsd2018_val_new.tsv',index=False,sep='\t')
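# Hedged sketch (illustrative only): each scp line is expected to look like
# "<filename> <class_id> <onset> <offset>", and the loop above turns it into one
# DESED-style strong-label row; the filename, class id and times below are made up.
_example = pd.DataFrame({'filename': ['mix_0001.wav'], 'onset': ['1.25'],
                         'offset': ['4.80'], 'event_label': ['class_12']})
print(_example.to_csv(index=False, sep='\t'))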
| 789
| 38.5
| 108
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/utils/__init__.py
|
from .util import *
| 19
| 19
| 19
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/model_rnn.py
|
import sys
sys.path.append('../')
import torch.nn.functional as F
from torch import nn
import torch
from utils.util import check_parameters
import warnings
warnings.filterwarnings('ignore')
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
       dim: (int or list or torch.Size)
          number of feature channels, used for the per-channel affine parameters
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, shape, eps=1e-8, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
if shape == 3:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
if shape == 4:
self.weight = nn.Parameter(torch.ones(self.dim, 1, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x K x S or N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x K x S
# gln: mean,var N x 1 x 1
if x.dim() == 4:
mean = torch.mean(x, (1, 2, 3), keepdim=True)
var = torch.mean((x-mean)**2, (1, 2, 3), keepdim=True)
if self.elementwise_affine:
x = self.weight*(x-mean)/torch.sqrt(var+self.eps)+self.bias
else:
x = (x-mean)/torch.sqrt(var+self.eps)
if x.dim() == 3:
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x-mean)**2, (1, 2), keepdim=True)
if self.elementwise_affine:
x = self.weight*(x-mean)/torch.sqrt(var+self.eps)+self.bias
else:
x = (x-mean)/torch.sqrt(var+self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
       dim: the dimension (number of channels) to normalize
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine, eps=1e-8)
def forward(self, x):
# x: N x C x K x S or N x C x L
# N x K x S x C
if x.dim() == 4:
x = x.permute(0, 2, 3, 1).contiguous()
# N x K x S x C == only channel norm
x = super().forward(x)
# N x C x K x S
x = x.permute(0, 3, 1, 2).contiguous()
if x.dim() == 3:
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim, shape):
if norm == 'gln':
return GlobalLayerNorm(dim, shape, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
if norm == 'ln':
return nn.GroupNorm(1, dim, eps=1e-8)
else:
return nn.BatchNorm1d(dim)
class Encoder(nn.Module):
'''
Conv-Tasnet Encoder part
kernel_size: the length of filters
out_channels: the number of filters
'''
def __init__(self, kernel_size=2, out_channels=64):
super(Encoder, self).__init__()
self.conv1d = nn.Conv1d(in_channels=1, out_channels=out_channels,
kernel_size=kernel_size, stride=kernel_size//2, groups=1, bias=False)
def forward(self, x):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
x: [B, C, T_out]
T_out is the number of time steps
"""
# B x T -> B x 1 x T
x = torch.unsqueeze(x, dim=1)
# B x 1 x T -> B x C x T_out
x = self.conv1d(x)
x = F.relu(x)
return x
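# Hedged shape-check sketch (illustrative only): the encoder is a plain Conv1d with
# stride = kernel_size // 2, so T_out = (T - kernel_size) // stride + 1; e.g. a
# 32000-sample input with kernel_size=2 (stride 1) yields 31999 frames.
if __name__ == "__main__":
    _enc = Encoder(kernel_size=2, out_channels=64)
    print(_enc(torch.randn(1, 32000)).shape)  # -> torch.Size([1, 64, 31999])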
class Decoder(nn.ConvTranspose1d):
'''
Decoder of the TasNet
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution
or a deconvolution (although it is not an actual deconvolution operation).
'''
def __init__(self, *args, **kwargs):
super(Decoder, self).__init__(*args, **kwargs)
def forward(self, x):
"""
x: [B, N, L]
"""
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 3/4D tensor as input".format(
self.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if torch.squeeze(x).dim() == 1:
x = torch.squeeze(x, dim=1)
else:
x = torch.squeeze(x)
return x
class Dual_RNN_Block(nn.Module):
'''
Implementation of the intra-RNN and the inter-RNN
input:
in_channels: The number of expected features in the input x
out_channels: The number of features in the hidden state h
rnn_type: RNN, LSTM, GRU
norm: gln = "Global Norm", cln = "Cumulative Norm", ln = "Layer Norm"
dropout: If non-zero, introduces a Dropout layer on the outputs
of each LSTM layer except the last layer,
with dropout probability equal to dropout. Default: 0
bidirectional: If True, becomes a bidirectional LSTM. Default: False
'''
def __init__(self, out_channels,
hidden_channels, rnn_type='LSTM', norm='ln',
dropout=0, bidirectional=False, num_spks=2):
super(Dual_RNN_Block, self).__init__()
# RNN model
self.intra_rnn = getattr(nn, rnn_type)(
out_channels, hidden_channels, 1, batch_first=True, dropout=dropout, bidirectional=bidirectional)
self.inter_rnn = getattr(nn, rnn_type)(
out_channels, hidden_channels, 1, batch_first=True, dropout=dropout, bidirectional=bidirectional)
# Norm
self.intra_norm = select_norm(norm, out_channels, 4)
self.inter_norm = select_norm(norm, out_channels, 4)
# Linear
self.intra_linear = nn.Linear(
hidden_channels*2 if bidirectional else hidden_channels, out_channels)
self.inter_linear = nn.Linear(
hidden_channels*2 if bidirectional else hidden_channels, out_channels)
def forward(self, x):
'''
x: [B, N, K, S]
out: [Spks, B, N, K, S]
'''
B, N, K, S = x.shape
# intra RNN
# [BS, K, N]
intra_rnn = x.permute(0, 3, 2, 1).contiguous().view(B*S, K, N)
# [BS, K, H]
intra_rnn, _ = self.intra_rnn(intra_rnn)
# [BS, K, N]
intra_rnn = self.intra_linear(intra_rnn.contiguous().view(B*S*K, -1)).view(B*S, K, -1)
# [B, S, K, N]
intra_rnn = intra_rnn.view(B, S, K, N)
# [B, N, K, S]
intra_rnn = intra_rnn.permute(0, 3, 2, 1).contiguous()
intra_rnn = self.intra_norm(intra_rnn)
# [B, N, K, S]
intra_rnn = intra_rnn + x
# inter RNN
# [BK, S, N]
inter_rnn = intra_rnn.permute(0, 2, 3, 1).contiguous().view(B*K, S, N)
# [BK, S, H]
inter_rnn, _ = self.inter_rnn(inter_rnn)
# [BK, S, N]
inter_rnn = self.inter_linear(inter_rnn.contiguous().view(B*S*K, -1)).view(B*K, S, -1)
# [B, K, S, N]
inter_rnn = inter_rnn.view(B, K, S, N)
# [B, N, K, S]
inter_rnn = inter_rnn.permute(0, 3, 1, 2).contiguous()
inter_rnn = self.inter_norm(inter_rnn)
# [B, N, K, S]
out = inter_rnn + intra_rnn
return out
class Dual_Path_RNN(nn.Module):
'''
Implementation of the Dual-Path-RNN model
input:
in_channels: The number of expected features in the input x
out_channels: The number of features in the hidden state h
rnn_type: RNN, LSTM, GRU
norm: gln = "Global Norm", cln = "Cumulative Norm", ln = "Layer Norm"
dropout: If non-zero, introduces a Dropout layer on the outputs
of each LSTM layer except the last layer,
with dropout probability equal to dropout. Default: 0
bidirectional: If True, becomes a bidirectional LSTM. Default: False
num_layers: number of Dual-Path-Block
K: the length of chunk
num_spks: the number of speakers
'''
def __init__(self, in_channels, out_channels, hidden_channels,
rnn_type='LSTM', norm='ln', dropout=0,
bidirectional=False, num_layers=4, K=200, num_spks=2):
super(Dual_Path_RNN, self).__init__()
self.K = K
self.num_spks = num_spks
self.num_layers = num_layers
self.norm = select_norm(norm, in_channels, 3)
self.conv1d = nn.Conv1d(in_channels, out_channels, 1, bias=False)
self.dual_rnn = nn.ModuleList([])
for i in range(num_layers):
self.dual_rnn.append(Dual_RNN_Block(out_channels, hidden_channels,
rnn_type=rnn_type, norm=norm, dropout=dropout,
bidirectional=bidirectional))
self.conv2d = nn.Conv2d(
out_channels, out_channels*num_spks, kernel_size=1)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1, bias=False)
self.prelu = nn.PReLU()
self.activation = nn.ReLU()
# gated output layer
self.output = nn.Sequential(nn.Conv1d(out_channels, out_channels, 1),
nn.Tanh()
)
self.output_gate = nn.Sequential(nn.Conv1d(out_channels, out_channels, 1),
nn.Sigmoid()
)
def forward(self, x):
'''
x: [B, N, L]
'''
# [B, N, L]
x = self.norm(x)
# [B, N, L]
x = self.conv1d(x)
# [B, N, K, S]
x, gap = self._Segmentation(x, self.K)
# [B, N*spks, K, S]
for i in range(self.num_layers):
x = self.dual_rnn[i](x)
x = self.prelu(x)
x = self.conv2d(x)
# [B*spks, N, K, S]
B, _, K, S = x.shape
x = x.view(B*self.num_spks,-1, K, S)
# [B*spks, N, L]
x = self._over_add(x, gap)
x = self.output(x)*self.output_gate(x)
# [spks*B, N, L]
x = self.end_conv1x1(x)
# [B*spks, N, L] -> [B, spks, N, L]
_, N, L = x.shape
x = x.view(B, self.num_spks, N, L)
x = self.activation(x)
# [spks, B, N, L]
x = x.transpose(0, 1)
return x
def _padding(self, input, K):
'''
padding the audio times
K: chunks of length
P: hop size
input: [B, N, L]
'''
B, N, L = input.shape
P = K // 2
gap = K - (P + L % K) % K
if gap > 0:
pad = torch.Tensor(torch.zeros(B, N, gap)).type(input.type())
input = torch.cat([input, pad], dim=2)
_pad = torch.Tensor(torch.zeros(B, N, P)).type(input.type())
input = torch.cat([_pad, input, _pad], dim=2)
return input, gap
def _Segmentation(self, input, K):
'''
the segmentation stage splits
K: chunks of length
P: hop size
input: [B, N, L]
output: [B, N, K, S]
'''
B, N, L = input.shape
P = K // 2
input, gap = self._padding(input, K)
# [B, N, K, S]
input1 = input[:, :, :-P].contiguous().view(B, N, -1, K)
input2 = input[:, :, P:].contiguous().view(B, N, -1, K)
input = torch.cat([input1, input2], dim=3).view(
B, N, -1, K).transpose(2, 3)
return input.contiguous(), gap
def _over_add(self, input, gap):
'''
Merge sequence
input: [B, N, K, S]
gap: padding length
output: [B, N, L]
'''
B, N, K, S = input.shape
P = K // 2
# [B, N, S, K]
input = input.transpose(2, 3).contiguous().view(B, N, -1, K * 2)
input1 = input[:, :, :, :K].contiguous().view(B, N, -1)[:, :, P:]
input2 = input[:, :, :, K:].contiguous().view(B, N, -1)[:, :, :-P]
input = input1 + input2
# [B, N, L]
if gap > 0:
input = input[:, :, :-gap]
return input
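# Hedged arithmetic sketch (illustrative only): for chunk length K with hop P = K // 2,
# _padding() appends gap = K - (P + L % K) % K zeros and then P zeros at both ends, so
# _Segmentation() produces S = (L + gap) // P + 1 overlapping chunks; e.g. L=1000,
# K=200 gives gap=100 and S=12, and _over_add() folds the chunks back to length L.
if __name__ == "__main__":
    _L, _K = 1000, 200
    _P = _K // 2
    _gap = _K - (_P + _L % _K) % _K
    print(_gap, (_L + _gap) // _P + 1)  # -> 100 12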
class Dual_RNN_model(nn.Module):
'''
model of Dual Path RNN
input:
in_channels: The number of expected features in the input x
out_channels: The number of features in the hidden state h
hidden_channels: The hidden size of RNN
kernel_size: Encoder and Decoder Kernel size
rnn_type: RNN, LSTM, GRU
norm: gln = "Global Norm", cln = "Cumulative Norm", ln = "Layer Norm"
dropout: If non-zero, introduces a Dropout layer on the outputs
of each LSTM layer except the last layer,
with dropout probability equal to dropout. Default: 0
bidirectional: If True, becomes a bidirectional LSTM. Default: False
num_layers: number of Dual-Path-Block
K: the length of chunk
num_spks: the number of speakers
'''
def __init__(self, in_channels, out_channels, hidden_channels,
kernel_size=2, rnn_type='LSTM', norm='ln', dropout=0,
bidirectional=False, num_layers=4, K=200, num_spks=2):
super(Dual_RNN_model,self).__init__()
self.encoder = Encoder(kernel_size=kernel_size,out_channels=in_channels)
self.separation = Dual_Path_RNN(in_channels, out_channels, hidden_channels,
rnn_type=rnn_type, norm=norm, dropout=dropout,
bidirectional=bidirectional, num_layers=num_layers, K=K, num_spks=num_spks)
self.decoder = Decoder(in_channels=in_channels, out_channels=1, kernel_size=kernel_size, stride=kernel_size//2, bias=False)
self.num_spks = num_spks
def forward(self, x):
'''
x: [B, L]
'''
# [B, N, L]
e = self.encoder(x)
# [spks, B, N, L]
s = self.separation(e)
# [B, N, L] -> [B, L]
out = [s[i]*e for i in range(self.num_spks)]
audio = [self.decoder(out[i]) for i in range(self.num_spks)]
return audio
if __name__ == "__main__":
rnn = Dual_RNN_model(256, 64, 128,bidirectional=True, norm='ln', num_layers=6)
#encoder = Encoder(16, 512)
x = torch.ones(1, 100)
out = rnn(x)
print("{:.3f}".format(check_parameters(rnn)*1000000))
print(rnn)
| 15,142
| 35.314149
| 131
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/PANNS.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class CNN10(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, classes_num):
super(CNN10, self).__init__()
window = 'hann'
center = True
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
# Spec augmenter
self.spec_augmenter = SpecAugmentation(time_drop_width=64, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(64)
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.fc1 = nn.Linear(512, 512, bias=True)
self.fc_audioset = nn.Linear(512, classes_num, bias=True)
self.init_weight()
def init_weight(self):
init_bn(self.bn0)
init_layer(self.fc1)
init_layer(self.fc_audioset)
def forward(self, input):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block2(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block3(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block4(x, pool_size=(2, 2), pool_type='avg')
# print('x ',x.shape) # [4, 512, 31, 4]
# assert 1==2
x = torch.mean(x, dim=3)
(x1, _) = torch.max(x, dim=2)
x2 = torch.mean(x, dim=2)
x = x1 + x2
x = F.relu_(self.fc1(x))
embedding = x
return embedding
def extract_frame(self, input):
"""
Input: (batch_size, data_length)"""
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins)
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
x = self.conv_block1(x, pool_size=(2, 2), pool_type='avg')
x = self.conv_block2(x, pool_size=(1, 2), pool_type='avg')
x = self.conv_block3(x, pool_size=(1, 2), pool_type='avg')
x = self.conv_block4(x, pool_size=(1, 2), pool_type='avg')
# print('x ',x.shape) # [4, 512, 31, 4]
# assert 1==2
x = torch.mean(x, dim=3).transpose(1,2) # b,312,512
return x
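# Hedged usage sketch (illustrative only): the spectrogram/log-mel parameters below are
# assumptions, not values taken from the repo's configs; they just exercise CNN10's two
# outputs (a clip-level embedding and a frame-level feature map).
if __name__ == "__main__":
    _model = CNN10(sample_rate=16000, window_size=512, hop_size=256, mel_bins=64,
                   fmin=50, fmax=8000, classes_num=50)
    _wav = torch.randn(2, 16000)
    print(_model(_wav).shape)                 # clip embedding, e.g. (2, 512)
    print(_model.extract_frame(_wav).shape)   # frame-level features, (2, T', 512)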
| 5,456
| 39.422222
| 107
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/loss.py
|
import torch
from itertools import permutations
import numpy as np
from pypesq import pesq
import torch.nn as nn
class FocalLoss(nn.Module):
def __init__(self,alpha=0.35, gamma=2):
super(FocalLoss,self).__init__()
self.gamma = gamma
self.alpha = alpha
self.eps = 1e-9
#self.bce = BCELoss()
def forward(self,predict,target,frame_level_time=None):
# print()
tmp = self.alpha*(1-predict)**self.gamma # t = 1
# print('predict ',predict.shape)
# print('target ',target.shape)
# assert 1==2
# tmp = self.alpha
# print('tmp ',tmp.shape)
tmp2 = (1-self.alpha)*(predict)**self.gamma # t=0
# tmp2 = (1-self.alpha)
# print('tmp2 ',tmp2.shape)
# print(torch.log(predict).shape)
# print('target ',target.shape)
# assert 1==2
loss = (-target*tmp*torch.log(predict+self.eps)).mean() + (-(1-target)*tmp2*torch.log(1-predict+self.eps)).mean()
#loss = (-target*torch.log(predict)).mean() + (-(1-target)*torch.log(1-predict)).mean()
#loss_bce = (-target*torch.log(predict)).mean() + (-(1-target)*torch.log(1-predict)).mean()
# print('loss_bce ',loss_bce)
# loss_function = torch.nn.BCELoss()
# loss = loss_function(predict, target.squeeze())
# print(self.bce(predict,target))
# print('loss ',loss)
# assert 1==2
return loss
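# Hedged usage sketch (illustrative only): FocalLoss above expects frame-level
# probabilities in (0, 1) and binary targets of the same shape; the tensors below are
# random stand-ins for a batch of TSD posteriors.
if __name__ == "__main__":
    _crit = FocalLoss(alpha=0.35, gamma=2)
    _pred = torch.rand(4, 312)               # e.g. batch of 4 clips, 312 frames each
    _target = (torch.rand(4, 312) > 0.5).float()
    print(_crit(_pred, _target).item())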
def nll_loss(output, target):
'''Negative likelihood loss. The output should be obtained using F.log_softmax(x).
Args:
output: (N, classes_num)
target: (N, classes_num)
'''
loss = - torch.mean(target * output)
return loss
def tsd_loss(output, target):
'''BCE loss.
Args:
output: (N)
target: (N)
'''
loss_function = torch.nn.BCELoss()
loss = loss_function(output, target.squeeze())
return loss
def sisnr_loss(x, s, eps=1e-8):
"""
calculate training loss
input:
x: separated signal, N x S tensor
s: reference signal, N x S tensor
Return:
sisnr: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
def l2norm(mat, keepdim=False):
return torch.norm(mat, dim=-1, keepdim=keepdim)
if x.shape != s.shape:
raise RuntimeError(
"Dimention mismatch when calculate si-snr, {} vs {}".format(
x.shape, s.shape))
x_zm = x - torch.mean(x, dim=-1, keepdim=True)
s_zm = s - torch.mean(s, dim=-1, keepdim=True)
t = torch.sum(
x_zm * s_zm, dim=-1,
keepdim=True) * s_zm / (l2norm(s_zm, keepdim=True)**2 + eps)
loss = -20. * torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps))
return torch.sum(loss) / x.shape[0]
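# Hedged sanity check (illustrative only): sisnr_loss returns the negative SI-SNR in dB
# averaged over the batch, so feeding the reference (or any rescaled copy of it, since
# SI-SNR is scale-invariant) as the estimate yields a very large negative loss.
if __name__ == "__main__":
    _s = torch.randn(2, 16000)
    print(sisnr_loss(_s.clone(), _s).item())   # strongly negative: perfect estimate
    print(sisnr_loss(0.3 * _s, _s).item())     # also strongly negative: scale does not matter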
def sisnri(x, s, m):
"""
Arguments:
x: separated signal, BS x S
s: reference signal, BS x S
m: mixture signal, BS x S
Return:
sisnri: N tensor
"""
sisnr = sisnr_loss(x, s)
sisnr_ori = sisnr_loss(m, s)
return sisnr_ori - sisnr
def lfb_mse_loss(x, s):
"""
est_spec, ref_spec: BS x F x T
return: log fbank MSE: BS tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :, :s.shape[-1]]
else:
s = s[:, :, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1]*x.shape[2])
return t
def mse_loss(x, s):
"""
calculate training loss
input:
x: separated signal, N x S tensor
s: reference signal, N x S tensor
Return:
return: N tensor
"""
if x.shape != s.shape:
if x.shape[-1] > s.shape[-1]:
x = x[:, :s.shape[-1]]
else:
s = s[:, :x.shape[-1]]
t = torch.sum((x - s) ** 2)/(x.shape[0]*x.shape[1])
return t
def get_pesq(est_wav, lab_wav):
num = est_wav.shape[0]
score = 0.0
for i in range(num):
score += pesq(est_wav[i].cpu().detach(), lab_wav[i].cpu().detach(), 16000)
score = score / num
return score
def get_loss(est_cls, lab_cls, est_tsd, lab_tsd):
loss_cls = nll_loss(est_cls, lab_cls)
loss_tsd = tsd_loss(est_tsd, lab_tsd)
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
loss = loss_cls * 10. + loss_tsd
return loss, loss_cls, loss_tsd
def get_loss_one_hot(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
# loss_cls = nll_loss(est_cls, lab_cls)
# print('est_tsd ', est_tsd.shape)
# print('lab_tsd ',lab_tsd.shape)
# assert 1==2
loss_tsd = tsd_loss(est_tsd, lab_tsd)
# if sim_cos !=None:
# loss_cls = mse_loss(sim_cos,sim_lab)
# else:
# loss_cls = loss_tsd
loss_cls = loss_tsd
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
loss = loss_tsd
return loss, loss_cls, loss_tsd
def get_loss_one_hot_reg(est,lab):
# loss_cls = nll_loss(est_cls, lab_cls)
loss_tsd = mse_loss(est,lab)
# if sim_cos !=None:
# loss_cls = mse_loss(sim_cos,sim_lab)
# else:
# loss_cls = loss_tsd
loss_cls = loss_tsd
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
loss = loss_tsd
return loss, loss_cls, loss_tsd
def get_loss_one_hot_reg_two(st, ed, lab):
crossentropyloss = nn.CrossEntropyLoss()
# loss_cls = nll_loss(est_cls, lab_cls)
loss_st = crossentropyloss(st,lab[:,0].long())
loss_ed = crossentropyloss(ed,lab[:,1].long())
# if sim_cos !=None:
# loss_cls = mse_loss(sim_cos,sim_lab)
# else:
# loss_cls = loss_tsd
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
    loss = loss_st + loss_ed
    return loss, loss_st, loss_ed
def get_loss_one_hot_focal(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
# loss_cls = nll_loss(est_cls, lab_cls)
focalLoss = FocalLoss()
loss_tsd = focalLoss(est_tsd, lab_tsd.squeeze())
# print('loss_tsd ',loss_tsd)
# assert 1==2
# loss_cls = loss_tsd
# if sim_cos !=None:
# loss_cls = mse_loss(sim_cos,sim_lab)
# else:
# loss_cls = loss_tsd
loss_cls = loss_tsd
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
loss = loss_tsd
# assert 1==2
return loss, loss_cls, loss_tsd
def get_loss_one_hot_focal_sim(est_cls, lab_cls, est_tsd, lab_tsd, sim_cos=None, sim_lab=None):
# loss_cls = nll_loss(est_cls, lab_cls)
focalLoss = FocalLoss()
loss_tsd = focalLoss(est_tsd, lab_tsd.squeeze())
# print('loss_tsd ',loss_tsd)
# assert 1==2
# loss_cls = loss_tsd
    if sim_cos is not None:
loss_cls = mse_loss(sim_cos,sim_lab)
else:
loss_cls = loss_tsd
# print('loss_cls ',loss_cls)
# print('loss_tsd ',loss_tsd)
# assert 1==2
loss = 2*loss_tsd + loss_cls
# assert 1==2
return loss, loss_cls, loss_tsd
| 7,028
| 29.428571
| 121
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/model.py
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import CNN10
from model.tsd import TSD, TSD2, TSD2_tse, TSD_plus, TSD_plus_sim, TSD_IS,TSD_regresion,TSD_regresion_two_cls
import math
def init_kernel(frame_len,
frame_hop,
num_fft=None,
window="sqrt_hann"):
if window != "sqrt_hann":
raise RuntimeError("Now only support sqrt hanning window in order "
"to make signal perfectly reconstructed")
if not num_fft:
# FFT points
fft_size = 2 ** math.ceil(math.log2(frame_len))
else:
fft_size = num_fft
# window [window_length]
window = torch.hann_window(frame_len) ** 0.5
S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
# window_length, F, 2 (real+imag)
kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
# 2, F, window_length
kernel = torch.transpose(kernel, 0, 2) * window
# 2F, 1, window_length
kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
return kernel
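# Hedged note (illustrative only): init_kernel() relies on torch.rfft, which only exists
# in older PyTorch releases; the kernel it returns has shape (fft_size + 2, 1, frame_len)
# with fft_size = num_fft or the next power of two >= frame_len, e.g. frame_len=512,
# frame_hop=256, num_fft=512 gives a (514, 1, 512) convolution kernel.
if __name__ == "__main__":
    _frame_len, _num_fft = 512, 512
    _fft_size = _num_fft if _num_fft else 2 ** math.ceil(math.log2(_frame_len))
    print((_fft_size + 2, 1, _frame_len))  # -> (514, 1, 512)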
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
NOTE:
        1) A sqrt_hann window with a 2**N frame length is recommended because it
           achieves perfect reconstruction after overlap-add
        2) Padding is not handled yet
"""
def __init__(self,
frame_len,
frame_hop,
window="sqrt_hann",
num_fft=None):
super(STFTBase, self).__init__()
K = init_kernel(
frame_len,
frame_hop,
num_fft=num_fft,
window=window)
self.K = nn.Parameter(K, requires_grad=False)
self.stride = frame_hop
self.window = window
def freeze(self):
self.K.requires_grad = False
def unfreeze(self):
self.K.requires_grad = True
def check_nan(self):
num_nan = torch.sum(torch.isnan(self.K))
if num_nan:
raise RuntimeError(
"detect nan in STFT kernels: {:d}".format(num_nan))
def extra_repr(self):
return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, **kwargs)
def forward(self, x):
"""
Accept raw waveform and output magnitude and phase
x: input signal, N x 1 x S or N x S
m: magnitude, N x F x T
p: phase, N x F x T
"""
if x.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
x.dim()))
self.check_nan()
# if N x S, reshape N x 1 x S
if x.dim() == 2:
x = torch.unsqueeze(x, 1)
# N x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# N x F x T
r, i = torch.chunk(c, 2, dim=1)
m = (r ** 2 + i ** 2) ** 0.5
p = torch.atan2(i, r)
return m, p
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, **kwargs)
def forward(self, m, p, squeeze=False):
"""
Accept phase & magnitude and output raw waveform
m, p: N x F x T
s: N x C x S
"""
if p.dim() != m.dim() or p.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
p.dim()))
self.check_nan()
# if F x T, reshape 1 x F x T
if p.dim() == 2:
p = torch.unsqueeze(p, 0)
m = torch.unsqueeze(m, 0)
r = m * torch.cos(p)
i = m * torch.sin(p)
# N x 2F x T
c = torch.cat([r, i], dim=1)
# N x 2F x T
s = F.conv_transpose1d(c, self.K, stride=self.stride, padding=0)
if squeeze:
s = torch.squeeze(s)
return s
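# The two layers above implement analysis/synthesis with fixed DFT kernels.
# A minimal round-trip sketch (shapes illustrative; frame_len/frame_hop match
# the TSENet defaults used later in this file):
def _stft_roundtrip_sketch():
    stft = STFT(frame_len=512, frame_hop=256, num_fft=512)
    istft = iSTFT(frame_len=512, frame_hop=256, num_fft=512)
    wav = torch.randn(2, 64000)            # N x S
    mag, phase = stft(wav)                 # each N x F x T
    return istft(mag, phase, squeeze=True)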
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
input shape from an expected input of size
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
if norm == 'ln':
return nn.GroupNorm(1, dim)
else:
return nn.BatchNorm1d(dim)
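# Minimal sketch of the normalisation selector (shapes illustrative):
def _norm_shape_sketch():
    norm = select_norm('gln', 128)         # global layer norm over (C, L)
    feats = torch.randn(4, 128, 250)       # N x C x L
    return norm(feats)                     # same shape, normalised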
class Conv1D(nn.Module):
'''
Build the Conv1D structure
causal: if True is causal setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
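# Minimal sketch of a single dilated residual block (shapes illustrative):
def _conv1d_block_sketch():
    block = Conv1D(in_channels=128, out_channels=512, kernel_size=3, dilation=4)
    x = torch.randn(2, 128, 249)           # B x C x T
    return block(x)                        # B x C x T, residual output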
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
causal: if True is causal setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False, fusion='concat', usingEmb=True, usingTsd=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.usingTsd = usingTsd
self.usingEmb = usingEmb
self.fusion = fusion # concat, add, multiply
if usingEmb:
if fusion == 'concat':
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + emb_channels + 1, out_channels, kernel_size=1)
elif fusion == 'add':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
elif fusion == 'multiply':
self.preCNN = nn.Conv1d(emb_channels, in_channels, kernel_size=1)
if not usingTsd:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels + 1, out_channels, kernel_size=1)
else:
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
tsd: [B x 1 x T]
Returns:
x: [B, C, T]
"""
        T = x.shape[-1]
        if emb is not None:
            # B x C' -> B x C' x T (broadcast the clip-level embedding along time)
            emb = torch.unsqueeze(emb, -1)
            emb = emb.repeat(1, 1, T)
        # B x (C + C') x T
if self.usingEmb:
if self.fusion == 'concat':
if not self.usingTsd:
x_ = torch.cat([x, emb], 1)
else:
x_ = torch.cat([x, emb, tsd], 1)
elif self.fusion == 'add':
x_ = self.PReLu1(self.preCNN(emb)) + x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
elif self.fusion == 'multiply':
x_ = self.PReLu1(self.preCNN(emb)) * x
if not self.usingTsd:
x_ = x_
else:
x_ = torch.cat([x_, tsd], 1)
else:
x_ = x
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
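# Minimal sketch of the embedding-conditioned block (shapes illustrative;
# 'concat' fusion of a clip-level embedding, no TSD mask):
def _conv1d_emb_sketch():
    block = Conv1D_emb(in_channels=128, emb_channels=128, out_channels=512,
                       fusion='concat', usingEmb=True, usingTsd=False)
    x = torch.randn(2, 128, 249)           # B x C x T mixture features
    emb = torch.randn(2, 128)              # B x C' clip-level embedding
    return block(x, emb)                   # B x C x T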
class ExtractionNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=1, fusion='concat', usingEmb=[True,True,True], usingTsd=[False,False,False]):
super(ExtractionNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[0], usingTsd=usingTsd[0])
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[1], usingTsd=usingTsd[1])
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal, fusion=fusion, usingEmb=usingEmb[2], usingTsd=usingTsd[2])
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb=None, tsd=None):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C x T], B is batch size, T is times
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
x = self.PReLu(x)
# B x C x T
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_1_front(x, emb, tsd)
x = self.conv_block_1_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_2_front(x, emb, tsd)
x = self.conv_block_2_back(x)
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block_3_front(x, emb, tsd)
x = self.conv_block_3_back(x)
x = F.dropout(x, p=0.2, training=self.training)
# B x N x T
x = self.PReLu(x)
x = self.end_conv1x1(x)
x = self.activation(x)
return x
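# Minimal sketch of the extractor (shapes illustrative; channel sizes follow
# the TSENet defaults below, with a 257-bin mask for a 512-point FFT):
def _extraction_net_sketch():
    net = ExtractionNet(conv1d_block=8, in_channels=512, out_channels=128,
                        emb_channels=128, final_channels=257, out_sp_channels=512)
    feats = torch.randn(2, 512, 249)       # B x N x T encoded mixture
    emb = torch.randn(2, 128)              # B x 128 condition embedding
    return net(feats, emb)                 # B x 257 x T sigmoid mask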
class TSENet(nn.Module):
'''
TSENet module
N Number of filters in autoencoder
B Number of channels in bottleneck and the residual paths’ 1 × 1-conv blocks
H Number of channels in convolutional blocks
P Kernel size in convolutional blocks
X Number of convolutional blocks in each repeat
R Number of repeats
'''
def __init__(self,
N=512,
B=128,
H=512,
P=3,
X=8,
R=3,
norm="gln",
num_spks=1,
causal=False,
cls_num=50,
nFrameLen=512,
nFrameShift=256,
nFFT=512,
fusion='concat',
usingEmb=[True,True,True],
usingTsd=[False,False,False],
CNN10_settings=[16000,1024,320,64,50,8000,527,512,128],
fixCNN10=False,
fixTSDNet=True,
pretrainedCNN10=None,
pretrainedTSDNet=None,
threshold=0.5,
):
super(TSENet, self).__init__()
self.device = torch.device('cuda')
self.stft = STFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.istft = iSTFT(frame_len=nFrameLen, frame_hop=nFrameShift, num_fft=nFFT)
self.front_CNN = nn.Conv1d(nFrameShift+1, N, 1)
self.PReLu = nn.PReLU()
self.extractor = ExtractionNet(conv1d_block=X, in_channels=N,
out_channels=B, final_channels=nFrameShift + 1, out_sp_channels=H, kernel_size=P,
norm=norm, causal=causal, num_spks=num_spks, fusion=fusion,
usingEmb=usingEmb, usingTsd=usingTsd)
self.num_spks = num_spks
self.conditioner = CNN10(sample_rate=CNN10_settings[0], window_size=CNN10_settings[1],
hop_size=CNN10_settings[2], mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5],
classes_num=CNN10_settings[6])
self.cls1 = nn.Linear(CNN10_settings[7], CNN10_settings[8])
self.cls2 = nn.Linear(CNN10_settings[8], cls_num)
self.fixCNN10 = fixCNN10
self.fixTSDNet = fixTSDNet
self.pretrainedCNN10 = pretrainedCNN10
self.pretrainedTSDNet = pretrainedTSDNet
self.usingEmb = usingEmb
self.usingTsd = usingTsd
self.threshold = threshold
self.init_conditioner()
self.emb_fc = nn.Linear(CNN10_settings[7], CNN10_settings[8])
self.onehot = nn.Embedding(cls_num, CNN10_settings[8])
if usingTsd[0] or usingTsd[1] or usingTsd[2]:
self.tsdnet = TSDNet(nFrameLen=nFrameLen, nFrameShift=nFrameShift, cls_num=cls_num, CNN10_settings=CNN10_settings)
self.init_TSDNet()
self.epsilon = 1e-20
def init_conditioner(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
if self.fixCNN10:
for p in self.conditioner.parameters():
p.requires_grad = False
def init_TSDNet(self):
if self.pretrainedTSDNet:
device = torch.device('cuda')
dicts = torch.load(self.pretrainedTSDNet, map_location=device)
self.tsdnet.load_state_dict(dicts["model_state_dict"])
if self.fixTSDNet:
for p in self.tsdnet.parameters():
p.requires_grad = False
def forward(self, x, ref, cls_index, label=None, inf=False):
"""
Input:
x: [B, T], B is batch size, T is times
ref: [B, T], B is batch size, T is times
Returns:
audio: [B, T]
"""
# B x T -> B x C x T
x_magnitude, x_phase = self.stft(x)
x_encoder = torch.log(x_magnitude ** 2 + self.epsilon) # bs, 257, 249
if not inf:
label_magnitude, label_phase = self.stft(label)
if self.usingTsd[0] or self.usingTsd[1] or self.usingTsd[2]:
_, _, out_tsd_up = self.tsdnet(x, ref)
tsdMask = torch.zeros(x_magnitude.shape[0], x_magnitude.shape[2]).cuda()
tsdMask[out_tsd_up > self.threshold] = 1.
tsdMask = tsdMask[:, None, :]
else:
tsdMask = None
# B x T -> B x C -> B x C x T
out_enc = self.conditioner(ref)
emb = self.emb_fc(out_enc)
emb = self.PReLu(emb)
x_cls = self.PReLu(self.cls1(out_enc))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
emb_onehot = self.onehot(cls_index)
emb_onehot = F.dropout(emb_onehot, p=0.2, training=self.training)
x_encoder = self.PReLu(self.front_CNN(x_encoder))
# mask = self.extractor(x_encoder, emb, tsdMask)
mask = self.extractor(x_encoder, emb_onehot, tsdMask)
x_ex = x_magnitude * mask
        if not inf:
            gt = label_magnitude / (x_magnitude + self.epsilon) * torch.cos(label_phase - x_phase)  # PSM
            gt = torch.clamp(gt, min=0., max=1.)  # Truncated to [0, 1]
        else:
            gt = None  # no clean label is available at inference time
audio_encoder = self.istft(x_ex, x_phase)
audio = [audio_encoder[:, 0]]
return audio, mask, gt, x_cls, emb, emb_onehot
class TSDNet(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet, self).__init__()
self.PReLu = nn.PReLU()
self.conditioner = CNN10(sample_rate=16000, window_size=1024,
hop_size=320, mel_bins=64, fmin=50, fmax=8000,
classes_num=527)
self.cls1 = nn.Linear(128, 128)
self.cls2 = nn.Linear(128, cls_num)
self.pretrainedCNN10 = pretrainedCNN10
self.init_ref()
self.emb_fc = nn.Linear(512, 128)
# print(CNN10_settings)
self.tsd = TSD2(sample_rate=CNN10_settings[0], window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
# print('self.tsd ',self.tsd)
self.init_fc_layer(self.cls1) # new add
self.init_fc_layer(self.cls2) # new add
self.init_fc_layer(self.emb_fc) # new add
# assert 1==2
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def init_rnn_layer(self, layer):
for name, param in layer.named_parameters():
if name.startswith("weight"):
nn.init.kaiming_normal_(param)
else:
nn.init.zeros_(param)
def init_fc_layer(self, layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight)
nn.init.constant_(layer.bias, 0.)
def forward(self, x, ref):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
"""
out_enc = self.conditioner(ref)
emb = self.emb_fc(out_enc)
emb = self.PReLu(emb)
out_tsd_time, out_tsd_up, _ = self.tsd(x, emb)
x_cls = self.PReLu(self.cls1(emb))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
return x_cls, out_tsd_time, out_tsd_up
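# Minimal sketch of TSDNet inference (shapes illustrative; no pretrained
# CNN10 checkpoint is loaded here, so the embeddings come from randomly
# initialised weights):
def _tsdnet_shape_sketch():
    net = TSDNet(nFrameLen=512, nFrameShift=256, cls_num=41)
    mix = torch.randn(2, 64000)            # B x T mixture waveform
    ref = torch.randn(2, 64000)            # B x T reference clip
    x_cls, tsd_time, tsd_up = net(mix, ref)
    return tsd_up                          # B x T_frames detection scores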
class TSDNet_tse(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None, use_frame=False, only_ref=True):
super(TSDNet_tse, self).__init__()
self.PReLu = nn.PReLU()
self.conditioner = CNN10(sample_rate=16000, window_size=1024,
hop_size=320, mel_bins=64, fmin=50, fmax=8000,
classes_num=527)
self.cls1 = nn.Linear(128, 128)
self.cls2 = nn.Linear(128, cls_num)
self.pretrainedCNN10 = pretrainedCNN10
self.init_ref()
self.emb_fc = nn.Linear(512, 128)
self.only_ref = only_ref
# print(CNN10_settings)
self.tsd = TSD2_tse(sample_rate=CNN10_settings[0], window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5], use_frame=use_frame)
# print('self.tsd ',self.tsd)
# assert 1==2
self.init_fc_layer(self.cls1) # new add
self.init_fc_layer(self.cls2) # new add
self.init_fc_layer(self.emb_fc) # new add
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def init_rnn_layer(self, layer):
for name, param in layer.named_parameters():
if name.startswith("weight"):
nn.init.kaiming_normal_(param)
else:
nn.init.zeros_(param)
def init_fc_layer(self, layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight)
nn.init.constant_(layer.bias, 0.)
def forward(self, x, ref, tse_audio):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
"""
# clip level condition
# clip_ref = torch.cat([ref, tse_audio],dim=1)
if self.only_ref:
            clip_ref = ref  # use only the reference clip
else:
clip_ref = torch.cat([ref, tse_audio],dim=1)
clip_enc = self.conditioner(clip_ref)
#print('clip_enc ',clip_enc.shape)
clip_emb = self.emb_fc(clip_enc)
clip_emb = self.PReLu(clip_emb)
# frame level condition
frame_enc = self.conditioner.extract_frame(tse_audio)
frame_emb = self.emb_fc(frame_enc)
frame_emb = self.PReLu(frame_emb)
out_tsd_time, out_tsd_up = self.tsd(x, clip_emb, frame_emb)
x_cls = self.PReLu(self.cls1(clip_emb))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
return x_cls, out_tsd_time, out_tsd_up
class TSDNet_one_hot(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet_one_hot, self).__init__()
self.PReLu = nn.PReLU()
# self.conditioner = CNN10(sample_rate=16000, window_size=1024,
# hop_size=320, mel_bins=64, fmin=50, fmax=8000,
# classes_num=527)
# self.cls1 = nn.Linear(128, 128)
# self.cls2 = nn.Linear(128, cls_num)
# self.pretrainedCNN10 = pretrainedCNN10
# self.init_ref()
# self.emb_fc = nn.Linear(512, 128)
self.conditioner_one_hot = nn.Embedding(cls_num,128)
# print(CNN10_settings)
self.tsd = TSD(sample_rate=CNN10_settings[0], window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
# print('self.tsd ',self.tsd)
# assert 1==2
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def forward(self, x, ref, onehot=None):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
"""
# out_enc = self.conditioner(ref)
# emb = self.emb_fc(out_enc)
# emb = self.PReLu(emb)
emb_onehot = self.conditioner_one_hot(onehot)
# print('emb_onehot ',emb_onehot.shape)
# assert 1==2
out_tsd_time, out_tsd_up, sim_cos = self.tsd(x, emb_onehot)
# x_cls = self.PReLu(self.cls1(emb))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1)
x_cls = torch.zeros(1).cuda()
return x_cls, out_tsd_time, out_tsd_up, sim_cos # st,ed
class TSDNet_plus_one_hot(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=41, CNN10_settings=[16000,1024,320,64,50,8000,527,512,128], pretrainedCNN10=None):
super(TSDNet_plus_one_hot, self).__init__()
self.PReLu = nn.PReLU()
# self.conditioner = CNN10(sample_rate=16000, window_size=1024,
# hop_size=320, mel_bins=64, fmin=50, fmax=8000,
# classes_num=527)
# self.cls1 = nn.Linear(128, 128)
# self.cls2 = nn.Linear(128, cls_num)
# self.pretrainedCNN10 = pretrainedCNN10
# self.init_ref()
# self.emb_fc = nn.Linear(512, 128)
self.conditioner_one_hot = nn.Embedding(cls_num,128)
# print(CNN10_settings)
self.tsd = TSD_plus_sim(sample_rate=CNN10_settings[0], window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=CNN10_settings[3], fmin=CNN10_settings[4], fmax=CNN10_settings[5])
# print('self.tsd ',self.tsd)
# assert 1==2
def init_ref(self):
if self.pretrainedCNN10:
device = torch.device('cuda')
checkpoint = torch.load(self.pretrainedCNN10, map_location=device)
self.conditioner.load_state_dict(checkpoint['model'])
def forward(self, x, ref,onehot=None):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
"""
# out_enc = self.conditioner(ref)
# emb = self.emb_fc(out_enc)
# emb = self.PReLu(emb)
emb_onehot = self.conditioner_one_hot(onehot)
# print('emb_onehot ',emb_onehot.shape)
# assert 1==2
out_tsd_up, out_tsd_time,sim_cos = self.tsd(x, emb_onehot)
# x_cls = self.PReLu(self.cls1(emb))
# x_cls = F.dropout(x_cls, p=0.5, training=self.training)
# x_cls = self.cls2(x_cls)
# x_cls = F.log_softmax(x_cls, dim=-1)
x_cls = torch.zeros(1).cuda()
return x_cls, out_tsd_time, out_tsd_up, sim_cos
if __name__ == "__main__":
conv = Conv_TasNet().cuda()
# encoder = Encoder(16, 512)
x = torch.randn(4, 64000).cuda()
label = torch.randn(4, 64000).cuda()
ref = torch.randn(4, 64000).cuda()
audio, lps, lab = conv(x, ref, label)
print(audio[0].shape)
# print("{:.3f}".format(check_parameters(conv)))
| 31,331
| 38.067332
| 177
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/model_tf.py
|
import torch
from torch import nn
import torch.nn.functional as F
import sys
import pickle
import math
sys.path.append('../')
from utils.util import check_parameters
from model.PANNS import ResNet38, CNN10
from model.tsd import TSD
def init_kernel(frame_len,
frame_hop,
num_fft=None,
window="sqrt_hann"):
if window != "sqrt_hann":
raise RuntimeError("Now only support sqrt hanning window in order "
"to make signal perfectly reconstructed")
if not num_fft:
# FFT points
fft_size = 2 ** math.ceil(math.log2(frame_len))
else:
fft_size = num_fft
# window [window_length]
window = torch.hann_window(frame_len) ** 0.5
S_ = 0.5 * (fft_size * fft_size / frame_hop) ** 0.5
# window_length, F, 2 (real+imag)
kernel = torch.rfft(torch.eye(fft_size) / S_, 1)[:frame_len]
# 2, F, window_length
kernel = torch.transpose(kernel, 0, 2) * window
# 2F, 1, window_length
kernel = torch.reshape(kernel, (fft_size + 2, 1, frame_len))
return kernel
class STFTBase(nn.Module):
"""
Base layer for (i)STFT
NOTE:
1) Recommend sqrt_hann window with 2**N frame length, because it
could achieve perfect reconstruction after overlap-add
2) Now haven't consider padding problems yet
"""
def __init__(self,
frame_len,
frame_hop,
window="sqrt_hann",
num_fft=None):
super(STFTBase, self).__init__()
K = init_kernel(
frame_len,
frame_hop,
num_fft=num_fft,
window=window)
self.K = nn.Parameter(K, requires_grad=False)
self.stride = frame_hop
self.window = window
def freeze(self):
self.K.requires_grad = False
def unfreeze(self):
self.K.requires_grad = True
def check_nan(self):
num_nan = torch.sum(torch.isnan(self.K))
if num_nan:
raise RuntimeError(
"detect nan in STFT kernels: {:d}".format(num_nan))
def extra_repr(self):
return "window={0}, stride={1}, requires_grad={2}, kernel_size={3[0]}x{3[2]}".format(
self.window, self.stride, self.K.requires_grad, self.K.shape)
class STFT(STFTBase):
"""
Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(STFT, self).__init__(*args, **kwargs)
def forward(self, x):
"""
Accept raw waveform and output magnitude and phase
x: input signal, N x 1 x S or N x S
m: magnitude, N x F x T
p: phase, N x F x T
"""
if x.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
x.dim()))
self.check_nan()
# if N x S, reshape N x 1 x S
if x.dim() == 2:
x = torch.unsqueeze(x, 1)
# N x 2F x T
c = F.conv1d(x, self.K, stride=self.stride, padding=0)
# N x F x T
r, i = torch.chunk(c, 2, dim=1)
m = (r ** 2 + i ** 2) ** 0.5
p = torch.atan2(i, r)
return m, p
class iSTFT(STFTBase):
"""
Inverse Short-time Fourier Transform as a Layer
"""
def __init__(self, *args, **kwargs):
super(iSTFT, self).__init__(*args, **kwargs)
def forward(self, m, p, squeeze=False):
"""
Accept phase & magnitude and output raw waveform
m, p: N x F x T
s: N x C x S
"""
if p.dim() != m.dim() or p.dim() not in [2, 3]:
raise RuntimeError("Expect 2D/3D tensor, but got {:d}D".format(
p.dim()))
self.check_nan()
# if F x T, reshape 1 x F x T
if p.dim() == 2:
p = torch.unsqueeze(p, 0)
m = torch.unsqueeze(m, 0)
r = m * torch.cos(p)
i = m * torch.sin(p)
# N x 2F x T
c = torch.cat([r, i], dim=1)
# N x 2F x T
s = F.conv_transpose1d(c, self.K, stride=self.stride, padding=0)
if squeeze:
s = torch.squeeze(s)
return s
class GlobalLayerNorm(nn.Module):
'''
Calculate Global Layer Normalization
dim: (int or list or torch.Size) –
input shape from an expected input of size
eps: a value added to the denominator for numerical stability.
elementwise_affine: a boolean value that when set to True,
this module has learnable per-element affine parameters
initialized to ones (for weights) and zeros (for biases).
'''
def __init__(self, dim, eps=1e-05, elementwise_affine=True):
super(GlobalLayerNorm, self).__init__()
self.dim = dim
self.eps = eps
self.elementwise_affine = elementwise_affine
if self.elementwise_affine:
self.weight = nn.Parameter(torch.ones(self.dim, 1))
self.bias = nn.Parameter(torch.zeros(self.dim, 1))
else:
self.register_parameter('weight', None)
self.register_parameter('bias', None)
def forward(self, x):
# x = N x C x L
# N x 1 x 1
# cln: mean,var N x 1 x L
# gln: mean,var N x 1 x 1
if x.dim() != 3:
raise RuntimeError("{} accept 3D tensor as input".format(
self.__name__))
mean = torch.mean(x, (1, 2), keepdim=True)
var = torch.mean((x - mean) ** 2, (1, 2), keepdim=True)
# N x C x L
if self.elementwise_affine:
x = self.weight * (x - mean) / torch.sqrt(var + self.eps) + self.bias
else:
x = (x - mean) / torch.sqrt(var + self.eps)
return x
class CumulativeLayerNorm(nn.LayerNorm):
'''
Calculate Cumulative Layer Normalization
dim: you want to norm dim
elementwise_affine: learnable per-element affine parameters
'''
def __init__(self, dim, elementwise_affine=True):
super(CumulativeLayerNorm, self).__init__(
dim, elementwise_affine=elementwise_affine)
def forward(self, x):
# x: N x C x L
# N x L x C
x = torch.transpose(x, 1, 2)
# N x L x C == only channel norm
x = super().forward(x)
# N x C x L
x = torch.transpose(x, 1, 2)
return x
def select_norm(norm, dim):
if norm == 'gln':
return GlobalLayerNorm(dim, elementwise_affine=True)
if norm == 'cln':
return CumulativeLayerNorm(dim, elementwise_affine=True)
if norm == 'ln':
return nn.GroupNorm(1, dim)
else:
return nn.BatchNorm1d(dim)
class Conv1D(nn.Module):
'''
Build the Conv1D structure
causal: if True is causal setting
'''
def __init__(self, in_channels=256, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x):
"""
Input:
x: [B x C x T], B is batch size, T is times
Returns:
x: [B, C, T]
"""
# B x C x T -> B x C_o x T_o
x_conv = self.conv1x1(x)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Conv1D_emb(nn.Module):
'''
Build the Conv1D structure with embedding
causal: if True is causal setting
'''
def __init__(self, in_channels=256, emb_channels=128, out_channels=512,
kernel_size=3, dilation=1, norm='gln', causal=False):
super(Conv1D_emb, self).__init__()
self.causal = causal
self.conv1x1 = nn.Conv1d(in_channels+emb_channels, out_channels, kernel_size=1)
self.PReLu1 = nn.PReLU()
self.norm1 = select_norm(norm, out_channels)
self.pad = (dilation * (kernel_size - 1)
) // 2 if not causal else dilation * (kernel_size - 1)
self.dwconv = nn.Conv1d(out_channels, out_channels, kernel_size=kernel_size,
groups=out_channels, padding=self.pad, dilation=dilation)
self.PReLu2 = nn.PReLU()
self.norm2 = select_norm(norm, out_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, in_channels, 1)
def forward(self, x, emb):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C']
Returns:
x: [B, C, T]
"""
T = x.shape[-1]
emb = torch.unsqueeze(emb, -1)
# B x C' X T
emb = emb.repeat(1, 1, T)
# B x (C + C') X T
x_ = torch.cat([x, emb], 1)
# B x (C + C') X T -> B x C_o x T_o
x_conv = self.conv1x1(x_)
x_conv = self.PReLu1(x_conv)
x_conv = self.norm1(x_conv)
# B x C_o x T_o
x_conv = self.dwconv(x_conv)
x_conv = self.PReLu2(x_conv)
x_conv = self.norm2(x_conv)
# B x C_o x T_o -> B x C x T
if self.causal:
x_conv = x_conv[:, :, :-self.pad]
x_conv = self.end_conv1x1(x_conv)
return x + x_conv
class Encoder(nn.Module):
'''
Conv-Tasnet Encoder part
kernel_size: the length of filters
out_channels: the number of filters
'''
def __init__(self, kernel_size=2, out_channels=64):
super(Encoder, self).__init__()
self.conv1d = nn.Conv1d(in_channels=1, out_channels=out_channels,
kernel_size=kernel_size, stride=kernel_size // 2, groups=1)
def forward(self, x):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
x: [B, C, T_out]
T_out is the number of time steps
"""
# B x T -> B x 1 x T
x = torch.unsqueeze(x, dim=1)
# B x 1 x T -> B x C x T_out
x = self.conv1d(x)
x = F.relu(x)
return x
class Decoder(nn.ConvTranspose1d):
'''
Decoder of the TasNet
This module can be seen as the gradient of Conv1d with respect to its input.
It is also known as a fractionally-strided convolution
or a deconvolution (although it is not an actual deconvolution operation).
'''
def __init__(self, *args, **kwargs):
super(Decoder, self).__init__(*args, **kwargs)
def forward(self, x):
"""
x: N x L or N x C x L
"""
if x.dim() not in [2, 3]:
raise RuntimeError("{} accept 2/3D tensor as input".format(
self.__name__))
x = super().forward(x if x.dim() == 3 else torch.unsqueeze(x, 1))
if torch.squeeze(x).dim() == 1:
x = torch.squeeze(x, dim=1)
else:
x = torch.squeeze(x)
return x
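# Minimal sketch of the learnable encoder/decoder pair (kernel size and
# channel count are illustrative; not used by the TSDNet defined below):
def _codec_shape_sketch():
    enc = Encoder(kernel_size=16, out_channels=512)
    dec = Decoder(in_channels=512, out_channels=1, kernel_size=16, stride=8)
    wav = torch.randn(2, 64000)            # B x T
    feats = enc(wav)                       # B x 512 x T_out
    return dec(feats)                      # B x T', roughly the input length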
class Separation_TasNet(nn.Module):
'''
TasNet Separation part
LayerNorm -> 1x1Conv -> 1-D Conv .... -> output
'''
def __init__(self, repeats=3, conv1d_block=8, in_channels=64, out_channels=128, emb_channels=128, final_channels=257,
out_sp_channels=512, kernel_size=3, norm='gln', causal=False, num_spks=2):
super(Separation_TasNet, self).__init__()
self.conv1x1 = nn.Conv1d(in_channels, out_channels, 1)
# self.conv1d_list = self._Sequential(
# repeats, conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
# kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_1_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
self.conv_block_1_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_2_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
self.conv_block_2_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
self.conv_block_3_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
self.conv_block_3_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
kernel_size=kernel_size, norm=norm, causal=causal)
# self.conv_block_4_front = Conv1D_emb(in_channels=out_channels, emb_channels=emb_channels, out_channels=out_sp_channels,
# kernel_size=kernel_size, dilation=1, norm=norm, causal=causal)
# self.conv_block_4_back = self._Sequential_block(conv1d_block, in_channels=out_channels, out_channels=out_sp_channels,
# kernel_size=kernel_size, norm=norm, causal=causal)
self.PReLu = nn.PReLU()
self.norm = select_norm('cln', in_channels)
self.end_conv1x1 = nn.Conv1d(out_channels, num_spks * final_channels, 1)
# self.activation = nn.Sigmoid()
self.num_spks = num_spks
def _Sequential_block(self, num_blocks, **block_kwargs):
'''
Sequential 1-D Conv Block
input:
num_block: how many blocks in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
Conv1D_lists = [Conv1D(
**block_kwargs, dilation=(2 ** i)) for i in range(num_blocks)]
return nn.Sequential(*Conv1D_lists)
def _Sequential(self, num_repeats, num_blocks, **block_kwargs):
'''
Sequential repeats
input:
num_repeats: Number of repeats
num_blocks: Number of block in every repeats
**block_kwargs: parameters of Conv1D_Block
'''
repeats_lists = [self._Sequential_block(
num_blocks, **block_kwargs) for i in range(num_repeats)]
return nn.Sequential(*repeats_lists)
def forward(self, x, emb):
"""
Input:
x: [B x C x T], B is batch size, T is times
emb: [B x C x T], B is batch size, T is times
Returns:
x: [num_spks, B, N, T]
"""
# B x C x T
x = self.norm(x)
x = self.conv1x1(x)
# B x C x T
x = self.conv_block_1_front(x, emb)
x = self.conv_block_1_back(x)
x = self.conv_block_2_front(x, emb)
x = self.conv_block_2_back(x)
x = self.conv_block_3_front(x, emb)
x = self.conv_block_3_back(x)
# x = self.conv_block_4_front(x, emb)
# x = self.conv_block_4_back(x)
# B x N x T
x = self.PReLu(x)
x = self.end_conv1x1(x)
# x = self.activation(x)
return x
class TSDNet(nn.Module):
'''
TSDNet module
'''
def __init__(self, nFrameLen=512, nFrameShift=256, cls_num=50):
super(TSDNet, self).__init__()
self.PReLu = nn.PReLU()
self.encoder_ref = CNN10(sample_rate=16000, window_size=1024,
hop_size=320, mel_bins=64, fmin=50, fmax=8000,
classes_num=527)
self.cls1 = nn.Linear(128, 128)
self.cls2 = nn.Linear(128, cls_num)
self.init_ref()
self.emb_fc = nn.Linear(512, 128)
self.tsd = TSD(sample_rate=16000, window_size=nFrameLen,
hop_size=nFrameShift, mel_bins=64, fmin=50, fmax=8000)
def init_ref(self):
device = torch.device('cuda')
checkpoint_path = '/apdcephfs/private_helinwang/tsss/Dual-Path-RNN-Pytorch/model/Cnn10_mAP=0.380.pth'
checkpoint = torch.load(checkpoint_path, map_location=device)
self.encoder_ref.load_state_dict(checkpoint['model'])
def forward(self, x, ref):
"""
Input:
x: [B, T], B is batch size, T is times
Returns:
"""
out_enc = self.encoder_ref(ref)
emb = self.emb_fc(out_enc)
emb = self.PReLu(emb)
        out_tsd_time, out_tsd_up, _ = self.tsd(x, emb)
x_cls = self.PReLu(self.cls1(emb))
x_cls = F.dropout(x_cls, p=0.5, training=self.training)
x_cls = self.cls2(x_cls)
x_cls = F.log_softmax(x_cls, dim=-1)
return x_cls, out_tsd_time
if __name__ == "__main__":
conv = TSDNet().cuda()
# encoder = Encoder(16, 512)
x = torch.randn(4, 64000).cuda()
label = torch.randn(4, 64000).cuda()
ref = torch.randn(4, 64000).cuda()
audio, lps, lab = conv(x, ref, label)
print(audio[0].shape)
# print("{:.3f}".format(check_parameters(conv)))
| 17,741
| 34.342629
| 129
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/__init__.py
|
from .loss import *
from .model import *
| 40
| 19.5
| 20
|
py
|
Tim-TSENet
|
Tim-TSENet-main/TSDNET/model/tsd.py
|
import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
def init_layer(layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.xavier_uniform_(layer.weight)
if hasattr(layer, 'bias'):
if layer.bias is not None:
layer.bias.data.fill_(0.)
def init_bn(bn):
"""Initialize a Batchnorm layer. """
bn.bias.data.fill_(0.)
bn.weight.data.fill_(1.)
def init_weights(m):
if isinstance(m, (nn.Conv2d, nn.Conv1d)):
nn.init.kaiming_normal_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
if isinstance(m, nn.Linear):
nn.init.kaiming_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def frame_shift(features):
batch_size, _, _, _ = features.shape
shifted_feature = []
for idx in range(batch_size):
shift = int(random.gauss(0, 10))
shifted_feature.append(torch.roll(features[idx], shift, dims=2))
return torch.stack(shifted_feature)
class TimeShift(nn.Module):
def __init__(self, mean, std):
super().__init__()
self.mean = mean
self.std = std
def forward(self, x):
if self.training:
shift = torch.empty(1).normal_(self.mean, self.std).int().item()
x = torch.roll(x, shift, dims=2)
return x
class ConvBlock(nn.Module):
def __init__(self, in_channels, out_channels):
super(ConvBlock, self).__init__()
self.conv1 = nn.Conv2d(in_channels=in_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.conv2 = nn.Conv2d(in_channels=out_channels,
out_channels=out_channels,
kernel_size=(3, 3), stride=(1, 1),
padding=(1, 1), bias=False)
self.bn1 = nn.BatchNorm2d(out_channels)
self.bn2 = nn.BatchNorm2d(out_channels)
self.init_weight()
def init_weight(self):
init_layer(self.conv1)
init_layer(self.conv2)
init_bn(self.bn1)
init_bn(self.bn2)
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
x = input
x = F.relu_(self.bn1(self.conv1(x)))
x = F.relu_(self.bn2(self.conv2(x)))
if pool_type == 'max':
x = F.max_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg':
x = F.avg_pool2d(x, kernel_size=pool_size)
elif pool_type == 'avg+max':
x1 = F.avg_pool2d(x, kernel_size=pool_size)
x2 = F.max_pool2d(x, kernel_size=pool_size)
x = x1 + x2
else:
raise Exception('Incorrect argument!')
return x
class Block2D(nn.Module):
def __init__(self, cin, cout, kernel_size=3, padding=1):
super().__init__()
self.block = nn.Sequential(
nn.BatchNorm2d(cin),
nn.Conv2d(cin,
cout,
kernel_size=kernel_size,
padding=padding,
bias=False),
nn.LeakyReLU(inplace=True, negative_slope=0.1))
def forward(self, x):
return self.block(x)
class Cnn10(nn.Module):
def __init__(self,scale=2):
super(Cnn10, self).__init__()
self.conv_block1 = ConvBlock(in_channels=1, out_channels=64)
self.conv_block2 = ConvBlock(in_channels=64, out_channels=128)
self.conv_block3 = ConvBlock(in_channels=128, out_channels=256)
self.conv_block4 = ConvBlock(in_channels=256, out_channels=512)
self.scale = scale
def forward(self, input, pool_size=(2, 2), pool_type='avg'):
"""
Input: (batch_size, data_length)"""
if self.scale == 8:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (2,4)
pool_size4 = (1,4)
elif self.scale == 4:
pool_size1 = (2,2)
pool_size2 = (2,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
elif self.scale == 2:
pool_size1 = (2,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
else:
pool_size1 = (1,2)
pool_size2 = (1,2)
pool_size3 = (1,4)
pool_size4 = (1,4)
x = self.conv_block1(input, pool_size=pool_size1, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block2(x, pool_size=pool_size2, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block3(x, pool_size=pool_size3, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
x = self.conv_block4(x, pool_size=pool_size4, pool_type='avg')
x = F.dropout(x, p=0.2, training=self.training)
return x
class conv1d(nn.Module):
def __init__(self, nin, nout, kernel_size=3, stride=1, padding='VALID', dilation=1):
super(conv1d, self).__init__()
if padding == 'VALID':
dconv_pad = 0
elif padding == 'SAME':
dconv_pad = dilation * ((kernel_size - 1) // 2)
else:
raise ValueError("Padding Mode Error!")
self.conv = nn.Conv1d(nin, nout, kernel_size=kernel_size, stride=stride, padding=dconv_pad)
self.act = nn.ReLU()
self.init_layer(self.conv)
def init_layer(self, layer): # relu
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight)
nn.init.constant_(layer.bias, 0.1)
def forward(self, x):
out = self.act(self.conv(x))
return out
class Fusion(nn.Module):
def __init__(self, inputdim_1, inputdim_2, n_fac):
super().__init__()
self.fuse_layer1 = conv1d(inputdim_1, inputdim_1*n_fac,1) # 128*4
self.fuse_layer2 = conv1d(inputdim_2, inputdim_1*n_fac,1) # 128*4
# self.fuse_layer1.apply(init_weights) # 2022/2/12 new add to solve the problem of initiaze
# self.fuse_layer2.apply(init_weights) # 2022/2/12 new add to solve the problem of initiaze
        self.avg_pool = nn.AvgPool1d(n_fac, stride=n_fac)  # pool along the last (channel) dimension
    def forward(self, embedding, mix_embed):
        embedding = embedding.permute(0, 2, 1)
        fuse1_out = self.fuse_layer1(embedding)  # 1-D conv expands the reference embedding, e.g. [B, T, C*n_fac]
        fuse1_out = fuse1_out.permute(0, 2, 1)
        mix_embed = mix_embed.permute(0, 2, 1)
        fuse2_out = self.fuse_layer2(mix_embed)  # 1-D conv expands the mixture features to the same width
        fuse2_out = fuse2_out.permute(0, 2, 1)
        as_embs = torch.mul(fuse1_out, fuse2_out)  # element-wise product, e.g. [B, T, C*n_fac]
        as_embs = self.avg_pool(as_embs)  # back to [B, T, C] (C*n_fac // n_fac)
        return as_embs
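# Minimal sketch of the conditioning fusion (shapes illustrative):
def _fusion_shape_sketch():
    fusion = Fusion(inputdim_1=128, inputdim_2=128, n_fac=4)
    emb = torch.randn(2, 156, 128)         # B x T x C reference embedding tiled over time
    mix = torch.randn(2, 156, 128)         # B x T x C mixture features
    return fusion(emb, mix)                # B x T x 128 after product + average pooling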
class TSD(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
        self.gru = nn.GRU(128, 128, bidirectional=True, batch_first=True)  # start with a single bidirectional GRU
        self.fc = nn.Linear(256, 256)
        self.fusion = Fusion(128, 128, 4)
self.outputlayer = nn.Linear(256, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
# print('x ',x.shape)
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
sim_cos = self.cos(x,emb)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = self.fusion(emb,x) # 512
#x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_time[:,:,0], decision_up[:,:,0], sim_cos
class TSD2(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD2, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
        self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)  # two-layer bidirectional GRU
self.fc = nn.Linear(256, 256)
self.fusion = Fusion(128,128,4)
self.outputlayer = nn.Linear(256, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
        self.fc.apply(init_weights)  # added 2022/2/12 to fix initialization
        self.init_rnn_layer(self.gru)  # added 2022/2/12 to fix initialization
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def init_rnn_layer(self, layer):
for name, param in layer.named_parameters():
if name.startswith("weight"):
nn.init.kaiming_normal_(param)
else:
nn.init.zeros_(param)
def init_fc_layer(self, layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight)
nn.init.constant_(layer.bias, 0.)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
# print('x ',x.shape)
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
sim_cos = self.cos(x,emb)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = self.fusion(emb,x) # 512
#x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_time[:,:,0], decision_up[:,:,0], sim_cos
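# Minimal sketch of the TSD2 detector head (shapes illustrative, 16 kHz audio;
# the window/hop values match the defaults passed in by TSDNet):
def _tsd2_shape_sketch():
    tsd = TSD2(sample_rate=16000, window_size=512, hop_size=256,
               mel_bins=64, fmin=50, fmax=8000)
    mix = torch.randn(2, 64000)            # B x T waveform
    emb = torch.randn(2, 128)              # B x 128 clip-level reference embedding
    dec_time, dec_up, sim = tsd(mix, emb)
    return dec_up                          # frame scores upsampled to the spectrogram length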
class TSD2_tse(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax, use_frame):
super(TSD2_tse, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
self.use_frame = use_frame
if self.use_frame:
self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)
self.fusion = Fusion(128,256,4) # embed,mix
else:
self.gru = nn.GRU(128, 128, 2, bidirectional=True, batch_first=True)
self.fusion = Fusion(128,128,4)
self.fc = nn.Linear(256, 256)
self.outputlayer = nn.Linear(256, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
        self.fc.apply(init_weights)  # added 2022/2/12 to fix initialization
        self.init_rnn_layer(self.gru)  # added 2022/2/12 to fix initialization
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def init_rnn_layer(self, layer):
for name, param in layer.named_parameters():
if name.startswith("weight"):
nn.init.kaiming_normal_(param)
else:
nn.init.zeros_(param)
def init_fc_layer(self, layer):
"""Initialize a Linear or Convolutional layer. """
nn.init.kaiming_normal_(layer.weight)
nn.init.constant_(layer.bias, 0.)
def forward(self, input, emb, frame_emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,312,128)
frame_emb_up = torch.nn.functional.interpolate(
frame_emb.transpose(1, 2), # [b, 250, 128]
x.shape[1], # 501
mode='linear',
align_corners=False).transpose(1, 2)
# print('frame_emb_up ',frame_emb_up.shape)
# assert 1==2
        # if the frame-level condition is enabled
if self.use_frame:
# sim_cos = self.cos(x,emb)
x = torch.cat((x, frame_emb_up), dim=2)
# else:
# sim_cos = self.cos(x,emb)
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = self.fusion(emb,x) # 512
#x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_time[:,:,0], decision_up[:,:,0]
class TSD_L(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD_L, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (2, 4)),
nn.Dropout(0.3),
)
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512, 256)
self.outputlayer = nn.Linear(256, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
# print('x ',x.shape)
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
sim_cos = self.cos(x,emb)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
# decision_up = torch.nn.functional.interpolate(
# decision_time.transpose(1, 2), # [16, 2, 156]
# time, # 501
# mode='linear',
# align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_time[:,:,0], decision_time[:,:,0],sim_cos
class TSD_plus(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD_plus, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = Cnn10()
self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
self.fc = nn.Linear(1024, 256)
self.outputlayer = nn.Linear(256, 2)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
x = x.transpose(1, 2).contiguous().flatten(-2) # (b,156,128)
# print('x ',x.shape) # 512
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_up[:,:,0], decision_time[:,:,0],x
class TSD_plus_sim(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD_plus_sim, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.spec_augmenter = SpecAugmentation(time_drop_width=60, time_stripes_num=2,
freq_drop_width=8, freq_stripes_num=2)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.time_shift = TimeShift(0, 50)
self.features = Cnn10()
self.gru = nn.GRU(640, 512, bidirectional=True, batch_first=True)
self.sim_fc = nn.Linear(512,128)
self.fc = nn.Linear(1024, 256)
self.outputlayer = nn.Linear(256, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
if self.training:
# print('x, ', x.shape)
x = self.time_shift(x)
x = self.spec_augmenter(x)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, T', 512): flatten channel and frequency dims
# print('x ',x.shape) # 512
# assert 1==2
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
x_sim = self.sim_fc(x)
sim_cos = self.cos(x_sim, emb)
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
x = self.fc(x)
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 156, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 156]
time, # 501
mode='linear',
align_corners=False).transpose(1, 2) # (16, 624, 2)
return decision_up[:,:,0], decision_time[:,:,0],sim_cos
class TSD_IS(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super().__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 64),
nn.MaxPool2d((2, 4)),
Block2D(64, 64),
nn.MaxPool2d((1, 4)),
Block2D(64, 64),
nn.MaxPool2d((1, 4)))
# with torch.no_grad():
# rnn_input_dim = self.features(torch.randn(1, 1, 500,inputdim)).shape
# rnn_input_dim = rnn_input_dim[1] * rnn_input_dim[-1]
self.gru = nn.GRU(64, 62, bidirectional=True, batch_first=True)
self.gru2 = nn.GRU(124+128, 124, bidirectional=True, batch_first=True)
# self.fc = nn.Linear(248,2)
self.outputlayer = nn.Linear(248, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
self.bn0.apply(init_bn)
self.outputlayer.apply(init_weights)
def forward(self,input,embedding):
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
batch, ch, time, dim = x.shape # (b,1,t,d)
# x = x.unsqueeze(1) # (b,1,t,d)
# print('x ',x.shape)
x = self.features(x) #
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
self.gru2.flatten_parameters()
        x = x.transpose(1, 2).contiguous().flatten(-2)  # copy x contiguously, then flatten the last two dims  # (b,250,64)
# print('x ',x.shape)
# assert 1==2
x, _ = self.gru(x)
embedding = embedding.unsqueeze(1)
embedding = embedding.repeat(1, x.shape[1], 1)
        sim_cos = torch.zeros(1, device=x.device)  # placeholder: TSD_IS has no similarity branch
x = torch.cat((x, embedding), dim=2) # [B, T, 128 + emb_dim]
x, _ = self.gru2(x) # x torch.Size([16, 125, 256])
decision_time = torch.softmax(self.outputlayer(x),dim=2) # x torch.Size([16, 125, 2])
decision_up = torch.nn.functional.interpolate(
decision_time.transpose(1, 2), # [16, 2, 125]
time, # 501
mode='linear',
            align_corners=False).transpose(1, 2)  # interpolate from 125 frames back to the input length (e.g. 501) --> (16, 501, 2)
return decision_up[:,:,0], decision_time[:,:,0],sim_cos
class TSD_regresion_two_cls(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD_regresion_two_cls, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512, 64)
self.fc2 = nn.Linear(64,16)
        self.st_fc = nn.Linear(16 * 312, 10)
        self.ed_fc = nn.Linear(16 * 312, 11)
self.PReLu1 = nn.PReLU()
# self.outputlayer = nn.Linear(16*312, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
# self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, 312, 256)
# print('x ',x.shape)
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
sim_cos = self.cos(x,emb)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
# print('x ',x.shape)
# assert 1==2
x = self.fc(x)
x = self.PReLu1(x)
x = self.fc2(x)
x = x.flatten(-2)
x_st = self.st_fc(x)
x_ed = self.ed_fc(x)
decision_time = x_st
decision_up = x_ed
return decision_up, decision_time, sim_cos
class TSD_regresion(nn.Module):
def __init__(self, sample_rate, window_size, hop_size, mel_bins, fmin, fmax):
super(TSD_regresion, self).__init__()
window = 'hann'
center = False
pad_mode = 'reflect'
ref = 1.0
amin = 1e-10
top_db = None
# Spectrogram extractor
self.spectrogram_extractor = Spectrogram(n_fft=window_size, hop_length=hop_size,
win_length=window_size, window=window, center=center,
pad_mode=pad_mode,
freeze_parameters=True)
# Logmel feature extractor
self.logmel_extractor = LogmelFilterBank(sr=sample_rate, n_fft=window_size,
n_mels=mel_bins, fmin=fmin, fmax=fmax, ref=ref, amin=amin,
top_db=top_db,
freeze_parameters=True)
self.bn0 = nn.BatchNorm2d(mel_bins)
self.features = nn.Sequential(
Block2D(1, 32),
nn.LPPool2d(4, (2, 4)),
Block2D(32, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
Block2D(128, 128),
Block2D(128, 128),
nn.LPPool2d(4, (1, 4)),
nn.Dropout(0.3),
)
self.gru = nn.GRU(256, 256, bidirectional=True, batch_first=True)
self.fc = nn.Linear(512, 64)
self.fc2 = nn.Linear(64,16)
self.PReLu1 = nn.PReLU()
self.outputlayer = nn.Linear(16*312, 2)
self.cos = nn.CosineSimilarity(dim=2, eps=1e-6)
self.features.apply(init_weights)
self.outputlayer.apply(init_weights)
self.bn0.apply(init_bn)
def forward(self, input, emb):
"""
Input: (batch_size, data_length)"""
# print('input ',input.shape)
x = self.spectrogram_extractor(input) # (batch_size, 1, time_steps, freq_bins)
x = self.logmel_extractor(x) # (batch_size, 1, time_steps, mel_bins) # torch.Size([32, 1, 624, 128])
x = x.transpose(1, 3)
x = self.bn0(x)
x = x.transpose(1, 3)
# print('x ',x.shape)
batch, ch, time, dim = x.shape # (b,1,t,d)
x = self.features(x)
        x = x.transpose(1, 2).contiguous().flatten(-2)  # (b, 312, 256)
# print('x ',x.shape)
emb = emb.unsqueeze(1)
emb = emb.repeat(1, x.shape[1], 1)
sim_cos = self.cos(x,emb)
# print('x ',x.shape)
# print('emb ',emb.shape)
# print('sim_cos ',sim_cos)
# assert 1==2
x = torch.cat((x, emb), dim=2) # [B, T, 128 + emb_dim]
if not hasattr(self, '_flattened'):
self.gru.flatten_parameters()
x, _ = self.gru(x) # torch.Size([16, 161, 256])
# print('x ',x.shape)
# assert 1==2
x = self.fc(x)
x = self.PReLu1(x)
x = self.fc2(x)
x = x.flatten(-2)
x = self.outputlayer(x)
decision_time = x
decision_up = x
return decision_up, decision_time, sim_cos
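# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original repository. It assumes the
# helpers referenced above (Spectrogram, LogmelFilterBank, Cnn10, TimeShift,
# SpecAugmentation, ...) are defined or imported earlier in this file, and
# that the target-sound embedding is 128-dimensional, as implied by the
# 640 = 512 + 128 GRU input size of TSD_plus_sim. Sample rate, STFT settings
# and tensor shapes below are illustrative only.
if __name__ == '__main__':
    model = TSD_plus_sim(sample_rate=16000, window_size=1024, hop_size=256,
                         mel_bins=128, fmin=0, fmax=8000)
    model.eval()  # disable TimeShift / SpecAugment so the sketch also runs on CPU
    wav = torch.randn(2, 16000 * 10)  # (batch, samples): two 10-second mixtures
    emb = torch.randn(2, 128)         # (batch, 128): target-sound embeddings
    with torch.no_grad():
        frame_prob, coarse_prob, sim = model(wav, emb)
    # frame_prob: per-frame presence probability of the target sound (upsampled),
    # coarse_prob: the same before temporal upsampling,
    # sim: frame/embedding cosine similarity used as an auxiliary signal.
    print(frame_prob.shape, coarse_prob.shape, sim.shape)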
| 37,974
| 41.100887
| 109
|
py
|
SOF-VSR
|
SOF-VSR-master/TIP/modules.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from torch.autograd import Variable
class SOFVSR(nn.Module):
def __init__(self, cfg, n_frames=3, is_training=True):
super(SOFVSR, self).__init__()
self.scale = cfg.scale
self.is_training = is_training
self.OFR = OFRnet(scale=cfg.scale, channels=320)
self.SR = SRnet(scale=cfg.scale, channels=320, n_frames=n_frames)
def forward(self, x):
b, n_frames, c, h, w = x.size() # x: b*n*c*h*w
idx_center = (n_frames - 1) // 2
# motion estimation
flow_L1 = []
flow_L2 = []
flow_L3 = []
input = []
for idx_frame in range(n_frames):
if idx_frame != idx_center:
input.append(torch.cat((x[:,idx_frame,:,:,:], x[:,idx_center,:,:,:]), 1))
optical_flow_L1, optical_flow_L2, optical_flow_L3 = self.OFR(torch.cat(input, 0))
optical_flow_L1 = optical_flow_L1.view(-1, b, 2, h//2, w//2)
optical_flow_L2 = optical_flow_L2.view(-1, b, 2, h, w)
optical_flow_L3 = optical_flow_L3.view(-1, b, 2, h*self.scale, w*self.scale)
# motion compensation
draft_cube = []
draft_cube.append(x[:, idx_center, :, :, :])
for idx_frame in range(n_frames):
if idx_frame == idx_center:
flow_L1.append([])
flow_L2.append([])
flow_L3.append([])
if idx_frame != idx_center:
if idx_frame < idx_center:
idx = idx_frame
if idx_frame > idx_center:
idx = idx_frame - 1
flow_L1.append(optical_flow_L1[idx, :, :, :, :])
flow_L2.append(optical_flow_L2[idx, :, :, :, :])
flow_L3.append(optical_flow_L3[idx, :, :, :, :])
for i in range(self.scale):
for j in range(self.scale):
draft = optical_flow_warp(x[:, idx_frame, :, :, :],
optical_flow_L3[idx, :, :, i::self.scale, j::self.scale] / self.scale)
draft_cube.append(draft)
draft_cube = torch.cat(draft_cube, 1)
# super-resolution
SR = self.SR(draft_cube)
if self.is_training:
return flow_L1, flow_L2, flow_L3, SR
if not self.is_training:
return SR
class OFRnet(nn.Module):
def __init__(self, scale, channels):
super(OFRnet, self).__init__()
self.pool = nn.AvgPool2d(2)
self.scale = scale
## RNN part
self.RNN1 = nn.Sequential(
nn.Conv2d(4, channels, 3, 1, 1, bias=False),
nn.LeakyReLU(0.1, inplace=True),
CasResB(3, channels)
)
self.RNN2 = nn.Sequential(
nn.Conv2d(channels, 2, 3, 1, 1, bias=False),
)
# SR part
SR = []
SR.append(CasResB(3, channels))
if self.scale == 4:
SR.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
SR.append(nn.PixelShuffle(2))
SR.append(nn.LeakyReLU(0.1, inplace=True))
SR.append(nn.Conv2d(64, 64 * 4, 1, 1, 0, bias=False))
SR.append(nn.PixelShuffle(2))
SR.append(nn.LeakyReLU(0.1, inplace=True))
elif self.scale == 3:
SR.append(nn.Conv2d(channels, 64 * 9, 1, 1, 0, bias=False))
SR.append(nn.PixelShuffle(3))
SR.append(nn.LeakyReLU(0.1, inplace=True))
elif self.scale == 2:
SR.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
SR.append(nn.PixelShuffle(2))
SR.append(nn.LeakyReLU(0.1, inplace=True))
SR.append(nn.Conv2d(64, 2, 3, 1, 1, bias=False))
self.SR = nn.Sequential(*SR)
def __call__(self, x): # x: b*2*h*w
#Part 1
x_L1 = self.pool(x)
b, c, h, w = x_L1.size()
input_L1 = torch.cat((x_L1, torch.zeros(b, 2, h, w).cuda()), 1)
optical_flow_L1 = self.RNN2(self.RNN1(input_L1))
optical_flow_L1_upscaled = F.interpolate(optical_flow_L1, scale_factor=2, mode='bilinear', align_corners=False) * 2
#Part 2
x_L2 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], 1), optical_flow_L1_upscaled)
input_L2 = torch.cat((x_L2, torch.unsqueeze(x[:, 1, :, :], 1), optical_flow_L1_upscaled), 1)
optical_flow_L2 = self.RNN2(self.RNN1(input_L2)) + optical_flow_L1_upscaled
#Part 3
x_L3 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], 1), optical_flow_L2)
input_L3 = torch.cat((x_L3, torch.unsqueeze(x[:, 1, :, :], 1), optical_flow_L2), 1)
optical_flow_L3 = self.SR(self.RNN1(input_L3)) + \
F.interpolate(optical_flow_L2, scale_factor=self.scale, mode='bilinear', align_corners=False) * self.scale
return optical_flow_L1, optical_flow_L2, optical_flow_L3
class SRnet(nn.Module):
def __init__(self, scale, channels, n_frames):
super(SRnet, self).__init__()
body = []
body.append(nn.Conv2d(1 * scale ** 2 * (n_frames-1) + 1, channels, 3, 1, 1, bias=False))
body.append(nn.LeakyReLU(0.1, inplace=True))
body.append(CasResB(8, channels))
if scale == 4:
body.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
body.append(nn.PixelShuffle(2))
body.append(nn.LeakyReLU(0.1, inplace=True))
body.append(nn.Conv2d(64, 64 * 4, 1, 1, 0, bias=False))
body.append(nn.PixelShuffle(2))
body.append(nn.LeakyReLU(0.1, inplace=True))
elif scale == 3:
body.append(nn.Conv2d(channels, 64 * 9, 1, 1, 0, bias=False))
body.append(nn.PixelShuffle(3))
body.append(nn.LeakyReLU(0.1, inplace=True))
elif scale == 2:
body.append(nn.Conv2d(channels, 64 * 4, 1, 1, 0, bias=False))
body.append(nn.PixelShuffle(2))
body.append(nn.LeakyReLU(0.1, inplace=True))
body.append(nn.Conv2d(64, 1, 3, 1, 1, bias=True))
self.body = nn.Sequential(*body)
def __call__(self, x):
out = self.body(x)
return out
class ResB(nn.Module):
def __init__(self, channels):
super(ResB, self).__init__()
self.body = nn.Sequential(
nn.Conv2d(channels//2, channels//2, 1, 1, 0, bias=False),
nn.LeakyReLU(0.1, inplace=True),
nn.Conv2d(channels//2, channels//2, 3, 1, 1, bias=False, groups=channels//2),
nn.Conv2d(channels // 2, channels // 2, 1, 1, 0, bias=False),
nn.LeakyReLU(0.1, inplace=True),
)
def forward(self, x):
input = x[:, x.shape[1]//2:, :, :]
out = torch.cat((x[:, :x.shape[1]//2, :, :], self.body(input)), 1)
return channel_shuffle(out, 2)
class CasResB(nn.Module):
def __init__(self, n_ResB, channels):
super(CasResB, self).__init__()
body = []
for i in range(n_ResB):
body.append(ResB(channels))
self.body = nn.Sequential(*body)
def forward(self, x):
return self.body(x)
def channel_shuffle(x, groups):
b, c, h, w = x.size()
x = x.view(b, groups, c//groups, h, w)
x = x.permute(0, 2, 1, 3, 4).contiguous()
x = x.view(b, -1, h, w)
return x
def optical_flow_warp(image, image_optical_flow):
"""
Arguments
image_ref: reference images tensor, (b, c, h, w)
image_optical_flow: optical flow to image_ref (b, 2, h, w)
"""
b, _ , h, w = image.size()
grid = np.meshgrid(range(w), range(h))
grid = np.stack(grid, axis=-1).astype(np.float64)
grid[:, :, 0] = grid[:, :, 0] * 2 / (w - 1) -1
grid[:, :, 1] = grid[:, :, 1] * 2 / (h - 1) -1
grid = grid.transpose(2, 0, 1)
grid = np.tile(grid, (b, 1, 1, 1))
grid = Variable(torch.Tensor(grid))
    if image_optical_flow.is_cuda:
grid = grid.cuda()
flow_0 = torch.unsqueeze(image_optical_flow[:, 0, :, :] * 31 / (w - 1), dim=1)
flow_1 = torch.unsqueeze(image_optical_flow[:, 1, :, :] * 31 / (h - 1), dim=1)
grid = grid + torch.cat((flow_0, flow_1),1)
grid = grid.transpose(1, 2)
grid = grid.transpose(3, 2)
output = F.grid_sample(image, grid, padding_mode='border')
return output
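# ---------------------------------------------------------------------------
# Hedged sanity-check sketch, not part of the original repository: with an
# all-zero flow the normalised grid reduces to the identity mapping, so on
# the PyTorch versions this repo targets (where grid_sample aligns corners)
# the warped output matches the input; newer grid_sample defaults introduce
# a small resampling shift. Tensor sizes are arbitrary.
if __name__ == '__main__':
    frame = torch.rand(1, 1, 32, 48)       # (b, c, h, w) dummy frame
    zero_flow = torch.zeros(1, 2, 32, 48)  # (b, 2, h, w) zero displacement
    warped = optical_flow_warp(frame, zero_flow)
    print('max |warped - frame| =', (warped - frame).abs().max().item())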
| 8,356
| 37.334862
| 132
|
py
|
SOF-VSR
|
SOF-VSR-master/TIP/data_utils.py
|
from PIL import Image
from torch.utils.data.dataset import Dataset
from modules import optical_flow_warp
import numpy as np
import os
import torch
import random
class TrainsetLoader(Dataset):
def __init__(self, cfg):
super(TrainsetLoader).__init__()
self.trainset_dir = cfg.trainset_dir
self.scale = cfg.scale
self.patch_size = cfg.patch_size
self.n_iters = cfg.n_iters * cfg.batch_size
self.video_list = os.listdir(cfg.trainset_dir)
self.degradation = cfg.degradation
def __getitem__(self, idx):
idx_video = random.randint(0, len(self.video_list)-1)
        idx_frame = random.randint(0, 28)  # each training clip has 31 frames, so 31 - 3 = 28 is the last valid start index
lr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/lr_x' + str(self.scale) + '_' + self.degradation
hr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/hr'
# read HR & LR frames
LR0 = Image.open(lr_dir + '/lr' + str(idx_frame) + '.png')
LR1 = Image.open(lr_dir + '/lr' + str(idx_frame + 1) + '.png')
LR2 = Image.open(lr_dir + '/lr' + str(idx_frame + 2) + '.png')
HR0 = Image.open(hr_dir + '/hr' + str(idx_frame) + '.png')
HR1 = Image.open(hr_dir + '/hr' + str(idx_frame + 1) + '.png')
HR2 = Image.open(hr_dir + '/hr' + str(idx_frame + 2) + '.png')
LR0 = np.array(LR0, dtype=np.float32) / 255.0
LR1 = np.array(LR1, dtype=np.float32) / 255.0
LR2 = np.array(LR2, dtype=np.float32) / 255.0
HR0 = np.array(HR0, dtype=np.float32) / 255.0
HR1 = np.array(HR1, dtype=np.float32) / 255.0
HR2 = np.array(HR2, dtype=np.float32) / 255.0
# extract Y channel for LR inputs
HR0 = rgb2y(HR0)
HR1 = rgb2y(HR1)
HR2 = rgb2y(HR2)
LR0 = rgb2y(LR0)
LR1 = rgb2y(LR1)
LR2 = rgb2y(LR2)
# crop patchs randomly
HR0, HR1, HR2, LR0, LR1, LR2 = random_crop(HR0, HR1, HR2, LR0, LR1, LR2, self.patch_size, self.scale)
HR0 = HR0[:, :, np.newaxis]
HR1 = HR1[:, :, np.newaxis]
HR2 = HR2[:, :, np.newaxis]
LR0 = LR0[:, :, np.newaxis]
LR1 = LR1[:, :, np.newaxis]
LR2 = LR2[:, :, np.newaxis]
HR = np.concatenate((HR0, HR1, HR2), axis=2)
LR = np.concatenate((LR0, LR1, LR2), axis=2)
# data augmentation
LR, HR = augmentation()(LR, HR)
return toTensor(LR), toTensor(HR)
def __len__(self):
return self.n_iters
class TestsetLoader(Dataset):
def __init__(self, cfg, video_name):
super(TestsetLoader).__init__()
self.dataset_dir = cfg.testset_dir + '/' + video_name
self.degradation = cfg.degradation
self.scale = cfg.scale
self.frame_list = os.listdir(self.dataset_dir + '/lr_x' + str(self.scale) + '_' + self.degradation)
def __getitem__(self, idx):
dir = self.dataset_dir + '/lr_x' + str(self.scale) + '_' + self.degradation
LR0 = Image.open(dir + '/' + 'lr_' + str(idx+1).rjust(2, '0') + '.png')
LR1 = Image.open(dir + '/' + 'lr_' + str(idx+2).rjust(2, '0') + '.png')
LR2 = Image.open(dir + '/' + 'lr_' + str(idx+3).rjust(2, '0') + '.png')
W, H = LR1.size
# H and W should be divisible by 2
W = int(W // 2) * 2
H = int(H // 2) * 2
LR0 = LR0.crop([0, 0, W, H])
LR1 = LR1.crop([0, 0, W, H])
LR2 = LR2.crop([0, 0, W, H])
LR1_bicubic = LR1.resize((W*self.scale, H*self.scale), Image.BICUBIC)
LR1_bicubic = np.array(LR1_bicubic, dtype=np.float32) / 255.0
LR0 = np.array(LR0, dtype=np.float32) / 255.0
LR1 = np.array(LR1, dtype=np.float32) / 255.0
LR2 = np.array(LR2, dtype=np.float32) / 255.0
# extract Y channel for LR inputs
LR0_y, _, _ = rgb2ycbcr(LR0)
LR1_y, _, _ = rgb2ycbcr(LR1)
LR2_y, _, _ = rgb2ycbcr(LR2)
LR0_y = LR0_y[:, :, np.newaxis]
LR1_y = LR1_y[:, :, np.newaxis]
LR2_y = LR2_y[:, :, np.newaxis]
LR = np.concatenate((LR0_y, LR1_y, LR2_y), axis=2)
LR = toTensor(LR)
# generate Cr, Cb channels using bicubic interpolation
_, SR_cb, SR_cr = rgb2ycbcr(LR1_bicubic)
return LR, SR_cb, SR_cr
def __len__(self):
return len(self.frame_list) - 2
class augmentation(object):
def __call__(self, input, target):
if random.random()<0.5:
input = input[:, ::-1, :]
target = target[:, ::-1, :]
if random.random()<0.5:
input = input[::-1, :, :]
target = target[::-1, :, :]
if random.random()<0.5:
input = input.transpose(1, 0, 2)
target = target.transpose(1, 0, 2)
return np.ascontiguousarray(input), np.ascontiguousarray(target)
def random_crop(HR0, HR1, HR2, LR0, LR1, LR2, patch_size_lr, scale):
h_hr, w_hr = HR0.shape
h_lr = h_hr // scale
w_lr = w_hr // scale
idx_h = random.randint(10, h_lr - patch_size_lr - 10)
idx_w = random.randint(10, w_lr - patch_size_lr - 10)
h_start_hr = (idx_h - 1) * scale
h_end_hr = (idx_h - 1 + patch_size_lr) * scale
w_start_hr = (idx_w - 1) * scale
w_end_hr = (idx_w - 1 + patch_size_lr) * scale
h_start_lr = idx_h - 1
h_end_lr = idx_h - 1 + patch_size_lr
w_start_lr = idx_w - 1
w_end_lr = idx_w - 1 + patch_size_lr
HR0 = HR0[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
HR1 = HR1[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
HR2 = HR2[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
LR0 = LR0[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
LR1 = LR1[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
LR2 = LR2[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
return HR0, HR1, HR2, LR0, LR1, LR2
def toTensor(img):
img = torch.from_numpy(img.transpose((2, 0, 1)))
return img
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
def rgb2y(img_rgb):
## the range of img_rgb should be (0, 1)
image_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] +16 / 255.0
return image_y
def OFR_loss(x0, x1, optical_flow):
warped = optical_flow_warp(x0, optical_flow)
loss = torch.mean(torch.abs(x1 - warped)) + 0.1 * L1_regularization(optical_flow)
return loss
def L1_regularization(image):
b, _, h, w = image.size()
reg_x_1 = image[:, :, 0:h-1, 0:w-1] - image[:, :, 1:, 0:w-1]
reg_y_1 = image[:, :, 0:h-1, 0:w-1] - image[:, :, 0:h-1, 1:]
reg_L1 = torch.abs(reg_x_1) + torch.abs(reg_y_1)
return torch.sum(reg_L1) / (b*(h-1)*(w-1))
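# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original repository: rgb2ycbcr / ycbcr2rgb
# above use the usual studio-swing BT.601 coefficients, so a round trip
# should reproduce the input up to small errors from the truncated constants.
if __name__ == '__main__':
    rgb = np.random.rand(8, 8, 3).astype(np.float32)  # dummy RGB patch in (0, 1)
    y, cb, cr = rgb2ycbcr(rgb)
    ycbcr = np.stack((y, cb, cr), axis=2)
    rgb_back = ycbcr2rgb(ycbcr)
    print('max round-trip error:', np.abs(rgb_back - rgb).max())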
| 7,589
| 36.389163
| 143
|
py
|
SOF-VSR
|
SOF-VSR-master/TIP/train.py
|
from torch.autograd import Variable
from torch.utils.data import DataLoader
from modules import SOFVSR
from data_utils import TrainsetLoader, OFR_loss
import torch.backends.cudnn as cudnn
import argparse
import torch
import numpy as np
import torch.nn.functional as F
import os
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--degradation", type=str, default='BI')
parser.add_argument("--scale", type=int, default=4)
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--patch_size', type=int, default=32)
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_iters', type=int, default=200000, help='number of iterations to train')
parser.add_argument('--trainset_dir', type=str, default='data/train')
return parser.parse_args()
def main(cfg):
# model
net = SOFVSR(cfg, is_training=True)
if cfg.gpu_mode:
net.cuda()
cudnn.benchmark = True
# dataloader
train_set = TrainsetLoader(cfg)
train_loader = DataLoader(train_set, num_workers=4, batch_size=cfg.batch_size, shuffle=True)
# train
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
milestones = [80000, 160000]
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.1)
criterion = torch.nn.MSELoss()
loss_list = []
for idx_iter, (LR, HR) in enumerate(train_loader):
scheduler.step()
# data
b, n_frames, h_lr, w_lr = LR.size()
idx_center = (n_frames - 1) // 2
LR, HR = Variable(LR), Variable(HR)
if cfg.gpu_mode:
LR = LR.cuda()
HR = HR.cuda()
LR = LR.view(b, -1, 1, h_lr, w_lr)
HR = HR.view(b, -1, 1, h_lr * cfg.scale, w_lr * cfg.scale)
# inference
flow_L1, flow_L2, flow_L3, SR = net(LR)
# loss
loss_SR = criterion(SR, HR[:, idx_center, :, :, :])
loss_OFR = torch.zeros(1).cuda()
for i in range(n_frames):
if i != idx_center:
loss_L1 = OFR_loss(F.avg_pool2d(LR[:, i, :, :, :], kernel_size=2),
F.avg_pool2d(LR[:, idx_center, :, :, :], kernel_size=2),
flow_L1[i])
loss_L2 = OFR_loss(LR[:, i, :, :, :], LR[:, idx_center, :, :, :], flow_L2[i])
loss_L3 = OFR_loss(HR[:, i, :, :, :], HR[:, idx_center, :, :, :], flow_L3[i])
loss_OFR = loss_OFR + loss_L3 + 0.2 * loss_L2 + 0.1 * loss_L1
loss = loss_SR + 0.01 * loss_OFR / (n_frames - 1)
loss_list.append(loss.data.cpu())
# backwards
optimizer.zero_grad()
loss.backward()
optimizer.step()
# save checkpoint
if idx_iter % 5000 == 0:
print('Iteration---%6d, loss---%f' % (idx_iter + 1, np.array(loss_list).mean()))
save_path = 'log/' + cfg.degradation + '_x' + str(cfg.scale)
save_name = cfg.degradation + '_x' + str(cfg.scale) + '_iter' + str(idx_iter) + '.pth'
if not os.path.exists(save_path):
os.mkdir(save_path)
torch.save(net.state_dict(), save_path + '/' + save_name)
loss_list = []
if __name__ == '__main__':
cfg = parse_args()
main(cfg)
| 3,333
| 31.686275
| 100
|
py
|
SOF-VSR
|
SOF-VSR-master/TIP/demo_Vid4.py
|
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data_utils import TestsetLoader, ycbcr2rgb
from modules import SOFVSR
from torchvision.transforms import ToPILImage
import numpy as np
import os
import argparse
import torch
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--degradation", type=str, default='BD')
parser.add_argument("--scale", type=int, default=4)
parser.add_argument('--gpu_mode', type=bool, default=True)
parser.add_argument('--testset_dir', type=str, default='data/test/Vid4')
parser.add_argument('--chop_forward', type=bool, default=False)
return parser.parse_args()
def chop_forward(x, model, scale, shave=16, min_size=5000, nGPUs=1):
# divide into 4 patches
b, n, c, h, w = x.size()
h_half, w_half = h // 2, w // 2
h_size, w_size = h_half + shave, w_half + shave
inputlist = [
x[:, :, :, 0:h_size, 0:w_size],
x[:, :, :, 0:h_size, (w - w_size):w],
x[:, :, :, (h - h_size):h, 0:w_size],
x[:, :, :, (h - h_size):h, (w - w_size):w]]
if w_size * h_size < min_size:
outputlist = []
for i in range(0, 4, nGPUs):
input_batch = torch.cat(inputlist[i:(i + nGPUs)], dim=0)
output_batch = model(input_batch)
outputlist.append(output_batch.data)
else:
outputlist = [
chop_forward(patch, model, scale, shave, min_size, nGPUs) \
for patch in inputlist]
h, w = scale * h, scale * w
h_half, w_half = scale * h_half, scale * w_half
h_size, w_size = scale * h_size, scale * w_size
shave *= scale
output = Variable(x.data.new(1, 1, h, w), volatile=True)
output[:, :, 0:h_half, 0:w_half] = outputlist[0][:, :, 0:h_half, 0:w_half]
output[:, :, 0:h_half, w_half:w] = outputlist[1][:, :, 0:h_half, (w_size - w + w_half):w_size]
output[:, :, h_half:h, 0:w_half] = outputlist[2][:, :, (h_size - h + h_half):h_size, 0:w_half]
output[:, :, h_half:h, w_half:w] = outputlist[3][:, :, (h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
return output
def main(cfg):
# model
net = SOFVSR(cfg, is_training=False)
ckpt = torch.load('./log/' + cfg.degradation + '_x' + str(cfg.scale) + '.pth')
net.load_state_dict(ckpt)
if cfg.gpu_mode:
net.cuda()
with torch.no_grad():
video_list = os.listdir(cfg.testset_dir)
for idx_video in range(len(video_list)):
video_name = video_list[idx_video]
# dataloader
test_set = TestsetLoader(cfg, video_name)
test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
for idx_iter, (LR_y_cube, SR_cb, SR_cr) in enumerate(test_loader):
# data
b, n_frames, h_lr, w_lr = LR_y_cube.size()
LR_y_cube = Variable(LR_y_cube)
LR_y_cube = LR_y_cube.view(b, -1, 1, h_lr, w_lr)
if cfg.gpu_mode:
LR_y_cube = LR_y_cube.cuda()
if cfg.chop_forward:
# crop borders to ensure each patch can be divisible by 2
_, _, _, h, w = LR_y_cube.size()
h = int(h//16) * 16
w = int(w//16) * 16
LR_y_cube = LR_y_cube[:, :, :, :h, :w]
SR_cb = SR_cb[:, :h * cfg.scale, :w * cfg.scale]
SR_cr = SR_cr[:, :h * cfg.scale, :w * cfg.scale]
SR_y = chop_forward(LR_y_cube, net, cfg.scale).squeeze(0)
else:
SR_y = net(LR_y_cube).squeeze(0)
else:
SR_y = net(LR_y_cube).squeeze(0)
SR_y = np.array(SR_y.data.cpu())
SR_ycbcr = np.concatenate((SR_y, SR_cb, SR_cr), axis=0).transpose(1,2,0)
SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
SR_rgb = np.clip(SR_rgb, 0, 255)
SR_rgb = ToPILImage()(np.round(SR_rgb).astype(np.uint8))
if not os.path.exists('results/Vid4'):
os.mkdir('results/Vid4')
if not os.path.exists('results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale)):
os.mkdir('results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale))
if not os.path.exists('results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale) + '/' + video_name):
os.mkdir('results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale) + '/' + video_name)
SR_rgb.save('results/Vid4/' + cfg.degradation + '_x' + str(cfg.scale) + '/' + video_name + '/sr_' + str(idx_iter+2).rjust(2,'0') + '.png')
if __name__ == '__main__':
cfg = parse_args()
main(cfg)
| 4,835
| 39.3
| 154
|
py
|
SOF-VSR
|
SOF-VSR-master/ACCV/modules.py
|
import torch
import torch.nn as nn
import numpy as np
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
def optical_flow_warp(image, image_optical_flow):
"""
Arguments
image_ref: reference images tensor, (b, c, h, w)
image_optical_flow: optical flow to image_ref (b, 2, h, w)
"""
b, _ , h, w = image.size()
grid = np.meshgrid(range(w), range(h))
grid = np.stack(grid, axis=-1).astype(np.float64)
grid[:, :, 0] = grid[:, :, 0] * 2 / (w - 1) -1
grid[:, :, 1] = grid[:, :, 1] * 2 / (h - 1) -1
grid = grid.transpose(2, 0, 1)
grid = np.tile(grid, (b, 1, 1, 1))
grid = Variable(torch.Tensor(grid))
    if image_optical_flow.is_cuda:
grid = grid.cuda()
flow_0 = torch.unsqueeze(image_optical_flow[:, 0, :, :] * 31 / (w - 1), dim=1)
flow_1 = torch.unsqueeze(image_optical_flow[:, 1, :, :] * 31 / (h - 1), dim=1)
grid = grid + torch.cat((flow_0, flow_1),1)
grid = grid.transpose(1, 2)
grid = grid.transpose(3, 2)
output = F.grid_sample(image, grid, padding_mode='border')
return output
class make_dense(nn.Module):
def __init__(self, channels_in, channels_out, kernel_size=3):
super(make_dense, self).__init__()
self.leaky_relu = nn.LeakyReLU(0.1, inplace=True)
self.conv = nn.Conv2d(channels_in, channels_out, kernel_size=kernel_size, padding=(kernel_size - 1) // 2,
bias=False)
def forward(self, x):
out = self.leaky_relu(self.conv(x))
out = torch.cat((x, out), 1)
return out
class RDB(nn.Module):
def __init__(self, nDenselayer, channels, growth):
super(RDB, self).__init__()
modules = []
channels_buffer = channels
for i in range(nDenselayer):
modules.append(make_dense(channels_buffer, growth))
channels_buffer += growth
self.dense_layers = nn.Sequential(*modules)
self.conv_1x1 = nn.Conv2d(channels_buffer, channels, kernel_size=1, padding=0, bias=False)
def forward(self, x):
out = self.dense_layers(x)
out = self.conv_1x1(out)
out = out + x
return out
class OFRnet(nn.Module):
def __init__(self, upscale_factor, is_training):
super(OFRnet, self).__init__()
self.pool = nn.AvgPool2d(kernel_size = 2)
self.upsample = nn.Upsample(scale_factor = 2, mode = 'bilinear')
self.final_upsample = nn.Upsample(scale_factor = upscale_factor, mode='bilinear')
self.shuffle = nn.PixelShuffle(upscale_factor)
self.upscale_factor = upscale_factor
self.is_training = is_training
# Level 1
self.conv_L1_1 = nn.Conv2d(2, 32, 3, 1, 1, bias=False)
self.RDB1_1 = RDB(4, 32, 32)
self.RDB1_2 = RDB(4, 32, 32)
self.bottleneck_L1 = nn.Conv2d(64, 2, 3, 1, 1, bias=False)
self.conv_L1_2 = nn.Conv2d(2, 2, 3, 1, 1, bias=True)
# Level 2
self.conv_L2_1 = nn.Conv2d(6, 32, 3, 1, 1, bias=False)
self.RDB2_1 = RDB(4, 32, 32)
self.RDB2_2 = RDB(4, 32, 32)
self.bottleneck_L2 = nn.Conv2d(64, 2, 3, 1, 1, bias=False)
self.conv_L2_2 = nn.Conv2d(2, 2, 3, 1, 1, bias=True)
# Level 3
self.conv_L3_1 = nn.Conv2d(6, 32, 3, 1, 1, bias=False)
self.RDB3_1 = RDB(4, 32, 32)
self.RDB3_2 = RDB(4, 32, 32)
self.bottleneck_L3 = nn.Conv2d(64, 2*upscale_factor**2, 3, 1, 1, bias=False)
self.conv_L3_2 = nn.Conv2d(2*upscale_factor**2, 2*upscale_factor**2, 3, 1, 1, bias=True)
def forward(self, x):
# Level 1
x_L1 = self.pool(x)
_, _, h, w = x_L1.size()
input_L1 = self.conv_L1_1(x_L1)
buffer_1 = self.RDB1_1(input_L1)
buffer_2 = self.RDB1_2(buffer_1)
buffer = torch.cat((buffer_1, buffer_2), 1)
optical_flow_L1 = self.bottleneck_L1(buffer)
optical_flow_L1 = self.conv_L1_2(optical_flow_L1)
optical_flow_L1_upscaled = self.upsample(optical_flow_L1) # *2
if self.is_training is True:
x_L1_res = optical_flow_warp(torch.unsqueeze(x_L1[:, 0, :, :], dim=1), optical_flow_L1) - torch.unsqueeze(x_L1[:, 1, :, :], dim=1)
# Level 2
x_L2 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), optical_flow_L1_upscaled)
x_L2_res = torch.unsqueeze(x[:, 1, :, :], dim=1) - x_L2
x_L2 = torch.cat((x, x_L2, x_L2_res,optical_flow_L1_upscaled), 1)
input_L2 = self.conv_L2_1(x_L2)
buffer_1 = self.RDB2_1(input_L2)
buffer_2 = self.RDB2_2(buffer_1)
buffer = torch.cat((buffer_1, buffer_2), 1)
optical_flow_L2 = self.bottleneck_L2(buffer)
optical_flow_L2 = self.conv_L2_2(optical_flow_L2)
optical_flow_L2 = optical_flow_L2 + optical_flow_L1_upscaled
if self.is_training is True:
x_L2_res = optical_flow_warp(torch.unsqueeze(x_L2[:, 0, :, :], dim=1), optical_flow_L2) - torch.unsqueeze(x_L2[:, 1, :, :], dim=1)
# Level 3
x_L3 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), optical_flow_L2)
x_L3_res = torch.unsqueeze(x[:, 1, :, :], dim=1) - x_L3
x_L3 = torch.cat((x, x_L3, x_L3_res, optical_flow_L2), 1)
input_L3 = self.conv_L3_1(x_L3)
buffer_1 = self.RDB3_1(input_L3)
buffer_2 = self.RDB3_2(buffer_1)
buffer = torch.cat((buffer_1, buffer_2), 1)
optical_flow_L3 = self.bottleneck_L3(buffer)
optical_flow_L3 = self.conv_L3_2(optical_flow_L3)
optical_flow_L3 = self.shuffle(optical_flow_L3) + self.final_upsample(optical_flow_L2) # *4
if self.is_training is False:
return optical_flow_L3
if self.is_training is True:
return x_L1_res, x_L2_res, optical_flow_L1, optical_flow_L2, optical_flow_L3
class SRnet(nn.Module):
def __init__(self, upscale_factor, is_training):
super(SRnet, self).__init__()
self.conv = nn.Conv2d(35, 64, 3, 1, 1, bias=False)
self.RDB_1 = RDB(5, 64, 32)
self.RDB_2 = RDB(5, 64, 32)
self.RDB_3 = RDB(5, 64, 32)
self.RDB_4 = RDB(5, 64, 32)
self.RDB_5 = RDB(5, 64, 32)
self.bottleneck = nn.Conv2d(384, upscale_factor ** 2, 1, 1, 0, bias=False)
self.conv_2 = nn.Conv2d(upscale_factor ** 2, upscale_factor ** 2, 3, 1, 1, bias=True)
self.shuffle = nn.PixelShuffle(upscale_factor=upscale_factor)
self.is_training = is_training
def forward(self, x):
input = self.conv(x)
buffer_1 = self.RDB_1(input)
buffer_2 = self.RDB_2(buffer_1)
buffer_3 = self.RDB_3(buffer_2)
buffer_4 = self.RDB_4(buffer_3)
buffer_5 = self.RDB_5(buffer_4)
output = torch.cat((buffer_1, buffer_2, buffer_3, buffer_4, buffer_5, input), 1)
output = self.bottleneck(output)
output = self.conv_2(output)
output = self.shuffle(output)
return output
class SOFVSR(nn.Module):
def __init__(self, upscale_factor, is_training=False):
super(SOFVSR, self).__init__()
self.upscale_factor = upscale_factor
self.is_training = is_training
self.OFRnet = OFRnet(upscale_factor=upscale_factor, is_training=is_training)
self.SRnet = SRnet(upscale_factor=upscale_factor, is_training=is_training)
def forward(self, x):
input_01 = torch.cat((torch.unsqueeze(x[:, 0, :, :], dim=1), torch.unsqueeze(x[:, 1, :, :], dim=1)), 1)
input_21 = torch.cat((torch.unsqueeze(x[:, 2, :, :], dim=1), torch.unsqueeze(x[:, 1, :, :], dim=1)), 1)
if self.is_training is False:
flow_01_L3 = self.OFRnet(input_01)
flow_21_L3 = self.OFRnet(input_21)
if self.is_training is True:
res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3 = self.OFRnet(input_01)
res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3 = self.OFRnet(input_21)
draft_cube = x
for i in range(self.upscale_factor):
for j in range(self.upscale_factor):
draft_01 = optical_flow_warp(torch.unsqueeze(x[:, 0, :, :], dim=1), flow_01_L3[:, :, i::self.upscale_factor, j::self.upscale_factor]/self.upscale_factor)
draft_21 = optical_flow_warp(torch.unsqueeze(x[:, 2, :, :], dim=1), flow_21_L3[:, :, i::self.upscale_factor, j::self.upscale_factor]/self.upscale_factor)
draft_cube = torch.cat((draft_cube, draft_01, draft_21),1)
output = self.SRnet(draft_cube)
if self.is_training is False:
return torch.squeeze(output)
if self.is_training is True:
return (res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3), \
(res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3), output
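# ---------------------------------------------------------------------------
# Hedged usage sketch, not part of the original repository: at inference the
# ACCV model takes three consecutive LR luminance frames stacked along the
# channel axis and returns the super-resolved centre frame. Heights and
# widths should be even because OFRnet pools by 2; the 16x16 size is arbitrary.
if __name__ == '__main__':
    net = SOFVSR(upscale_factor=4, is_training=False)
    lr_frames = torch.rand(1, 3, 16, 16)  # (batch, 3 frames, h, w) of Y-channel data
    with torch.no_grad():
        sr_centre = net(lr_frames)
    print(sr_centre.shape)  # expected: torch.Size([64, 64]) after the final squeeze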
| 8,788
| 46.766304
| 169
|
py
|
SOF-VSR
|
SOF-VSR-master/ACCV/data_utils.py
|
import numpy as np
from PIL import Image
import os
import torch
from torch.utils.data.dataset import Dataset
import math
import random
class TrainsetLoader(Dataset):
def __init__(self, trainset_dir, upscale_factor, patch_size, n_iters):
super(TrainsetLoader).__init__()
self.trainset_dir = trainset_dir
self.upscale_factor = upscale_factor
self.patch_size = patch_size
self.n_iters = n_iters
self.video_list = os.listdir(trainset_dir)
def __getitem__(self, idx):
idx_video = random.randint(0, self.video_list.__len__()-1)
idx_frame = random.randint(0, 28)
lr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/lr_x' + str(self.upscale_factor) + '_BI'
hr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/hr'
# read HR & LR frames
LR0 = Image.open(lr_dir + '/lr' + str(idx_frame) + '.png')
LR1 = Image.open(lr_dir + '/lr' + str(idx_frame + 1) + '.png')
LR2 = Image.open(lr_dir + '/lr' + str(idx_frame + 2) + '.png')
HR0 = Image.open(hr_dir + '/hr' + str(idx_frame) + '.png')
HR1 = Image.open(hr_dir + '/hr' + str(idx_frame + 1) + '.png')
HR2 = Image.open(hr_dir + '/hr' + str(idx_frame + 2) + '.png')
LR0 = np.array(LR0, dtype=np.float32) / 255.0
LR1 = np.array(LR1, dtype=np.float32) / 255.0
LR2 = np.array(LR2, dtype=np.float32) / 255.0
HR0 = np.array(HR0, dtype=np.float32) / 255.0
HR1 = np.array(HR1, dtype=np.float32) / 255.0
HR2 = np.array(HR2, dtype=np.float32) / 255.0
# extract Y channel for LR inputs
HR0 = rgb2y(HR0)
HR1 = rgb2y(HR1)
HR2 = rgb2y(HR2)
LR0 = rgb2y(LR0)
LR1 = rgb2y(LR1)
LR2 = rgb2y(LR2)
# crop patchs randomly
HR0, HR1, HR2, LR0, LR1, LR2 = random_crop(HR0, HR1, HR2, LR0, LR1, LR2, self.patch_size, self.upscale_factor)
HR0 = HR0[:, :, np.newaxis]
HR1 = HR1[:, :, np.newaxis]
HR2 = HR2[:, :, np.newaxis]
LR0 = LR0[:, :, np.newaxis]
LR1 = LR1[:, :, np.newaxis]
LR2 = LR2[:, :, np.newaxis]
HR = np.concatenate((HR0, HR1, HR2), axis=2)
LR = np.concatenate((LR0, LR1, LR2), axis=2)
# data augmentation
LR, HR = augmentation()(LR, HR)
return toTensor(LR), toTensor(HR)
def __len__(self):
return self.n_iters
class TestsetLoader(Dataset):
def __init__(self, dataset_dir, upscale_factor):
super(TestsetLoader).__init__()
self.dataset_dir = dataset_dir
self.upscale_factor = upscale_factor
self.frame_list = os.listdir(self.dataset_dir + '/lr_x' + str(self.upscale_factor))
def __getitem__(self, idx):
dir = self.dataset_dir + '/lr_x' + str(self.upscale_factor)
LR0 = Image.open(dir + '/' + 'lr_' + str(idx+1).rjust(2, '0') + '.png')
LR1 = Image.open(dir + '/' + 'lr_' + str(idx+2).rjust(2, '0') + '.png')
LR2 = Image.open(dir + '/' + 'lr_' + str(idx+3).rjust(2, '0') + '.png')
W, H = LR1.size
# H and W should be divisible by 2
W = int(W // 2) * 2
H = int(H // 2) * 2
LR0 = LR0.crop([0, 0, W, H])
LR1 = LR1.crop([0, 0, W, H])
LR2 = LR2.crop([0, 0, W, H])
LR1_bicubic = LR1.resize((W*self.upscale_factor, H*self.upscale_factor), Image.BICUBIC)
LR1_bicubic = np.array(LR1_bicubic, dtype=np.float32) / 255.0
LR0 = np.array(LR0, dtype=np.float32) / 255.0
LR1 = np.array(LR1, dtype=np.float32) / 255.0
LR2 = np.array(LR2, dtype=np.float32) / 255.0
# extract Y channel for LR inputs
LR0_y, _, _ = rgb2ycbcr(LR0)
LR1_y, _, _ = rgb2ycbcr(LR1)
LR2_y, _, _ = rgb2ycbcr(LR2)
LR0_y = LR0_y[:, :, np.newaxis]
LR1_y = LR1_y[:, :, np.newaxis]
LR2_y = LR2_y[:, :, np.newaxis]
LR = np.concatenate((LR0_y, LR1_y, LR2_y), axis=2)
LR = toTensor(LR)
# generate Cr, Cb channels using bicubic interpolation
_, SR_cb, SR_cr = rgb2ycbcr(LR1_bicubic)
return LR, SR_cb, SR_cr
def __len__(self):
return self.frame_list.__len__() - 2
class augmentation(object):
def __call__(self, input, target):
if random.random()<0.5:
input = input[:, ::-1, :]
target = target[:, ::-1, :]
if random.random()<0.5:
input = input[::-1, :, :]
target = target[::-1, :, :]
if random.random()<0.5:
input = input.transpose(1, 0, 2)
target = target.transpose(1, 0, 2)
return np.ascontiguousarray(input), np.ascontiguousarray(target)
def random_crop(HR0, HR1, HR2, LR0, LR1, LR2, patch_size_lr, upscale_factor):
h_hr, w_hr = HR0.shape
h_lr = h_hr // upscale_factor
w_lr = w_hr // upscale_factor
idx_h = random.randint(10, h_lr - patch_size_lr - 10)
idx_w = random.randint(10, w_lr - patch_size_lr - 10)
h_start_hr = (idx_h - 1) * upscale_factor
h_end_hr = (idx_h - 1 + patch_size_lr) * upscale_factor
w_start_hr = (idx_w - 1) * upscale_factor
w_end_hr = (idx_w - 1 + patch_size_lr) * upscale_factor
h_start_lr = idx_h - 1
h_end_lr = idx_h - 1 + patch_size_lr
w_start_lr = idx_w - 1
w_end_lr = idx_w - 1 + patch_size_lr
HR0 = HR0[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
HR1 = HR1[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
HR2 = HR2[h_start_hr:h_end_hr, w_start_hr:w_end_hr]
LR0 = LR0[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
LR1 = LR1[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
LR2 = LR2[h_start_lr:h_end_lr, w_start_lr:w_end_lr]
return HR0, HR1, HR2, LR0, LR1, LR2
def toTensor(img):
img = torch.from_numpy(img.transpose((2, 0, 1)))
return img
def rgb2ycbcr(img_rgb):
## the range of img_rgb should be (0, 1)
img_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] + 16 / 255.0
img_cb = -0.148 * img_rgb[:, :, 0] - 0.291 * img_rgb[:, :, 1] + 0.439 * img_rgb[:, :, 2] + 128 / 255.0
img_cr = 0.439 * img_rgb[:, :, 0] - 0.368 * img_rgb[:, :, 1] - 0.071 * img_rgb[:, :, 2] + 128 / 255.0
return img_y, img_cb, img_cr
def ycbcr2rgb(img_ycbcr):
## the range of img_ycbcr should be (0, 1)
img_r = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 1.596 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_g = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) - 0.392 * (img_ycbcr[:, :, 1] - 128 / 255.0) - 0.813 * (img_ycbcr[:, :, 2] - 128 / 255.0)
img_b = 1.164 * (img_ycbcr[:, :, 0] - 16 / 255.0) + 2.017 * (img_ycbcr[:, :, 1] - 128 / 255.0)
img_r = img_r[:, :, np.newaxis]
img_g = img_g[:, :, np.newaxis]
img_b = img_b[:, :, np.newaxis]
img_rgb = np.concatenate((img_r, img_g, img_b), 2)
return img_rgb
def rgb2y(img_rgb):
## the range of img_rgb should be (0, 1)
image_y = 0.257 * img_rgb[:, :, 0] + 0.504 * img_rgb[:, :, 1] + 0.098 * img_rgb[:, :, 2] +16 / 255.0
return image_y
| 6,999
| 40.666667
| 143
|
py
|
SOF-VSR
|
SOF-VSR-master/ACCV/train.py
|
import os
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from modules import SOFVSR, optical_flow_warp
import argparse
from data_utils import TrainsetLoader
import numpy as np
import matplotlib.pyplot as plt
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--upscale_factor", type=int, default=4)
parser.add_argument('--gpu_mode', type=bool, default=False)
parser.add_argument('--patch_size', type=int, default=32)
parser.add_argument('--batch_size', type=int, default=16)
parser.add_argument('--n_iters', type=int, default=300000, help='number of iterations to train')
parser.add_argument('--trainset_dir', type=str, default='data/train')
return parser.parse_args()
def main(cfg):
use_gpu = cfg.gpu_mode
net = SOFVSR(cfg.upscale_factor, is_training=True)
if use_gpu:
net.cuda()
cudnn.benchmark = True
train_set = TrainsetLoader(cfg.trainset_dir, cfg.upscale_factor, cfg.patch_size, cfg.n_iters*cfg.batch_size)
train_loader = DataLoader(train_set, num_workers=4, batch_size=cfg.batch_size, shuffle=True)
# train
optimizer = torch.optim.Adam(net.parameters(), lr=1e-4)
criterion_L2 = torch.nn.MSELoss()
if use_gpu:
criterion_L2 = criterion_L2.cuda()
milestones = [50000, 100000, 150000, 200000, 250000]
scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=0.5)
loss_list = []
for idx_iter, (LR, HR) in enumerate(train_loader):
scheduler.step()
LR, HR = Variable(LR), Variable(HR)
if use_gpu:
LR = LR.cuda()
HR = HR.cuda()
(res_01_L1, res_01_L2, flow_01_L1, flow_01_L2, flow_01_L3), (
res_21_L1, res_21_L2, flow_21_L1, flow_21_L2, flow_21_L3), SR = net(LR)
warped_01 = optical_flow_warp(torch.unsqueeze(HR[:, 0, :, :], dim=1), flow_01_L3)
warped_21 = optical_flow_warp(torch.unsqueeze(HR[:, 2, :, :], dim=1), flow_21_L3)
# losses
loss_SR = criterion_L2(SR, torch.unsqueeze(HR[:, 1, :, :], 1))
loss_OFR_1 = 1 * (criterion_L2(warped_01, torch.unsqueeze(HR[:, 1, :, :], 1)) + 0.01 * L1_regularization(flow_01_L3)) + \
0.25 * (torch.mean(res_01_L2 ** 2) + 0.01 * L1_regularization(flow_01_L2)) + \
0.125 * (torch.mean(res_01_L1 ** 2) + 0.01 * L1_regularization(flow_01_L1))
loss_OFR_2 = 1 * (criterion_L2(warped_21, torch.unsqueeze(HR[:, 1, :, :], 1)) + 0.01 * L1_regularization(flow_21_L3)) + \
0.25 * (torch.mean(res_21_L2 ** 2) + 0.01 * L1_regularization(flow_21_L2)) + \
0.125 * (torch.mean(res_21_L1 ** 2) + 0.01 * L1_regularization(flow_21_L1))
loss = loss_SR + 0.01 * (loss_OFR_1 + loss_OFR_2) / 2
loss_list.append(loss.data.cpu())
optimizer.zero_grad()
loss.backward()
optimizer.step()
# save checkpoint
if idx_iter % 5000 == 0:
print('Iteration---%6d, loss---%f' % (idx_iter + 1, np.array(loss_list).mean()))
torch.save(net.state_dict(), 'log/BI_x' + str(cfg.upscale_factor) + '_iter' + str(idx_iter) + '.pth')
loss_list = []
def L1_regularization(image):
b, _, h, w = image.size()
reg_x_1 = image[:, :, 0:h-1, 0:w-1] - image[:, :, 1:, 0:w-1]
reg_y_1 = image[:, :, 0:h-1, 0:w-1] - image[:, :, 0:h-1, 1:]
reg_L1 = torch.abs(reg_x_1) + torch.abs(reg_y_1)
return torch.sum(reg_L1) / (b*(h-1)*(w-1))
if __name__ == '__main__':
cfg = parse_args()
main(cfg)
| 3,641
| 38.16129
| 129
|
py
|
SOF-VSR
|
SOF-VSR-master/ACCV/demo_Vid4.py
|
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from data_utils import TestsetLoader, ycbcr2rgb
import numpy as np
from torchvision.transforms import ToPILImage
import os
import argparse
from modules import SOFVSR
import math
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--video_name", type=str, default="calendar")
parser.add_argument("--upscale_factor", type=int, default=4)
parser.add_argument('--gpu_mode', type=bool, default=False)
parser.add_argument('--chop_forward', type=bool, default=False)
return parser.parse_args()
def chop_forward(x, model, scale, shave=16, min_size=5000, nGPUs=1):
b, c, h, w = x.size()
h_half, w_half = h // 2, w // 2
h_size, w_size = h_half + shave, w_half + shave
inputlist = [
x[:, :, 0:h_size, 0:w_size],
x[:, :, 0:h_size, (w - w_size):w],
x[:, :, (h - h_size):h, 0:w_size],
x[:, :, (h - h_size):h, (w - w_size):w]]
if w_size * h_size < min_size:
outputlist = []
for i in range(0, 4, nGPUs):
input_batch = torch.cat(inputlist[i:(i + nGPUs)], dim=0)
output_batch = model(input_batch)
outputlist.append(output_batch.data)
else:
outputlist = [
chop_forward(patch, model, scale, shave, min_size, nGPUs) \
for patch in inputlist]
h, w = scale * h, scale * w
h_half, w_half = scale * h_half, scale * w_half
h_size, w_size = scale * h_size, scale * w_size
shave *= scale
output = Variable(x.data.new(h, w), volatile=True)
output[0:h_half, 0:w_half] = outputlist[0][0:h_half, 0:w_half]
output[0:h_half, w_half:w] = outputlist[1][0:h_half, (w_size - w + w_half):w_size]
output[h_half:h, 0:w_half] = outputlist[2][(h_size - h + h_half):h_size, 0:w_half]
output[h_half:h, w_half:w] = outputlist[3][(h_size - h + h_half):h_size, (w_size - w + w_half):w_size]
return output
def main(cfg):
video_name = cfg.video_name
upscale_factor = cfg.upscale_factor
use_gpu = cfg.gpu_mode
test_set = TestsetLoader('data/test/'+ video_name, upscale_factor)
test_loader = DataLoader(test_set, num_workers=1, batch_size=1, shuffle=False)
net = SOFVSR(upscale_factor=upscale_factor)
ckpt = torch.load('./log/SOFVSR_x' + str(upscale_factor) + '.pth')
net.load_state_dict(ckpt)
if use_gpu:
net.cuda()
for idx_iter, (LR_y_cube, SR_cb, SR_cr) in enumerate(test_loader):
LR_y_cube = Variable(LR_y_cube)
if use_gpu:
LR_y_cube = LR_y_cube.cuda()
if cfg.chop_forward:
# crop borders to ensure each patch can be divisible by 2
_, _, h, w = LR_y_cube.size()
h = int(h//16) * 16
w = int(w//16) * 16
LR_y_cube = LR_y_cube[:, :, :h, :w]
SR_cb = SR_cb[:, :h * upscale_factor, :w * upscale_factor]
SR_cr = SR_cr[:, :h * upscale_factor, :w * upscale_factor]
SR_y = chop_forward(LR_y_cube, net, cfg.upscale_factor)
else:
SR_y = net(LR_y_cube)
else:
SR_y = net(LR_y_cube)
SR_y = np.array(SR_y.data)
SR_y = SR_y[np.newaxis, :, :]
SR_ycbcr = np.concatenate((SR_y, SR_cb, SR_cr), axis=0).transpose(1,2,0)
SR_rgb = ycbcr2rgb(SR_ycbcr) * 255.0
SR_rgb = np.clip(SR_rgb, 0, 255)
SR_rgb = ToPILImage()(SR_rgb.astype(np.uint8))
if not os.path.exists('results/' + video_name):
os.mkdir('results/' + video_name)
SR_rgb.save('results/'+video_name+'/sr_'+ str(idx_iter+2).rjust(2,'0') + '.png')
if __name__ == '__main__':
cfg = parse_args()
main(cfg)
| 3,762
| 37.010101
| 106
|
py
|
SOF-VSR
|
SOF-VSR-master/ACCV/metrics/evaluation.py
|
import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
from math import log10
def gaussian(window_size, sigma):
gauss = torch.Tensor([exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])
return gauss / gauss.sum()
def create_window(window_size, channel):
_1D_window = gaussian(window_size, 1.5).unsqueeze(1)
_2D_window = _1D_window.mm(_1D_window.t()).float().unsqueeze(0).unsqueeze(0)
window = Variable(_2D_window.expand(channel, 1, window_size, window_size).contiguous())
return window
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
def ssim(img1, img2, upscale_factor, window_size=11, size_average=True):
_, channel, h, w = img1.size()
img1_ = img1[:, :, 6 + upscale_factor: h - 6 - upscale_factor, 6 + upscale_factor: w - 6 - upscale_factor]
img2_ = img2[:, :, 6 + upscale_factor: h - 6 - upscale_factor, 6 + upscale_factor: w - 6 - upscale_factor]
window = create_window(window_size, channel)
if img1.is_cuda:
window = window.cuda(img1_.get_device())
window = window.type_as(img1_)
return _ssim(img1_, img2_, window, window_size, channel, size_average)
def psnr(img1, img2, upscale_factor):
_, _, h, w = img1.size()
img1_ = img1[:, :, 6 + upscale_factor: h - 6 - upscale_factor, 6 + upscale_factor: w - 6 - upscale_factor]
img2_ = img2[:, :, 6 + upscale_factor: h - 6 - upscale_factor, 6 + upscale_factor: w - 6 - upscale_factor]
mse = torch.sum((img1_ - img2_) ** 2) / img1_.numel()
psnr = 10 * log10(1 / mse)
return psnr
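# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original repository: both metrics crop a
# (6 + upscale_factor)-pixel border before comparing, so the inputs must be
# larger than twice that margin. Random images are used only to show the
# call signatures; the reported values are meaningless.
if __name__ == '__main__':
    a = torch.rand(1, 1, 64, 64)
    b = torch.rand(1, 1, 64, 64)
    print('PSNR:', psnr(a, b, 4))
    print('SSIM:', float(ssim(a, b, 4)))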
| 2,423
| 40.793103
| 114
|
py
|
finmag
|
finmag-master/install/memory-warning.py
|
import sys
from psutil import virtual_memory
mem = virtual_memory()
mem.total  # total physical memory in bytes
GB = float(1024**3)  # float needed if we run with Python 2
if mem.total / GB < 3.8:
print("Warning: building native modules may fail due to not enough physical memory.")
print("You have {:.1f} GB available.\n".format(mem.total/GB))
print("\tContext: The C++ compiler needs lots of RAM. 4GB seem sufficient (Nov 2016)\n")
print("\tIf you are on OSX / Windows, and using Docker, try this")
print("""\t\tdocker-machine stop
\t\tVBoxManage modifyvm default --memory 4096
\t\tdocker-machine start\n""")
print("\tSource: http://stackoverflow.com/questions/32834082/how-to-increase-docker-machine-memory-mac")
print("\nIf you are using OSX, and use Docker >= 1.13, then click")
print("on the Docker item, then -> Preferences -> Advanced and")
print("adjust the memory to be >= 4.0GB")
sys.exit(1)
| 944
| 41.954545
| 108
|
py
|
finmag
|
finmag-master/install/user-only-system/test_libraries.py
|
def test_dolfin():
import dolfin
print("Found dolfin %s" % dolfin.__version__)
def test_scipy():
import scipy
print("Found scipy %s" % scipy.__version__)
def test_numpy():
import numpy
print("Found numpy %s" % numpy.__version__)
def test_matplotlib():
import matplotlib
print("Found matplotlib %s" % matplotlib.__version__)
def test_ipython():
import IPython
print("Found Ipython %s" % IPython.__version__)
def report_module_presence_and_version(modulename):
try:
mod = __import__(modulename)
except ImportError:
return 'missing (ImportError)'
return mod.__version__
if __name__ == "__main__": # run as python program
import sys
print("%15s -> %s" % ("Python",sys.version.split()[0]))
for modulename in ['IPython', 'numpy',
'matplotlib', 'scipy',
'dolfin']:
print("%15s -> %s" % (modulename,
report_module_presence_and_version(modulename)))
| 1,011
| 25.631579
| 76
|
py
|
finmag
|
finmag-master/examples/exchange_demag/simple_1D_finmag.py
|
import dolfin as df
from finmag.field import Field
from finmag.energies import Exchange
import numpy as np
import matplotlib.pylab as plt
import os
import subprocess
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
# run nmag
subprocess.call("nsim {} --clean".format(os.path.join(MODULE_DIR, "simple_1D_nmag.py")), shell=True)
nd = np.load(os.path.join(MODULE_DIR, "nmag_hansconf.npy"))
# run finmag
mesh = df.IntervalMesh(100, 0, 10e-9)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
DG0 = df.FunctionSpace(mesh, "DG", 0)
m = Field(S3, df.Expression(("cos(x[0]*pi/10e-9)", "sin(x[0]*pi/10e-9)", "0"), degree=1))
Ms = Field(DG0, 1.0)
exchange = Exchange(1.3e-11)
exchange.setup(m, Ms)
fd = exchange.energy_density()
# draw an ASCII table of the findings
table_border = "+" + "-" * 8 + "+" + "-" * 64 + "+"
table_entries = "| {:<6} | {:<20} {:<20} {:<20} |"
table_entries_f = "| {:<6} | {:<20.8f} {:<20.8f} {:<20g} |"
print table_border
print table_entries.format(" ", "min", "max", "delta")
print table_border
print table_entries_f.format("finmag", min(fd), max(fd), max(fd)-min(fd))
print table_entries_f.format("nmag", min(nd), max(nd), max(nd)-min(nd))
print table_border
# draw a plot of the two exchange energy densities
xs = mesh.coordinates().flatten()
figure, (upper_axis, lower_axis) = plt.subplots(2, 1, sharex=True)
upper_axis.plot(xs, fd, "b-", label="finmag")
lower_axis.plot(xs, nd, "r-", label="nmag")
upper_axis.legend().draw_frame(False)
lower_axis.legend().draw_frame(False)
EPSILON = 1e-6 # to make sure the same zoom is used on both parts of the figure
upper_axis.set_ylim(np.mean(fd) - EPSILON, np.mean(fd) + EPSILON)
lower_axis.set_ylim(np.mean(nd) - EPSILON, np.mean(nd) + EPSILON)
# make the plot prettier
# from https://github.com/matplotlib/matplotlib/blob/master/examples/pylab_examples/broken_axis.py
upper_axis.spines['bottom'].set_visible(False)
lower_axis.spines['top'].set_visible(False)
upper_axis.xaxis.tick_top()
upper_axis.tick_params(labeltop="off")
lower_axis.xaxis.tick_bottom()
#plt.show()
plt.savefig(os.path.join(MODULE_DIR, "simple1D.png"))
plt.close()
| 2,129
| 32.809524
| 100
|
py
|
finmag
|
finmag-master/examples/exchange_demag/run_nmag.py
|
import nmag
from nmag import SI
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(0.86e6,"A/m"),
exchange_coupling=SI(13.0e-12, "J/m"),
llg_damping=0.5)
sim = nmag.Simulation("bar")
sim.load_mesh("bar30_30_100.nmesh.h5",
[("Py", mat_Py)],
unit_length=SI(1e-9,"m"))
sim.set_m([1,0,1])
dt = SI(5e-12, "s")
for i in range(0, 61):
sim.advance_time(dt*i) #compute time development
if i % 10 == 0: #every 10 loop iterations,
sim.save_data(fields="all") #save averages and all
#fields spatially resolved
else:
sim.save_data() #otherwise just save averages
if i == 10:
f = open("nmag_exch_Edensity.txt", "w")
f2 = open("nmag_demag_Edensity.txt", "w")
        for j in range(100):  # use j to avoid shadowing the outer time-loop counter
            f.write("%g " % sim.probe_subfield_siv("E_exch_Py", [15e-9, 15e-9, 1e-9*j]))
            f2.write("%g " % sim.probe_subfield_siv("E_demag_Py", [15e-9, 15e-9, 1e-9*j]))
f.close()
f2.close()
| 1,158
| 30.324324
| 90
|
py
|
finmag
|
finmag-master/examples/exchange_demag/simple_1D_nmag.py
|
import numpy as np
import nmag, os
from nmag import SI
import nmeshlib.unidmesher as unidmesher
def main():
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
L = 10
mesh_unit = SI(1e-9, "m") # mesh unit (1 nm)
layers = [(0.0, L)] # the mesh
discretization = 0.1 # discretization
# Initial magnetization
xfactor = float(SI("m")/(L*mesh_unit))
def m0(r):
return [np.cos(r[0]*np.pi*xfactor), np.sin(r[0]*np.pi*xfactor), 0]
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(1,"A/m"))
sim = nmag.Simulation("Hans' configuration", do_demag=False)
mesh_file_name = '1d.nmesh'
mesh_lists = unidmesher.mesh_1d(layers, discretization)
unidmesher.write_mesh(mesh_lists, out=mesh_file_name)
sim.load_mesh(mesh_file_name,
[("Py", mat_Py)],
unit_length=mesh_unit)
sim.set_m(m0)
np.save(os.path.join(MODULE_DIR, "nmag_hansconf.npy"), sim.get_subfield("E_exch_Py"))
if __name__ == '__main__':
main()
| 1,054
| 25.375
| 89
|
py
|
finmag
|
finmag-master/examples/exchange_demag/test_exchange_demag.py
|
import os
import logging
import matplotlib
matplotlib.use('Agg')
import pylab as p
import numpy as np
import dolfin as df
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag
from finmag.util.meshes import from_geofile, mesh_volume
import pytest
logger = logging.getLogger(name='finmag')
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
REL_TOLERANCE = 5e-4
Ms = 0.86e6
unit_length = 1e-9
mesh = from_geofile(os.path.join(MODULE_DIR, "bar30_30_100.geo"))
def run_finmag():
"""Run the finmag simulation and store data in averages.txt."""
sim = Sim(mesh, Ms, unit_length=unit_length)
sim.alpha = 0.5
sim.set_m((1, 0, 1))
exchange = Exchange(13.0e-12)
sim.add(exchange)
demag = Demag(solver="FK")
sim.add(demag)
fh = open(os.path.join(MODULE_DIR, "averages.txt"), "w")
fe = open(os.path.join(MODULE_DIR, "energies.txt"), "w")
logger.info("Time integration")
times = np.linspace(0, 3.0e-10, 61)
for counter, t in enumerate(times):
# Integrate
sim.run_until(t)
# Save averages to file
mx, my, mz = sim.m_average
fh.write(str(t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
# Energies
E_e = exchange.compute_energy()
E_d = demag.compute_energy()
fe.write(str(E_e) + " " + str(E_d) + "\n")
# Energy densities
if counter == 10:
exch_energy = exchange.energy_density_function()
demag_energy = demag.energy_density_function()
finmag_exch, finmag_demag = [], []
R = range(100)
for i in R:
finmag_exch.append(exch_energy([15, 15, i]))
finmag_demag.append(demag_energy([15, 15, i]))
# Store data
np.save(os.path.join(MODULE_DIR, "finmag_exch_density.npy"), np.array(finmag_exch))
np.save(os.path.join(MODULE_DIR, "finmag_demag_density.npy"), np.array(finmag_demag))
fh.close()
fe.close()
@pytest.mark.slow
def test_compare_averages():
ref = np.loadtxt(os.path.join(MODULE_DIR, "averages_ref.txt"))
if not os.path.isfile(os.path.join(MODULE_DIR, "averages.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, "averages.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
computed = np.loadtxt(os.path.join(MODULE_DIR, "averages.txt"))
dt = ref[:,0] - computed[:,0]
assert np.max(dt) < 1e-15, "Compare timesteps."
ref1, computed1 = np.delete(ref, [0], 1), np.delete(computed, [0], 1)
diff = ref1 - computed1
print "max difference: %g" % np.max(diff)
rel_diff = np.abs(diff / np.sqrt(ref1[0]**2 + ref1[1]**2 + ref1[2]**2))
print "test_averages, max. relative difference per axis:"
print np.nanmax(rel_diff, axis=0)
err = np.nanmax(rel_diff)
if err > 1e-2:
print "nmag:\n", ref1
print "finmag:\n", computed1
assert err < REL_TOLERANCE, "Relative error = {} is larger " \
"than tolerance (= {})".format(err, REL_TOLERANCE)
# Plot nmag data
nmagt = list(ref[:,0])*3
nmagy = list(ref[:,1]) + list(ref[:,2]) + list(ref[:,3])
p.plot(nmagt, nmagy, 'o', mfc='w', label='nmag')
# Plot finmag data
t = computed[:, 0]
x = computed[:, 1]
y = computed[:, 2]
z = computed[:, 3]
p.plot(t, x, 'k', label='$m_\mathrm{x}$ finmag')
p.plot(t, y, 'b-.', label='$m_\mathrm{y}$')
p.plot(t, z, 'r', label='$m_\mathrm{z}$')
p.axis([0, max(t), -0.2, 1.1])
p.xlabel("time (s)")
p.ylabel("$m$")
p.legend(loc='center right')
p.savefig(os.path.join(MODULE_DIR, "exchange_demag.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_demag.png"))
#p.show
p.close()
print "Comparison of development written to exchange_demag.pdf"
@pytest.mark.slow
def test_compare_energies():
ref = np.loadtxt(os.path.join(MODULE_DIR, "energies_ref.txt"))
if not os.path.isfile(os.path.join(MODULE_DIR, "energies.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, "energies.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
computed = np.loadtxt(os.path.join(MODULE_DIR, "energies.txt"))
assert np.size(ref) == np.size(computed), "Compare number of energies."
vol = mesh_volume(mesh)*unit_length**mesh.topology().dim()
#30x30x100nm^3 = 30x30x100=9000
# Compare exchange energy...
exch = computed[:, 0]/vol # <-- ... density!
exch_nmag = ref[:, 0]
diff = abs(exch - exch_nmag)
rel_diff = np.abs(diff / max(exch))
print "Exchange energy, max relative error:", max(rel_diff)
assert max(rel_diff) < 0.002, \
"Max relative error in exchange energy = {} is larger than " \
"tolerance (= {})".format(max(rel_diff), REL_TOLERANCE)
# Compare demag energy
demag = computed[:, 1]/vol
demag_nmag = ref[:, 1]
diff = abs(demag - demag_nmag)
rel_diff = np.abs(diff / max(demag))
print "Demag energy, max relative error:", max(rel_diff)
# Don't really know why this is ten times higher than everything else.
assert max(rel_diff) < REL_TOLERANCE*10, \
"Max relative error in demag energy = {} is larger than " \
"tolerance (= {})".format(max(rel_diff), REL_TOLERANCE)
# Plot
p.plot(exch_nmag, 'o', mfc='w', label='nmag')
p.plot(exch, label='finmag')
p.xlabel("time step")
p.ylabel("$e_\mathrm{exch}\, (\mathrm{Jm^{-3}})$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "exchange_energy.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_energy.png"))
p.close()
p.plot(demag_nmag, 'o', mfc='w', label='nmag')
p.plot(demag, label='finmag')
p.xlabel("time step")
p.ylabel("$e_\mathrm{demag}\, (\mathrm{Jm^{-3}})$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "demag_energy.pdf"))
p.savefig(os.path.join(MODULE_DIR, "demag_energy.png"))
#p.show()
p.close()
print "Energy plots written to exchange_energy.pdf and demag_energy.pdf"
@pytest.mark.slow
def test_compare_energy_density():
"""
After ten time steps, compute the energy density through
the center of the bar (seen from x and y) from z=0 to z=100,
and compare the results with nmag and oommf.
"""
R = range(100)
# Run simulation only if not run before or changed since last time.
if not (os.path.isfile(os.path.join(MODULE_DIR, "finmag_exch_density.npy"))):
run_finmag()
elif (os.path.getctime(os.path.join(MODULE_DIR, "finmag_exch_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
if not (os.path.isfile(os.path.join(MODULE_DIR, "finmag_demag_density.npy"))):
run_finmag()
elif (os.path.getctime(os.path.join(MODULE_DIR, "finmag_demag_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag()
# Read finmag data
finmag_exch = np.load(os.path.join(MODULE_DIR, "finmag_exch_density.npy"))
finmag_demag = np.load(os.path.join(MODULE_DIR, "finmag_demag_density.npy"))
# Read nmag data
nmag_exch = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_exch_Edensity.txt"), "r").read().split()]
nmag_demag = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_demag_Edensity.txt"), "r").read().split()]
# Compare with nmag
nmag_exch = np.array(nmag_exch)
nmag_demag = np.array(nmag_demag)
rel_error_exch_nmag = np.abs(finmag_exch - nmag_exch)/np.linalg.norm(nmag_exch)
rel_error_demag_nmag = np.abs(finmag_demag - nmag_demag)/np.linalg.norm(nmag_demag)
print "Exchange energy density, max relative error from nmag:", max(rel_error_exch_nmag)
print "Demag energy density, max relative error from nmag:", max(rel_error_demag_nmag)
TOL_EXCH = 3e-2
TOL_DEMAG = 1e-2
assert max(rel_error_exch_nmag) < TOL_EXCH, \
"Exchange energy density, max relative error from nmag = {} is " \
"larger than tolerance (= {})".format(max(rel_error_exch_nmag), TOL_EXCH)
assert max(rel_error_demag_nmag) < TOL_DEMAG, \
"Demag energy density, max relative error from nmag = {} is larger " \
"than tolarance (= {})".format(max(rel_error_demag_nmag), TOL_DEMAG)
# Read oommf data
oommf_exch = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_exch_Edensity.txt"))
oommf_demag = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_demag_Edensity.txt"))
oommf_coords = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_coords_z_axis.txt")) * 1e9
# Compare with oomf - FIXME: doesn't work at the moment
#rel_error_exch_oomf = np.abs(finmag_exch - oommf_exch)/np.linalg.norm(oommf_exch)
#rel_error_demag_oomf = np.abs(finmag_demag - oommf_demag)/np.linalg.norm(oommf_demag)
#print "Rel error exch, oommf:", max(rel_error_exch_oommf)
#print "Rel error demag, oommf:", max(rel_error_demag_oommf)
# Plot exchange energy density
p.plot(R, finmag_exch, 'k-')
p.plot(R, nmag_exch, 'r^:', alpha=0.5)
p.plot(oommf_coords, oommf_exch, "bv:", alpha=0.5)
p.xlabel("$x\, (\mathrm{nm})$")
p.ylabel("$e_\mathrm{exch}\, (\mathrm{Jm^{-3}})$")
p.legend(["finmag", "nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "exchange_density.pdf"))
p.savefig(os.path.join(MODULE_DIR, "exchange_density.png"))
p.close()
# Plot demag energy density
p.plot(R, finmag_demag, 'k-')
p.plot(R, nmag_demag, 'r^:', alpha=0.5)
p.plot(oommf_coords, oommf_demag, "bv:", alpha=0.5)
p.xlabel("$x\, (\mathrm{nm})$")
p.ylabel("$e_\mathrm{demag}\, (\mathrm{Jm^{-3}})$")
p.legend(["finmag", "nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "demag_density.pdf"))
p.savefig(os.path.join(MODULE_DIR, "demag_density.png"))
#p.show()
p.close()
print "Energy density plots written to exchange_density.pdf and demag_density.pdf"
if __name__ == '__main__':
run_finmag()
test_compare_averages()
test_compare_energies()
test_compare_energy_density()
| 10,063
| 36.412639
| 114
|
py
|
finmag
|
finmag-master/examples/exchange_demag/ztest_exchange_demag_new.py
|
""" What is this file? It appears that Gabrial has taken the test_exchange_demag.py and
tried to add the GCR solver for the nmag 2 example. The test fails as the system seems to
hang somewhere.
He then copied that file from test_exchange_demag.py to ztest_exchange_demag_new.py. Note
the z in the beginning which prevents the file from being run automatically as a
regression test.
It would be good to understand at some point why the GCR solver hangs, but it would also
be good to do many other things.
Hans, 26 June 5:30am, somewhere over Malaysia.
"""
import os
import logging
import pylab as p
import numpy as np
import dolfin as df
import progressbar as pb
import finmag.util.helpers as h
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag
from finmag.util.meshes import from_geofile, mesh_volume
logger = logging.getLogger(name='finmag')
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
REL_TOLERANCE = 1e-4
Ms = 0.86e6
unit_length = 1e-9
mesh = from_geofile(os.path.join(MODULE_DIR, "bar30_30_100.geo"))
demagsolvers = ["GCR"]
def run_finmag(demagsolver):
"""Run the finmag simulation and store data in (demagsolvertype)averages.txt."""
sim = Sim(mesh, Ms, unit_length=unit_length)
sim.alpha = 0.5
sim.set_m((1, 0, 1))
exchange = Exchange(13.0e-12)
sim.add(exchange)
demag = Demag(solver=demagsolver)
sim.add(demag)
fh = open(os.path.join(MODULE_DIR, demagsolver+"averages.txt"), "w")
fe = open(os.path.join(MODULE_DIR, demagsolver+"energies.txt"), "w")
# Progressbar
bar = pb.ProgressBar(maxval=60, \
widgets=[pb.ETA(), pb.Bar('=', '[', ']'), ' ', pb.Percentage()])
logger.info("Time integration")
times = np.linspace(0, 3.0e-10, 61)
#times = np.linspace(0, 3.0e-10, 100000)
for counter, t in enumerate(times):
bar.update(counter)
# Integrate
sim.run_until(t)
print counter
print ("press return to continue")
_ = raw_input()
# Save averages to file
mx, my, mz = sim.m_average
fh.write(str(t) + " " + str(mx) + " " + str(my) + " " + str(mz) + "\n")
# Energies
E_e = exchange.compute_energy()
E_d = demag.compute_energy()
fe.write(str(E_e) + " " + str(E_d) + "\n")
# Energy densities
if counter == 10:
exch_energy = exchange.energy_density_function()
demag_energy = demag.demag.energy_density_function()
finmag_exch, finmag_demag = [], []
R = range(100)
for i in R:
finmag_exch.append(exch_energy([15, 15, i]))
finmag_demag.append(demag_energy([15, 15, i]))
# Store data
np.save(os.path.join(MODULE_DIR, "finmag%s_exch_density.npy"%demagsolver), np.array(finmag_exch))
np.save(os.path.join(MODULE_DIR, "finmag%s_demag_density.npy"%demagsolver), np.array(finmag_demag))
fh.close()
fe.close()
def test_compare_averages():
for demagsolver in demagsolvers:
if not os.path.isfile(os.path.join(MODULE_DIR, demagsolver+"averages.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, demagsolver+"averages.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag(demagsolver)
computed = np.loadtxt(os.path.join(MODULE_DIR, demagsolver+"averages.txt"))
ref = np.loadtxt(os.path.join(MODULE_DIR, demagsolver+"averages_ref.txt"))
dt = ref[:,0] - computed[:,0]
assert np.max(dt) < 1e-15, "Compare timesteps."
ref1, computed1 = np.delete(ref, [0], 1), np.delete(computed, [0], 1)
diff = ref1 - computed1
print "max difference: %g" % np.max(diff)
rel_diff = np.abs(diff / np.sqrt(ref1[0]**2 + ref1[1]**2 + ref1[2]**2))
print "test_averages, max. relative difference per axis:"
print np.nanmax(rel_diff, axis=0)
err = np.nanmax(rel_diff)
if err > 1e-2:
print "nmag:\n", ref1
print "finmag%s:\n"%demagsolver, computed1
assert err < REL_TOLERANCE, "Relative error = %g" % err
# Plot nmag data
nmagt = list(ref[:,0])*3
nmagy = list(ref[:,1]) + list(ref[:,2]) + list(ref[:,3])
p.plot(nmagt, nmagy, 'o', mfc='w', label='nmag')
# Plot finmag data; separate plots for each finmag demag solver
t = computed[:, 0]
x = computed[:, 1]
y = computed[:, 2]
z = computed[:, 3]
p.plot(t, x, 'k', label='$\mathsf{m_x}$')
p.plot(t, y, 'r', label='$\mathsf{m_y}$')
p.plot(t, z, 'b', label='$\mathsf{m_z}$')
p.axis([0, max(t), -0.2, 1.1])
p.xlabel("Time")
p.ylabel("m")
p.title("Finmag%s vs Nmag"%demagsolver)
p.legend(loc='center right')
p.savefig(os.path.join(MODULE_DIR, demagsolver+"exchange_demag.png"))
#p.show
p.close()
print "Comparison of development written to %sexchange_demag.png"%demagsolver
def test_compare_energies():
#Dictionary for finmag exchange results
exch = {}
#Dictionary for finmag demag results
demag = {}
for demagsolver in demagsolvers:
ref = np.loadtxt(os.path.join(MODULE_DIR, demagsolver+"energies_ref.txt"))
if not os.path.isfile(os.path.join(MODULE_DIR, demagsolver+"energies.txt")) \
or (os.path.getctime(os.path.join(MODULE_DIR, demagsolver+"energies.txt")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag(demagsolver)
computed = np.loadtxt(os.path.join(MODULE_DIR, demagsolver+"energies.txt"))
assert np.size(ref) == np.size(computed), "Compare number of energies."
vol = mesh_volume(mesh)*unit_length**mesh.topology().dim()
#30x30x100nm^3 = 30x30x100=9000
# Compare exchange energy
exch[demagsolver] = computed[:, 0]/vol
exch_nmag = ref[:, 0]
diff = abs(exch[demagsolver] - exch_nmag)
rel_diff = np.abs(diff / max(exch[demagsolver]))
print "Exchange energy Finmag %s Nmag , max relative error:"%demagsolver, max(rel_diff)
assert max(rel_diff) < REL_TOLERANCE, \
"Max relative error in Finmag %s Nmag exchange energy is %g"%(demagsolver, max(rel_diff))
# Compare demag energy
demag[demagsolver] = computed[:, 1]/vol
demag_nmag = ref[:, 1]
diff = abs(demag[demagsolver] - demag_nmag)
rel_diff = np.abs(diff / max(demag[demagsolver]))
print "Finmag %s Nmag Demag energy, max relative error: %g"%(demagsolver, max(rel_diff))
# Don't really know why this is ten times higher than everything else.
assert max(rel_diff) < REL_TOLERANCE*10, \
"Max relative error in Finmag %s Nmag demag energy is %g" %(demagsolver,max(rel_diff))
# Plot both FK and GCR demags on the same plot.
p.title("Finmag vs Nmag Exchange energy")
p.plot(exch_nmag, 'o', mfc='w', label='Nmag')
p.plot(exch["FK"], label='Finmag FK')
p.plot(exch["GCR"], label='Finmag GCR')
p.xlabel("Time step")
p.ylabel("$\mathsf{E_{exch}}$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "exchange_energy.png"))
p.close()
p.plot(demag_nmag, 'o', mfc='w', label='Nmag')
p.plot(demag["FK"], label='Finmag FK')
p.plot(demag["GCR"], label='Finmag GCR')
p.xlabel("Time step")
p.ylabel("$\mathsf{E_{demag}}$")
p.legend()
p.savefig(os.path.join(MODULE_DIR, "demag_energy.png"))
#p.show()
p.close()
print "Energy plots written to exchange_energy.png and demag_energy.png"
def test_compare_energy_density():
"""
After ten time steps, compute the energy density through
the center of the bar (seen from x and y) from z=0 to z=100,
and compare the results with nmag and oommf.
"""
R = range(100)
finmag_exch = {}
finmag_demag = {}
for demagsolver in demagsolvers:
# Run simulation only if not run before or changed since last time.
if not (os.path.isfile(os.path.join(MODULE_DIR, demagsolver+"finmag_exch_density.npy"))):
run_finmag(demagsolver)
elif (os.path.getctime(os.path.join(MODULE_DIR, demagsolver+"finmag_exch_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag(demagsolver)
if not (os.path.isfile(os.path.join(MODULE_DIR, demagsolver+"finmag_demag_density.npy"))):
run_finmag(demagsolver)
elif (os.path.getctime(os.path.join(MODULE_DIR, demagsolver+"finmag_demag_density.npy")) <
os.path.getctime(os.path.abspath(__file__))):
run_finmag(demagsolver)
# Read finmag data
finmag_exch[demagsolver] = np.load(os.path.join(MODULE_DIR, demagsolver+"finmag_exch_density.npy"))
finmag_demag[demagsolver] = np.load(os.path.join(MODULE_DIR, demagsolver+"finmag_demag_density.npy"))
# Read nmag data
nmag_exch = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_exch_Edensity.txt"), "r").read().split()]
nmag_demag = [float(i) for i in open(os.path.join(MODULE_DIR, "nmag_demag_Edensity.txt"), "r").read().split()]
# Compare with nmag
nmag_exch = np.array(nmag_exch)
nmag_demag = np.array(nmag_demag)
rel_error_exch_nmag = np.abs(finmag_exch[demagsolver] - nmag_exch)/np.linalg.norm(nmag_exch)
rel_error_demag_nmag = np.abs(finmag_demag[demagsolver] - nmag_demag)/np.linalg.norm(nmag_demag)
print "Finmag %s Exchange energy density, max relative error from nmag: %g"%(demagsolver, max(rel_error_exch_nmag))
print "Finmag %s Demag energy density, max relative error from nmag: %g"%(demagsolver, max(rel_error_demag_nmag))
assert max(rel_error_exch_nmag) < 3e-2, \
"Finmag %s Exchange energy density, max relative error from nmag is %g" %(demagsolver,max(rel_error_exch_nmag))
assert max(rel_error_demag_nmag) < 1e-2, \
"Finmag %s Demag energy density, max relative error from nmag is %g" %(demagsolver,max(rel_error_demag_nmag))
# Read oommf data
oommf_exch = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_exch_Edensity.txt"))
oommf_demag = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_demag_Edensity.txt"))
oommf_coords = np.genfromtxt(os.path.join(MODULE_DIR, "oommf_coords_z_axis.txt")) * 1e9
# Compare with oomf - FIXME: doesn't work at the moment
#rel_error_exch_oomf = np.abs(finmag_exch - oommf_exch)/np.linalg.norm(oommf_exch)
#rel_error_demag_oomf = np.abs(finmag_demag - oommf_demag)/np.linalg.norm(oommf_demag)
#print "Rel error exch, oommf:", max(rel_error_exch_oommf)
#print "Rel error demag, oommf:", max(rel_error_demag_oommf)
# Plot exchange energy density
p.plot(R, finmag_exch["FK"], 'o-',
R,finmag_exch["GCR"], 'v-',
R,nmag_exch, 'x-',
oommf_coords, oommf_exch, "+-")
p.xlabel("nm")
p.title("Exchange energy density")
p.legend(["Finmag FK","Finmag GCR", "Nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "exchange_density.png"))
p.close()
# Plot demag energy density
p.plot(R, finmag_demag["FK"],
R, finmag_demag["GCR"],
'o-', R, nmag_demag,
'x-', oommf_coords,
oommf_demag, "+-")
p.xlabel("nm")
p.title("Demag energy density")
p.legend(["Finmag FK","Finmag GCR", "Nmag", "oommf"], loc="upper center")
p.savefig(os.path.join(MODULE_DIR, "demag_density.png"))
#p.show()
p.close()
print "Energy density plots written to exchange_density.png and demag_density.png"
if __name__ == '__main__':
test_compare_averages()
test_compare_energies()
test_compare_energy_density()
| 11,768
| 39.723183
| 123
|
py
|
finmag
|
finmag-master/examples/exchange_demag/timings/run_nmag.py
|
import os, time
import nmag
from nmag import SI
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
setupstart=time.time()
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(0.86e6,"A/m"),
exchange_coupling=SI(13.0e-12, "J/m"),
llg_damping=0.5)
sim = nmag.Simulation("bar")
sim.load_mesh(os.path.join(MODULE_DIR, "bar.nmesh.h5"),
[("Py", mat_Py)],
unit_length=SI(1e-9,"m"))
sim.set_m([1,0,1])
dt = SI(5e-12, "s")
sim.save_data()
dynamicsstart=time.time()
for i in range(0, 61):
sim.advance_time(dt*i)
endtime = time.time()
output = open(os.path.join(MODULE_DIR, "results.rst"), "w")
output.write("Nmag results:\n")
output.write("-------------\n")
output.write("Setup: %.3f sec.\n" % (dynamicsstart-setupstart))
output.write("Dynamics: %.3f sec.\n" % (endtime-dynamicsstart))
output.close()
| 906
| 23.513514
| 64
|
py
|
finmag
|
finmag-master/examples/exchange_demag/timings/run_finmag.py
|
import commands
import os
import time
import pprint
import dolfin as df
from aeon import timer
from finmag.util.meshes import from_geofile
from finmag import Simulation
from finmag.energies import Exchange, Demag
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
####
# Run the nmag2 example on nmag and finmag and compare timings
####
# Create meshes
neutralmesh = 'netgen -geofile=bar.geo -meshfiletype="Neutral Format" -meshfile=bar.neutral -batchmode'
nmeshimport = 'nmeshimport --netgen bar.neutral bar.nmesh.h5'
if not os.path.isfile(os.path.join(MODULE_DIR, "bar.nmesh.h5")):
commands.getstatusoutput(neutralmesh)
commands.getstatusoutput(nmeshimport)
if not os.path.isfile(os.path.join(MODULE_DIR, "bar.xml.gz")):
from_geofile(os.path.join(MODULE_DIR, "bar.geo"))
# Run nmag
print "Running nmag..."
commands.getstatusoutput("nsim run_nmag.py --clean")
print "Done."
# Setup
setupstart = time.time()
mesh = df.Mesh(os.path.join(MODULE_DIR, "bar.xml.gz"))
sim = Simulation(mesh, Ms=0.86e6, unit_length=1e-9)
sim.set_m((1, 0, 1))
demag = Demag()
demag.parameters["phi_1_solver"] = "minres"
demag.parameters["phi_2_solver"] = "gmres"
demag.parameters["phi_2_preconditioner"] = "sor"
sim.add(demag)
sim.add(Exchange(13.0e-12))
# Dynamics
dynamicsstart = time.time()
sim.run_until(3.0e-10)
endtime = time.time()
# Write output to results.rst
output = open(os.path.join(MODULE_DIR, "results.rst"), "a")
output.write("\nFinmag results:\n")
output.write("---------------\n")
output.write("Setup: %.3f sec.\n" % (dynamicsstart - setupstart))
output.write("Dynamics: %.3f sec.\n" % (endtime - dynamicsstart))
output.write("\nFinmag solver parameters:\n")
output.write("-------------------------\n")
pp = pprint.PrettyPrinter()
output.write("\nfirst linear solve\n{}\n".format(pp.pformat(demag._poisson_solver.parameters.to_dict())))
output.write("\nsecond linear solve\n{}\n\n".format(pp.pformat(demag._laplace_solver.parameters.to_dict())))
output.write(str(timer))
output.close()
# Cleanup
files = ["bar_bi.xml", "bar.grid", "bar_mat.xml", "bar.neutral", "bar.xml.bak", "run_nmag_log.log", "bar_dat.ndt"]
for file in files:
fname = os.path.join(MODULE_DIR, file)
if os.path.isfile(fname):
os.remove(fname)
| 2,248
| 31.594203
| 114
|
py
|
finmag
|
finmag-master/examples/time-dependent-applied-field/test_appfield.py
|
import os
import numpy as np
import pylab
import pytest
import dolfin as df
from finmag.field import Field
from finmag.physics.llg import LLG
from finmag.energies import TimeZeeman
from finmag.drivers.llg_integrator import llg_integrator
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
@pytest.mark.requires_X_display
def test_external_field_depends_on_t():
tfinal = 0.3*1e-9
dt = 0.001e-9
simplices = 2
L = 10e-9
mesh = df.IntervalMesh(simplices, 0, L)
S1 = df.FunctionSpace(mesh, "Lagrange", 1)
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1, dim=3)
GHz=1e9
omega= 100*GHz
llg=LLG(S1, S3)
llg.set_m(df.Constant((1, 0, 0)))
#This is the time dependent field
H_app_expr = df.Expression(("0.0", "0.0","H0*sin(omega*t)"), H0=1e5, omega=omega, t=0.0, degree=1)
H_app = TimeZeeman(H_app_expr)
Ms_field = Field(df.FunctionSpace(mesh, 'DG', 0), 8.6e5)
H_app.setup(llg.m_field, Ms=Ms_field)
#define function that updates that expression, and the field object
def update_H_ext(t):
print "update_H_ext being called for t=%g" % t
H_app.update(t)
llg.effective_field.add(H_app, with_time_update=update_H_ext)
#nothing special from here, just setting up time integration
integrator = llg_integrator(llg, llg.m_field)
#to gather data for later analysis
mlist = []
tlist = []
hext = []
#time loop
times = np.linspace(0, tfinal, tfinal/dt + 1)
for t in times:
integrator.advance_time(t)
print "Integrating time: %g" % t
mlist.append(llg.m_average)
tlist.append(t)
hext.append(H_app.H((0)))
#only plotting and data analysis from here on
mx = [tmp[0] for tmp in mlist]
my = [tmp[1] for tmp in mlist]
mz = [tmp[2] for tmp in mlist]
pylab.plot(tlist,mx,label='m_x')
pylab.plot(tlist,my,label='m_y')
pylab.plot(tlist,mz,label='m_z')
pylab.xlabel('time [s]')
pylab.legend()
pylab.savefig(os.path.join(MODULE_DIR, 'results.png'))
pylab.close()
#if max_step is not provided, or chosen too large,
#the external field appears not smooth in this plot.
#What seems to happen is that the ode integrator
#returns the solution without calling the rhs side again
#if we request very small time steps.
#This is only for debugging.
pylab.plot(tlist,hext,'-x')
pylab.ylabel('external field [A/m]')
pylab.xlabel('time [s]')
pylab.savefig(os.path.join(MODULE_DIR, 'hext.png'))
pylab.close()
#Then try to fit sinusoidal curve through results
def sinusoidalfit(t,omega,phi,A,B):
return A*np.cos(omega*t+phi)+B
#if scipy available
try:
import scipy.optimize
except ImportError:
print "Couldn't import scipy.optimize, skipping test"
else:
popt,pcov = scipy.optimize.curve_fit(
sinusoidalfit,np.array(tlist),np.array(my),
p0=(omega*1.04,0.,0.1,0.2))
#p0 is the set of parameters with which the fitting
#routine starts
print "popt=",popt
fittedomega,fittedphi,fittedA,fittedB=popt
f=open(os.path.join(MODULE_DIR, "fittedresults.txt"),"w")
print >>f, "Fitted omega : %9g" % (fittedomega)
print >>f, "Rel error in omega fit : %9g" % ((fittedomega-omega)/omega)
print >>f, "Fitted phi : %9f" % (fittedphi)
print >>f, "Fitted Amplitude (A) : %9f" % (fittedA)
print >>f, "Fitted Amp-offset (B) : %9f" % (fittedB)
pylab.plot(tlist,my,label='my - simulated')
pylab.plot(tlist,
sinusoidalfit(np.array(tlist),*popt),
'-x',label='m_y - fit')
pylab.xlabel('time [s]')
pylab.legend()
pylab.savefig(os.path.join(MODULE_DIR, 'fit.png'))
deviation = np.sqrt(sum((sinusoidalfit(np.array(tlist),*popt)-my)**2))/len(tlist)
print >>f, "stddev=%g" % deviation
f.close()
assert (fittedomega-omega)/omega < 1e-4
assert deviation < 5e-4
if __name__ == "__main__":
test_external_field_depends_on_t()
| 4,134
| 31.81746
| 102
|
py
|
finmag
|
finmag-master/examples/scheduling/sim_with_scheduling.py
|
import logging
import dolfin as df
from finmag import Simulation
from finmag.energies import Exchange, Demag, Zeeman
log = logging.getLogger(name="finmag")
log.setLevel(logging.ERROR) # To better show output of this program.
# Three example functions that will be added to the schedule.
# They will get called with the simulation object as their first parameter.
def progress(s):
print "We have integrated up to t = {:.3f} ns.".format(1e9 * s.t)
print "Average magnetisation is m = {}.".format(s.m_average)
def halfway_done(s):
print "We are halfway done!"
def done(s):
print "Woohoo!"
def example_simulation():
Ms = 8.6e5
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(40, 20, 20), 10, 5, 5)
example = Simulation(mesh, Ms, name="sim_with_scheduling")
example.set_m((0.1, 1, 0))
example.add(Exchange(13.0e-12))
example.add(Demag())
example.add(Zeeman((Ms/2, 0, 0)))
return example
if __name__ == "__main__":
# Assemble a simulation as usual.
sim = example_simulation()
# Add the functions to the scheduler using sim.schedule.
t_final = 1.05e-9
sim.schedule(progress, every=1e-10, at_end=True) # `every` keyword with optional `at_end`
sim.schedule(halfway_done, at=t_final/2) # `at` keyword
sim.schedule(done, at_end=True) # `at_end` used on its own
# Now integrate. The functions will get called according to the schedule.
sim.run_until(t_final)
| 1,445
| 27.352941
| 93
|
py
|
finmag
|
finmag-master/examples/scheduling/sim_with_progressbar.py
|
import sys
import logging
from sim_with_scheduling import example_simulation
log = logging.getLogger(name="finmag")
log.setLevel(logging.ERROR) # To better show output of this program.
# This script will update a progress bar on the screen while the simulation runs.
sim = example_simulation()
# Sets up the progress bar.
toolbar_width = 42
print "\nRunning simulation. Please wait..."
sys.stdout.write("[{}]".format(" " * toolbar_width))
sys.stdout.flush()
sys.stdout.write("\b" * (toolbar_width-1)) # return to start of line, after '['
# Moves the arrow towards the right.
def update_bar(s):
sys.stdout.write("\b\b--->")
sys.stdout.flush()
# Register the arrow moving function and run the simulation.
sim.schedule(update_bar, every=0.5e-10)
sim.run_until(1e-9)
sys.stdout.write("\b-] Done.\n")
| 814
| 24.46875
| 81
|
py
|
finmag
|
finmag-master/examples/scheduling/sim_with_notification.py
|
try:
import pynotify
except ImportError:
print "You need the Python bindings to libnotify for this example to work."
print "On Ubuntu, install the package 'python-notify'."
raise
from sim_with_scheduling import example_simulation
# This script will show a notification on the screen when the simulation completes.
# Initialise the notification system with our application name (this is
# required by the libnotify library) and create a notification.
pynotify.init("Finmag")
my_notification = pynotify.Notification("Simulation completed")
# The function we will add to the scheduler.
# It will display the current simulation time when the simulation completes.
def notify_when_done(sim, note):
msg = "Integrated up to t = {} ns.".format(sim.t * 1e9)
note.set_property("body", msg)
note.show()
sim = example_simulation()
# Register the function with the scheduler.
sim.schedule(notify_when_done, args=[my_notification], at_end=True)
# Integrate for one nanosecond. When the integration is done, the
# notification will be shown.
sim.run_until(1e-9)
| 1,082
| 32.84375
| 83
|
py
|
finmag
|
finmag-master/examples/magnetic_grain/suess_2001.py
|
import time
import numpy as np
import matplotlib.pyplot as plt
from finmag.util.meshes import cylinder
from finmag.util.consts import flux_density_to_field_strength
from finmag.util.helpers import spherical_to_cartesian
from finmag import Simulation
from finmag.energies import Exchange, Zeeman, UniaxialAnisotropy, Demag
Ms_Tesla = 0.5
Ms = flux_density_to_field_strength(Ms_Tesla)
A = 1e-11
K1 = 3e5
K1_axis = (0, 0, 1)
H_baseline = 2 * K1 / Ms_Tesla
H_mult = [0.95, 1.2, 2.8]
H_axis = - spherical_to_cartesian((1.0, 1.0 * np.pi/180, 0))
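# Editor's note (hedged, not part of the original script): H_baseline is the
# uniaxial anisotropy field H_K = 2*K1/(mu0*Ms); with K1 = 3e5 J/m^3 and
# mu0*Ms = 0.5 T this is 1.2e6 A/m. The loop below applies external fields of
# H_mult times this value, roughly antiparallel to the easy axis (1 degree off,
# assuming the usual polar-angle convention of spherical_to_cartesian).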
mesh = cylinder(6, 20, 2.0)
start_clock = time.clock()
for H in H_mult:
sim = Simulation(mesh, Ms, unit_length=1e-9)
sim.alpha = 0.02
sim.set_m(K1_axis)
sim.add(Exchange(A))
sim.add(UniaxialAnisotropy(K1, K1_axis))
sim.add(Demag())
sim.add(Zeeman(H * H_baseline * H_axis))
print "Running simulation for H = {} T.".format(H)
print sim.mesh_info()
t = 0; dt = 1e-12; t_failsafe = 2e-9;
dt_output = 100e-12;
ts = []; mzs = []
while True:
sim.run_until(t)
m = sim.m_average
ts.append(t*1e9); mzs.append(m[2])
if t % dt_output:
print "at t = {:.2} ns, m = {}.".format(1e9*t, m)
if m[2] < -0.5 or t >= t_failsafe:
break
t += dt
plt.plot(ts, mzs, label="$H_\mathrm{{ext}} = {}$".format(H))
stop_clock = time.clock()
print 'Simulations ran in {} seconds'.format(round(stop_clock - start_clock))
plt.xlabel('$\mathrm{{time (ns)}}$')
plt.ylabel('$m_\mathrm{{z}} (M_\mathrm{{S}})$')
plt.ylim([-0.6, 1])
plt.title("$m_\mathrm{z}$ as a function of time")
plt.legend()
plt.savefig("mz.png")
| 1,659
| 26.213115
| 77
|
py
|
finmag
|
finmag-master/examples/edge_damping/damping.py
|
import numpy as np
import dolfin as df
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
def damping_expression(alpha, xmin, xmax, width):
"""
Exponentially increasing damping at the edges along the x-axis.
Will increase alpha from its starting value `alpha` to 1 in a region
less than `width` away from `xmin` or `xmax`, where `xmin` and `xmax` are
the extremal x-coordinates of the mesh. Returns a df.Expression.
"""
eps = 0.01 # changes slope of exponential fit
a = alpha + eps
b = ((1 + eps) / a) ** (1.0 / width)
xa = xmin + width
xb = xmax - width
code = ("(x[0] <= xa || xb <= x[0])"
" ? a * pow(b, fabs(x[0] - (x[0] <= xa ? xa : xb))) - eps"
" : alpha")
expr = df.Expression(code, xa=xa, xb=xb, alpha=alpha, a=a, b=b, eps=eps, degree=1)
return expr
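# Editor's sketch (hedged, not part of the original module): a pure-Python check
# of the endpoints of the exponential profile constructed above. With
# a = alpha + eps and b = ((1 + eps)/a)**(1/width), the expression equals
# a*b**0 - eps = alpha at a distance `width` from the mesh edge and
# a*b**width - eps = (1 + eps) - eps = 1 directly at the edge.
def _check_damping_endpoints(alpha=0.02, width=200.0):
    """Return (damping at inner boundary, damping at mesh edge) for the profile above."""
    eps = 0.01  # same constant as in damping_expression
    a = alpha + eps
    b = ((1 + eps) / a) ** (1.0 / width)
    at_inner_boundary = a * b ** 0 - eps   # expected: alpha
    at_mesh_edge = a * b ** width - eps    # expected: 1.0
    return at_inner_boundary, at_mesh_edge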
def plot_damping_profile(expr, mesh):
"""
Plot a given damping profile to file 'damping.png'.
The first argument `expr` should be a df.Expression (it can be obtained
using the function damping_expression in this module) and the second
argument should be a df.Mesh.
"""
xs = mesh.coordinates()[:, 0]
xmin = xs.min()
xmax = xs.max()
points = 1000
xs_plot = np.linspace(xmin, xmax, points)
alphas = np.zeros(points)
S1 = df.FunctionSpace(mesh, "CG", 1)
alpha_func = df.project(expr, S1)
for i, x in enumerate(xs_plot):
try:
alphas[i] = alpha_func(x, 0, 0)
except RuntimeError:
# could raise Exception due to now resolved bug in dolfin
# https://bitbucket.org/fenics-project/dolfin/issue/97/function-eval-does-not-find-a-point-that
alphas[i] = 0
plt.plot(xs_plot, alphas)
plt.xlabel("x (nm)")
plt.xlim((xmin, xmax))
plt.ylabel("damping")
plt.ylim((0, 1))
plt.grid()
plt.title("Spatial Profile of the Damping")
plt.savefig('damping.png')
print "Saved plot of damping to 'damping.png'."
if __name__ == "__main__":
from finmag.util.meshes import from_geofile
mesh = from_geofile("film.geo")
expr = damping_expression(0.02, 0, 1000, 200)
plot_damping_profile(expr, mesh)
| 2,211
| 30.15493
| 107
|
py
|
finmag
|
finmag-master/examples/std_prob_3/run.py
|
import os
import time
import logging
import numpy as np
import dolfin as df
from scipy.optimize import bisect
from finmag import Simulation
from finmag.energies import UniaxialAnisotropy, Exchange, Demag
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
"""
Micromag Standard Problem #3
specification:
http://www.ctcms.nist.gov/~rdm/mumag.org.html
solution with nmag:
http://magnonics.ex.ac.uk:3000/wiki/dynamag/Mumag3_nmag
a good write-up (Rave 1998):
http://www.sciencedirect.com/science/article/pii/S030488539800328X
"""
mu0 = 4.0 * np.pi * 10**-7 # vacuum permeability N/A^2
Ms = 1.0e6 # saturation magnetisation A/m
A = 13.0e-12 # exchange coupling strength J/m
Km = 0.5 * mu0 * Ms**2 # magnetostatic energy density kg/ms^2
lexch = (A/Km)**0.5 # exchange length m
K1 = 0.1 * Km
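# Editor's note (hedged, not part of the original script): with the values above,
# Km = 0.5*mu0*Ms**2 ~= 6.28e5 J/m^3, so lexch = sqrt(A/Km) ~= 4.55e-9 m
# (about 4.5 nm) and K1 ~= 6.28e4 J/m^3; the cube edge length L below is given
# as a multiple (lfactor) of lexch.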
flower_init = (0, 0, 1)
def vortex_init(rs):
"""
from nmag's solution
http://magnonics.ex.ac.uk:3000/attachments/723/run.py
which cites Guslienko et al. APL 78 (24)
http://link.aip.org/link/doi/10.1063/1.1377850
"""
xs, ys, zs = rs
rho = xs**2 + ys**2
phi = np.arctan2(zs, xs)
b = 2 * lexch
m_phi = np.sin(2 * np.arctan(rho/b))
return np.array([np.sqrt(1.0 - m_phi**2), m_phi*np.cos(phi), -m_phi*np.sin(phi)])
energies = dict()
def run_simulation(lfactor, m_init, m_init_name=""):
L = lfactor * lexch
divisions = int(round(lfactor * 2)) # that magic number influences L
mesh = df.BoxMesh(df.Point(0, 0, 0), df.Point(L, L, L), divisions, divisions, divisions)
exchange = Exchange(A)
anisotropy = UniaxialAnisotropy(K1, [0, 0, 1])
demag = Demag()
sim = Simulation(mesh, Ms)
sim.set_m(m_init)
sim.add(exchange)
sim.add(anisotropy)
sim.add(demag)
sim.relax()
# Save average magnetisation.
mx, my, mz = sim.m_average
with open(os.path.join(MODULE_DIR, "data_m.txt"), "a") as f:
t = time.asctime()
f.write("{} {} {} {} {} {}\n".format(m_init_name, lfactor, mx, my, mz, t))
# Save energies.
# We could call sim.total_energy, but we want the individual contributions.
e_exc = exchange.compute_energy() / (sim.Volume * Km)
e_anis = anisotropy.compute_energy() / (sim.Volume * Km)
e_demag = demag.compute_energy() / (sim.Volume * Km)
e_total = e_exc + e_anis + e_demag # relative total energy density
with open(os.path.join(MODULE_DIR, "data_energies.txt"), "a") as f:
t = time.asctime()
f.write("{} {} {} {} {} {} {}\n".format(m_init_name,
lfactor, e_total, e_exc, e_anis, e_demag, t))
return e_total
def energy_difference(lfactor):
print "Running the two simulations for lfactor={}.".format(lfactor)
e_vortex = run_simulation(lfactor, vortex_init, "vortex")
e_flower = run_simulation(lfactor, flower_init, "flower")
diff = e_vortex - e_flower
with open(os.path.join(MODULE_DIR, "data_diffs.txt"), "a") as f:
t = time.asctime()
f.write("{} {} {} {} {}\n".format(lfactor, e_vortex, e_flower, diff, t))
return diff
if __name__ == "__main__":
print "Running standard problem 3."
single_domain_limit = bisect(energy_difference, 8, 8.5, xtol=0.1)
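# Editor's note (hedged): bisect searches for the zero crossing of
# energy_difference(lfactor) between lfactor = 8 and 8.5 exchange lengths,
# i.e. the cube size at which the flower and vortex states have equal energy,
# which is the single-domain limit asked for by the problem.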
print "L = " + str(single_domain_limit) + "."
from table_for_doc import write_table
write_table()
| 3,406
| 32.732673
| 92
|
py
|
finmag
|
finmag-master/examples/std_prob_3/table_for_doc.py
|
import os
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
def write_table():
with open(os.path.join(MODULE_DIR, "data_energies.txt")) as f:
lines = f.readlines()
vor = lines[-2].split()
flo = lines[-1].split()
with open(os.path.join(MODULE_DIR, "table_template.txt")) as f:
table_template = f.read()
with open(os.path.join(MODULE_DIR, "doc_table.rst"), "w") as f:
f.write(table_template.format(
float(vor[5]), float(vor[3]), float(vor[4]), float(vor[2]),
float(flo[5]), float(flo[3]), float(flo[4]), float(flo[2])))
if __name__ == "__main__":
write_table()
| 653
| 31.7
| 72
|
py
|
finmag
|
finmag-master/examples/dispersion_curves/dispersion.py
|
import os
import subprocess
import numpy as np
import dolfin as df
from numpy import fft
from finmag import Simulation as Sim
from finmag.energies import Exchange, Demag, TimeZeeman
from finmag.util.meshes import from_geofile
meshfile = "width_modulated_bar.geo"
mesh = from_geofile(meshfile)
x0 = -540; x1 = -x0; dx_high_alpha = 10; xn_probe = x1 + 1;
Ms = 8.6e5
A = 13.0e-12
initial_m_file = "mxyz_0.npy"
m_for_fourier_analysis_file = "my_t.npy"
dispersion_data_file = "dispersion.dat"
dispersion_plot_file = "dispersion.png"
def relax_system():
"""
Relax the system to obtain the initial magnetisation for the subsequent simulation.
"""
sim = Sim(mesh, Ms, unit_length=1e-9)
sim.set_m((1, 0, 0))
sim.alpha = 1
sim.do_precession = False
sim.add(Exchange(A))
sim.add(Demag(solver="FK"))
sim.relax()
np.save(initial_m_file, sim.m)
def excite_system():
"""
Excite the relaxed system with the sinc pulse and save m_y to a file.
"""
sim = Sim(mesh, Ms, unit_length=1e-9)
alpha_expression = df.Expression("(x[0] < x_left || x[0] > x_right) ? 1.0 : 0.01",
x_left=x0+dx_high_alpha, x_right=x1-dx_high_alpha, degree=1)
sim.alpha = alpha_expression
sim.set_m(np.load(initial_m_file))
sim.add(Exchange(A))
sim.add(Demag(solver="FK"))
GHz = 1e9
omega = 50 * 2 * np.pi * GHz
sinc = (
"H_0"
" * (t == 0 ? 1 : sin(omega * t)/(omega * t))"
" * (x[0] == 0 ? 1 : sin(k_c * x[0])/(k_c * x[0]))")
H = df.Expression(("0.0", sinc, "0.0"), H_0=1e5, k_c=1.0, omega=omega, t=0.0, degree=1)
pulse = TimeZeeman(H)
t_0 = 50e-12
def update_pulse(t):
pulse.update(t - t_0)
sim.add(pulse, with_time_update=update_pulse)
xs = np.linspace(x0 + 1e-8, x1 - 1e-8, xn_probe)
ts = np.linspace(0, 2e-9, 2001)
my_along_x_axis_over_time = []
for t in ts:
sim.run_until(t)
my = np.array([sim._m(x, 0, 0)[1] for x in xs])
my_along_x_axis_over_time.append(my)
print "Simulation t = {:.2}.".format(t)
np.save(m_for_fourier_analysis_file, np.array(my_along_x_axis_over_time))
def compute_dispersion(dx, dt):
"""
Compute the dispersion relation, where *dx* is the distance between points
where m_y was probed in nm, and *dt* is the length of time between
measurements in ns.
"""
my = np.load(m_for_fourier_analysis_file)
transformed = np.log10(np.power(np.abs(fft.fftshift(fft.fft2(my))), 2))
m, n = transformed.shape
print m,n
freq = fft.fftshift(fft.fftfreq(m, dt))
kx = fft.fftshift(fft.fftfreq(n, dx/(2.0*np.pi)))
with open(dispersion_data_file, "w") as f:
f.write('# kx (nm^-1) frequency (GHz) FFT_Power (arb. unit)\n')
for j in range(n):
for i in range(m):
f.write("%15g %15g %15g\n" % (kx[n-j-1], freq[i], transformed[i][j]))
f.write('\n')
if __name__ == '__main__':
if not os.path.exists(initial_m_file):
print "Creating initial magnetisation."
relax_system()
if not os.path.exists(m_for_fourier_analysis_file):
print "Running simulation with excitation."
excite_system()
if not os.path.exists(dispersion_data_file):
"Computing dispersion relation."
compute_dispersion(2, 1e-3)
if not os.path.exists(dispersion_plot_file):
print "Calling gnuplot to create dispersion plot."
cmd = ('gnuplot', 'plot.gnu')
subprocess.check_call(cmd)
| 3,525
| 31.348624
| 95
|
py
|
finmag
|
finmag-master/examples/std_prob_4/plot_averages.py
|
import os
import numpy as np
import matplotlib.pyplot as plt
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
# plots of average magnetisation components
averages_martinez = os.path.join(MODULE_DIR, "m_averages_ref_martinez.txt")
ref_t, ref_mx, ref_my, ref_mz = np.loadtxt(averages_martinez, unpack=True)
plt.plot(ref_t, ref_mx, "r-", label="$m_\mathrm{x}\,\mathrm{Martinez\, et\, al.}$")
plt.plot(ref_t, ref_my, "r:", label="$m_\mathrm{y}$")
plt.plot(ref_t, ref_mz, "r--", label="$m_\mathrm{z}$")
averages_finmag = os.path.join(MODULE_DIR, "dynamics.ndt")
t, mx, my, mz = np.loadtxt(averages_finmag, unpack=True)
t *= 1e9 # convert from s to ns
plt.plot(t, mx, "b-", label="$m_\mathrm{x}\,\mathrm{FinMag}$")
plt.plot(t, my, "b:")
plt.plot(t, mz, "b--")
plt.xlabel("$\mathrm{time}\, (\mathrm{ns})$")
plt.ylabel("$<m_i> = <M_i>/M_\mathrm{S}$")
plt.legend()
plt.xlim([0, 2])
plt.savefig(os.path.join(MODULE_DIR, "m_averages.pdf"))
| 942
| 33.925926
| 83
|
py
|
finmag
|
finmag-master/examples/std_prob_4/test_std_prob_4.py
|
import os
import pytest
import numpy as np
import dolfin as df
import matplotlib.pyplot as plt
from aeon import timer
from math import sqrt
from finmag.util.meshes import from_geofile
from finmag.util.consts import mu0
from finmag import Simulation
from finmag.energies import Zeeman, Demag, Exchange
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
m_0_file = os.path.join(MODULE_DIR, "m_0.npy")
m_at_crossing_file = os.path.join(MODULE_DIR, "m_at_crossing.npy")
"""
Micromag Standard Problem #4
specification:
http://www.ctcms.nist.gov/~rdm/mumag.org.html
"""
Ms = 8.0e5
A = 1.3e-11
alpha = 0.02
gamma = 2.211e5
def create_initial_s_state():
"""
Creates equilibrium s-state by slowly switching off a saturating field.
"""
mesh = from_geofile(os.path.join(MODULE_DIR, "bar.geo"))
sim = Simulation(mesh, Ms, name="relaxation", unit_length=1e-9)
sim.alpha = 0.5 # good enough for relaxation
sim.gamma = gamma
sim.set_m((1, 1, 1))
sim.add(Demag())
sim.add(Exchange(A))
# Saturating field of Ms in the [1, 1, 1] direction, that gets reduced
# every 10 picoseconds until it vanishes after half a nanosecond.
H_initial = Ms * np.array((1, 1, 1)) / sqrt(3)
H_multipliers = list(np.linspace(0, 1))
H = Zeeman(H_initial)
def lower_H(sim):
try:
H_mult = H_multipliers.pop()
print "At t = {} s, lower external field to {} times initial field.".format(
sim.t, H_mult)
H.set_value(H_mult * H_initial)
except IndexError:
sim.remove_interaction(H.name)
print "External field is off."
return True
sim.add(H)
sim.schedule(lower_H, every=10e-12)
sim.run_until(0.5e-9)
sim.relax()
np.save(m_0_file, sim.m)
print "Saved magnetisation to {}.".format(m_0_file)
print "Average magnetisation is ({:.2g}, {:.2g}, {:.2g}).".format(*sim.m_average)
def run_simulation(stop_when_mx_eq_zero):
"""
Runs the simulation using field #1 from the problem description.
Stores the average magnetisation components regularly, as well as the
magnetisation when the x-component of the average magnetisation crosses
the value 0 for the first time.
"""
mesh = from_geofile(os.path.join(MODULE_DIR, "bar.geo"))
sim = Simulation(mesh, Ms, name="dynamics", unit_length=1e-9)
sim.alpha = alpha
sim.gamma = gamma
sim.set_m(np.load(m_0_file))
sim.add(Demag())
sim.add(Exchange(A))
"""
Conversion between mu0 * H in mT and H in A/m.
mu0 * H = 1 mT
= 1 * 1e-3 T
= 1 * 1e-3 Vs/m^2
divide by mu0 with mu0 = 4pi * 1e-7 Vs/Am
gives H = 1 / 4pi * 1e4 A/m
with the unit A/m which is indeed what we want.
Consequence:
Just divide the value of mu0 * H in Tesla
by mu0 to get the value of H in A/m.
"""
Hx = -24.6e-3 / mu0
Hy = 4.3e-3 / mu0
Hz = 0
sim.add(Zeeman((Hx, Hy, Hz)))
def check_if_crossed(sim):
mx, _, _ = sim.m_average
if mx <= 0:
print "The x-component of the spatially averaged magnetisation first crossed zero at t = {}.".format(sim.t)
np.save(m_at_crossing_file, sim.m)
# When this function returns True, it means this event is done
# and doesn't need to be triggered anymore.
# When we return False, it means we want to stop the simulation.
return not stop_when_mx_eq_zero
sim.schedule(check_if_crossed, every=1e-12)
sim.schedule('save_averages', every=10e-12, at_end=True)
sim.schedule('save_vtk', every=10e-12, at_end=True, overwrite=True)
sim.run_until(2.0e-9)
return sim.t
@pytest.mark.slow
def test_std_prob_4_field_1(stop_when_mx_eq_zero=True):
PRECISION = 4e-12
if not os.path.exists(m_0_file):
print "Couldn't find initial magnetisation, creating one."
create_initial_s_state()
print "Running simulation..."
t_0 = run_simulation(stop_when_mx_eq_zero)
print timer
t_ref_martinez = 0.13949e-9 # http://www.ctcms.nist.gov/~rdm/std4/Torres.html
assert abs(t_0 - t_ref_martinez) < PRECISION
if __name__ == "__main__":
test_std_prob_4_field_1(stop_when_mx_eq_zero=False)
| 4,362
| 29.089655
| 119
|
py
|
finmag
|
finmag-master/examples/llb/macrospin.py
|
import dolfin as df
import numpy as np
import matplotlib.pyplot as plt
from finmag.energies import Zeeman
from finmag.energies import Demag
from finmag.physics.llb.exchange import Exchange
from finmag.physics.llb.anisotropy import LLBAnisotropy
from finmag.physics.llb.material import Material
from finmag.physics.llb.llb import LLB
def average(m):
m.shape=(3,-1)
t=np.sqrt(np.sum(m*m,axis=0))
m.shape=(-1)
return np.average(t)
def saveplot(ts,me,filename):
fig=plt.figure()
plt.plot(ts,me)
plt.xlabel('Time (ps)')
plt.ylabel('me')
fig.savefig(filename)
def save_me(ts,me,me_input):
f=open('me.txt','w')
f.write('#Temperature me me_input\n')
for i in range(len(ts)):
tmp='%g %e %e\n'%(ts[i],
me[i],me_input[i])
f.write(tmp)
f.close()
def SpinTest(mesh,T,name):
mat = Material(mesh, name=name)
mat.set_m((1, 1, 1))
mat.T = T
llb = LLB(mat)
llb.alpha=0.1
llb.set_up_solver()
llb.interactions.append(mat)
max_time = 10e-12
ts = np.linspace(0, max_time, num=11)
me_average = []
mx=[]
mz=[]
for t in ts:
llb.run_until(t)
me_average.append(average(llb.m))
mx.append(llb.m[0])
mz.append(llb.m[-1])
#saveplot(ts,me_average,'tt.png')
#saveplot(ts,mx,'mx.png')
#saveplot(ts,mz,'mz.png')
return me_average[-1],mat.m_e
def SeriesTemperatureTest(mesh):
Ts1=[i for i in range(0,600,20)]
Ts2=[i for i in range(600,700,5)]
Ts3=[i for i in range(700,1000,20)]
Ts=Ts1+Ts2+Ts3
me=[]
me_input=[]
for t in Ts:
print 'temperature at %g'%t
me1,me2=SpinTest(mesh,t,'FePt')
me.append(me1)
me_input.append(me2)
fig=plt.figure()
p1,=plt.plot(Ts,me,'.')
p2,=plt.plot(Ts,me_input,'-')
plt.xlabel('Temperature (K)')
plt.ylabel('me')
plt.legend([p1,p2],['me','me-input'])
fig.savefig('FePt-me.png')
save_me(Ts,me,me_input)
def StochasticSpinTest(mesh,T):
mat = Material(mesh, name='Nickel')
#mat = Material(mesh)
mat.set_m((1, 0, 0))
mat.T = T
mat.alpha=0.1
print mat.T
print mat.compute_field()
dt=1e-15
llb = LLB(mat)
llb.set_up_stochastic_solver(dt=dt,use_evans2012_noise=True)
llb.interactions.append(mat)
n=1000
max_time = n*dt
ts = np.linspace(0, max_time, num=n+1)
me_average = []
mx=[]
mz=[]
for t in ts:
#print llb.m
llb.run_stochastic_until(t)
me_average.append(average(llb.m))
mx.append(llb.m[0])
mz.append(llb.m[-1])
print np.array(me_average)
saveplot(ts,me_average,'st.png')
return me_average[-1],mat.m_e
if __name__ == '__main__':
x0 = y0 = z0 = 0
x1 = 10e-9
y1 = 10e-9
z1 = 10e-9
nx = 1
ny = 1
nz = 1
mesh = df.BoxMesh(df.Point(x0, y0, z0), df.Point(x1, y1, z1), nx, ny, nz)
#mesh =df.Interval(1,0,50e-9)
print mesh.coordinates()
mat = Material(mesh, name='Nickel')
#print SpinTest(mesh,640,'Nickel')
#SeriesTemperatureTest(mesh)
print StochasticSpinTest(mesh,640)
| 3,279
| 20.578947
| 77
|
py
|
finmag
|
finmag-master/examples/nmag_example_2/run_nmag.py
|
import time
import nmag
from nmag import SI
start = time.time()
mat_Py = nmag.MagMaterial(name="Py",
Ms=SI(0.86e6,"A/m"),
exchange_coupling=SI(13.0e-12, "J/m"),
llg_damping=0.5)
sim = nmag.Simulation("nmag_bar")
sim.load_mesh("bar.nmesh.h5",
[("Py", mat_Py)],
unit_length=SI(1e-9,"m"))
sim.set_m([1,0,1])
dt = SI(5e-12, "s")
for i in range(0, 61):
sim.advance_time(dt*i) #compute time development
sim.save_data() #save averages
print "Simulation took {:.3} s.".format(time.time() - start)
| 645
| 22.925926
| 65
|
py
|
finmag
|
finmag-master/examples/nmag_example_2/run_finmag.py
|
from aeon import timer
from finmag import Simulation
from finmag.energies import Exchange, Demag
from finmag.util.meshes import from_geofile
def run_simulation(verbose=False):
mesh = from_geofile('bar.geo')
sim = Simulation(mesh, Ms=0.86e6, unit_length=1e-9, name="finmag_bar")
sim.set_m((1, 0, 1))
sim.set_tol(1e-6, 1e-6)
sim.alpha = 0.5
sim.add(Exchange(13.0e-12))
sim.add(Demag())
sim.schedule('save_averages', every=5e-12)
sim.schedule("eta", every=10e-12)
sim.run_until(3e-10)
print timer
if verbose:
print "The RHS was evaluated {} times, while the Jacobian was computed {} times.".format(
sim.integrator.stats()['nfevals'],
timer.get("sundials_jtimes", "LLG").calls)
if __name__ == "__main__":
run_simulation(verbose=False)
| 830
| 26.7
| 97
|
py
|