repo stringlengths 1 99 | file stringlengths 13 215 | code stringlengths 12 59.2M | file_length int64 12 59.2M | avg_line_length float64 3.82 1.48M | max_line_length int64 12 2.51M | extension_type stringclasses 1
value |
|---|---|---|---|---|---|---|
eco-dqn | eco-dqn-master/experiments/BA_40spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_40spin/eco",
graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 40,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_40spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_40spin/s2v",
graph_save_loc="_graphs/validation/BA_40spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 35.770492 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/test/test_eco.py | """
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_100spin/eco",
graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 100,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,535 | 34.716535 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_100spin/test/test_s2v.py | """
Tests an agent.
"""
import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_100spin/s2v",
graph_save_loc="_graphs/validation/BA_100spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,512 | 34.81746 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import datetime
def run(save_loc="ER_40spin/eco",
graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
# info_str = "train_mpnn"
date = datetime.datetime.now().strftime("%Y-%m")
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 40,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0] * step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path, map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,610 | 35.595238 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_40spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
import datetime
def run(save_loc="ER_40spin/s2v",
graph_save_loc="_graphs/validation/ER_40spin_p15_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
# info_str = "train_s2v"
date = datetime.datetime.now().strftime("%Y-%m")
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0] * step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path, map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,588 | 35.420635 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_200spin/eco",
graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma = 0.95
step_factor = 2
env_args = {'observables': DEFAULT_OBSERVABLES,
'reward_signal': RewardSignal.BLS,
'extra_action': ExtraAction.NONE,
'optimisation_target': OptimisationTarget.CUT,
'spin_basis': SpinBasis.BINARY,
'norm_rewards': True,
'memory_length': None,
'horizon_length': None,
'stag_punishment': None,
'basin_reward': 1. / 200,
'reversible_spins': True}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,509 | 35.967213 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_200spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_200spin/s2v",
graph_save_loc="_graphs/validation/BA_200spin_m4_100graphs.pkl",
batched=True,
max_batch_size=None):
print("\n----- Running {} -----\n".format(os.path.basename(__file__)))
####################################################
# NETWORK LOCATION
####################################################
data_folder = os.path.join(save_loc, 'data')
network_folder = os.path.join(save_loc, 'network')
print("data folder :", data_folder)
print("network folder :", network_folder)
test_save_path = os.path.join(network_folder, 'test_scores.pkl')
network_save_path = os.path.join(network_folder, 'network_best.pth')
print("network params :", network_save_path)
####################################################
# NETWORK SETUP
####################################################
network_fn = MPNN
network_args = {
'n_layers': 3,
'n_features': 64,
'n_hid_readout': [],
'tied_weights': False
}
####################################################
# SET UP ENVIRONMENTAL AND VARIABLES
####################################################
gamma=1
step_factor = 1
env_args = {'observables':[Observable.SPIN_STATE],
'reward_signal':RewardSignal.DENSE,
'extra_action':ExtraAction.NONE,
'optimisation_target':OptimisationTarget.CUT,
'spin_basis':SpinBasis.BINARY,
'norm_rewards':True,
'memory_length':None,
'horizon_length':None,
'stag_punishment':None,
'basin_reward':None,
'reversible_spins':False}
####################################################
# LOAD VALIDATION GRAPHS
####################################################
graphs_test = load_graph_set(graph_save_loc)
####################################################
# SETUP NETWORK TO TEST
####################################################
test_env = ising_env.make("SpinSystem",
SingleGraphGenerator(graphs_test[0]),
graphs_test[0].shape[0]*step_factor,
**env_args)
device = "cuda" if torch.cuda.is_available() else "cpu"
torch.device(device)
print("Set torch default device to {}.".format(device))
network = network_fn(n_obs_in=test_env.observation_space.shape[1],
**network_args).to(device)
network.load_state_dict(torch.load(network_save_path,map_location=device))
for param in network.parameters():
param.requires_grad = False
network.eval()
print("Sucessfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))
####################################################
# TEST NETWORK ON VALIDATION GRAPHS
####################################################
results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
return_raw=True, return_history=True,
batched=batched, max_batch_size=max_batch_size)
results_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + ".pkl"
results_raw_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_raw.pkl"
history_fname = "results_" + os.path.splitext(os.path.split(graph_save_loc)[-1])[0] + "_history.pkl"
for res, fname, label in zip([results, results_raw, history],
[results_fname, results_raw_fname, history_fname],
["results", "results_raw", "history"]):
save_path = os.path.join(data_folder, fname)
res.to_pickle(save_path)
print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,486 | 36.082645 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_200spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_200spin/eco",
        graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``save_loc``/network, runs it on
    every graph in ``graph_save_loc`` and pickles the summary, raw and
    per-step history results into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Episodes last step_factor * n_spins steps (spins are reversible).
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 200,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,510 | 35.97541 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_200spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_200spin/s2v",
        graph_save_loc="_graphs/validation/ER_200spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN-style agent on a set of validation graphs.

    The agent observes only the spin state, gets a dense reward and flips
    each spin at most once (irreversible), hence step_factor = 1. Results
    are pickled into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Irreversible spins: one action per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,487 | 36.090909 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_100spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_100spin/eco",
        graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``save_loc``/network, runs it on
    every graph in ``graph_save_loc`` and pickles the summary, raw and
    per-step history results into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Episodes last step_factor * n_spins steps (spins are reversible).
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 100,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,510 | 35.97541 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_100spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_100spin/s2v",
        graph_save_loc="_graphs/validation/ER_100spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN-style agent on a set of validation graphs.

    The agent observes only the spin state, gets a dense reward and flips
    each spin at most once (irreversible), hence step_factor = 1. Results
    are pickled into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Irreversible spins: one action per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,487 | 36.090909 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_60spin/eco",
        graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``save_loc``/network, runs it on
    every graph in ``graph_save_loc`` and pickles the summary, raw and
    per-step history results into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Episodes last step_factor * n_spins steps (spins are reversible).
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 60,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,507 | 35.95082 | 108 | py |
eco-dqn | eco-dqn-master/experiments/ER_60spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="ER_60spin/s2v",
        graph_save_loc="_graphs/validation/ER_60spin_p15_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN-style agent on a set of validation graphs.

    The agent observes only the spin state, gets a dense reward and flips
    each spin at most once (irreversible), hence step_factor = 1. Results
    are pickled into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Irreversible spins: one action per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 36.07438 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/test/test_eco.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
DEFAULT_OBSERVABLES)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_20spin/eco",
        graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained ECO-DQN agent on a set of validation graphs.

    Loads the best network checkpoint from ``save_loc``/network, runs it on
    every graph in ``graph_save_loc`` and pickles the summary, raw and
    per-step history results into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Episodes last step_factor * n_spins steps (spins are reversible).
    step_factor = 2

    env_args = {'observables': DEFAULT_OBSERVABLES,
                'reward_signal': RewardSignal.BLS,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': 1. / 20,
                'reversible_spins': True}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,506 | 35.942623 | 108 | py |
eco-dqn | eco-dqn-master/experiments/BA_20spin/test/test_s2v.py | import os
import matplotlib.pyplot as plt
import torch
import src.envs.core as ising_env
from experiments.utils import test_network, load_graph_set
from src.envs.utils import (SingleGraphGenerator,
RewardSignal, ExtraAction,
OptimisationTarget, SpinBasis,
Observable)
from src.networks.mpnn import MPNN
try:
import seaborn as sns
plt.style.use('seaborn')
except ImportError:
pass
def run(save_loc="BA_20spin/s2v",
        graph_save_loc="_graphs/validation/BA_20spin_m4_100graphs.pkl",
        batched=True,
        max_batch_size=None):
    """Evaluate a pre-trained S2V-DQN-style agent on a set of validation graphs.

    The agent observes only the spin state, gets a dense reward and flips
    each spin at most once (irreversible), hence step_factor = 1. Results
    are pickled into ``save_loc``/data.

    Args:
        save_loc: Directory containing the 'data' and 'network' sub-folders.
        graph_save_loc: Pickle file holding the validation graph matrices.
        batched: Forwarded to test_network; if True, graphs are evaluated in batches.
        max_batch_size: Forwarded to test_network; upper bound on batch size (None = no limit).
    """
    print("\n----- Running {} -----\n".format(os.path.basename(__file__)))

    ####################################################
    # NETWORK LOCATION
    ####################################################
    data_folder = os.path.join(save_loc, 'data')
    network_folder = os.path.join(save_loc, 'network')
    print("data folder :", data_folder)
    print("network folder :", network_folder)
    network_save_path = os.path.join(network_folder, 'network_best.pth')
    print("network params :", network_save_path)

    ####################################################
    # NETWORK SETUP
    ####################################################
    network_fn = MPNN
    network_args = {
        'n_layers': 3,
        'n_features': 64,
        'n_hid_readout': [],
        'tied_weights': False
    }

    ####################################################
    # SET UP ENVIRONMENT AND VARIABLES
    ####################################################
    # Irreversible spins: one action per spin, so episodes last n_spins steps.
    step_factor = 1

    env_args = {'observables': [Observable.SPIN_STATE],
                'reward_signal': RewardSignal.DENSE,
                'extra_action': ExtraAction.NONE,
                'optimisation_target': OptimisationTarget.CUT,
                'spin_basis': SpinBasis.BINARY,
                'norm_rewards': True,
                'memory_length': None,
                'horizon_length': None,
                'stag_punishment': None,
                'basin_reward': None,
                'reversible_spins': False}

    ####################################################
    # LOAD VALIDATION GRAPHS
    ####################################################
    graphs_test = load_graph_set(graph_save_loc)

    ####################################################
    # SETUP NETWORK TO TEST
    ####################################################
    # Throwaway environment on the first graph, used only to read the
    # observation-space shape needed to construct the network.
    test_env = ising_env.make("SpinSystem",
                              SingleGraphGenerator(graphs_test[0]),
                              graphs_test[0].shape[0] * step_factor,
                              **env_args)

    device = "cuda" if torch.cuda.is_available() else "cpu"
    print("Running on device: {}.".format(device))

    network = network_fn(n_obs_in=test_env.observation_space.shape[1],
                         **network_args).to(device)
    network.load_state_dict(torch.load(network_save_path, map_location=device))
    # Inference only: freeze all parameters.
    for param in network.parameters():
        param.requires_grad = False
    network.eval()
    print("Successfully created agent with pre-trained MPNN.\nMPNN architecture\n\n{}".format(repr(network)))

    ####################################################
    # TEST NETWORK ON VALIDATION GRAPHS
    ####################################################
    results, results_raw, history = test_network(network, env_args, graphs_test, device, step_factor,
                                                 return_raw=True, return_history=True,
                                                 batched=batched, max_batch_size=max_batch_size)

    # Derive all three output filenames from the graph-set name once.
    graph_name = os.path.splitext(os.path.split(graph_save_loc)[-1])[0]
    results_fname = "results_{}.pkl".format(graph_name)
    results_raw_fname = "results_{}_raw.pkl".format(graph_name)
    history_fname = "results_{}_history.pkl".format(graph_name)

    for res, fname, label in zip([results, results_raw, history],
                                 [results_fname, results_raw_fname, history_fname],
                                 ["results", "results_raw", "history"]):
        save_path = os.path.join(data_folder, fname)
        res.to_pickle(save_path)
        print("{} saved to {}".format(label, save_path))
if __name__ == "__main__":
run() | 4,485 | 35.770492 | 108 | py |
USLN | USLN-master/test.py | from PIL import Image
import os
import numpy as np
import torch
from model import USLN
from SegDataset import read_file_list
from tqdm import trange
# ----- Inference: enhance every test image with the pre-trained USLN -----
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = USLN()
# map_location makes a CUDA-saved checkpoint loadable on CPU-only machines.
model.load_state_dict(torch.load(r'logs/UFO.pth', map_location=device))
model.eval()
model = model.to(device)

test, path_list_images_test = read_file_list(type='test')

for idx in trange(len(test)):
    image = Image.open(test[idx]).convert('RGB')
    # HWC uint8 -> CHW float32 in [0, 1] (float32 is what the model expects).
    input = np.transpose(np.array(image, np.float32), (2, 0, 1)) / 255.0
    input = torch.from_numpy(input).to(device).unsqueeze(0)
    with torch.no_grad():  # inference only: skip autograd bookkeeping
        output = model(input)
    output_np = output.squeeze(0).cpu().numpy()
    # CHW -> HWC, rescale to [0, 255]; clip so out-of-range model outputs
    # saturate instead of wrapping around in the uint8 cast.
    predictimag = np.clip(np.transpose(output_np, (1, 2, 0)) * 255.0, 0, 255)
    a = Image.fromarray(predictimag.astype('uint8'))
    a.save(os.path.join(r"datasets/pred", path_list_images_test[idx]))
| 961 | 21.904762 | 69 | py |
USLN | USLN-master/loss.py | import torch
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from math import exp
import torch.nn as nn
from torchvision import models
class VGG_loss(nn.Module):
    """Perceptual loss built on a VGG-style backbone.

    Both inputs are passed through a truncated copy of ``model``'s feature
    extractor (all but the last three layers of the backbone's first child),
    and the L1 distance between the two feature maps is returned.
    """

    def __init__(self, model):
        super(VGG_loss, self).__init__()
        # Keep everything except the final three layers of the feature stack.
        feature_layers = list(model.children())[0][:-3]
        self.features = nn.Sequential(*feature_layers)
        self.l1loss = nn.L1Loss()

    def forward(self, x, y):
        """Return the mean absolute difference between feature maps of x and y."""
        return self.l1loss(self.features(x), self.features(y))
def gaussian(window_size, sigma):
    """Return a 1-D Gaussian kernel of length ``window_size``, normalised to sum to 1."""
    centre = window_size // 2
    weights = torch.Tensor(
        [exp(-((i - centre) ** 2) / float(2 * sigma ** 2)) for i in range(window_size)])
    return weights / weights.sum()
def create_window(window_size, channel):
    """Build a ``channel x 1 x window_size x window_size`` Gaussian window.

    The 2-D window is the outer product of the 1-D Gaussian (sigma = 1.5)
    with itself, expanded to one depthwise kernel per channel.
    """
    kernel_1d = gaussian(window_size, 1.5).unsqueeze(1)
    kernel_2d = kernel_1d.mm(kernel_1d.t()).float().unsqueeze(0).unsqueeze(0)
    return Variable(kernel_2d.expand(channel, 1, window_size, window_size).contiguous())
def _ssim(img1, img2, window, window_size, channel, size_average=True):
mu1 = F.conv2d(img1, window, padding=window_size // 2, groups=channel)
mu2 = F.conv2d(img2, window, padding=window_size // 2, groups=channel)
mu1_sq = mu1.pow(2)
mu2_sq = mu2.pow(2)
mu1_mu2 = mu1 * mu2
sigma1_sq = F.conv2d(img1 * img1, window, padding=window_size // 2, groups=channel) - mu1_sq
sigma2_sq = F.conv2d(img2 * img2, window, padding=window_size // 2, groups=channel) - mu2_sq
sigma12 = F.conv2d(img1 * img2, window, padding=window_size // 2, groups=channel) - mu1_mu2
C1 = 0.01 ** 2
C2 = 0.03 ** 2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * (sigma1_sq + sigma2_sq + C2))
if size_average:
return ssim_map.mean()
else:
return ssim_map.mean(1).mean(1).mean(1)
class SSIM(torch.nn.Module):
    """SSIM-based loss module: ``forward`` returns ``1 - SSIM(img1, img2)``.

    The Gaussian window is cached and lazily rebuilt whenever the channel
    count or the tensor type of the input changes.
    """

    def __init__(self, window_size=11, size_average=True):
        super(SSIM, self).__init__()
        self.window_size = window_size
        self.size_average = size_average
        self.channel = 1
        self.window = create_window(window_size, self.channel)

    def forward(self, img1, img2):
        (_, channel, _, _) = img1.size()
        cache_ok = (channel == self.channel
                    and self.window.data.type() == img1.data.type())
        if cache_ok:
            window = self.window
        else:
            # Rebuild the window to match the input's channels/device/dtype.
            window = create_window(self.window_size, channel)
            if img1.is_cuda:
                window = window.cuda(img1.get_device())
            window = window.type_as(img1)
            self.window = window
            self.channel = channel
        return 1 - _ssim(img1, img2, window, self.window_size, channel,
                         self.size_average)
def ssim(img1, img2, window_size=11, size_average=True):
    """Functional SSIM between two 4-D image batches (window built per call)."""
    (_, channel, _, _) = img1.size()
    kernel = create_window(window_size, channel)
    # Move/convert the kernel to match the inputs.
    if img1.is_cuda:
        kernel = kernel.cuda(img1.get_device())
    kernel = kernel.type_as(img1)
    return _ssim(img1, img2, kernel, window_size, channel, size_average)
class Combinedloss(torch.nn.Module):
    # Composite training loss: 0.25 * (1 - SSIM) + L1 + VGG perceptual term.
    def __init__(self):
        super(Combinedloss, self).__init__()
        self.ssim=SSIM()
        self.l1loss = torch.nn.L1Loss()
        # NOTE(review): l2loss is constructed but never used in forward.
        self.l2loss = torch.nn.MSELoss()
        # Pretrained VGG19-BN backbone for the perceptual loss
        # (downloads weights on first use).
        vgg = models.vgg19_bn(pretrained=True)
        self.vggloss = VGG_loss(vgg)
    def forward(self, out, label):
        ssim_loss = self.ssim(out, label)
        l1_loss = self.l1loss(out, label)
        vgg_loss = self.vggloss(out, label)
        # SSIM term is down-weighted relative to the pixel/perceptual terms.
        total_loss = 0.25*ssim_loss + l1_loss +vgg_loss
        # total_loss = 0.25 * ssim_loss + l1_loss
        return total_loss | 3,620 | 33.485714 | 114 | py |
USLN | USLN-master/color_change.py | import torch
# Run on the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# torch.cuda.set_device(0)
# device='cpu'
def rgb2hsi(img):
    """Convert a (N, 3, H, W) RGB batch in [0, 1] to HSI, all channels in [0, 1]."""
    img = torch.clamp(img, 0, 1)
    red, green, blue = img[:, 0, :, :], img[:, 1, :, :], img[:, 2, :, :]
    # Intensity: plain channel mean.
    intensity = (red + green + blue) / 3
    # Saturation: 1 - min/mean, with an epsilon guarding black pixels.
    saturation = 1 - 3 * img.min(1)[0] / (red + green + blue + 1e-5)
    # Hue from the standard geometric derivation, normalised to [0, 1].
    numer = (2 * red - blue - green) / 2
    denom = ((red - green) ** 2 + (red - blue) * (green - blue) + 1e-5) ** 0.5
    angle = torch.arccos(numer / denom) / 2 / torch.pi
    # Mirror the angle when blue dominates red.
    hue = (blue <= red) * angle + (blue > red) * (1 - angle)
    return torch.cat(
        (hue.unsqueeze(1), saturation.unsqueeze(1), intensity.unsqueeze(1)),
        dim=1,
    )
def hsi2rgb(img):
    """Convert a (N, 3, H, W) HSI batch in [0, 1] back to RGB in [0, 1]."""
    img = torch.clamp(img, 0, 1)
    hue = img[:, 0, :, :]
    sat = img[:, 1, :, :]
    inten = img[:, 2, :, :]
    red = torch.zeros_like(hue)
    green = torch.zeros_like(hue)
    blue = torch.zeros_like(hue)
    shifted = torch.zeros_like(hue)
    # Split the hue circle into three 120-degree sectors.
    sector0 = (hue < 1 / 3)
    sector2 = (hue >= 2 / 3)
    sector1 = 1 - sector0.int() - sector2.int()
    sector1 = (sector1 == 1)
    # Angle within the sector, in radians.
    shifted[sector0] = 2 * torch.pi * hue[sector0]
    shifted[sector1] = 2 * torch.pi * (hue[sector1] - 1 / 3)
    shifted[sector2] = 2 * torch.pi * (hue[sector2] - 2 / 3)
    low = inten * (1 - sat)
    high = inten * (1 + sat * torch.cos(shifted) / (torch.cos(torch.pi / 3 - shifted) + 1e-5))
    # Sector 0: red leads; sector 1: green leads; sector 2: blue leads.
    red[sector0] = high[sector0]
    blue[sector0] = low[sector0]
    green[sector0] = 3 * inten[sector0] - red[sector0] - blue[sector0]
    green[sector1] = high[sector1]
    red[sector1] = low[sector1]
    blue[sector1] = 3 * inten[sector1] - red[sector1] - green[sector1]
    blue[sector2] = high[sector2]
    green[sector2] = low[sector2]
    red[sector2] = 3 * inten[sector2] - green[sector2] - blue[sector2]
    return torch.cat(
        (red.unsqueeze(1), green.unsqueeze(1), blue.unsqueeze(1)), dim=1
    )
def rgb2hsv(img):
    """Convert a (N, 3, H, W) RGB batch in [0, 1] to HSV, all channels in [0, 1]."""
    img = torch.clamp(img, 0, 1)
    cmax = img.max(1)[0]
    cmin = img.min(1)[0]
    spread = cmax - cmin + 1e-5
    r, g, b = img[:, 0], img[:, 1], img[:, 2]
    # Start from uninitialised storage; every element is overwritten below.
    hue = torch.Tensor(img.shape[0], img.shape[2], img.shape[3]).to(img.device)
    # Later assignments win on ties, so the priority is r > g > b > grey.
    blue_max = b == cmax
    hue[blue_max] = 4.0 + ((r - g) / spread)[blue_max]
    green_max = g == cmax
    hue[green_max] = 2.0 + ((b - r) / spread)[green_max]
    red_max = r == cmax
    hue[red_max] = (0.0 + ((g - b) / spread)[red_max]) % 6
    hue[cmin == cmax] = 0.0
    hue = hue / 6
    saturation = (cmax - cmin) / (cmax + 1e-5)
    saturation[cmax == 0] = 0
    value = cmax
    return torch.cat(
        [hue.unsqueeze(1), saturation.unsqueeze(1), value.unsqueeze(1)], dim=1
    )
def hsv2rgb(hsv):
    """Convert a (N, 3, H, W) HSV batch to RGB; h wraps, s and v are clamped."""
    h, s, v = hsv[:, 0, :, :], hsv[:, 1, :, :], hsv[:, 2, :, :]
    # Bring out-of-range components back into the valid domain.
    h = h % 1
    s = torch.clamp(s, 0, 1)
    v = torch.clamp(v, 0, 1)
    sector = torch.floor(h * 6)
    frac = h * 6 - sector
    p = v * (1 - s)
    q = v * (1 - (frac * s))
    t = v * (1 - ((1 - frac) * s))
    r = torch.zeros_like(h)
    g = torch.zeros_like(h)
    b = torch.zeros_like(h)
    # Channel sources (r, g, b) for sectors 0..5 of the hue circle.
    sector_rgb = [(v, t, p), (q, v, p), (p, v, t), (p, q, v), (t, p, v), (v, p, q)]
    for idx, (src_r, src_g, src_b) in enumerate(sector_rgb):
        mask = sector == idx
        r[mask] = src_r[mask]
        g[mask] = src_g[mask]
        b[mask] = src_b[mask]
    return torch.cat([r.unsqueeze(1), g.unsqueeze(1), b.unsqueeze(1)], dim=1)
# Linear-RGB -> CIE XYZ conversion matrix (values match the standard sRGB/D65
# matrix -- presumably that is the intended colour space; confirm upstream).
MAT_RGB2XYZ = torch.Tensor([[0.412453, 0.357580, 0.180423],
                            [0.212671, 0.715160, 0.072169],
                            [0.019334, 0.119193, 0.950227]]).to(device)
# Inverse transform: CIE XYZ -> linear RGB.
MAT_XYZ2RGB = torch.Tensor([[ 3.2405, -1.5372, -0.4985],
                            [-0.9693, 1.8760, 0.0416],
                            [ 0.0556, -0.2040, 1.0573]]).to(device)
# Reference white point used to normalise XYZ in the Lab conversions
# (values correspond to the D65 illuminant).
XYZ_REF_WHITE = torch.Tensor([0.95047, 1.0, 1.08883]).to(device)
def rgb2lab(rgb):
    """RGB in [0, 1] -> normalised CIE Lab (via XYZ)."""
    clamped = torch.clamp(rgb, 0, 1)
    return xyz_to_lab(rgb_to_xyz(clamped))
def lab2rgb(lab):
    """Normalised CIE Lab (all channels in [0, 1]) -> RGB in [0, 1] (via XYZ)."""
    clamped = torch.clamp(lab, 0, 1)
    return xyz_to_rgb(lab_to_xyz(clamped))
def rgb_to_xyz(rgb):
    """sRGB in [0, 1] -> CIE XYZ (linearise, then apply the RGB->XYZ matrix)."""
    # Undo the sRGB gamma curve (linear toe below 0.04045, power law above).
    linear = torch.zeros_like(rgb)
    bright = rgb > 0.04045
    linear[bright] = ((rgb[bright] + 0.055) / 1.055) ** 2.4
    linear[~bright] = rgb[~bright] / 12.92
    # Matrix multiply in channels-last layout, then restore (N, C, H, W).
    channels_last = linear.permute(0, 2, 3, 1)
    xyz = torch.matmul(channels_last, MAT_RGB2XYZ.T)
    return xyz.permute(0, 3, 1, 2)
def xyz_to_lab(xyz):
    """CIE XYZ -> Lab, with every output channel rescaled into [0, 1]."""
    scaled = xyz.permute(0, 2, 3, 1)
    scaled = scaled / XYZ_REF_WHITE
    # f(t): cube root above the CIE threshold, linear segment below it.
    big = scaled > 0.008856
    scaled[big] = torch.pow(scaled[big], 1.0 / 3.0)
    scaled[~big] = 7.787 * scaled[~big] + 16.0 / 116.0
    fx, fy, fz = scaled[..., 0], scaled[..., 1], scaled[..., 2]
    lab = torch.zeros_like(scaled)
    lab[..., 0] = (116.0 * fy) - 16.0  # L channel (nominally 0..100)
    lab[..., 1] = 500.0 * (fx - fy)    # a channel
    lab[..., 2] = 200.0 * (fy - fz)    # b channel
    # Normalise each channel into [0, 1] so the tensor stays image-like.
    lab[..., 0] = lab[..., 0] / 100
    lab[..., 1] = (lab[..., 1] + 86.183030) / 184.416084
    lab[..., 2] = (lab[..., 2] + 107.857300) / 202.335422
    return lab.permute(0, 3, 1, 2)
def lab_to_xyz(lab):
    """Normalised Lab (all channels in [0, 1]) -> CIE XYZ.

    Inverse of ``xyz_to_lab``: undo the per-channel [0, 1] rescaling, rebuild
    the f(x), f(y), f(z) terms, invert the CIE nonlinearity and de-normalise
    by the reference white.
    """
    lab = lab.permute(0, 2, 3, 1)
    l, a, b = lab[..., 0], lab[..., 1], lab[..., 2]
    # Undo the [0, 1] normalisation applied in xyz_to_lab.
    l = l * 100
    a = a * 184.416084 - 86.183030
    b = b * 202.335422 - 107.857300
    xyz = torch.zeros_like(lab)
    xyz[..., 1] = (l + 16.0) / 116.0
    xyz[..., 0] = a / 500.0 + xyz[..., 1]
    xyz[..., 2] = xyz[..., 1] - b / 200.0
    # BUG FIX: torch.clamp is not in-place; the original discarded its result,
    # so negative values (see the commented-out index clamp it replaced) were
    # never clipped.  Keep the clamped tensor.
    xyz = torch.clamp(xyz, min=0.0)
    # Invert the CIE nonlinearity (cube above the threshold, linear below).
    mask = xyz > 0.2068966
    xyz[mask] = torch.pow(xyz[mask], 3.0)
    xyz[~mask] = (xyz[~mask] - 16.0 / 116.0) / 7.787
    # De-normalise by the reference white point.
    xyz = xyz * XYZ_REF_WHITE
    xyz = xyz.permute(0, 3, 1, 2)
    return xyz
def xyz_to_rgb(xyz):
    """CIE XYZ -> sRGB in [0, 1] (matrix transform, then gamma encode)."""
    channels_last = xyz.permute(0, 2, 3, 1)
    rgb = torch.matmul(channels_last, MAT_XYZ2RGB.T)
    # Apply the sRGB gamma curve (power law above the linear toe).
    bright = rgb > 0.0031308
    rgb[bright] = 1.055 * torch.pow(rgb[bright], 1.0 / 2.4) - 0.055
    rgb[~bright] = rgb[~bright] * 12.92
    # Clip into the displayable range.
    rgb = torch.clip(rgb, 0, 1)
    return rgb.permute(0, 3, 1, 2)
if __name__ == '__main__':
    # Smoke test: push a single pixel through the RGB -> XYZ -> Lab -> XYZ ->
    # RGB round trip and check that gradients flow back to the input.
    with torch.autograd.set_detect_anomaly(True):
        rgb = torch.Tensor([[[0.8, 0.5, 0.5]]])
        rgb.requires_grad_()
        # (H, W, C) -> (1, C, H, W)
        rgb = torch.unsqueeze(rgb.permute(2,0,1),0)
        xzy=rgb_to_xyz(rgb)
        lab=xyz_to_lab(xzy)
        xzy1=lab_to_xyz(lab)
        rgb1=xyz_to_rgb(xzy1)
        print(rgb1)
        rgb1 = rgb1.sum()
        rgb1.backward()
| 7,245 | 26.656489 | 116 | py |
USLN | USLN-master/model.py | import torch.nn as nn
from ptflops import get_model_complexity_info
from color_change import *
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
class WB(nn.Module):
    """White-balance style block mixing global pooling statistics with convs."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.conv2 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()

    def forward(self, x):
        # Per-channel global statistics used as normalisers.
        mean_stat = self.avgpool(x)
        max_stat = self.maxpool(x)
        # Two parallel branches: each divides a 1x1-conv projection by a
        # global statistic and adds a tanh-bounded residual of the input.
        avg_branch = self.conv(x) / (mean_stat + 1e-5) + self.tanh(self.conv4(x))
        max_branch = self.conv1(x) / (max_stat + 1e-5) + self.tanh(self.conv5(x))
        return self.conv2(avg_branch) + self.conv3(max_branch)
class RGBhs(nn.Module):
    """Per-image min/max contrast stretch in RGB, then a learnable 1x1 mix."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        # Global per-channel min (via max of the negated input) and max.
        lo = -self.maxpool(-x)
        hi = self.maxpool(x)
        stretched = (x - lo) / (hi - lo + 1e-5)
        return self.conv(stretched)
class Labhs(nn.Module):
    """Contrast stretch performed in Lab space, with a learnable 1x1 mix."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(3, 3, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        lab = rgb2lab(x)
        # Global per-channel min/max in Lab space.
        lo = -self.maxpool(-lab)
        hi = self.maxpool(lab)
        stretched = (lab - lo) / (hi - lo + 1e-5)
        return lab2rgb(self.conv(stretched))
class HSIhs(nn.Module):
    """Saturation/intensity stretch in HSI space; hue is passed through."""

    def __init__(self):
        super().__init__()
        self.conv = nn.Conv2d(2, 2, kernel_size=1, stride=1)
        self.maxpool = nn.AdaptiveMaxPool2d(output_size=(1, 1))

    def forward(self, x):
        hsi = rgb2hsi(x)
        # Only stretch the S and I channels; hue must stay untouched.
        hue, si = torch.split(hsi, [1, 2], dim=1)
        lo = -self.maxpool(-si)
        hi = self.maxpool(si)
        stretched = (si - lo) / (hi - lo + 1e-5)
        si = stretched + self.conv(stretched)
        return hsi2rgb(torch.cat((hue, si), dim=1))
class USLN(nn.Module):
    """Full enhancement network: white balance, then three colour-space
    stretch branches fused by 3x3 convolutions and clamped into [0, 1]."""

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv2 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv3 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv4 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv5 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.conv6 = nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1)
        self.tanh = nn.Tanh()
        self.step1 = WB()
        self.step2 = RGBhs()
        self.step3 = HSIhs()
        self.step4 = Labhs()
        self.relu = nn.ReLU()
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        balanced = self.step1(x)
        # Each branch: colour-space stretch plus a tanh-bounded residual.
        rgb_branch = self.step2(balanced) + self.tanh(self.conv1(balanced))
        hsi_branch = self.step3(balanced) + self.tanh(self.conv2(balanced))
        lab_branch = self.step4(balanced) + self.tanh(self.conv3(balanced))
        fused = self.conv4(rgb_branch) + self.conv5(hsi_branch) + self.conv6(lab_branch)
        # Double-ReLU trick clamps the output into [0, 1].
        return 1 - self.relu(1 - self.relu(fused))
if __name__ == '__main__':
    # Report the model's FLOPs and parameter count for a 3x256x256 input.
    net=USLN().to(device)
    flops,params=get_model_complexity_info(net,(3,256,256))
    print(flops,params)
| 3,686 | 31.342105 | 130 | py |
USLN | USLN-master/SegDataset.py | import os
from PIL import Image
import numpy as np
import torch
from torch.utils.data import Dataset
def read_file_list(type='train'):
    """Return (inputs, targets) path lists for the requested dataset split.

    ``type`` is one of ``'train'``, ``'val'`` or ``'test'``.  For train/val
    the second element is the list of label paths; for test it is the list of
    bare file names (used later to name the predictions).

    BUG FIX: ``os.listdir`` returns entries in arbitrary order, yet images
    and labels are paired purely by position downstream -- the listings are
    now sorted so image[i] always corresponds to label[i].  Directories are
    also listed lazily, so only the requested split's folders must exist.
    """
    def _list(folder):
        # Sorted so parallel folders with matching file names stay aligned.
        return sorted(os.listdir(folder))

    if type == 'train':
        image_names = _list(r"datasets/images_train")
        label_names = _list(r"datasets/labels_train")
        images = [os.path.join(r"datasets/images_train", name) for name in image_names]
        labels = [os.path.join(r"datasets/labels_train", name) for name in label_names]
        return images, labels
    elif type == 'val':
        image_names = _list(r"datasets/images_val")
        label_names = _list(r"datasets/labels_val")
        images = [os.path.join(r"datasets/images_val", name) for name in image_names]
        labels = [os.path.join(r"datasets/labels_val", name) for name in label_names]
        return images, labels
    elif type == 'test':
        image_names = _list(r"datasets/images_test")
        images = [os.path.join(r"datasets/images_test", name) for name in image_names]
        return images, image_names
def preprocess_input(image):
    """Scale pixel values from [0, 255] to [0, 1].

    Returns a new array instead of dividing in place: the original
    ``image /= 255.0`` silently mutated the caller's array.
    """
    return image / 255.0
class SegDataset(torch.utils.data.Dataset):
    """Paired image/label dataset: loads RGB files and returns CHW float
    tensors scaled to [0, 1]."""

    def __init__(self, type):
        images, labels = read_file_list(type=type)
        self.images = images
        self.labels = labels
        print('Read ' + str(len(self.images)) + ' valid examples')

    def rand(self, a=0, b=1):
        # Uniform sample in [a, b) (augmentation helper).
        return np.random.rand() * (b - a) + a

    def __getitem__(self, idx):
        # Load the pair as RGB and convert HWC uint8 -> CHW float in [0, 1].
        image = Image.open(self.images[idx]).convert('RGB')
        label = Image.open(self.labels[idx]).convert('RGB')
        image_chw = np.transpose(preprocess_input(np.array(image, np.float64)), [2, 0, 1])
        label_chw = np.transpose(preprocess_input(np.array(label, np.float64)), [2, 0, 1])
        image_tensor = torch.from_numpy(image_chw).type(torch.FloatTensor)
        label_tensor = torch.from_numpy(label_chw).type(torch.FloatTensor)
        return image_tensor, label_tensor  # float32 tensor, float32 tensor

    def __len__(self):
        return len(self.images)
| 2,160 | 28.60274 | 94 | py |
USLN | USLN-master/train.py | import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
import sys
from model import USLN
from SegDataset import SegDataset
from loss import Combinedloss
########################################################
# Data-loader workers: none on Windows (multiprocessing issues), 8 elsewhere.
num_workers = 0 if sys.platform.startswith('win32') else 8
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(device)
#############################################################
torch.cuda.set_device(1) # pin training to a specific GPU
if __name__ == "__main__":
    # Training configuration.
    Init_Epoch = 0
    Final_Epoch = 100
    batch_size = 10
    lr = 1e-2
    model = USLN()
    save_model_epoch = 1
    model = model.to(device)
    data_train = SegDataset('train')
    data_test = SegDataset('val')
    myloss = Combinedloss().to(device)
    if True:
        batch_size = batch_size
        start_epoch = Init_Epoch
        end_epoch = Final_Epoch
        optimizer = optim.Adam(model.train().parameters(), lr=lr, weight_decay = 5e-4)
        # Decay the learning rate by a factor of 0.94 every epoch.
        lr_scheduler = optim.lr_scheduler.StepLR(optimizer, step_size = 1, gamma = 0.94)
        for epo in range(start_epoch, end_epoch):
            train_loss = 0
            model.train() # enable batch normalization and dropout
            train_iter = torch.utils.data.DataLoader(data_train, batch_size, shuffle=True,
                                                     drop_last=True, num_workers=num_workers,pin_memory=True)
            test_iter = torch.utils.data.DataLoader(data_test, batch_size, drop_last=True,
                                                    num_workers=num_workers,pin_memory=True)
            for index, (bag, bag_msk) in enumerate(train_iter):
                bag = bag.to(device)
                bag_msk = bag_msk.to(device)
                optimizer.zero_grad()
                output = model(bag)
                loss = myloss(output, bag_msk)
                loss.backward()
                iter_loss = loss.item()
                train_loss += iter_loss
                optimizer.step()
                if np.mod(index, 15) == 0:
                    print('epoch {}, {}/{},train loss is {}'.format(epo, index, len(train_iter), iter_loss))
            # validation pass (no gradients)
            test_loss = 0
            model.eval()
            with torch.no_grad():
                for index, (bag, bag_msk) in enumerate(test_iter):
                    bag = bag.to(device)
                    bag_msk = bag_msk.to(device)
                    optimizer.zero_grad()
                    output = model(bag)
                    loss = myloss(output, bag_msk)
                    # loss = criterion(output, torch.argmax(bag_msk, axis=1))
                    iter_loss = loss.item()
                    test_loss += iter_loss
            print('<---------------------------------------------------->')
            print('epoch: %f' % epo)
            print('epoch train loss = %f, epoch test loss = %f'
                  % (train_loss / len(train_iter), test_loss / len(test_iter)))
            lr_scheduler.step()
            # save a checkpoint every `save_model_epoch` epochs
            if np.mod(epo, save_model_epoch) == 0:
                # store only the model parameters (state_dict)
                torch.save(model.state_dict(), 'logs/ep%03d-loss%.3f-val_loss%.3f.pth' % (
                    (epo + 1), (100*train_loss / len(train_iter)), (100*test_loss / len(test_iter)))
                )
                print('saveing checkpoints/model_{}.pth'.format(epo))
| 3,435 | 31.415094 | 109 | py |
ccdetection | ccdetection-master/configurator.py | '''
Created on Nov 14, 2015
@author: Tommi Unruh
'''
import os
import stat
class Configurator(object):
    """
    Writes and loads data from a config file.

    Config files hold simple ``key = value`` lines.  Parsed values live in
    the class-level ``Configurator.config`` dict, shared by all components
    through the static accessors below.
    """

    DEFAULT_HEAP_SIZE = "6G"

    # Keys used in the config file.
    KEY_PHP7 = "php7"
    KEY_NEO4J = "neo4j"
    KEY_HEAPSIZE = "heap_size"
    KEY_GRAPHDBS = "graphdbs"
    KEY_BASE_DIR = "basedir"
    KEY_PHP_JOERN = "phpjoern"
    KEY_PHP_PARSER = "php_parser"
    KEY_SPAWN_SCRIPT = "spawn_script"
    KEY_PYTHON_JOERN = "python_joern"
    KEY_BATCH_IMPORT = "batch_import"
    KEY_PHP_PARSE_RESULTS = "php_parser_results"

    # Default path components the keys above point at.
    PATH_PHP7 = "php7"
    PATH_NEO4j = "neo4j"
    PATH_GRAPHDBS = "graphs"
    PATH_PHP_JOERN = "phpjoern"
    PATH_PHP_PARSER = "AST_parser"
    PATH_SPAWN_SCRIPT = "spawn_neodb.sh"
    PATH_PYTHON_JOERN = "python-joern"
    PATH_BATCH_IMPORT = "batch-import"
    PATH_PHP_PARSE_RESULTS = "parse_results"

    debugging = False

    def __init__(self):
        pass

    @staticmethod
    def readLine(_line):
        """
        Return (key, value) of a read line.
        None for empty lines and ValueError on format errors.
        """
        if _line.strip() == "":
            return None
        # Split on the first '=' only, so values may themselves contain '='.
        key, value = _line.split("=", 1)
        return (key.strip(), value.strip())

    @staticmethod
    def load(config_path):
        """Convenience wrapper around readFullConfigFile()."""
        Configurator.readFullConfigFile(config_path)

    @staticmethod
    def readFullConfigFile(config_path):
        """Parse the whole file at config_path into Configurator.config."""
        Configurator.config = {}
        with open(config_path, 'r') as fh:
            cnt = 0
            for _line in fh:
                cnt += 1
                # Parse line; wrap any parsing error into a ConfigException
                # carrying the offending line number.
                try:
                    res = Configurator.readLine(_line)
                    if res:
                        key, value = res
                        Configurator.config[key] = value
                # FIX: was a bare "except:", which also swallowed
                # KeyboardInterrupt/SystemExit.
                except Exception:
                    raise ConfigException(
                        "Format error in config file on line %d." % (cnt)
                    )

    @staticmethod
    def getHeapVal():
        """Return the configured heap size as [amount, unit], e.g. [6, 'G']."""
        heap_size_str = Configurator.config[Configurator.KEY_HEAPSIZE]
        return [int(heap_size_str[:-1]), heap_size_str[-1]]

    def setupConfig(self, config_path, base_dir, path, start_port=7473):
        """Write the initial config file and prepare the working directories.

        config_path: where to write the config file.
        base_dir:    working directory (graph DBs, spawn script, parser).
        path:        installation prefix holding php7/neo4j/joern etc.
        start_port:  first HTTP port for the neo4j instances; instance i gets
                     HTTP port start_port + i and HTTPS start_port + 100 + i.
        """
        config_dict = {}
        if path[-1] == "/":
            path = path[:-1]
        config_dict[self.KEY_PHP7] = path + "/" + self.PATH_PHP7
        config_dict[self.KEY_NEO4J] = path + "/" + self.PATH_NEO4j
        config_dict[self.KEY_BASE_DIR] = base_dir
        config_dict[self.KEY_GRAPHDBS] = base_dir + "/" + self.PATH_GRAPHDBS
        config_dict[self.KEY_PHP_JOERN] = path + "/" + self.PATH_PHP_JOERN
        config_dict[self.KEY_PYTHON_JOERN] = path + "/" + self.PATH_PYTHON_JOERN
        config_dict[self.KEY_BATCH_IMPORT] = path + "/" + self.PATH_BATCH_IMPORT
        config_dict[self.KEY_SPAWN_SCRIPT] = base_dir + "/" + \
            self.PATH_SPAWN_SCRIPT
        config_dict[self.KEY_PHP_PARSER] = base_dir + "/" + self.PATH_PHP_PARSER
        config_dict[self.KEY_PHP_PARSE_RESULTS] = (
            config_dict[self.KEY_PHP_PARSER] + "/" +
            self.PATH_PHP_PARSE_RESULTS
        )
        config_dict[self.KEY_HEAPSIZE] = self.DEFAULT_HEAP_SIZE
        self.writeConfigFile(
            config_path,
            config_dict
        )
        if not os.path.exists(config_dict[self.KEY_GRAPHDBS]):
            # Ignore the race condition here, it does not matter.
            # Create the directory for the several graph databases.
            os.makedirs(config_dict[self.KEY_GRAPHDBS])
        if not os.path.exists(config_dict[self.KEY_PHP_PARSE_RESULTS]):
            # Ignore the race condition here, it does not matter.
            # Create the directory for the PHP AST parsing results.
            os.makedirs(config_dict[self.KEY_PHP_PARSE_RESULTS])
        # Write the server config file for every neo4j instance.
        # They differ in the ports (http and https) they use.
        neo_4j_path = config_dict[self.KEY_NEO4J] + \
            "/neo4j-0%d/conf/neo4j-server.properties"
        # Count the neo4j instances (directories named neo4j-*).  The loop
        # variable is named 'entry' so it no longer shadows the 'path' param.
        neo4j_count = 0
        for entry in os.listdir(config_dict[self.KEY_NEO4J]):
            if os.path.isdir(os.path.join(config_dict[self.KEY_NEO4J], entry)):
                if entry[0:6] == "neo4j-":
                    neo4j_count += 1
        for i in range(1, neo4j_count + 1):
            self.writeNeo4jConfig(
                neo_4j_path % i, start_port + i, start_port + 100 + i
            )
        # Make the helper scripts executable.
        filepath = config_dict[self.KEY_SPAWN_SCRIPT]
        st = os.stat(filepath)
        os.chmod(filepath, st.st_mode | stat.S_IEXEC)
        filepath = config_dict[self.KEY_PHP_PARSER] + "/parser"
        st = os.stat(filepath)
        os.chmod(filepath, st.st_mode | stat.S_IEXEC)

    def writeConfigFile(self, filepath, _dict):
        """Write _dict to filepath as 'key = value' lines."""
        config_format = "%s = %s"
        with open(filepath, 'w') as fh:
            for key in _dict:
                fh.write(config_format % (key, _dict[key]) + "\n")

    @staticmethod
    def isDebuggingEnabled():
        """Return whether debug mode was switched on via setDebugging()."""
        return Configurator.debugging

    @staticmethod
    def setDebugging(_bool):
        Configurator.debugging = _bool

    @staticmethod
    def getPath(_key):
        """Return the configured value for _key or raise ValueError."""
        # .get() instead of [] so a missing key reaches the ValueError below
        # instead of raising a bare KeyError.
        val = Configurator.config.get(_key)
        if val:
            return val
        else:
            # FIX: the original never substituted _key, so the message kept a
            # literal '%s' placeholder.
            raise ValueError(
                "'%s' is not specified in the config file." % (_key,)
            )

    def writeNeo4jConfig(self, path, port, port_ssl):
        """Write a neo4j-server.properties file using the given HTTP and
        HTTPS ports."""
        default_config = """################################################################
# Neo4j configuration
#
################################################################
#***************************************************************
# Server configuration
#***************************************************************
# location of the database directory
org.neo4j.server.database.location=data/graph.db
# Let the webserver only listen on the specified IP. Default is localhost (only
# accept local connections). Uncomment to allow any connection. Please see the
# security section in the neo4j manual before modifying this.
#org.neo4j.server.webserver.address=0.0.0.0
#
# HTTP Connector
#
# http port (for all data, administrative, and UI access)
org.neo4j.server.webserver.port=%d
#
# HTTPS Connector
#
# Turn https-support on/off
org.neo4j.server.webserver.https.enabled=true
# https port (for all data, administrative, and UI access)
org.neo4j.server.webserver.https.port=%d
# Certificate location (auto generated if the file does not exist)
org.neo4j.server.webserver.https.cert.location=conf/ssl/snakeoil.cert
# Private key location (auto generated if the file does not exist)
org.neo4j.server.webserver.https.key.location=conf/ssl/snakeoil.key
# Internally generated keystore (don't try to put your own
# keystore there, it will get deleted when the server starts)
org.neo4j.server.webserver.https.keystore.location=data/keystore
#*****************************************************************
# Administration client configuration
#*****************************************************************
# location of the servers round-robin database directory. Possible values:
# - absolute path like /var/rrd
# - path relative to the server working directory like data/rrd
# - commented out, will default to the database data directory.
org.neo4j.server.webadmin.rrdb.location=data/rrd
# REST endpoint for the data API
# Note the / in the end is mandatory
org.neo4j.server.webadmin.data.uri=/db/data/
# REST endpoint of the administration API (used by Webadmin)
org.neo4j.server.webadmin.management.uri=/db/manage/
# Low-level graph engine tuning file
org.neo4j.server.db.tuning.properties=conf/neo4j.properties
# The console services to be enabled
org.neo4j.server.manage.console_engines=shell
# Comma separated list of JAX-RS packages containing JAX-RS resources, one
# package name for each mountpoint. The listed package names will be loaded
# under the mountpoints specified. Uncomment this line to mount the
# org.neo4j.examples.server.unmanaged.HelloWorldResource.java from
# neo4j-server-examples under /examples/unmanaged, resulting in a final URL of
# http://localhost:7474/examples/unmanaged/helloworld/{nodeId}
#org.neo4j.server.thirdparty_jaxrs_classes=org.neo4j.examples.server.unmanaged=/examples/unmanaged
#*****************************************************************
# HTTP logging configuration
#*****************************************************************
# HTTP logging is disabled. HTTP logging can be enabled by setting this
# property to 'true'.
org.neo4j.server.http.log.enabled=false
# Logging policy file that governs how HTTP log output is presented and
# archived. Note: changing the rollover and retention policy is sensible, but
# changing the output format is less so, since it is configured to use the
# ubiquitous common log format
org.neo4j.server.http.log.config=conf/neo4j-http-logging.xml"""
        with open(path, 'w') as fh:
            fh.write(default_config % (port, port_ssl))
class ConfigException(BaseException):
    """Raised when the configuration file is malformed."""

    def __init__(self, msg=None):
        # Fall back to a generic message when none is supplied.
        self.message = msg if msg else "Format error in config file."

    def __str__(self):
        return self.message
poincare_glove | poincare_glove-master/setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2014 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Run with:
sudo python ./setup.py install
"""
import os
import sys
import warnings
import ez_setup
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext
# Refuse to run on unsupported interpreters (Python 2 < 2.7, Python 3 < 3.5).
# BUG FIX: the original compared the tuple sys.version_info[:1] to the int 3,
# which is never true, so old Python 3 versions slipped through the check.
if sys.version_info[:2] < (2, 7) or (sys.version_info[0] == 3 and sys.version_info[:2] < (3, 5)):
    raise Exception('This version of gensim needs Python 2.7, 3.5 or later.')

ez_setup.use_setuptools()

# the following code is adapted from tornado's setup.py:
# https://github.com/tornadoweb/tornado/blob/master/setup.py
# to support installing without the extension on platforms where
# no compiler is available.
class custom_build_ext(build_ext):
    """Allow C extension building to fail.
    The C extension speeds up word2vec and doc2vec training, but is not essential.
    """
    # Shown when compilation fails; the two %s placeholders are filled with a
    # description of what failed and a platform-specific hint.
    warning_message = """
********************************************************************
WARNING: %s could not
be compiled. No C extensions are essential for gensim to run,
although they do result in significant speed improvements for some modules.
%s
Here are some hints for popular operating systems:
If you are seeing this message on Linux you probably need to
install GCC and/or the Python development package for your
version of Python.
Debian and Ubuntu users should issue the following command:
    $ sudo apt-get install build-essential python-dev
RedHat, CentOS, and Fedora users should issue the following command:
    $ sudo yum install gcc python-devel
If you are seeing this message on OSX please read the documentation
here:
http://api.mongodb.org/python/current/installation.html#osx
********************************************************************
"""
    def run(self):
        # Run the normal extension build, downgrading any failure to a warning
        # so a pure-Python install can still proceed.
        try:
            build_ext.run(self)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(
                self.warning_message +
                "Extension modules" +
                "There was an issue with your platform configuration - see above.")
    def build_extension(self, ext):
        # Same policy per extension: a failed build of one module is not fatal.
        name = ext.name
        try:
            build_ext.build_extension(self, ext)
        except Exception:
            e = sys.exc_info()[1]
            sys.stdout.write('%s\n' % str(e))
            warnings.warn(
                self.warning_message +
                "The %s extension module" % (name,) +
                "The output above this warning shows how the compilation failed.")
    # the following is needed to be able to add numpy's include dirs... without
    # importing numpy directly in this script, before it's actually installed!
    # http://stackoverflow.com/questions/19919905/how-to-bootstrap-numpy-installation-in-setup-py
    def finalize_options(self):
        build_ext.finalize_options(self)
        # Prevent numpy from thinking it is still in its setup process:
        # https://docs.python.org/2/library/__builtin__.html#module-__builtin__
        if isinstance(__builtins__, dict):
            __builtins__["__NUMPY_SETUP__"] = False
        else:
            __builtins__.__NUMPY_SETUP__ = False
        import numpy
        self.include_dirs.append(numpy.get_include())
# Include directories for the C extensions declared below.
model_dir = os.path.join(os.path.dirname(__file__), 'gensim', 'models')
gensim_dir = os.path.join(os.path.dirname(__file__), 'gensim')
cmdclass = {'build_ext': custom_build_ext}
# Register wheelhouse-uploader commands only when one was requested on the CLI.
WHEELHOUSE_UPLOADER_COMMANDS = {'fetch_artifacts', 'upload_all'}
if WHEELHOUSE_UPLOADER_COMMANDS.intersection(sys.argv):
    import wheelhouse_uploader.cmd
    cmdclass.update(vars(wheelhouse_uploader.cmd))
# Optimisation flag passed to the C compiler for all extensions.
OPT_LEVEL="-O3"
LONG_DESCRIPTION = u"""
==============================================
gensim -- Topic Modelling in Python
==============================================
|Travis|_
|Wheel|_
.. |Travis| image:: https://img.shields.io/travis/RaRe-Technologies/gensim/develop.svg
.. |Wheel| image:: https://img.shields.io/pypi/wheel/gensim.svg
.. _Travis: https://travis-ci.org/RaRe-Technologies/gensim
.. _Downloads: https://pypi.python.org/pypi/gensim
.. _License: http://radimrehurek.com/gensim/about.html
.. _Wheel: https://pypi.python.org/pypi/gensim
Gensim is a Python library for *topic modelling*, *document indexing* and *similarity retrieval* with large corpora.
Target audience is the *natural language processing* (NLP) and *information retrieval* (IR) community.
Features
---------
* All algorithms are **memory-independent** w.r.t. the corpus size (can process input larger than RAM, streamed, out-of-core),
* **Intuitive interfaces**
* easy to plug in your own input corpus/datastream (trivial streaming API)
* easy to extend with other Vector Space algorithms (trivial transformation API)
* Efficient multicore implementations of popular algorithms, such as online **Latent Semantic Analysis (LSA/LSI/SVD)**,
**Latent Dirichlet Allocation (LDA)**, **Random Projections (RP)**, **Hierarchical Dirichlet Process (HDP)** or **word2vec deep learning**.
* **Distributed computing**: can run *Latent Semantic Analysis* and *Latent Dirichlet Allocation* on a cluster of computers.
* Extensive `documentation and Jupyter Notebook tutorials <https://github.com/RaRe-Technologies/gensim/#documentation>`_.
If this feature list left you scratching your head, you can first read more about the `Vector
Space Model <http://en.wikipedia.org/wiki/Vector_space_model>`_ and `unsupervised
document analysis <http://en.wikipedia.org/wiki/Latent_semantic_indexing>`_ on Wikipedia.
Installation
------------
This software depends on `NumPy and Scipy <http://www.scipy.org/Download>`_, two Python packages for scientific computing.
You must have them installed prior to installing `gensim`.
It is also recommended you install a fast BLAS library before installing NumPy. This is optional, but using an optimized BLAS such as `ATLAS <http://math-atlas.sourceforge.net/>`_ or `OpenBLAS <http://xianyi.github.io/OpenBLAS/>`_ is known to improve performance by as much as an order of magnitude. On OS X, NumPy picks up the BLAS that comes with it automatically, so you don't need to do anything special.
The simple way to install `gensim` is::
pip install -U gensim
Or, if you have instead downloaded and unzipped the `source tar.gz <http://pypi.python.org/pypi/gensim>`_ package,
you'd run::
python setup.py test
python setup.py install
For alternative modes of installation (without root privileges, development
installation, optional install features), see the `install documentation <http://radimrehurek.com/gensim/install.html>`_.
This version has been tested under Python 2.7, 3.5 and 3.6. Support for Python 2.6, 3.3 and 3.4 was dropped in gensim 1.0.0. Install gensim 0.13.4 if you *must* use Python 2.6, 3.3 or 3.4. Support for Python 2.5 was dropped in gensim 0.10.0; install gensim 0.9.1 if you *must* use Python 2.5). Gensim's github repo is hooked against `Travis CI for automated testing <https://travis-ci.org/RaRe-Technologies/gensim>`_ on every commit push and pull request.
How come gensim is so fast and memory efficient? Isn't it pure Python, and isn't Python slow and greedy?
--------------------------------------------------------------------------------------------------------
Many scientific algorithms can be expressed in terms of large matrix operations (see the BLAS note above). Gensim taps into these low-level BLAS libraries, by means of its dependency on NumPy. So while gensim-the-top-level-code is pure Python, it actually executes highly optimized Fortran/C under the hood, including multithreading (if your BLAS is so configured).
Memory-wise, gensim makes heavy use of Python's built-in generators and iterators for streamed data processing. Memory efficiency was one of gensim's `design goals <http://radimrehurek.com/gensim/about.html>`_, and is a central feature of gensim, rather than something bolted on as an afterthought.
Documentation
-------------
* `QuickStart`_
* `Tutorials`_
* `Tutorial Videos`_
* `Official Documentation and Walkthrough`_
Citing gensim
-------------
When `citing gensim in academic papers and theses <https://scholar.google.cz/citations?view_op=view_citation&hl=en&user=9vG_kV0AAAAJ&citation_for_view=9vG_kV0AAAAJ:u-x6o8ySG0sC>`_, please use this BibTeX entry::
@inproceedings{rehurek_lrec,
title = {{Software Framework for Topic Modelling with Large Corpora}},
author = {Radim {\\v R}eh{\\r u}{\\v r}ek and Petr Sojka},
booktitle = {{Proceedings of the LREC 2010 Workshop on New
Challenges for NLP Frameworks}},
pages = {45--50},
year = 2010,
month = May,
day = 22,
publisher = {ELRA},
address = {Valletta, Malta},
language={English}
}
----------------
Gensim is open source software released under the `GNU LGPLv2.1 license <http://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html>`_.
Copyright (c) 2009-now Radim Rehurek
|Analytics|_
.. |Analytics| image:: https://ga-beacon.appspot.com/UA-24066335-5/your-repo/page-name
.. _Analytics: https://github.com/igrigorik/ga-beacon
.. _Official Documentation and Walkthrough: http://radimrehurek.com/gensim/
.. _Tutorials: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#tutorials
.. _Tutorial Videos: https://github.com/RaRe-Technologies/gensim/blob/develop/tutorials.md#videos
.. _QuickStart: https://github.com/RaRe-Technologies/gensim/blob/develop/docs/notebooks/gensim%20Quick%20Start.ipynb
"""
# Extra dependency needed only for running gensim in distributed mode (Pyro4 RPC layer).
distributed_env = ['Pyro4 >= 4.27']
# Test-only dependencies that are installable on Windows as well.
win_testenv = [
    'pytest',
    'pytest-rerunfailures',
    'mock',
    'cython',
    'pyemd',
    'testfixtures',
    'scikit-learn',
    'Morfessor==2.0.2a4',
]
# Linux test environment adds packages without reliable Windows wheels at the time of writing.
linux_testenv = win_testenv + [
    'annoy',
    'tensorflow <= 1.3.0',
    'keras >= 2.0.4',
]
setup(
    name='gensim',
    version='3.4.0',
    description='Python framework for fast Vector Space Modelling',
    long_description=LONG_DESCRIPTION,
    # Pre-generated C sources (produced from Cython ahead of time) are compiled
    # directly, so end users do not need Cython installed.
    ext_modules=[
        Extension('gensim.models.word2vec_inner',
                  sources=['./gensim/models/word2vec_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.models.doc2vec_inner',
                  sources=['./gensim/models/doc2vec_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.corpora._mmreader',
                  extra_compile_args=[OPT_LEVEL],
                  sources=['./gensim/corpora/_mmreader.c']),
        Extension('gensim.models.fasttext_inner',
                  sources=['./gensim/models/fasttext_inner.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim.models._utils_any2vec',
                  sources=['./gensim/models/_utils_any2vec.c'],
                  extra_compile_args=[OPT_LEVEL],
                  include_dirs=[model_dir]),
        Extension('gensim._matutils',
                  extra_compile_args=[OPT_LEVEL],
                  sources=['./gensim/_matutils.c']),
    ],
    cmdclass=cmdclass,
    packages=find_packages(),
    author=u'Radim Rehurek',
    author_email='me@radimrehurek.com',
    url='http://radimrehurek.com/gensim',
    download_url='http://pypi.python.org/pypi/gensim',
    license='LGPLv2.1',
    keywords='Singular Value Decomposition, SVD, Latent Semantic Indexing, '
             'LSA, LSI, Latent Dirichlet Allocation, LDA, '
             'Hierarchical Dirichlet Process, HDP, Random Projections, '
             'TFIDF, word2vec',
    platforms='any',
    zip_safe=False,
    classifiers=[  # from http://pypi.python.org/pypi?%3Aaction=list_classifiers
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Text Processing :: Linguistic',
    ],
    test_suite="gensim.test",
    # NumPy must be importable before the build itself starts.
    setup_requires=[
        'numpy >= 1.11.3'
    ],
    install_requires=[
        'numpy >= 1.11.3',
        'scipy >= 0.18.1',
        'six >= 1.5.0',
        'smart_open >= 1.2.1',
    ],
    tests_require=linux_testenv,
    extras_require={
        'distributed': distributed_env,
        'test-win': win_testenv,
        'test': linux_testenv,
        'docs': linux_testenv + distributed_env + ['sphinx', 'sphinxcontrib-napoleon', 'plotly', 'pattern', 'sphinxcontrib.programoutput'],
    },
    include_package_data=True,
)
| 12,941 | 38.218182 | 455 | py |
poincare_glove | poincare_glove-master/gensim/models/keyedvectors.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: Shiva Manne <manneshiva@gmail.com>
# Copyright (C) 2018 RaRe Technologies s.r.o.
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Word vector storage and similarity look-ups.
Common code independent of the way the vectors are trained (Word2Vec, FastText, WordRank, VarEmbed etc.)
The word vectors are considered read-only in this class.
Initialize the vectors by training e.g. Word2Vec::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with::
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname)
The vectors can also be instantiated from an existing file on disk
in the original Google's word2vec C format as a KeyedVectors instance::
>>> from gensim.models import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the vectors. Some of them
are already built-in::
>>> word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> word_vectors.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> word_vectors.similarity('woman', 'man')
0.73723527
Correlation with human opinion on word similarity::
>>> word_vectors.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> word_vectors.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # noqa:F401
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import dot, zeros, float32 as REAL, float64 as DOUBLE, empty, memmap as np_memmap, \
double, array, vstack, sqrt, newaxis, integer, \
ndarray, sum as np_sum, average, prod, argmax, divide as np_divide, tanh, arctanh, arccosh, cos, log
from numpy.linalg import norm
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, integer_types
from six.moves import xrange, zip
from scipy import sparse, stats
from gensim.utils import deprecated
from gensim.models.utils_any2vec import _save_word2vec_format, _load_word2vec_format, _compute_ngrams, _ft_hash
logger = logging.getLogger(__name__)
# Small numerical-stability constant — presumably guards divisions/log-domain
# hyperbolic operations; its usages are further down the file. TODO confirm.
EPS = 1e-10
class Vocab(object):
    """A single vocabulary entry.

    Used internally to hold per-word frequency/sampling statistics and to build
    binary trees (both word leaves and inner nodes). Arbitrary attributes can
    be attached via keyword arguments.
    """
    def __init__(self, **kwargs):
        # Default count; any keyword argument (including 'count') overrides it.
        self.count = 0
        for attr_name, attr_value in kwargs.items():
            setattr(self, attr_name, attr_value)
    def __lt__(self, other):
        # Order entries by frequency so they can live in a priority queue.
        return self.count < other.count
    def __str__(self):
        public_items = [
            '%s:%r' % (name, value)
            for name, value in sorted(self.__dict__.items())
            if not name.startswith('_')
        ]
        return "%s(%s)" % (self.__class__.__name__, ', '.join(public_items))
class BaseKeyedVectors(utils.SaveLoad):
    """Abstract container of vectors keyed by string entities (words, docs, ...).

    Subclasses populate `self.vectors` (one row per entity) and `self.vocab`
    (entity -> metadata object carrying an `.index` row number), and implement
    the similarity/distance primitives below.
    """
    def __init__(self, vector_size, vector_dtype=REAL):
        self.vectors = []
        self.vocab = {}
        self.vector_size = vector_size
        self.vector_dtype = vector_dtype
        self.index2entity = []
    def save(self, fname_or_handle, **kwargs):
        """Persist the object to `fname_or_handle` (see `utils.SaveLoad.save`)."""
        super(BaseKeyedVectors, self).save(fname_or_handle, **kwargs)
    @classmethod
    def load(cls, fname_or_handle, **kwargs):
        """Load a previously saved instance from `fname_or_handle`."""
        return super(BaseKeyedVectors, cls).load(fname_or_handle, **kwargs)
    def similarity(self, entity1, entity2):
        """Compute similarity between entities, specified by string tag.
        """
        raise NotImplementedError()
    def most_similar(self, **kwargs):
        """Find the top-N most similar entities.
        Possibly have `positive` and `negative` list of entities in `**kwargs`.
        """
        # Bug fix: this previously did `return NotImplementedError()`, silently
        # handing callers an exception *instance* instead of raising it.
        raise NotImplementedError()
    def distance(self, entity1, entity2):
        """Compute distance between vectors of two input entities, specified by string tag.
        """
        raise NotImplementedError()
    def distances(self, entity1, other_entities=()):
        """Compute distances from given entity (string tag) to all entities in `other_entity`.
        If `other_entities` is empty, return distance between `entity1` and all entities in vocab.
        """
        raise NotImplementedError()
    def embedding_norm(self, word):
        """Compute the norm of the target embedding for a given word.
        """
        raise NotImplementedError()
    def get_vector(self, entity):
        """Accept a single entity as input, specified by string tag.
        Returns the entity's representations in vector space, as a 1D numpy array.
        Raises KeyError if `entity` is not in the vocabulary.
        """
        if entity in self.vocab:
            result = self.vectors[self.vocab[entity].index]
            result.setflags(write=False)  # guard callers against accidental in-place edits
            return result
        else:
            raise KeyError("'%s' not in vocabulary" % entity)
    def __getitem__(self, entities):
        """
        Accept a single entity (string tag) or list of entities as input.
        If a single string or int, return designated tag's vector
        representation, as a 1D numpy array.
        If a list, return designated tags' vector representations as a
        2D numpy array: #tags x #vector_size.
        """
        if isinstance(entities, string_types):
            # allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
            return self.get_vector(entities)
        return vstack([self.get_vector(entity) for entity in entities])
    def __contains__(self, entity):
        return entity in self.vocab
    def most_similar_to_given(self, entity1, entities_list):
        """Return the entity from entities_list most similar to entity1."""
        return entities_list[argmax([self.similarity(entity1, entity) for entity in entities_list])]
    def closer_than(self, entity1, entity2):
        """Returns all entities that are closer to `entity1` than `entity2` is to `entity1`."""
        all_distances = self.distances(entity1)
        e1_index = self.vocab[entity1].index
        e2_index = self.vocab[entity2].index
        closer_node_indices = np.where(all_distances < all_distances[e2_index])[0]
        # Exclude the query entity itself from the result.
        return [self.index2entity[index] for index in closer_node_indices if index != e1_index]
    def rank(self, entity1, entity2):
        """Rank of the distance of `entity2` from `entity1`, in relation to distances of all entities from `entity1`."""
        return len(self.closer_than(entity1, entity2)) + 1
class WordEmbeddingsKeyedVectors(BaseKeyedVectors):
    """Class containing common methods for operations over word vectors."""
    def __init__(self, vector_size, vector_dtype=REAL, init_pretrained_config=None):
        """Initialise empty storage for word vectors of dimensionality `vector_size`."""
        super(WordEmbeddingsKeyedVectors, self).__init__(vector_size=vector_size, vector_dtype=vector_dtype)
        self.vectors_norm = None  # lazily-filled cache of L2-normalised vectors (see init_sims)
        self.index2word = []  # int index -> word string
        self.index2freq = []  # int index -> corpus frequency of the word
        self.init_pretrained_config = init_pretrained_config  # optional pretrained-initialisation config
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self instead")
    def wv(self):
        # Backwards-compatible alias so code written against `model.wv` keeps working.
        return self
    @property
    def index2entity(self):
        # Generic-entity alias for `index2word`, required by the BaseKeyedVectors API.
        return self.index2word
    @index2entity.setter
    def index2entity(self, value):
        self.index2word = value
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
    def syn0(self):
        # Legacy word2vec-era alias for the raw vector matrix.
        return self.vectors
    @syn0.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors instead")
    def syn0(self, value):
        self.vectors = value
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
    def syn0norm(self):
        # Legacy alias for the cached L2-normalised vectors.
        return self.vectors_norm
    @syn0norm.setter
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_norm instead")
    def syn0norm(self, value):
        self.vectors_norm = value
    def __contains__(self, word):
        # Membership test: `word in kv` checks the vocabulary mapping.
        return word in self.vocab
def save(self, *args, **kwargs):
"""Saves the keyedvectors. This saved model can be loaded again using
:func:`~gensim.models.*2vec.*2VecKeyedVectors.load` which supports
operations on trained word vectors like `most_similar`.
Parameters
----------
fname : str
Path to the file.
"""
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['vectors_norm'])
super(WordEmbeddingsKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
"""
Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Examples
--------
>>> trained_model['office']
array([ -1.40128313e-02, ...])
"""
if word in self.vocab:
if use_norm:
result = self.vectors_norm[self.vocab[word].index]
else:
result = self.vectors[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word)
    def get_vector(self, word):
        # Entity-agnostic accessor required by BaseKeyedVectors; words are the entities here.
        return self.word_vec(word)
    def words_closer_than(self, w1, w2):
        """
        Returns all words that are closer to `w1` than `w2` is to `w1`.
        Parameters
        ----------
        w1 : str
            Input word.
        w2 : str
            Input word.
        Returns
        -------
        list (str)
            List of words that are closer to `w1` than `w2` is to `w1`.
        Examples
        --------
        >>> model.words_closer_than('carnivore', 'mammal')
        ['dog', 'canine']
        """
        # Word-specific alias for the generic entity implementation in the base class.
        return super(WordEmbeddingsKeyedVectors, self).closer_than(w1, w2)
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
Parameters
----------
word : str
Word
topn : int
Number of top-N similar words to return. If topn is False, similar_by_word returns
the vector of similarity scores.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
Example::
>>> trained_model.similar_by_word('graph')
[('user', 0.9999163150787354), ...]
"""
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words by vector.
Parameters
----------
vector : numpy.array
vector from which similarities are to be computed.
expected shape (dim,)
topn : int
Number of top-N similar words to return. If topn is False, similar_by_vector returns
the vector of similarity scores.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
    def similarity_matrix(self, dictionary, tfidf=None, threshold=0.0, exponent=2.0, nonzero_limit=100, dtype=REAL):
        """Constructs a term similarity matrix for computing Soft Cosine Measure.
        Constructs a sparse term similarity matrix in the :class:`scipy.sparse.csc_matrix` format for computing
        Soft Cosine Measure between documents.
        Parameters
        ----------
        dictionary : :class:`~gensim.corpora.dictionary.Dictionary`
            A dictionary that specifies a mapping between words and the indices of rows and columns
            of the resulting term similarity matrix.
        tfidf : :class:`gensim.models.tfidfmodel.TfidfModel`, optional
            A model that specifies the relative importance of the terms in the dictionary. The rows
            of the term similarity matrix will be build in an increasing order of importance of terms,
            or in the order of term identifiers if None.
        threshold : float, optional
            Only pairs of words whose embeddings are more similar than `threshold` are considered
            when building the sparse term similarity matrix.
        exponent : float, optional
            The exponent applied to the similarity between two word embeddings when building the term similarity matrix.
        nonzero_limit : int, optional
            The maximum number of non-zero elements outside the diagonal in a single row or column
            of the term similarity matrix. Setting `nonzero_limit` to a constant ensures that the
            time complexity of computing the Soft Cosine Measure will be linear in the document
            length rather than quadratic.
        dtype : numpy.dtype, optional
            Data-type of the term similarity matrix.
        Returns
        -------
        :class:`scipy.sparse.csc_matrix`
            Term similarity matrix.
        See Also
        --------
        :func:`gensim.matutils.softcossim`
            The Soft Cosine Measure.
        :class:`gensim.similarities.docsim.SoftCosineSimilarity`
            A class for performing corpus-based similarity queries with Soft Cosine Measure.
        Notes
        -----
        The constructed matrix corresponds to the matrix Mrel defined in section 2.1 of
        `Delphine Charlet and Geraldine Damnati, "SimBow at SemEval-2017 Task 3: Soft-Cosine Semantic Similarity
        between Questions for Community Question Answering", 2017
        <http://www.aclweb.org/anthology/S/S17/S17-2051.pdf>`__.
        """
        logger.info("constructing a term similarity matrix")
        matrix_order = len(dictionary)
        # Per-row/column count of nonzeros; starts at 1 to account for the identity diagonal.
        matrix_nonzero = [1] * matrix_order
        matrix = sparse.identity(matrix_order, dtype=dtype, format="dok")
        num_skipped = 0
        # Decide the order of rows.
        if tfidf is None:
            word_indices = range(matrix_order)
        else:
            assert max(tfidf.idfs) < matrix_order
            # Most important (highest-idf) terms first.
            word_indices = [
                index for index, _ in sorted(tfidf.idfs.items(), key=lambda x: x[1], reverse=True)
            ]
        # Traverse rows.
        for row_number, w1_index in enumerate(word_indices):
            if row_number % 1000 == 0:
                logger.info(
                    "PROGRESS: at %.02f%% rows (%d / %d, %d skipped, %.06f%% density)",
                    100.0 * (row_number + 1) / matrix_order, row_number + 1, matrix_order,
                    num_skipped, 100.0 * matrix.getnnz() / matrix_order**2)
            w1 = dictionary[w1_index]
            if w1 not in self.vocab:
                num_skipped += 1
                continue  # A word from the dictionary is not present in the word2vec model.
            # Traverse upper triangle columns.
            if matrix_order <= nonzero_limit + 1:  # Traverse all columns.
                columns = (
                    (w2_index, self.similarity(w1, dictionary[w2_index]))
                    for w2_index in range(w1_index + 1, matrix_order)
                    if w1_index != w2_index and dictionary[w2_index] in self.vocab)
            else:  # Traverse only columns corresponding to the embeddings closest to w1.
                num_nonzero = matrix_nonzero[w1_index] - 1
                columns = (
                    (dictionary.token2id[w2], similarity)
                    for _, (w2, similarity)
                    in zip(
                        range(nonzero_limit - num_nonzero),
                        self.most_similar(positive=[w1], topn=nonzero_limit - num_nonzero)
                    )
                    if w2 in dictionary.token2id
                )
                columns = sorted(columns, key=lambda x: x[0])
            for w2_index, similarity in columns:
                # Ensure that we don't exceed `nonzero_limit` by mirroring the upper triangle.
                if similarity > threshold and matrix_nonzero[w2_index] <= nonzero_limit:
                    element = similarity**exponent
                    matrix[w1_index, w2_index] = element
                    matrix_nonzero[w1_index] += 1
                    matrix[w2_index, w1_index] = element
                    matrix_nonzero[w2_index] += 1
        logger.info(
            "constructed a term similarity matrix with %0.6f %% nonzero elements",
            100.0 * matrix.getnnz() / matrix_order**2
        )
        return matrix.tocsc()
def wmdistance(self, document1, document2):
"""
Compute the Word Mover's Distance between two documents. When using this
code, please consider citing the following papers:
.. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
.. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
.. Matt Kusner et al. "From Word Embeddings To Document Distances".
Note that if one of the documents have no words that exist in the
Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.
This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).
Example:
>>> # Train word2vec model.
>>> model = Word2Vec(sentences)
>>> # Some sentences to test.
>>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
>>> sentence_president = 'The president greets the press in Chicago'.lower().split()
>>> # Remove their stopwords.
>>> from nltk.corpus import stopwords
>>> stopwords = nltk.corpus.stopwords.words('english')
>>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
>>> sentence_president = [w for w in sentence_president if w not in stopwords]
>>> # Compute WMD.
>>> distance = model.wmdistance(sentence_obama, sentence_president)
"""
if not PYEMD_EXT:
raise ImportError("Please install pyemd Python package to compute WMD.")
# Remove out-of-vocabulary words.
len_pre_oov1 = len(document1)
len_pre_oov2 = len(document2)
document1 = [token for token in document1 if token in self]
document2 = [token for token in document2 if token in self]
diff1 = len_pre_oov1 - len(document1)
diff2 = len_pre_oov2 - len(document2)
if diff1 > 0 or diff2 > 0:
logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)
if len(document1) == 0 or len(document2) == 0:
logger.info(
"At least one of the documents had no words that werein the vocabulary. "
"Aborting (returning inf)."
)
return float('inf')
dictionary = Dictionary(documents=[document1, document2])
vocab_len = len(dictionary)
if vocab_len == 1:
# Both documents are composed by a single unique token
return 0.0
# Sets for faster look-up.
docset1 = set(document1)
docset2 = set(document2)
# Compute distance matrix.
distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
for i, t1 in dictionary.items():
for j, t2 in dictionary.items():
if t1 not in docset1 or t2 not in docset2:
continue
# Compute Euclidean distance between word vectors.
distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))
if np_sum(distance_matrix) == 0.0:
# `emd` gets stuck if the distance matrix contains only zeros.
logger.info('The distance matrix is all zeros. Aborting (returning inf).')
return float('inf')
def nbow(document):
d = zeros(vocab_len, dtype=double)
nbow = dictionary.doc2bow(document) # Word frequencies.
doc_len = len(document)
for idx, freq in nbow:
d[idx] = freq / float(doc_len) # Normalized word frequencies.
return d
# Compute nBOW representation of documents.
d1 = nbow(document1)
d2 = nbow(document2)
# Compute WMD.
return emd(d1, d2, distance_matrix)
    @staticmethod
    def log_accuracy(section):
        """Print the best accuracy over candidate thresholds for one evaluation section.

        `section` maps 'correct'/'incorrect' to per-threshold answer counts
        (counts are kept instead of the answer words themselves); the printed
        threshold is t = idx * 0.1 for the best index.
        """
        # Pick the threshold index with the most correct answers.
        idx = argmax(section["correct"])
        correct, incorrect = section["correct"][idx], section["incorrect"][idx]
        if correct + incorrect > 0:
            print("{}: {:.1f}% ({}/{}) for t={:.2f}".format(
                section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect, idx*0.1
            ))
    def eval_accuracy_for_batch(self, batch, section, most_similar, restrict_vocab, case_insensitive, debug=False):
        """Score one batch of analogy questions and append counts to `section`.

        `batch` holds flattened [a, b, c, expected] vocabulary indices and is
        cleared after evaluation. `most_similar` is a batched analogy solver;
        it returns an iterable of results whose first element is the list of
        predicted words (apparently one result per similarity threshold —
        cf. `log_accuracy`'s t = idx * 0.1; confirm against the solver).
        """
        if len(batch) == 0:
            return
        batch_arr = np.array(batch).reshape((-1, 4))
        A = batch_arr[:, 0]
        B = batch_arr[:, 1]
        C = batch_arr[:, 2]
        expected = [self.index2word[i].upper() if case_insensitive else self.index2word[i] for i in batch_arr[:, 3]]
        # find the most likely prediction, ignoring OOV words and input words
        results = most_similar(self, positive=[B, C], negative=A, restrict_vocab=restrict_vocab, debug=debug)
        for result in results:
            # Count correct/incorrect answers instead of collecting the words themselves.
            correct, incorrect = 0, 0
            predicted = [word.upper() for word in result[0]] if case_insensitive else result[0]
            for i, info in enumerate(zip(expected, predicted, A, B, C)):
                exp, pred, a, b, c = info
                if pred == exp:
                    correct += 1
                else:
                    incorrect += 1
            section["correct"].append(correct)
            section["incorrect"].append(incorrect)
        batch.clear()
    def accuracy(self, questions, restrict_vocab=30000, most_similar=None, case_insensitive=True, debug=False, verbose=True):
        """
        Compute accuracy of the model. `questions` is a filename where lines are
        4-tuples of words, split into sections by ": SECTION NAME" lines.
        See questions-words.txt in
        https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip
        for an example.
        The accuracy is reported (=printed to log and returned as a list) for each
        section separately, plus there's one aggregate summary at the end.
        Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
        words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
        In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
        case normalization is performed.
        Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
        evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
        and question words. In case of multiple case variants of a single word, the vector for the first
        occurrence (also the most frequent if vocabulary is sorted) is taken.
        This method corresponds to the `compute-accuracy` script of the original C word2vec.
        """
        # Restrict (and optionally case-normalise) the vocabulary used for evaluation.
        ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
        ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
        if not most_similar:
            most_similar = VanillaWordEmbeddingsKeyedVectors.batch_most_similar_analogy
        sections, section = [], None
        batch = []
        # Temporarily swap in the restricted vocab; restored after the question loop.
        original_vocab = self.vocab
        self.vocab = ok_vocab
        for line_no, line in enumerate(utils.smart_open(questions)):
            line = utils.to_unicode(line)
            if line.startswith(': '):
                # Evaluate previous section.
                self.eval_accuracy_for_batch(batch=batch, section=section, most_similar=most_similar,
                                             restrict_vocab=restrict_vocab, case_insensitive=case_insensitive,
                                             debug=debug)
                # a new section starts => store the old section
                if section:
                    sections.append(section)
                    if verbose:
                        self.log_accuracy(section)
                    # Only evaluate one section when running in debug mode.
                    if debug:
                        return sections
                section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
            else:
                if not section:
                    raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
                try:
                    if case_insensitive:
                        a, b, c, expected = [word.upper() for word in line.split()]
                    else:
                        a, b, c, expected = [word for word in line.split()]
                except ValueError:
                    logger.info("skipping invalid line #%i in %s", line_no, questions)
                    continue
                if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                    logger.debug("skipping line #%i with OOV words: %s", line_no, line.strip())
                    continue
                batch.append([self.vocab[w].index for w in [a, b, c, expected]])
        # Evaluate last section.
        self.eval_accuracy_for_batch(batch=batch, section=section, most_similar=most_similar,
                                     restrict_vocab=restrict_vocab, case_insensitive=case_insensitive, debug=debug)
        self.vocab = original_vocab
        if section:
            # store the last section, too
            sections.append(section)
            if verbose:
                self.log_accuracy(section)
        # Aggregate the per-threshold counts into semantic / syntactic / total summaries.
        if len(sections) > 1:
            sem = {
                'section': 'semantic',
                'correct': np.array([np.array(section["correct"])
                                     for section in filter(lambda s: not s['section'].startswith('gram'), sections)]).sum(axis=0),
                'incorrect': np.array([np.array(section["incorrect"])
                                       for section in filter(lambda s: not s['section'].startswith('gram'), sections)]).sum(axis=0),
            }
            syn = {
                'section': 'syntactic',
                'correct': np.array([np.array(section["correct"])
                                     for section in filter(lambda s: s['section'].startswith('gram'), sections)]).sum(axis=0),
                'incorrect': np.array([np.array(section["incorrect"])
                                       for section in filter(lambda s: s['section'].startswith('gram'), sections)]).sum(axis=0),
            }
            total = {
                'section': 'total',
                'correct': np.array([np.array(section["correct"]) for section in sections]).sum(axis=0),
                'incorrect': np.array([np.array(section["incorrect"]) for section in sections]).sum(axis=0),
            }
            if len(sections) > 1:  # NOTE(review): redundant — already inside this same condition.
                if verbose:
                    self.log_accuracy(sem)
                    self.log_accuracy(syn)
                # Keep only the best-threshold counts; t_argmax records the winning threshold.
                idx = argmax(sem["correct"])
                sem["correct"] = [sem["correct"][idx]]
                sem["incorrect"] = [sem["incorrect"][idx]]
                sem["t_argmax"] = [idx * 0.1]
                sections.append(sem)
                idx = argmax(syn["correct"])
                syn["correct"] = [syn["correct"][idx]]
                syn["incorrect"] = [syn["incorrect"][idx]]
                syn["t_argmax"] = [idx * 0.1]
                sections.append(syn)
            if verbose:
                self.log_accuracy(total)
            idx = argmax(total["correct"])
            total["correct"] = [total["correct"][idx]]
            total["incorrect"] = [total["incorrect"][idx]]
            total["t_argmax"] = [idx * 0.1]
            sections.append(total)
        return sections
    @staticmethod
    def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
        """Log summary statistics produced by `evaluate_word_pairs` at DEBUG level."""
        logger.debug('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
        logger.debug('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
        logger.debug('Pairs with unknown words ratio: %.1f%%', oov)
    def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
                            case_insensitive=True, dummy4unknown=False, debug_file=None):
        """
        Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
        lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
        An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
        http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.
        The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
        between the similarities from the dataset and the similarities produced by the model itself.
        The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).
        Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
        words (default 300,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
        If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
        is performed.
        Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
        evaluating the model (default True). Useful when you expect case-mismatch between training tokens
        and words pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
        occurrence (also the most frequent if vocabulary is sorted) is taken.
        Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
        Otherwise (default False), these pairs are skipped entirely.
        If `debug_file` is given, every evaluated pair is also written there as CSV.
        """
        # Restrict (and optionally case-normalise) the vocabulary used for evaluation.
        ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
        ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
        similarity_gold = []
        similarity_model = []
        oov = 0
        # Temporarily swap in the restricted vocab; restored after the loop.
        original_vocab = self.vocab
        self.vocab = ok_vocab
        if debug_file:
            # NOTE(review): `f` is only closed on the happy path — an exception
            # mid-loop leaks the handle (and leaves self.vocab unrestored).
            f = open(debug_file, "w")
            f.write("Word1,Word2,Gold standard(0-10),Model similarity (-hyp_dist^2)\n")
        for line_no, line in enumerate(utils.smart_open(pairs)):
            line = utils.to_unicode(line)
            if line.startswith('#'):
                # May be a comment
                continue
            else:
                try:
                    if case_insensitive:
                        a, b, sim = [word.upper() for word in line.split(delimiter)]
                    else:
                        a, b, sim = [word for word in line.split(delimiter)]
                    sim = float(sim)
                except (ValueError, TypeError):
                    logger.info('Skipping invalid line #%d in %s', line_no, pairs)
                    continue
                if a not in ok_vocab or b not in ok_vocab:
                    oov += 1
                    if dummy4unknown:
                        logger.debug('Zero similarity for line #%d with OOV words: %s', line_no, line.strip())
                        similarity_model.append(0.0)
                        similarity_gold.append(sim)
                        continue
                    else:
                        logger.debug('Skipping line #%d with OOV words: %s', line_no, line.strip())
                        continue
                similarity_gold.append(sim)  # Similarity from the dataset
                model_sim = self.similarity(a, b)
                similarity_model.append(model_sim)  # Similarity from the model
                if debug_file:
                    f.write(a.lower() + "," + b.lower() + "," + str(sim) + "," + str(model_sim) + "\n")
        if debug_file:
            f.close()
        self.vocab = original_vocab
        spearman = stats.spearmanr(similarity_gold, similarity_model)
        pearson = stats.pearsonr(similarity_gold, similarity_model)
        if dummy4unknown:
            oov_ratio = float(oov) / len(similarity_gold) * 100
        else:
            oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
        logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
        logger.debug(
            'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
            pairs, spearman[0], spearman[1]
        )
        logger.debug('Pairs with unknown words: %d', oov)
        self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
        return pearson, spearman, oov_ratio
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.

        Parameters
        ----------
        replace : bool
            If True, normalize `self.vectors` row by row in place and alias
            `self.vectors_norm` to the same array instead of allocating a copy.
        """
        # Recompute only when the cache is missing, or when an in-place replace is requested.
        if getattr(self, 'vectors_norm', None) is None or replace:
            print("init_sims from WordEmbeddings")
            logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
            # Subclasses may carry their own storage dtype; fall back to REAL (float32).
            dtype = REAL
            if hasattr(self, 'vector_dtype'):
                dtype = self.vector_dtype
            if replace:
                # In-place: each row is divided by its L2 norm, then vectors_norm
                # aliases vectors, so the unnormalized embeddings are lost.
                # NOTE(review): this path keeps self.vectors' existing dtype;
                # `dtype` is only applied in the copy path below.
                for i in xrange(self.vectors.shape[0]):
                    self.vectors[i, :] /= sqrt((self.vectors[i, :] ** 2).sum(-1))
                self.vectors_norm = self.vectors
            else:
                # Out-of-place: divide every row by its L2 norm and cast to `dtype`.
                self.vectors_norm = (self.vectors / sqrt((self.vectors ** 2).sum(-1))[..., newaxis]).astype(dtype)
class PoincareWordEmbeddingsKeyedVectors(WordEmbeddingsKeyedVectors):
    """
    Class used for word embeddings on the Poincare ball which use the Poincare geodesic distance for the similarity
    metric (instead of the cosine similarity).
    """

    def __init__(self, vector_size, vector_dtype=REAL, trainables=None, init_near_border=False,
                 init_pretrained_config=False):
        super(PoincareWordEmbeddingsKeyedVectors, self).__init__(vector_size=vector_size, vector_dtype=vector_dtype)
        # If True, use Poincare distance to measure similarity between words. Otherwise, use cosine distance.
        self.use_poincare_distance = True
        # NOTE(review): the semantics of the three fields below are defined by the trainer/callers,
        # not visible here; they are only stored.
        self.trainables = trainables
        self.init_near_border = init_near_border
        self.init_pretrained_config = init_pretrained_config

    def batch_most_similar_hyperbolic_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
        """
        Solve an analogy task. The result should be similar to the positive words and unlike the negative word.

        This method computes the similarity (according to the formula defined for the hyperbolic space) between
        the parallel transport of the input vector and the vectors for each word in the model and selects the word
        that is closest to the position of the parallel transported vector.

        Parameters
        ----------
        positive : list of two numpy.array
            List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
            number of questions in a batch.
        negative : numpy.array
            2D array that contains on each row the embedding of the negative word from that question. The number of
            rows is equal to the number of questions in a batch.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        debug : bool
            If True, also collect the top-10 candidates per question together with their
            Poincare distances to the three question words.

        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        """
        batch_size = len(negative)
        # XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
        # So here, self.vocab is actually self.vocab[:restricted]
        if not self.use_poincare_distance:
            self.init_sims()
        # Retrieve embeddings.
        pos_emb = [
            self.vectors[positive[0]],
            self.vectors[positive[1]]
        ]
        neg_emb = self.vectors[negative]
        # Compute the parallel transport of the positive vector in the analogy question (i.e. c) using the new formula
        parallel_transp1 = self.moebius_add_mat(
            pos_emb[1],
            self.gyr_mat(pos_emb[1], -neg_emb, self.moebius_add_mat(-neg_emb, pos_emb[0])))  # batch_size x vector_size
        # Compute the parallel transport of the other positive vector (i.e. b) so the alternative formulation of the
        # analogy question.
        parallel_transp2 = self.moebius_add_mat(
            pos_emb[0],
            self.gyr_mat(pos_emb[0], -neg_emb, self.moebius_add_mat(-neg_emb, pos_emb[1])))  # batch_size x vector_size
        # Compute the gyrolinear combination between the two parallel
        # transported points.
        t = 0.3
        aux = self.moebius_add_mat(-parallel_transp1, parallel_transp2)
        results = []
        lin_comb_point = self.moebius_add_mat(parallel_transp1, self.moebius_mul_mat(aux, t))
        # Compute similarity between parallel transported input and all words in the vocabulary.
        if self.use_poincare_distance:
            limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab]  # vocab_size * vector_size
            # NOTE!!! This is not actually the distance, but cosh(distance) (so only the argument of arccosh in the
            # Poincare distance formula). However, cosh(x) is monotonous (for positive x) which means that we will get
            # the same argmax in the end.
            dists = self.cosh_distances_mat(lin_comb_point, limited)  # batch_size * vocab_size
        else:
            # Keep the unnormalized vectors around as well: the debug branch below reports
            # Poincare distances, which are computed on the raw (unnormalized) embeddings.
            limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab]
            # Get normalized vectors, if we use cosine distance.
            limited_norm = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]  # vocab_size * vector_size
            # Normalize parallel transported vector.
            lin_comb_point = lin_comb_point / norm(lin_comb_point, axis=1)[:, None]
            dists = -dot(lin_comb_point, limited_norm.T)  # batch_size * vocab_size
        # Mask out the three question words (a, b, c) so they cannot be returned as the answer.
        max_float = np.finfo(dists.dtype).max
        batch_size_range = np.arange(batch_size)
        x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
        y = np.concatenate((positive[0], positive[1], negative))
        dists[x, y] = max_float  # batch_size * (vocab_size - 3)
        best = []
        if debug:
            for i in batch_size_range:
                top_ids = matutils.argsort(dists[i], topn=10)
                curr_best = [(self.index2word[idx],
                              self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
                             for idx in top_ids]
                best.append(curr_best)
        best_ids = np.argmin(dists, axis=1)
        result = (
            [self.index2word[i] for i in best_ids],
            dists[batch_size_range, best_ids].astype(np.float32),
            best)
        results.append(result)
        return results

    def batch_most_similar_3distadd_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
        """
        Solve an analogy task. The result should be similar to the positive words and unlike the negative word.

        Implements 3DISTADD. This replaces the cosine similarities in the 3COSADD formula with -dist.

        Parameters
        ----------
        positive : list of two numpy.array
            List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
            number of questions in a batch.
        negative : numpy.array
            2D array that contains on each row the embedding of the negative word from that question. The number of
            rows is equal to the number of questions in a batch.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        debug : bool
            If True, also collect the top-10 candidates per question together with their
            distances to the three question words.

        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        """
        batch_size = len(negative)
        # XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
        # So here, self.vocab is actually self.vocab[:restricted]
        if not self.use_poincare_distance:
            self.init_sims()
        # Retrieve embeddings.
        pos_emb = [
            self.vectors[positive[0]],
            self.vectors[positive[1]]
        ]
        neg_emb = self.vectors[negative]
        results = []
        # Compute similarity between parallel transported input and all words in the vocabulary.
        if self.use_poincare_distance:
            limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab]  # vocab_size * vector_size
            # NOTE!!! This is not actually the distance, but cosh(distance) (so only the argument of arccosh in the
            # Poincare distance formula). However, cosh(x) is monotonous (for positive x) which means that we will get
            # the same argmax in the end.
            if isinstance(self, MixPoincareWordEmbeddingsKeyedVectors):
                dists = (self.mix_distances_mat(pos_emb[0], limited) + self.mix_distances_mat(pos_emb[1], limited) -
                         self.mix_distances_mat(neg_emb, limited))  # batch_size * vocab_size
            else:
                dists = (self.cosh_distances_mat(pos_emb[0], limited) + self.cosh_distances_mat(pos_emb[1], limited) -
                         self.cosh_distances_mat(neg_emb, limited))  # batch_size * vocab_size
        else:
            # Keep the unnormalized vectors around as well: the debug branch below reports
            # distances computed on the raw (unnormalized) embeddings.
            limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab]
            # Get normalized vectors, if we use cosine distance.
            limited_norm = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]  # vocab_size * vector_size
            # Normalize parallel transported vector.
            pos_emb[0] = pos_emb[0] / norm(pos_emb[0], axis=1)[:, None]
            pos_emb[1] = pos_emb[1] / norm(pos_emb[1], axis=1)[:, None]
            neg_emb = neg_emb / norm(neg_emb, axis=1)[:, None]
            if isinstance(self, MixPoincareWordEmbeddingsKeyedVectors):
                dists = (self.mix_distances_mat(pos_emb[0], limited_norm) + self.mix_distances_mat(pos_emb[1], limited_norm) -
                         self.mix_distances_mat(neg_emb, limited_norm))  # batch_size * vocab_size
            else:
                dists = -(dot(pos_emb[0], limited_norm.T) + dot(pos_emb[1], limited_norm.T) - dot(neg_emb, limited_norm.T))  # batch_size * vocab_size
        # Mask out the three question words (a, b, c) so they cannot be returned as the answer.
        max_float = np.finfo(dists.dtype).max
        batch_size_range = np.arange(batch_size)
        x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
        y = np.concatenate((positive[0], positive[1], negative))
        dists[x, y] = max_float  # batch_size * (vocab_size - 3)
        best = []
        if debug:
            for i in batch_size_range:
                top_ids = matutils.argsort(dists[i], topn=10)
                curr_best = [(self.index2word[idx],
                              self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
                             for idx in top_ids]
                best.append(curr_best)
        best_ids = np.argmin(dists, axis=1)
        result = (
            [self.index2word[i] for i in best_ids],
            dists[batch_size_range, best_ids].astype(np.float32),
            best)
        results.append(result)
        return results

    def cosh_distances_mat(self, vectors, other_vectors=None):
        """
        Returns the argument of the arccosh function in the Poincare distance formula. Since arccosh(x) is a monotonous
        function for x >= 1, this is enough to create a ranking and select the closest point to another reference point.

        Parameters
        ----------
        vectors: numpy.array
            Vectors from which distances are to be computed.
        other_vectors: numpy.array
            For each vector in `other_vectors` distance from each vector in `vectors` is computed.
            If None or empty, all words in vocab are considered (including the vectors in `vectors`).

        Returns
        -------
        np.array
            Returns a numpy.array that contains the distance between each row in `vectors`
            and each row in `other_vectors`
        """
        if other_vectors is None:
            other_vectors = self.vectors
        dot_ww = (other_vectors * other_vectors).sum(axis=1)  # vocab_size * 1
        beta_w = 1.0 / (1 - dot_ww)  # vocab_size * 1
        dot_vv = (vectors * vectors).sum(axis=1)  # batch_size * 1
        alpha_v = 1.0 / (1 - dot_vv)  # batch_size * 1
        dot_vw = dot(vectors, other_vectors.T)  # batch_size * vocab_size
        # (-2*dot_vw + dot_ww + dot_vv) is ||v - w||^2 expanded.
        # NOTE(review): `distance` below uses 1 + 2*||v-w||^2/(...); here the factor 2 is absent.
        # This is still monotone in the true distance, so rankings/argmin are unaffected — confirm
        # before using these values as actual cosh(distance).
        cosh_dists = 1 + (-2 * dot_vw + dot_ww + dot_vv[:, None]) * alpha_v[:, None] * beta_w  # batch_size * vocab
        return cosh_dists

    def distances(self, word_or_vector, other_vectors=None):
        """
        Compute the cosh of the Poincare distances from given word or vector to all words in `other_words`.
        If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.

        Parameters
        ----------
        word_or_vector : str or numpy.array
            Word or vector from which distances are to be computed.
        other_vectors: numpy.array or None
            For each vector in `other_vectors` distance from `word_or_vector` is computed.
            If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).

        Returns
        -------
        numpy.array
            Array containing distances to all words in `other_words` from input `word_or_vector`,
            in the same order as `other_words`.

        Notes
        -----
        Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.
        """
        if isinstance(word_or_vector, string_types):
            input_vector = self.word_vec(word_or_vector)
        else:
            input_vector = word_or_vector
        if other_vectors is None:
            other_vectors = self.vectors
        if self.use_poincare_distance:
            # Batch-of-one call into the matrix routine.
            return self.cosh_distances_mat(np.array([input_vector]), other_vectors)
        else:
            return 1 - VanillaWordEmbeddingsKeyedVectors.cosine_similarities(input_vector, other_vectors)

    def distance(self, word_or_vector1, word_or_vector2):
        """
        Compute distance between two words or vectors inside the Poincare ball.

        Example
        --------
        >>> trained_model.distance('woman', 'man')
        """
        v1 = self.word_vec(word_or_vector1) if isinstance(word_or_vector1, string_types) else word_or_vector1
        v2 = self.word_vec(word_or_vector2) if isinstance(word_or_vector2, string_types) else word_or_vector2
        if self.use_poincare_distance:
            # Closed-form Poincare distance; EPS guards points on (or numerically past) the boundary.
            diff = v1 - v2
            dist = arccosh(1 + 2 * dot(diff, diff) / (1 - dot(v1, v1) + EPS) / (1 - dot(v2, v2) + EPS))
            return dist
        else:
            return 1 - dot(matutils.unitvec(v1), matutils.unitvec(v2))

    def similarity(self, w1, w2):
        """
        Compute similarity between two words based on the Poincare distance between them.

        Example
        --------
        >>> trained_model.similarity('woman', 'man')
        """
        if self.use_poincare_distance:
            # Negative squared distance, so that closer words score higher.
            return -self.distance(w1, w2)**2
        else:
            return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))

    def embedding_norm(self, word_or_vector):
        """
        Compute embedding Poincare norm (hyperbolic distance to the origin) for a given word.

        Parameters
        ----------
        word_or_vector : string or numpy.array
            Word (looked up in the vocab) or embedding vector.
        """
        v = self[word_or_vector] if isinstance(word_or_vector, string_types) else word_or_vector
        return arccosh(1 + 2 * dot(v, v) / (1 - dot(v, v)))

    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
        """
        if getattr(self, 'vectors_norm', None) is None or replace:
            print("init_sims from PoincareWordEmbeddings")
            logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
            dtype = REAL
            if hasattr(self, 'vector_dtype'):
                dtype = self.vector_dtype
            # (A dead `np.empty_like` pre-allocation was removed here: the result below
            # rebinds vectors_norm to a fresh array anyway.)
            # XXX: uncomment this line to compute gyrocosine
            # norms = self.embedding_norms_mat(self.vectors)
            norms = norm(self.vectors, axis=1)
            self.vectors_norm = (self.vectors / norms[:, None]).astype(dtype)

    @staticmethod
    def moebius_add_mat(A, B):
        """
        Return the result of the Moebius addition of the rows of matrix A with the rows of B.

        Parameters
        ----------
        A : numpy.array
            matrix, first argument of addition
        B : numpy.array
            matrix, second argument of addition

        Returns
        -------
        :obj: `numpy.array`
            matrix; Result of Moebius addition of the rows of matrix A with the rows of B
        """
        dot_aa = np.sum(A*A, axis=1)
        dot_bb = np.sum(B*B, axis=1)
        dot_ab = np.sum(A*B, axis=1)
        denominator = 1 + 2 * dot_ab + dot_aa * dot_bb
        coef_a = (1 + 2 * dot_ab + dot_bb) / denominator
        coef_b = (1 - dot_aa) / denominator
        return A * coef_a[:, None] + B * coef_b[:, None]

    @staticmethod
    def moebius_add(a, b):
        """
        Return the result of the Moebius addition of the two vectors, a + b

        Parameters
        ----------
        a : numpy.array
            vector, first argument of addition
        b : numpy.array
            vector, second argument of addition

        Returns
        -------
        :obj: `numpy.array`
            Result of Moebius addition a + b
        """
        dot_aa = dot(a, a)
        dot_bb = dot(b, b)
        dot_ab = dot(a, b)
        return ((1 + 2 * dot_ab + dot_bb) * a + (1 - dot_aa) * b) / (1 + 2 * dot_ab + dot_aa * dot_bb)

    @staticmethod
    def moebius_mul_mat(A, r):
        """
        Return the result of the Moebius scalar multiplication of vector v with scalar r

        Parameters
        ----------
        A : numpy.array (2D matrix)
        r : scalar

        Returns
        -------
        :obj: `numpy.array`
            Result of Moebius scalar multiplication between r and each of the rows of A
        """
        norm_v = norm(A, axis=1)
        # The small epsilon guards rows that are exactly the origin (0/0 otherwise).
        return A * (tanh(r * arctanh(norm_v)) / (norm_v + 1e-10))[:, None]

    @staticmethod
    def moebius_mul(v, r):
        """
        Return the result of the Moebius scalar multiplication of vector v with scalar r

        Parameters
        ----------
        v : numpy.array (1D vector)
        r : scalar

        Returns
        -------
        :obj: `numpy.array`
            Result of Moebius scalar multiplication r * v
        """
        norm_v = norm(v)
        # Epsilon added for consistency with moebius_mul_mat: avoids NaN (0/0) when v is the origin.
        return tanh(r * arctanh(norm_v)) / (norm_v + 1e-10) * v

    @staticmethod
    def embedding_norms_mat(vectors):
        """
        Compute embedding Poincare norm for a set of vectors.

        Parameters
        ----------
        vectors : matrix
            np.array
        """
        dot_vv = (vectors * vectors).sum(axis=1)
        return arccosh(1 + 2 * dot_vv / (1 - dot_vv))

    @staticmethod
    def gyr(u, v, x):
        """
        Return the result of gyr[u, v](x).

        u : numpy.array (1D vector)
        v : numpy.array (1D vector)
        x : numpy.array (1D vector)

        Returns
        -------
        :obj: `numpy.array`
            Result of gyr[u, v](x)
        """
        # gyr[u, v](x) = -(u + v) + (u + (v + x)), all additions Moebius.
        a = PoincareWordEmbeddingsKeyedVectors.moebius_add(u, v)
        b = PoincareWordEmbeddingsKeyedVectors.moebius_add(u, PoincareWordEmbeddingsKeyedVectors.moebius_add(v, x))
        return PoincareWordEmbeddingsKeyedVectors.moebius_add(-a, b)

    @staticmethod
    def gyr_mat(u, v, x):
        """
        Return the result of gyr[u, v](x), row-wise.

        u : numpy.array (2D matrix)
        v : numpy.array (2D matrix)
        x : numpy.array (2D matrix)

        Returns
        -------
        :obj: `numpy.array` (2D matrix)
            Result of gyr[u, v](x)
        """
        dot_uu = (u * u).sum(axis=1)  # batch_size x 1
        dot_vv = (v * v).sum(axis=1)  # batch_size x 1
        dot_uv = (u * v).sum(axis=1)  # batch_size x 1
        dot_ux = (u * x).sum(axis=1)  # batch_size x 1
        dot_vx = (v * x).sum(axis=1)  # batch_size x 1
        # Closed-form expansion of -(u+v) + (u + (v+x)); avoids three moebius_add_mat calls.
        A = -dot_ux * dot_vv + dot_vx + 2 * dot_uv * dot_vx
        B = -dot_vx * dot_uu - dot_ux
        D = 1 + 2 * dot_uv + dot_uu * dot_vv
        coef_u = 2 * A / D
        coef_v = 2 * B / D
        return x + u * coef_u[:, None] + v * coef_v[:, None]

    @staticmethod
    def exp_map_mat(V, X):
        """
        Return the result of the exponential map applied from the tangent plane at point x, on the vector v that belongs
        to the tangent plane

        Parameters
        ----------
        V : numpy.array
            matrix, the rows are vectors that belong in the tangent plane at x
        X : numpy.array
            matrix, the rows are points on the manifold, where the tangent plane is considered

        Returns
        -------
        :obj: `numpy.array`
            Result of the exponential map on each of the rows of the output matrix
        """
        norm_v = np.linalg.norm(V, axis=1)
        dot_xx = np.sum(X*X, axis=1)
        # NOTE(review): the scaling uses 1/||x||^2 rather than the standard conformal factor
        # 2/(1-||x||^2); it is, however, the exact inverse of log_map_mat below — confirm intent
        # before "fixing" either map.
        coef = tanh(1.0/dot_xx * norm_v) / norm_v
        second_term = V * coef[:, None]
        return PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(X, second_term)

    @staticmethod
    def log_map_mat(V, X):
        """
        Return the result of the logarithmic map. The resulting point belongs to the tangent plane at point x.
        Both x and v are points on the manifold

        Parameters
        ----------
        V : numpy.array
            matrix, the rows are vectors that belong to the manifold
        X : numpy.array
            matrix, the rows are points on the manifold, where the tangent plane is considered

        Returns
        -------
        :obj: `numpy.array`
            Result of the logarithmic map on each of the rows of the output matrix
        """
        add_result = PoincareWordEmbeddingsKeyedVectors.moebius_add_mat(-X, V)
        norm_add_result = np.linalg.norm(add_result, axis=1)
        dot_xx = np.sum(X*X, axis=1)
        # See the NOTE in exp_map_mat about the dot_xx scaling convention.
        coef = dot_xx * arctanh(norm_add_result) / norm_add_result
        return add_result * coef[:, None]
class MixPoincareWordEmbeddingsKeyedVectors(PoincareWordEmbeddingsKeyedVectors):
    """
    Word embeddings living in a Cartesian product of `num_embs` Poincare balls.

    The full `vector_size`-dimensional vector is treated as the concatenation of
    `num_embs` independent sub-embeddings, each of size `vector_size / num_embs`;
    distances combine the per-ball Poincare distances.
    """

    def __init__(self, vector_size, num_embs, vector_dtype=REAL, trainables=None, init_near_border=False,
                 init_pretrained_config=False):
        super(MixPoincareWordEmbeddingsKeyedVectors, self).__init__(
            vector_size=vector_size, vector_dtype=vector_dtype, trainables=trainables,
            init_near_border=init_near_border, init_pretrained_config=init_pretrained_config)
        self.num_embs = num_embs
        # Dimensionality of each individual ball (assumes num_embs divides vector_size).
        self.small_emb_size = int(vector_size / num_embs)

    def batch_most_similar_mix_hyperbolic_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
        """
        Solve an analogy task. The result should be similar to the positive words and unlike the negative word.

        This method computes the similarity (according to the formula defined for the hyperbolic space) between
        the parallel transport of the input vector and the vectors for each word in the model and selects the word
        that is closest to the position of the parallel transported vector. The gyro-operations are applied
        independently inside each of the `num_embs` sub-balls.

        Parameters
        ----------
        positive : list of two numpy.array
            List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
            number of questions in a batch.
        negative : numpy.array
            2D array that contains on each row the embedding of the negative word from that question. The number of
            rows is equal to the number of questions in a batch.
        restrict_vocab : int
            Optional integer which limits the range of vectors which
            are searched for most-similar values. For example, restrict_vocab=10000 would
            only check the first 10000 word vectors in the vocabulary order. (This may be
            meaningful if you've sorted the vocabulary by descending frequency.)
        debug : bool
            If True, also collect the top-10 candidates per question together with their
            distances to the three question words.

        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (word, similarity)
        """
        batch_size = len(negative)
        # XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
        # So here, self.vocab is actually self.vocab[:restricted]
        if not self.use_poincare_distance:
            self.init_sims()
        # Retrieve embeddings.
        pos_emb = [
            self.vectors[positive[0]],
            self.vectors[positive[1]]
        ]
        neg_emb = self.vectors[negative]
        parallel_transp1 = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
        parallel_transp2 = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
        aux = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
        lin_comb_point = empty((batch_size, self.vector_size), dtype=self.vector_dtype)
        small_emb_size = int(self.vector_size / self.num_embs)
        # Compute gyro-parallel transport in each of the small dimensional spaces.
        for i in range(self.num_embs):
            # Compute the parallel transport of the positive vector in the analogy question (i.e. c) using the new
            # formula
            start = small_emb_size * i
            end = small_emb_size * (i+1)
            parallel_transp1[:, start:end] = self.moebius_add_mat(
                pos_emb[1][:, start:end],
                self.gyr_mat(pos_emb[1][:, start:end],
                             -neg_emb[:, start:end],
                             self.moebius_add_mat(-neg_emb[:, start:end],
                                                  pos_emb[0][:, start:end])))  # batch_size x vector_size
            # Compute the parallel transport of the other positive vector (i.e. b) so the alternative formulation of the
            # analogy question.
            parallel_transp2[:, start:end] = self.moebius_add_mat(
                pos_emb[0][:, start:end],
                self.gyr_mat(pos_emb[0][:, start:end], -neg_emb[:, start:end],
                             self.moebius_add_mat(-neg_emb[:, start:end],
                                                  pos_emb[1][:, start:end])))  # batch_size x vector_size
            aux[:, start:end] = self.moebius_add_mat(-parallel_transp1[:, start:end], parallel_transp2[:, start:end])
        # Compute the gyrolinear combination between the two parallel
        # transported points.
        t = 0.3
        results = []
        for i in range(self.num_embs):
            start = small_emb_size * i
            end = small_emb_size * (i+1)
            lin_comb_point[:, start:end] = self.moebius_add_mat(parallel_transp1[:, start:end],
                                                                self.moebius_mul_mat(aux[:, start:end], t))
        # Compute similarity between parallel transported input and all words in the vocabulary.
        if self.use_poincare_distance:
            limited = self.vectors if restrict_vocab is None else self.vectors[:restrict_vocab]  # vocab_size * vector_size
        else:
            # Get normalized vectors, if we use cosine distance.
            limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]  # vocab_size * vector_size
            # Normalize parallel transported vector, per sub-ball.
            for i in range(self.num_embs):
                start = small_emb_size * i
                end = small_emb_size * (i+1)
                lin_comb_point[:, start:end] = lin_comb_point[:, start:end] / (norm(lin_comb_point[:, start:end], axis=1)[:, None] + 1e-5)
        dists = self.mix_distances_mat(lin_comb_point, limited)  # batch_size * vocab_size
        # Mask out the three question words (a, b, c) so they cannot be returned as the answer.
        max_float = np.finfo(dists.dtype).max
        batch_size_range = np.arange(batch_size)
        x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
        y = np.concatenate((positive[0], positive[1], negative))
        dists[x, y] = max_float  # batch_size * (vocab_size - 3)
        best = []
        if debug:
            for i in batch_size_range:
                top_ids = matutils.argsort(dists[i], topn=10)
                curr_best = [(self.index2word[idx],
                              self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]]))[0])
                             for idx in top_ids]
                best.append(curr_best)
        best_ids = np.argmin(dists, axis=1)
        result = (
            [self.index2word[i] for i in best_ids],
            dists[batch_size_range, best_ids].astype(np.float32),
            best)
        results.append(result)
        return results

    def mix_distances_mat(self, vectors, other_vectors=None):
        """
        Return distance in the product of hyperbolic spaces, between the rows of `vectors` and the rows of
        `other_vectors`.

        Parameters
        ----------
        vectors: numpy.array
            Vectors from which distances are to be computed.
        other_vectors: numpy.array
            For each vector in `other_vectors` distance from each vector in `vectors` is computed.
            If None or empty, all words in vocab are considered (including the vectors in `vectors`).

        Returns
        -------
        np.array
            Returns a numpy.array that contains the distance between each row in `vectors`
            and each row in `other_vectors`
        """
        if other_vectors is None:
            # Previously this dereferenced None; fall back to the full vocabulary as documented.
            # NOTE(review): in cosine mode the inputs are expected to be normalized; `distances`
            # below passes self.vectors_norm explicitly in that mode.
            other_vectors = self.vectors
        dists = zeros((vectors.shape[0], other_vectors.shape[0]), dtype=self.vector_dtype)
        small_emb_size = int(self.vector_size / self.num_embs)
        if self.use_poincare_distance:
            for i in range(self.num_embs):
                start = small_emb_size * i
                end = small_emb_size * (i+1)
                # Per-ball Poincare distance (arccosh of the cosh argument), combined across
                # balls as an L2 norm of the per-ball distances.
                curr_dists = np.arccosh(
                    self.cosh_distances_mat(vectors[:, start:end], other_vectors[:, start:end]))
                dists += curr_dists * curr_dists
            dists = np.sqrt(dists)
        else:
            # The vectors need to be normalized!!!
            for i in range(self.num_embs):
                start = small_emb_size * i
                end = small_emb_size * (i+1)
                curr_dists = -dot(vectors[:, start:end], other_vectors[:, start:end].T)
                dists += curr_dists
        return dists

    def distances(self, word_or_vector, other_vectors=None):
        """
        Compute the distance in a product of Poincare balls from given word or vector to all words in `other_words`.
        If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.

        Parameters
        ----------
        word_or_vector : str or numpy.array
            Word or vector from which distances are to be computed.
        other_vectors: numpy.array or None
            For each vector in `other_vectors` distance from `word_or_vector` is computed.
            If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).

        Returns
        -------
        numpy.array
            Array containing distances to all words in `other_words` from input `word_or_vector`,
            in the same order as `other_words`.

        Notes
        -----
        Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.
        """
        # Cosine mode operates on the normalized vectors; Poincare mode on the raw ones.
        if self.use_poincare_distance:
            use_norm = False
        else:
            use_norm = True
        if isinstance(word_or_vector, string_types):
            input_vector = self.word_vec(word_or_vector, use_norm=use_norm)
        else:
            input_vector = word_or_vector
        if other_vectors is None:
            if use_norm:
                other_vectors = self.vectors_norm
            else:
                other_vectors = self.vectors
        return self.mix_distances_mat(np.array([input_vector]), other_vectors)

    def distance(self, word_or_vector1, word_or_vector2):
        """
        Compute distance between two words or vectors represented in a Cartesian product of Poincare balls.

        Example
        --------
        >>> trained_model.distance('woman', 'man')
        """
        if self.use_poincare_distance:
            use_norm = False
        else:
            use_norm = True
        v1 = self.word_vec(word_or_vector1, use_norm=use_norm) if isinstance(word_or_vector1, string_types) else word_or_vector1
        v2 = self.word_vec(word_or_vector2, use_norm=use_norm) if isinstance(word_or_vector2, string_types) else word_or_vector2
        # mix_distances_mat works on batches; take the single scalar out of the 1x1 result.
        return self.mix_distances_mat(np.array([v1]), np.array([v2]))[0][0]

    def similarity(self, w1, w2):
        """
        Compute similarity between two words based on the Poincare distance between them.

        Example
        --------
        >>> trained_model.similarity('woman', 'man')
        """
        return -self.distance(w1, w2)

    def embedding_norm(self, word_or_vector):
        """
        Compute embedding norm in product of Poincare balls for a given word.

        Parameters
        ----------
        word_or_vector : string or numpy.array
            Word (looked up in the vocab) or embedding vector.
        """
        v = self[word_or_vector] if isinstance(word_or_vector, string_types) else word_or_vector
        small_emb_size = int(self.vector_size / self.num_embs)
        norms = empty(self.num_embs)
        for i in range(self.num_embs):
            start = small_emb_size * i
            end = small_emb_size * (i+1)
            if self.use_poincare_distance:
                norms[i] = arccosh(1 + 2 * dot(v[start:end], v[start:end]) / (1 - dot(v[start:end], v[start:end])))
            else:
                norms[i] = norm(v[start:end])
        # Combine the per-ball norms as an L2 norm, matching mix_distances_mat.
        return norm(norms)

    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors (normalized independently per sub-ball).

        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!

        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
        """
        if getattr(self, 'vectors_norm', None) is None or replace:
            print("init_sims from MixPoincareWordEmbeddings")
            logger.info("precomputing L2-norms of word weight vectors; replace={}".format(replace))
            dtype = REAL
            if hasattr(self, 'vector_dtype'):
                dtype = self.vector_dtype
            # Pre-allocate and fill slice by slice; each sub-ball is normalized on its own.
            self.vectors_norm = np.empty_like(self.vectors, dtype=dtype)
            small_emb_size = int(self.vector_size / self.num_embs)
            for i in range(self.num_embs):
                start = small_emb_size * i
                end = small_emb_size * (i+1)
                # norms = PoincareWordEmbeddingsKeyedVectors.embedding_norms_mat(self.vectors[:, start:end]) + 1e-5
                norms = norm(self.vectors[:, start:end], axis=1) + 1e-5
                self.vectors_norm[:, start:end] = (self.vectors[:, start:end] / norms[:, None]).astype(dtype)
class VanillaWordEmbeddingsKeyedVectors(WordEmbeddingsKeyedVectors):
"""
Class used as base class for vanilla word embeddings that use cosine similarity (e.g. word2vec, fasttext).
"""
def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None, debug=False):
"""
Find the top-N most similar words. Positive words contribute positively towards the
similarity, negative words negatively.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
Parameters
----------
positive : :obj: `list` of :obj: `str`
List of words that contribute positively.
negative : :obj: `list` of :obj: `str`
List of words that contribute negatively.
topn : int
Number of top-N similar words to return.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
Examples
--------
>>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
positive = [positive]
# add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
positive = [
(word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in positive
]
negative = [
(word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
for word in negative
]
# compute the weighted average of all words
all_words, mean = set(), []
for word, weight in positive + negative:
if isinstance(word, ndarray):
mean.append(weight * word)
else:
mean.append(weight * self.word_vec(word, use_norm=True))
if word in self.vocab:
all_words.add(self.vocab[word].index)
if not mean:
raise ValueError("cannot compute similarity with no input")
dtype = REAL
if hasattr(self, "vector_dtype"):
dtype = self.vector_dtype
mean = matutils.unitvec(array(mean).mean(axis=0)).astype(dtype)
if indexer is not None:
return indexer.most_similar(mean, topn)
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]
# Compute 3COSADD.
dists = dot(limited, mean)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
def batch_most_similar_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
"""
Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
This method computes cosine similarity between a simple mean of the projection
weight vectors of the given words and the vectors for each word in the model.
The method corresponds to the `word-analogy` and `distance` scripts in the original
word2vec implementation.
Parameters
----------
positive : list of two numpy.array
List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
number of questions in a batch.
negative : numpy.array
2D array that contains on each row the embedding of the negative word from that question. The number of
rows is equal to the number of questions in a batch.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
self.init_sims()
batch_size = len(negative)
# XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
# So here, self.vocab is actually self.vocab[:restricted]
# Retrieve embeddings.
pos_emb = [
self.vectors_norm[positive[0]],
self.vectors_norm[positive[1]]
]
neg_emb = self.vectors_norm[negative]
# compute the weighted average of all input words, where positive words have weight 1
# and negative words have weight -1
weighted_mean = (pos_emb[0] + pos_emb[1] - neg_emb) / 3 # batch_size * vector_size
mean_norm = norm(weighted_mean, axis=1)
weighted_mean = weighted_mean / mean_norm[:, None]
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab] # vocab_size * vector_size
# Compute 3COSADD.
sims = dot(weighted_mean, limited.T) # batch_size * vocab_size
min_float = np.finfo(sims.dtype).min
batch_size_range = np.arange(batch_size)
x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
y = np.concatenate((positive[0], positive[1], negative))
sims[x, y] = min_float # batch_size * (vocab_size - 3)
best = []
if debug:
for i in batch_size_range:
top_ids = matutils.argsort(sims[i], topn=10, reverse=True)
curr_best = [(self.index2word[idx],
self.distances(limited[idx], np.array([neg_emb[i], pos_emb[0][i], pos_emb[1][i]])))
for idx in top_ids]
best.append(curr_best)
best_ids = np.argmax(sims, axis=1)
result = (
[self.index2word[i] for i in best_ids],
sims[batch_size_range, best_ids].astype(np.float32),
best)
return [result]
def batch_most_similar_cosmul_analogy(self, positive=None, negative=None, restrict_vocab=None, debug=False):
"""
Solve an analogy task. The result should be similar to the positive words and unlike the negative word.
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
Parameters
----------
positive : list of two numpy.array
List of two 2D numpy arrays. Each of them contains positive instances. The number of rows is equal to the
number of questions in a batch.
negative : numpy.array
2D array that contains on each row the embedding of the negative word from that question. The number of
rows is equal to the number of questions in a batch.
restrict_vocab : int
Optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Returns
-------
:obj: `list` of :obj: `tuple`
Returns a list of tuples (word, similarity)
"""
self.init_sims()
batch_size = len(negative)
# XXX: before calling this method, #accuracy is setting self.vocab to be only the restricted vocab.
# So here, self.vocab is actually self.vocab[:restricted]
# Retrieve embeddings.
pos_emb = [
self.vectors_norm[positive[0]],
self.vectors_norm[positive[1]]
]
neg_emb = self.vectors_norm[negative]
# # compute the weighted average of all input words, where positive words have weight 1
# # and negative words have weight -1
# weighted_mean = (pos_emb[0] + pos_emb[1] - neg_emb) / 3 # batch_size * vector_size
# mean_norm = norm(weighted_mean, axis=1)
# weighted_mean = weighted_mean / mean_norm[:, None]
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab] # vocab_size * vector_size
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [
(1 + dot(pos_emb[0], limited.T)) / 2, # batch_size * vocab_size
(1 + dot(pos_emb[1], limited.T)) / 2 # batch_size * vocab_size
]
neg_dists = (1 + dot(neg_emb, limited.T)) / 2 # batch_size * vocab_size
sims = pos_dists[0] * pos_dists[1] / (neg_dists + 0.000001) # batch_size * vocab_size
min_float = np.finfo(sims.dtype).min
batch_size_range = np.arange(batch_size)
x = np.concatenate((batch_size_range, batch_size_range, batch_size_range))
y = np.concatenate((positive[0], positive[1], negative))
sims[x, y] = min_float # batch_size * (vocab_size - 3)
best_ids = np.argmax(sims, axis=1)
result = (
[self.index2word[i] for i in best_ids],
sims[batch_size_range, best_ids].astype(np.float32))
return [result]
def most_similar_cosmul(self, positive=None, negative=None, topn=10, restrict_vocab=None, debug=False):
"""
Find the top-N most similar words, using the multiplicative combination objective
proposed by Omer Levy and Yoav Goldberg. Positive words still contribute
positively towards the similarity, negative words negatively, but with less
susceptibility to one large distance dominating the calculation.
In the common analogy-solving case, of two positive and one negative examples,
this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
Additional positive or negative examples contribute to the numerator or denominator,
respectively – a potentially sensible but untested extension of the method. (With
a single positive example, rankings will be the same as in the default most_similar.)
Example::
>>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
[(u'iraq', 0.8488819003105164), ...]
.. Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
"""
if positive is None:
positive = []
if negative is None:
negative = []
self.init_sims()
if isinstance(positive, string_types) and not negative:
# allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
positive = [positive]
all_words = {
self.vocab[word].index for word in positive + negative
if not isinstance(word, ndarray) and word in self.vocab
}
positive = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in positive
]
negative = [
self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
for word in negative
]
if not positive:
raise ValueError("cannot compute similarity with no input")
limited = self.vectors_norm if restrict_vocab is None else self.vectors_norm[:restrict_vocab]
# equation (4) of Levy & Goldberg "Linguistic Regularities...",
# with distances shifted to [0,1] per footnote (7)
pos_dists = [((1 + dot(limited, term)) / 2) for term in positive]
neg_dists = [((1 + dot(limited, term)) / 2) for term in negative]
dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
if not topn:
return dists
best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
# ignore (don't return) words from the input
result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
return result[:topn]
@staticmethod
def cosine_similarities(vector_1, vectors_all):
"""
Return cosine similarities between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.array
vector from which similarities are to be computed.
expected shape (dim,)
vectors_all : numpy.array
for each row in vectors_all, distance from vector_1 is computed.
expected shape (num_vectors, dim)
Returns
-------
:obj: `numpy.array`
Contains cosine distance between vector_1 and each row in vectors_all.
shape (num_vectors,)
"""
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
dot_products = dot(vectors_all, vector_1)
similarities = dot_products / (norm * all_norms)
return similarities
def distances(self, word_or_vector, other_words_or_vectors=()):
"""
Compute cosine distances from given word or vector to all words in `other_words`.
If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
Parameters
----------
word_or_vector : str or numpy.array
Word or vector from which distances are to be computed.
other_words_or_vectors : iterable(str) or numpy.array
For each word in `other_words_or_vectors` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words_or_vectors` from input `word_or_vector`,
in the same order as `other_words`.
Notes
-----
Raises KeyError if either `word_or_vector` or any word in `other_words_or_vectors` is absent from vocab.
"""
if isinstance(word_or_vector, string_types):
input_vector = self.word_vec(word_or_vector)
else:
input_vector = word_or_vector
if not len(other_words_or_vectors):
other_vectors = self.vectors
else:
if isinstance(other_words_or_vectors[0], string_types):
other_indices = [self.vocab[word].index for word in other_words_or_vectors]
other_vectors = self.vectors[other_indices]
else:
other_vectors = other_words_or_vectors
return 1 - self.cosine_similarities(input_vector, other_vectors)
def distance(self, word_or_vector1, word_or_vector2):
"""
Compute cosine distance between two words.
Examples
--------
>>> trained_model.distance('woman', 'man')
0.34
>>> trained_model.distance('woman', 'woman')
0.0
"""
v1 = self.word_vec(word_or_vector1) if isinstance(word_or_vector1, string_types) else word_or_vector1
v2 = self.word_vec(word_or_vector2) if isinstance(word_or_vector2, string_types) else word_or_vector2
return 1 - dot(matutils.unitvec(v1), matutils.unitvec(v2))
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Examples
--------
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def embedding_norm(self, word_or_vector):
"""
Compute embedding norm for a given word.
Parameters
----------
word_or_vector : string or array
word or vector
"""
v = self.word_vec(word_or_vector) if isinstance(word_or_vector, string_types) else word_or_vector
return norm(v)
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Examples
--------
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
if not(len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed list is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Parameters
----------
words : :obj: `list` of :obj: `str`
List of words
Returns
-------
str
The word further away from the mean of all words.
Example
-------
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
used_words = [word for word in words if word in self]
if len(used_words) != len(words):
ignored_words = set(words) - set(used_words)
logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
if not used_words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.word_vec(word, use_norm=True) for word in used_words).astype(self.vector_dtype)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(self.vector_dtype)
dists = dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
class Word2VecKeyedVectors(VanillaWordEmbeddingsKeyedVectors):
    """Class to contain vectors and vocab for word2vec model.
    Used to perform operations on the vectors such as vector lookup, distance, similarity etc.
    Adds (de)serialization in the original C word2vec-tool format and a Keras export helper.
    """
    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.
        Parameters
        ----------
        fname : str
            The file path used to save the vectors in
        fvocab : str
            Optional file path used to save the vocabulary
        binary : bool
            If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
        total_vec : int
            Optional parameter to explicitly specify total no. of vectors
            (in case word vectors are appended with document vectors afterwards)
        """
        # delegates to the module-level helper shared with gensim.models.word2vec
        # from gensim.models.word2vec import save_word2vec_format
        _save_word2vec_format(
            fname, self.vocab, self.vectors, fvocab=fvocab, binary=binary, total_vec=total_vec)
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """Load the input-hidden weight matrix from the original C word2vec-tool format.
        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.
        Parameters
        ----------
        fname : str
            The file path to the saved word2vec-format file.
        fvocab : str
            Optional file path to the vocabulary. Word counts are read from `fvocab` filename,
            if set (this is the file generated by `-save-vocab` flag of the original C tool).
        binary : bool
            If True, indicates whether the data is in binary word2vec format.
        encoding : str
            If you trained the C model using non-utf8 encoding for words, specify that
            encoding in `encoding`.
        unicode_errors : str
            default 'strict', is a string suitable to be passed as the `errors`
            argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
            file may include word tokens truncated in the middle of a multibyte unicode character
            (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
        limit : int
            Sets a maximum number of word-vectors to read from the file. The default,
            None, means read all.
        datatype : :class: `numpy.float*`
            (Experimental) Can coerce dimensions to a non-default float type (such
            as np.float16) to save memory. (Such types may result in much slower bulk operations
            or incompatibility with optimized routines.)
        Returns
        -------
        :obj: `Word2VecKeyedVectors`
            Returns the loaded vectors as an instance of :class: `Word2VecKeyedVectors`.
        """
        # delegates to the module-level helper shared with gensim.models.word2vec
        # from gensim.models.word2vec import load_word2vec_format
        return _load_word2vec_format(
            Word2VecKeyedVectors, fname, fvocab=fvocab, binary=binary, encoding=encoding, unicode_errors=unicode_errors,
            limit=limit, datatype=datatype)
    def get_keras_embedding(self, train_embeddings=False):
        """Return a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings
        Parameters
        ----------
        train_embeddings : bool
            If False, the weights are frozen and stopped from being updated.
            If True, the weights can/will be further trained/updated.
        Returns
        -------
        :obj: `keras.layers.Embedding`
            Embedding layer
        Raises
        ------
        ImportError
            If Keras is not installed.
        """
        try:
            from keras.layers import Embedding
        except ImportError:
            raise ImportError("Please install Keras to use this function")
        weights = self.vectors
        # set `trainable` as `False` to use the pretrained word embedding
        # No extra mem usage here as `Embedding` layer doesn't create any new matrix for weights
        layer = Embedding(
            input_dim=weights.shape[0], output_dim=weights.shape[1],
            weights=[weights], trainable=train_embeddings
        )
        return layer
KeyedVectors = Word2VecKeyedVectors  # alias kept for backward compatibility with older gensim import paths
class Doc2VecKeyedVectors(BaseKeyedVectors):
    """Vectors and vocab for a :class:`~gensim.models.doc2vec.Doc2Vec` model.

    Doctags may be plain ints (which index `vectors_docs` directly, up to
    `max_rawint`) or string tags (resolved through `doctags`/`offset2doctag`
    at offsets past `max_rawint + 1`).
    """
    def __init__(self, vector_size, mapfile_path):
        super(Doc2VecKeyedVectors, self).__init__(vector_size=vector_size)
        self.doctags = {}  # string -> Doctag (only filled if necessary)
        self.max_rawint = -1  # highest rawint-indexed doctag
        self.offset2doctag = []  # int offset-past-(max_rawint+1) -> String (only filled if necessary)
        self.count = 0
        self.vectors_docs = []
        self.mapfile_path = mapfile_path
        self.vector_size = vector_size
        self.vectors_docs_norm = None
    @property
    def index2entity(self):
        return self.offset2doctag
    @index2entity.setter
    def index2entity(self, value):
        self.offset2doctag = value
    @property
    @deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs instead")
    def doctag_syn0(self):
        return self.vectors_docs
    @property
    @deprecated("Attribute will be removed in 4.0.0, use docvecs.vectors_docs_norm instead")
    def doctag_syn0norm(self):
        return self.vectors_docs_norm
    def __getitem__(self, index):
        """
        Accept a single key (int or string tag) or list of keys as input.
        If a single string or int, return designated tag's vector
        representation, as a 1D numpy array.
        If a list, return designated tags' vector representations as a
        2D numpy array: #tags x #vector_size.
        """
        if index in self:
            if isinstance(index, string_types + integer_types + (integer,)):
                return self.vectors_docs[self._int_index(index, self.doctags, self.max_rawint)]
            return vstack([self[i] for i in index])
        raise KeyError("tag '%s' not seen in training corpus/invalid" % index)
    def __contains__(self, index):
        # raw ints are valid iff they fall below the trained count
        if isinstance(index, integer_types + (integer,)):
            return index < self.count
        else:
            return index in self.doctags
    def __len__(self):
        return self.count
    def save(self, *args, **kwargs):
        """Saves the keyedvectors. This saved model can be loaded again using
        :func:`~gensim.models.doc2vec.Doc2VecKeyedVectors.load` which supports
        operations on trained document vectors like `most_similar`.
        Parameters
        ----------
        fname : str
            Path to the file.
        """
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get('ignore', ['vectors_docs_norm'])
        super(Doc2VecKeyedVectors, self).save(*args, **kwargs)
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.
        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!
        Note that you **cannot continue training or inference** after doing a replace.
        The model becomes effectively read-only = you can call `most_similar`, `similarity`
        etc., but not `train` or `infer_vector`.
        """
        if getattr(self, 'vectors_docs_norm', None) is None or replace:
            logger.info("precomputing L2-norms of doc weight vectors")
            if replace:
                for i in xrange(self.vectors_docs.shape[0]):
                    self.vectors_docs[i, :] /= sqrt((self.vectors_docs[i, :] ** 2).sum(-1))
                self.vectors_docs_norm = self.vectors_docs
            else:
                if self.mapfile_path:
                    # back the normalized copy with a memory-mapped file to save RAM
                    self.vectors_docs_norm = np_memmap(
                        self.mapfile_path + '.vectors_docs_norm', dtype=REAL,
                        mode='w+', shape=self.vectors_docs.shape)
                else:
                    self.vectors_docs_norm = empty(self.vectors_docs.shape, dtype=REAL)
                np_divide(
                    self.vectors_docs, sqrt((self.vectors_docs ** 2).sum(-1))[..., newaxis], self.vectors_docs_norm)
    def most_similar(self, positive=None, negative=None, topn=10, clip_start=0, clip_end=None, indexer=None):
        """
        Find the top-N most similar docvecs known from training. Positive docs contribute
        positively towards the similarity, negative docs negatively.
        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given docs. Docs may be specified as vectors, integer indexes
        of trained docvecs, or if the documents were originally presented with string tags,
        by the corresponding tags.
        The 'clip_start' and 'clip_end' allow limiting results to a particular contiguous
        range of the underlying `vectors_docs_norm` vectors. (This may be useful if the ordering
        there was chosen to be significant, such as more popular tag IDs in lower indexes.)
        Parameters
        ----------
        positive : :obj: `list`
            List of Docs specifed as vectors, integer indexes of trained docvecs or string tags
            that contribute positively.
        negative : :obj: `list`
            List of Docs specifed as vectors, integer indexes of trained docvecs or string tags
            that contribute negatively.
        topn : int
            Number of top-N similar docvecs to return.
        clip_start : int
            Start clipping index.
        clip_end : int
            End clipping index.
        Returns
        -------
        :obj: `list` of :obj: `tuple`
            Returns a list of tuples (doc, similarity)
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        self.init_sims()
        clip_end = clip_end or len(self.vectors_docs_norm)
        if isinstance(positive, string_types + integer_types + (integer,)) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]
        # add weights for each doc, if not already present; default to 1.0 for positive and -1.0 for negative docs
        positive = [
            (doc, 1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
            else doc for doc in positive
        ]
        negative = [
            (doc, -1.0) if isinstance(doc, string_types + integer_types + (ndarray, integer))
            else doc for doc in negative
        ]
        # compute the weighted average of all docs
        all_docs, mean = set(), []
        for doc, weight in positive + negative:
            if isinstance(doc, ndarray):
                mean.append(weight * doc)
            elif doc in self.doctags or doc < self.count:
                mean.append(weight * self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)])
                all_docs.add(self._int_index(doc, self.doctags, self.max_rawint))
            else:
                raise KeyError("doc '%s' not in trained set" % doc)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
        if indexer is not None:
            return indexer.most_similar(mean, topn)
        dists = dot(self.vectors_docs_norm[clip_start:clip_end], mean)
        if not topn:
            return dists
        best = matutils.argsort(dists, topn=topn + len(all_docs), reverse=True)
        # ignore (don't return) docs from the input
        result = [
            (self._index_to_doctag(sim + clip_start, self.offset2doctag, self.max_rawint), float(dists[sim]))
            for sim in best
            if (sim + clip_start) not in all_docs
        ]
        return result[:topn]
    def doesnt_match(self, docs):
        """
        Which doc from the given list doesn't go with the others?
        (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        Parameters
        ----------
        docs : :obj: `list` of (str or int)
            List of seen documents specified by their corresponding string tags or integer indices.
        Returns
        -------
        str or int
            The document further away from the mean of all the documents.
        """
        self.init_sims()
        docs = [doc for doc in docs if doc in self.doctags or 0 <= doc < self.count]  # filter out unknowns
        logger.debug("using docs %s", docs)
        if not docs:
            raise ValueError("cannot select a doc from an empty list")
        # Fix: np.vstack requires a sequence of arrays; passing a generator is
        # deprecated and rejected by modern NumPy, so build a list first.
        vectors = vstack(
            [self.vectors_docs_norm[self._int_index(doc, self.doctags, self.max_rawint)] for doc in docs]).astype(REAL)
        mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
        dists = dot(vectors, mean)
        return sorted(zip(dists, docs))[0][1]
    def similarity(self, d1, d2):
        """
        Compute cosine similarity between two docvecs in the trained set, specified by int index or
        string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        Parameters
        ----------
        d1 : int or str
            Indicate the first document by it's string tag or integer index.
        d2 : int or str
            Indicate the second document by it's string tag or integer index.
        Returns
        -------
        float
            The cosine similarity between the vectors of the two documents.
        """
        return dot(matutils.unitvec(self[d1]), matutils.unitvec(self[d2]))
    def n_similarity(self, ds1, ds2):
        """
        Compute cosine similarity between two sets of docvecs from the trained set, specified by int
        index or string tag. (TODO: Accept vectors of out-of-training-set docs, as if from inference.)
        Parameters
        ----------
        ds1 : :obj: `list` of (str or int)
            Specify the first set of documents as a list of their integer indices or string tags.
        ds2 : :obj: `list` of (str or int)
            Specify the second set of documents as a list of their integer indices or string tags.
        Returns
        -------
        float
            The cosine similarity between the means of the documents in each of the two sets.
        """
        v1 = [self[doc] for doc in ds1]
        v2 = [self[doc] for doc in ds2]
        return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
    def distance(self, d1, d2):
        """
        Compute cosine distance between two documents.
        """
        return 1 - self.similarity(d1, d2)
    # required by base keyed vectors class
    def distances(self, d1, other_docs=()):
        """Compute distances from given document (string tag or int index) to all documents in `other_docs`.
        If `other_docs` is empty, return distance between `d1` and all documents seen during training.
        """
        input_vector = self[d1]
        if not other_docs:
            other_vectors = self.vectors_docs
        else:
            other_vectors = self[other_docs]
        # NOTE(review): relies on WordEmbeddingsKeyedVectors being defined
        # elsewhere in this module -- confirm it is not a stale class name.
        return 1 - WordEmbeddingsKeyedVectors.cosine_similarities(input_vector, other_vectors)
    def similarity_unseen_docs(self, model, doc_words1, doc_words2, alpha=0.1, min_alpha=0.0001, steps=5):
        """
        Compute cosine similarity between two post-bulk out of training documents.
        Parameters
        ----------
        model : :obj: `~gensim.models.doc2vec.Doc2Vec`
            An instance of a trained `Doc2Vec` model.
        doc_words1 : :obj: `list` of :obj: `str`
            The first document. Document should be a list of (word) tokens.
        doc_words2 : :obj: `list` of :obj: `str`
            The second document. Document should be a list of (word) tokens.
        alpha : float
            The initial learning rate.
        min_alpha : float
            Learning rate will linearly drop to `min_alpha` as training progresses.
        steps : int
            Number of times to train the new document.
        Returns
        -------
        float
            The cosine similarity between the unseen documents.
        """
        d1 = model.infer_vector(doc_words=doc_words1, alpha=alpha, min_alpha=min_alpha, steps=steps)
        d2 = model.infer_vector(doc_words=doc_words2, alpha=alpha, min_alpha=min_alpha, steps=steps)
        return dot(matutils.unitvec(d1), matutils.unitvec(d2))
    def save_word2vec_format(self, fname, prefix='*dt_', fvocab=None,
                             total_vec=None, binary=False, write_first_line=True):
        """Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.
        Parameters
        ----------
        fname : str
            The file path used to save the vectors in.
        prefix : str
            Uniquely identifies doctags from word vocab, and avoids collision
            in case of repeated string in doctag and word vocab.
        fvocab : str
            Optional file path used to save the vocabulary
        binary : bool
            If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
        total_vec : int
            Optional parameter to explicitly specify total no. of vectors
            (in case word vectors are appended with document vectors afterwards)
        write_first_line : bool
            Whether to print the first line in the file. Useful when saving doc-vectors after word-vectors.
        """
        total_vec = total_vec or len(self)
        with utils.smart_open(fname, 'ab') as fout:
            if write_first_line:
                logger.info("storing %sx%s projection weights into %s", total_vec, self.vectors_docs.shape[1], fname)
                fout.write(utils.to_utf8("%s %s\n" % (total_vec, self.vectors_docs.shape[1])))
            # store as in input order
            for i in range(len(self)):
                doctag = u"%s%s" % (prefix, self._index_to_doctag(i, self.offset2doctag, self.max_rawint))
                row = self.vectors_docs[i]
                if binary:
                    fout.write(utils.to_utf8(doctag) + b" " + row.tostring())
                else:
                    fout.write(utils.to_utf8("%s %s\n" % (doctag, ' '.join("%f" % val for val in row))))
    @staticmethod
    def _int_index(index, doctags, max_rawint):
        """Return int index for either string or int index"""
        if isinstance(index, integer_types + (integer,)):
            return index
        else:
            return max_rawint + 1 + doctags[index].offset
    @staticmethod
    def _index_to_doctag(i_index, offset2doctag, max_rawint):
        """Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
        candidate_offset = i_index - max_rawint - 1
        if 0 <= candidate_offset < len(offset2doctag):
            return offset2doctag[candidate_offset]
        else:
            return i_index
    # for backward compatibility
    def index_to_doctag(self, i_index):
        """Return string key for given i_index, if available. Otherwise return raw int doctag (same int)."""
        candidate_offset = i_index - self.max_rawint - 1
        if 0 <= candidate_offset < len(self.offset2doctag):
            # Fix: was `self.ffset2doctag`, which raised AttributeError whenever
            # a string doctag existed for this index.
            return self.offset2doctag[candidate_offset]
        else:
            return i_index
    # for backward compatibility
    def int_index(self, index, doctags, max_rawint):
        """Return int index for either string or int index"""
        if isinstance(index, integer_types + (integer,)):
            return index
        else:
            return max_rawint + 1 + doctags[index].offset
class FastTextKeyedVectors(VanillaWordEmbeddingsKeyedVectors):
    """
    Class to contain vectors and vocab for the FastText training class and other methods not directly
    involved in training such as most_similar().
    Out-of-vocabulary words are represented by the sum of their character-ngram
    vectors (hashed into `bucket` slots via `hash2index`).
    """
    def __init__(self, vector_size, min_n, max_n):
        super(FastTextKeyedVectors, self).__init__(vector_size=vector_size)
        self.vectors_vocab = None  # raw vectors for in-vocab words
        self.vectors_vocab_norm = None  # L2-normalized copy, filled by init_sims
        self.vectors_ngrams = None  # raw vectors for character ngrams
        self.vectors_ngrams_norm = None  # L2-normalized copy, filled by init_sims
        self.buckets_word = None
        self.hash2index = {}  # ngram hash (mod bucket) -> row in vectors_ngrams
        self.min_n = min_n  # shortest character ngram length
        self.max_n = max_n  # longest character ngram length
        self.num_ngram_vectors = 0
        # NOTE(review): `self.bucket` is read by __contains__/word_vec but not
        # set here -- presumably assigned by the training model; confirm.
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab instead")
    def syn0_vocab(self):
        return self.vectors_vocab
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_vocab_norm instead")
    def syn0_vocab_norm(self):
        return self.vectors_vocab_norm
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams instead")
    def syn0_ngrams(self):
        return self.vectors_ngrams
    @property
    @deprecated("Attribute will be removed in 4.0.0, use self.wv.vectors_ngrams_norm instead")
    def syn0_ngrams_norm(self):
        return self.vectors_ngrams_norm
    def __contains__(self, word):
        """
        Check if `word` or any character ngrams in `word` are present in the vocabulary.
        A vector for the word is guaranteed to exist if `__contains__` returns True.
        """
        if word in self.vocab:
            return True
        else:
            # out-of-vocab: the word is representable iff at least one of its
            # character ngrams hashes to a known bucket
            char_ngrams = _compute_ngrams(word, self.min_n, self.max_n)
            return any(_ft_hash(ng) % self.bucket in self.hash2index for ng in char_ngrams)
    def save(self, *args, **kwargs):
        """Saves the keyedvectors. This saved model can be loaded again using
        :func:`~gensim.models.fasttext.FastTextKeyedVectors.load` which supports
        getting vectors for out-of-vocabulary words.
        Parameters
        ----------
        fname : str
            Path to the file.
        """
        # don't bother storing the cached normalized vectors
        kwargs['ignore'] = kwargs.get(
            'ignore', ['vectors_norm', 'vectors_vocab_norm', 'vectors_ngrams_norm', 'buckets_word'])
        super(FastTextKeyedVectors, self).save(*args, **kwargs)
    def word_vec(self, word, use_norm=False):
        """
        Accept a single word as input.
        Returns the word's representations in vector space, as a 1D numpy array.
        If `use_norm` is True, returns the normalized word vector.
        Out-of-vocabulary words are built as the mean of the vectors of their
        known character ngrams; raises KeyError if none of the ngrams is known.
        """
        if word in self.vocab:
            return super(FastTextKeyedVectors, self).word_vec(word, use_norm)
        else:
            # from gensim.models.fasttext import compute_ngrams
            word_vec = np.zeros(self.vectors_ngrams.shape[1], dtype=np.float32)
            ngrams = _compute_ngrams(word, self.min_n, self.max_n)
            if use_norm:
                ngram_weights = self.vectors_ngrams_norm
            else:
                ngram_weights = self.vectors_ngrams
            ngrams_found = 0
            for ngram in ngrams:
                ngram_hash = _ft_hash(ngram) % self.bucket
                if ngram_hash in self.hash2index:
                    word_vec += ngram_weights[self.hash2index[ngram_hash]]
                    ngrams_found += 1
            if word_vec.any():
                # average over the ngrams actually found (max(1, .) guards /0)
                return word_vec / max(1, ngrams_found)
            else:  # No ngrams of the word are present in self.ngrams
                raise KeyError('all ngrams for word %s absent from model' % word)
    def init_sims(self, replace=False):
        """
        Precompute L2-normalized vectors.
        If `replace` is set, forget the original vectors and only keep the normalized
        ones = saves lots of memory!
        Note that you **cannot continue training** after doing a replace. The model becomes
        effectively read-only = you can only call `most_similar`, `similarity` etc.
        """
        # normalizes the in-vocab word vectors, then the ngram vectors
        super(FastTextKeyedVectors, self).init_sims(replace)
        if getattr(self, 'vectors_ngrams_norm', None) is None or replace:
            logger.info("precomputing L2-norms of ngram weight vectors")
            if replace:
                for i in range(self.vectors_ngrams.shape[0]):
                    self.vectors_ngrams[i, :] /= sqrt((self.vectors_ngrams[i, :] ** 2).sum(-1))
                self.vectors_ngrams_norm = self.vectors_ngrams
            else:
                self.vectors_ngrams_norm = \
                    (self.vectors_ngrams / sqrt((self.vectors_ngrams ** 2).sum(-1))[..., newaxis]).astype(REAL)
    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.
        Note: only in-vocab word vectors are stored; ngram vectors are not.
        Parameters
        ----------
        fname : str
            The file path used to save the vectors in.
        fvocab : str
            Optional file path used to save the vocabulary.
        binary : bool
            If True, the data will be saved in binary word2vec format, else it will be saved in plain text.
        total_vec : int
            Optional parameter to explicitly specify total no. of vectors
            (in case word vectors are appended with document vectors afterwards).
        """
        # from gensim.models.word2vec import save_word2vec_format
        _save_word2vec_format(
            fname, self.vocab, self.vectors, fvocab=fvocab, binary=binary, total_vec=total_vec)
| 117,148 | 41.185452 | 150 | py |
poincare_glove | poincare_glove-master/gensim/models/deprecated/keyedvectors.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2016 Radim Rehurek <me@radimrehurek.com>
# Licensed under the GNU LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Warnings
--------
.. deprecated:: 3.3.0
Use :mod:`gensim.models.keyedvectors` instead.
Word vector storage and similarity look-ups.
Common code independent of the way the vectors are trained(Word2Vec, FastText, WordRank, VarEmbed etc)
The word vectors are considered read-only in this class.
Initialize the vectors by training e.g. Word2Vec::
>>> model = Word2Vec(sentences, size=100, window=5, min_count=5, workers=4)
>>> word_vectors = model.wv
Persist the word vectors to disk with::
>>> word_vectors.save(fname)
>>> word_vectors = KeyedVectors.load(fname)
The vectors can also be instantiated from an existing file on disk
in the original Google's word2vec C format as a KeyedVectors instance::
>>> from gensim.models.keyedvectors import KeyedVectors
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.txt', binary=False) # C text format
>>> word_vectors = KeyedVectors.load_word2vec_format('/tmp/vectors.bin', binary=True) # C binary format
You can perform various syntactic/semantic NLP word tasks with the vectors. Some of them
are already built-in::
>>> word_vectors.most_similar(positive=['woman', 'king'], negative=['man'])
[('queen', 0.50882536), ...]
>>> word_vectors.most_similar_cosmul(positive=['woman', 'king'], negative=['man'])
[('queen', 0.71382287), ...]
>>> word_vectors.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
>>> word_vectors.similarity('woman', 'man')
0.73723527
Correlation with human opinion on word similarity::
>>> word_vectors.evaluate_word_pairs(os.path.join(module_path, 'test_data','wordsim353.tsv'))
0.51, 0.62, 0.13
And on analogies::
>>> word_vectors.accuracy(os.path.join(module_path, 'test_data', 'questions-words.txt'))
and so on.
"""
from __future__ import division # py3 "true division"
import logging
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty # noqa:F401
# If pyemd C extension is available, import it.
# If pyemd is attempted to be used, but isn't installed, ImportError will be raised in wmdistance
try:
from pyemd import emd
PYEMD_EXT = True
except ImportError:
PYEMD_EXT = False
from numpy import dot, zeros, dtype, float32 as REAL,\
double, array, vstack, fromstring, sqrt, newaxis,\
ndarray, sum as np_sum, prod, ascontiguousarray,\
argmax
import numpy as np
from gensim import utils, matutils # utility fnc for pickling, common scipy operations etc
from gensim.corpora.dictionary import Dictionary
from six import string_types, iteritems
from six.moves import xrange
from scipy import stats
logger = logging.getLogger(__name__)
class Vocab(object):
    """
    A single vocabulary item, used internally for collecting per-word frequency/sampling info,
    and for constructing binary trees (incl. both word leaves and inner nodes).

    Arbitrary attributes may be attached via keyword arguments; `count`
    defaults to 0 and orders instances in priority queues.
    """
    def __init__(self, **kwargs):
        self.count = 0
        self.__dict__.update(kwargs)

    def __lt__(self, other):
        # Ordering by frequency, used when items sit in a priority queue.
        return self.count < other.count

    def __str__(self):
        # Render all non-private attributes, sorted by name for stable output.
        public = [(k, v) for k, v in sorted(self.__dict__.items()) if not k.startswith('_')]
        body = ', '.join('%s:%r' % kv for kv in public)
        return "%s(%s)" % (self.__class__.__name__, body)
class KeyedVectorsBase(utils.SaveLoad):
    """
    Base class to contain vectors and vocab for any set of vectors which are each associated with a key.
    """
    def __init__(self):
        # Embedding matrix; starts as a list, becomes a numpy array of shape
        # (len(vocab), vector_size) once vectors are loaded.
        self.syn0 = []
        # word -> Vocab entry (carries the row .index into syn0 and a .count).
        self.vocab = {}
        # Row index -> word (inverse of the vocab's .index field).
        self.index2word = []
        # Dimensionality of the vectors; set when data is loaded.
        self.vector_size = None
    def save_word2vec_format(self, fname, fvocab=None, binary=False, total_vec=None):
        """
        Store the input-hidden weight matrix in the same format used by the original
        C word2vec-tool, for compatibility.
        `fname` is the file used to save the vectors in
        `fvocab` is an optional file used to save the vocabulary
        `binary` is an optional boolean indicating whether the data is to be saved
        in binary word2vec format (default: False)
        `total_vec` is an optional parameter to explicitly specify total no. of vectors
        (in case word vectors are appended with document vectors afterwards)
        """
        if total_vec is None:
            total_vec = len(self.vocab)
        vector_size = self.syn0.shape[1]
        if fvocab is not None:
            logger.info("storing vocabulary in %s", fvocab)
            with utils.smart_open(fvocab, 'wb') as vout:
                # vocabulary file lists words by descending frequency
                for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
                    vout.write(utils.to_utf8("%s %s\n" % (word, vocab.count)))
        logger.info("storing %sx%s projection weights into %s", total_vec, vector_size, fname)
        assert (len(self.vocab), vector_size) == self.syn0.shape
        with utils.smart_open(fname, 'wb') as fout:
            fout.write(utils.to_utf8("%s %s\n" % (total_vec, vector_size)))
            # store in sorted order: most frequent words at the top
            for word, vocab in sorted(iteritems(self.vocab), key=lambda item: -item[1].count):
                row = self.syn0[vocab.index]
                if binary:
                    fout.write(utils.to_utf8(word) + b" " + row.tostring())
                else:
                    fout.write(utils.to_utf8("%s %s\n" % (word, ' '.join("%f" % val for val in row))))
    @classmethod
    def load_word2vec_format(cls, fname, fvocab=None, binary=False, encoding='utf8', unicode_errors='strict',
                             limit=None, datatype=REAL):
        """
        Load the input-hidden weight matrix from the original C word2vec-tool format.
        Note that the information stored in the file is incomplete (the binary tree is missing),
        so while you can query for word similarity etc., you cannot continue training
        with a model loaded this way.
        `binary` is a boolean indicating whether the data is in binary word2vec format.
        `norm_only` is a boolean indicating whether to only store normalised word2vec vectors in memory.
        Word counts are read from `fvocab` filename, if set (this is the file generated
        by `-save-vocab` flag of the original C tool).
        If you trained the C model using non-utf8 encoding for words, specify that
        encoding in `encoding`.
        `unicode_errors`, default 'strict', is a string suitable to be passed as the `errors`
        argument to the unicode() (Python 2.x) or str() (Python 3.x) function. If your source
        file may include word tokens truncated in the middle of a multibyte unicode character
        (as is common from the original word2vec.c tool), 'ignore' or 'replace' may help.
        `limit` sets a maximum number of word-vectors to read from the file. The default,
        None, means read all.
        `datatype` (experimental) can coerce dimensions to a non-default float type (such
        as np.float16) to save memory. (Such types may result in much slower bulk operations
        or incompatibility with optimized routines.)
        """
        counts = None
        if fvocab is not None:
            logger.info("loading word counts from %s", fvocab)
            counts = {}
            with utils.smart_open(fvocab) as fin:
                for line in fin:
                    word, count = utils.to_unicode(line).strip().split()
                    counts[word] = int(count)
        logger.info("loading projection weights from %s", fname)
        with utils.smart_open(fname) as fin:
            header = utils.to_unicode(fin.readline(), encoding=encoding)
            vocab_size, vector_size = (int(x) for x in header.split())  # throws for invalid file format
            if limit:
                vocab_size = min(vocab_size, limit)
            result = cls()
            result.vector_size = vector_size
            result.syn0 = zeros((vocab_size, vector_size), dtype=datatype)
            def add_word(word, weights):
                # Register `word` under the next free row and store its vector;
                # duplicate words keep only their first occurrence.
                word_id = len(result.vocab)
                if word in result.vocab:
                    logger.warning("duplicate word '%s' in %s, ignoring all but first", word, fname)
                    return
                if counts is None:
                    # most common scenario: no vocab file given. just make up some bogus counts, in descending order
                    result.vocab[word] = Vocab(index=word_id, count=vocab_size - word_id)
                elif word in counts:
                    # use count from the vocab file
                    result.vocab[word] = Vocab(index=word_id, count=counts[word])
                else:
                    # vocab file given, but word is missing -- set count to None (TODO: or raise?)
                    logger.warning("vocabulary file is incomplete: '%s' is missing", word)
                    result.vocab[word] = Vocab(index=word_id, count=None)
                result.syn0[word_id] = weights
                result.index2word.append(word)
            if binary:
                binary_len = dtype(REAL).itemsize * vector_size
                for _ in xrange(vocab_size):
                    # mixed text and binary: read text first, then binary
                    word = []
                    while True:
                        ch = fin.read(1)
                        if ch == b' ':
                            break
                        if ch == b'':
                            raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                        if ch != b'\n':  # ignore newlines in front of words (some binary files have)
                            word.append(ch)
                    word = utils.to_unicode(b''.join(word), encoding=encoding, errors=unicode_errors)
                    weights = fromstring(fin.read(binary_len), dtype=REAL)
                    add_word(word, weights)
            else:
                for line_no in xrange(vocab_size):
                    line = fin.readline()
                    if line == b'':
                        raise EOFError("unexpected end of input; is count incorrect or file otherwise damaged?")
                    parts = utils.to_unicode(line.rstrip(), encoding=encoding, errors=unicode_errors).split(" ")
                    if len(parts) != vector_size + 1:
                        raise ValueError("invalid vector on line %s (is this really the text format?)" % line_no)
                    word, weights = parts[0], [REAL(x) for x in parts[1:]]
                    add_word(word, weights)
        if result.syn0.shape[0] != len(result.vocab):
            # fewer rows were filled than allocated (duplicates skipped) -- trim the matrix
            logger.info(
                "duplicate words detected, shrinking matrix size from %i to %i",
                result.syn0.shape[0], len(result.vocab)
            )
            result.syn0 = ascontiguousarray(result.syn0[: len(result.vocab)])
        assert (len(result.vocab), vector_size) == result.syn0.shape
        logger.info("loaded %s matrix from %s", result.syn0.shape, fname)
        return result
    def similarity(self, w1, w2):
        """
        Compute similarity between vectors of two input words.
        To be implemented by child class.
        """
        raise NotImplementedError
    def distance(self, w1, w2):
        """
        Compute distance between vectors of two input words.
        To be implemented by child class.
        """
        raise NotImplementedError
    def distances(self, word_or_vector, other_words=()):
        """
        Compute distances from given word or vector to all words in `other_words`.
        If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
        To be implemented by child class.
        """
        raise NotImplementedError
    def word_vec(self, word):
        """
        Accept a single word as input.
        Returns the word's representations in vector space, as a 1D numpy array.
        Example::
            >>> trained_model.word_vec('office')
            array([ -1.40128313e-02, ...])
        """
        if word in self.vocab:
            result = self.syn0[self.vocab[word].index]
            # the returned array is a view into syn0; lock it against accidental writes
            result.setflags(write=False)
            return result
        else:
            raise KeyError("word '%s' not in vocabulary" % word)
    def __getitem__(self, words):
        """
        Accept a single word or a list of words as input.
        If a single word: returns the word's representations in vector space, as
        a 1D numpy array.
        Multiple words: return the words' representations in vector space, as a
        2d numpy array: #words x #vector_size. Matrix rows are in the same order
        as in input.
        Example::
            >>> trained_model['office']
            array([ -1.40128313e-02, ...])
            >>> trained_model[['office', 'products']]
            array([ -1.40128313e-02, ...]
                [ -1.70425311e-03, ...]
                ...)
        """
        if isinstance(words, string_types):
            # allow calls like trained_model['office'], as a shorthand for trained_model[['office']]
            return self.word_vec(words)
        return vstack([self.word_vec(word) for word in words])
    def __contains__(self, word):
        # membership means an exact vocabulary entry exists (no OOV handling here)
        return word in self.vocab
    def most_similar_to_given(self, w1, word_list):
        """Return the word from word_list most similar to w1.
        Args:
            w1 (str): a word
            word_list (list): list of words containing a word most similar to w1
        Returns:
            the word in word_list with the highest similarity to w1
        Raises:
            KeyError: If w1 or any word in word_list is not in the vocabulary
        Example::
            >>> trained_model.most_similar_to_given('music', ['water', 'sound', 'backpack', 'mouse'])
            'sound'
            >>> trained_model.most_similar_to_given('snake', ['food', 'pencil', 'animal', 'phone'])
            'animal'
        """
        return word_list[argmax([self.similarity(w1, word) for word in word_list])]
    def words_closer_than(self, w1, w2):
        """
        Returns all words that are closer to `w1` than `w2` is to `w1`.
        Parameters
        ----------
        w1 : str
            Input word.
        w2 : str
            Input word.
        Returns
        -------
        list (str)
            List of words that are closer to `w1` than `w2` is to `w1`.
        Examples
        --------
        >>> model.words_closer_than('carnivore.n.01', 'mammal.n.01')
        ['dog.n.01', 'canine.n.02']
        """
        all_distances = self.distances(w1)
        w1_index = self.vocab[w1].index
        w2_index = self.vocab[w2].index
        # indices whose distance to w1 is strictly smaller than w2's distance
        closer_node_indices = np.where(all_distances < all_distances[w2_index])[0]
        return [self.index2word[index] for index in closer_node_indices if index != w1_index]
    def rank(self, w1, w2):
        """
        Rank of the distance of `w2` from `w1`, in relation to distances of all words from `w1`.
        Parameters
        ----------
        w1 : str
            Input word.
        w2 : str
            Input word.
        Returns
        -------
        int
            Rank of `w2` from `w1` in relation to all other nodes.
        Examples
        --------
        >>> model.rank('mammal.n.01', 'carnivore.n.01')
        3
        """
        return len(self.words_closer_than(w1, w2)) + 1
class EuclideanKeyedVectors(KeyedVectorsBase):
"""
Class to contain vectors and vocab for the Word2Vec training class and other w2v methods not directly
involved in training such as most_similar()
"""
def __init__(self):
super(EuclideanKeyedVectors, self).__init__()
self.syn0norm = None
    @property
    def wv(self):
        # Compatibility shim: code written against model objects exposing
        # `.wv` also works when handed the keyed vectors object itself.
        return self
def save(self, *args, **kwargs):
# don't bother storing the cached normalized vectors
kwargs['ignore'] = kwargs.get('ignore', ['syn0norm'])
super(EuclideanKeyedVectors, self).save(*args, **kwargs)
def word_vec(self, word, use_norm=False):
"""
Accept a single word as input.
Returns the word's representations in vector space, as a 1D numpy array.
If `use_norm` is True, returns the normalized word vector.
Example::
>>> trained_model['office']
array([ -1.40128313e-02, ...])
"""
if word in self.vocab:
if use_norm:
result = self.syn0norm[self.vocab[word].index]
else:
result = self.syn0[self.vocab[word].index]
result.setflags(write=False)
return result
else:
raise KeyError("word '%s' not in vocabulary" % word)
    def most_similar(self, positive=None, negative=None, topn=10, restrict_vocab=None, indexer=None):
        """
        Find the top-N most similar words. Positive words contribute positively towards the
        similarity, negative words negatively.
        This method computes cosine similarity between a simple mean of the projection
        weight vectors of the given words and the vectors for each word in the model.
        The method corresponds to the `word-analogy` and `distance` scripts in the original
        word2vec implementation.
        If topn is False, most_similar returns the vector of similarity scores.
        `restrict_vocab` is an optional integer which limits the range of vectors which
        are searched for most-similar values. For example, restrict_vocab=10000 would
        only check the first 10000 word vectors in the vocabulary order. (This may be
        meaningful if you've sorted the vocabulary by descending frequency.)
        Example::
            >>> trained_model.most_similar(positive=['woman', 'king'], negative=['man'])
            [('queen', 0.50882536), ...]
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        # make sure the normalized matrix (syn0norm) exists before querying
        self.init_sims()
        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar('dog'), as a shorthand for most_similar(['dog'])
            positive = [positive]
        # add weights for each word, if not already present; default to 1.0 for positive and -1.0 for negative words
        positive = [
            (word, 1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in positive
        ]
        negative = [
            (word, -1.0) if isinstance(word, string_types + (ndarray,)) else word
            for word in negative
        ]
        # compute the weighted average of all words
        all_words, mean = set(), []
        for word, weight in positive + negative:
            if isinstance(word, ndarray):
                # raw vectors participate in the mean but can't be excluded from results
                mean.append(weight * word)
            else:
                mean.append(weight * self.word_vec(word, use_norm=True))
                if word in self.vocab:
                    # remember input-word indices so they can be filtered from the output
                    all_words.add(self.vocab[word].index)
        if not mean:
            raise ValueError("cannot compute similarity with no input")
        mean = matutils.unitvec(array(mean).mean(axis=0)).astype(REAL)
        if indexer is not None:
            # delegate to an approximate-nearest-neighbour indexer (e.g. Annoy) if given
            return indexer.most_similar(mean, topn)
        limited = self.syn0norm if restrict_vocab is None else self.syn0norm[:restrict_vocab]
        dists = dot(limited, mean)
        if not topn:
            return dists
        # fetch extra candidates so input words can be dropped and topn still filled
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]
def similar_by_word(self, word, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words.
If topn is False, similar_by_word returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_word('graph')
[('user', 0.9999163150787354), ...]
"""
return self.most_similar(positive=[word], topn=topn, restrict_vocab=restrict_vocab)
def similar_by_vector(self, vector, topn=10, restrict_vocab=None):
"""
Find the top-N most similar words by vector.
If topn is False, similar_by_vector returns the vector of similarity scores.
`restrict_vocab` is an optional integer which limits the range of vectors which
are searched for most-similar values. For example, restrict_vocab=10000 would
only check the first 10000 word vectors in the vocabulary order. (This may be
meaningful if you've sorted the vocabulary by descending frequency.)
Example::
>>> trained_model.similar_by_vector([1,2])
[('survey', 0.9942699074745178), ...]
"""
return self.most_similar(positive=[vector], topn=topn, restrict_vocab=restrict_vocab)
    def wmdistance(self, document1, document2):
        """
        Compute the Word Mover's Distance between two documents. When using this
        code, please consider citing the following papers:
        .. Ofir Pele and Michael Werman, "A linear time histogram metric for improved SIFT matching".
        .. Ofir Pele and Michael Werman, "Fast and robust earth mover's distances".
        .. Matt Kusner et al. "From Word Embeddings To Document Distances".
        Note that if one of the documents have no words that exist in the
        Word2Vec vocab, `float('inf')` (i.e. infinity) will be returned.
        This method only works if `pyemd` is installed (can be installed via pip, but requires a C compiler).
        Example:
            >>> # Train word2vec model.
            >>> model = Word2Vec(sentences)
            >>> # Some sentences to test.
            >>> sentence_obama = 'Obama speaks to the media in Illinois'.lower().split()
            >>> sentence_president = 'The president greets the press in Chicago'.lower().split()
            >>> # Remove their stopwords.
            >>> from nltk.corpus import stopwords
            >>> stopwords = nltk.corpus.stopwords.words('english')
            >>> sentence_obama = [w for w in sentence_obama if w not in stopwords]
            >>> sentence_president = [w for w in sentence_president if w not in stopwords]
            >>> # Compute WMD.
            >>> distance = model.wmdistance(sentence_obama, sentence_president)
        """
        if not PYEMD_EXT:
            raise ImportError("Please install pyemd Python package to compute WMD.")
        # Remove out-of-vocabulary words.
        len_pre_oov1 = len(document1)
        len_pre_oov2 = len(document2)
        document1 = [token for token in document1 if token in self]
        document2 = [token for token in document2 if token in self]
        diff1 = len_pre_oov1 - len(document1)
        diff2 = len_pre_oov2 - len(document2)
        if diff1 > 0 or diff2 > 0:
            logger.info('Removed %d and %d OOV words from document 1 and 2 (respectively).', diff1, diff2)
        if len(document1) == 0 or len(document2) == 0:
            logger.info(
                "At least one of the documents had no words that werein the vocabulary. "
                "Aborting (returning inf)."
            )
            return float('inf')
        # Joint dictionary over both documents; its ids index the distance matrix.
        dictionary = Dictionary(documents=[document1, document2])
        vocab_len = len(dictionary)
        if vocab_len == 1:
            # Both documents are composed by a single unique token
            return 0.0
        # Sets for faster look-up.
        docset1 = set(document1)
        docset2 = set(document2)
        # Compute distance matrix.
        distance_matrix = zeros((vocab_len, vocab_len), dtype=double)
        for i, t1 in dictionary.items():
            for j, t2 in dictionary.items():
                if t1 not in docset1 or t2 not in docset2:
                    # only distances from doc1 terms to doc2 terms are needed by emd
                    continue
                # Compute Euclidean distance between word vectors.
                distance_matrix[i, j] = sqrt(np_sum((self[t1] - self[t2])**2))
        if np_sum(distance_matrix) == 0.0:
            # `emd` gets stuck if the distance matrix contains only zeros.
            logger.info('The distance matrix is all zeros. Aborting (returning inf).')
            return float('inf')
        def nbow(document):
            # normalized bag-of-words histogram over the joint dictionary
            d = zeros(vocab_len, dtype=double)
            nbow = dictionary.doc2bow(document)  # Word frequencies.
            doc_len = len(document)
            for idx, freq in nbow:
                d[idx] = freq / float(doc_len)  # Normalized word frequencies.
            return d
        # Compute nBOW representation of documents.
        d1 = nbow(document1)
        d2 = nbow(document2)
        # Compute WMD.
        return emd(d1, d2, distance_matrix)
    def most_similar_cosmul(self, positive=None, negative=None, topn=10):
        """
        Find the top-N most similar words, using the multiplicative combination objective
        proposed by Omer Levy and Yoav Goldberg in [4]_. Positive words still contribute
        positively towards the similarity, negative words negatively, but with less
        susceptibility to one large distance dominating the calculation.
        In the common analogy-solving case, of two positive and one negative examples,
        this method is equivalent to the "3CosMul" objective (equation (4)) of Levy and Goldberg.
        Additional positive or negative examples contribute to the numerator or denominator,
        respectively – a potentially sensible but untested extension of the method. (With
        a single positive example, rankings will be the same as in the default most_similar.)
        Example::
            >>> trained_model.most_similar_cosmul(positive=['baghdad', 'england'], negative=['london'])
            [(u'iraq', 0.8488819003105164), ...]
        .. [4] Omer Levy and Yoav Goldberg. Linguistic Regularities in Sparse and Explicit Word Representations, 2014.
        """
        if positive is None:
            positive = []
        if negative is None:
            negative = []
        # make sure the normalized matrix (syn0norm) exists before querying
        self.init_sims()
        if isinstance(positive, string_types) and not negative:
            # allow calls like most_similar_cosmul('dog'), as a shorthand for most_similar_cosmul(['dog'])
            positive = [positive]
        # indices of in-vocab input words, excluded from the returned results
        all_words = {
            self.vocab[word].index for word in positive + negative
            if not isinstance(word, ndarray) and word in self.vocab
        }
        positive = [
            self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
            for word in positive
        ]
        negative = [
            self.word_vec(word, use_norm=True) if isinstance(word, string_types) else word
            for word in negative
        ]
        if not positive:
            raise ValueError("cannot compute similarity with no input")
        # equation (4) of Levy & Goldberg "Linguistic Regularities...",
        # with distances shifted to [0,1] per footnote (7)
        pos_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in positive]
        neg_dists = [((1 + dot(self.syn0norm, term)) / 2) for term in negative]
        # small epsilon in the denominator guards against division by zero
        dists = prod(pos_dists, axis=0) / (prod(neg_dists, axis=0) + 0.000001)
        if not topn:
            return dists
        # fetch extra candidates so input words can be dropped and topn still filled
        best = matutils.argsort(dists, topn=topn + len(all_words), reverse=True)
        # ignore (don't return) words from the input
        result = [(self.index2word[sim], float(dists[sim])) for sim in best if sim not in all_words]
        return result[:topn]
def doesnt_match(self, words):
"""
Which word from the given list doesn't go with the others?
Example::
>>> trained_model.doesnt_match("breakfast cereal dinner lunch".split())
'cereal'
"""
self.init_sims()
used_words = [word for word in words if word in self]
if len(used_words) != len(words):
ignored_words = set(words) - set(used_words)
logger.warning("vectors for words %s are not present in the model, ignoring these words", ignored_words)
if not used_words:
raise ValueError("cannot select a word from an empty list")
vectors = vstack(self.word_vec(word, use_norm=True) for word in used_words).astype(REAL)
mean = matutils.unitvec(vectors.mean(axis=0)).astype(REAL)
dists = dot(vectors, mean)
return sorted(zip(dists, used_words))[0][1]
@staticmethod
def cosine_similarities(vector_1, vectors_all):
"""
Return cosine similarities between one vector and a set of other vectors.
Parameters
----------
vector_1 : numpy.array
vector from which similarities are to be computed.
expected shape (dim,)
vectors_all : numpy.array
for each row in vectors_all, distance from vector_1 is computed.
expected shape (num_vectors, dim)
Returns
-------
numpy.array
Contains cosine distance between vector_1 and each row in vectors_all.
shape (num_vectors,)
"""
norm = np.linalg.norm(vector_1)
all_norms = np.linalg.norm(vectors_all, axis=1)
dot_products = dot(vectors_all, vector_1)
similarities = dot_products / (norm * all_norms)
return similarities
def distances(self, word_or_vector, other_words=()):
"""
Compute cosine distances from given word or vector to all words in `other_words`.
If `other_words` is empty, return distance between `word_or_vectors` and all words in vocab.
Parameters
----------
word_or_vector : str or numpy.array
Word or vector from which distances are to be computed.
other_words : iterable(str) or None
For each word in `other_words` distance from `word_or_vector` is computed.
If None or empty, distance of `word_or_vector` from all words in vocab is computed (including itself).
Returns
-------
numpy.array
Array containing distances to all words in `other_words` from input `word_or_vector`,
in the same order as `other_words`.
Notes
-----
Raises KeyError if either `word_or_vector` or any word in `other_words` is absent from vocab.
"""
if isinstance(word_or_vector, string_types):
input_vector = self.word_vec(word_or_vector)
else:
input_vector = word_or_vector
if not other_words:
other_vectors = self.syn0
else:
other_indices = [self.vocab[word].index for word in other_words]
other_vectors = self.syn0[other_indices]
return 1 - self.cosine_similarities(input_vector, other_vectors)
def distance(self, w1, w2):
"""
Compute cosine distance between two words.
Example::
>>> trained_model.distance('woman', 'man')
0.34
>>> trained_model.distance('woman', 'woman')
0.0
"""
return 1 - self.similarity(w1, w2)
def similarity(self, w1, w2):
"""
Compute cosine similarity between two words.
Example::
>>> trained_model.similarity('woman', 'man')
0.73723527
>>> trained_model.similarity('woman', 'woman')
1.0
"""
return dot(matutils.unitvec(self[w1]), matutils.unitvec(self[w2]))
def n_similarity(self, ws1, ws2):
"""
Compute cosine similarity between two sets of words.
Example::
>>> trained_model.n_similarity(['sushi', 'shop'], ['japanese', 'restaurant'])
0.61540466561049689
>>> trained_model.n_similarity(['restaurant', 'japanese'], ['japanese', 'restaurant'])
1.0000000000000004
>>> trained_model.n_similarity(['sushi'], ['restaurant']) == trained_model.similarity('sushi', 'restaurant')
True
"""
if not(len(ws1) and len(ws2)):
raise ZeroDivisionError('At least one of the passed list is empty.')
v1 = [self[word] for word in ws1]
v2 = [self[word] for word in ws2]
return dot(matutils.unitvec(array(v1).mean(axis=0)), matutils.unitvec(array(v2).mean(axis=0)))
@staticmethod
def log_accuracy(section):
correct, incorrect = len(section['correct']), len(section['incorrect'])
if correct + incorrect > 0:
logger.info(
"%s: %.1f%% (%i/%i)",
section['section'], 100.0 * correct / (correct + incorrect), correct, correct + incorrect
)
    def accuracy(self, questions, restrict_vocab=30000, most_similar=most_similar, case_insensitive=True):
        """
        Compute accuracy of the model. `questions` is a filename where lines are
        4-tuples of words, split into sections by ": SECTION NAME" lines.
        See questions-words.txt in
        https://storage.googleapis.com/google-code-archive-source/v2/code.google.com/word2vec/source-archive.zip
        for an example.
        The accuracy is reported (=printed to log and returned as a list) for each
        section separately, plus there's one aggregate summary at the end.
        Use `restrict_vocab` to ignore all questions containing a word not in the first `restrict_vocab`
        words (default 30,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
        In case `case_insensitive` is True, the first `restrict_vocab` words are taken first, and then
        case normalization is performed.
        Use `case_insensitive` to convert all words in questions and vocab to their uppercase form before
        evaluating the accuracy (default True). Useful in case of case-mismatch between training tokens
        and question words. In case of multiple case variants of a single word, the vector for the first
        occurrence (also the most frequent if vocabulary is sorted) is taken.
        This method corresponds to the `compute-accuracy` script of the original C word2vec.
        """
        ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
        # reversed() so that, for case-colliding words, the FIRST (most frequent) occurrence wins
        ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
        sections, section = [], None
        for line_no, line in enumerate(utils.smart_open(questions)):
            # TODO: use level3 BLAS (=evaluate multiple questions at once), for speed
            line = utils.to_unicode(line)
            if line.startswith(': '):
                # a new section starts => store the old section
                if section:
                    sections.append(section)
                    self.log_accuracy(section)
                section = {'section': line.lstrip(': ').strip(), 'correct': [], 'incorrect': []}
            else:
                if not section:
                    raise ValueError("missing section header before line #%i in %s" % (line_no, questions))
                try:
                    # each analogy line is "a b c expected"
                    if case_insensitive:
                        a, b, c, expected = [word.upper() for word in line.split()]
                    else:
                        a, b, c, expected = [word for word in line.split()]
                except ValueError:
                    logger.info("skipping invalid line #%i in %s", line_no, questions)
                    continue
                if a not in ok_vocab or b not in ok_vocab or c not in ok_vocab or expected not in ok_vocab:
                    logger.debug("skipping line #%i with OOV words: %s", line_no, line.strip())
                    continue
                # temporarily swap in the restricted/case-normalized vocab for the query,
                # then restore it right after the similarity call
                original_vocab = self.vocab
                self.vocab = ok_vocab
                ignore = {a, b, c}  # input words to be ignored
                predicted = None
                # find the most likely prediction, ignoring OOV words and input words
                sims = most_similar(self, positive=[b, c], negative=[a], topn=False, restrict_vocab=restrict_vocab)
                self.vocab = original_vocab
                for index in matutils.argsort(sims, reverse=True):
                    predicted = self.index2word[index].upper() if case_insensitive else self.index2word[index]
                    if predicted in ok_vocab and predicted not in ignore:
                        if predicted != expected:
                            logger.debug("%s: expected %s, predicted %s", line.strip(), expected, predicted)
                        break
                if predicted == expected:
                    section['correct'].append((a, b, c, expected))
                else:
                    section['incorrect'].append((a, b, c, expected))
        if section:
            # store the last section, too
            sections.append(section)
            self.log_accuracy(section)
        total = {
            'section': 'total',
            'correct': sum((s['correct'] for s in sections), []),
            'incorrect': sum((s['incorrect'] for s in sections), []),
        }
        self.log_accuracy(total)
        sections.append(total)
        return sections
@staticmethod
def log_evaluate_word_pairs(pearson, spearman, oov, pairs):
logger.info('Pearson correlation coefficient against %s: %.4f', pairs, pearson[0])
logger.info('Spearman rank-order correlation coefficient against %s: %.4f', pairs, spearman[0])
logger.info('Pairs with unknown words ratio: %.1f%%', oov)
def evaluate_word_pairs(self, pairs, delimiter='\t', restrict_vocab=300000,
case_insensitive=True, dummy4unknown=False):
"""
Compute correlation of the model with human similarity judgments. `pairs` is a filename of a dataset where
lines are 3-tuples, each consisting of a word pair and a similarity value, separated by `delimiter`.
An example dataset is included in Gensim (test/test_data/wordsim353.tsv). More datasets can be found at
http://technion.ac.il/~ira.leviant/MultilingualVSMdata.html or https://www.cl.cam.ac.uk/~fh295/simlex.html.
The model is evaluated using Pearson correlation coefficient and Spearman rank-order correlation coefficient
between the similarities from the dataset and the similarities produced by the model itself.
The results are printed to log and returned as a triple (pearson, spearman, ratio of pairs with unknown words).
Use `restrict_vocab` to ignore all word pairs containing a word not in the first `restrict_vocab`
words (default 300,000). This may be meaningful if you've sorted the vocabulary by descending frequency.
If `case_insensitive` is True, the first `restrict_vocab` words are taken, and then case normalization
is performed.
Use `case_insensitive` to convert all words in the pairs and vocab to their uppercase form before
evaluating the model (default True). Useful when you expect case-mismatch between training tokens
and words pairs in the dataset. If there are multiple case variants of a single word, the vector for the first
occurrence (also the most frequent if vocabulary is sorted) is taken.
Use `dummy4unknown=True` to produce zero-valued similarities for pairs with out-of-vocabulary words.
Otherwise (default False), these pairs are skipped entirely.
"""
ok_vocab = [(w, self.vocab[w]) for w in self.index2word[:restrict_vocab]]
ok_vocab = {w.upper(): v for w, v in reversed(ok_vocab)} if case_insensitive else dict(ok_vocab)
similarity_gold = []
similarity_model = []
oov = 0
original_vocab = self.vocab
self.vocab = ok_vocab
for line_no, line in enumerate(utils.smart_open(pairs)):
line = utils.to_unicode(line)
if line.startswith('#'):
# May be a comment
continue
else:
try:
if case_insensitive:
a, b, sim = [word.upper() for word in line.split(delimiter)]
else:
a, b, sim = [word for word in line.split(delimiter)]
sim = float(sim)
except (ValueError, TypeError):
logger.info('skipping invalid line #%d in %s', line_no, pairs)
continue
if a not in ok_vocab or b not in ok_vocab:
oov += 1
if dummy4unknown:
similarity_model.append(0.0)
similarity_gold.append(sim)
continue
else:
logger.debug('skipping line #%d with OOV words: %s', line_no, line.strip())
continue
similarity_gold.append(sim) # Similarity from the dataset
similarity_model.append(self.similarity(a, b)) # Similarity from the model
self.vocab = original_vocab
spearman = stats.spearmanr(similarity_gold, similarity_model)
pearson = stats.pearsonr(similarity_gold, similarity_model)
oov_ratio = float(oov) / (len(similarity_gold) + oov) * 100
logger.debug('Pearson correlation coefficient against %s: %f with p-value %f', pairs, pearson[0], pearson[1])
logger.debug(
'Spearman rank-order correlation coefficient against %s: %f with p-value %f',
pairs, spearman[0], spearman[1]
)
logger.debug('Pairs with unknown words: %d', oov)
self.log_evaluate_word_pairs(pearson, spearman, oov_ratio, pairs)
return pearson, spearman, oov_ratio
def init_sims(self, replace=False):
"""
Precompute L2-normalized vectors.
If `replace` is set, forget the original vectors and only keep the normalized
ones = saves lots of memory!
Note that you **cannot continue training** after doing a replace. The model becomes
effectively read-only = you can call `most_similar`, `similarity` etc., but not `train`.
"""
if getattr(self, 'syn0norm', None) is None or replace:
logger.info("precomputing L2-norms of word weight vectors")
if replace:
for i in xrange(self.syn0.shape[0]):
self.syn0[i, :] /= sqrt((self.syn0[i, :] ** 2).sum(-1))
self.syn0norm = self.syn0
else:
self.syn0norm = (self.syn0 / sqrt((self.syn0 ** 2).sum(-1))[..., newaxis]).astype(REAL)
def get_keras_embedding(self, train_embeddings=False):
"""
Return a Keras 'Embedding' layer with weights set as the Word2Vec model's learned word embeddings
"""
try:
from keras.layers import Embedding
except ImportError:
raise ImportError("Please install Keras to use this function")
weights = self.syn0
# set `trainable` as `False` to use the pretrained word embedding
# No extra mem usage here as `Embedding` layer doesn't create any new matrix for weights
layer = Embedding(
input_dim=weights.shape[0], output_dim=weights.shape[1],
weights=[weights], trainable=train_embeddings
)
return layer
# Backward-compatibility alias: older code imports `KeyedVectors` directly, so keep
# that name pointing at the Euclidean implementation.
KeyedVectors = EuclideanKeyedVectors
| 43,913 | 39.850233 | 119 | py |
poincare_glove | poincare_glove-master/gensim/test/test_sklearn_api.py | import unittest
import numpy
import codecs
import pickle
from scipy import sparse
try:
from sklearn.pipeline import Pipeline
from sklearn import linear_model, cluster
from sklearn.exceptions import NotFittedError
except ImportError:
raise unittest.SkipTest("Test requires scikit-learn to be installed, which is not available")
from gensim.sklearn_api.rpmodel import RpTransformer
from gensim.sklearn_api.ldamodel import LdaTransformer
from gensim.sklearn_api.lsimodel import LsiTransformer
from gensim.sklearn_api.ldaseqmodel import LdaSeqTransformer
from gensim.sklearn_api.w2vmodel import W2VTransformer
from gensim.sklearn_api.atmodel import AuthorTopicTransformer
from gensim.sklearn_api.d2vmodel import D2VTransformer
from gensim.sklearn_api.text2bow import Text2BowTransformer
from gensim.sklearn_api.tfidf import TfIdfTransformer
from gensim.sklearn_api.hdp import HdpTransformer
from gensim.sklearn_api.phrases import PhrasesTransformer
from gensim.corpora import mmcorpus, Dictionary
from gensim import matutils, models
from gensim.test.utils import datapath, common_texts
# Tiny shared toy corpus used by most of the transformer tests below.
texts = [
    ['complier', 'system', 'computer'],
    ['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
    ['graph', 'flow', 'network', 'graph'],
    ['loading', 'computer', 'system'],
    ['user', 'server', 'system'],
    ['tree', 'hamiltonian'],
    ['graph', 'trees'],
    ['computer', 'kernel', 'malfunction', 'computer'],
    ['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
# Bag-of-words form of `texts`, shared across the test classes.
corpus = [dictionary.doc2bow(text) for text in texts]
# Author -> document-index mapping for the author-topic model tests.
author2doc = {
    'john': [0, 1, 2, 3, 4, 5, 6],
    'jane': [2, 3, 4, 5, 6, 7, 8],
    'jack': [0, 2, 4, 6, 8],
    'jill': [1, 3, 5, 7]
}
# Smaller follow-up corpus + authors, used to exercise online/partial updates.
texts_new = texts[0:3]
author2doc_new = {
    'jill': [0],
    'bob': [0, 1],
    'sally': [1, 2]
}
dictionary_new = Dictionary(texts_new)
corpus_new = [dictionary_new.doc2bow(text) for text in texts_new]
texts_ldaseq = [
[
u'senior', u'studios', u'studios', u'studios', u'creators', u'award', u'mobile', u'currently',
u'challenges', u'senior', u'summary', u'senior', u'motivated', u'creative', u'senior'
],
[
u'performs', u'engineering', u'tasks', u'infrastructure', u'focusing', u'primarily', u'programming',
u'interaction', u'designers', u'engineers', u'leadership', u'teams', u'teams', u'crews', u'responsibilities',
u'engineering', u'quality', u'functional', u'functional', u'teams', u'organizing', u'prioritizing',
u'technical', u'decisions', u'engineering', u'participates', u'participates', u'reviews', u'participates',
u'hiring', u'conducting', u'interviews'
],
[
u'feedback', u'departments', u'define', u'focusing', u'engineering', u'teams', u'crews', u'facilitate',
u'engineering', u'departments', u'deadlines', u'milestones', u'typically', u'spends', u'designing',
u'developing', u'updating', u'bugs', u'mentoring', u'engineers', u'define', u'schedules', u'milestones',
u'participating'
],
[
u'reviews', u'interviews', u'sized', u'teams', u'interacts', u'disciplines', u'knowledge', u'skills',
u'knowledge', u'knowledge', u'xcode', u'scripting', u'debugging', u'skills', u'skills', u'knowledge',
u'disciplines', u'animation', u'networking', u'expertise', u'competencies', u'oral', u'skills',
u'management', u'skills', u'proven', u'effectively', u'teams', u'deadline', u'environment', u'bachelor',
u'minimum', u'shipped', u'leadership', u'teams', u'location', u'resumes', u'jobs', u'candidates',
u'openings', u'jobs'
],
[
u'maryland', u'client', u'producers', u'electricity', u'operates', u'storage', u'utility', u'retail',
u'customers', u'engineering', u'consultant', u'maryland', u'summary', u'technical', u'technology',
u'departments', u'expertise', u'maximizing', u'output', u'reduces', u'operating', u'participates',
u'areas', u'engineering', u'conducts', u'testing', u'solve', u'supports', u'environmental', u'understands',
u'objectives', u'operates', u'responsibilities', u'handles', u'complex', u'engineering', u'aspects',
u'monitors', u'quality', u'proficiency', u'optimization', u'recommendations', u'supports', u'personnel',
u'troubleshooting', u'commissioning', u'startup', u'shutdown', u'supports', u'procedure', u'operating',
u'units', u'develops', u'simulations', u'troubleshooting', u'tests', u'enhancing', u'solving', u'develops',
u'estimates', u'schedules', u'scopes', u'understands', u'technical', u'management', u'utilize', u'routine',
u'conducts', u'hazards', u'utilizing', u'hazard', u'operability', u'methodologies', u'participates',
u'startup', u'reviews', u'pssr', u'participate', u'teams', u'participate', u'regulatory', u'audits',
u'define', u'scopes', u'budgets', u'schedules', u'technical', u'management', u'environmental', u'awareness',
u'interfacing', u'personnel', u'interacts', u'regulatory', u'departments', u'input', u'objectives',
u'identifying', u'introducing', u'concepts', u'solutions', u'peers', u'customers', u'coworkers', u'knowledge',
u'skills', u'engineering', u'quality', u'engineering'
],
[
u'commissioning', u'startup', u'knowledge', u'simulators', u'technologies', u'knowledge', u'engineering',
u'techniques', u'disciplines', u'leadership', u'skills', u'proven', u'engineers', u'oral', u'skills',
u'technical', u'skills', u'analytically', u'solve', u'complex', u'interpret', u'proficiency', u'simulation',
u'knowledge', u'applications', u'manipulate', u'applications', u'engineering'
],
[
u'calculations', u'programs', u'matlab', u'excel', u'independently', u'environment', u'proven', u'skills',
u'effectively', u'multiple', u'tasks', u'planning', u'organizational', u'management', u'skills', u'rigzone',
u'jobs', u'developer', u'exceptional', u'strategies', u'junction', u'exceptional', u'strategies', u'solutions',
u'solutions', u'biggest', u'insurers', u'operates', u'investment'
],
[
u'vegas', u'tasks', u'electrical', u'contracting', u'expertise', u'virtually', u'electrical', u'developments',
u'institutional', u'utilities', u'technical', u'experts', u'relationships', u'credibility', u'contractors',
u'utility', u'customers', u'customer', u'relationships', u'consistently', u'innovations', u'profile',
u'construct', u'envision', u'dynamic', u'complex', u'electrical', u'management', u'grad', u'internship',
u'electrical', u'engineering', u'infrastructures', u'engineers', u'documented', u'management', u'engineering',
u'quality', u'engineering', u'electrical', u'engineers', u'complex', u'distribution', u'grounding',
u'estimation', u'testing', u'procedures', u'voltage', u'engineering'
],
[
u'troubleshooting', u'installation', u'documentation', u'bsee', u'certification', u'electrical', u'voltage',
u'cabling', u'electrical', u'engineering', u'candidates', u'electrical', u'internships', u'oral', u'skills',
u'organizational', u'prioritization', u'skills', u'skills', u'excel', u'cadd', u'calculation', u'autocad',
u'mathcad', u'skills', u'skills', u'customer', u'relationships', u'solving', u'ethic', u'motivation', u'tasks',
u'budget', u'affirmative', u'diversity', u'workforce', u'gender', u'orientation', u'disability', u'disabled',
u'veteran', u'vietnam', u'veteran', u'qualifying', u'veteran', u'diverse', u'candidates', u'respond',
u'developing', u'workplace', u'reflects', u'diversity', u'communities', u'reviews', u'electrical',
u'contracting', u'southwest', u'electrical', u'contractors'
],
[
u'intern', u'electrical', u'engineering', u'idexx', u'laboratories', u'validating', u'idexx', u'integrated',
u'hardware', u'entails', u'planning', u'debug', u'validation', u'engineers', u'validation', u'methodologies',
u'healthcare', u'platforms', u'brightest', u'solve', u'challenges', u'innovation', u'technology', u'idexx',
u'intern', u'idexx', u'interns', u'supplement', u'interns', u'teams', u'roles', u'competitive', u'interns',
u'idexx', u'interns', u'participate', u'internships', u'mentors', u'seminars', u'topics', u'leadership',
u'workshops', u'relevant', u'planning', u'topics', u'intern', u'presentations', u'mixers', u'applicants',
u'ineligible', u'laboratory', u'compliant', u'idexx', u'laboratories', u'healthcare', u'innovation',
u'practicing', u'veterinarians', u'diagnostic', u'technology', u'idexx', u'enhance', u'veterinarians',
u'efficiency', u'economically', u'idexx', u'worldwide', u'diagnostic', u'tests', u'tests', u'quality',
u'headquartered', u'idexx', u'laboratories', u'employs', u'customers', u'qualifications', u'applicants',
u'idexx', u'interns', u'potential', u'demonstrated', u'portfolio', u'recommendation', u'resumes', u'marketing',
u'location', u'americas', u'verification', u'validation', u'schedule', u'overtime', u'idexx', u'laboratories',
u'reviews', u'idexx', u'laboratories', u'nasdaq', u'healthcare', u'innovation', u'practicing', u'veterinarians'
],
[
u'location', u'duration', u'temp', u'verification', u'validation', u'tester', u'verification', u'validation',
u'middleware', u'specifically', u'testing', u'applications', u'clinical', u'laboratory', u'regulated',
u'environment', u'responsibilities', u'complex', u'hardware', u'testing', u'clinical', u'analyzers',
u'laboratory', u'graphical', u'interfaces', u'complex', u'sample', u'sequencing', u'protocols', u'developers',
u'correction', u'tracking', u'tool', u'timely', u'troubleshoot', u'testing', u'functional', u'manual',
u'automated', u'participate', u'ongoing'
],
[
u'testing', u'coverage', u'planning', u'documentation', u'testing', u'validation', u'corrections', u'monitor',
u'implementation', u'recurrence', u'operating', u'statistical', u'quality', u'testing', u'global', u'multi',
u'teams', u'travel', u'skills', u'concepts', u'waterfall', u'agile', u'methodologies', u'debugging', u'skills',
u'complex', u'automated', u'instrumentation', u'environment', u'hardware', u'mechanical', u'components',
u'tracking', u'lifecycle', u'management', u'quality', u'organize', u'define', u'priorities', u'organize',
u'supervision', u'aggressive', u'deadlines', u'ambiguity', u'analyze', u'complex', u'situations', u'concepts',
u'technologies', u'verbal', u'skills', u'effectively', u'technical', u'clinical', u'diverse', u'strategy',
u'clinical', u'chemistry', u'analyzer', u'laboratory', u'middleware', u'basic', u'automated', u'testing',
u'biomedical', u'engineering', u'technologists', u'laboratory', u'technology', u'availability', u'click',
u'attach'
],
[
u'scientist', u'linux', u'asrc', u'scientist', u'linux', u'asrc', u'technology', u'solutions', u'subsidiary',
u'asrc', u'engineering', u'technology', u'contracts'
],
[
u'multiple', u'agencies', u'scientists', u'engineers', u'management', u'personnel', u'allows', u'solutions',
u'complex', u'aeronautics', u'aviation', u'management', u'aviation', u'engineering', u'hughes', u'technical',
u'technical', u'aviation', u'evaluation', u'engineering', u'management', u'technical', u'terminal',
u'surveillance', u'programs', u'currently', u'scientist', u'travel', u'responsibilities', u'develops',
u'technology', u'modifies', u'technical', u'complex', u'reviews', u'draft', u'conformity', u'completeness',
u'testing', u'interface', u'hardware', u'regression', u'impact', u'reliability', u'maintainability',
u'factors', u'standardization', u'skills', u'travel', u'programming', u'linux', u'environment', u'cisco',
u'knowledge', u'terminal', u'environment', u'clearance', u'clearance', u'input', u'output', u'digital',
u'automatic', u'terminal', u'management', u'controller', u'termination', u'testing', u'evaluating', u'policies',
u'procedure', u'interface', u'installation', u'verification', u'certification', u'core', u'avionic',
u'programs', u'knowledge', u'procedural', u'testing', u'interfacing', u'hardware', u'regression', u'impact',
u'reliability', u'maintainability', u'factors', u'standardization', u'missions', u'asrc', u'subsidiaries',
u'affirmative', u'employers', u'applicants', u'disability', u'veteran', u'technology', u'location', u'airport',
u'bachelor', u'schedule', u'travel', u'contributor', u'management', u'asrc', u'reviews'
],
[
u'technical', u'solarcity', u'niche', u'vegas', u'overview', u'resolving', u'customer', u'clients',
u'expanding', u'engineers', u'developers', u'responsibilities', u'knowledge', u'planning', u'adapt',
u'dynamic', u'environment', u'inventive', u'creative', u'solarcity', u'lifecycle', u'responsibilities',
u'technical', u'analyzing', u'diagnosing', u'troubleshooting', u'customers', u'ticketing', u'console',
u'escalate', u'knowledge', u'engineering', u'timely', u'basic', u'phone', u'functionality', u'customer',
u'tracking', u'knowledgebase', u'rotation', u'configure', u'deployment', u'sccm', u'technical', u'deployment',
u'deploy', u'hardware', u'solarcity', u'bachelor', u'knowledge', u'dell', u'laptops', u'analytical',
u'troubleshooting', u'solving', u'skills', u'knowledge', u'databases', u'preferably', u'server', u'preferably',
u'monitoring', u'suites', u'documentation', u'procedures', u'knowledge', u'entries', u'verbal', u'skills',
u'customer', u'skills', u'competitive', u'solar', u'package', u'insurance', u'vacation', u'savings',
u'referral', u'eligibility', u'equity', u'performers', u'solarcity', u'affirmative', u'diversity', u'workplace',
u'applicants', u'orientation', u'disability', u'veteran', u'careerrookie'
],
[
u'embedded', u'exelis', u'junction', u'exelis', u'embedded', u'acquisition', u'networking', u'capabilities',
u'classified', u'customer', u'motivated', u'develops', u'tests', u'innovative', u'solutions', u'minimal',
u'supervision', u'paced', u'environment', u'enjoys', u'assignments', u'interact', u'multi', u'disciplined',
u'challenging', u'focused', u'embedded', u'developments', u'spanning', u'engineering', u'lifecycle',
u'specification', u'enhancement', u'applications', u'embedded', u'freescale', u'applications', u'android',
u'platforms', u'interface', u'customers', u'developers', u'refine', u'specifications', u'architectures'
],
[
u'java', u'programming', u'scripts', u'python', u'debug', u'debugging', u'emulators', u'regression',
u'revisions', u'specialized', u'setups', u'capabilities', u'subversion', u'technical', u'documentation',
u'multiple', u'engineering', u'techexpousa', u'reviews'
],
[
u'modeler', u'semantic', u'modeling', u'models', u'skills', u'ontology', u'resource', u'framework', u'schema',
u'technologies', u'hadoop', u'warehouse', u'oracle', u'relational', u'artifacts', u'models', u'dictionaries',
u'models', u'interface', u'specifications', u'documentation', u'harmonization', u'mappings', u'aligned',
u'coordinate', u'technical', u'peer', u'reviews', u'stakeholder', u'communities', u'impact', u'domains',
u'relationships', u'interdependencies', u'models', u'define', u'analyze', u'legacy', u'models', u'corporate',
u'databases', u'architectural', u'alignment', u'customer', u'expertise', u'harmonization', u'modeling',
u'modeling', u'consulting', u'stakeholders', u'quality', u'models', u'storage', u'agile', u'specifically',
u'focus', u'modeling', u'qualifications', u'bachelors', u'accredited', u'modeler', u'encompass', u'evaluation',
u'skills', u'knowledge', u'modeling', u'techniques', u'resource', u'framework', u'schema', u'technologies',
u'unified', u'modeling', u'technologies', u'schemas', u'ontologies', u'sybase', u'knowledge', u'skills',
u'interpersonal', u'skills', u'customers', u'clearance', u'applicants', u'eligibility', u'classified',
u'clearance', u'polygraph', u'techexpousa', u'solutions', u'partnership', u'solutions', u'integration'
],
[
u'technologies', u'junction', u'develops', u'maintains', u'enhances', u'complex', u'diverse', u'intensive',
u'analytics', u'algorithm', u'manipulation', u'management', u'documented', u'individually', u'reviews',
u'tests', u'components', u'adherence', u'resolves', u'utilizes', u'methodologies', u'environment', u'input',
u'components', u'hardware', u'offs', u'reuse', u'cots', u'gots', u'synthesis', u'components', u'tasks',
u'individually', u'analyzes', u'modifies', u'debugs', u'corrects', u'integrates', u'operating',
u'environments', u'develops', u'queries', u'databases', u'repositories', u'recommendations', u'improving',
u'documentation', u'develops', u'implements', u'algorithms', u'functional', u'assists', u'developing',
u'executing', u'procedures', u'components', u'reviews', u'documentation', u'solutions', u'analyzing',
u'conferring', u'users', u'engineers', u'analyzing', u'investigating', u'areas', u'adapt', u'hardware',
u'mathematical', u'models', u'predict', u'outcome', u'implement', u'complex', u'database', u'repository',
u'interfaces', u'queries', u'bachelors', u'accredited', u'substituted', u'bachelors', u'firewalls',
u'ipsec', u'vpns', u'technology', u'administering', u'servers', u'apache', u'jboss', u'tomcat',
u'developing', u'interfaces', u'firefox', u'internet', u'explorer', u'operating', u'mainframe',
u'linux', u'solaris', u'virtual', u'scripting', u'programming', u'oriented', u'programming', u'ajax',
u'script', u'procedures', u'cobol', u'cognos', u'fusion', u'focus', u'html', u'java', u'java', u'script',
u'jquery', u'perl', u'visual', u'basic', u'powershell', u'cots', u'cots', u'oracle', u'apex', u'integration',
u'competitive', u'package', u'bonus', u'corporate', u'equity', u'tuition', u'reimbursement', u'referral',
u'bonus', u'holidays', u'insurance', u'flexible', u'disability', u'insurance'
],
[u'technologies', u'disability', u'accommodation', u'recruiter', u'techexpousa'],
['bank', 'river', 'shore', 'water'],
['river', 'water', 'flow', 'fast', 'tree'],
['bank', 'water', 'fall', 'flow'],
['bank', 'bank', 'water', 'rain', 'river'],
['river', 'water', 'mud', 'tree'],
['money', 'transaction', 'bank', 'finance'],
['bank', 'borrow', 'money'],
['bank', 'finance'],
['finance', 'money', 'sell', 'bank'],
['borrow', 'sell'],
['bank', 'loan', 'sell']
]
# Dictionary/corpus built from the large job-posting fixture above, for LdaSeq tests.
dictionary_ldaseq = Dictionary(texts_ldaseq)
corpus_ldaseq = [dictionary_ldaseq.doc2bow(text) for text in texts_ldaseq]
# Small word2vec training corpus (math/physics sentences) for W2VTransformer tests.
# NOTE: 'realted' below is a typo in the fixture data, preserved deliberately.
w2v_texts = [
    ['calculus', 'is', 'the', 'mathematical', 'study', 'of', 'continuous', 'change'],
    ['geometry', 'is', 'the', 'study', 'of', 'shape'],
    ['algebra', 'is', 'the', 'study', 'of', 'generalizations', 'of', 'arithmetic', 'operations'],
    ['differential', 'calculus', 'is', 'related', 'to', 'rates', 'of', 'change', 'and', 'slopes', 'of', 'curves'],
    ['integral', 'calculus', 'is', 'realted', 'to', 'accumulation', 'of', 'quantities', 'and',
     'the', 'areas', 'under', 'and', 'between', 'curves'],
    ['physics', 'is', 'the', 'natural', 'science', 'that', 'involves', 'the', 'study', 'of', 'matter',
     'and', 'its', 'motion', 'and', 'behavior', 'through', 'space', 'and', 'time'],
    ['the', 'main', 'goal', 'of', 'physics', 'is', 'to', 'understand', 'how', 'the', 'universe', 'behaves'],
    ['physics', 'also', 'makes', 'significant', 'contributions', 'through', 'advances', 'in', 'new',
     'technologies', 'that', 'arise', 'from', 'theoretical', 'breakthroughs'],
    ['advances', 'in', 'the', 'understanding', 'of', 'electromagnetism', 'or', 'nuclear', 'physics',
     'led', 'directly', 'to', 'the', 'development', 'of', 'new', 'products', 'that', 'have', 'dramatically',
     'transformed', 'modern', 'day', 'society']
]
# Same sentences tagged with their index, as Doc2Vec (D2VTransformer) training input.
d2v_sentences = [models.doc2vec.TaggedDocument(words, [i]) for i, words in enumerate(w2v_texts)]
# Plain-string versions of the shared test documents (Text2Bow input).
dict_texts = [' '.join(text) for text in common_texts]
# Phrase-detection corpus: common texts plus one extra co-occurrence sentence.
phrases_sentences = common_texts + [
    ['graph', 'minors', 'survey', 'human', 'interface']
]
class TestLdaWrapper(unittest.TestCase):
    """Tests for the scikit-learn style `LdaTransformer` wrapper around gensim's LDA."""
    def setUp(self):
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        # NOTE(review): `numpy.random.seed(0)` returns None, so `random_state` is
        # effectively None here — `random_state=0` was probably intended. Confirm
        # before changing: the hard-coded expected values below may depend on it.
        self.model = LdaTransformer(
            id2word=dictionary, num_topics=2, passes=100, minimum_probability=0, random_state=numpy.random.seed(0)
        )
        self.model.fit(corpus)
    def testTransform(self):
        """Single doc and batch transforms should both yield (n_docs, num_topics) matrices."""
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
        bow = []
        for i in texts_new:
            bow.append(self.model.id2word.doc2bow(i))
        matrix = self.model.transform(bow)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
    def testPartialFit(self):
        """Repeated `partial_fit` calls should converge to the expected topic mixture."""
        for i in range(10):
            self.model.partial_fit(X=corpus)  # fit against the model again
        doc = list(corpus)[0]  # transform only the first document
        transformed = self.model.transform(doc)
        expected = numpy.array([0.13, 0.87])
        passed = numpy.allclose(sorted(transformed[0]), sorted(expected), atol=1e-1)
        self.assertTrue(passed)
    def testConsistencyWithGensimModel(self):
        """Wrapper output should match an equivalently-parameterized plain gensim LdaModel."""
        # training an LdaTransformer with `num_topics`=10
        self.model = LdaTransformer(
            id2word=dictionary, num_topics=10, passes=100, minimum_probability=0, random_state=numpy.random.seed(0)
        )
        self.model.fit(corpus)
        # training a Gensim LdaModel with the same params
        gensim_ldamodel = models.LdaModel(
            corpus=corpus, id2word=dictionary, num_topics=10, passes=100,
            minimum_probability=0, random_state=numpy.random.seed(0)
        )
        texts_new = ['graph', 'eulerian']
        bow = self.model.id2word.doc2bow(texts_new)
        matrix_transformer_api = self.model.transform(bow)
        matrix_gensim_model = gensim_ldamodel[bow]
        # convert into dense representation to be able to compare with transformer output
        matrix_gensim_model_dense = matutils.sparse2full(matrix_gensim_model, 10)
        passed = numpy.allclose(matrix_transformer_api, matrix_gensim_model_dense, atol=1e-1)
        self.assertTrue(passed)
    def testCSRMatrixConversion(self):
        """The wrapper should accept a scipy CSR matrix as training input."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        arr = numpy.array([[1, 2, 0], [0, 0, 3], [1, 0, 0]])
        sarr = sparse.csr_matrix(arr)
        newmodel = LdaTransformer(num_topics=2, passes=100)
        newmodel.fit(sarr)
        bow = [(0, 1), (1, 2), (2, 0)]
        transformed_vec = newmodel.transform(bow)
        expected_vec = numpy.array([0.12843782, 0.87156218])
        passed = numpy.allclose(transformed_vec, expected_vec, atol=1e-1)
        self.assertTrue(passed)
    def testPipeline(self):
        """The transformer should work as a feature step inside an sklearn Pipeline."""
        model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=numpy.random.seed(0))
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
            uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
            cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        # NOTE(review): this constructs a RandomState and discards it — it does NOT
        # seed the global RNG, so the trailing comment is misleading; confirm intent.
        numpy.random.mtrand.RandomState(1)  # set seed for getting same result
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline([('features', model,), ('classifier', clf)])
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreaterEqual(score, 0.40)
    def testSetGetParams(self):
        """`set_params`/`get_params` should round-trip and propagate to `gensim_model` on refit."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
        # updating multiple params
        param_dict = {"eval_every": 20, "decay": 0.7}
        self.model.set_params(**param_dict)
        model_params = self.model.get_params()
        for key in param_dict.keys():
            self.assertEqual(model_params[key], param_dict[key])
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'eval_every'), 20)
        self.assertEqual(getattr(self.model.gensim_model, 'decay'), 0.7)
    def testPersistence(self):
        """A pickled-and-restored transformer should produce the same outputs."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        texts_new = ['graph', 'eulerian']
        loaded_bow = model_load.id2word.doc2bow(texts_new)
        loaded_matrix = model_load.transform(loaded_bow)
        # sanity check for transformation operation
        self.assertEqual(loaded_matrix.shape[0], 1)
        self.assertEqual(loaded_matrix.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_bow = self.model.id2word.doc2bow(texts_new)
        original_matrix = self.model.transform(original_bow)
        passed = numpy.allclose(loaded_matrix, original_matrix, atol=1e-1)
        self.assertTrue(passed)
    def testModelNotFitted(self):
        """Calling `transform` before `fit` should raise sklearn's NotFittedError."""
        lda_wrapper = LdaTransformer(
            id2word=dictionary, num_topics=2, passes=100,
            minimum_probability=0, random_state=numpy.random.seed(0)
        )
        texts_new = ['graph', 'eulerian']
        bow = lda_wrapper.id2word.doc2bow(texts_new)
        self.assertRaises(NotFittedError, lda_wrapper.transform, bow)
class TestLsiWrapper(unittest.TestCase):
def setUp(self):
numpy.random.seed(0) # set fixed seed to get similar values everytime
self.model = LsiTransformer(id2word=dictionary, num_topics=2)
self.model.fit(corpus)
def testTransform(self):
texts_new = ['graph', 'eulerian']
bow = self.model.id2word.doc2bow(texts_new)
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 1)
self.assertEqual(matrix.shape[1], self.model.num_topics)
texts_new = [['graph', 'eulerian'], ['server', 'flow'], ['path', 'system']]
bow = []
for i in texts_new:
bow.append(self.model.id2word.doc2bow(i))
matrix = self.model.transform(bow)
self.assertEqual(matrix.shape[0], 3)
self.assertEqual(matrix.shape[1], self.model.num_topics)
def testPartialFit(self):
for i in range(10):
self.model.partial_fit(X=corpus) # fit against the model again
doc = list(corpus)[0] # transform only the first document
transformed = self.model.transform(doc)
expected = numpy.array([1.39, 0.0])
passed = numpy.allclose(transformed[0], expected, atol=1)
self.assertTrue(passed)
def testPipeline(self):
model = LsiTransformer(num_topics=2)
with open(datapath('mini_newsgroup'), 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
data = cache
id2word = Dictionary([x.split() for x in data.data])
corpus = [id2word.doc2bow(i.split()) for i in data.data]
numpy.random.mtrand.RandomState(1) # set seed for getting same result
clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
text_lsi = Pipeline([('features', model,), ('classifier', clf)])
text_lsi.fit(corpus, data.target)
score = text_lsi.score(corpus, data.target)
self.assertGreater(score, 0.50)
def testSetGetParams(self):
# updating only one param
self.model.set_params(num_topics=3)
model_params = self.model.get_params()
self.assertEqual(model_params["num_topics"], 3)
# verify that the attributes values are also changed for `gensim_model` after fitting
self.model.fit(corpus)
self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
# updating multiple params
param_dict = {"chunksize": 10000, "decay": 0.9}
self.model.set_params(**param_dict)
model_params = self.model.get_params()
for key in param_dict.keys():
self.assertEqual(model_params[key], param_dict[key])
# verify that the attributes values are also changed for `gensim_model` after fitting
self.model.fit(corpus)
self.assertEqual(getattr(self.model.gensim_model, 'chunksize'), 10000)
self.assertEqual(getattr(self.model.gensim_model, 'decay'), 0.9)
def testPersistence(self):
    """A pickled-and-restored transformer must project exactly like the original."""
    restored = pickle.loads(pickle.dumps(self.model))
    texts_new = ['graph', 'eulerian']
    restored_matrix = restored.transform(restored.id2word.doc2bow(texts_new))
    # sanity check on the transformed shape
    self.assertEqual(restored_matrix.shape[0], 1)
    self.assertEqual(restored_matrix.shape[1], restored.num_topics)
    # the restored model must agree with the original projection
    original_matrix = self.model.transform(self.model.id2word.doc2bow(texts_new))
    self.assertTrue(numpy.allclose(restored_matrix, original_matrix, atol=1e-1))
def testModelNotFitted(self):
    """Calling transform before fit must raise NotFittedError."""
    unfitted = LsiTransformer(id2word=dictionary, num_topics=2)
    query = unfitted.id2word.doc2bow(['graph', 'eulerian'])
    self.assertRaises(NotFittedError, unfitted.transform, query)
class TestLdaSeqWrapper(unittest.TestCase):
    """Tests for the sklearn-API LdaSeqTransformer (dynamic topic model) wrapper."""

    def setUp(self):
        # Fit a 2-topic sequential LDA over three time slices of the shared corpus.
        self.model = LdaSeqTransformer(
            id2word=dictionary_ldaseq, num_topics=2, time_slice=[10, 10, 11], initialize='gensim'
        )
        self.model.fit(corpus_ldaseq)

    def testTransform(self):
        """transform should return one row per document, num_topics columns."""
        # transforming two documents
        docs = [list(corpus_ldaseq)[0], list(corpus_ldaseq)[1]]
        transformed_vecs = self.model.transform(docs)
        self.assertEqual(transformed_vecs.shape[0], 2)
        self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)
        # transforming one document
        doc = list(corpus_ldaseq)[0]
        transformed_vecs = self.model.transform(doc)
        self.assertEqual(transformed_vecs.shape[0], 1)
        self.assertEqual(transformed_vecs.shape[1], self.model.num_topics)

    def testPipeline(self):
        """The transformer should compose with sklearn's Pipeline for classification."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        test_data = data.data[0:2]  # only two documents, to keep the DTM fit cheap
        test_target = data.target[0:2]
        id2word = Dictionary([x.split() for x in test_data])
        corpus = [id2word.doc2bow(i.split()) for i in test_data]
        # NOTE(review): time_slice sums to 3 but only 2 documents are supplied —
        # confirm this is the intended slicing.
        model = LdaSeqTransformer(id2word=id2word, num_topics=2, time_slice=[1, 1, 1], initialize='gensim')
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_ldaseq = Pipeline([('features', model,), ('classifier', clf)])
        text_ldaseq.fit(corpus, test_target)
        score = text_ldaseq.score(corpus, test_target)
        self.assertGreater(score, 0.50)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus_ldaseq)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = list(corpus_ldaseq)[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        ldaseq_wrapper = LdaSeqTransformer(num_topics=2)
        doc = list(corpus_ldaseq)[0]
        self.assertRaises(NotFittedError, ldaseq_wrapper.transform, doc)
class TestRpWrapper(unittest.TestCase):
    """Tests for the sklearn-API RpTransformer (random projections) wrapper."""

    def setUp(self):
        numpy.random.seed(13)  # RP is randomised; fix the seed for reproducibility
        self.model = RpTransformer(num_topics=2)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """transform should return one row per document, num_topics columns."""
        # transform two documents
        docs = [list(self.corpus)[0], list(self.corpus)[1]]
        matrix = self.model.transform(docs)
        self.assertEqual(matrix.shape[0], 2)
        self.assertEqual(matrix.shape[1], self.model.num_topics)
        # transform one document
        doc = list(self.corpus)[0]
        matrix = self.model.transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.num_topics)

    def testPipeline(self):
        """RpTransformer should compose with sklearn's Pipeline for classification."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = RpTransformer(num_topics=2)
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        # BUGFIX: the old `numpy.random.mtrand.RandomState(1)` built a throwaway
        # generator and seeded nothing; numpy.random.seed(1) actually re-seeds
        # the global RNG as the original comment intended.
        numpy.random.seed(1)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_rp = Pipeline([('features', model,), ('classifier', clf)])
        text_rp.fit(corpus, data.target)
        score = text_rp.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = list(self.corpus)[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        rpmodel_wrapper = RpTransformer(num_topics=2)
        doc = list(self.corpus)[0]
        self.assertRaises(NotFittedError, rpmodel_wrapper.transform, doc)
class TestWord2VecWrapper(unittest.TestCase):
    """Tests for the sklearn-API W2VTransformer (word2vec) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        # 10-dimensional vectors; min_count=0 keeps every token, seed fixes the init
        self.model = W2VTransformer(size=10, min_count=0, seed=42)
        self.model.fit(texts)

    def testTransform(self):
        """transform returns one row per input word, `size` columns."""
        # transform multiple words
        words = []
        words = words + texts[0]
        matrix = self.model.transform(words)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.size)
        # transform one word
        word = texts[0][0]
        matrix = self.model.transform(word)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.size)

    def testConsistencyWithGensimModel(self):
        """The wrapper must yield (nearly) the same vectors as plain gensim Word2Vec."""
        # training a W2VTransformer
        self.model = W2VTransformer(size=10, min_count=0, seed=42)
        self.model.fit(texts)
        # training a Gensim Word2Vec model with the same params
        gensim_w2vmodel = models.Word2Vec(texts, size=10, min_count=0, seed=42)
        word = texts[0][0]
        vec_transformer_api = self.model.transform(word)  # vector returned by W2VTransformer
        vec_gensim_model = gensim_w2vmodel[word]  # vector returned by Word2Vec
        passed = numpy.allclose(vec_transformer_api, vec_gensim_model, atol=1e-1)
        self.assertTrue(passed)

    def testPipeline(self):
        """Word vectors feed a logistic-regression classifier over two classes."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = W2VTransformer(size=10, min_count=1)
        model.fit(w2v_texts)
        class_dict = {'mathematics': 1, 'physics': 0}
        train_data = [
            ('calculus', 'mathematics'), ('mathematical', 'mathematics'),
            ('geometry', 'mathematics'), ('operations', 'mathematics'),
            ('curves', 'mathematics'), ('natural', 'physics'), ('nuclear', 'physics'),
            ('science', 'physics'), ('electromagnetism', 'physics'), ('natural', 'physics')
        ]
        train_input = [x[0] for x in train_data]
        train_target = [class_dict[x[1]] for x in train_data]
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        # the classifier is fitted outside the pipeline; the pipeline only scores
        clf.fit(model.transform(train_input), train_target)
        text_w2v = Pipeline([('features', model,), ('classifier', clf)])
        score = text_w2v.score(train_input, train_target)
        self.assertGreater(score, 0.40)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(negative=20)
        model_params = self.model.get_params()
        self.assertEqual(model_params["negative"], 20)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(texts)
        self.assertEqual(getattr(self.model.gensim_model, 'negative'), 20)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        word = texts[0][0]
        loaded_transformed_vecs = model_load.transform(word)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.size)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(word)
        passed = numpy.allclose(loaded_transformed_vecs, original_transformed_vecs, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        w2vmodel_wrapper = W2VTransformer(size=10, min_count=0, seed=42)
        word = texts[0][0]
        self.assertRaises(NotFittedError, w2vmodel_wrapper.transform, word)
class TestAuthorTopicWrapper(unittest.TestCase):
    """Tests for the sklearn-API AuthorTopicTransformer wrapper."""

    def setUp(self):
        # fit a 2-topic author-topic model over the shared corpus/author mapping
        self.model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=2, passes=100)
        self.model.fit(corpus)

    def testTransform(self):
        """transform returns one topic-distribution row per author."""
        # transforming multiple authors
        author_list = ['jill', 'jack']
        author_topics = self.model.transform(author_list)
        self.assertEqual(author_topics.shape[0], 2)
        self.assertEqual(author_topics.shape[1], self.model.num_topics)
        # transforming one author
        jill_topics = self.model.transform('jill')
        self.assertEqual(jill_topics.shape[0], 1)
        self.assertEqual(jill_topics.shape[1], self.model.num_topics)

    def testPartialFit(self):
        """partial_fit with new docs/authors must give the new author a topic mix."""
        self.model.partial_fit(corpus_new, author2doc=author2doc_new)
        # Did we learn something about Sally?
        output_topics = self.model.transform('sally')
        sally_topics = output_topics[0]  # topics for 'sally' (from the list of lists)
        self.assertTrue(all(sally_topics > 0))

    def testPipeline(self):
        """Author-topic vectors feed a MiniBatchKMeans clusterer in a Pipeline."""
        # train the AuthorTopic model first
        model = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
        model.fit(corpus)
        # create and train clustering model
        clstr = cluster.MiniBatchKMeans(n_clusters=2)
        authors_full = ['john', 'jane', 'jack', 'jill']
        clstr.fit(model.transform(authors_full))
        # stack together the two models in a pipeline
        text_atm = Pipeline([('features', model,), ('cluster', clstr)])
        author_list = ['jane', 'jack', 'jill']
        ret_val = text_atm.predict(author_list)
        self.assertEqual(len(ret_val), len(author_list))

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(num_topics=3)
        model_params = self.model.get_params()
        self.assertEqual(model_params["num_topics"], 3)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'num_topics'), 3)
        # updating multiple params
        param_dict = {"passes": 5, "iterations": 10}
        self.model.set_params(**param_dict)
        model_params = self.model.get_params()
        for key in param_dict.keys():
            self.assertEqual(model_params[key], param_dict[key])
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'passes'), 5)
        self.assertEqual(getattr(self.model.gensim_model, 'iterations'), 10)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        author_list = ['jill']
        loaded_author_topics = model_load.transform(author_list)
        # sanity check for transformation operation
        self.assertEqual(loaded_author_topics.shape[0], 1)
        # CONSISTENCY FIX: check the shape against the *loaded* model's num_topics,
        # as every sibling testPersistence does (was self.model.num_topics).
        self.assertEqual(loaded_author_topics.shape[1], model_load.num_topics)
        # comparing the original and loaded models
        original_author_topics = self.model.transform(author_list)
        passed = numpy.allclose(loaded_author_topics, original_author_topics, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        atmodel_wrapper = AuthorTopicTransformer(id2word=dictionary, author2doc=author2doc, num_topics=10, passes=100)
        author_list = ['jill', 'jack']
        self.assertRaises(NotFittedError, atmodel_wrapper.transform, author_list)
class TestD2VTransformer(unittest.TestCase):
    """Tests for the sklearn-API D2VTransformer (doc2vec) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = D2VTransformer(min_count=1)
        self.model.fit(d2v_sentences)

    def testTransform(self):
        """transform returns one inferred vector per document, `size` columns."""
        # transform multiple documents
        docs = [w2v_texts[0], w2v_texts[1], w2v_texts[2]]
        matrix = self.model.transform(docs)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], self.model.size)
        # transform one document
        doc = w2v_texts[0]
        matrix = self.model.transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], self.model.size)

    def testFitTransform(self):
        """fit_transform must behave like fit followed by transform."""
        model = D2VTransformer(min_count=1)
        # fit and transform multiple documents
        docs = [w2v_texts[0], w2v_texts[1], w2v_texts[2]]
        matrix = model.fit_transform(docs)
        self.assertEqual(matrix.shape[0], 3)
        self.assertEqual(matrix.shape[1], model.size)
        # fit and transform one document
        doc = w2v_texts[0]
        matrix = model.fit_transform(doc)
        self.assertEqual(matrix.shape[0], 1)
        self.assertEqual(matrix.shape[1], model.size)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(negative=20)
        model_params = self.model.get_params()
        self.assertEqual(model_params["negative"], 20)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(d2v_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'negative'), 20)

    def testPipeline(self):
        """Doc vectors feed a logistic-regression classifier over two classes."""
        numpy.random.seed(0)  # set fixed seed to get similar values everytime
        model = D2VTransformer(min_count=1)
        model.fit(d2v_sentences)
        class_dict = {'mathematics': 1, 'physics': 0}
        train_data = [
            (['calculus', 'mathematical'], 'mathematics'), (['geometry', 'operations', 'curves'], 'mathematics'),
            (['natural', 'nuclear'], 'physics'), (['science', 'electromagnetism', 'natural'], 'physics')
        ]
        train_input = [x[0] for x in train_data]
        train_target = [class_dict[x[1]] for x in train_data]
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        # the classifier is fitted outside the pipeline; the pipeline only scores
        clf.fit(model.transform(train_input), train_target)
        text_w2v = Pipeline([('features', model,), ('classifier', clf)])
        score = text_w2v.score(train_input, train_target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = w2v_texts[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # sanity check for transformation operation
        self.assertEqual(loaded_transformed_vecs.shape[0], 1)
        self.assertEqual(loaded_transformed_vecs.shape[1], model_load.size)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        passed = numpy.allclose(sorted(loaded_transformed_vecs), sorted(original_transformed_vecs), atol=1e-1)
        self.assertTrue(passed)

    def testConsistencyWithGensimModel(self):
        """The wrapper must infer (nearly) the same vector as plain gensim Doc2Vec."""
        # training a D2VTransformer
        self.model = D2VTransformer(min_count=1)
        self.model.fit(d2v_sentences)
        # training a Gensim Doc2Vec model with the same params
        gensim_d2vmodel = models.Doc2Vec(d2v_sentences, min_count=1)
        doc = w2v_texts[0]
        vec_transformer_api = self.model.transform(doc)  # vector returned by D2VTransformer
        vec_gensim_model = gensim_d2vmodel[doc]  # vector returned by Doc2Vec
        passed = numpy.allclose(vec_transformer_api, vec_gensim_model, atol=1e-1)
        self.assertTrue(passed)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        d2vmodel_wrapper = D2VTransformer(min_count=1)
        self.assertRaises(NotFittedError, d2vmodel_wrapper.transform, 1)
class TestText2BowTransformer(unittest.TestCase):
    """Tests for the sklearn-API Text2BowTransformer wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = Text2BowTransformer()
        self.model.fit(dict_texts)

    def testTransform(self):
        """transform tokenises the document and returns its bag-of-words counts."""
        # transform one document
        doc = ['computer system interface time computer system']
        bow_vec = self.model.transform(doc)[0]
        expected_values = [1, 1, 2, 2]  # comparing only the word-counts
        values = [x[1] for x in bow_vec]
        self.assertEqual(sorted(expected_values), sorted(values))

    def testSetGetParams(self):
        """set_params must be reflected by get_params."""
        # updating only one param
        self.model.set_params(prune_at=1000000)
        model_params = self.model.get_params()
        self.assertEqual(model_params["prune_at"], 1000000)

    def testPipeline(self):
        """Text2Bow + LDA + logistic regression stacked as one sklearn Pipeline."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        text2bow_model = Text2BowTransformer()
        # BUGFIX: `random_state=numpy.random.seed(0)` passed None (seed() returns
        # None), so the LDA itself was never seeded. Seed the global RNG explicitly
        # (preserving the old side effect) and give the model a real integer seed.
        # The old no-op `numpy.random.mtrand.RandomState(1)` construction is removed.
        numpy.random.seed(0)
        lda_model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=0)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline([('bow_model', text2bow_model), ('ldamodel', lda_model), ('classifier', clf)])
        text_lda.fit(data.data, data.target)
        score = text_lda.score(data.data, data.target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = dict_texts[0]
        loaded_transformed_vecs = model_load.transform(doc)
        # comparing the original and loaded models
        original_transformed_vecs = self.model.transform(doc)
        self.assertEqual(original_transformed_vecs, loaded_transformed_vecs)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        text2bow_wrapper = Text2BowTransformer()
        self.assertRaises(NotFittedError, text2bow_wrapper.transform, dict_texts[0])
class TestTfIdfTransformer(unittest.TestCase):
    """Tests for the sklearn-API TfIdfTransformer wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = TfIdfTransformer(normalize=True)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """transform must produce the known tf-idf weights for the toy corpus."""
        # NOTE(review): this uses the module-level `corpus`, not self.corpus —
        # presumably they hold the same toy documents; confirm.
        # transform one document
        doc = corpus[0]
        transformed_doc = self.model.transform(doc)
        expected_doc = [[(0, 0.5773502691896257), (1, 0.5773502691896257), (2, 0.5773502691896257)]]
        self.assertTrue(numpy.allclose(transformed_doc, expected_doc))
        # transform multiple documents
        docs = [corpus[0], corpus[1]]
        transformed_docs = self.model.transform(docs)
        expected_docs = [
            [(0, 0.5773502691896257), (1, 0.5773502691896257), (2, 0.5773502691896257)],
            [(3, 0.44424552527467476), (4, 0.44424552527467476), (5, 0.3244870206138555),
             (6, 0.44424552527467476), (7, 0.3244870206138555), (8, 0.44424552527467476)]
        ]
        self.assertTrue(numpy.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(numpy.allclose(transformed_docs[1], expected_docs[1]))

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(smartirs='nnn')
        model_params = self.model.get_params()
        self.assertEqual(model_params["smartirs"], 'nnn')
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'smartirs'), 'nnn')

    def testPipeline(self):
        """TfIdf + LDA + logistic regression stacked as one sklearn Pipeline."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        tfidf_model = TfIdfTransformer()
        tfidf_model.fit(corpus)
        # BUGFIX: `random_state=numpy.random.seed(0)` passed None (seed() returns
        # None), so the LDA itself was never seeded. Seed the global RNG explicitly
        # (preserving the old side effect) and give the model a real integer seed.
        # The old no-op `numpy.random.mtrand.RandomState(1)` construction is removed.
        numpy.random.seed(0)
        lda_model = LdaTransformer(num_topics=2, passes=10, minimum_probability=0, random_state=0)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_tfidf = Pipeline([('tfidf_model', tfidf_model), ('ldamodel', lda_model), ('classifier', clf)])
        text_tfidf.fit(corpus, data.target)
        score = text_tfidf.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = corpus[0]
        loaded_transformed_doc = model_load.transform(doc)
        # comparing the original and loaded models
        original_transformed_doc = self.model.transform(doc)
        self.assertEqual(original_transformed_doc, loaded_transformed_doc)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        tfidf_wrapper = TfIdfTransformer()
        self.assertRaises(NotFittedError, tfidf_wrapper.transform, corpus[0])
class TestHdpTransformer(unittest.TestCase):
    """Tests for the sklearn-API HdpTransformer (hierarchical Dirichlet process) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        self.model = HdpTransformer(id2word=dictionary, random_state=42)
        self.corpus = mmcorpus.MmCorpus(datapath('testcorpus.mm'))
        self.model.fit(self.corpus)

    def testTransform(self):
        """transform must reproduce the known topic distributions for the toy corpus."""
        # transform one document
        doc = self.corpus[0]
        transformed_doc = self.model.transform(doc)
        expected_doc = [
            [0.81043386270128193, 0.049357139518070477, 0.035840906753517532,
             0.026542006926698079, 0.019925705902962578, 0.014776690981729117, 0.011068909979528148]
        ]
        self.assertTrue(numpy.allclose(transformed_doc, expected_doc, atol=1e-2))
        # transform multiple documents
        docs = [self.corpus[0], self.corpus[1]]
        transformed_docs = self.model.transform(docs)
        expected_docs = [
            [0.81043386270128193, 0.049357139518070477, 0.035840906753517532,
             0.026542006926698079, 0.019925705902962578, 0.014776690981729117, 0.011068909979528148],
            [0.03795908, 0.39542609, 0.50650585, 0.0151082, 0.01132749, 0., 0.]
        ]
        self.assertTrue(numpy.allclose(transformed_docs[0], expected_docs[0], atol=1e-2))
        self.assertTrue(numpy.allclose(transformed_docs[1], expected_docs[1], atol=1e-2))

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(var_converge=0.05)
        model_params = self.model.get_params()
        self.assertEqual(model_params["var_converge"], 0.05)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(self.corpus)
        self.assertEqual(getattr(self.model.gensim_model, 'm_var_converge'), 0.05)

    def testPipeline(self):
        """HdpTransformer should compose with sklearn's Pipeline for classification."""
        with open(datapath('mini_newsgroup'), 'rb') as f:
            compressed_content = f.read()
        uncompressed_content = codecs.decode(compressed_content, 'zlib_codec')
        cache = pickle.loads(uncompressed_content)
        data = cache
        id2word = Dictionary([x.split() for x in data.data])
        corpus = [id2word.doc2bow(i.split()) for i in data.data]
        model = HdpTransformer(id2word=id2word)
        clf = linear_model.LogisticRegression(penalty='l2', C=0.1)
        text_lda = Pipeline([('features', model,), ('classifier', clf)])
        text_lda.fit(corpus, data.target)
        score = text_lda.score(corpus, data.target)
        self.assertGreater(score, 0.40)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        # NOTE(review): uses the module-level `corpus`, not self.corpus — confirm
        # both refer to the same toy documents.
        doc = corpus[0]
        loaded_transformed_doc = model_load.transform(doc)
        # comparing the original and loaded models
        original_transformed_doc = self.model.transform(doc)
        self.assertTrue(numpy.allclose(original_transformed_doc, loaded_transformed_doc))

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        hdp_wrapper = HdpTransformer(id2word=dictionary)
        self.assertRaises(NotFittedError, hdp_wrapper.transform, corpus[0])
class TestPhrasesTransformer(unittest.TestCase):
    """Tests for the sklearn-API PhrasesTransformer (bigram detection) wrapper."""

    def setUp(self):
        numpy.random.seed(0)
        # low min_count/threshold so the tiny corpus still yields bigrams
        self.model = PhrasesTransformer(min_count=1, threshold=1)
        self.model.fit(phrases_sentences)

    def testTransform(self):
        """transform must merge detected collocations into single `a_b` tokens."""
        # transform one document
        doc = phrases_sentences[-1]
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey', u'human_interface']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testPartialFit(self):
        """partial_fit on new sentences must add newly learned phrases."""
        new_sentences = [
            ['world', 'peace', 'humans', 'world', 'peace', 'world', 'peace', 'people'],
            ['world', 'peace', 'people'],
            ['world', 'peace', 'humans']
        ]
        self.model.partial_fit(X=new_sentences)  # train model with new sentences
        doc = ['graph', 'minors', 'survey', 'human', 'interface', 'world', 'peace']
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey', u'human_interface', u'world_peace']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(progress_per=5000)
        model_params = self.model.get_params()
        self.assertEqual(model_params["progress_per"], 5000)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(phrases_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'progress_per'), 5000)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = phrases_sentences[-1]
        loaded_phrase_tokens = model_load.transform(doc)
        # comparing the original and loaded models
        original_phrase_tokens = self.model.transform(doc)
        self.assertEqual(original_phrase_tokens, loaded_phrase_tokens)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        phrases_transformer = PhrasesTransformer()
        self.assertRaises(NotFittedError, phrases_transformer.transform, phrases_sentences[0])
# specifically test pluggable scoring in Phrases, because possible pickling issues with function parameter
# this is intentionally in main rather than a class method to support pickling
# all scores will be 1
def dumb_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
    """Trivial pluggable Phrases scorer: every candidate bigram scores 1.

    The parameters match the scorer signature Phrases expects; none are used.
    Defined at module level (not as a method) so it stays picklable.
    """
    return 1
class TestPhrasesTransformerCustomScorer(unittest.TestCase):
    """PhrasesTransformer with a custom (constant) scoring function.

    With every bigram scoring 1 and threshold .9, every adjacent pair above
    min_count becomes a phrase — hence the different expected tokens below.
    """

    def setUp(self):
        numpy.random.seed(0)
        self.model = PhrasesTransformer(min_count=1, threshold=.9, scoring=dumb_scorer)
        self.model.fit(phrases_sentences)

    def testTransform(self):
        """With the constant scorer, pairs are merged greedily left-to-right."""
        # transform one document
        doc = phrases_sentences[-1]
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey_human', u'interface']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testPartialFit(self):
        """partial_fit on new sentences must add newly learned phrases."""
        new_sentences = [
            ['world', 'peace', 'humans', 'world', 'peace', 'world', 'peace', 'people'],
            ['world', 'peace', 'people'],
            ['world', 'peace', 'humans']
        ]
        self.model.partial_fit(X=new_sentences)  # train model with new sentences
        doc = ['graph', 'minors', 'survey', 'human', 'interface', 'world', 'peace']
        phrase_tokens = self.model.transform(doc)[0]
        expected_phrase_tokens = [u'graph_minors', u'survey_human', u'interface', u'world_peace']
        self.assertEqual(phrase_tokens, expected_phrase_tokens)

    def testSetGetParams(self):
        """set_params must update the wrapper and propagate to gensim_model after refit."""
        # updating only one param
        self.model.set_params(progress_per=5000)
        model_params = self.model.get_params()
        self.assertEqual(model_params["progress_per"], 5000)
        # verify that the attributes values are also changed for `gensim_model` after fitting
        self.model.fit(phrases_sentences)
        self.assertEqual(getattr(self.model.gensim_model, 'progress_per'), 5000)

    def testPersistence(self):
        """A pickle round-trip must preserve the transform output (scorer included)."""
        model_dump = pickle.dumps(self.model)
        model_load = pickle.loads(model_dump)
        doc = phrases_sentences[-1]
        loaded_phrase_tokens = model_load.transform(doc)
        # comparing the original and loaded models
        original_phrase_tokens = self.model.transform(doc)
        self.assertEqual(original_phrase_tokens, loaded_phrase_tokens)

    def testModelNotFitted(self):
        """transform before fit must raise NotFittedError."""
        phrases_transformer = PhrasesTransformer()
        self.assertRaises(NotFittedError, phrases_transformer.transform, phrases_sentences[0])
# Allow running this test module directly (e.g. `python test_sklearn_api.py`).
if __name__ == '__main__':
    unittest.main()
| 60,933 | 49.027915 | 120 | py |
poincare_glove | poincare_glove-master/gensim/test/test_keras_integration.py | import unittest
import numpy as np
from gensim.models import word2vec
try:
from sklearn.datasets import fetch_20newsgroups
except ImportError:
raise unittest.SkipTest("Test requires sklearn to be installed, which is not available")
try:
import keras
from keras.engine import Input
from keras.models import Model
from keras.layers.merge import dot
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils.np_utils import to_categorical
from keras.layers import Dense, Flatten
from keras.layers import Conv1D, MaxPooling1D
except ImportError:
raise unittest.SkipTest("Test requires Keras to be installed, which is not available")
from gensim.test.utils import common_texts
class TestKerasWord2VecWrapper(unittest.TestCase):
    """Integration tests: gensim Word2Vec embeddings plugged into Keras models."""

    def setUp(self):
        # hs=1 enables hierarchical softmax so `syn1` exists for the shape check below
        self.model_cos_sim = word2vec.Word2Vec(common_texts, size=100, min_count=1, hs=1)
        # empty model; vocab building and training happen in testEmbeddingLayer20NewsGroup
        self.model_twenty_ng = word2vec.Word2Vec(min_count=1)

    def testWord2VecTraining(self):
        """
        Test word2vec training.
        """
        model = self.model_cos_sim
        self.assertTrue(model.wv.syn0.shape == (len(model.wv.vocab), 100))
        self.assertTrue(model.syn1.shape == (len(model.wv.vocab), 100))
        sims = model.most_similar('graph', topn=10)
        # self.assertTrue(sims[0][0] == 'trees', sims)  # most similar
        # test querying for "most similar" by vector
        graph_vector = model.wv.syn0norm[model.wv.vocab['graph'].index]
        # topn=11 because the query word itself comes back and is filtered out below
        sims2 = model.most_similar(positive=[graph_vector], topn=11)
        sims2 = [(w, sim) for w, sim in sims2 if w != 'graph']  # ignore 'graph' itself
        self.assertEqual(sims, sims2)

    def testEmbeddingLayerCosineSim(self):
        """
        Test Keras 'Embedding' layer returned by 'get_embedding_layer' function for a simple word similarity task.
        """
        keras_w2v_model = self.model_cos_sim
        keras_w2v_model_wv = keras_w2v_model.wv
        embedding_layer = keras_w2v_model_wv.get_keras_embedding()
        # two word indices in, one normalized dot product (cosine similarity) out
        input_a = Input(shape=(1,), dtype='int32', name='input_a')
        input_b = Input(shape=(1,), dtype='int32', name='input_b')
        embedding_a = embedding_layer(input_a)
        embedding_b = embedding_layer(input_b)
        similarity = dot([embedding_a, embedding_b], axes=2, normalize=True)
        model = Model(input=[input_a, input_b], output=similarity)
        model.compile(optimizer='sgd', loss='mse')
        word_a = 'graph'
        word_b = 'trees'
        output = model.predict([
            np.asarray([keras_w2v_model.wv.vocab[word_a].index]),
            np.asarray([keras_w2v_model.wv.vocab[word_b].index])
        ])
        # output is the cosine distance between the two words (as a similarity measure)
        self.assertTrue(type(output[0][0][0]) == np.float32)  # verify that a float is returned

    def testEmbeddingLayer20NewsGroup(self):
        """
        Test Keras 'Embedding' layer returned by 'get_embedding_layer' function
        for a smaller version of the 20NewsGroup classification problem.
        """
        MAX_SEQUENCE_LENGTH = 1000
        # Prepare text samples and their labels
        # Processing text dataset
        texts = []  # list of text samples
        texts_w2v = []  # used to train the word embeddings
        labels = []  # list of label ids
        data = fetch_20newsgroups(subset='train', categories=['alt.atheism', 'comp.graphics', 'sci.space'])
        # NOTE(review): `data` is a sklearn Bunch; len(data) counts its attributes,
        # not its documents — confirm this loop iterates over what was intended.
        for index in range(len(data)):
            label_id = data.target[index]
            file_data = data.data[index]
            i = file_data.find('\n\n')  # skip header
            if i > 0:
                file_data = file_data[i:]
            try:
                curr_str = str(file_data)
                sentence_list = curr_str.split('\n')
                for sentence in sentence_list:
                    sentence = (sentence.strip()).lower()
                    texts.append(sentence)
                    texts_w2v.append(sentence.split(' '))
                    labels.append(label_id)
            except Exception:
                pass
        # Vectorize the text samples into a 2D integer tensor
        tokenizer = Tokenizer()
        tokenizer.fit_on_texts(texts)
        sequences = tokenizer.texts_to_sequences(texts)
        # word_index = tokenizer.word_index
        data = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
        labels = to_categorical(np.asarray(labels))
        x_train = data
        y_train = labels
        # prepare the embedding layer using the wrapper
        keras_w2v = self.model_twenty_ng
        keras_w2v.build_vocab(texts_w2v)
        keras_w2v.train(texts, total_examples=keras_w2v.corpus_count, epochs=keras_w2v.iter)
        keras_w2v_wv = keras_w2v.wv
        embedding_layer = keras_w2v_wv.get_keras_embedding()
        # create a 1D convnet to solve our classification task
        sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
        embedded_sequences = embedding_layer(sequence_input)
        x = Conv1D(128, 5, activation='relu')(embedded_sequences)
        x = MaxPooling1D(5)(x)
        x = Conv1D(128, 5, activation='relu')(x)
        x = MaxPooling1D(5)(x)
        x = Conv1D(128, 5, activation='relu')(x)
        x = MaxPooling1D(35)(x)  # global max pooling
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        preds = Dense(y_train.shape[1], activation='softmax')(x)
        model = Model(sequence_input, preds)
        model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
        fit_ret_val = model.fit(x_train, y_train, epochs=1)
        # verify the type of the object returned after training
        # value returned is a `History` instance.
        # Its `history` attribute contains all information collected during training.
        self.assertTrue(type(fit_ret_val) == keras.callbacks.History)
# Allow running this test module directly (e.g. `python test_keras_integration.py`).
if __name__ == '__main__':
    unittest.main()
| 6,028 | 38.927152 | 114 | py |
poincare_glove | poincare_glove-master/gensim/test/test_ldaseqmodel.py | """
Tests to check DTM math functions and Topic-Word, Doc-Topic proportions.
"""
import unittest
import logging
import numpy as np # for arrays, array broadcasting etc.
from gensim.models import ldaseqmodel
from gensim.corpora import Dictionary
from gensim.test.utils import datapath
class TestLdaSeq(unittest.TestCase):
    """Fit a DTM (LdaSeqModel) over a small corpus and verify the resulting
    topic-word and doc-topic proportions, plus loading of old saved models."""

    # we are setting up a DTM model and fitting it, and checking topic-word and doc-topic results.
    def setUp(self):
        """Build a toy corpus (job-posting word lists plus short river/bank
        documents) and fit an LdaSeqModel initialized from precomputed LDA
        sufficient statistics so results are deterministic across runs."""
        texts = [
            [u'senior', u'studios', u'studios', u'studios', u'creators', u'award', u'mobile', u'currently',
             u'challenges', u'senior', u'summary', u'senior', u'motivated', u'creative', u'senior'],
            [u'performs', u'engineering', u'tasks', u'infrastructure', u'focusing', u'primarily',
             u'programming', u'interaction', u'designers', u'engineers', u'leadership', u'teams',
             u'teams', u'crews', u'responsibilities', u'engineering', u'quality', u'functional',
             u'functional', u'teams', u'organizing', u'prioritizing', u'technical', u'decisions',
             u'engineering', u'participates', u'participates', u'reviews', u'participates',
             u'hiring', u'conducting', u'interviews'],
            [u'feedback', u'departments', u'define', u'focusing', u'engineering', u'teams', u'crews',
             u'facilitate', u'engineering', u'departments', u'deadlines', u'milestones', u'typically',
             u'spends', u'designing', u'developing', u'updating', u'bugs', u'mentoring', u'engineers',
             u'define', u'schedules', u'milestones', u'participating'],
            [u'reviews', u'interviews', u'sized', u'teams', u'interacts', u'disciplines', u'knowledge',
             u'skills', u'knowledge', u'knowledge', u'xcode', u'scripting', u'debugging', u'skills',
             u'skills', u'knowledge', u'disciplines', u'animation', u'networking', u'expertise',
             u'competencies', u'oral', u'skills', u'management', u'skills', u'proven', u'effectively',
             u'teams', u'deadline', u'environment', u'bachelor', u'minimum', u'shipped', u'leadership',
             u'teams', u'location', u'resumes', u'jobs', u'candidates', u'openings', u'jobs'],
            [u'maryland', u'client', u'producers', u'electricity', u'operates', u'storage', u'utility',
             u'retail', u'customers', u'engineering', u'consultant', u'maryland', u'summary', u'technical',
             u'technology', u'departments', u'expertise', u'maximizing', u'output', u'reduces', u'operating',
             u'participates', u'areas', u'engineering', u'conducts', u'testing', u'solve', u'supports',
             u'environmental', u'understands', u'objectives', u'operates', u'responsibilities', u'handles',
             u'complex', u'engineering', u'aspects', u'monitors', u'quality', u'proficiency', u'optimization',
             u'recommendations', u'supports', u'personnel', u'troubleshooting', u'commissioning', u'startup',
             u'shutdown', u'supports', u'procedure', u'operating', u'units', u'develops', u'simulations',
             u'troubleshooting', u'tests', u'enhancing', u'solving', u'develops', u'estimates', u'schedules',
             u'scopes', u'understands', u'technical', u'management', u'utilize', u'routine', u'conducts',
             u'hazards', u'utilizing', u'hazard', u'operability', u'methodologies', u'participates', u'startup',
             u'reviews', u'pssr', u'participate', u'teams', u'participate', u'regulatory', u'audits', u'define',
             u'scopes', u'budgets', u'schedules', u'technical', u'management', u'environmental', u'awareness',
             u'interfacing', u'personnel', u'interacts', u'regulatory', u'departments', u'input', u'objectives',
             u'identifying', u'introducing', u'concepts', u'solutions', u'peers', u'customers', u'coworkers',
             u'knowledge', u'skills', u'engineering', u'quality', u'engineering'],
            [u'commissioning', u'startup', u'knowledge', u'simulators', u'technologies', u'knowledge',
             u'engineering', u'techniques', u'disciplines', u'leadership', u'skills', u'proven',
             u'engineers', u'oral', u'skills', u'technical', u'skills', u'analytically', u'solve',
             u'complex', u'interpret', u'proficiency', u'simulation', u'knowledge', u'applications',
             u'manipulate', u'applications', u'engineering'],
            [u'calculations', u'programs', u'matlab', u'excel', u'independently', u'environment',
             u'proven', u'skills', u'effectively', u'multiple', u'tasks', u'planning', u'organizational',
             u'management', u'skills', u'rigzone', u'jobs', u'developer', u'exceptional', u'strategies',
             u'junction', u'exceptional', u'strategies', u'solutions', u'solutions', u'biggest',
             u'insurers', u'operates', u'investment'],
            [u'vegas', u'tasks', u'electrical', u'contracting', u'expertise', u'virtually', u'electrical',
             u'developments', u'institutional', u'utilities', u'technical', u'experts', u'relationships',
             u'credibility', u'contractors', u'utility', u'customers', u'customer', u'relationships',
             u'consistently', u'innovations', u'profile', u'construct', u'envision', u'dynamic', u'complex',
             u'electrical', u'management', u'grad', u'internship', u'electrical', u'engineering',
             u'infrastructures', u'engineers', u'documented', u'management', u'engineering',
             u'quality', u'engineering', u'electrical', u'engineers', u'complex', u'distribution',
             u'grounding', u'estimation', u'testing', u'procedures', u'voltage', u'engineering'],
            [u'troubleshooting', u'installation', u'documentation', u'bsee', u'certification',
             u'electrical', u'voltage', u'cabling', u'electrical', u'engineering', u'candidates',
             u'electrical', u'internships', u'oral', u'skills', u'organizational', u'prioritization',
             u'skills', u'skills', u'excel', u'cadd', u'calculation', u'autocad', u'mathcad',
             u'skills', u'skills', u'customer', u'relationships', u'solving', u'ethic', u'motivation',
             u'tasks', u'budget', u'affirmative', u'diversity', u'workforce', u'gender', u'orientation',
             u'disability', u'disabled', u'veteran', u'vietnam', u'veteran', u'qualifying', u'veteran',
             u'diverse', u'candidates', u'respond', u'developing', u'workplace', u'reflects', u'diversity',
             u'communities', u'reviews', u'electrical', u'contracting', u'southwest', u'electrical', u'contractors'],
            [u'intern', u'electrical', u'engineering', u'idexx', u'laboratories', u'validating', u'idexx',
             u'integrated', u'hardware', u'entails', u'planning', u'debug', u'validation', u'engineers',
             u'validation', u'methodologies', u'healthcare', u'platforms', u'brightest', u'solve',
             u'challenges', u'innovation', u'technology', u'idexx', u'intern', u'idexx', u'interns',
             u'supplement', u'interns', u'teams', u'roles', u'competitive', u'interns', u'idexx',
             u'interns', u'participate', u'internships', u'mentors', u'seminars', u'topics', u'leadership',
             u'workshops', u'relevant', u'planning', u'topics', u'intern', u'presentations', u'mixers',
             u'applicants', u'ineligible', u'laboratory', u'compliant', u'idexx', u'laboratories', u'healthcare',
             u'innovation', u'practicing', u'veterinarians', u'diagnostic', u'technology', u'idexx', u'enhance',
             u'veterinarians', u'efficiency', u'economically', u'idexx', u'worldwide', u'diagnostic', u'tests',
             u'tests', u'quality', u'headquartered', u'idexx', u'laboratories', u'employs', u'customers',
             u'qualifications', u'applicants', u'idexx', u'interns', u'potential', u'demonstrated', u'portfolio',
             u'recommendation', u'resumes', u'marketing', u'location', u'americas', u'verification', u'validation',
             u'schedule', u'overtime', u'idexx', u'laboratories', u'reviews', u'idexx', u'laboratories',
             u'nasdaq', u'healthcare', u'innovation', u'practicing', u'veterinarians'],
            [u'location', u'duration', u'temp', u'verification', u'validation', u'tester', u'verification',
             u'validation', u'middleware', u'specifically', u'testing', u'applications', u'clinical',
             u'laboratory', u'regulated', u'environment', u'responsibilities', u'complex', u'hardware',
             u'testing', u'clinical', u'analyzers', u'laboratory', u'graphical', u'interfaces', u'complex',
             u'sample', u'sequencing', u'protocols', u'developers', u'correction', u'tracking',
             u'tool', u'timely', u'troubleshoot', u'testing', u'functional', u'manual',
             u'automated', u'participate', u'ongoing'],
            [u'testing', u'coverage', u'planning', u'documentation', u'testing', u'validation',
             u'corrections', u'monitor', u'implementation', u'recurrence', u'operating', u'statistical',
             u'quality', u'testing', u'global', u'multi', u'teams', u'travel', u'skills', u'concepts',
             u'waterfall', u'agile', u'methodologies', u'debugging', u'skills', u'complex', u'automated',
             u'instrumentation', u'environment', u'hardware', u'mechanical', u'components', u'tracking',
             u'lifecycle', u'management', u'quality', u'organize', u'define', u'priorities', u'organize',
             u'supervision', u'aggressive', u'deadlines', u'ambiguity', u'analyze', u'complex', u'situations',
             u'concepts', u'technologies', u'verbal', u'skills', u'effectively', u'technical', u'clinical',
             u'diverse', u'strategy', u'clinical', u'chemistry', u'analyzer', u'laboratory', u'middleware',
             u'basic', u'automated', u'testing', u'biomedical', u'engineering', u'technologists',
             u'laboratory', u'technology', u'availability', u'click', u'attach'],
            [u'scientist', u'linux', u'asrc', u'scientist', u'linux', u'asrc', u'technology',
             u'solutions', u'subsidiary', u'asrc', u'engineering', u'technology', u'contracts'],
            [u'multiple', u'agencies', u'scientists', u'engineers', u'management', u'personnel',
             u'allows', u'solutions', u'complex', u'aeronautics', u'aviation', u'management', u'aviation',
             u'engineering', u'hughes', u'technical', u'technical', u'aviation', u'evaluation',
             u'engineering', u'management', u'technical', u'terminal', u'surveillance', u'programs',
             u'currently', u'scientist', u'travel', u'responsibilities', u'develops', u'technology',
             u'modifies', u'technical', u'complex', u'reviews', u'draft', u'conformity', u'completeness',
             u'testing', u'interface', u'hardware', u'regression', u'impact', u'reliability',
             u'maintainability', u'factors', u'standardization', u'skills', u'travel', u'programming',
             u'linux', u'environment', u'cisco', u'knowledge', u'terminal', u'environment', u'clearance',
             u'clearance', u'input', u'output', u'digital', u'automatic', u'terminal', u'management',
             u'controller', u'termination', u'testing', u'evaluating', u'policies', u'procedure', u'interface',
             u'installation', u'verification', u'certification', u'core', u'avionic', u'programs', u'knowledge',
             u'procedural', u'testing', u'interfacing', u'hardware', u'regression', u'impact',
             u'reliability', u'maintainability', u'factors', u'standardization', u'missions', u'asrc', u'subsidiaries',
             u'affirmative', u'employers', u'applicants', u'disability', u'veteran', u'technology', u'location',
             u'airport', u'bachelor', u'schedule', u'travel', u'contributor', u'management', u'asrc', u'reviews'],
            [u'technical', u'solarcity', u'niche', u'vegas', u'overview', u'resolving', u'customer',
             u'clients', u'expanding', u'engineers', u'developers', u'responsibilities', u'knowledge',
             u'planning', u'adapt', u'dynamic', u'environment', u'inventive', u'creative', u'solarcity',
             u'lifecycle', u'responsibilities', u'technical', u'analyzing', u'diagnosing', u'troubleshooting',
             u'customers', u'ticketing', u'console', u'escalate', u'knowledge', u'engineering', u'timely',
             u'basic', u'phone', u'functionality', u'customer', u'tracking', u'knowledgebase', u'rotation',
             u'configure', u'deployment', u'sccm', u'technical', u'deployment', u'deploy', u'hardware',
             u'solarcity', u'bachelor', u'knowledge', u'dell', u'laptops', u'analytical', u'troubleshooting',
             u'solving', u'skills', u'knowledge', u'databases', u'preferably', u'server', u'preferably',
             u'monitoring', u'suites', u'documentation', u'procedures', u'knowledge', u'entries', u'verbal',
             u'skills', u'customer', u'skills', u'competitive', u'solar', u'package', u'insurance', u'vacation',
             u'savings', u'referral', u'eligibility', u'equity', u'performers', u'solarcity', u'affirmative',
             u'diversity', u'workplace', u'applicants', u'orientation', u'disability', u'veteran', u'careerrookie'],
            [u'embedded', u'exelis', u'junction', u'exelis', u'embedded', u'acquisition', u'networking',
             u'capabilities', u'classified', u'customer', u'motivated', u'develops', u'tests',
             u'innovative', u'solutions', u'minimal', u'supervision', u'paced', u'environment', u'enjoys',
             u'assignments', u'interact', u'multi', u'disciplined', u'challenging', u'focused', u'embedded',
             u'developments', u'spanning', u'engineering', u'lifecycle', u'specification', u'enhancement',
             u'applications', u'embedded', u'freescale', u'applications', u'android', u'platforms',
             u'interface', u'customers', u'developers', u'refine', u'specifications', u'architectures'],
            [u'java', u'programming', u'scripts', u'python', u'debug', u'debugging', u'emulators',
             u'regression', u'revisions', u'specialized', u'setups', u'capabilities', u'subversion',
             u'technical', u'documentation', u'multiple', u'engineering', u'techexpousa', u'reviews'],
            [u'modeler', u'semantic', u'modeling', u'models', u'skills', u'ontology', u'resource',
             u'framework', u'schema', u'technologies', u'hadoop', u'warehouse', u'oracle', u'relational',
             u'artifacts', u'models', u'dictionaries', u'models', u'interface', u'specifications',
             u'documentation', u'harmonization', u'mappings', u'aligned', u'coordinate', u'technical',
             u'peer', u'reviews', u'stakeholder', u'communities', u'impact', u'domains', u'relationships',
             u'interdependencies', u'models', u'define', u'analyze', u'legacy', u'models', u'corporate',
             u'databases', u'architectural', u'alignment', u'customer', u'expertise', u'harmonization',
             u'modeling', u'modeling', u'consulting', u'stakeholders', u'quality', u'models', u'storage',
             u'agile', u'specifically', u'focus', u'modeling', u'qualifications', u'bachelors', u'accredited',
             u'modeler', u'encompass', u'evaluation', u'skills', u'knowledge', u'modeling', u'techniques',
             u'resource', u'framework', u'schema', u'technologies', u'unified', u'modeling', u'technologies',
             u'schemas', u'ontologies', u'sybase', u'knowledge', u'skills', u'interpersonal', u'skills',
             u'customers', u'clearance', u'applicants', u'eligibility', u'classified', u'clearance',
             u'polygraph', u'techexpousa', u'solutions', u'partnership', u'solutions', u'integration'],
            [u'technologies', u'junction', u'develops', u'maintains', u'enhances', u'complex', u'diverse',
             u'intensive', u'analytics', u'algorithm', u'manipulation', u'management', u'documented',
             u'individually', u'reviews', u'tests', u'components', u'adherence', u'resolves', u'utilizes',
             u'methodologies', u'environment', u'input', u'components', u'hardware', u'offs', u'reuse', u'cots',
             u'gots', u'synthesis', u'components', u'tasks', u'individually', u'analyzes', u'modifies',
             u'debugs', u'corrects', u'integrates', u'operating', u'environments', u'develops', u'queries',
             u'databases', u'repositories', u'recommendations', u'improving', u'documentation', u'develops',
             u'implements', u'algorithms', u'functional', u'assists', u'developing', u'executing', u'procedures',
             u'components', u'reviews', u'documentation', u'solutions', u'analyzing', u'conferring',
             u'users', u'engineers', u'analyzing', u'investigating', u'areas', u'adapt', u'hardware',
             u'mathematical', u'models', u'predict', u'outcome', u'implement', u'complex', u'database',
             u'repository', u'interfaces', u'queries', u'bachelors', u'accredited', u'substituted',
             u'bachelors', u'firewalls', u'ipsec', u'vpns', u'technology', u'administering', u'servers',
             u'apache', u'jboss', u'tomcat', u'developing', u'interfaces', u'firefox', u'internet',
             u'explorer', u'operating', u'mainframe', u'linux', u'solaris', u'virtual', u'scripting',
             u'programming', u'oriented', u'programming', u'ajax', u'script', u'procedures', u'cobol',
             u'cognos', u'fusion', u'focus', u'html', u'java', u'java', u'script', u'jquery', u'perl',
             u'visual', u'basic', u'powershell', u'cots', u'cots', u'oracle', u'apex', u'integration',
             u'competitive', u'package', u'bonus', u'corporate', u'equity', u'tuition', u'reimbursement',
             u'referral', u'bonus', u'holidays', u'insurance', u'flexible', u'disability', u'insurance'],
            [u'technologies', u'disability', u'accommodation', u'recruiter', u'techexpousa'],
            ['bank', 'river', 'shore', 'water'],
            ['river', 'water', 'flow', 'fast', 'tree'],
            ['bank', 'water', 'fall', 'flow'],
            ['bank', 'bank', 'water', 'rain', 'river'],
            ['river', 'water', 'mud', 'tree'],
            ['money', 'transaction', 'bank', 'finance'],
            ['bank', 'borrow', 'money'],
            ['bank', 'finance'],
            ['finance', 'money', 'sell', 'bank'],
            ['borrow', 'sell'],
            ['bank', 'loan', 'sell']
        ]
        # initializing using own LDA sufficient statistics so that we get same results each time.
        sstats = np.loadtxt(datapath('DTM/sstats_test.txt'))
        dictionary = Dictionary(texts)
        corpus = [dictionary.doc2bow(text) for text in texts]
        # three time slices (10 + 10 + 11 documents) over the 31-document corpus
        self.ldaseq = ldaseqmodel.LdaSeqModel(
            corpus=corpus, id2word=dictionary, num_topics=2,
            time_slice=[10, 10, 11], initialize='own', sstats=sstats
        )

    # testing topic word proportions
    def testTopicWord(self):
        """The top word of topic 0 should match the expected word/probability."""
        topics = self.ldaseq.print_topics(0)
        expected_topic_word = [('skills', 0.035999999999999997)]
        self.assertEqual(topics[0][0][0], expected_topic_word[0][0])
        self.assertAlmostEqual(topics[0][0][1], expected_topic_word[0][1], places=2)

    # testing document-topic proportions
    def testDocTopic(self):
        """The first document's first topic proportion should match the expected value."""
        doc_topic = self.ldaseq.doc_topics(0)
        expected_doc_topic = 0.00066577896138482028
        self.assertAlmostEqual(doc_topic[0], expected_doc_topic, places=2)

    def testDtypeBackwardCompatibility(self):
        """A model saved by gensim 3.0.1 must load and infer identically today."""
        ldaseq_3_0_1_fname = datapath('DTM/ldaseq_3_0_1_model')
        test_doc = [(547, 1), (549, 1), (552, 1), (555, 1)]
        expected_topics = [0.99751244, 0.00248756]

        # save model to use in test
        # self.ldaseq.save(ldaseq_3_0_1_fname)

        # load a model saved using a 3.0.1 version of Gensim
        model = ldaseqmodel.LdaSeqModel.load(ldaseq_3_0_1_fname)

        # and test it on a predefined document
        topics = model[test_doc]
        self.assertTrue(np.allclose(expected_topics, topics))
# Run the tests with verbose logging when executed directly.
if __name__ == '__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
    unittest.main()
| 20,187 | 82.078189 | 119 | py |
allosaurus | allosaurus-master/setup.py | from setuptools import setup,find_packages
# Package metadata and runtime dependencies for the allosaurus distribution.
setup(
    name='allosaurus',
    version='1.0.2',
    description='a multilingual phone recognizer',
    author='Xinjian Li',
    author_email='xinjianl@cs.cmu.edu',
    url="https://github.com/xinjli/allosaurus",
    packages=find_packages(),
    # third-party packages required at runtime (audio I/O, phonology, model)
    install_requires=[
        'scipy',
        'numpy',
        'resampy',
        'panphon',
        'torch',
        'editdistance',
    ]
)
| 412 | 19.65 | 49 | py |
allosaurus | allosaurus-master/allosaurus/am/utils.py | import torch
from collections import OrderedDict
import numpy as np
def torch_load(model, path, device_id, unit_mask=None):
    """Load torch model states.

    Args:
        path (str): Model path or snapshot file path to be loaded.
        model (torch.nn.Module): Torch model.
        device_id (int): gpu id (-1 indicates cpu only)
        unit_mask: optional inventory mask used during fine-tuning; expected
            to expose ``target_unit`` and ``unit_map`` (domain phone id ->
            target phone id) — presumably produced by Inventory.get_mask,
            TODO confirm.
    """
    if device_id >= 0:
        model_state_dict = torch.load(str(path),map_location=torch.device(f'cuda:{device_id}'))
    else:
        model_state_dict = torch.load(str(path), map_location=torch.device('cpu'))

    new_state_dict = OrderedDict()

    for k, v in model_state_dict.items():

        # no need for lang specific layer in inference model
        if k.startswith('allophone_layer_dict'):
            continue

        # strip DataParallel's 'module.' prefix so keys match a bare model
        if k.startswith('module.'):
            name = k[7:]  # remove `module.`
        else:
            name = k

        # remap the phone_layer for fine-tuning
        # it will remap phone_layer.weight and phone_layer.bias
        if k.startswith('phone_layer'):
            if unit_mask is not None:
                phone_size = len(unit_mask.target_unit)

                if len(v.shape) == 2:
                    # for weight: (phone_size, hidden) — allocate a new matrix
                    # sized to the target inventory
                    hidden_size = v.shape[1]
                    new_v = v.new(phone_size, hidden_size)
                else:
                    # for bias
                    assert len(v.shape) == 1, 'phone_layer shape is either 2 or 1'
                    new_v = v.new(phone_size)

                # copy each pretrained phone row into its slot in the
                # target inventory; unmapped rows stay uninitialized
                for domain_phone_id, target_phone_id in unit_mask.unit_map.items():
                    new_v[target_phone_id] = v[domain_phone_id]

                v = new_v

        new_state_dict[name] = v

    if hasattr(model, 'module'):
        model.module.load_state_dict(new_state_dict)
    else:
        model.load_state_dict(new_state_dict)

    # move parameters onto the requested GPU (nn.Module.cuda is in-place)
    if device_id >= 0:
        model = model.cuda(device_id)

    del model_state_dict, new_state_dict
def torch_save(model, path):
    """Persist a torch model's parameters to disk.

    Args:
        path (str): Model path to be saved.
        model (torch.nn.Module): Torch model (may be wrapped in DataParallel,
            in which case the inner module's state is saved).
    """
    # unwrap a DataParallel-style container so keys have no 'module.' prefix
    target = model.module if hasattr(model, 'module') else model
    torch.save(target.state_dict(), str(path))
def apply_to_tensor(f, sample):
    """Recursively apply *f* to every torch tensor nested inside *sample*.

    Dicts, lists and tuples are traversed; any other value is returned
    unchanged. NOTE: tuples come back as lists, and an empty *sample*
    always yields ``{}`` — both quirks match the original behavior.
    """
    if len(sample) == 0:
        return {}

    def _walk(node):
        if torch.is_tensor(node):
            return f(node)
        if isinstance(node, dict):
            return {key: _walk(value) for key, value in node.items()}
        if isinstance(node, (list, tuple)):
            return [_walk(item) for item in node]
        return node

    return _walk(sample)
def apply_to_ndarray(f, sample):
    """Recursively apply *f* to every numpy array nested inside *sample*.

    Dicts, lists and tuples are traversed; any other value is returned
    unchanged. NOTE: tuples come back as lists, and an empty *sample*
    always yields ``{}`` — both quirks match the original behavior.
    """
    if len(sample) == 0:
        return {}

    def _walk(node):
        if isinstance(node, np.ndarray):
            return f(node)
        if isinstance(node, dict):
            return {key: _walk(value) for key, value in node.items()}
        if isinstance(node, (list, tuple)):
            return [_walk(item) for item in node]
        return node

    return _walk(sample)
def tensor_to_cuda(sample, device_id=0):
    """Move every tensor nested inside *sample* onto the given CUDA device."""
    return apply_to_tensor(lambda tensor: tensor.to(device_id), sample)
def ndarray_to_tensor(sample):
    """Convert every numpy array nested inside *sample* to a torch tensor."""
    return apply_to_ndarray(torch.from_numpy, sample)
def move_to_tensor(sample, device_id=-1):
    """
    move numpy array to torch tensor
    :param sample:
    :param device_id: -1 means cpu, other means gpu device_id
    :return:
    """
    converted = ndarray_to_tensor(sample)
    if device_id < 0:
        return converted
    # move to cuda if device_id provided
    return tensor_to_cuda(converted, device_id)
def move_to_ndarray(sample):
    """Bring a tensor back to host memory (if needed) and return it as numpy."""
    host = sample.cpu() if sample.is_cuda else sample
    return host.data.numpy()
| 4,126 | 24.475309 | 95 | py |
allosaurus | allosaurus-master/allosaurus/am/dataset.py | from allosaurus.pm.kdict import read_matrix
from pathlib import Path
from torch.utils.data import Dataset
import numpy as np
class AllosaurusDataset(Dataset):
    """Torch dataset over Kaldi-style acoustic features and phone tokens.

    Expects ``data_path`` to contain four files produced by the preparation
    step:

    * ``feat.scp`` - ``utt_id path:offset`` pointers into the ark file
    * ``feat.ark`` - binary feature matrices, read on demand
    * ``shape``    - ``utt_id frames dim`` for each utterance
    * ``token``    - ``utt_id tok1 tok2 ...`` integer phone ids
    """

    def __init__(self, data_path):
        self.data_path = Path(data_path)

        required_files = ['feat.scp', 'token', 'feat.ark', 'shape']
        for required_file in required_files:
            assert (self.data_path / required_file).exists(), required_file+" does not exist, please run the preparation before fine-tuning"

        # utt_id -> list of integer token ids
        self.utt2token = {}
        self._read_token()

        # utt_id -> byte offset into feat.ark / (frames, dim)
        self.utt2offset = {}
        self.utt2shape = {}
        self.ark = None
        self._read_feat()

        # keep only utterances present in all three indexes
        token_utt_set = set(self.utt2token.keys())
        feat_utt_set = set(self.utt2offset.keys())
        shape_utt_set = set(self.utt2shape.keys())
        self.utt_ids = list(set.intersection(token_utt_set, feat_utt_set, shape_utt_set))

        # sort utterances by frame count, longest first, so batches group
        # similar lengths and padding waste is reduced
        self.utt_ids.sort(key=lambda utt_id: self.utt2shape[utt_id][0], reverse=True)

    def __len__(self):
        return len(self.utt_ids)

    def __getitem__(self, item):
        """Return ``(feature_matrix, token_list)`` for the item-th utterance."""
        utt_id = self.utt_ids[item]

        token = self.utt2token[utt_id]

        # seek to the utterance's matrix inside the shared ark handle
        offset = self.utt2offset[utt_id]
        self.ark.seek(offset)
        feature = read_matrix(self.ark, np.float32)

        return (feature, token)

    def close(self):
        """Close the ark handle kept open for random access."""
        if self.ark:
            self.ark.close()

    def _read_token(self):
        """Load utt_id -> token-id list from the ``token`` file."""
        # context manager fixes a leak: the original never closed this reader
        with open(str(self.data_path / 'token'), 'r', encoding='utf-8') as token_reader:
            for line in token_reader:
                fields = line.strip().split()

                utt_id = fields[0]
                tokens = list(map(int, fields[1:]))

                # reject empty token or too long token
                if len(tokens) == 0 or len(tokens) > 1000:
                    continue

                self.utt2token[utt_id] = tokens

    def _read_feat(self):
        """Load ark offsets from ``feat.scp`` and matrix shapes from ``shape``."""
        # read feature offsets
        with open(str(self.data_path / 'feat.scp'), 'r') as feat_reader:
            for line in feat_reader:
                fields = line.strip().split()
                assert len(fields) == 2, " feat.scp should only contain two fields"

                utt_id = fields[0]
                feat = fields[1]

                # the pointer has the form "path:offset"; keep only the offset
                p = feat.rfind(":")
                assert p >= 0, " offset pointer not found"

                offset = int(feat[p+1:])
                self.utt2offset[utt_id] = offset

        # the ark stays open for random access in __getitem__; see close()
        self.ark = open(self.data_path / 'feat.ark', 'rb')

        # read shapes
        with open(str(self.data_path / 'shape'), 'r') as shape_reader:
            for line in shape_reader:
                fields = line.strip().split()
                utt_id = fields[0]
                shape = (int(fields[1]), int(fields[2]))
                self.utt2shape[utt_id] = shape
allosaurus | allosaurus-master/allosaurus/am/factory.py | from allosaurus.am.allosaurus_torch import AllosaurusTorchModel
from allosaurus.am.utils import *
from allosaurus.lm.inventory import Inventory
from allosaurus.lm.unit import write_unit
import json
from argparse import Namespace
from allosaurus.model import get_model_path
def read_am(model_path, inference_config):
    """
    load pretrained acoustic model

    :param model_path: Path to the model directory (must contain
        ``am_config.json`` and ``model.pt``)
    :param inference_config: config providing ``device_id`` for placement
    :return: an AllosaurusTorchModel with pretrained weights loaded
    """
    # read the config through a context manager so the handle is not leaked
    with open(str(model_path / 'am_config.json')) as config_file:
        am_config = Namespace(**json.load(config_file))

    assert am_config.model == 'allosaurus', "This project only support allosaurus model"

    model = AllosaurusTorchModel(am_config)

    # load weights
    torch_load(model, str(model_path / 'model.pt'), inference_config.device_id)

    return model
def transfer_am(train_config):
    """
    initialize the acoustic model with a pretrained model for fine-tuning

    :param train_config: training config; must provide ``pretrained_model``,
        ``lang``, ``new_model`` and ``device_id``
    :return: an AllosaurusTorchModel whose phone layer is remapped onto the
        target language's phone inventory; its config, inventory and weights
        are also written into the new model directory
    """
    pretrained_model_path = get_model_path(train_config.pretrained_model)

    # read the pretrained config through a context manager (no leaked handle)
    with open(str(pretrained_model_path / 'am_config.json')) as config_file:
        am_config = Namespace(**json.load(config_file))

    assert am_config.model == 'allosaurus', "This project only support allosaurus model"

    # load inventory
    inventory = Inventory(pretrained_model_path)

    # get unit_mask which maps the full phone inventory to the target phone inventory
    unit_mask = inventory.get_mask(train_config.lang, approximation=True)

    # reset the new phone_size
    am_config.phone_size = len(unit_mask.target_unit)

    model = AllosaurusTorchModel(am_config)

    # load the pretrained model and setup the phone_layer with correct weights
    torch_load(model, str(pretrained_model_path / 'model.pt'), train_config.device_id, unit_mask)

    # update new model
    new_model = train_config.new_model

    # get its path
    model_path = get_model_path(new_model)

    # overwrite old am_config; write via a context manager so the file is
    # flushed and closed (the original left the write handle open)
    new_am_config_json = vars(am_config)
    with open(str(model_path / 'am_config.json'), 'w') as config_writer:
        json.dump(new_am_config_json, config_writer, indent=4)

    # overwrite old phones
    write_unit(unit_mask.target_unit, model_path / 'phone.txt')

    # overwrite old model
    torch_save(model, model_path / 'model.pt')

    return model
allosaurus | allosaurus-master/allosaurus/am/allosaurus_torch.py | import torch
import torch.nn as nn
class AllosaurusTorchModel(nn.Module):
    """Bidirectional-LSTM phone recognizer.

    A stack of BLSTM layers followed by a linear projection onto the phone
    inventory. ``forward`` returns per-frame phone logits (no softmax).
    """

    def __init__(self, config):
        """
        :param config: namespace providing ``hidden_size``, ``layer_size``,
            ``proj_size``, ``feat_size`` (``feat_size_dict`` is consulted when
            it is -1), ``lang_size_dict`` and ``phone_size``
        """
        super(AllosaurusTorchModel, self).__init__()

        self.hidden_size = config.hidden_size
        self.layer_size = config.layer_size
        self.proj_size = config.proj_size

        # decide input feature size: -1 means "take it from the first corpus
        # entry of feat_size_dict" (filled in by the data loader)
        if config.feat_size == -1:
            corpus_feat_size_dict = list(config.feat_size_dict.values())[0]
            self.feat_size = list(corpus_feat_size_dict.values())[0]
        else:
            self.feat_size = config.feat_size

        assert hasattr(config, 'lang_size_dict'), " config should has the lang_size_dict property"
        self.lang_size_dict = config.lang_size_dict

        self.lang_output_size = dict()

        self.phone_size = config.phone_size
        self.config = config

        # BLSTM encoder; output dim is 2*hidden_size (bidirectional)
        self.blstm_layer = nn.LSTM(self.feat_size, self.hidden_size, num_layers=self.layer_size, bidirectional=True)

        # frame-level projection onto the phone inventory
        self.phone_layer = nn.Linear(self.hidden_size*2, self.phone_size)

        self.phone_tensor = None

    @staticmethod
    def add_args(parser):
        """Register the model's command-line arguments on *parser*."""
        parser.add_argument('--feat_size', type=int, default=-1, help='input size in the blstm model. if -1, then it is determined automatically by loader')
        parser.add_argument('--hidden_size', type=int, default=320, help='hidden size in the blstm model')
        parser.add_argument('--lang_size', type=int, default=-1, help='output size in the blstm model, if -1, then it is determined automatically by loader')
        parser.add_argument('--proj_size', type=int, default=0, help='projection')
        parser.add_argument('--layer_size', type=int, default=5, help='layer size in the blstm model')
        parser.add_argument('--l2', type=float, default=0.0, help='regularization')
        parser.add_argument('--loss', type=str, default='ctc', help='ctc/warp_ctc/e2e')
        parser.add_argument('--debug_model', type=str, default=False, help='print tensor info for debugging')

    def forward(self, input_tensor, input_lengths, return_lstm=False, return_both=False, meta=None):
        """Compute per-frame phone logits for a padded batch.

        :param input_tensor: float tensor of shape (B, T, H)
        :param input_lengths: tensor of valid frame counts per utterance
            (sorted descending), or None when the batch has no padding
        :param return_lstm: if True, return ``[lstm_output, lengths]`` instead
            of logits; lstm_output is (T, B, 2*hidden) on cpu
        :param return_both: if True, return ``[(lstm_output, lengths), logits]``
        :param meta: dictionary containing meta information (unused here)
        :return: phone logits of shape (B, T, phone_size) by default
        """
        # (B,T,H) -> (T,B,H)
        input_tensor = input_tensor.transpose(0, 1).float()

        # assume every utterance spans the full time axis when no lengths given
        if input_lengths is None:
            input_lengths = torch.LongTensor([input_tensor.shape[0]]*input_tensor.shape[1])

        # keep the max length for padding
        total_length = input_tensor.size(0)

        # (T,B,H) -> PackedSequence
        pack_sequence = nn.utils.rnn.pack_padded_sequence(input_tensor, input_lengths.cpu())

        self.blstm_layer.flatten_parameters()
        hidden_pack_sequence, _ = self.blstm_layer(pack_sequence)

        # PackedSequence -> (T,B,2H), padded back to the original max length
        output_tensor, _ = nn.utils.rnn.pad_packed_sequence(hidden_pack_sequence, total_length=total_length)

        # (T,B,2H) -> (T,B,P)
        phone_tensor = self.phone_layer(output_tensor)

        if return_lstm:
            return [output_tensor.cpu(), input_lengths.cpu()]

        if return_both:
            return [(output_tensor.cpu(), input_lengths.cpu()), phone_tensor.transpose(0, 1)]

        # return (B,T,P) for gathering
        return phone_tensor.transpose(0, 1)
allosaurus | allosaurus-master/allosaurus/am/criterion.py | import torch
import torch.nn as nn
def read_criterion(train_config):
    """Build the training loss configured in *train_config* (only CTC)."""
    assert train_config.criterion == 'ctc', 'only ctc criterion is supported now'
    return CTCCriterion(train_config)
class CTCCriterion(nn.Module):
    """Summed CTC loss over a batch.

    Applies log-softmax over the class dimension of the raw logits, then
    evaluates ``nn.CTCLoss`` with sum reduction and infinite losses zeroed.
    """

    def __init__(self, train_config):
        super().__init__()
        self.train_config = train_config
        self.logsoftmax = nn.LogSoftmax(dim=2)
        self.criterion = nn.CTCLoss(reduction='sum', zero_infinity=True)

    def forward(self,
                output_tensor: torch.tensor,
                output_lengths: torch.tensor,
                target_tensor: torch.tensor,
                target_lengths: torch.tensor):
        """Return the summed CTC loss.

        :param output_tensor: raw logits of shape (batch, time, classes)
        :param output_lengths: valid frame count per utterance
        :param target_tensor: padded target token ids
        :param target_lengths: valid token count per utterance
        """
        # CTCLoss expects (time, batch, classes) log-probabilities
        log_probs = self.logsoftmax(output_tensor).transpose(0, 1)
        return self.criterion(log_probs, target_tensor, output_lengths, target_lengths)
allosaurus | allosaurus-master/allosaurus/am/trainer.py | from allosaurus.am.utils import move_to_tensor, torch_save
from allosaurus.am.criterion import read_criterion
from allosaurus.am.optimizer import read_optimizer
from allosaurus.am.reporter import Reporter
import editdistance
import numpy as np
import torch
from itertools import groupby
from allosaurus.model import get_model_path
import os
import json
class Trainer:
    def __init__(self, model, train_config):
        """Wire up one training run: loss, optimizer, log reporter and the
        book-keeping used for best-model tracking and early stopping."""
        self.model = model
        self.train_config = train_config
        self.device_id = self.train_config.device_id

        # criterion, only ctc currently
        self.criterion = read_criterion(train_config)

        # optimizer, only sgd currently
        self.optimizer = read_optimizer(self.model, train_config)

        # reporter to write logs
        self.reporter = Reporter(train_config)

        # best phone error rate seen so far (100.0 == worst possible start)
        self.best_per = 100.0

        # initialize the target model directory for checkpoints
        self.model_path = get_model_path(train_config.new_model)

        # counter for early stopping
        self.num_no_improvement = 0
def sum_edit_distance(self, output_ndarray, output_lengths_ndarray, token_ndarray, token_lengths_ndarray):
"""
compute SUM of ter in this batch
"""
error_cnt_sum = 0.0
for i in range(len(token_lengths_ndarray)):
target_list = token_ndarray[i, :token_lengths_ndarray[i]].tolist()
logit = output_ndarray[i][:output_lengths_ndarray[i]]
raw_token = [x[0] for x in groupby(np.argmax(logit, axis=1))]
decoded_token = list(filter(lambda a: a != 0, raw_token))
error_cnt_sum += editdistance.distance(target_list, decoded_token)
return error_cnt_sum
def step(self, feat_batch, token_batch):
# prepare torch tensors from numpy arrays
feat_tensor, feat_lengths_tensor = move_to_tensor(feat_batch, self.device_id)
token_tensor, token_lengths_tensor = move_to_tensor(token_batch, self.device_id)
#print(feat_tensor)
#print(feat_lengths_tensor)
output_tensor = self.model(feat_tensor, feat_lengths_tensor)
#print(output_tensor)
#print(token_tensor)
#print(token_lengths_tensor)
loss = self.criterion(output_tensor, feat_lengths_tensor, token_tensor, token_lengths_tensor)
#print(loss.item())
# extract numpy format for edit distance computing
output_ndarray = output_tensor.cpu().detach().numpy()
feat_ndarray, feat_lengths_ndarray = feat_batch
token_ndarray, token_lengths_ndarray = token_batch
phone_error_sum = self.sum_edit_distance(output_ndarray, feat_lengths_ndarray, token_ndarray,
token_lengths_ndarray)
phone_count = sum(token_lengths_ndarray)
return loss, phone_error_sum, phone_count
def train(self, train_loader, validate_loader):
self.best_per = 100.0
batch_count = len(train_loader)
for epoch in range(self.train_config.epoch):
# shuffle
train_loader.shuffle()
# set to the training mode
self.model.train()
# reset all stats
all_phone_count = 0.0
all_loss_sum = 0.0
all_phone_error_sum = 0.0
# training loop
for ii in range(batch_count):
self.optimizer.zero_grad()
feat_batch, token_batch = train_loader.read_batch(ii)
# forward step
loss_tensor, phone_error_sum, phone_count = self.step(feat_batch, token_batch)
# backprop and optimize
loss_tensor.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.train_config.grad_clip)
self.optimizer.step()
# update stats
loss_sum = loss_tensor.item()
all_phone_count += phone_count
all_loss_sum += loss_sum
all_phone_error_sum += phone_error_sum
if ii % self.train_config.report_per_batch == 0:
message = f'epoch[batch]: {epoch:02d}[{ii:04d}] | train loss {all_loss_sum/all_phone_count:0.5f} train per {all_phone_error_sum / all_phone_count:0.5f}'
self.reporter.write(message)
# reset all stats
all_phone_count = 0.0
all_loss_sum = 0.0
all_phone_error_sum = 0.0
# evaluate this model
validate_phone_error_rate = self.validate(validate_loader)
self.reporter.write(f"epoch{epoch} | validate per : {validate_phone_error_rate:0.5f}")
if validate_phone_error_rate <= self.best_per:
self.best_per = validate_phone_error_rate
self.num_no_improvement = 0
self.reporter.write("saving model")
model_name = f"model_{validate_phone_error_rate:0.5f}.pt"
# save model
torch_save(self.model, self.model_path / model_name)
# overwrite the best model
torch_save(self.model, self.model_path / 'model.pt')
else:
self.num_no_improvement += 1
if self.num_no_improvement >= 3:
self.reporter.write("no improvements for several epochs, early stopping now")
break
# close reporter stream
self.reporter.close()
def validate(self, validate_loader):
self.model.eval()
batch_count = len(validate_loader)
all_phone_error_sum = 0
all_phone_count = 0
# validation loop
for ii in range(batch_count):
self.optimizer.zero_grad()
feat_batch, token_batch = validate_loader.read_batch(ii)
# one step
loss_tensor, phone_error_sum, phone_count = self.step(feat_batch, token_batch)
# update stats
all_phone_error_sum += phone_error_sum
all_phone_count += phone_count
return all_phone_error_sum/all_phone_count
| 6,149 | 30.538462 | 172 | py |
allosaurus | allosaurus-master/allosaurus/am/optimizer.py | from torch.optim import SGD
def read_optimizer(model, train_config):
    """Create the optimizer described by ``train_config``.

    Args:
        model: torch module whose parameters will be optimized.
        train_config: configuration exposing ``optimizer`` (must be ``'sgd'``)
            and ``lr``.

    Returns:
        A configured ``torch.optim.SGD`` instance.

    Raises:
        AssertionError: if ``train_config.optimizer`` is not ``'sgd'``.
    """
    assert train_config.optimizer == 'sgd', 'only sgd is supported now, others optimizers would be easier to add though'
    return SGD(model.parameters(), lr=train_config.lr)
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed_CRST.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabeledSet, WeakSet
from desed_task.nnet.CRNN import CRNN, RCRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer_CRST import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False, evaluation=False):
    """Resample the configured audio folders to ``config_data["fs"]`` and
    regenerate the duration tsv files when needed.

    Args:
        config_data (dict): the "data" section of the experiment config.
        test_only (bool): if True, skip the training folders and only
            resample the test (or eval) folder.
        evaluation (bool): if True together with ``test_only``, resample
            the evaluation folder instead of the development test folder.
    """
    if not test_only:
        dsets = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]
    elif not evaluation:
        dsets = ["test_folder"]
    else:
        # BUGFIX: this branch was unreachable before ("if not test_only /
        # elif test_only" exhausts both cases); evaluation runs now resample
        # the eval folder as intended.
        dsets = ["eval_folder"]
    for dset in dsets:
        computed = resample_folder(
            config_data[dset + "_44k"], config_data[dset], target_fs=config_data["fs"]
        )
    if not evaluation:
        # refresh duration tsvs when missing or when audio was re-created
        for base_set in ["synth_val", "test"]:
            if not os.path.exists(config_data[base_set + "_dur"]) or computed:
                generate_tsv_wav_durations(
                    config_data[base_set + "_folder"], config_data[base_set + "_dur"]
                )
def single_run(
    config,
    log_dir,
    gpus,
    checkpoint_resume=None,
    test_state_dict=None,
    fast_dev_run=False,
    evaluation=False
):
    """
    Running sound event detection baseline (CRST variant: two RCRNN students)
    Args:
        config (dict): the dictionary of configuration params
        log_dir (str): path to log directory
        gpus (int): number of gpus to use
        checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
        test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
            to be loaded to test the model.
        fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
            for development purposes.
        evaluation (bool, optional): if True, run on the (unlabeled) evaluation set instead of the test set.
    """
    config.update({"log_dir": log_dir})
    ##### data prep test ##########
    encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=config["data"]["audio_max_len"],
        frame_len=config["feats"]["n_filters"],
        frame_hop=config["feats"]["hop_length"],
        net_pooling=config["data"]["net_subsample"],
        fs=config["data"]["fs"],
    )
    if not evaluation:
        devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
        devtest_dataset = StronglyAnnotatedSet(
            config["data"]["test_folder"],
            devtest_df,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
    else:
        # NOTE(review): evaluation reads from test_folder with a hard-coded
        # pad_to=11 here (train_sed.py uses eval_folder and pad_to=10) — confirm intended.
        devtest_dataset = UnlabeledSet(
            config["data"]["test_folder"],
            encoder,
            pad_to=11,
            return_filename=True
        )
    test_dataset = devtest_dataset
    ##### model definition ############
    # two student networks sharing the same architecture config (co-training)
    sed_student1 = RCRNN(**config["net1"])
    sed_student2 = RCRNN(**config["net1"])
    if test_state_dict is None:
        ##### data prep train valid ##########
        synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
        synth_set = StronglyAnnotatedSet(
            config["data"]["synth_folder"],
            synth_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
        train_weak_df = weak_df.sample(
            frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
        )
        valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_set = WeakSet(
            config["data"]["weak_folder"],
            train_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        unlabeled_set = UnlabeledSet(
            config["data"]["unlabeled_folder"],
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
        synth_val = StronglyAnnotatedSet(
            config["data"]["synth_val_folder"],
            synth_df_val,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_val = WeakSet(
            config["data"]["weak_folder"],
            valid_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
            return_filename=True,
        )
        tot_train_data = [synth_set, weak_set, unlabeled_set]
        train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
        batch_sizes = config["training"]["batch_size"]
        samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
        batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
        valid_dataset = torch.utils.data.ConcatDataset(
            [synth_val, weak_val]
        )
        ##### training params and optimizers ############
        # steps per epoch = what the smallest sub-dataset can supply
        epoch_len = min(
            [
                len(tot_train_data[indx])
                // (
                    config["training"]["batch_size"][indx]
                    * config["training"]["accumulate_batches"]
                )
                for indx in range(len(tot_train_data))
            ]
        )
        # one optimizer + warmup scheduler per student
        opt1 = torch.optim.Adam(sed_student1.parameters(), 1e-3, betas=(0.9, 0.999))
        opt2 = torch.optim.Adam(sed_student2.parameters(), 1e-3, betas=(0.9, 0.999))
        exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
        exp_scheduler1 = {
            "scheduler": ExponentialWarmup(opt1, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        exp_scheduler2 = {
            "scheduler": ExponentialWarmup(opt2, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        logger = TensorBoardLogger(
            os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
        )
        print(f"experiment dir: {logger.log_dir}")
        callbacks = [
            EarlyStopping(
                monitor="val/obj_metric",
                patience=config["training"]["early_stop_patience"],
                verbose=True,
                mode="max"
            ),
            ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
                            save_last=True),
        ]
    else:
        # test-only mode: no training data, optimizers or schedulers needed
        train_dataset = None
        valid_dataset = None
        batch_sampler = None
        opt1 = None
        opt2 = None
        exp_scheduler1 = None
        exp_scheduler2 = None
        logger = True
        callbacks = None
    desed_training = SEDTask4_2021(
        config,
        encoder=encoder,
        sed_student=[sed_student1, sed_student2],
        opt=[opt1, opt2],
        train_data=train_dataset,
        valid_data=valid_dataset,
        test_data=test_dataset,
        train_sampler=batch_sampler,
        scheduler=[exp_scheduler1, exp_scheduler2],
        fast_dev_run=fast_dev_run,
        evaluation=evaluation
    )
    # Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
    if fast_dev_run:
        flush_logs_every_n_steps = 1
        log_every_n_steps = 1
        limit_train_batches = 2
        limit_val_batches = 2
        limit_test_batches = 2
        n_epochs = 3
    else:
        flush_logs_every_n_steps = 100
        log_every_n_steps = 40
        limit_train_batches = 1.
        limit_val_batches = 1.
        limit_test_batches = 1.
        n_epochs = config["training"]["n_epochs"]
    trainer = pl.Trainer(
        max_epochs=n_epochs,
        callbacks=callbacks,
        gpus=gpus,
        distributed_backend=config["training"].get("backend"),
        accumulate_grad_batches=config["training"]["accumulate_batches"],
        logger=logger,
        resume_from_checkpoint=checkpoint_resume,
        gradient_clip_val=config["training"]["gradient_clip"],
        check_val_every_n_epoch=config["training"]["validation_interval"],
        num_sanity_val_steps=0,
        log_every_n_steps=log_every_n_steps,
        flush_logs_every_n_steps=flush_logs_every_n_steps,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )
    if test_state_dict is None:
        trainer.fit(desed_training)
        best_path = trainer.checkpoint_callback.best_model_path
        print(f"best model: {best_path}")
        test_state_dict = torch.load(best_path)["state_dict"]
    desed_training.load_state_dict(test_state_dict)
    trainer.test(desed_training)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    parser.add_argument("--eval_from_checkpoint", default=None, help="Evaluate the model specified")
    args = parser.parse_args()
    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)
    # --eval_from_checkpoint takes precedence and switches to evaluation mode
    evaluation = False
    test_from_checkpoint = args.test_from_checkpoint
    if args.eval_from_checkpoint is not None:
        test_from_checkpoint = args.eval_from_checkpoint
        evaluation = True
    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # reuse the hyper-parameters stored in the checkpoint, but keep the
        # current "data" section (paths may differ between machines)
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]
    #if evaluation:
    #    configs["training"]["batch_size_val"] = 1
    # seed everything for reproducibility
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)
    test_only = test_from_checkpoint is not None
    # NOTE(review): the evaluation flag is not forwarded here, so the eval
    # folder is never resampled by this entry point — confirm intended.
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
        evaluation
    )
| 11,870 | 34.121302 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed_SRST.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabelledSet, WeakSet
from desed_task.nnet.CRNN import CRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer_SRST import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False):
    """Resample the configured audio folders to ``config_data["fs"]`` and
    regenerate missing/stale duration tsv files.

    Args:
        config_data (dict): the "data" section of the experiment config.
        test_only (bool): if True, only the test folder is resampled.
    """
    if test_only:
        dsets = ["test_folder"]
    else:
        dsets = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]
    for dset in dsets:
        src_dir = config_data[dset + "_44k"]
        dst_dir = config_data[dset]
        computed = resample_folder(src_dir, dst_dir, target_fs=config_data["fs"])
    # rebuild the duration tsvs when absent or when audio was re-created
    for base_set in ["synth_val", "test"]:
        dur_path = config_data[base_set + "_dur"]
        if not os.path.exists(dur_path) or computed:
            generate_tsv_wav_durations(config_data[base_set + "_folder"], dur_path)
def single_run(
    config,
    log_dir,
    gpus,
    checkpoint_resume=None,
    test_state_dict=None,
    fast_dev_run=False,
):
    """
    Running sound event detection baseline (SRST variant: single CRNN student)
    Args:
        config (dict): the dictionary of configuration params
        log_dir (str): path to log directory
        gpus (int): number of gpus to use
        checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
        test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
            to be loaded to test the model.
        fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
            for development purposes.
    """
    config.update({"log_dir": log_dir})
    ##### data prep test ##########
    encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=config["data"]["audio_max_len"],
        frame_len=config["feats"]["n_filters"],
        frame_hop=config["feats"]["hop_length"],
        net_pooling=config["data"]["net_subsample"],
        fs=config["data"]["fs"],
    )
    devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
    devtest_dataset = StronglyAnnotatedSet(
        config["data"]["test_folder"],
        devtest_df,
        encoder,
        return_filename=True,
        pad_to=config["data"]["audio_max_len"],
    )
    test_dataset = devtest_dataset
    ##### model definition ############
    sed_student = CRNN(**config["net"])
    if test_state_dict is None:
        ##### data prep train valid ##########
        synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
        synth_set = StronglyAnnotatedSet(
            config["data"]["synth_folder"],
            synth_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
        train_weak_df = weak_df.sample(
            frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
        )
        valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_set = WeakSet(
            config["data"]["weak_folder"],
            train_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        unlabeled_set = UnlabelledSet(
            config["data"]["unlabeled_folder"],
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
        synth_val = StronglyAnnotatedSet(
            config["data"]["synth_val_folder"],
            synth_df_val,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_val = WeakSet(
            config["data"]["weak_folder"],
            valid_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
            return_filename=True,
        )
        tot_train_data = [synth_set, weak_set, unlabeled_set]
        train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
        batch_sizes = config["training"]["batch_size"]
        samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
        batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
        valid_dataset = torch.utils.data.ConcatDataset(
            [synth_val, weak_val]
        )
        ##### training params and optimizers ############
        # steps per epoch = what the smallest sub-dataset can supply
        epoch_len = min(
            [
                len(tot_train_data[indx])
                // (
                    config["training"]["batch_size"][indx]
                    * config["training"]["accumulate_batches"]
                )
                for indx in range(len(tot_train_data))
            ]
        )
        opt = torch.optim.Adam(sed_student.parameters(), 1e-3, betas=(0.9, 0.999))
        exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
        exp_scheduler = {
            "scheduler": ExponentialWarmup(opt, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        logger = TensorBoardLogger(
            os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
        )
        print(f"experiment dir: {logger.log_dir}")
        callbacks = [
            EarlyStopping(
                monitor="val/obj_metric",
                patience=config["training"]["early_stop_patience"],
                verbose=True,
                mode="max"
            ),
            ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
                            save_last=True),
        ]
    else:
        # test-only mode: no training data, optimizer or scheduler needed
        train_dataset = None
        valid_dataset = None
        batch_sampler = None
        opt = None
        exp_scheduler = None
        logger = True
        callbacks = None
    desed_training = SEDTask4_2021(
        config,
        encoder=encoder,
        sed_student=sed_student,
        opt=opt,
        train_data=train_dataset,
        valid_data=valid_dataset,
        test_data=test_dataset,
        train_sampler=batch_sampler,
        scheduler=exp_scheduler,
        fast_dev_run=fast_dev_run,
    )
    # Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
    if fast_dev_run:
        flush_logs_every_n_steps = 1
        log_every_n_steps = 1
        limit_train_batches = 2
        limit_val_batches = 2
        limit_test_batches = 2
        n_epochs = 3
    else:
        flush_logs_every_n_steps = 100
        log_every_n_steps = 40
        limit_train_batches = 1.
        limit_val_batches = 1.
        limit_test_batches = 1.
        n_epochs = config["training"]["n_epochs"]
    trainer = pl.Trainer(
        max_epochs=n_epochs,
        callbacks=callbacks,
        gpus=gpus,
        distributed_backend=config["training"].get("backend"),
        accumulate_grad_batches=config["training"]["accumulate_batches"],
        logger=logger,
        resume_from_checkpoint=checkpoint_resume,
        gradient_clip_val=config["training"]["gradient_clip"],
        check_val_every_n_epoch=config["training"]["validation_interval"],
        num_sanity_val_steps=0,
        log_every_n_steps=log_every_n_steps,
        flush_logs_every_n_steps=flush_logs_every_n_steps,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )
    if test_state_dict is None:
        trainer.fit(desed_training)
        best_path = trainer.checkpoint_callback.best_model_path
        print(f"best model: {best_path}")
        test_state_dict = torch.load(best_path)["state_dict"]
    desed_training.load_state_dict(test_state_dict)
    trainer.test(desed_training)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    args = parser.parse_args()
    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)
    test_from_checkpoint = args.test_from_checkpoint
    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # reuse the hyper-parameters stored in the checkpoint, but keep the
        # current "data" section (paths may differ between machines)
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]
    # seed everything for reproducibility
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)
    test_only = test_from_checkpoint is not None
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
    )
| 10,734 | 34.429043 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/train_sed.py | import argparse
from copy import deepcopy
import numpy as np
import os
import pandas as pd
import random
import torch
import yaml
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.loggers import TensorBoardLogger
from desed_task.dataio import ConcatDatasetBatchSampler
from desed_task.dataio.datasets import StronglyAnnotatedSet, UnlabeledSet, WeakSet
from desed_task.nnet.CRNN import CRNN, RCRNN
from desed_task.utils.encoder import ManyHotEncoder
from desed_task.utils.schedulers import ExponentialWarmup
from local.classes_dict import classes_labels
from local.sed_trainer import SEDTask4_2021
from local.resample_folder import resample_folder
from local.utils import generate_tsv_wav_durations
def resample_data_generate_durations(config_data, test_only=False, evaluation=False):
    """Resample the configured audio folders to ``config_data["fs"]`` and
    regenerate the duration tsv files when needed.

    Args:
        config_data (dict): the "data" section of the experiment config.
        test_only (bool): if True, skip the training folders and only
            resample the test (or eval) folder.
        evaluation (bool): if True together with ``test_only``, resample
            the evaluation folder instead of the development test folder.
    """
    if not test_only:
        dsets = [
            "synth_folder",
            "synth_val_folder",
            "weak_folder",
            "unlabeled_folder",
            "test_folder",
        ]
    elif not evaluation:
        dsets = ["test_folder"]
    else:
        # BUGFIX: this branch was unreachable before ("if not test_only /
        # elif test_only" exhausts both cases); evaluation runs now resample
        # the eval folder as intended.
        dsets = ["eval_folder"]
    for dset in dsets:
        computed = resample_folder(
            config_data[dset + "_44k"], config_data[dset], target_fs=config_data["fs"]
        )
    if not evaluation:
        # refresh duration tsvs when missing or when audio was re-created
        for base_set in ["synth_val", "test"]:
            if not os.path.exists(config_data[base_set + "_dur"]) or computed:
                generate_tsv_wav_durations(
                    config_data[base_set + "_folder"], config_data[base_set + "_dur"]
                )
def single_run(
    config,
    log_dir,
    gpus,
    checkpoint_resume=None,
    test_state_dict=None,
    fast_dev_run=False,
    evaluation=False
):
    """
    Running sound event detection baseline (single CRNN student)
    Args:
        config (dict): the dictionary of configuration params
        log_dir (str): path to log directory
        gpus (int): number of gpus to use
        checkpoint_resume (str, optional): path to checkpoint to resume from. Defaults to "".
        test_state_dict (dict, optional): if not None, no training is involved. This dictionary is the state_dict
            to be loaded to test the model.
        fast_dev_run (bool, optional): whether to use a run with only one batch at train and validation, useful
            for development purposes.
        evaluation (bool, optional): if True, run on the (unlabeled) evaluation set instead of the test set.
    """
    config.update({"log_dir": log_dir})
    ##### data prep test ##########
    encoder = ManyHotEncoder(
        list(classes_labels.keys()),
        audio_len=config["data"]["audio_max_len"],
        frame_len=config["feats"]["n_filters"],
        frame_hop=config["feats"]["hop_length"],
        net_pooling=config["data"]["net_subsample"],
        fs=config["data"]["fs"],
    )
    if not evaluation:
        devtest_df = pd.read_csv(config["data"]["test_tsv"], sep="\t")
        devtest_dataset = StronglyAnnotatedSet(
            config["data"]["test_folder"],
            devtest_df,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
    else:
        devtest_dataset = UnlabeledSet(
            config["data"]["eval_folder"],
            encoder,
            pad_to=10,
            return_filename=True
        )
    test_dataset = devtest_dataset
    ##### model definition ############
    sed_student = CRNN(**config["net"])
    if test_state_dict is None:
        ##### data prep train valid ##########
        synth_df = pd.read_csv(config["data"]["synth_tsv"], sep="\t")
        synth_set = StronglyAnnotatedSet(
            config["data"]["synth_folder"],
            synth_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_df = pd.read_csv(config["data"]["weak_tsv"], sep="\t")
        train_weak_df = weak_df.sample(
            frac=config["training"]["weak_split"], random_state=config["training"]["seed"]
        )
        valid_weak_df = weak_df.drop(train_weak_df.index).reset_index(drop=True)
        train_weak_df = train_weak_df.reset_index(drop=True)
        weak_set = WeakSet(
            config["data"]["weak_folder"],
            train_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        unlabeled_set = UnlabeledSet(
            config["data"]["unlabeled_folder"],
            encoder,
            pad_to=config["data"]["audio_max_len"],
        )
        synth_df_val = pd.read_csv(config["data"]["synth_val_tsv"], sep="\t")
        synth_val = StronglyAnnotatedSet(
            config["data"]["synth_val_folder"],
            synth_df_val,
            encoder,
            return_filename=True,
            pad_to=config["data"]["audio_max_len"],
        )
        weak_val = WeakSet(
            config["data"]["weak_folder"],
            valid_weak_df,
            encoder,
            pad_to=config["data"]["audio_max_len"],
            return_filename=True,
        )
        tot_train_data = [synth_set, weak_set, unlabeled_set]
        train_dataset = torch.utils.data.ConcatDataset(tot_train_data)
        batch_sizes = config["training"]["batch_size"]
        samplers = [torch.utils.data.RandomSampler(x) for x in tot_train_data]
        batch_sampler = ConcatDatasetBatchSampler(samplers, batch_sizes)
        valid_dataset = torch.utils.data.ConcatDataset(
            [synth_val, weak_val]
        )
        ##### training params and optimizers ############
        # steps per epoch = what the smallest sub-dataset can supply
        epoch_len = min(
            [
                len(tot_train_data[indx])
                // (
                    config["training"]["batch_size"][indx]
                    * config["training"]["accumulate_batches"]
                )
                for indx in range(len(tot_train_data))
            ]
        )
        opt = torch.optim.Adam(sed_student.parameters(), 1e-3, betas=(0.9, 0.999))
        exp_steps = config["training"]["n_epochs_warmup"] * epoch_len
        exp_scheduler = {
            "scheduler": ExponentialWarmup(opt, config["opt"]["lr"], exp_steps),
            "interval": "step",
        }
        logger = TensorBoardLogger(
            os.path.dirname(config["log_dir"]), config["log_dir"].split("/")[-1],
        )
        print(f"experiment dir: {logger.log_dir}")
        callbacks = [
            EarlyStopping(
                monitor="val/obj_metric",
                patience=config["training"]["early_stop_patience"],
                verbose=True,
                mode="max"
            ),
            ModelCheckpoint(logger.log_dir, monitor="val/obj_metric", save_top_k=1, mode="max",
                            save_last=True),
        ]
    else:
        # test-only mode: no training data, optimizer or scheduler needed
        train_dataset = None
        valid_dataset = None
        batch_sampler = None
        opt = None
        exp_scheduler = None
        logger = True
        callbacks = None
    desed_training = SEDTask4_2021(
        config,
        encoder=encoder,
        sed_student=sed_student,
        opt=opt,
        train_data=train_dataset,
        valid_data=valid_dataset,
        test_data=test_dataset,
        train_sampler=batch_sampler,
        scheduler=exp_scheduler,
        fast_dev_run=fast_dev_run,
        evaluation=evaluation
    )
    # Not using the fast_dev_run of Trainer because creates a DummyLogger so cannot check problems with the Logger
    if fast_dev_run:
        flush_logs_every_n_steps = 1
        log_every_n_steps = 1
        limit_train_batches = 2
        limit_val_batches = 2
        limit_test_batches = 2
        n_epochs = 3
    else:
        flush_logs_every_n_steps = 100
        log_every_n_steps = 40
        limit_train_batches = 1.
        limit_val_batches = 1.
        limit_test_batches = 1.
        n_epochs = config["training"]["n_epochs"]
    trainer = pl.Trainer(
        max_epochs=n_epochs,
        callbacks=callbacks,
        gpus=gpus,
        distributed_backend=config["training"].get("backend"),
        accumulate_grad_batches=config["training"]["accumulate_batches"],
        logger=logger,
        resume_from_checkpoint=checkpoint_resume,
        gradient_clip_val=config["training"]["gradient_clip"],
        check_val_every_n_epoch=config["training"]["validation_interval"],
        num_sanity_val_steps=0,
        log_every_n_steps=log_every_n_steps,
        flush_logs_every_n_steps=flush_logs_every_n_steps,
        limit_train_batches=limit_train_batches,
        limit_val_batches=limit_val_batches,
        limit_test_batches=limit_test_batches,
    )
    if test_state_dict is None:
        trainer.fit(desed_training)
        best_path = trainer.checkpoint_callback.best_model_path
        print(f"best model: {best_path}")
        test_state_dict = torch.load(best_path)["state_dict"]
    desed_training.load_state_dict(test_state_dict)
    trainer.test(desed_training)
if __name__ == "__main__":
    parser = argparse.ArgumentParser("Training a SED system for DESED Task")
    parser.add_argument("--conf_file", default="./confs/sed.yaml",
                        help="The configuration file with all the experiment parameters.")
    parser.add_argument("--log_dir", default="./exp/2021_baseline",
                        help="Directory where to save tensorboard logs, saved models, etc.")
    parser.add_argument("--resume_from_checkpoint", default=None,
                        help="Allow the training to be resumed, take as input a previously saved model (.ckpt).")
    parser.add_argument("--test_from_checkpoint", default=None,
                        help="Test the model specified")
    parser.add_argument("--gpus", default="0", help="The number of GPUs to train on, or the gpu to use, default='0', "
                                                    "so uses one GPU indexed by 0.")
    parser.add_argument("--fast_dev_run", action="store_true", default=False,
                        help="Use this option to make a 'fake' run which is useful for development and debugging. "
                             "It uses very few batches and epochs so it won't give any meaningful result.")
    parser.add_argument("--eval_from_checkpoint", default=None, help="Evaluate the model specified")
    args = parser.parse_args()
    with open(args.conf_file, "r") as f:
        configs = yaml.safe_load(f)
    # --eval_from_checkpoint takes precedence and switches to evaluation mode
    evaluation = False
    test_from_checkpoint = args.test_from_checkpoint
    if args.eval_from_checkpoint is not None:
        test_from_checkpoint = args.eval_from_checkpoint
        evaluation = True
    test_model_state_dict = None
    if test_from_checkpoint is not None:
        # reuse the hyper-parameters stored in the checkpoint, but keep the
        # current "data" section (paths may differ between machines)
        checkpoint = torch.load(test_from_checkpoint)
        configs_ckpt = checkpoint["hyper_parameters"]
        configs_ckpt["data"] = configs["data"]
        configs = configs_ckpt
        print(
            f"loaded model: {test_from_checkpoint} \n"
            f"at epoch: {checkpoint['epoch']}"
        )
        test_model_state_dict = checkpoint["state_dict"]
    # if evaluation:
    #     configs["training"]["batch_size_val"] = 1
    # seed everything for reproducibility
    seed = configs["training"]["seed"]
    if seed:
        torch.random.manual_seed(seed)
        np.random.seed(seed)
        random.seed(seed)
        pl.seed_everything(seed)
    test_only = test_from_checkpoint is not None
    # NOTE(review): the evaluation flag is not forwarded here, so the eval
    # folder is never resampled by this entry point — confirm intended.
    resample_data_generate_durations(configs["data"], test_only)
    single_run(
        configs,
        args.log_dir,
        args.gpus,
        args.resume_from_checkpoint,
        test_model_state_dict,
        args.fast_dev_run,
        evaluation
    )
| 11,519 | 34.015198 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/resample_folder.py | import argparse
import glob
import os
from pathlib import Path
import librosa
import torch
import torchaudio
import tqdm
# Command-line interface used when this module is executed directly
# (see the __main__ guard at the bottom of the file).
parser = argparse.ArgumentParser("Resample a folder recursively")
parser.add_argument(
    "--in_dir",
    type=str,
    default="/media/sam/bx500/DCASE_DATA/dataset/audio/validation/",
)
parser.add_argument("--out_dir", type=str, default="/tmp/val16k")
parser.add_argument("--target_fs", default=16000)
parser.add_argument("--regex", type=str, default="*.wav")
def resample(audio, orig_fs, target_fs):
    """Resample a multi-channel waveform to ``target_fs``.

    Each channel is resampled independently with librosa; when the source and
    target rates already match, the samples pass through unchanged.

    Args:
        audio (Tensor): waveform of shape (channels, samples)
        orig_fs (int): sample rate of ``audio``
        target_fs (int): desired sample rate

    Returns:
        Tensor: waveform at ``target_fs``, one row per channel
    """
    resampled_channels = []
    for channel in audio:
        samples = channel.detach().cpu().numpy()
        if target_fs != orig_fs:
            samples = librosa.resample(samples, orig_fs, target_fs)
        resampled_channels.append(torch.from_numpy(samples))
    return torch.stack(resampled_channels)
def resample_folder(in_dir, out_dir, target_fs=16000, regex="*.wav"):
    """
    Resamples the audio files contained in the in_dir folder and saves them in out_dir folder
    Args:
        in_dir (str): path to audio directory (audio to be resampled)
        out_dir (str): path to audio resampled directory
        target_fs (int, optional): target sample rate. Defaults to 16000.
        regex (str, optional): regular expression for extension of file. Defaults to "*.wav".
    Returns:
        bool: True when the resampling was performed, False when it was skipped
        because out_dir already contains as many matching files as in_dir.
    """
    # NOTE(review): the parser description says "recursively", but this glob
    # only matches files directly inside in_dir — confirm which is intended.
    compute = True
    files = glob.glob(os.path.join(in_dir, regex))
    if os.path.exists(out_dir):
        out_files = glob.glob(os.path.join(out_dir, regex))
        # Same file count in both folders => assume the work was already done.
        if len(files) == len(out_files):
            compute = False
    if compute:
        for f in tqdm.tqdm(files):
            audio, orig_fs = torchaudio.load(f)
            audio = resample(audio, orig_fs, target_fs)
            # Recreate the source folder structure under out_dir.
            os.makedirs(
                Path(os.path.join(out_dir, Path(f).relative_to(Path(in_dir)))).parent,
                exist_ok=True,
            )
            # NOTE(review): selecting the backend on every iteration is
            # redundant; it could be set once before the loop.
            torchaudio.set_audio_backend("sox_io")
            torchaudio.save(
                os.path.join(out_dir, Path(f).relative_to(Path(in_dir))),
                audio,
                target_fs,
            )
    return compute
if __name__ == "__main__":
    # Parse the CLI flags and resample every matching file under --in_dir.
    args = parser.parse_args()
    resample_folder(args.in_dir, args.out_dir, int(args.target_fs), args.regex)
| 2,556 | 29.807229 | 102 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer_SRST.py | import os
import random
from copy import deepcopy
from pathlib import Path
import local.config as cfg
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup, add_noise
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
JSD,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
    """ Pytorch lightning module for the SED 2021 baseline
    Args:
        hparams: dict, the dictionnary to be used for the current experiment/
        encoder: ManyHotEncoder object, object to encode and decode labels.
        sed_student: torch.Module, the student model to be trained. The teacher model will be
        opt: torch.optimizer.Optimizer object, the optimizer to be used
        train_data: torch.utils.data.Dataset subclass object, the training data to be used.
        valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
        test_data: torch.utils.data.Dataset subclass object, the test data to be used.
        train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
        scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
            used to apply ramp-up during training for example.
        fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
            to test the code runs.
    """

    def __init__(
        self,
        hparams,
        encoder,
        sed_student,
        opt=None,
        train_data=None,
        valid_data=None,
        test_data=None,
        train_sampler=None,
        scheduler=None,
        fast_dev_run=False,
    ):
        super(SEDTask4_2021, self).__init__()
        self.hparams = hparams
        self.encoder = encoder
        self.sed_student = sed_student
        # The teacher starts as an exact copy of the student and is afterwards
        # only updated by EMA (see update_ema / on_before_zero_grad).
        self.sed_teacher = deepcopy(sed_student)
        self.opt = opt
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.train_sampler = train_sampler
        self.scheduler = scheduler
        self.fast_dev_run = fast_dev_run
        if self.fast_dev_run:
            self.num_workers = 1
        else:
            self.num_workers = self.hparams["training"]["num_workers"]
        # add class_label
        # class_label is a lookup table from cfg used in training_step to turn
        # subset indices into many-hot label vectors.
        self.softmax = torch.nn.Softmax(dim=1)
        self.jsd = JSD()
        self.class_label = torch.tensor(cfg.class_label).cuda()
        # Mel front-end configured from the "feats" section of hparams.
        feat_params = self.hparams["feats"]
        self.mel_spec = MelSpectrogram(
            sample_rate=feat_params["sample_rate"],
            n_fft=feat_params["n_window"],
            win_length=feat_params["n_window"],
            hop_length=feat_params["hop_length"],
            f_min=feat_params["f_min"],
            f_max=feat_params["f_max"],
            n_mels=feat_params["n_mels"],
            window_fn=torch.hamming_window,
            wkwargs={"periodic": False},
            power=1,
        )
        # The teacher is never trained by back-prop; detach its parameters.
        for param in self.sed_teacher.parameters():
            param.detach_()
        # instantiating losses
        self.supervised_loss = torch.nn.BCELoss()
        if hparams["training"]["self_sup_loss"] == "mse":
            self.selfsup_loss = torch.nn.MSELoss()
        elif hparams["training"]["self_sup_loss"] == "bce":
            self.selfsup_loss = torch.nn.BCELoss()
        else:
            raise NotImplementedError
        # for weak labels we simply compute f1 score
        self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.scaler = self._init_scaler()
        # buffer for event based scores which we compute using sed-eval
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_student_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        # PSDS is computed over a grid of decision thresholds (operating points).
        test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
        test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
        self.decoded_student_05_buffer = pd.DataFrame()
        self.decoded_teacher_05_buffer = pd.DataFrame()
def update_ema(self, alpha, global_step, model, ema_model):
""" Update teacher model parameters
Args:
alpha: float, the factor to be used between each updated step.
global_step: int, the current global step to be used.
model: torch.Module, student model to use
ema_model: torch.Module, teacher model to use
"""
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_params, params in zip(ema_model.parameters(), model.parameters()):
ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
    def _init_scaler(self):
        """Scaler inizialization
        Raises:
            NotImplementedError: in case of not Implemented scaler
        Returns:
            TorchScaler: returns the scaler
        """
        # "instance" statistics need no fitting; "dataset" statistics are
        # fitted on the whole training set (or loaded from a cached file).
        if self.hparams["scaler"]["statistic"] == "instance":
            scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
            return scaler
        elif self.hparams["scaler"]["statistic"] == "dataset":
            # we fit the scaler
            scaler = TorchScaler(
                "dataset",
                self.hparams["scaler"]["normtype"],
                self.hparams["scaler"]["dims"],
            )
        else:
            raise NotImplementedError
        # Reuse a previously fitted scaler if one was cached on disk.
        if self.hparams["scaler"]["savepath"] is not None:
            if os.path.exists(self.hparams["scaler"]["savepath"]):
                scaler = torch.load(self.hparams["scaler"]["savepath"])
                print(
                    "Loaded Scaler from previous checkpoint from {}".format(
                        self.hparams["scaler"]["savepath"]
                    )
                )
                return scaler
        # Fit on the log-mel features of the whole training loader.
        self.train_loader = self.train_dataloader()
        scaler.fit(
            self.train_loader,
            transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
        )
        # Cache the fitted scaler for future runs.
        if self.hparams["scaler"]["savepath"] is not None:
            torch.save(scaler, self.hparams["scaler"]["savepath"])
            print(
                "Saving Scaler from previous checkpoint at {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
            return scaler
    def take_log(self, mels):
        """ Apply the log transformation to mel spectrograms.
        Args:
            mels: torch.Tensor, mel spectrograms for which to apply log.
        Returns:
            Tensor: logarithmic mel spectrogram of the mel spectrogram given as input
        """
        # NOTE(review): a fresh AmplitudeToDB module is built on every call;
        # hoisting it to __init__ would avoid the repeated construction.
        amp_to_db = AmplitudeToDB(stype="amplitude")
        amp_to_db.amin = 1e-5  # amin= 1e-5 as in librosa
        return amp_to_db(mels).clamp(min=-50, max=80)  # clamp to reproduce old code
    def training_step(self, batch, batch_indx):
        """ Applying the training for one batch (a step). Used during trainer.fit
        Args:
            batch: torch.Tensor, batch input tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
            torch.Tensor, the loss to take into account.
        """
        audio, labels, padded_indxs = batch
        # The batch is laid out as [synthetic(strong) | weak | unlabelled].
        indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
        features = self.mel_spec(audio)
        batch_num = features.shape[0]
        # deriving masks for each dataset
        strong_mask = torch.zeros(batch_num).to(features).bool()
        weak_mask = torch.zeros(batch_num).to(features).bool()
        strong_mask[:indx_synth] = 1
        weak_mask[indx_synth : indx_weak + indx_synth] = 1
        # deriving weak labels
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()
        # Optional mixup, applied with probability 0.5 separately to the weak
        # and strong subsets.
        mixup_type = self.hparams["training"].get("mixup")
        if mixup_type is not None and 0.5 > random.random():
            features[weak_mask], labels_weak = mixup(
                features[weak_mask], labels_weak, mixup_label_type=mixup_type
            )
            features[strong_mask], labels[strong_mask] = mixup(
                features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
            )
        # sed students forward
        strong_preds_student, weak_preds_student = self.sed_student(
            self.scaler(self.take_log(features))
        )
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(
            strong_preds_student[strong_mask], labels[strong_mask]
        )
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak
        with torch.no_grad():
            # perturbation
            ema_features = self.scaler(self.take_log(add_noise(features)))
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(ema_features)
            # Build pseudo strong targets from the teacher predictions: per
            # frame, the per-class Bernoulli log-probabilities are combined
            # into log-scores P0..P3 for every subset of 0..3 simultaneously
            # active classes, normalised by a softmax, and the expectation of
            # the corresponding many-hot vectors (self.class_label) is taken.
            # NOTE(review): the frame count 156 is hard-coded here and must
            # match the model output length — confirm.
            nClass = self.hparams['net']['nclass']
            est_strong_target = torch.zeros(batch_num,156,nClass).cuda()
            for bter in range(batch_num):
                sp = strong_preds_teacher[bter]
                # Clamp to avoid log(0) below.
                sp = torch.clamp(sp, 1.0e-4, 1-1.0e-4)
                p_h1 = torch.log(sp.permute((1,0)))
                p_h0 = torch.log(1-sp.permute(1,0))
                # K = 0
                P0 = p_h0.sum(1)
                # K = 1
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2
                P2 = []
                for cter in range(1,nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                #P = torch.cat([P0.reshape(156,1), P1, P2], 1)
                # K: up to 3
                P3 = []
                for cter1 in range(1,nClass):
                    for cter2 in range(1,nClass-cter1):
                        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                P3 = torch.cat(P3,1)
                P3 = P3 - 2*P0[:,None]
                P = torch.cat([P0.reshape(156,1), P1, P2, P3], 1)
                P = self.softmax(P)
                # Renormalise the sorted subset posteriors and take the
                # expectation over the matching label vectors.
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = self.class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                est_strong_target[bter,:,:] = torch.squeeze(cl[:156,:])
            est_strong_target = est_strong_target.permute((0,2,1))
            est_weak_target = est_strong_target.mean(2)
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[strong_mask], labels[strong_mask]
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[weak_mask], labels_weak
            )
        # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
        weight = (
            self.hparams["training"]["const_max"]
            * self.scheduler["scheduler"]._get_scaling_factor()
        )
        # Reliability weights: down-weight the self-supervised terms when the
        # pseudo targets disagree with the available ground truth (JSD-based).
        strong_reliability = weight*(1-self.jsd(est_strong_target[strong_mask], labels[strong_mask]).mean())
        weak_reliability = weight*(1-self.jsd(est_weak_target[weak_mask], labels_weak).mean())
        # NOTE(review): the hard-coded 24 below presumably equals indx_synth
        # (self-supervision applied only to weak + unlabelled clips) — confirm
        # against the configured batch_size triple.
        strong_self_sup_loss = self.selfsup_loss(
            strong_preds_student[24:], est_strong_target[24:]
        )
        weak_self_sup_loss = self.selfsup_loss(
            weak_preds_student[weak_mask], est_weak_target[weak_mask]
        )
        tot_self_loss = strong_reliability*strong_self_sup_loss + weak_reliability*weak_self_sup_loss
        tot_loss = tot_loss_supervised + tot_self_loss
        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", self.scheduler["scheduler"].step_num, prog_bar=True)
        self.log("train/student/tot_loss", tot_loss, prog_bar=True)
        self.log("train/weight", weight)
        # NOTE(review): this logs strong_self_sup_loss under the
        # "tot_supervised" key — looks like a copy/paste slip; confirm.
        self.log("train/student/tot_supervised", strong_self_sup_loss, prog_bar=True)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/lr", self.opt.param_groups[-1]["lr"], prog_bar=True)
        return {'loss': tot_loss}
    def on_before_zero_grad(self, *args, **kwargs):
        """Lightning hook: refresh the EMA teacher right after each optimizer step."""
        # update EMA teacher
        self.update_ema(
            self.hparams["training"]["ema_factor"],
            self.scheduler["scheduler"].step_num,
            self.sed_student,
            self.sed_teacher,
        )
    def validation_step(self, batch, batch_indx):
        """ Apply validation to a batch (step). Used during trainer.fit
        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)
        # we derive masks for each dataset based on folders of filenames
        mask_weak = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["weak_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        mask_synth = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["synth_val_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        # Weak subset: clip-level BCE losses + macro-F1 accumulation.
        if torch.any(mask_weak):
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            loss_weak_student = self.supervised_loss(
                weak_preds_student[mask_weak], labels_weak
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[mask_weak], labels_weak
            )
            self.log("val/weak/student/loss_weak", loss_weak_student)
            self.log("val/weak/teacher/loss_weak", loss_weak_teacher)
            # accumulate f1 score for weak labels
            self.get_weak_student_f1_seg_macro(
                weak_preds_student[mask_weak], labels_weak
            )
            self.get_weak_teacher_f1_seg_macro(
                weak_preds_teacher[mask_weak], labels_weak
            )
        # Synthetic subset: frame-level BCE losses + decoded events buffered
        # per threshold for the epoch-end metrics.
        if torch.any(mask_synth):
            loss_strong_student = self.supervised_loss(
                strong_preds_student[mask_synth], labels[mask_synth]
            )
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[mask_synth], labels[mask_synth]
            )
            self.log("val/synth/student/loss_strong", loss_strong_student)
            self.log("val/synth/teacher/loss_strong", loss_strong_teacher)
            filenames_synth = [
                x
                for x in filenames
                if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
            ]
            decoded_student_strong = batched_decode_preds(
                strong_preds_student[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_student_synth.keys()),
            )
            for th in self.val_buffer_student_synth.keys():
                self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                    th
                ].append(decoded_student_strong[th], ignore_index=True)
            decoded_teacher_strong = batched_decode_preds(
                strong_preds_teacher[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_teacher_synth.keys()),
            )
            for th in self.val_buffer_teacher_synth.keys():
                self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                    th
                ].append(decoded_teacher_strong[th], ignore_index=True)
        return
    def validation_epoch_end(self, outputs):
        """ Fonction applied at the end of all the validation steps of the epoch.
        Args:
            outputs: torch.Tensor, the concatenation of everything returned by validation_step.
        Returns:
            torch.Tensor, the objective metric to be used to choose the best model from for example.
        """
        weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
        weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()
        # synth dataset
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            self.val_buffer_student_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_student_event_macro = log_sedeval_metrics(
            self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            self.val_buffer_teacher_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_teacher_event_macro = log_sedeval_metrics(
            self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        # Which synthetic-set metric enters the model-selection objective.
        obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
        if obj_metric_synth_type is None:
            synth_metric = intersection_f1_macro_student
        elif obj_metric_synth_type == "event":
            synth_metric = synth_student_event_macro
        elif obj_metric_synth_type == "intersection":
            synth_metric = intersection_f1_macro_student
        else:
            raise NotImplementedError(
                f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
            )
        # Objective = weak macro-F1 + chosen synthetic metric (student model).
        obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)
        self.log("val/obj_metric", obj_metric, prog_bar=True)
        self.log("val/weak/student/macro_F1", weak_student_f1_macro)
        self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
        self.log(
            "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
        )
        self.log(
            "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
        )
        self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
        self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)
        # free the buffers
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.get_weak_student_f1_seg_macro.reset()
        self.get_weak_teacher_f1_seg_macro.reset()
        return obj_metric
def on_save_checkpoint(self, checkpoint):
checkpoint["sed_student"] = self.sed_student.state_dict()
checkpoint["sed_teacher"] = self.sed_teacher.state_dict()
return checkpoint
    def test_step(self, batch, batch_indx):
        """ Apply Test to a batch (step), used only when (trainer.test is called)
        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)
        loss_strong_student = self.supervised_loss(strong_preds_student, labels)
        loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)
        self.log("test/student/loss_strong", loss_strong_student)
        self.log("test/teacher/loss_strong", loss_strong_teacher)
        # compute psds
        # Decode once per PSDS operating point and accumulate into the
        # per-threshold buffers consumed in on_test_epoch_end.
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student.keys()),
        )
        for th in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
                th
            ].append(decoded_student_strong[th], ignore_index=True)
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher.keys()),
        )
        for th in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)
        # compute f1 score
        # Separate decode at the fixed 0.5 threshold for the event-based F1.
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
            decoded_student_strong[0.5]
        )
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
            decoded_teacher_strong[0.5]
        )
    def on_test_epoch_end(self):
        """Compute and log PSDS (two scenarios), event-based F1 and intersection F1
        for both student and teacher from the buffers filled by test_step."""
        # pub eval dataset
        try:
            log_dir = self.logger.log_dir
        except Exception as e:
            log_dir = self.hparams["log_dir"]
        save_dir = os.path.join(log_dir, "metrics_test")
        # Scenario 1: strict detection/ground-truth collars, no cross-trigger cost.
        psds_score_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario1"),
        )
        # Scenario 2: looser collars but cross-triggers are penalised.
        psds_score_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_student,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "student", "scenario2"),
        )
        psds_score_teacher_scenario1 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.7,
            gtc_threshold=0.7,
            alpha_ct=0,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario1"),
        )
        psds_score_teacher_scenario2 = compute_psds_from_operating_points(
            self.test_psds_buffer_teacher,
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
            dtc_threshold=0.1,
            gtc_threshold=0.1,
            cttc_threshold=0.3,
            alpha_ct=0.5,
            alpha_st=1,
            save_dir=os.path.join(save_dir, "teacher", "scenario2"),
        )
        # Event-based macro F1 at the fixed 0.5 threshold.
        event_macro_student = log_sedeval_metrics(
            self.decoded_student_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "student"),
        )[0]
        event_macro_teacher = log_sedeval_metrics(
            self.decoded_teacher_05_buffer,
            self.hparams["data"]["test_tsv"],
            os.path.join(save_dir, "teacher"),
        )[0]
        # synth dataset
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_student_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )
        # synth dataset
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            {"0.5": self.decoded_teacher_05_buffer},
            self.hparams["data"]["test_tsv"],
            self.hparams["data"]["test_dur"],
        )
        # The headline metric is the best of the two student PSDS scenarios.
        best_test_result = torch.tensor(max(psds_score_scenario1, psds_score_scenario2))
        results = {
            "hp_metric": best_test_result,
            "test/student/psds_score_scenario1": psds_score_scenario1,
            "test/student/psds_score_scenario2": psds_score_scenario2,
            "test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
            "test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
            "test/student/event_f1_macro": event_macro_student,
            "test/student/intersection_f1_macro": intersection_f1_macro_student,
            "test/teacher/event_f1_macro": event_macro_teacher,
            "test/teacher/intersection_f1_macro": intersection_f1_macro_teacher
        }
        if self.logger is not None:
            self.logger.log_metrics(results)
            self.logger.log_hyperparams(self.hparams, results)
        for key in results.keys():
            self.log(key, results[key], prog_bar=True, logger=False)
    def configure_optimizers(self):
        """Return the optimizer and scheduler that were injected at construction time."""
        return [self.opt], [self.scheduler]
def train_dataloader(self):
self.train_loader = torch.utils.data.DataLoader(
self.train_data,
batch_sampler=self.train_sampler,
num_workers=self.num_workers,
)
return self.train_loader
    def val_dataloader(self):
        """Return the validation DataLoader (deterministic order, keeps the last partial batch)."""
        self.val_loader = torch.utils.data.DataLoader(
            self.valid_data,
            batch_size=self.hparams["training"]["batch_size_val"],
            num_workers=self.num_workers,
            shuffle=False,
            drop_last=False,
        )
        return self.val_loader
def test_dataloader(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.test_loader
| 29,014 | 37.077428 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/utils.py | import os
from pathlib import Path
import pandas as pd
import scipy
from desed_task.evaluation.evaluation_measures import compute_sed_eval_metrics
from torch import nn
import soundfile
import glob
class JSD(nn.Module):
    """Symmetrised divergence used as a reliability measure for pseudo labels.

    NOTE(review): nn.KLDivLoss expects *log*-probabilities as its first
    argument, but forward() passes raw probabilities, and the result is
    negated — so this is not the textbook Jensen-Shannon divergence.  The
    training code (sed_trainer) relies on the values produced here, so the
    behaviour is documented rather than changed; confirm against the paper.
    """

    def __init__(self):
        super(JSD, self).__init__()
        self.kld = nn.KLDivLoss().cuda()

    def forward(self, p, q):
        # m is the midpoint between the two input distributions.
        m = 0.5*(p+q)
        return -0.5*(self.kld(p,m)+self.kld(q,m))
def batched_decode_preds(
    strong_preds, filenames, encoder, thresholds=[0.5], median_filter=7, pad_indx=None,
):
    """ Decode a batch of predictions to dataframes. Each threshold gives a different dataframe and stored in a
    dictionary

    Args:
        strong_preds: torch.Tensor, batch of strong predictions (batch, classes, frames).
        filenames: list, the list of filenames of the current batch.
        encoder: ManyHotEncoder object, object used to decode predictions.
        thresholds: list, the list of thresholds to be used for predictions.
        median_filter: int, the number of frames for which to apply median window (smoothing).
        pad_indx: list, the list of indexes which have been used for padding.
    Returns:
        dict of predictions, each keys is a threshold and the value is the DataFrame of predictions.
    """
    # Local import keeps this module working on modern SciPy: the old
    # `scipy.ndimage.filters` namespace is deprecated/removed.
    from scipy.ndimage import median_filter as _median_filter_fn

    # Init a dataframe per threshold
    prediction_dfs = {threshold: pd.DataFrame() for threshold in thresholds}
    for j in range(strong_preds.shape[0]):  # over batches
        c_preds = strong_preds[j]
        if pad_indx is not None:
            true_len = int(c_preds.shape[-1] * pad_indx[j].item())
            # Fix: true_len counts frames (last axis), so truncate the frame
            # axis; the previous code sliced the class axis instead.
            c_preds = c_preds[..., :true_len]
        # (classes, frames) -> (frames, classes); computed once per clip
        # instead of once per threshold.
        pred_scores = c_preds.transpose(0, 1).detach().cpu().numpy()
        for c_th in thresholds:
            pred = pred_scores > c_th
            # Median smoothing along the time axis only.
            pred = _median_filter_fn(pred, (median_filter, 1))
            pred = encoder.decode_strong(pred)
            pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
            pred["filename"] = Path(filenames[j]).stem + ".wav"
            # pd.concat replaces DataFrame.append, removed in pandas 2.x.
            prediction_dfs[c_th] = pd.concat(
                [prediction_dfs[c_th], pred], ignore_index=True
            )
    return prediction_dfs
def convert_to_event_based(weak_dataframe):
    """ Convert a weakly labeled DataFrame ('filename', 'event_labels') to a DataFrame strongly labeled
    ('filename', 'onset', 'offset', 'event_label').

    Args:
        weak_dataframe: pd.DataFrame, the dataframe to be converted.
    Returns:
        pd.DataFrame, the dataframe strongly labeled.
    """
    # Each clip contributes one row per comma-separated label, with a dummy
    # whole-clip [0, 1] event span.
    rows = [
        {"filename": record["filename"], "event_label": label, "onset": 0, "offset": 1}
        for _, record in weak_dataframe.iterrows()
        for label in record["event_labels"].split(",")
    ]
    return pd.DataFrame(rows)
def log_sedeval_metrics(predictions, ground_truth, save_dir=None):
    """ Return the set of metrics from sed_eval
    Args:
        predictions: pd.DataFrame, the dataframe of predictions.
        ground_truth: pd.DataFrame, the dataframe of groundtruth.
        save_dir: str, path to the folder where to save the event and segment based metrics outputs.

    Returns:
        tuple, event-based macro-F1 and micro-F1, segment-based macro-F1 and micro-F1
    """
    # Nothing predicted: all four scores are trivially zero.
    if predictions.empty:
        return 0.0, 0.0, 0.0, 0.0

    gt = pd.read_csv(ground_truth, sep="\t")
    event_res, segment_res = compute_sed_eval_metrics(predictions, gt)

    # Optionally dump the full sed_eval reports to disk.
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)
        for report_name, report in (("event_f1.txt", event_res), ("segment_f1.txt", segment_res)):
            with open(os.path.join(save_dir, report_name), "w") as fh:
                fh.write(str(report))

    scores = []
    for res in (event_res, segment_res):
        summary = res.results()
        scores.append(summary["class_wise_average"]["f_measure"]["f_measure"])
        scores.append(summary["overall"]["f_measure"]["f_measure"])
    return tuple(scores)  # return also segment measures
def parse_jams(jams_list, encoder, out_json):
    """Collect background/foreground source metadata from Scaper JAMS files.

    For every jams file, gathers the isolated-event wav paths into a
    "backgrounds" list and a "sources" list (dicts with filename, onset,
    offset, event_label) and writes them as JSON to ``out_json``.

    Args:
        jams_list: list of paths to .jams annotation files (must be non-empty).
        encoder: label encoder whose ``labels`` attribute lists the valid event
            classes (used to normalise Frying*/Vacuum_cleaner* label variants).
        out_json: path of the JSON file to write.

    Raises:
        IndexError: if ``jams_list`` is empty.
        NotImplementedError: if an event label is unknown and cannot be corrected.
    """
    # Fix: `json` was used here without ever being imported in this module,
    # so every call failed with NameError.
    import json

    if len(jams_list) == 0:
        raise IndexError("jams list is empty ! Wrong path ?")
    backgrounds = []
    sources = []
    for jamfile in jams_list:
        with open(jamfile, "r") as f:
            jdata = json.load(f)
        # check if we have annotations for each source in scaper
        assert len(jdata["annotations"][0]["data"]) == len(
            jdata["annotations"][-1]["sandbox"]["scaper"]["isolated_events_audio_path"]
        )
        for indx, sound in enumerate(jdata["annotations"][0]["data"]):
            # Isolated-source wav lives in "<jamstem>_events/<source_name>.wav"
            # next to the jams file.
            source_name = Path(
                jdata["annotations"][-1]["sandbox"]["scaper"][
                    "isolated_events_audio_path"
                ][indx]
            ).stem
            source_file = os.path.join(
                Path(jamfile).parent,
                Path(jamfile).stem + "_events",
                source_name + ".wav",
            )
            if sound["value"]["role"] == "background":
                backgrounds.append(source_file)
            else:  # it is an event
                if (
                    sound["value"]["label"] not in encoder.labels
                ):  # correct different labels
                    if sound["value"]["label"].startswith("Frying"):
                        sound["value"]["label"] = "Frying"
                    elif sound["value"]["label"].startswith("Vacuum_cleaner"):
                        sound["value"]["label"] = "Vacuum_cleaner"
                    else:
                        raise NotImplementedError
                sources.append(
                    {
                        "filename": source_file,
                        "onset": sound["value"]["event_time"],
                        "offset": sound["value"]["event_time"]
                        + sound["value"]["event_duration"],
                        "event_label": sound["value"]["label"],
                    }
                )
    os.makedirs(Path(out_json).parent, exist_ok=True)
    with open(out_json, "w") as f:
        json.dump({"backgrounds": backgrounds, "sources": sources}, f, indent=4)
def generate_tsv_wav_durations(audio_dir, out_tsv):
    """
    Generate a dataframe with filename and duration of the file

    Args:
        audio_dir: str, the path of the folder where audio files are (used by glob.glob)
        out_tsv: str, the path of the output tsv file (pass None to skip writing)

    Returns:
        pd.DataFrame: the dataframe containing filenames and durations
    """
    # One row per wav file directly inside audio_dir: basename + duration in seconds.
    rows = [
        [os.path.basename(wav_path), soundfile.info(wav_path).duration]
        for wav_path in glob.glob(os.path.join(audio_dir, "*.wav"))
    ]
    meta_df = pd.DataFrame(rows, columns=["filename", "duration"])
    if out_tsv is not None:
        meta_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.1f")
    return meta_df
| 6,982 | 35.369792 | 111 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/utilities.py | import numpy as np
import scipy.signal as sp
import wave, struct
import torch
import torch.nn as nn
from scipy.io import wavfile, loadmat
from torchaudio.functional import lfilter
from torchaudio.transforms import Spectrogram
class LinearSpectrogram(nn.Module):
    """Spectrogram front-end that pools STFT bins into nCh equal-width linear bands.

    The pooling is a fixed (non-learned) 0/1 matrix applied to the magnitude
    spectrogram after the DC bin is dropped.  The filter matrix is created on
    the GPU, so a CUDA device is required.
    """

    def __init__(self, nCh=128, n_fft=2048, hop_length=256, win_fn=torch.hamming_window):
        super(LinearSpectrogram, self).__init__()
        self.spec = Spectrogram(n_fft=n_fft, hop_length=hop_length, window_fn=win_fn)
        self.nCh = nCh
        nbin = n_fft//2
        fbin = nbin // nCh
        # One column per output channel; each column selects `fbin`
        # consecutive STFT bins.
        linfilt = torch.zeros([nbin, nCh]).cuda()
        for ch in range(nCh):
            stridx = ch*fbin
            endidx = ch*fbin+fbin
            linfilt[stridx:endidx,ch] = 1.0
        self.lfilter = linfilt

    def forward(self, wavData):
        # (batch, samples) -> (batch, freq, frames)
        specData = self.spec(wavData)
        # NOTE(review): nfrq / nfrm are never used below.
        bs, nfrq, nfrm = specData.size(0), specData.size(1), specData.size(2)
        # Drop the DC bin so that n_fft//2 frequency rows remain.
        specData = specData[:,1:,:]
        # Pool the frequency bins into nCh bands, then restore (batch, band, frame).
        out = torch.matmul(specData.permute(0,2,1),self.lfilter)
        return out.permute(0,2,1)
class AuditorySpectrogram(nn.Module):
    """Cochlear (auditory) spectrogram front-end.

    Filter coefficients are loaded from ./aud24.mat: COCHBA is complex-packed,
    the real part of row 0 holding each channel's filter order and the
    real/imag parts of the remaining rows holding the B (numerator) and
    A (denominator) coefficients.  All buffers are created on the GPU.

    Args:
        frmRate: frame-rate parameter; a frame is L_frm = frmRate * 2**(4+shft) samples.
        tc: time constant of the leaky temporal integration.
        fac: sigmoid nonlinearity factor of the hair-cell stage; fac == -2
            selects the "linear" variant and skips the membrane low-pass.
        shft: octave shift of the analysis.
    """

    def __init__(self, frmRate=16, tc=8, fac=1, shft=0):
        super(AuditorySpectrogram, self).__init__()
        self.frmRate = frmRate
        self.tc = tc
        self.fac = fac
        self.shft = shft
        self.haircell_tc = 0.5
        cochlear = loadmat('./aud24.mat')
        cochba = torch.from_numpy(cochlear['COCHBA']).cuda()
        L, M = cochba.shape
        self.L = L
        self.M = M
        # Unpack per-channel IIR coefficients.  Channels are appended from
        # column M-1 down to 0, so A[0]/B[0] correspond to the *last* COCHBA
        # column.
        A = []
        B = []
        for ch in range(M-1,-1,-1):
            p = torch.real(cochba[0, ch]).to(torch.long)
            B.append(torch.real(cochba[1:p+2, ch]).to(torch.float))
            A.append(torch.imag(cochba[1:p+2, ch]).to(torch.float))
        self.A = A
        self.B = B
        self.nCh = len(A)
        # Decay factors for leaky integration (alpha) and the hair-cell
        # membrane low-pass (beta).
        alpha = torch.exp(torch.tensor(-1/(tc*2**(4+shft)))).cuda()
        beta = torch.exp(torch.tensor(-1/(self.haircell_tc*2**(4+shft)))).cuda()
        self.alpha = alpha
        self.L_frm = torch.tensor(frmRate*2**(4+shft)).cuda()
        # hair-cell membrane
        self.hair_a = torch.tensor([1, -beta]).cuda().to(torch.float)
        self.hair_b = torch.tensor([1, 0]).cuda().to(torch.float)
        # temporal integration
        self.temp_a = torch.tensor([1, -alpha]).cuda().to(torch.float)
        self.temp_b = torch.tensor([1,0]).cuda().to(torch.float)

    def forward(self, wavData):
        # wavData: (batch, samples).
        bs, wavLeng = wavData.size(0), wavData.size(1)
        # Seed the lateral-inhibition reference with channel 0's output so the
        # first in-loop difference (ch == 0) is zero.
        y1 = lfilter(wavData, self.A[0], self.B[0])
        y2 = torch.sigmoid(y1*self.fac)
        # hair cell membrane (low-pass <= 4kHz)
        if not self.fac == -2:
            y2 = lfilter(y2, self.hair_a, self.hair_b)
        y2_h = y2
        # NOTE(review): y3_h is assigned but never used.
        y3_h = 0
        #####################################################
        # All other channels
        #####################################################
        audData = []
        for ch in range(self.nCh):
            y1 = lfilter(wavData, self.A[ch], self.B[ch])
            ########################################
            # TRANSDUCTION: hair cells
            ########################################
            # Fluid cillia coupling (preemphasis) (ignored)
            # ionic channels (sigmoid function)
            y2 = torch.sigmoid(y1*self.fac)
            # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)
            if not self.fac == -2:
                y2 = lfilter(y2, self.hair_a, self.hair_b)
            ########################################
            # REDUCTION: lateral inhibitory network
            ########################################
            # masked by higher (frequency) spatial response
            y3 = y2 - y2_h
            y2_h = y2
            # half-wave rectifier ---> y4
            y4 = torch.maximum(torch.tensor(0).cuda(), y3)
            # temporal integration window ---> y5
            if self.alpha:  # leaky integration
                y5 = lfilter(y4, self.temp_a, self.temp_b)
                audData.append(y5[:,0:-1:self.L_frm])
            else:  # short-term average
                # NOTE(review): this branch references `L_frm` and `self.N`,
                # neither of which is defined — it would raise NameError /
                # AttributeError if self.alpha were ever falsy.  Confirm the
                # intended names (self.L_frm, batch size?).
                if L_frm == 1:
                    audData.append(y4)
                else:
                    audData.append(torch.mean(torch.reshape(y4, [self.L_frm, self.N]), 0))
        audData = torch.stack(audData,2)
        # (batch, frames, channels) -> (batch, channels, frames)
        return audData.permute(0,2,1)
def audioread(audioPath):
FS, wavData = wavfile.read(audioPath)
maxV = np.amax(abs(wavData))
wavData = wavData/maxV
return wavData, FS
def wav2aud(batchWave, frmLeng, tc, fac, shft):
    """Compute cochlear (auditory) spectrograms for a batch of waveforms.

    Functional counterpart of AuditorySpectrogram: cochlear IIR filterbank
    -> sigmoid hair-cell transduction -> lateral inhibition -> half-wave
    rectification -> temporal integration, per channel.

    Args:
        batchWave: (batch, samples) tensor of waveforms (CUDA).
        frmLeng: frame-length factor; the hop is frmLeng * 2**(4+shft) samples.
        tc: leaky-integrator time-constant factor.
        fac: sigmoid scale (fac == -2 skips the hair-cell low-pass).
        shft: octave shift applied to the time constants.

    Requires './aud24.mat' (key 'COCHBA') in the working directory and a
    CUDA device.
    """
    nbatch = batchWave.shape[0]
    # define parameters and load cochlear filter
    cochlear = loadmat('./aud24.mat')
    COCHBA = torch.from_numpy(cochlear['COCHBA']).cuda()
    L, M = COCHBA.shape
    haircell_tc= 0.5
    # alpha/beta: per-sample decay factors of the leaky integrators
    alpha = torch.exp(torch.tensor(-1/(tc*2**(4+shft)))).cuda()
    beta = torch.exp(torch.tensor(-1/(haircell_tc*2**(4+shft)))).cuda()
    L_frm = torch.tensor(frmLeng*2**(4+shft)).cuda()  # samples per output frame
    batchAud = []
    for bter in range(nbatch):
        wavData = batchWave[bter]
        L_x = len(wavData)
        # zero-pad the waveform to a whole number of frames
        N = torch.ceil(L_x/L_frm).to(torch.long).cuda()
        buff = torch.zeros([N*L_frm]).cuda()
        buff[:L_x] = wavData
        wavData = buff
        # initialize output
        audData = torch.zeros([N, M-1]).cuda()
        #####################################################
        # Last channel (highest frequency)
        #####################################################
        # COCHBA packing: row 0 = filter order p; rows 1..p+1 hold B (real)
        # and A (imag) coefficients for torchaudio's lfilter(wave, A, B).
        p = torch.real(COCHBA[0, M-1]).to(torch.long)
        B = torch.real(COCHBA[1:p+2, M-1]).to(torch.float)
        A = torch.imag(COCHBA[1:p+2, M-1]).to(torch.float)
        y1 = lfilter(wavData, A, B)
        y2 = torch.sigmoid(y1*fac)
        # hair cell membrane (low-pass <= 4kHz)
        if not fac == -2:
            b = torch.tensor([1, 0]).cuda().to(torch.float)
            a = torch.tensor([1, -beta]).cuda().to(torch.float)
            y2 = lfilter(y2, a, b)
        # seed for the lateral-inhibition difference in the loop below
        y2_h = y2
        y3_h = 0
        #####################################################
        # All other channels
        #####################################################
        for ch in range(M-2,-1,-1):
            ########################################
            # ANALYSIS: cochlear filterbank
            ########################################
            # (IIR) filter bank convolution ---> y1
            p = torch.real(COCHBA[0, ch]).to(torch.long)
            B = torch.real(COCHBA[1:p+2, ch]).to(torch.float)
            A = torch.imag(COCHBA[1:p+2, ch]).to(torch.float)
            y1 = lfilter(wavData, A, B)
            ########################################
            # TRANSDUCTION: hair cells
            ########################################
            # Fluid cillia coupling (preemphasis) (ignored)
            # ionic channels (sigmoid function)
            y2 = torch.sigmoid(y1*fac)
            # hair cell membrane (low-pass <= 4 kHz) ---> y2 (ignored for linear)
            if not fac == -2:
                b = torch.tensor([1, 0]).cuda().to(torch.float)
                a = torch.tensor([1, -beta]).cuda().to(torch.float)
                y2 = lfilter(y2, a, b)
            ########################################
            # REDUCTION: lateral inhibitory network
            ########################################
            # masked by higher (frequency) spatial response
            y3 = y2 - y2_h
            y2_h = y2
            # half-wave rectifier ---> y4
            y4 = torch.maximum(torch.tensor(0).cuda(), y3)
            # temporal integration window ---> y5
            if alpha: # leaky integration (always taken: alpha = exp(...) > 0)
                b = torch.tensor([1, 0]).cuda().to(torch.float)
                a = torch.tensor([1, -alpha]).cuda().to(torch.float)
                y5 = lfilter(y4, a, b)
                # sample the integrator every L_frm samples -> N frames
                audData[:, ch] = y5[0:-1:L_frm]
            else: # short-term average
                if L_frm == 1:
                    audData[:, ch] = y4
                else:
                    audData[:, ch] = torch.mean(torch.reshape(y4, [L_frm, N]), 0)
        batchAud.append(audData)
    # NOTE(review): each audData is 2-D [N, M-1]; torch.cat along dim 0 keeps
    # the result 2-D, so the 3-axis permute below would raise for any input —
    # torch.stack(batchAud, 0) was presumably intended; confirm with callers.
    batchAud = torch.cat(batchAud, 0).permute(0,2,1)
    return batchAud
def sigmoid(x, a):
    """Logistic sigmoid with temperature ``a``: 1 / (1 + exp(-x / a))."""
    return 1.0 / (1.0 + np.exp(-x / a))
def DataNormalization(target, meanV=None, stdV=None):
    """Z-score normalize ``target`` column-wise.

    Args:
        target: array of shape [nData, nDim].
        meanV: optional per-dimension means; computed from ``target`` when None.
        stdV: optional per-dimension standard deviations (sample std,
            ddof=1, when computed here).

    Returns:
        (output, meanV, stdV): the normalized [nData, nDim] float array and
        the statistics used, so they can be re-applied to other splits.
    """
    target = np.asarray(target)
    if meanV is None:
        # Fit the statistics on this data (sample std, ddof=1, as before).
        meanV = np.mean(target, axis=0)
        stdV = np.std(target, axis=0, ddof=1)
    # The original looped row by row in both branches with identical bodies;
    # broadcasting performs the same computation in one vectorized step.
    output = np.asarray((target - meanV) / stdV, dtype=float)
    return output, meanV, stdV
def DataRegularization(target):
    """Regularize every sequence of ``target`` in place.

    Each entry target[d, s] (a sample vector) is linearly rescaled to the
    range [0, 2] and then mean-centred, giving zero mean and a peak-to-peak
    range of 2.

    Args:
        target: array of shape [nData, nSeq, ...]; modified in place.

    Returns:
        The same ``target`` array with every sequence regularized.
    """
    n_data, n_seq = target.shape[0], target.shape[1]
    for d_idx, s_idx in np.ndindex(n_data, n_seq):
        seq = target[d_idx, s_idx]
        lo = np.amin(seq)
        hi = np.amax(seq)
        scaled = 2 * (seq - lo) / (hi - lo)
        target[d_idx, s_idx] = scaled - np.mean(scaled)
    return target
def weights_init(m):
    """ Initialize the weights of some layers of neural networks, here Conv2D, BatchNorm, GRU, Linear
    Based on the work of Xavier Glorot
    Args:
        m: the model to initialize
    """
    layer_kind = m.__class__.__name__
    if 'Conv2d' in layer_kind:
        # Glorot/Xavier init with ReLU gain for conv kernels, zero bias.
        nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        m.bias.data.fill_(0)
    elif 'BatchNorm' in layer_kind:
        # Scale around 1, shift at 0.
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif 'GRU' in layer_kind:
        # Orthogonal init for every weight matrix; 1-D biases untouched.
        for w in m.parameters():
            if w.dim() > 1:
                nn.init.orthogonal_(w.data)
    elif 'Linear' in layer_kind:
        m.weight.data.normal_(0, 0.01)
        m.bias.data.zero_()
def calc_error(samples, labels):
    """Compute a geometric regularization error over snapshot embeddings.

    Displacement vectors are formed from each item's first snapshot to every
    later snapshot.  Over cyclic rotations of those displacement vectors the
    error accumulates:
      * the mean norm of one displacement after removing its projections
        onto the next two displacements (a co-planarity penalty), and
      * the mean squared cosine similarity between unit displacements of
        items of one class and items of all other classes, averaged over
        classes (a cross-class orthogonality penalty).

    Args:
        samples: (batch, nSnaps, nDim) tensor of embeddings.
        labels: (batch, nSnaps, nClass) one-hot tensor; only the first
            snapshot's labels are used to assign each item to a class.

    Returns:
        Scalar tensor: the error summed over all rotations.
    """
    batch_size, nSnaps, nDim = list(samples.size())
    _, _, nClass = list(labels.size())
    # work snapshot-major: (nSnaps, batch, ...)
    samples = samples.permute(1,0,2)
    labels = labels.permute(1,0,2).cpu().numpy()
    # class index of each batch item, taken from the first snapshot's one-hot
    cidx = np.where(labels[0])[1]
    idx = np.arange(nSnaps)
    idx = np.delete(idx, 0)
    # displacements of snapshots 1..nSnaps-1 from snapshot 0
    v0 = samples[0]
    v1 = samples[idx]
    v = v1 - v0
    nVec, batch_size, nDim = list(v.size())
    error = None
    # NOTE(review): `iter` shadows the Python builtin here.
    for iter in range(nVec):
        # cyclic rotation selects which displacement is the "target" (idx[0])
        # and which two span the reference plane (idx[1], idx[2])
        idx = np.arange(nVec)
        idx = np.roll(idx, iter)
        v1_norm = torch.norm(v[idx[1]], dim=1)**2
        v2_norm = torch.norm(v[idx[2]], dim=1)**2
        v01_dot = torch.mul(v[idx[0]], v[idx[1]]).sum(1)
        v02_dot = torch.mul(v[idx[0]], v[idx[2]]).sum(1)
        # projection coefficients of v[idx[0]] onto v[idx[1]] and v[idx[2]]
        alpha = torch.div(v01_dot, v1_norm)
        beta = torch.div(v02_dot, v2_norm)
        # residual after removing both projections; its norm penalizes
        # components outside the plane spanned by the two references
        n_vec = v[idx[0]] - torch.mul(alpha[:,None],v[idx[1]]) - torch.mul(beta[:,None],v[idx[2]])
        n_vec_norm = torch.norm(n_vec, dim=1).mean()
        orthogonality = 0
        for cter in range(nClass):
            # split batch items into class cter (tidx) and the rest (ntidx)
            tidx = np.where(cidx==cter)[0]
            ntidx = np.arange(batch_size)
            ntidx = np.delete(ntidx, tidx)
            # unit-normalize the target displacement of every item
            vecs = v[idx[0]]
            nvec = torch.norm(vecs, dim=1)
            vecs = torch.div(vecs, nvec[:,None])
            tvec = vecs[tidx]
            ntvec = vecs[ntidx].permute(1,0)
            # squared cosine similarity between class and non-class items
            inners = torch.matmul(tvec, ntvec)**2
            orthogonality += inners.mean()
        if error is None:
            error = (n_vec_norm + orthogonality/nClass)
        else:
            error += (n_vec_norm + orthogonality/nClass)
    return error
| 10,213 | 28.865497 | 101 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer.py | import os
import random
from copy import deepcopy
from pathlib import Path
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
""" Pytorch lightning module for the SED 2021 baseline
Args:
hparams: dict, the dictionnary to be used for the current experiment/
encoder: ManyHotEncoder object, object to encode and decode labels.
sed_student: torch.Module, the student model to be trained. The teacher model will be
opt: torch.optimizer.Optimizer object, the optimizer to be used
train_data: torch.utils.data.Dataset subclass object, the training data to be used.
valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
test_data: torch.utils.data.Dataset subclass object, the test data to be used.
train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
used to apply ramp-up during training for example.
fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
to test the code runs.
"""
    def __init__(
        self,
        hparams,
        encoder,
        sed_student,
        opt=None,
        train_data=None,
        valid_data=None,
        test_data=None,
        train_sampler=None,
        scheduler=None,
        fast_dev_run=False,
        evaluation=False
    ):
        super(SEDTask4_2021, self).__init__()
        self.hparams = hparams
        self.encoder = encoder
        self.sed_student = sed_student
        # The teacher starts as an exact copy of the student; it is then
        # updated as an EMA of the student (see update_ema), never by backprop.
        self.sed_teacher = deepcopy(sed_student)
        self.opt = opt
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.train_sampler = train_sampler
        self.scheduler = scheduler
        self.fast_dev_run = fast_dev_run
        self.evaluation = evaluation
        # fast_dev_run is a single-batch smoke test; keep dataloading simple
        if self.fast_dev_run:
            self.num_workers = 1
        else:
            self.num_workers = self.hparams["training"]["num_workers"]
        # mel-spectrogram front-end built from the feature config
        feat_params = self.hparams["feats"]
        self.mel_spec = MelSpectrogram(
            sample_rate=feat_params["sample_rate"],
            n_fft=feat_params["n_window"],
            win_length=feat_params["n_window"],
            hop_length=feat_params["hop_length"],
            f_min=feat_params["f_min"],
            f_max=feat_params["f_max"],
            n_mels=feat_params["n_mels"],
            window_fn=torch.hamming_window,
            wkwargs={"periodic": False},
            power=1,
        )
        # teacher parameters are detached: no gradients ever flow into them
        for param in self.sed_teacher.parameters():
            param.detach_()
        # instantiating losses
        self.supervised_loss = torch.nn.BCELoss()
        if hparams["training"]["self_sup_loss"] == "mse":
            self.selfsup_loss = torch.nn.MSELoss()
        elif hparams["training"]["self_sup_loss"] == "bce":
            self.selfsup_loss = torch.nn.BCELoss()
        else:
            raise NotImplementedError
        # for weak labels we simply compute f1 score
        self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.scaler = self._init_scaler()
        # buffer for event based scores which we compute using sed-eval
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_student_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        # evenly spaced operating points in (0, 1) for the PSDS computation
        test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
        test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
        # decoded events at the fixed 0.5 threshold (for F1-style metrics)
        self.decoded_student_05_buffer = pd.DataFrame()
        self.decoded_teacher_05_buffer = pd.DataFrame()
def update_ema(self, alpha, global_step, model, ema_model):
""" Update teacher model parameters
Args:
alpha: float, the factor to be used between each updated step.
global_step: int, the current global step to be used.
model: torch.Module, student model to use
ema_model: torch.Module, teacher model to use
"""
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_params, params in zip(ema_model.parameters(), model.parameters()):
ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
    def _init_scaler(self):
        """Scaler initialization.
        Raises:
            NotImplementedError: in case of a not implemented scaler statistic
        Returns:
            TorchScaler: returns the scaler
        """
        # "instance" statistics need no fitting: return immediately
        if self.hparams["scaler"]["statistic"] == "instance":
            scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
            return scaler
        elif self.hparams["scaler"]["statistic"] == "dataset":
            # we fit the scaler
            scaler = TorchScaler(
                "dataset",
                self.hparams["scaler"]["normtype"],
                self.hparams["scaler"]["dims"],
            )
        else:
            raise NotImplementedError
        # reuse a previously fitted scaler when a cached one exists on disk
        if self.hparams["scaler"]["savepath"] is not None:
            if os.path.exists(self.hparams["scaler"]["savepath"]):
                scaler = torch.load(self.hparams["scaler"]["savepath"])
                print(
                    "Loaded Scaler from previous checkpoint from {}".format(
                        self.hparams["scaler"]["savepath"]
                    )
                )
                return scaler
        # fit the dataset statistics on log-mel features of the training set
        self.train_loader = self.train_dataloader()
        scaler.fit(
            self.train_loader,
            transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
        )
        # cache the fitted scaler for subsequent runs
        if self.hparams["scaler"]["savepath"] is not None:
            torch.save(scaler, self.hparams["scaler"]["savepath"])
            print(
                "Saving Scaler from previous checkpoint at {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
        return scaler
def take_log(self, mels):
""" Apply the log transformation to mel spectrograms.
Args:
mels: torch.Tensor, mel spectrograms for which to apply log.
Returns:
Tensor: logarithmic mel spectrogram of the mel spectrogram given as input
"""
amp_to_db = AmplitudeToDB(stype="amplitude")
amp_to_db.amin = 1e-5 # amin= 1e-5 as in librosa
return amp_to_db(mels).clamp(min=-50, max=80) # clamp to reproduce old code
    def training_step(self, batch, batch_indx):
        """ Applying the training for one batch (a step). Used during trainer.fit
        Args:
            batch: torch.Tensor, batch input tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
            torch.Tensor, the loss to take into account.
        """
        audio, labels, padded_indxs = batch
        # Each batch is laid out as [synthetic strong | weak | unlabelled];
        # the three sub-batch sizes come from the training config.
        indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
        features = self.mel_spec(audio)
        batch_num = features.shape[0]
        # deriving masks for each dataset
        strong_mask = torch.zeros(batch_num).to(features).bool()
        weak_mask = torch.zeros(batch_num).to(features).bool()
        strong_mask[:indx_synth] = 1
        weak_mask[indx_synth : indx_weak + indx_synth] = 1
        # deriving weak labels: a class is active if present in any frame
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()
        # mixup (when configured) is applied with probability 0.5,
        # separately on the weak and strong subsets
        mixup_type = self.hparams["training"].get("mixup")
        if mixup_type is not None and 0.5 > random.random():
            features[weak_mask], labels_weak = mixup(
                features[weak_mask], labels_weak, mixup_label_type=mixup_type
            )
            features[strong_mask], labels[strong_mask] = mixup(
                features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
            )
        # sed student forward
        strong_preds_student, weak_preds_student = self.sed_student(
            self.scaler(self.take_log(features))
        )
        # supervised loss on strong labels
        loss_strong = self.supervised_loss(
            strong_preds_student[strong_mask], labels[strong_mask]
        )
        # supervised loss on weakly labelled
        loss_weak = self.supervised_loss(weak_preds_student[weak_mask], labels_weak)
        # total supervised loss
        tot_loss_supervised = loss_strong + loss_weak
        # teacher forward: no gradients flow into the teacher; its losses
        # below are logged for monitoring only
        with torch.no_grad():
            ema_features = self.scaler(self.take_log(features))
            strong_preds_teacher, weak_preds_teacher = self.sed_teacher(ema_features)
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[strong_mask], labels[strong_mask]
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[weak_mask], labels_weak
            )
        # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
        weight = (
            self.hparams["training"]["const_max"]
            * self.scheduler["scheduler"]._get_scaling_factor()
        )
        # consistency (mean-teacher) losses on ALL clips, labelled or not
        strong_self_sup_loss = self.selfsup_loss(
            strong_preds_student, strong_preds_teacher.detach()
        )
        weak_self_sup_loss = self.selfsup_loss(
            weak_preds_student, weak_preds_teacher.detach()
        )
        tot_self_loss = (strong_self_sup_loss + weak_self_sup_loss) * weight
        tot_loss = tot_loss_supervised + tot_self_loss
        self.log("train/student/loss_strong", loss_strong)
        self.log("train/student/loss_weak", loss_weak)
        self.log("train/teacher/loss_strong", loss_strong_teacher)
        self.log("train/teacher/loss_weak", loss_weak_teacher)
        self.log("train/step", self.scheduler["scheduler"].step_num, prog_bar=True)
        self.log("train/student/tot_self_loss", tot_self_loss, prog_bar=True)
        self.log("train/weight", weight)
        # NOTE(review): the "tot_supervised" key logs strong_self_sup_loss,
        # not tot_loss_supervised — looks like a copy-paste mislabel; confirm
        # which metric dashboards expect before changing.
        self.log("train/student/tot_supervised", strong_self_sup_loss, prog_bar=True)
        self.log("train/student/weak_self_sup_loss", weak_self_sup_loss)
        self.log("train/student/strong_self_sup_loss", strong_self_sup_loss)
        self.log("train/lr", self.opt.param_groups[-1]["lr"], prog_bar=True)
        return tot_loss
def on_before_zero_grad(self, *args, **kwargs):
# update EMA teacher
self.update_ema(
self.hparams["training"]["ema_factor"],
self.scheduler["scheduler"].step_num,
self.sed_student,
self.sed_teacher,
)
    def validation_step(self, batch, batch_indx):
        """ Apply validation to a batch (step). Used during trainer.fit
        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)
        # we derive masks for each dataset based on folders of filenames
        mask_weak = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["weak_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        mask_synth = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["synth_val_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        # weakly-labelled clips: clip-level losses and macro-F1 accumulation
        if torch.any(mask_weak):
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            loss_weak_student = self.supervised_loss(
                weak_preds_student[mask_weak], labels_weak
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[mask_weak], labels_weak
            )
            self.log("val/weak/student/loss_weak", loss_weak_student)
            self.log("val/weak/teacher/loss_weak", loss_weak_teacher)
            # accumulate f1 score for weak labels
            self.get_weak_student_f1_seg_macro(
                weak_preds_student[mask_weak], labels_weak
            )
            self.get_weak_teacher_f1_seg_macro(
                weak_preds_teacher[mask_weak], labels_weak
            )
        # synthetic clips: frame-level losses plus event decoding at every
        # configured validation threshold, appended to the epoch buffers
        if torch.any(mask_synth):
            loss_strong_student = self.supervised_loss(
                strong_preds_student[mask_synth], labels[mask_synth]
            )
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[mask_synth], labels[mask_synth]
            )
            self.log("val/synth/student/loss_strong", loss_strong_student)
            self.log("val/synth/teacher/loss_strong", loss_strong_teacher)
            filenames_synth = [
                x
                for x in filenames
                if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
            ]
            decoded_student_strong = batched_decode_preds(
                strong_preds_student[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_student_synth.keys()),
            )
            for th in self.val_buffer_student_synth.keys():
                self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                    th
                ].append(decoded_student_strong[th], ignore_index=True)
            decoded_teacher_strong = batched_decode_preds(
                strong_preds_teacher[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_teacher_synth.keys()),
            )
            for th in self.val_buffer_teacher_synth.keys():
                self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                    th
                ].append(decoded_teacher_strong[th], ignore_index=True)
        return
    def validation_epoch_end(self, outputs):
        """ Fonction applied at the end of all the validation steps of the epoch.
        Args:
            outputs: torch.Tensor, the concatenation of everything returned by validation_step.
        Returns:
            torch.Tensor, the objective metric to be used to choose the best model from for example.
        """
        weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
        weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()
        # synth dataset: intersection-based and event-based macro F1 for both models
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            self.val_buffer_student_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_student_event_macro = log_sedeval_metrics(
            self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            self.val_buffer_teacher_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_teacher_event_macro = log_sedeval_metrics(
            self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        # config selects which student synth metric enters the objective
        # (default: intersection F1)
        obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
        if obj_metric_synth_type is None:
            synth_metric = intersection_f1_macro_student
        elif obj_metric_synth_type == "event":
            synth_metric = synth_student_event_macro
        elif obj_metric_synth_type == "intersection":
            synth_metric = intersection_f1_macro_student
        else:
            raise NotImplementedError(
                f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
            )
        # model-selection objective: weak-set F1 + chosen synth metric
        obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)
        self.log("val/obj_metric", obj_metric, prog_bar=True)
        self.log("val/weak/student/macro_F1", weak_student_f1_macro)
        self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
        self.log(
            "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
        )
        self.log(
            "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
        )
        self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
        self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)
        # free the buffers (and reset the F1 accumulators) for the next epoch
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.get_weak_student_f1_seg_macro.reset()
        self.get_weak_teacher_f1_seg_macro.reset()
        return obj_metric
def on_save_checkpoint(self, checkpoint):
checkpoint["sed_student"] = self.sed_student.state_dict()
checkpoint["sed_teacher"] = self.sed_teacher.state_dict()
return checkpoint
    def test_step(self, batch, batch_indx):
        """ Apply Test to a batch (step), used only when (trainer.test is called)
        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        # prediction for student
        logmels = self.scaler(self.take_log(self.mel_spec(audio)))
        strong_preds_student, weak_preds_student = self.sed_student(logmels)
        # prediction for teacher
        strong_preds_teacher, weak_preds_teacher = self.sed_teacher(logmels)
        # disabled debugging block: dumps raw frame posteriors to disk
        """
        bsz = len(filenames)
        for bter in range(bsz):
            pred_student = strong_preds_student[bter].cpu().numpy()
            pred_teacher = strong_preds_teacher[bter].cpu().numpy()
            path, filename = os.path.split(filenames[bter])
            np.save('./Posterior/student/{}.npy'.format(filename), pred_student)
            np.save('./Posterior/teacher/{}.npy'.format(filename), pred_teacher)
        """
        # in evaluation mode there are no labels, so losses are skipped
        if not self.evaluation:
            loss_strong_student = self.supervised_loss(strong_preds_student, labels)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)
            self.log("test/student/loss_strong", loss_strong_student)
            self.log("test/teacher/loss_strong", loss_strong_teacher)
        # compute psds: decode events at every PSDS operating point and
        # accumulate them into the per-threshold buffers
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student.keys()),
        )
        for th in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
                th
            ].append(decoded_student_strong[th], ignore_index=True)
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher.keys()),
        )
        for th in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)
        # compute f1 score: separate decode at the fixed 0.5 threshold
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
            decoded_student_strong[0.5]
        )
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
            decoded_teacher_strong[0.5]
        )
def on_test_epoch_end(self):
# pub eval dataset
try:
log_dir = self.logger.log_dir
except Exception as e:
log_dir = self.hparams["log_dir"]
save_dir = os.path.join(log_dir, "metrics_test")
if self.evaluation:
# only save the predictions
save_dir_student = os.path.join(save_dir, "student")
os.makedirs(save_dir_student, exist_ok=True)
self.decoded_student_05_buffer.to_csv(
os.path.join(save_dir_student, f"predictions_05_student.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_student, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for student saved in: {save_dir_student}")
save_dir_teacher = os.path.join(save_dir, "teacher")
os.makedirs(save_dir_teacher, exist_ok=True)
self.decoded_teacher_05_buffer.to_csv(
os.path.join(save_dir_teacher, f"predictions_05_teacher.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_teacher, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for teacher saved in: {save_dir_teacher}")
else:
psds_score_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario1"),
)
psds_score_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario2"),
)
psds_score_teacher_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario1"),
)
psds_score_teacher_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario2"),
)
event_macro_student = log_sedeval_metrics(
self.decoded_student_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student"),
)[0]
event_macro_teacher = log_sedeval_metrics(
self.decoded_teacher_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher"),
)[0]
# synth dataset
intersection_f1_macro_student = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
best_test_result = torch.tensor(max(psds_score_scenario1, psds_score_scenario2))
results = {
"hp_metric": best_test_result,
"test/student/psds_score_scenario1": psds_score_scenario1,
"test/student/psds_score_scenario2": psds_score_scenario2,
"test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
"test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
"test/student/event_f1_macro": event_macro_student,
"test/student/intersection_f1_macro": intersection_f1_macro_student,
"test/teacher/event_f1_macro": event_macro_teacher,
"test/teacher/intersection_f1_macro": intersection_f1_macro_teacher
}
if self.logger is not None:
self.logger.log_metrics(results)
self.logger.log_hyperparams(self.hparams, results)
for key in results.keys():
self.log(key, results[key], prog_bar=True, logger=False)
def configure_optimizers(self):
return [self.opt], [self.scheduler]
def train_dataloader(self):
self.train_loader = torch.utils.data.DataLoader(
self.train_data,
batch_sampler=self.train_sampler,
num_workers=self.num_workers,
)
return self.train_loader
def val_dataloader(self):
self.val_loader = torch.utils.data.DataLoader(
self.valid_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.val_loader
def test_dataloader(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.test_loader
| 29,083 | 37.675532 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/recipes/dcase2021_task4_baseline/local/sed_trainer_CRST.py | import os
import random
from copy import deepcopy
from pathlib import Path
import local.config as cfg
import pandas as pd
import pytorch_lightning as pl
import torch
from torchaudio.transforms import AmplitudeToDB, MelSpectrogram
from desed_task.data_augm import mixup, frame_shift, add_noise, temporal_reverse
from desed_task.utils.scaler import TorchScaler
import numpy as np
from .utils import (
batched_decode_preds,
log_sedeval_metrics,
JSD,
)
from desed_task.evaluation.evaluation_measures import (
compute_per_intersection_macro_f1,
compute_psds_from_operating_points,
)
class SEDTask4_2021(pl.LightningModule):
""" Pytorch lightning module for the SED 2021 baseline
Args:
hparams: dict, the dictionnary to be used for the current experiment/
encoder: ManyHotEncoder object, object to encode and decode labels.
sed_student: torch.Module, the student model to be trained. The teacher model will be
opt: torch.optimizer.Optimizer object, the optimizer to be used
train_data: torch.utils.data.Dataset subclass object, the training data to be used.
valid_data: torch.utils.data.Dataset subclass object, the validation data to be used.
test_data: torch.utils.data.Dataset subclass object, the test data to be used.
train_sampler: torch.utils.data.Sampler subclass object, the sampler to be used in the training dataloader.
scheduler: asteroid.engine.schedulers.BaseScheduler subclass object, the scheduler to be used. This is
used to apply ramp-up during training for example.
fast_dev_run: bool, whether to launch a run with only one batch for each set, this is for development purpose,
to test the code runs.
"""
    def __init__(
        self,
        hparams,
        encoder,
        sed_student,
        opt=None,
        train_data=None,
        valid_data=None,
        test_data=None,
        train_sampler=None,
        scheduler=None,
        fast_dev_run=False,
        evaluation=False
    ):
        """Build the co-training SED module.

        Sets up two student/teacher model pairs, their optimizers and
        schedulers, the mel-spectrogram front end, losses, weak-label F1
        metrics, the feature scaler, and the prediction buffers used for
        validation/test scoring. ``sed_student``, ``opt`` and ``scheduler``
        are 2-element sequences (one entry per co-trained model).
        """
        super(SEDTask4_2021, self).__init__()
        self.hparams = hparams
        # manual optimization: both models are stepped by hand in training_step
        self.automatic_optimization = False
        self.encoder = encoder
        # Each teacher starts as an exact copy of its student and is only
        # ever updated through the EMA in update_ema().
        self.sed_student1 = sed_student[0]
        self.sed_teacher1 = deepcopy(sed_student[0])
        self.sed_student2 = sed_student[1]
        self.sed_teacher2 = deepcopy(sed_student[1])
        self.opt1 = opt[0]
        self.opt2 = opt[1]
        self.train_data = train_data
        self.valid_data = valid_data
        self.test_data = test_data
        self.train_sampler = train_sampler
        self.scheduler1 = scheduler[0]
        self.scheduler2 = scheduler[1]
        self.fast_dev_run = fast_dev_run
        self.evaluation = evaluation
        if self.fast_dev_run:
            self.num_workers = 1
        else:
            self.num_workers = self.hparams["training"]["num_workers"]
        # add class_label: table of candidate label vectors used for
        # pseudo-label estimation in training_step.
        self.softmax = torch.nn.Softmax(dim=2)
        self.jsd = JSD()
        # NOTE(review): hard-coded .cuda() assumes a GPU is available — confirm.
        self.class_label = torch.tensor(cfg.class_label).cuda()
        feat_params = self.hparams["feats"]
        #self.lin_spec = LinearSpectrogram(nCh=128, n_fft=2048, hop_length=256, win_fn = torch.hamming_window)
        self.mel_spec = MelSpectrogram(
            sample_rate=feat_params["sample_rate"],
            n_fft=feat_params["n_window"],
            win_length=feat_params["n_window"],
            hop_length=feat_params["hop_length"],
            f_min=feat_params["f_min"],
            f_max=feat_params["f_max"],
            n_mels=feat_params["n_mels"],
            window_fn=torch.hamming_window,
            wkwargs={"periodic": False},
            power=1,
        )
        # Teachers are never trained by backprop: detach their parameters.
        for param in self.sed_teacher1.parameters():
            param.detach_()
        for param in self.sed_teacher2.parameters():
            param.detach_()
        # instantiating losses
        self.supervised_loss = torch.nn.BCELoss()
        if hparams["training"]["self_sup_loss"] == "mse":
            self.selfsup_loss = torch.nn.MSELoss()
        elif hparams["training"]["self_sup_loss"] == "bce":
            self.selfsup_loss = torch.nn.BCELoss()
        else:
            raise NotImplementedError
        # for weak labels we simply compute f1 score
        self.get_weak_student_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.get_weak_teacher_f1_seg_macro = pl.metrics.classification.F1(
            len(self.encoder.labels),
            average="macro",
            multilabel=True,
            compute_on_step=False,
        )
        self.scaler = self._init_scaler()
        # buffer for event based scores which we compute using sed-eval
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_student_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_test = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        test_n_thresholds = self.hparams["training"]["n_test_thresholds"]
        # Evenly spaced operating points in (0, 1) for PSDS computation.
        test_thresholds = np.arange(
            1 / (test_n_thresholds * 2), 1, 1 / test_n_thresholds
        )
        # Per-threshold prediction buffers for the two individual models and
        # for the averaged ("ensemble") student/teacher predictions.
        self.test_psds_buffer_student1 = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher1 = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_student2 = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher2 = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_student = {k: pd.DataFrame() for k in test_thresholds}
        self.test_psds_buffer_teacher = {k: pd.DataFrame() for k in test_thresholds}
        # Buffers of events decoded at the fixed 0.5 threshold (for F1 metrics).
        self.decoded_student1_05_buffer = pd.DataFrame()
        self.decoded_teacher1_05_buffer = pd.DataFrame()
        self.decoded_student2_05_buffer = pd.DataFrame()
        self.decoded_teacher2_05_buffer = pd.DataFrame()
        self.decoded_student_05_buffer = pd.DataFrame()
        self.decoded_teacher_05_buffer = pd.DataFrame()
def update_ema(self, alpha, global_step, model, ema_model):
""" Update teacher model parameters
Args:
alpha: float, the factor to be used between each updated step.
global_step: int, the current global step to be used.
model: torch.Module, student model to use
ema_model: torch.Module, teacher model to use
"""
# Use the true average until the exponential average is more correct
alpha = min(1 - 1 / (global_step + 1), alpha)
for ema_params, params in zip(ema_model.parameters(), model.parameters()):
ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
    def _init_scaler(self):
        """Scaler initialization.

        Builds a TorchScaler according to hparams["scaler"]; for the
        "dataset" statistic the scaler is fitted on log-mel features from the
        training dataloader (or loaded from/saved to a checkpoint file).

        Raises:
            NotImplementedError: in case of not implemented scaler statistic.
        Returns:
            TorchScaler: returns the scaler
        """
        if self.hparams["scaler"]["statistic"] == "instance":
            # Per-instance min-max scaling needs no fitting.
            scaler = TorchScaler("instance", "minmax", self.hparams["scaler"]["dims"])
            return scaler
        elif self.hparams["scaler"]["statistic"] == "dataset":
            # we fit the scaler
            scaler = TorchScaler(
                "dataset",
                self.hparams["scaler"]["normtype"],
                self.hparams["scaler"]["dims"],
            )
        else:
            raise NotImplementedError
        # Reuse a previously fitted scaler if a checkpoint exists.
        if self.hparams["scaler"]["savepath"] is not None:
            if os.path.exists(self.hparams["scaler"]["savepath"]):
                scaler = torch.load(self.hparams["scaler"]["savepath"])
                print(
                    "Loaded Scaler from previous checkpoint from {}".format(
                        self.hparams["scaler"]["savepath"]
                    )
                )
                return scaler
        # Fit on the training set, transforming each batch to log-mels first.
        self.train_loader = self.train_dataloader()
        scaler.fit(
            self.train_loader,
            transform_func=lambda x: self.take_log(self.mel_spec(x[0])),
        )
        if self.hparams["scaler"]["savepath"] is not None:
            torch.save(scaler, self.hparams["scaler"]["savepath"])
            print(
                "Saving Scaler from previous checkpoint at {}".format(
                    self.hparams["scaler"]["savepath"]
                )
            )
        return scaler
def take_log(self, mels):
""" Apply the log transformation to mel spectrograms.
Args:
mels: torch.Tensor, mel spectrograms for which to apply log.
Returns:
Tensor: logarithmic mel spectrogram of the mel spectrogram given as input
"""
amp_to_db = AmplitudeToDB(stype="amplitude")
amp_to_db.amin = 1e-5 # amin= 1e-5 as in librosa
return amp_to_db(mels).clamp(min=-50, max=80) # clamp to reproduce old code
    def training_step(self, batch, batch_indx, optimizer_idx):
        """Apply the training for one batch (a step). Used during trainer.fit.

        Both co-trained models are optimized manually inside this single call
        (automatic_optimization is False), so ``optimizer_idx`` is unused.

        Args:
            batch: torch.Tensor, batch input tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
            optimizer_idx: int, optimizer index (unused, manual optimization).
        Returns:
            dict, the total losses of the two students.
        """
        audio, labels, padded_indxs = batch
        # The batch is laid out as [synthetic strong | weak | unlabelled].
        indx_synth, indx_weak, indx_unlabelled = self.hparams["training"]["batch_size"]
        features = self.mel_spec(audio)
        batch_num = features.shape[0]
        # deriving masks for each dataset
        strong_mask = torch.zeros(batch_num).to(features).bool()
        weak_mask = torch.zeros(batch_num).to(features).bool()
        strong_mask[:indx_synth] = 1
        weak_mask[indx_synth : indx_weak + indx_synth] = 1
        # deriving weak labels: a class is weakly present if any frame is active
        labels_weak = (torch.sum(labels[weak_mask], -1) > 0).float()
        mixup_type = self.hparams["training"].get("mixup")
        # Apply mixup with probability 0.5 when enabled.
        if mixup_type is not None and 0.5 > random.random():
            features[weak_mask], labels_weak = mixup(
                features[weak_mask], labels_weak, mixup_label_type=mixup_type
            )
            features[strong_mask], labels[strong_mask] = mixup(
                features[strong_mask], labels[strong_mask], mixup_label_type=mixup_type
            )
        # perturbation: build a second, frame-shifted view of the batch so the
        # two models (and the cross teachers) see different augmentations.
        ori_features = self.scaler(self.take_log(features))
        ema_features = ori_features.clone().detach()
        ema_labels = labels.clone().detach()
        ema_features, ema_labels = frame_shift(ema_features, ema_labels)
        ema_labels_weak = (torch.sum(ema_labels[weak_mask], -1) > 0).float()
        # sed students forward: student1 gets the original view, student2 the
        # shifted view.
        strong_preds_student1, weak_preds_student1 = self.sed_student1(ori_features)
        strong_preds_student2, weak_preds_student2 = self.sed_student2(ema_features)
        # supervised loss on strong labels
        loss_strong1 = self.supervised_loss(
            strong_preds_student1[strong_mask], labels[strong_mask]
        )
        loss_strong2 = self.supervised_loss(
            strong_preds_student2[strong_mask], ema_labels[strong_mask]
        )
        # supervised loss on weakly labelled
        loss_weak1 = self.supervised_loss(weak_preds_student1[weak_mask], labels_weak)
        loss_weak2 = self.supervised_loss(weak_preds_student2[weak_mask], ema_labels_weak)
        # total supervised loss
        tot_loss_supervised1 = loss_strong1 + loss_weak1
        tot_loss_supervised2 = loss_strong2 + loss_weak2
        with torch.no_grad():
            # Teachers see the opposite view of their own student.
            strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(ema_features)
            strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(ori_features)
            # The block below estimates soft pseudo-labels by scoring, from
            # both teachers' frame-wise posteriors, every label combination
            # with at most 3 simultaneously active classes (K = 0..3), in the
            # log domain (p_h1 = log p(active), p_h0 = log p(inactive)).
            # NOTE(review): 156 looks like the number of output frames per
            # clip and 24 (used below) like indx_synth + indx_weak — confirm
            # both against the config; they are hard-coded here.
            nClass = self.hparams['net']['nclass']
            sp1 = torch.clamp(strong_preds_teacher1, 1.0e-4, 1-1.0e-4)
            p1_h1 = torch.log(sp1.permute(0,2,1))
            p1_h0 = torch.log(1-sp1.permute(0,2,1))
            sp2 = torch.clamp(strong_preds_teacher2, 1.0e-4, 1-1.0e-4)
            p2_h1 = torch.log(sp2.permute(0,2,1))
            p2_h0 = torch.log(1-sp2.permute(0,2,1))
            # Concatenate both teachers along the frame axis so one pass
            # scores both views (split again below at frame 156).
            p_h0 = torch.cat((p1_h0, p2_h0), 1)
            p_h1 = torch.cat((p1_h1, p2_h1), 1)
            # K = 0: log-probability that no class is active
            P0 = p_h0.sum(2)
            # K = 1: exactly one class active
            P1 = P0[:,:,None] + p_h1 - p_h0
            #P = torch.cat([P0.reshape(157,1), P1], 1)
            # K = 2: every unordered pair of classes
            P2 = []
            for cter in range(1,nClass):
                P2.append(P1[:,:,:-cter]+P1[:,:,cter:])
            P2 = torch.cat(P2, 2)
            P2 = P2 - P0[:,:,None]
            #P = torch.cat([P0.reshape(156*2,1), P1, P2], 1)
            # K: up to 3 — every unordered triple of classes
            P3 = []
            for cter1 in range(1,nClass):
                for cter2 in range(1,nClass-cter1):
                    P3.append(P1[:,:,:-(cter1+cter2)]+P1[:,:,cter1:-cter2]+P1[:,:,(cter1+cter2):])
            P3 = torch.cat(P3,2)
            P3 = P3 - 2*P0[:,:,None]
            P = torch.cat([P0.reshape(batch_num,156*2,1), P1, P2, P3], 2)
            # Normalize combination scores into a distribution per frame.
            P = self.softmax(P)
            prob_v, prob_i = torch.sort(P, dim=2, descending=True)
            # NOTE(review): despite sorting, all combinations are kept here
            # ([:,:,:] slices), so norm_p re-normalizes over everything.
            norm_p = prob_v[:,:,:].sum(2)
            prob_v = prob_v[:,:,:]/norm_p[:,:,None]
            # Map each combination index to its multi-hot label vector.
            cl = self.class_label[prob_i[:,:,:].tolist(),:]
            # probability-weighted average of the candidate label vectors
            cl = torch.mul(cl, prob_v[:,:,:,None]).sum(2)
            # Split back into the two views (first 156 frames = teacher1).
            est_strong_target1 = torch.squeeze(cl[:,:156,:]).float()
            est_strong_target2 = torch.squeeze(cl[:,156:,:]).float()
            est_strong_target1 = est_strong_target1.permute((0,2,1)) # for ema_feature
            est_strong_target2 = est_strong_target2.permute((0,2,1)) # for ori_feature
            est_weak_target1 = est_strong_target1.mean(2)
            est_weak_target2 = est_strong_target2.mean(2)
            # Teacher losses are only for monitoring (logging is commented out
            # below); they do not contribute to optimization.
            loss_strong_teacher1 = self.supervised_loss(
                strong_preds_teacher1[strong_mask], ema_labels[strong_mask]
            )
            loss_strong_teacher2 = self.supervised_loss(
                strong_preds_teacher2[strong_mask], labels[strong_mask]
            )
            loss_weak_teacher1 = self.supervised_loss(
                weak_preds_teacher1[weak_mask], ema_labels_weak
            )
            loss_weak_teacher2 = self.supervised_loss(
                weak_preds_teacher2[weak_mask], labels_weak
            )
        # we apply consistency between the predictions, use the scheduler for learning rate (to be changed ?)
        weight1 = (
            self.hparams["training"]["const_max"]
            * self.scheduler1["scheduler"]._get_scaling_factor()
        )
        weight2 = (
            self.hparams["training"]["const_max"]
            * self.scheduler2["scheduler"]._get_scaling_factor()
        )
        # Reliability of each teacher's pseudo-labels, measured as 1 - JSD
        # against the ground-truth labels on the labelled part of the batch.
        strong_reliability1 = weight1*(1-self.jsd(est_strong_target1[strong_mask], ema_labels[strong_mask]))
        strong_reliability2 = weight2*(1-self.jsd(est_strong_target2[strong_mask], labels[strong_mask]))
        weak_reliability1 = weight1*(1-self.jsd(est_weak_target1[weak_mask], ema_labels_weak))
        weak_reliability2 = weight2*(1-self.jsd(est_weak_target2[weak_mask], labels_weak))
        # Cross self-supervision on the unlabelled tail of the batch:
        # student1 (original view) learns from teacher2's pseudo-labels and
        # vice versa. NOTE(review): the [24:] slice hard-codes the start of
        # the unlabelled clips — TODO confirm it matches the batch sizes.
        strong_self_sup_loss1 = self.selfsup_loss(
            strong_preds_student1[24:], est_strong_target2[24:] # for ori_feature
        )
        strong_self_sup_loss2 = self.selfsup_loss(
            strong_preds_student2[24:], est_strong_target1[24:] # for ema_feature
        )
        weak_self_sup_loss1 = self.selfsup_loss(
            weak_preds_student1[weak_mask], est_weak_target2[weak_mask]
        )
        weak_self_sup_loss2 = self.selfsup_loss(
            weak_preds_student2[weak_mask], est_weak_target1[weak_mask]
        )
        # Self-supervised terms weighted by the other teacher's reliability.
        tot_self_loss1 = strong_reliability2*strong_self_sup_loss1 + weak_reliability2*weak_self_sup_loss1
        tot_self_loss2 = strong_reliability1*strong_self_sup_loss2 + weak_reliability1*weak_self_sup_loss2
        tot_loss1 = tot_loss_supervised1 + tot_self_loss1
        tot_loss2 = tot_loss_supervised2 + tot_self_loss2
        #self.log("train/student/loss_strong1", loss_strong1)
        #self.log("train/student/loss_weak1", loss_weak1)
        #self.log("train/student/loss_strong2", loss_strong2)
        #self.log("train/student/loss_weak2", loss_weak2)
        #self.log("train/teacher/loss_strong1", loss_strong_teacher1)
        #self.log("train/teacher/loss_weak1", loss_weak_teacher1)
        #self.log("train/teacher/loss_strong2", loss_strong_teacher2)
        #self.log("train/teacher/loss_weak2", loss_weak_teacher2)
        self.log("train/step1", self.scheduler1["scheduler"].step_num, prog_bar=True)
        self.log("train/step2", self.scheduler2["scheduler"].step_num, prog_bar=True)
        self.log("train/student/tot_loss1", tot_loss1, prog_bar=True)
        self.log("train/student/tot_loss2", tot_loss2, prog_bar=True)
        self.log("train/strong_reliability1", strong_reliability1, prog_bar=True)
        self.log("train/strong_reliability2", strong_reliability2, prog_bar=True)
        #self.log("train/student/tot_self_loss1", tot_self_loss1, prog_bar=True)
        #self.log("train/student/weak_self_sup_loss1", weak_self_sup_loss1)
        #self.log("train/student/strong_self_sup_loss1", strong_self_sup_loss1)
        #self.log("train/student/tot_self_loss2", tot_self_loss2, prog_bar=True)
        #self.log("train/student/weak_self_sup_loss2", weak_self_sup_loss2)
        #self.log("train/student/strong_self_sup_loss2", strong_self_sup_loss2)
        self.log("train/lr1", self.opt1.param_groups[-1]["lr"], prog_bar=True)
        self.log("train/lr2", self.opt2.param_groups[-1]["lr"], prog_bar=True)
        # update EMA teacher
        self.update_ema(
            self.hparams["training"]["ema_factor"],
            self.scheduler1["scheduler"].step_num,
            self.sed_student1,
            self.sed_teacher1,
        )
        self.update_ema(
            self.hparams["training"]["ema_factor"],
            self.scheduler2["scheduler"].step_num,
            self.sed_student2,
            self.sed_teacher2,
        )
        # training Model I
        self.opt1.zero_grad()
        self.manual_backward(tot_loss1, self.opt1)
        self.opt1.step()
        # training Model II
        self.opt2.zero_grad()
        self.manual_backward(tot_loss2, self.opt2)
        self.opt2.step()
        return {'tot_loss1': tot_loss1, 'tot_loss2': tot_loss2}
    def validation_step(self, batch, batch_indx):
        """Apply validation to a batch (step). Used during trainer.fit.

        Computes ensemble (averaged) student/teacher predictions, logs losses
        on the weak and synthetic subsets, accumulates weak F1 metrics and
        appends decoded events to the per-threshold validation buffers.

        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        features = self.mel_spec(audio)
        #features2 = self.lin_spec(audio)
        #features = torch.cat([features1, features2], 1)
        logmels = self.scaler(self.take_log(features))
        # prediction for student: average the two co-trained students
        strong_preds_student1, weak_preds_student1 = self.sed_student1(logmels)
        strong_preds_student2, weak_preds_student2 = self.sed_student2(logmels)
        strong_preds_student = (strong_preds_student1 + strong_preds_student2)/2
        weak_preds_student = (weak_preds_student1 + weak_preds_student2)/2
        # prediction for teacher: average the two teachers
        strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(logmels)
        strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(logmels)
        strong_preds_teacher = (strong_preds_teacher1 + strong_preds_teacher2)/2
        weak_preds_teacher = (weak_preds_teacher1 + weak_preds_teacher2)/2
        # we derive masks for each dataset based on folders of filenames
        mask_weak = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["weak_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        mask_synth = (
            torch.tensor(
                [
                    str(Path(x).parent)
                    == str(Path(self.hparams["data"]["synth_val_folder"]))
                    for x in filenames
                ]
            )
            .to(audio)
            .bool()
        )
        # Weakly-labelled subset: clip-level loss and F1 accumulation.
        if torch.any(mask_weak):
            labels_weak = (torch.sum(labels[mask_weak], -1) >= 1).float()
            loss_weak_student = self.supervised_loss(
                weak_preds_student[mask_weak], labels_weak
            )
            loss_weak_teacher = self.supervised_loss(
                weak_preds_teacher[mask_weak], labels_weak
            )
            self.log("val/weak/student/loss_weak", loss_weak_student)
            self.log("val/weak/teacher/loss_weak", loss_weak_teacher)
            # accumulate f1 score for weak labels
            self.get_weak_student_f1_seg_macro(
                weak_preds_student[mask_weak], labels_weak
            )
            self.get_weak_teacher_f1_seg_macro(
                weak_preds_teacher[mask_weak], labels_weak
            )
        # Synthetic subset: frame-level loss plus event decoding into the
        # per-threshold buffers consumed by validation_epoch_end.
        if torch.any(mask_synth):
            loss_strong_student = self.supervised_loss(
                strong_preds_student[mask_synth], labels[mask_synth]
            )
            loss_strong_teacher = self.supervised_loss(
                strong_preds_teacher[mask_synth], labels[mask_synth]
            )
            self.log("val/synth/student/loss_strong", loss_strong_student)
            self.log("val/synth/teacher/loss_strong", loss_strong_teacher)
            filenames_synth = [
                x
                for x in filenames
                if Path(x).parent == Path(self.hparams["data"]["synth_val_folder"])
            ]
            decoded_student_strong = batched_decode_preds(
                strong_preds_student[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_student_synth.keys()),
            )
            for th in self.val_buffer_student_synth.keys():
                self.val_buffer_student_synth[th] = self.val_buffer_student_synth[
                    th
                ].append(decoded_student_strong[th], ignore_index=True)
            decoded_teacher_strong = batched_decode_preds(
                strong_preds_teacher[mask_synth],
                filenames_synth,
                self.encoder,
                median_filter=self.hparams["training"]["median_window"],
                thresholds=list(self.val_buffer_teacher_synth.keys()),
            )
            for th in self.val_buffer_teacher_synth.keys():
                self.val_buffer_teacher_synth[th] = self.val_buffer_teacher_synth[
                    th
                ].append(decoded_teacher_strong[th], ignore_index=True)
        return
    def validation_epoch_end(self, outputs):
        """Function applied at the end of all the validation steps of the epoch.

        Computes the epoch-level weak F1 and synthetic-set metrics, logs them,
        and resets the validation buffers/metrics for the next epoch.

        Args:
            outputs: torch.Tensor, the concatenation of everything returned by validation_step.
        Returns:
            torch.Tensor, the objective metric to be used to choose the best model from for example.
        """
        weak_student_f1_macro = self.get_weak_student_f1_seg_macro.compute()
        weak_teacher_f1_macro = self.get_weak_teacher_f1_seg_macro.compute()
        # synth dataset
        intersection_f1_macro_student = compute_per_intersection_macro_f1(
            self.val_buffer_student_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_student_event_macro = log_sedeval_metrics(
            self.val_buffer_student_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
            self.val_buffer_teacher_synth,
            self.hparams["data"]["synth_val_tsv"],
            self.hparams["data"]["synth_val_dur"],
        )
        synth_teacher_event_macro = log_sedeval_metrics(
            self.val_buffer_teacher_synth[0.5], self.hparams["data"]["synth_val_tsv"],
        )[0]
        # Choose which synthetic-set metric enters the model-selection
        # objective (defaults to intersection F1).
        obj_metric_synth_type = self.hparams["training"].get("obj_metric_synth_type")
        if obj_metric_synth_type is None:
            synth_metric = intersection_f1_macro_student
        elif obj_metric_synth_type == "event":
            synth_metric = synth_student_event_macro
        elif obj_metric_synth_type == "intersection":
            synth_metric = intersection_f1_macro_student
        else:
            raise NotImplementedError(
                f"obj_metric_synth_type: {obj_metric_synth_type} not implemented."
            )
        # Objective = student weak F1 + student synthetic-set metric.
        obj_metric = torch.tensor(weak_student_f1_macro.item() + synth_metric)
        self.log("val/obj_metric", obj_metric, prog_bar=True)
        self.log("val/weak/student/macro_F1", weak_student_f1_macro)
        self.log("val/weak/teacher/macro_F1", weak_teacher_f1_macro)
        self.log(
            "val/synth/student/intersection_f1_macro", intersection_f1_macro_student
        )
        self.log(
            "val/synth/teacher/intersection_f1_macro", intersection_f1_macro_teacher
        )
        self.log("val/synth/student/event_f1_macro", synth_student_event_macro)
        self.log("val/synth/teacher/event_f1_macro", synth_teacher_event_macro)
        # free the buffers
        self.val_buffer_student_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.val_buffer_teacher_synth = {
            k: pd.DataFrame() for k in self.hparams["training"]["val_thresholds"]
        }
        self.get_weak_student_f1_seg_macro.reset()
        self.get_weak_teacher_f1_seg_macro.reset()
        return obj_metric
def on_save_checkpoint(self, checkpoint):
checkpoint["sed_student1"] = self.sed_student1.state_dict()
checkpoint["sed_teacher1"] = self.sed_teacher1.state_dict()
checkpoint["sed_student2"] = self.sed_student2.state_dict()
checkpoint["sed_teacher2"] = self.sed_teacher2.state_dict()
return checkpoint
    def test_step(self, batch, batch_indx):
        """Apply Test to a batch (step), used only when trainer.test is called.

        Always dumps per-clip ensemble posteriors to ./Posterior/, decodes
        events into the PSDS and 0.5-threshold buffers, and (when ground
        truth is available, i.e. not in evaluation mode) logs test losses.

        Args:
            batch: torch.Tensor, input batch tensor
            batch_indx: torch.Tensor, 1D tensor of indexes to know which data are present in each batch.
        Returns:
        """
        audio, labels, padded_indxs, filenames = batch
        features = self.mel_spec(audio)
        #features2 = self.lin_spec(audio)
        #features = torch.cat([features1, features2], 1)
        # prediction for student: average the two co-trained students
        logmels = self.scaler(self.take_log(features))
        strong_preds_student1, weak_preds_student1 = self.sed_student1(logmels)
        strong_preds_student2, weak_preds_student2 = self.sed_student2(logmels)
        strong_preds_student = (strong_preds_student1 + strong_preds_student2)/2
        weak_preds_student = (weak_preds_student1 + weak_preds_student2)/2
        # prediction for teacher: average the two teachers
        strong_preds_teacher1, weak_preds_teacher1 = self.sed_teacher1(logmels)
        strong_preds_teacher2, weak_preds_teacher2 = self.sed_teacher2(logmels)
        strong_preds_teacher = (strong_preds_teacher1 + strong_preds_teacher2)/2
        weak_preds_teacher = (weak_preds_teacher1 + weak_preds_teacher2)/2
        # Dump frame-level ensemble posteriors for later offline fusion.
        # NOTE(review): assumes ./Posterior/student and ./Posterior/teacher
        # already exist — confirm they are created before trainer.test runs.
        bsz = len(filenames)
        for bter in range(bsz):
            path, filename = os.path.split(filenames[bter])
            pred_student = strong_preds_student[bter].cpu().numpy()
            pred_teacher = strong_preds_teacher[bter].cpu().numpy()
            np.save('./Posterior/student/{}.npy'.format(filename), pred_student)
            np.save('./Posterior/teacher/{}.npy'.format(filename), pred_teacher)
        # Losses require ground-truth labels, so skip them in evaluation mode.
        if not self.evaluation:
            loss_strong_student1 = self.supervised_loss(strong_preds_student1, labels)
            loss_strong_student2 = self.supervised_loss(strong_preds_student2, labels)
            loss_strong_student = self.supervised_loss(strong_preds_student, labels)
            loss_strong_teacher1 = self.supervised_loss(strong_preds_teacher1, labels)
            loss_strong_teacher2 = self.supervised_loss(strong_preds_teacher2, labels)
            loss_strong_teacher = self.supervised_loss(strong_preds_teacher, labels)
            # self.log("test/student1/loss_strong", loss_strong_student1)
            # self.log("test/student2/loss_strong", loss_strong_student2)
            self.log("test/student/loss_strong", loss_strong_student)
            # self.log("test/teacher1/loss_strong", loss_strong_teacher1)
            # self.log("test/teacher2/loss_strong", loss_strong_teacher2)
            self.log("test/teacher/loss_strong", loss_strong_teacher)
        # compute psds: decode every model variant at all operating points and
        # append to its per-threshold buffer (consumed in on_test_epoch_end).
        decoded_student1_strong = batched_decode_preds(
            strong_preds_student1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student1.keys()),
        )
        for th in self.test_psds_buffer_student1.keys():
            self.test_psds_buffer_student1[th] = self.test_psds_buffer_student1[
                th
            ].append(decoded_student1_strong[th], ignore_index=True)
        decoded_student2_strong = batched_decode_preds(
            strong_preds_student2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student2.keys()),
        )
        for th in self.test_psds_buffer_student2.keys():
            self.test_psds_buffer_student2[th] = self.test_psds_buffer_student2[
                th
            ].append(decoded_student2_strong[th], ignore_index=True)
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_student.keys()),
        )
        for th in self.test_psds_buffer_student.keys():
            self.test_psds_buffer_student[th] = self.test_psds_buffer_student[
                th
            ].append(decoded_student_strong[th], ignore_index=True)
        decoded_teacher1_strong = batched_decode_preds(
            strong_preds_teacher1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher1.keys()),
        )
        for th in self.test_psds_buffer_teacher1.keys():
            self.test_psds_buffer_teacher1[th] = self.test_psds_buffer_teacher1[
                th
            ].append(decoded_teacher1_strong[th], ignore_index=True)
        decoded_teacher2_strong = batched_decode_preds(
            strong_preds_teacher2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher2.keys()),
        )
        for th in self.test_psds_buffer_teacher2.keys():
            self.test_psds_buffer_teacher2[th] = self.test_psds_buffer_teacher2[
                th
            ].append(decoded_teacher2_strong[th], ignore_index=True)
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=list(self.test_psds_buffer_teacher.keys()),
        )
        for th in self.test_psds_buffer_teacher.keys():
            self.test_psds_buffer_teacher[th] = self.test_psds_buffer_teacher[
                th
            ].append(decoded_teacher_strong[th], ignore_index=True)
        # compute f1 score: decode again at the fixed 0.5 threshold.
        decoded_student1_strong = batched_decode_preds(
            strong_preds_student1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student1_05_buffer = self.decoded_student1_05_buffer.append(
            decoded_student1_strong[0.5]
        )
        decoded_student2_strong = batched_decode_preds(
            strong_preds_student2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student2_05_buffer = self.decoded_student2_05_buffer.append(
            decoded_student2_strong[0.5]
        )
        decoded_student_strong = batched_decode_preds(
            strong_preds_student,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_student_05_buffer = self.decoded_student_05_buffer.append(
            decoded_student_strong[0.5]
        )
        decoded_teacher1_strong = batched_decode_preds(
            strong_preds_teacher1,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher1_05_buffer = self.decoded_teacher1_05_buffer.append(
            decoded_teacher1_strong[0.5]
        )
        decoded_teacher2_strong = batched_decode_preds(
            strong_preds_teacher2,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher2_05_buffer = self.decoded_teacher2_05_buffer.append(
            decoded_teacher2_strong[0.5]
        )
        decoded_teacher_strong = batched_decode_preds(
            strong_preds_teacher,
            filenames,
            self.encoder,
            median_filter=self.hparams["training"]["median_window"],
            thresholds=[0.5],
        )
        self.decoded_teacher_05_buffer = self.decoded_teacher_05_buffer.append(
            decoded_teacher_strong[0.5]
        )
def on_test_epoch_end(self):
# pub eval dataset
try:
log_dir = self.logger.log_dir
except Exception as e:
log_dir = self.hparams["log_dir"]
save_dir = os.path.join(log_dir, "metrics_test")
if self.evaluation:
# only save the predictions
save_dir_student = os.path.join(save_dir, "student")
os.makedirs(save_dir_student, exist_ok=True)
self.decoded_student_05_buffer.to_csv(
os.path.join(save_dir_student, f"predictions_05_student.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_student, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for student saved in: {save_dir_student}")
save_dir_teacher = os.path.join(save_dir, "teacher")
os.makedirs(save_dir_teacher, exist_ok=True)
self.decoded_teacher_05_buffer.to_csv(
os.path.join(save_dir_teacher, f"predictions_05_teacher.tsv"),
sep="\t",
index=False
)
for k in self.test_psds_buffer_student.keys():
self.test_psds_buffer_student[k].to_csv(
os.path.join(save_dir_teacher, f"predictions_th_{k:.2f}.tsv"),
sep="\t",
index=False,
)
print(f"\nPredictions for teacher saved in: {save_dir_teacher}")
else:
# calculate the metrics
psds_score_student1_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student1", "scenario1"),
)
psds_score_student1_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student1", "scenario2"),
)
psds_score_student2_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student2", "scenario1"),
)
psds_score_student2_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student2", "scenario2"),
)
psds_score_student_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario1"),
)
psds_score_student_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_student,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "student", "scenario2"),
)
psds_score_teacher1_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher1", "scenario1"),
)
psds_score_teacher1_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher1,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher1", "scenario2"),
)
psds_score_teacher2_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher2", "scenario1"),
)
psds_score_teacher2_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher2,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher2", "scenario2"),
)
psds_score_teacher_scenario1 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.7,
gtc_threshold=0.7,
alpha_ct=0,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario1"),
)
psds_score_teacher_scenario2 = compute_psds_from_operating_points(
self.test_psds_buffer_teacher,
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
dtc_threshold=0.1,
gtc_threshold=0.1,
cttc_threshold=0.3,
alpha_ct=0.5,
alpha_st=1,
save_dir=os.path.join(save_dir, "teacher", "scenario2"),
)
event_macro_student1 = log_sedeval_metrics(
self.decoded_student1_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student1"),
)[0]
event_macro_student2 = log_sedeval_metrics(
self.decoded_student2_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student2"),
)[0]
event_macro_student = log_sedeval_metrics(
self.decoded_student_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "student"),
)[0]
event_macro_teacher1 = log_sedeval_metrics(
self.decoded_teacher1_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher1"),
)[0]
event_macro_teacher2 = log_sedeval_metrics(
self.decoded_teacher2_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher2"),
)[0]
event_macro_teacher = log_sedeval_metrics(
self.decoded_teacher_05_buffer,
self.hparams["data"]["test_tsv"],
os.path.join(save_dir, "teacher"),
)[0]
# synth dataset
intersection_f1_macro_student1 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student1_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher1 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher1_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_student2 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student2_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher2 = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher2_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_student = compute_per_intersection_macro_f1(
{"0.5": self.decoded_student_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
# synth dataset
intersection_f1_macro_teacher = compute_per_intersection_macro_f1(
{"0.5": self.decoded_teacher_05_buffer},
self.hparams["data"]["test_tsv"],
self.hparams["data"]["test_dur"],
)
best_test_result1 = torch.tensor(max(psds_score_student1_scenario1, psds_score_student1_scenario2))
best_test_result2 = torch.tensor(max(psds_score_student2_scenario1, psds_score_student2_scenario2))
best_test_result = torch.tensor(max(psds_score_student_scenario1, psds_score_student_scenario2))
results = {
"hp_metric": best_test_result,
"test/student/psds_score_scenario1": psds_score_student_scenario1,
"test/student/psds_score_scenario2": psds_score_student_scenario2,
"test/teacher/psds_score_scenario1": psds_score_teacher_scenario1,
"test/teacher/psds_score_scenario2": psds_score_teacher_scenario2,
"test/student/event_f1_macro": event_macro_student,
"test/student/intersection_f1_macro": intersection_f1_macro_student,
"test/teacher/event_f1_macro": event_macro_teacher,
"test/teacher/intersection_f1_macro": intersection_f1_macro_teacher,
#"hp_metric_I": best_test_result1,
#"test/student1/psds_score_scenario1": psds_score_student1_scenario1,
#"test/student1/psds_score_scenario2": psds_score_student1_scenario2,
#"test/teacher1/psds_score_scenario1": psds_score_teacher1_scenario1,
#"test/teacher1/psds_score_scenario2": psds_score_teacher1_scenario2,
#"test/student1/event_f1_macro": event_macro_student1,
#"test/student1/intersection_f1_macro": intersection_f1_macro_student1,
#"test/teacher1/event_f1_macro": event_macro_teacher1,
#"test/teacher1/intersection_f1_macro": intersection_f1_macro_teacher1,
#"hp_metric_II": best_test_result2,
#"test/student2/psds_score_scenario1": psds_score_student2_scenario1,
#"test/student2/psds_score_scenario2": psds_score_student2_scenario2,
#"test/teacher2/psds_score_scenario1": psds_score_teacher2_scenario1,
#"test/teacher2/psds_score_scenario2": psds_score_teacher2_scenario2,
#"test/student2/event_f1_macro": event_macro_student2,
#"test/student2/intersection_f1_macro": intersection_f1_macro_student2,
#"test/teacher2/event_f1_macro": event_macro_teacher2,
#"test/teacher2/intersection_f1_macro": intersection_f1_macro_teacher2,
}
if self.logger is not None:
self.logger.log_metrics(results)
self.logger.log_hyperparams(self.hparams, results)
for key in results.keys():
self.log(key, results[key], prog_bar=True, logger=False)
def configure_optimizers(self):
    """Return the two optimizers with their paired schedulers (Lightning-style hook)."""
    optimizers = [self.opt1, self.opt2]
    schedulers = [self.scheduler1, self.scheduler2]
    return optimizers, schedulers
def train_dataloader(self):
    """Build the training DataLoader (driven by the batch sampler) and cache it on self."""
    loader = torch.utils.data.DataLoader(
        self.train_data,
        batch_sampler=self.train_sampler,
        num_workers=self.num_workers,
    )
    self.train_loader = loader
    return loader
def val_dataloader(self):
    """Build the sequential (no shuffle, keep last) validation DataLoader and cache it."""
    loader = torch.utils.data.DataLoader(
        self.valid_data,
        batch_size=self.hparams["training"]["batch_size_val"],
        num_workers=self.num_workers,
        shuffle=False,
        drop_last=False,
    )
    self.val_loader = loader
    return loader
def test_dataloader(self):
self.test_loader = torch.utils.data.DataLoader(
self.test_data,
batch_size=self.hparams["training"]["batch_size_val"],
num_workers=self.num_workers,
shuffle=False,
drop_last=False,
)
return self.test_loader
| 48,990 | 39.757903 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/CNN.py | import torch.nn as nn
import torch
import math
import torch.nn.functional as F
class GLU(nn.Module):
    """Gated Linear Unit over the channel axis of (B, C, H, W) feature maps.

    Output = Linear(x) * sigmoid(x); the linear layer mixes channels.
    """

    def __init__(self, input_num):
        super(GLU, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Move channels last so nn.Linear acts on them, then restore NCHW layout.
        gate = self.sigmoid(x)
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return projected * gate
class ContextGating(nn.Module):
    """Context gating: scale the input by a sigmoid of a channel-mixing projection."""

    def __init__(self, input_num):
        super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Channels-last for the linear projection, back to NCHW afterwards.
        projected = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x * self.sigmoid(projected)
return res
class BasicConv(nn.Module):
    """Conv2d optionally followed by BatchNorm2d and ReLU (each toggled by a flag)."""

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, relu=True, bn=True, bias=False):
        super(BasicConv, self).__init__()
        self.out_channels = out_planes
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, dilation=dilation,
                              groups=groups, bias=bias)
        self.bn = nn.BatchNorm2d(out_planes, eps=1e-5, momentum=0.01, affine=True) if bn else None
        self.relu = nn.ReLU() if relu else None

    def forward(self, x):
        out = self.conv(x)
        if self.bn is not None:
            out = self.bn(out)
        if self.relu is not None:
            out = self.relu(out)
        return out
class Flatten(nn.Module):
    """Collapse every dimension after the batch axis into one."""

    def forward(self, x):
        batch = x.size(0)
        return x.view(batch, -1)
class ChannelGate(nn.Module):
    """Channel-attention gate (CBAM style): rescale channels with pooled descriptors.

    For each pooling flavor, a global (B, C) descriptor is pushed through a shared
    bottleneck MLP; the summed responses are squashed with a sigmoid and broadcast
    over H and W to scale the input.
    """

    def __init__(self, gate_channels, reduction_ratio=4, pool_types=['avg', 'max']):
        """
        Args:
            gate_channels: int, number of input channels C.
            reduction_ratio: int, bottleneck factor of the shared MLP (C -> C/r -> C).
            pool_types: list of str in {'avg', 'max', 'lp', 'lse'}.
        """
        super(ChannelGate, self).__init__()
        self.gate_channels = gate_channels
        # Shared bottleneck MLP applied to every pooled descriptor.
        self.mlp = nn.Sequential(
            Flatten(),
            nn.Linear(gate_channels, gate_channels // reduction_ratio),
            nn.ReLU(),
            nn.Linear(gate_channels // reduction_ratio, gate_channels)
        )
        self.pool_types = pool_types

    def forward(self, x):
        channel_att_sum = None
        for pool_type in self.pool_types:
            if pool_type == 'avg':
                avg_pool = F.avg_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(avg_pool)
            elif pool_type == 'max':
                max_pool = F.max_pool2d(x, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(max_pool)
            elif pool_type == 'lp':
                lp_pool = F.lp_pool2d(x, 2, (x.size(2), x.size(3)), stride=(x.size(2), x.size(3)))
                channel_att_raw = self.mlp(lp_pool)
            elif pool_type == 'lse':
                # LSE (log-sum-exp) pooling, a smooth approximation of max pooling.
                lse_pool = logsumexp_2d(x)
                channel_att_raw = self.mlp(lse_pool)
            else:
                # BUG FIX: an unknown pool type previously crashed later with an
                # opaque UnboundLocalError; fail with a clear message instead.
                raise ValueError("Unknown pool_type: {!r}".format(pool_type))
            if channel_att_sum is None:
                channel_att_sum = channel_att_raw
            else:
                channel_att_sum += channel_att_raw

        # FIX: torch.sigmoid replaces the long-deprecated F.sigmoid.
        scale = torch.sigmoid(channel_att_sum).unsqueeze(2).unsqueeze(3).expand_as(x)
        return x * scale
def logsumexp_2d(tensor):
    """Numerically stable log-sum-exp over all spatial positions, per channel.

    Input (B, C, H, W) -> output (B, C, 1); uses the max-shift trick for stability.
    """
    flat = tensor.view(tensor.size(0), tensor.size(1), -1)
    peak, _ = torch.max(flat, dim=2, keepdim=True)
    return peak + (flat - peak).exp().sum(dim=2, keepdim=True).log()
class ChannelPool(nn.Module):
    """Concatenate channel-wise max and mean maps: (B, C, H, W) -> (B, 2, H, W)."""

    def forward(self, x):
        max_map = torch.max(x, 1)[0].unsqueeze(1)
        mean_map = torch.mean(x, 1).unsqueeze(1)
        return torch.cat((max_map, mean_map), dim=1)
class SpatialGate(nn.Module):
    """Spatial-attention gate (CBAM style): rescale positions using a 2-channel summary map."""

    def __init__(self):
        super(SpatialGate, self).__init__()
        kernel_size = 7  # large receptive field for the spatial attention map
        self.compress = ChannelPool()
        # 2 -> 1 conv over the stacked max/mean maps; relu=False so the sigmoid
        # below sees raw (possibly negative) logits.
        self.spatial = BasicConv(2, 1, kernel_size, stride=1, padding=(kernel_size - 1) // 2, relu=False)

    def forward(self, x):
        x_compress = self.compress(x)
        x_out = self.spatial(x_compress)
        # FIX: torch.sigmoid replaces the long-deprecated F.sigmoid.
        scale = torch.sigmoid(x_out)
        return x * scale
class CBAM(nn.Module):
    """Convolutional Block Attention Module: channel gate, then optional spatial gate."""

    def __init__(self, gate_channels, reduction_ratio=4, pool_types=['avg', 'max'], no_spatial=False):
        super(CBAM, self).__init__()
        self.no_spatial = no_spatial
        self.ChannelGate = ChannelGate(gate_channels, reduction_ratio, pool_types)
        self.SpatialGate = None if no_spatial else SpatialGate()

    def forward(self, x):
        out = self.ChannelGate(x)
        if self.SpatialGate is not None:
            out = self.SpatialGate(out)
        return out
class ResidualConv(nn.Module):
    """Two BasicConv layers plus a 3x3 convolutional skip connection.

    NOTE(review): the stride/padding/dilation/groups/bn/bias arguments are accepted
    but not forwarded -- both convs hard-code padding=1 and the skip uses 3x3/s1/p1.
    Kept as-is to preserve behavior; confirm against callers before wiring them up.
    """

    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0,
                 dilation=1, groups=1, bn=True, bias=False):
        super(ResidualConv, self).__init__()
        self.conv1 = BasicConv(in_planes, out_planes, kernel_size, padding=1)
        self.conv2 = BasicConv(out_planes, out_planes, kernel_size, padding=1)
        self.skip = nn.Conv2d(in_planes, out_planes, [3, 3], [1, 1], [1, 1])

    def forward(self, x):
        residual = self.skip(x)
        out = self.conv2(self.conv1(x))
        return out + residual
class ResidualCNN(nn.Module):
    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        conv_dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        nb_filters=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch",
        **transformer_kwargs
    ):
        """
        Residual CNN front-end: two GLU-gated stem convolutions followed by five
        residual blocks, each with CBAM attention and average pooling.

        Args:
            n_in_channel: int, number of input channels
            activation: str, activation name (unused here; the stem always uses GLU)
            conv_dropout: float, dropout (unused in this variant)
            kernel_size: per-layer kernel sizes (needs at least 7 entries)
            padding: per-layer paddings
            stride: per-layer strides
            nb_filters: per-layer filter counts
            pooling: per-layer (time, freq) pooling factors
            normalization: normalization flavor (the stem always uses BatchNorm here)
        """
        super(ResidualCNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()

        # Stem: conv -> BN -> GLU -> avg-pool, twice (layers 0 and 1).
        stem_in = [n_in_channel, nb_filters[0]]
        for i in range(2):
            cnn.add_module(
                "conv{0}".format(i),
                nn.Conv2d(stem_in[i], nb_filters[i], kernel_size=kernel_size[i],
                          stride=stride[i], padding=padding[i]),
            )
            cnn.add_module("batchnorm{0}".format(i),
                           nn.BatchNorm2d(nb_filters[i], eps=0.001, momentum=0.99))
            cnn.add_module("glu{0}".format(i), GLU(nb_filters[i]))
            cnn.add_module("avgpool{0}".format(i), nn.AvgPool2d(pooling[i]))

        # Residual blocks with CBAM attention (layers 2..6).
        for i in range(2, 7):
            cnn.add_module(
                "conv{0}".format(i),
                ResidualConv(nb_filters[i - 1], nb_filters[i], kernel_size=kernel_size[i],
                             stride=stride[i], padding=padding[i]),
            )
            cnn.add_module("cbam{0}".format(i), CBAM(nb_filters[i]))
            cnn.add_module("avgpool{0}".format(i), nn.AvgPool2d(pooling[i]))

        self.cnn = cnn

    def forward(self, x):
        """
        Forward step of the CNN module

        Args:
            x (Tensor): input batch of size (batch_size, n_channels, n_frames, n_freq)

        Returns:
            Tensor: embedded batch
        """
        return self.cnn(x)
class CNN(nn.Module):
    def __init__(
        self,
        n_in_channel,
        activation="Relu",
        conv_dropout=0,
        kernel_size=[3, 3, 3],
        padding=[1, 1, 1],
        stride=[1, 1, 1],
        nb_filters=[64, 64, 64],
        pooling=[(1, 4), (1, 4), (1, 4)],
        normalization="batch",
        **transformer_kwargs
    ):
        """
        Plain CNN front-end: a stack of conv -> norm -> activation (-> dropout)
        layers, each followed by average pooling.

        Args:
            n_in_channel: int, number of input channels
            activation: str, one of {"relu", "leakyrelu", "glu", "cg"} (case-insensitive)
            conv_dropout: float, dropout probability applied after each activation
            kernel_size: per-layer kernel sizes
            padding: per-layer paddings
            stride: per-layer strides
            nb_filters: per-layer output-channel counts
            pooling: per-layer (time, freq) pooling factors
            normalization: "batch" for BatchNorm2d, "layer" for GroupNorm(1, C)
        """
        super(CNN, self).__init__()
        self.nb_filters = nb_filters
        cnn = nn.Sequential()

        def add_conv_layer(i, normalization="batch", dropout=None, activ="relu"):
            # One conv block: conv, then normalization, activation and dropout.
            in_ch = n_in_channel if i == 0 else nb_filters[i - 1]
            out_ch = nb_filters[i]
            cnn.add_module(
                "conv{0}".format(i),
                nn.Conv2d(in_ch, out_ch, kernel_size[i], stride[i], padding[i]),
            )
            if normalization == "batch":
                cnn.add_module(
                    "batchnorm{0}".format(i),
                    nn.BatchNorm2d(out_ch, eps=0.001, momentum=0.99),
                )
            elif normalization == "layer":
                cnn.add_module("layernorm{0}".format(i), nn.GroupNorm(1, out_ch))
            act = activ.lower()
            if act == "leakyrelu":
                cnn.add_module("relu{0}".format(i), nn.LeakyReLU(0.2))
            elif act == "relu":
                cnn.add_module("relu{0}".format(i), nn.ReLU())
            elif act == "glu":
                cnn.add_module("glu{0}".format(i), GLU(out_ch))
            elif act == "cg":
                cnn.add_module("cg{0}".format(i), ContextGating(out_ch))
            if dropout is not None:
                cnn.add_module("dropout{0}".format(i), nn.Dropout(dropout))

        # One conv block + pooling per entry of nb_filters.
        for i in range(len(nb_filters)):
            add_conv_layer(i, normalization=normalization, dropout=conv_dropout, activ=activation)
            cnn.add_module("pooling{0}".format(i), nn.AvgPool2d(pooling[i]))

        self.cnn = cnn

    def forward(self, x):
        """
        Forward step of the CNN module

        Args:
            x (Tensor): input batch of size (batch_size, n_channels, n_frames, n_freq)

        Returns:
            Tensor: embedded batch
        """
        return self.cnn(x)
| 11,753 | 35.616822 | 154 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/CRNN.py | import warnings
import torch.nn as nn
import torch
from .RNN import BidirectionalGRU
from .CNN import CNN, ResidualCNN
class RCRNN(nn.Module):
    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        train_cnn=True,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        """
        Residual CRNN for sound event detection: ResidualCNN front-end followed by
        a bidirectional GRU and a classification head with optional attention
        pooling for clip-level (weak) predictions.

        Args:
            n_in_channel: int, number of input channels
            nclass: int, number of classes
            attention: bool, add a softmax attention head for weak predictions
            activation: str, CNN activation function
            dropout: float, dropout used in the CNN and before the dense head
            train_cnn: bool, whether CNN weights are trainable
            rnn_type: str, RNN flavor (only "BGRU" is implemented)
            n_RNN_cell: int, hidden units per GRU direction
            n_layers_RNN: int, number of stacked GRU layers
            dropout_recurrent: float, dropout between recurrent layers
            cnn_integration: bool, run each input channel through the CNN separately
            freeze_bn: bool, freeze BatchNorm layers during training
            **kwargs: forwarded to the ResidualCNN front-end.
        """
        super(RCRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        self.freeze_bn = freeze_bn

        n_in_cnn = n_in_channel
        if cnn_integration:
            # Each channel goes through the CNN independently (batch is expanded).
            n_in_cnn = 1

        self.cnn = ResidualCNN(
            n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs
        )
        self.train_cnn = train_cnn
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False

        if rnn_type == "BGRU":
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                nb_in = nb_in * n_in_channel
            # BUG FIX: num_layers was hard-coded to 2, silently ignoring the
            # n_layers_RNN argument (CRNN honors it; the default is still 2).
            self.rnn = BidirectionalGRU(
                n_in=nb_in,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=n_layers_RNN,
            )
        else:
            # BUG FIX: the exception was created but never raised, leaving the
            # model without an RNN for unsupported rnn_type values.
            raise NotImplementedError("Only BGRU supported for CRNN for now")

        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()

        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, pad_mask=None):
        """
        Args:
            x: (batch, n_frames, n_freq), or a 4-D (batch, channels, freq, frames)
               tensor which is permuted to channels-first NCHW internally.
            pad_mask: optional bool mask excluding padded frames from attention.

        Returns:
            (strong, weak): frame-level predictions (bs, nclass, frames) and
            clip-level predictions (bs, nclass).
        """
        if len(x.shape) < 4:
            x = x.transpose(1, 2).unsqueeze(1)
        else:
            x = x.permute(0, 1, 3, 2)

        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])

        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            warnings.warn(
                f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq"
            )
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]

        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            if pad_mask is not None:
                sof = sof.masked_fill(pad_mask.transpose(1, 2), -1e30)  # mask attention
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong.transpose(1, 2), weak

    def train(self, mode=True):
        """
        Override the default train() to optionally freeze BatchNorm layers.
        """
        super(RCRNN, self).train(mode)
        if self.freeze_bn:
            print("Freezing Mean/Var of BatchNorm2D.")
            print("Freezing Weight/Bias of BatchNorm2D.")
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
class CRNN(nn.Module):
    def __init__(
        self,
        n_in_channel=1,
        nclass=10,
        attention=True,
        activation="glu",
        dropout=0.5,
        train_cnn=True,
        rnn_type="BGRU",
        n_RNN_cell=128,
        n_layers_RNN=2,
        dropout_recurrent=0,
        cnn_integration=False,
        freeze_bn=False,
        **kwargs,
    ):
        """
        CRNN for sound event detection: CNN front-end followed by a bidirectional
        GRU and a classification head with optional attention pooling for
        clip-level (weak) predictions.

        Args:
            n_in_channel: int, number of input channels
            nclass: int, number of classes
            attention: bool, add a softmax attention head for weak predictions
            activation: str, CNN activation function
            dropout: float, dropout used in the CNN and before the dense head
            train_cnn: bool, whether CNN weights are trainable
            rnn_type: str, RNN flavor (only "BGRU" is implemented)
            n_RNN_cell: int, hidden units per GRU direction
            n_layers_RNN: int, number of stacked GRU layers
            dropout_recurrent: float, dropout between recurrent layers
            cnn_integration: bool, run each input channel through the CNN separately
            freeze_bn: bool, freeze BatchNorm layers during training
            **kwargs: forwarded to the CNN front-end.
        """
        super(CRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        self.freeze_bn = freeze_bn

        n_in_cnn = n_in_channel
        if cnn_integration:
            # Each channel goes through the CNN independently (batch is expanded).
            n_in_cnn = 1

        self.cnn = CNN(
            n_in_channel=n_in_cnn, activation=activation, conv_dropout=dropout, **kwargs
        )
        self.train_cnn = train_cnn
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False

        if rnn_type == "BGRU":
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                nb_in = nb_in * n_in_channel
            self.rnn = BidirectionalGRU(
                n_in=nb_in,
                n_hidden=n_RNN_cell,
                dropout=dropout_recurrent,
                num_layers=n_layers_RNN,
            )
        else:
            # BUG FIX: the exception was created but never raised, leaving the
            # model without an RNN for unsupported rnn_type values.
            raise NotImplementedError("Only BGRU supported for CRNN for now")

        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell * 2, nclass)
        self.sigmoid = nn.Sigmoid()

        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell * 2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, pad_mask=None):
        """
        Args:
            x: (batch, n_frames, n_freq) spectrogram batch.
            pad_mask: optional bool mask excluding padded frames from attention.

        Returns:
            (strong, weak): frame-level predictions (bs, nclass, frames) and
            clip-level predictions (bs, nclass).
        """
        x = x.transpose(1, 2).unsqueeze(1)

        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])

        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            warnings.warn(
                f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq"
            )
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]

        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            if pad_mask is not None:
                sof = sof.masked_fill(pad_mask.transpose(1, 2), -1e30)  # mask attention
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong.transpose(1, 2), weak

    def train(self, mode=True):
        """
        Override the default train() to optionally freeze BatchNorm layers.
        """
        super(CRNN, self).train(mode)
        if self.freeze_bn:
            print("Freezing Mean/Var of BatchNorm2D.")
            print("Freezing Weight/Bias of BatchNorm2D.")
            for m in self.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.eval()
                    m.weight.requires_grad = False
                    m.bias.requires_grad = False
| 9,567 | 31.767123 | 89 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/nnet/RNN.py | import warnings
import torch
from torch import nn as nn
class BidirectionalGRU(nn.Module):
    """Thin wrapper around a batch-first bidirectional nn.GRU returning per-step outputs."""

    def __init__(self, n_in, n_hidden, dropout=0, num_layers=1):
        """
        Args:
            n_in: int, input feature size
            n_hidden: int, hidden units per direction
            dropout: float, dropout between stacked layers
            num_layers: int, number of stacked GRU layers
        """
        super(BidirectionalGRU, self).__init__()
        self.rnn = nn.GRU(
            n_in,
            n_hidden,
            bidirectional=True,
            dropout=dropout,
            batch_first=True,
            num_layers=num_layers,
        )

    def forward(self, input_feat):
        # Discard the final hidden state; callers only need the step outputs.
        outputs, _ = self.rnn(input_feat)
        return outputs
class BidirectionalLSTM(nn.Module):
    """Batch-first bidirectional LSTM followed by a per-timestep linear projection."""

    def __init__(self, nIn, nHidden, nOut, dropout=0, num_layers=1):
        """
        Args:
            nIn: int, input feature size
            nHidden: int, total hidden size (split between the two directions)
            nOut: int, output feature size of the final projection
            dropout: float, dropout between stacked layers
            num_layers: int, number of stacked LSTM layers
        """
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(
            nIn,
            nHidden // 2,
            bidirectional=True,
            batch_first=True,
            dropout=dropout,
            num_layers=num_layers,
        )
        # BUG FIX: the bi-LSTM output size is (nHidden // 2) * 2, not nHidden * 2;
        # the original Linear(nHidden * 2, nOut) made forward() crash on any input.
        self.embedding = nn.Linear((nHidden // 2) * 2, nOut)

    def forward(self, input_feat):
        recurrent, _ = self.rnn(input_feat)
        b, T, h = recurrent.size()
        t_rec = recurrent.contiguous().view(b * T, h)
        output = self.embedding(t_rec)  # [b * T, nOut]
        return output.view(b, T, -1)
| 1,488 | 26.072727 | 68 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/scaler.py | import tqdm
import torch
class TorchScaler(torch.nn.Module):
    """
    This torch module implements scaling for input tensors, both instance based
    and dataset-wide statistic based.

    Args:
        statistic: str, (default='dataset'), represent how to compute the statistic for normalisation.
            Choice in {'dataset', 'instance'}.
            'dataset' needs to be 'fit()' with a dataloader of the dataset.
            'instance' apply the normalisation at an instance-level, so compute the statitics on the instance
            specified, it can be a clip or a batch.
        normtype: str, (default='standard') the type of normalisation to use.
            Choice in {'standard', 'mean', 'minmax'}. 'standard' applies a classic normalisation with mean and standard
            deviation. 'mean' substract the mean to the data. 'minmax' substract the minimum of the data and divide by
            the difference between max and min.
    """

    def __init__(self, statistic="dataset", normtype="standard", dims=(1, 2), eps=1e-8):
        super(TorchScaler, self).__init__()
        assert statistic in ["dataset", "instance"]
        assert normtype in ["standard", "mean", "minmax"]
        # Dataset-wide min/max would need a two-pass fit, which is not implemented.
        if statistic == "dataset" and normtype == "minmax":
            raise NotImplementedError(
                "statistic==dataset and normtype==minmax is not currently implemented."
            )
        self.statistic = statistic
        self.normtype = normtype
        # dims: axes over which statistics are computed; the default (1, 2)
        # assumes batches of 2-D features (e.g. mel spectrograms).
        self.dims = dims
        # eps: numerical-stability constant added to denominators.
        self.eps = eps

    def load_state_dict(self, state_dict, strict=True):
        # Instance-level scaling keeps no fitted buffers, so loading is a no-op then.
        if self.statistic == "dataset":
            super(TorchScaler, self).load_state_dict(state_dict, strict)

    def _load_from_state_dict(
        self,
        state_dict,
        prefix,
        local_metadata,
        strict,
        missing_keys,
        unexpected_keys,
        error_msgs,
    ):
        # Mirror load_state_dict: only dataset-statistic scalers carry buffers.
        if self.statistic == "dataset":
            super(TorchScaler, self)._load_from_state_dict(
                state_dict,
                prefix,
                local_metadata,
                strict,
                missing_keys,
                unexpected_keys,
                error_msgs,
            )

    def fit(self, dataloader, transform_func=lambda x: x[0]):
        """
        Scaler fitting

        Accumulates per-feature mean and mean-of-squares over the whole
        dataloader (averaged over batches), then registers them as buffers
        so they are saved/restored with the module's state dict.

        Args:
            dataloader (DataLoader): training data DataLoader
            transform_func (lambda function, optional): Transforms applied to the data.
                Defaults to lambdax:x[0].
        """
        indx = 0
        for batch in tqdm.tqdm(dataloader):
            feats = transform_func(batch)
            if indx == 0:
                mean = torch.mean(feats, self.dims, keepdim=True).mean(0).unsqueeze(0)
                mean_squared = (
                    torch.mean(feats ** 2, self.dims, keepdim=True).mean(0).unsqueeze(0)
                )
            else:
                mean += torch.mean(feats, self.dims, keepdim=True).mean(0).unsqueeze(0)
                mean_squared += (
                    torch.mean(feats ** 2, self.dims, keepdim=True).mean(0).unsqueeze(0)
                )
            indx += 1

        mean /= indx
        mean_squared /= indx

        self.register_buffer("mean", mean)
        self.register_buffer("mean_squared", mean_squared)

    def forward(self, tensor):
        # Dataset statistics: use the buffers registered by fit().
        if self.statistic == "dataset":
            assert hasattr(self, "mean") and hasattr(
                self, "mean_squared"
            ), "TorchScaler should be fit before used if statistics=dataset"
            assert tensor.ndim == self.mean.ndim, "Pre-computed statistics "
            if self.normtype == "mean":
                return tensor - self.mean
            elif self.normtype == "standard":
                # std via E[x^2] - E[x]^2 (computed from the fitted buffers).
                std = torch.sqrt(self.mean_squared - self.mean ** 2)
                return (tensor - self.mean) / (std + self.eps)
            else:
                raise NotImplementedError
        # Instance statistics: computed on the fly from the input tensor itself.
        else:
            if self.normtype == "mean":
                return tensor - torch.mean(tensor, self.dims, keepdim=True)
            elif self.normtype == "standard":
                return (tensor - torch.mean(tensor, self.dims, keepdim=True)) / (
                    torch.std(tensor, self.dims, keepdim=True) + self.eps
                )
            elif self.normtype == "minmax":
                return (tensor - torch.amin(tensor, dim=self.dims, keepdim=True)) / (
                    torch.amax(tensor, dim=self.dims, keepdim=True)
                    - torch.amin(tensor, dim=self.dims, keepdim=True)
                    + self.eps
                )
| 4,606 | 38.042373 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/schedulers.py | from asteroid.engine.schedulers import *
import numpy as np
class ExponentialWarmup(BaseScheduler):
    """Learning-rate scheduler applying an exponential warm-up curve.

    Args:
        optimizer: torch.optimizer.Optimizer, the optimizer whose learning rate is ramped up.
        max_lr: float, the learning rate reached at the end of the ramp-up.
        rampup_length: int, number of ramp-up steps (0 disables the warm-up).
        exponent: float, exponent shaping the warm-up curve.
    """

    def __init__(self, optimizer, max_lr, rampup_length, exponent=-5.0):
        super().__init__(optimizer)
        self.rampup_len = rampup_length
        self.max_lr = max_lr
        self.step_num = 1
        self.exponent = exponent

    def _get_scaling_factor(self):
        # No warm-up configured: use the full learning rate immediately.
        if self.rampup_len == 0:
            return 1.0
        progress = np.clip(self.step_num, 0.0, self.rampup_len)
        phase = 1.0 - progress / self.rampup_len
        return float(np.exp(self.exponent * phase * phase))

    def _get_lr(self):
        return self.max_lr * self._get_scaling_factor()
| 1,094 | 32.181818 | 95 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/utils/torch_utils.py | import torch
import numpy as np
def nantensor(*args, **kwargs):
    """Return a NaN-filled tensor; accepts the same arguments as ``torch.ones``."""
    template = torch.ones(*args, **kwargs)
    return template * float("nan")
def nanmean(v, *args, inplace=False, **kwargs):
    """Mean of ``v`` ignoring NaNs; extra args forwarded to sum (e.g. dim, keepdim).

    With inplace=False (default) the input is left untouched; with inplace=True
    the NaN entries of ``v`` are zeroed in place as a side effect.
    """
    target = v if inplace else v.clone()
    nan_mask = torch.isnan(target)
    target[nan_mask] = 0
    counts = (~nan_mask).float().sum(*args, **kwargs)
    return target.sum(*args, **kwargs) / counts
| 327 | 20.866667 | 74 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/data_augm.py | import numpy as np
import torch
import random
def frame_shift(mels, labels, net_pooling=4):
    """Randomly time-shift each clip (Gaussian shift) and roll its labels to match.

    Labels are shifted by shift // net_pooling since they live at pooled resolution.
    One random shift is drawn per batch item.
    """
    shifted_mels, shifted_labels = [], []
    for mel, label in zip(mels, labels):
        shift = int(random.gauss(0, 90))
        shifted_mels.append(torch.roll(mel, shift, dims=-1))
        label_shift = -abs(shift) // net_pooling if shift < 0 else shift // net_pooling
        shifted_labels.append(torch.roll(label, label_shift, dims=-1))
    return torch.stack(shifted_mels), torch.stack(shifted_labels)
def frame_shift2(mels, labels, net_pooling=4):
    """Time-shift two-channel mels independently per channel; labels follow channel 0.

    For each clip, channel 0 and the labels share one random shift (labels at
    pooled resolution); channel 1 gets its own independent random shift.
    Input is (bsz, 2, n_bands, frames); output keeps that layout.
    """
    bsz = mels.shape[0]

    first_channel, new_labels = [], []
    for bindx in range(bsz):
        shift = int(random.gauss(0, 90))
        first_channel.append(torch.roll(mels[bindx, 0], shift, dims=-1))
        shift = -abs(shift) // net_pooling if shift < 0 else shift // net_pooling
        new_labels.append(torch.roll(labels[bindx], shift, dims=-1))
    first_channel = torch.stack(first_channel)

    second_channel = []
    for bindx in range(bsz):
        shift = int(random.gauss(0, 90))
        second_channel.append(torch.roll(mels[bindx, 1], shift, dims=-1))
    second_channel = torch.stack(second_channel)

    # Reassemble the two channels back to (bsz, 2, n_bands, frames).
    shifted = torch.stack([first_channel, second_channel], 3).permute(0, 3, 1, 2)
    return shifted, torch.stack(new_labels)
def temporal_reverse(mels, labels, net_pooling=4):
    """Reverse each clip and its labels along the time (last) axis.

    net_pooling is accepted for signature parity with frame_shift but unused.
    """
    flipped_mels = [torch.fliplr(mel) for mel in mels]
    flipped_labels = [torch.fliplr(label) for label in labels]
    return torch.stack(flipped_mels), torch.stack(flipped_labels)
def mixup(data, target=None, alpha=0.2, beta=0.2, mixup_label_type="soft"):
    """Mixup data augmentation by permuting the data

    Args:
        data: input tensor, must be a batch so data can be permuted and mixed.
        target: tensor of the target to be mixed, if None, do not return targets.
        alpha: float, the parameter to the np.random.beta distribution
        beta: float, the parameter to the np.random.beta distribution
        mixup_label_type: str, the type of mixup to be used choice between {'soft', 'hard'}.

    Returns:
        torch.Tensor of mixed data and labels if given
    """
    with torch.no_grad():
        batch_size = data.size(0)
        mix_coeff = np.random.beta(alpha, beta)
        perm = torch.randperm(batch_size)

        mixed_data = mix_coeff * data + (1 - mix_coeff) * data[perm, :]
        if target is None:
            return mixed_data

        if mixup_label_type == "soft":
            # Convex combination of the two label sets, clamped to [0, 1].
            mixed_target = torch.clamp(
                mix_coeff * target + (1 - mix_coeff) * target[perm, :], min=0, max=1
            )
        elif mixup_label_type == "hard":
            # Union of the two label sets (clamped sum).
            mixed_target = torch.clamp(target + target[perm, :], min=0, max=1)
        else:
            raise NotImplementedError(
                f"mixup_label_type: {mixup_label_type} not implemented. choice in "
                f"{'soft', 'hard'}"
            )
        return mixed_data, mixed_target
def add_noise(mels, snrs=(6, 30), dims=(1, 2)):
    """ Add white noise to mels spectrograms

    Args:
        mels: torch.tensor, mels spectrograms to apply the white noise to.
        snrs: int or tuple, the range of snrs to choose from if tuple (uniform)
        dims: tuple, the dimensions for which to compute the standard deviation (default to (1,2) because assume
            an input of a batch of mel spectrograms.

    Returns:
        torch.Tensor of mels with noise applied
    """
    if isinstance(snrs, (list, tuple)):
        # One uniform SNR per batch item, drawn in [snrs[1], snrs[0]].
        low, high = snrs
        snr = (low - high) * torch.rand(
            (mels.shape[0],), device=mels.device
        ).reshape(-1, 1, 1) + high
    else:
        snr = snrs

    snr = 10 ** (snr / 20)  # linear domain
    # Noise standard deviation scaled from the per-item signal std.
    sigma = torch.std(mels, dim=dims, keepdim=True) / snr
    return mels + torch.randn(mels.shape, device=mels.device) * sigma
| 3,931 | 35.073394 | 112 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/dataio/sampler.py | from torch.utils.data import Sampler
import numpy as np
class ConcatDatasetBatchSampler(Sampler):
    """This sampler is built to work with a standard Pytorch ConcatDataset.

    From SpeechBrain dataio see https://github.com/speechbrain/
    It is used to retrieve elements from the different concatenated datasets placing them in the same batch
    with proportion specified by batch_sizes, e.g 8, 16 means each batch will
    be of 24 elements with the first 8 belonging to the first dataset in ConcatDataset
    object and the last 16 to the second.
    More than two datasets are supported, in that case you need to provide 3 batch
    sizes.

    Note
    ----
    Batched are drawn from the datasets till the one with smallest length is exhausted.
    Thus number of examples in your training epoch is dictated by the dataset
    whose length is the smallest.

    Arguments
    ---------
    samplers : list
        One Pytorch sampler per concatenated dataset.
    batch_sizes: list
        Batch sizes.
    epoch : int
        The epoch to start at.
    """

    def __init__(self, samplers, batch_sizes: (tuple, list), epoch=0) -> None:
        if not isinstance(samplers, (list, tuple)):
            raise ValueError(
                "samplers should be a list or tuple of Pytorch Samplers, "
                # BUG FIX: the message previously interpolated batch_sizes here,
                # hiding the actual offending value.
                "but got samplers={}".format(samplers)
            )

        if not isinstance(batch_sizes, (list, tuple)):
            raise ValueError(
                "batch_sizes should be a list or tuple of integers, "
                "but got batch_sizes={}".format(batch_sizes)
            )

        if not len(batch_sizes) == len(samplers):
            raise ValueError("batch_sizes and samplers should be have same length")

        self.batch_sizes = batch_sizes
        self.samplers = samplers
        # Starting index of each dataset inside the ConcatDataset.
        self.offsets = [0] + np.cumsum([len(x) for x in self.samplers]).tolist()[:-1]

        self.epoch = epoch
        self.set_epoch(self.epoch)

    def _iter_one_dataset(self, c_batch_size, c_sampler, c_offset):
        # Yield full batches of offset indices from a single sampler.
        batch = []
        for idx in c_sampler:
            batch.append(c_offset + idx)
            if len(batch) == c_batch_size:
                yield batch
                # BUG FIX: the batch was never reset after yielding, so every
                # subsequent yield re-emitted previously returned indices.
                batch = []

    def set_epoch(self, epoch):
        # Propagate the epoch to distributed-style samplers that track it.
        if hasattr(self.samplers[0], "epoch"):
            for s in self.samplers:
                s.set_epoch(epoch)

    def __iter__(self):
        iterators = [iter(i) for i in self.samplers]
        tot_batch = []
        for b_num in range(len(self)):
            # Concatenate one sub-batch per dataset, each shifted by its offset.
            for samp_idx in range(len(self.samplers)):
                c_batch = []
                while len(c_batch) < self.batch_sizes[samp_idx]:
                    c_batch.append(self.offsets[samp_idx] + next(iterators[samp_idx]))
                tot_batch.extend(c_batch)
            yield tot_batch
            tot_batch = []

    def __len__(self):
        # Number of batches is dictated by the most limiting dataset.
        min_len = float("inf")
        for idx, sampler in enumerate(self.samplers):
            c_len = (len(sampler)) // self.batch_sizes[idx]
            min_len = min(c_len, min_len)
        return min_len
| 3,147 | 33.217391 | 107 | py |
CRSTmodel | CRSTmodel-main/DCASE2021_baseline_platform/desed_task/dataio/datasets.py | from torch.utils.data import Dataset
import pandas as pd
import os
import numpy as np
import torchaudio
import torch
import glob
def to_mono(mixture, random_ch=False):
    """Collapse a multi-channel waveform to mono.

    Args:
        mixture: tensor, (channels, samples) or already-mono (samples,).
        random_ch: bool, pick one channel uniformly at random instead of averaging.

    Returns:
        1-D mono tensor (the input is returned unchanged if already mono).
    """
    if mixture.ndim > 1:  # multi channel
        if not random_ch:
            mixture = torch.mean(mixture, 0)
        else:  # randomly select one channel
            # BUG FIX: np.random.randint's upper bound is exclusive, so the
            # original randint(0, n_channels - 1) could never pick the last
            # channel and crashed on single-channel input.
            indx = np.random.randint(0, mixture.shape[0])
            mixture = mixture[indx]
    return mixture
def pad_audio(audio, target_len):
    """Right-pad audio with zeros up to target_len samples.

    Args:
        audio: tensor whose last dimension is time (samples).
        target_len: int, desired number of samples.

    Returns:
        (audio, padded_indx): the padded tensor and a one-element list with the
        ratio target_len / original_len (1.0 when no padding was needed).
    """
    if audio.shape[-1] < target_len:
        # BUG FIX: the ratio was computed *after* padding, so it always
        # evaluated to 1.0; record the original length first.
        # NOTE(review): len(audio) counts channels for multi-channel input --
        # callers run to_mono() first, so a 1-D time axis is assumed; confirm.
        original_len = len(audio)
        audio = torch.nn.functional.pad(
            audio, (0, target_len - audio.shape[-1]), mode="constant"
        )
        padded_indx = [target_len / original_len]
    else:
        padded_indx = [1.0]
    return audio, padded_indx
def read_audio(file, multisrc, random_channel, pad_to):
    """Load a wav file, optionally downmix to mono, and pad to a fixed length.

    Args:
        file: path to the audio file.
        multisrc: bool, keep all channels when True (skip the mono downmix).
        random_channel: bool, forwarded to to_mono (random channel pick).
        pad_to: int or None, target length in samples (None disables padding).

    Returns:
        (mixture, padded_indx): float waveform tensor and padding-ratio list.
    """
    mixture, fs = torchaudio.load(file)

    if not multisrc:
        mixture = to_mono(mixture, random_channel)

    if pad_to is None:
        padded_indx = [1.0]
    else:
        mixture, padded_indx = pad_audio(mixture, pad_to)

    return mixture.float(), padded_indx
class StronglyAnnotatedSet(Dataset):
    """Dataset of strongly labelled clips (frame-level event annotations).

    One example is built per unique filename in the tsv rows; rows with a NaN
    onset contribute no events (the clip keeps an empty event list).
    """

    def __init__(
        self,
        audio_folder,
        tsv_entries,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
        evaluation=False
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = pad_to * fs  # seconds -> samples
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc

        # Group annotation rows by filename into {mixture path, event list}.
        examples = {}
        for _, row in tsv_entries.iterrows():
            fname = row["filename"]
            if fname not in examples:
                examples[fname] = {
                    "mixture": os.path.join(audio_folder, fname),
                    "events": [],
                }
            # A NaN onset marks a clip without any events.
            if not np.isnan(row["onset"]):
                examples[fname]["events"].append(
                    {
                        "event_label": row["event_label"],
                        "onset": row["onset"],
                        "offset": row["offset"],
                    }
                )

        # we construct a dictionary for each example
        self.examples = examples
        self.examples_list = list(examples.keys())

    def __len__(self):
        return len(self.examples_list)

    def __getitem__(self, item):
        c_ex = self.examples[self.examples_list[item]]
        mixture, padded_indx = read_audio(
            c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to
        )

        events = c_ex["events"]
        if events:
            # Encode event list to a (frames, classes) activity matrix.
            strong = torch.from_numpy(
                self.encoder.encode_strong_df(pd.DataFrame(events))
            ).float()
        else:
            # No events: all-zero activity matrix.
            strong = torch.zeros(
                self.encoder.n_frames, len(self.encoder.labels)
            ).float()

        if self.return_filename:
            return mixture, strong.transpose(0, 1), padded_indx, c_ex["mixture"]
        return mixture, strong.transpose(0, 1), padded_indx
class WeakSet(Dataset):
    """Dataset of weakly labelled clips (clip-level tags, no timestamps).

    Args:
        audio_folder: str, folder containing the audio files.
        tsv_entries: pandas.DataFrame with columns [filename, event_labels],
            event_labels being a comma-separated tag string.
        encoder: object exposing n_frames, labels and encode_weak().
        pad_to: int, clip length in seconds to pad to.
        fs: int, sampling frequency in Hz.
        return_filename: bool, if True __getitem__ also returns the file path.
        random_channel: bool, channel-selection policy when downmixing.
        multisrc: bool, keep multi-channel audio (skip the mono downmix).
    """

    def __init__(
        self,
        audio_folder,
        tsv_entries,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = pad_to * fs  # seconds -> samples
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc
        examples = {}
        for _, row in tsv_entries.iterrows():
            fname = row["filename"]
            if fname in examples:
                continue  # keep only the first annotation row per file
            examples[fname] = {
                "mixture": os.path.join(audio_folder, fname),
                "events": row["event_labels"].split(","),
            }
        self.examples = examples
        self.examples_list = list(examples.keys())

    def __len__(self):
        return len(self.examples_list)

    def __getitem__(self, item):
        c_ex = self.examples[self.examples_list[item]]
        mixture, padded_indx = read_audio(
            c_ex["mixture"], self.multisrc, self.random_channel, self.pad_to
        )
        tags = c_ex["events"]
        # Clip-level target stored in the first frame row of a zero matrix.
        weak = torch.zeros(self.encoder.n_frames, len(self.encoder.labels))
        if tags:
            weak[0, :] = torch.from_numpy(self.encoder.encode_weak(tags)).float()
        out_args = [mixture, weak.transpose(0, 1), padded_indx]
        if self.return_filename:
            out_args.append(c_ex["mixture"])
        return out_args
class UnlabeledSet(Dataset):
    """Dataset over a folder of unlabelled wav files.

    Targets are all-zero strong matrices so unlabelled batches share the same
    output format as labelled ones.

    Args:
        unlabeled_folder: str, folder scanned for ``*.wav`` files.
        encoder: object exposing n_frames and labels.
        pad_to: int or None, clip length in seconds to pad to (None: no pad).
        fs: int, sampling frequency in Hz.
        return_filename: bool, if True __getitem__ also returns the file path.
        random_channel: bool, channel-selection policy when downmixing.
        multisrc: bool, keep multi-channel audio (skip the mono downmix).
    """

    def __init__(
        self,
        unlabeled_folder,
        encoder,
        pad_to=10,
        fs=16000,
        return_filename=False,
        random_channel=False,
        multisrc=False,
    ):
        self.encoder = encoder
        self.fs = fs
        self.pad_to = None if pad_to is None else pad_to * fs  # seconds -> samples
        self.examples = glob.glob(os.path.join(unlabeled_folder, "*.wav"))
        self.return_filename = return_filename
        self.random_channel = random_channel
        self.multisrc = multisrc

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, item):
        filepath = self.examples[item]
        mixture, padded_indx = read_audio(
            filepath, self.multisrc, self.random_channel, self.pad_to
        )
        # Dummy all-zero strong target (no labels available).
        strong = torch.zeros(self.encoder.n_frames, len(self.encoder.labels)).float()
        out_args = [mixture, strong.transpose(0, 1), padded_indx]
        if self.return_filename:
            out_args.append(filepath)
        return out_args
| 6,460 | 27.337719 | 83 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_MT_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Set every param group's learning rate from the ramp factors.

    The effective rate is
    ``rampup_value * rampdown_value * cfg.max_learning_rate`` — LR warm-up
    to handle large minibatch sizes, cf. https://arxiv.org/abs/1706.02677.

    Args:
        optimizer: torch optimizer whose param groups are updated in place.
        rampup_value: float in [0, 1], increases linearly over training.
        rampdown_value: float in [1, 0], decreases linearly over training.
    """
    scaled_lr = cfg.max_learning_rate * rampup_value * rampdown_value
    for group in optimizer.param_groups:
        group['lr'] = scaled_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the EMA (teacher) weights in place from the student weights.

    Computes ``ema = alpha * ema + (1 - alpha) * student`` for each parameter
    pair, with alpha ramped up from 0 so early steps track the true average.

    Args:
        model: student network, source of the weights.
        ema_model: EMA (teacher) network, mutated in place.
        alpha: float, upper bound on the EMA decay rate.
        global_step: int, number of optimizer steps taken so far.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # add_(tensor, alpha=...) replaces the deprecated add_(scalar, tensor)
        # overload, which was removed in recent PyTorch releases.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of a Mean Teacher model
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((teacher input, student input), labels)
        model: torch.Module, student model to be trained, should return a weak and strong prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, teacher (EMA) model, should return a weak and strong prediction;
            its weights are updated from the student by update_ema_variables after each step
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)
    Returns:
        torch.Tensor, total loss of the last processed batch
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss()
    # NOTE(review): the MSE criterion is instantiated but the consistency terms
    # below use class_criterion (BCE) instead — appears deliberate; confirm.
    consistency_criterion = nn.MSELoss()
    class_criterion, consistency_criterion = to_cuda_if_available(class_criterion, consistency_criterion)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    for i, ((batch_input, ema_batch_input), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup*len(train_loader))
        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])
        batch_input, ema_batch_input, target = to_cuda_if_available(batch_input, ema_batch_input, target)
        # Outputs
        # Teacher predictions serve only as targets, so gradients are cut here.
        strong_pred_ema, weak_pred_ema = ema_model(ema_batch_input)
        strong_pred_ema = strong_pred_ema.detach()
        weak_pred_ema = weak_pred_ema.detach()
        strong_pred, weak_pred = model(batch_input)
        #sample = target[mask_strong].sum(2)
        #sample = sample.cpu().numpy()
        #print(np.where(sample[-1,:]>1))
        loss = None
        # Weak BCE Loss: clip-level tags on the weakly labelled part of the batch.
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            ema_class_loss = class_criterion(weak_pred_ema[mask_weak], target_weak[mask_weak])
            loss = weak_class_loss
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            meters.update('Weak EMA loss', ema_class_loss.item())
        # Strong BCE loss: frame-level targets on the strongly labelled part.
        if mask_strong is not None:
            strong_class_loss = class_criterion(strong_pred[mask_strong], target[mask_strong])
            meters.update('Strong loss', strong_class_loss.item())
            strong_ema_class_loss = class_criterion(strong_pred_ema[mask_strong], target[mask_strong])
            meters.update('Strong EMA loss', strong_ema_class_loss.item())
            if loss is not None:
                loss += strong_class_loss
            else:
                loss = strong_class_loss
        # Teacher-student consistency cost, weight ramped up over training.
        if ema_model is not None:
            consistency_cost = cfg.max_consistency_cost * rampup_value
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about strong predictions (all data)
            #consistency_loss_strong = consistency_cost * consistency_criterion(strong_pred, strong_pred_ema)
            consistency_loss_strong = consistency_cost * class_criterion(strong_pred, strong_pred_ema)
            meters.update('Consistency strong', consistency_loss_strong.item())
            if loss is not None:
                loss += consistency_loss_strong
            else:
                loss = consistency_loss_strong
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about weak predictions (all data)
            #consistency_loss_weak = consistency_cost * consistency_criterion(weak_pred, weak_pred_ema)
            consistency_loss_weak = consistency_cost * class_criterion(weak_pred, weak_pred_ema)
            meters.update('Consistency weak', consistency_loss_weak.item())
            if loss is not None:
                loss += consistency_loss_weak
            else:
                loss = consistency_loss_weak
        # Sanity guards against divergence (BCE of valid probs is >= 0).
        assert not (np.isnan(loss.item()) or loss.item() > 1e5), 'Loss explosion: {}'.format(loss.item())
        assert not loss.item() < 0, 'Loss problem, cannot be negative'
        meters.update('Loss', loss.item())
        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        if ema_model is not None:
            update_ema_variables(model, ema_model, 0.999, global_step)
    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Load and split the DESED metadata DataFrames used by the experiment.

    Args:
        desed_dataset: DESED instance used to build features / metadata.
        nb_files: int or None, restrict each subset to this many files
            (useful when debugging on a small amount of data).
        separated_sources: bool, also use the source-separated audio
            directories declared in cfg.

    Returns:
        dict of pandas.DataFrame with keys: weak, unlabel, synthetic,
        train_synthetic, valid_synthetic, validation.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even if synthetic is not used for training, it is used for validation
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (80/20 split by filename, fixed seed)
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    # .copy() detaches the training split from synthetic_df so the in-place
    # onset/offset conversion below cannot raise SettingWithCopyWarning or
    # silently fail to write through a view.
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)].copy()
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    # NOTE(review): pooling_time_ratio is read from module scope (set in __main__).
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Fixed seeds for reproducibility.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    # NOTE(review): add_dir_model_name is computed but never used below —
    # store_dir is hard-coded; confirm whether it should be appended.
    if no_synthetic:
        add_dir_model_name = "_no_synthetic"
    else:
        add_dir_model_name = "_with_synthetic"
    store_dir = os.path.join("stored_data", "MeanTeacher_CRNN_bce4")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume from a checkpoint only when a model directory already exists.
    if os.path.exists(store_dir):
        if os.path.exists(saved_model_dir):
            load_flag = True
        else:
            load_flag = False
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    else:
        load_flag = False
        os.makedirs(store_dir, exist_ok=True)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2 (time pooling of the two first CNN blocks)
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch layout: weak data first, then unlabelled, then (optionally) synthetic.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        mlist = os.listdir(saved_model_dir)
        # NOTE(review): os.listdir order is arbitrary and "baseline_best" has no
        # integer suffix — resuming assumes only "baseline_epoch_N" files are
        # present; also n_epoch is never used by the training loop below.
        modelName = mlist[-1]
        # plain int(): np.int was deprecated in NumPy 1.20 and removed in 1.24
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn_ema = _load_model(state, 'crnn')
        for param in crnn_ema.parameters():
            # detach_() (in-place), matching the fresh-model branch below;
            # the previous detach() returned a new tensor and had no effect.
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
        logger.info(crnn)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn.apply(weights_init)
        crnn_ema = CRNN(**crnn_kwargs)
        crnn_ema.apply(weights_init)
        # Teacher parameters receive no gradients; they are updated via EMA only.
        for param in crnn_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    # Everything needed to restore the experiment is checkpointed in `state`.
    state = {
        'model': {"name": crnn.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict': crnn.state_dict()},
        'model_ema': {"name": crnn_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict': crnn_ema.state_dict()},
        'optimizer': {"name": optim.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict': optim.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(cfg.n_epoch):
        crnn.train()
        crnn_ema.train()
        crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
        loss_value = train(training_loader, crnn, optim, epoch,
                           ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation
        crnn = crnn.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict'] = crnn.state_dict()
        state['model_ema']['state_dict'] = crnn_ema.state_dict()
        state['optimizer']['state_dict'] = optim.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # logger.warning: logger.warn is a deprecated alias
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv returns None, so its result is intentionally not kept.
    results.to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                   sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                     pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
                                     save_predictions=predicitons_fname)
    psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 22,648 | 47.189362 | 120 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_ICT_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Ramp the learning rate of every param group in place.

    The effective rate is
    ``rampup_value * rampdown_value * cfg.max_learning_rate``, implementing
    LR warm-up for large minibatches (https://arxiv.org/abs/1706.02677).

    Args:
        optimizer: torch optimizer whose param groups are mutated.
        rampup_value: float in [0, 1] increasing linearly during training.
        rampdown_value: float in [1, 0] decreasing linearly during training.
    """
    new_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for pg in optimizer.param_groups:
        pg['lr'] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """In-place EMA update of the teacher weights from the student weights.

    Each teacher parameter becomes ``alpha * teacher + (1 - alpha) * student``;
    alpha is ramped from 0 so the first steps copy the student directly.

    Args:
        model: student network, source of the weights.
        ema_model: EMA (teacher) network, mutated in place.
        alpha: float, upper bound on the EMA decay rate.
        global_step: int, number of optimizer steps taken so far.
    """
    # Use the true average until the exponential average is more correct
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # add_(tensor, alpha=...): the add_(scalar, tensor) overload is
        # deprecated and removed in recent PyTorch versions.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of Interpolation Consistency Training (ICT) on top of a
    Mean Teacher setup: unlabelled inputs are mixed pairwise and the student
    is trained to match the corresponding mix of teacher pseudo-labels.
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((teacher input, student input), labels)
        model: torch.Module, student model to be trained, should return a weak and strong prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, teacher (EMA) model, should return a weak and strong prediction;
            updated from the student via update_ema_variables after each step
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)
    Returns:
        torch.Tensor, total loss of the last processed batch
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss()
    consistency_criterion = nn.MSELoss()
    class_criterion, consistency_criterion = to_cuda_if_available(class_criterion, consistency_criterion)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    for i, ((batch_input, ema_batch_input), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup*len(train_loader))
        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])
        batch_input, ema_batch_input, target = to_cuda_if_available(batch_input, ema_batch_input, target)
        # Outputs
        # Teacher predictions serve only as targets, so gradients are cut here.
        strong_pred_ema, weak_pred_ema = ema_model(ema_batch_input)
        strong_pred_ema = strong_pred_ema.detach()
        weak_pred_ema = weak_pred_ema.detach()
        strong_pred, weak_pred = model(batch_input)
        # core for Interpolation Consistency Training (ICT)
        #this version: about 36.55 % in validation
        # First 3/4 of the batch = weak + unlabelled portion (see batch layout
        # in __main__); pairs drawn from it are mixed with random weights.
        n_unlabeled = int(3*cfg.batch_size/4)
        unlabeled_data = batch_input[:n_unlabeled]
        strong_prediction, weak_prediction = ema_model(unlabeled_data)
        mixed_unlabeled_data = []
        mixed_strong_plabel = []
        mixed_weak_plabel = []
        idx = np.arange(n_unlabeled)
        # NOTE(review): `iter` shadows the builtin of the same name (harmless
        # here, the loop variable is unused).
        for iter in range(n_unlabeled):
            # One mixing coefficient per generated example; inputs and teacher
            # pseudo-labels are interpolated with the same coefficient.
            lambda_ = torch.rand(1).cuda()
            np.random.shuffle(idx)
            idx1 = idx[0]
            idx2 = idx[1]
            mixed = lambda_*unlabeled_data[idx1] + (1.0-lambda_)*unlabeled_data[idx2]
            mixed_unlabeled_data.append(mixed)
            spred = lambda_*strong_prediction[idx1] + (1.0-lambda_)*strong_prediction[idx2]
            mixed_strong_plabel.append(spred)
            wpred = lambda_*weak_prediction[idx1] + (1.0-lambda_)*weak_prediction[idx2]
            mixed_weak_plabel.append(wpred)
        # NOTE(review): the (1, 628, 128) input and (157, 10) strong-output
        # shapes are hard-coded for this configuration — confirm they match
        # cfg.max_frames, the mel count, pooling ratio and class count.
        mixed_unlabeled_data = torch.cat(mixed_unlabeled_data, dim=0)
        mixed_unlabeled_data = torch.reshape(mixed_unlabeled_data, (n_unlabeled, 1, 628, 128)).cuda()
        mixed_strong_plabel = torch.cat(mixed_strong_plabel, dim=0)
        mixed_strong_plabel = torch.reshape(mixed_strong_plabel, (n_unlabeled, 157, 10)).cuda()
        mixed_weak_plabel = torch.cat(mixed_weak_plabel, dim=0)
        mixed_weak_plabel = torch.reshape(mixed_weak_plabel, (n_unlabeled,10)).cuda()
        '''#this version is equal to add noise with random SNR depending on lambda_
        n_unlabeled = int(3*cfg.batch_size/4) # mask for unlabeled and weakly labeled data
        unlabeled_data1 = batch_input[:n_unlabeled]
        unlabeled_data2 = ema_batch_input[:n_unlabeled]
        strong_prediction1, weak_prediction1 = ema_model(unlabeled_data1)
        strong_prediction2, weak_prediction2 = ema_model(unlabeled_data2)
        lambda_ = torch.rand(1).cuda()
        mixed_unlabeled_data = lambda_*unlabeled_data1 + (1.0-lambda_)*unlabeled_data2
        mixed_strong_plabel = lambda_*strong_prediction1 + (1.0-lambda_)*strong_prediction2
        mixed_weak_plabel = lambda_*weak_prediction1 + (1.0-lambda_)*weak_prediction2
        '''
        strong_prediction_mixed, weak_prediction_mixed = model(mixed_unlabeled_data)
        #sample = target[mask_strong].sum(2)
        #sample = sample.cpu().numpy()
        #print(np.where(sample[-1,:]>1))
        loss = None
        # Weak BCE Loss: clip-level tags on the weakly labelled part of the batch.
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            ema_class_loss = class_criterion(weak_pred_ema[mask_weak], target_weak[mask_weak])
            loss = weak_class_loss
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            meters.update('Weak EMA loss', ema_class_loss.item())
        # Strong BCE loss: frame-level targets on the strongly labelled part.
        if mask_strong is not None:
            strong_class_loss = class_criterion(strong_pred[mask_strong], target[mask_strong])
            meters.update('Strong loss', strong_class_loss.item())
            strong_ema_class_loss = class_criterion(strong_pred_ema[mask_strong], target[mask_strong])
            meters.update('Strong EMA loss', strong_ema_class_loss.item())
            if loss is not None:
                loss += strong_class_loss
            else:
                loss = strong_class_loss
        # ICT consistency cost: student predictions on the mixed inputs must
        # match the mixed teacher pseudo-labels (MSE), weight ramped up.
        if ema_model is not None:
            consistency_cost = cfg.max_consistency_cost * rampup_value
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about strong predictions (all data)
            consistency_loss_strong = consistency_cost * consistency_criterion(strong_prediction_mixed, mixed_strong_plabel)
            meters.update('Consistency strong', consistency_loss_strong.item())
            if loss is not None:
                loss += consistency_loss_strong
            else:
                loss = consistency_loss_strong
            meters.update('Consistency weight', consistency_cost)
            # Take consistency about weak predictions (all data)
            consistency_loss_weak = consistency_cost * consistency_criterion(weak_prediction_mixed, mixed_weak_plabel)
            meters.update('Consistency weak', consistency_loss_weak.item())
            if loss is not None:
                loss += consistency_loss_weak
            else:
                loss = consistency_loss_weak
        # Sanity guards against divergence.
        assert not (np.isnan(loss.item()) or loss.item() > 1e5), 'Loss explosion: {}'.format(loss.item())
        assert not loss.item() < 0, 'Loss problem, cannot be negative'
        meters.update('Loss', loss.item())
        # compute gradient and do optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        global_step += 1
        if ema_model is not None:
            update_ema_variables(model, ema_model, 0.999, global_step)
    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Build the metadata dataframes used for training and validation.

    Args:
        desed_dataset: DESED, dataset object used to initialize features and metadata.
        nb_files: int, optional, restrict each subset to this many files (debugging).
        separated_sources: bool, whether to use the source-separated audio dirs from config.

    Returns:
        dict of pd.DataFrame keyed by subset name: "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation".

    NOTE(review): relies on the module-level ``pooling_time_ratio`` global (defined in
    the __main__ block) to convert synthetic onsets/offsets from seconds to frames.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even if synthetic is not used for training, it is used for validation purposes
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (80/20 split on unique filenames, fixed seed)
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    # .copy() so the onset/offset writes below hit a real frame, not a view of
    # synthetic_df (avoids SettingWithCopyWarning and unreliable chained assignment).
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)].copy()
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not doing it for valid, because not using labels (when prediction) and event based metric expect sec.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Entry point: mean-teacher (+ICT) training of a CRNN SED model, followed by
    # validation and PSDS scoring.  Fixes applied: np.int (removed in NumPy 1.24)
    # -> int; param.detach() (no-op, returns a new tensor) -> param.detach_();
    # logger.warn (deprecated) -> logger.warning.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    if no_synthetic:
        add_dir_model_name = "_no_synthetic"
    else:
        add_dir_model_name = "_with_synthetic"
    store_dir = os.path.join("stored_data", "MeanTeacher_with_ICT5")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume from an existing model directory if one is present; otherwise create dirs.
    if os.path.exists(store_dir):
        if os.path.exists(saved_model_dir):
            load_flag = True
        else:
            load_flag = False
            os.makedirs(saved_model_dir, exist_ok=True)
            os.makedirs(saved_pred_dir, exist_ok=True)
    else:
        load_flag = False
        os.makedirs(store_dir, exist_ok=True)
        os.makedirs(saved_model_dir, exist_ok=True)
        os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch composition: weak / unlabeled / (optionally) synthetic strong labels.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume: load the most recent checkpoint found in the model directory.
        mlist = os.listdir(saved_model_dir)
        modelName = mlist[-1]
        # np.int was removed in NumPy 1.24; the builtin int is the correct call here.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn_ema = _load_model(state, 'crnn')
        for param in crnn_ema.parameters():
            # detach_() (in-place) so EMA params are excluded from autograd;
            # detach() returned a new tensor and left the parameter unchanged.
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
        logger.info(crnn)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn.apply(weights_init)
        crnn_ema = CRNN(**crnn_kwargs)
        crnn_ema.apply(weights_init)
        for param in crnn_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    bce_loss = nn.BCELoss()
    # Checkpoint template; the state_dict entries are refreshed every epoch below.
    state = {
        'model': {"name": crnn.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict': crnn.state_dict()},
        'model_ema': {"name": crnn_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict': crnn_ema.state_dict()},
        'optimizer': {"name": optim.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict': optim.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    # NOTE(review): the loaded n_epoch is not used as the loop start; a resumed run
    # restarts from epoch 0 — confirm whether range(n_epoch, cfg.n_epoch) was intended.
    for epoch in range(cfg.n_epoch):
        crnn.train()
        crnn_ema.train()
        crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
        loss_value = train(training_loader, crnn, optim, epoch,
                           ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation
        crnn = crnn.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict'] = crnn.state_dict()
        state['model_ema']['state_dict'] = crnn_ema.state_dict()
        state['optimizer']['state_dict'] = optim.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # logging.Logger.warn is deprecated; warning is the supported name.
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # Write the per-epoch results table (to_csv returns None, so no assignment).
    pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                                 sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                     pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
                                     save_predictions=predicitons_fname)
    psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 24,772 | 47.57451 | 124 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel.py | # -*- coding: utf-8 -*-
import argparse
import os.path as osp
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from evaluation_measures import psds_score, get_predictions_v2, \
compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
from utilities.Scaler import Scaler, ScalerPerAudio
from models.CRNN import CRNN
from models.Transformer import Transformer
from models.Conformer_bk import Conformer
import config as cfg
# Module-level logger and a fixed torch seed for reproducible evaluation runs.
logger = create_logger(__name__)
torch.manual_seed(2020)
def _load_model(state, model_type, model_name="model"):
    """Instantiate a model of the given type from a checkpoint state and load its weights.

    Args:
        state: dict, a loaded checkpoint containing the model's args/kwargs/state_dict.
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.

    Returns:
        The model in eval() mode, moved to GPU if available.

    Raises:
        ValueError: if `model_type` is not one of the supported types.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # Use '==' (not 'is') for string comparison: identity checks against literals
    # are implementation-dependent and emit a SyntaxWarning on Python >= 3.8.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        # Previously an unknown type fell through to a NameError on `model`.
        raise ValueError(f"Unknown model_type: {model_type}")
    model.load_state_dict(state[model_name]["state_dict"])
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_model_v2(state, model_id, model_type, model_name="model"):
    """Instantiate a model and load one of two saved weight sets from a checkpoint.

    Args:
        state: dict, a loaded checkpoint containing the model's args/kwargs and
            two weight sets ("state_dict1", "state_dict2").
        model_id: int, 1 or 2, which of the two saved weight sets to load.
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.

    Returns:
        The model in eval() mode, moved to GPU if available.

    Raises:
        ValueError: if `model_type` or `model_id` is not a supported value.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # '==' instead of 'is' for string comparison (identity on literals is unreliable).
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        raise ValueError(f"Unknown model_type: {model_type}")
    if model_id == 1:
        model.load_state_dict(state[model_name]["state_dict1"])
    elif model_id == 2:
        model.load_state_dict(state[model_name]["state_dict2"])
    else:
        # Previously an invalid id silently returned an *untrained* model.
        raise ValueError(f"Unknown model_id: {model_id} (expected 1 or 2)")
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_scaler(state):
    """Re-create the feature scaler saved in a training checkpoint state.

    Supports the two scaler flavours the training script can save
    (dataset-wide `Scaler` or per-file `ScalerPerAudio`).
    """
    saved = state["scaler"]
    scaler_type = saved["type"]
    if scaler_type == "ScalerPerAudio":
        scaler = ScalerPerAudio(*saved["args"])
    elif scaler_type == "Scaler":
        scaler = Scaler()
    else:
        raise NotImplementedError("Not the right type of Scaler has been saved in state")
    scaler.load_state_dict(saved["state_dict"])
    return scaler
def _load_state_vars(state, gtruth_df, median_win=None):
    """Rebuild the evaluation-time objects from a checkpoint state.

    Args:
        state: dict, loaded checkpoint (model, scaler, encoder, median window, ...).
        gtruth_df: pd.DataFrame, groundtruth metadata used to build the dataloader.
        median_win: int, optional, override for the median window; when None the
            value stored in the checkpoint is used.

    Returns:
        dict with keys "model", "dataloader", "pooling_time_ratio",
        "many_hot_encoder", "median_window".
    """
    pred_df = gtruth_df.copy()
    # Define dataloader
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    model = _load_model(state, 'crnn')
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=0)
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    strong_dataloader_ind = DataLoader(strong_dataload, batch_size=cfg.batch_size, drop_last=False)
    pooling_time_ratio = state["pooling_time_ratio"]
    # (a second, redundant ManyHotEncoder.load_state_dict call was removed here)
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": model,
        "dataloader": strong_dataloader_ind,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Resolve evaluation inputs from the parsed CLI arguments.

    Returns a 5-tuple: (model path, median window, groundtruth audio dir,
    groundtruth dataframe, durations dataframe). Missing meta/audio paths are
    derived from the groundtruth tsv path; the durations file is generated from
    the audio directory when absent or empty.
    """
    model_pth = args.model_path
    median_win = args.median_window
    base_name, ext = osp.splitext(args.groundtruth_tsv)

    meta_gt = args.meta_gt
    if meta_gt is None:
        meta_gt = base_name + "_durations" + ext

    gt_audio_pth = args.groundtruth_audio_dir
    if gt_audio_pth is None:
        gt_audio_pth = meta_path_to_audio_dir(base_name)
        # Useful because of the data format
        if "validation" in gt_audio_pth:
            gt_audio_pth = osp.dirname(gt_audio_pth)

    groundtruth = pd.read_csv(args.groundtruth_tsv, sep="\t")

    if not osp.exists(meta_gt):
        meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)
    else:
        meta_dur_df = pd.read_csv(meta_gt, sep='\t')
        if len(meta_dur_df) == 0:
            meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)

    return model_pth, median_win, gt_audio_pth, groundtruth, meta_dur_df
if __name__ == '__main__':
    # CLI entry point: evaluate a trained SED model on a groundtruth set
    # (event/segment metrics + single-threshold PSDS).
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    # Savepath for posterior
    parser.add_argument("-sp", '--save_posterior', type=str, default=None,
                        help="Save path for posterior")
    f_args = parser.parse_args()
    # Get variables from f_args
    model_path, median_window, gt_audio_dir, groundtruth, durations = get_variables(f_args)
    # Model
    # map_location="cpu" so checkpoints trained on GPU load on CPU-only hosts.
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=osp.join(cfg.workspace, "dataset", "features"), compute_log=False)
    gt_df_feat = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, nb_files=f_args.nb_files)
    params = _load_state_vars(expe_state, gt_df_feat, median_window)
    # Preds with only one value
    single_predictions = get_predictions_v2(params["model"], params["dataloader"],
                                            params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                            median_window=params["median_window"], save_dir = f_args.save_posterior,
                                            save_predictions=f_args.save_predictions_path)
    compute_metrics(single_predictions, groundtruth, durations)
    # The triple-quoted block below is deliberately disabled multi-threshold PSDS
    # scoring. NOTE(review): it calls get_predictions, which is not imported here
    # (only get_predictions_v2 is) — fix the name before re-enabling.
    '''
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(params["model"], params["dataloader"],
                                     params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                     thresholds=list_thresholds, median_window=params["median_window"],
                                     save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    fname_roc = None
    if f_args.save_predictions_path is not None:
        fname_roc = osp.splitext(f_args.save_predictions_path)[0] + "_roc.png"
    psds_score(psds, filename_roc_curves=fname_roc)
    '''
| 8,195 | 40.604061 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel_ss_late_integration.py | # -*- coding: utf-8 -*-
import argparse
import os
import os.path as osp
import scipy
import torch
from dcase_util.data import ProbabilityEncoder
import pandas as pd
import numpy as np
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from TestModel import _load_scaler, _load_crnn
from evaluation_measures import psds_score, compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
import config as cfg
logger = create_logger(__name__)
def norm_alpha(x, alpha_val):
    """Generalized (power) mean of `x` over axis 0: ((1/N) * sum(x**alpha)) ** (1/alpha)."""
    inv_count = 1 / x.shape[0]
    powered_sum = (x ** alpha_val).sum(0)
    return (inv_count * powered_sum) ** (1 / alpha_val)
def get_predictions_ss_late_integration(model, valid_dataload, decoder, pooling_time_ratio=1, thresholds=[0.5],
                                        median_window=1, save_predictions=None, alpha=1):
    """ Get the predictions of a trained model on a specific set
    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        valid_dataload: DataLoadDf, giving ((input_data, label), index) but label is not used here, the multiple
            data are the multiple sources (the mixture should always be the first one to appear, and then the sources)
            example: if the input data is (3, 1, timesteps, freq) there is the mixture and 2 sources.
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        median_window: int, the median window (in number of time steps) to be applied
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
        thresholds: list, list of threshold to be applied
        alpha: float, the value of the norm to combine the predictions
    Returns:
        dict of the different predictions with associated threshold
    """
    # Init a dataframe per threshold
    prediction_dfs = {}
    for threshold in thresholds:
        prediction_dfs[threshold] = pd.DataFrame()
    # Get predictions
    for i, ((input_data, _), index) in enumerate(valid_dataload):
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
        pred_strong = pred_strong.cpu()
        pred_strong = pred_strong.detach().numpy()
        if i == 0:
            logger.debug(pred_strong)
        # Late integration: alpha-norm over the separated sources, then the same
        # norm combining that result with the mixture prediction.
        pred_strong_sources = pred_strong[1:]
        pred_strong_sources = norm_alpha(pred_strong_sources, alpha)
        pred_strong_comb = norm_alpha(np.stack((pred_strong[0], pred_strong_sources), 0), alpha)
        # Get different post processing per threshold
        for threshold in thresholds:
            pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_comb,
                                                                binarization_type="global_threshold",
                                                                threshold=threshold)
            # scipy.ndimage.filters is a deprecated namespace; the function lives
            # directly in scipy.ndimage.
            pred_strong_m = scipy.ndimage.median_filter(pred_strong_bin, (median_window, 1))
            pred = decoder(pred_strong_m)
            pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
            # Put them in seconds
            pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
            pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
            pred["filename"] = valid_dataload.filenames.iloc[index]
            # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement.
            prediction_dfs[threshold] = pd.concat([prediction_dfs[threshold], pred], ignore_index=True)
            if i == 0:
                logger.debug("predictions: \n{}".format(pred))
                logger.debug("predictions strong: \n{}".format(pred_strong_comb))
    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                base, ext = osp.splitext(save_predictions)
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
            if ind % 10 == 0:
                logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")
    list_predictions = []
    for key in prediction_dfs:
        list_predictions.append(prediction_dfs[key])
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def _load_state_vars(state, gtruth_df, median_win=None):
    """Rebuild evaluation-time objects (model, data loader, encoder) from a checkpoint.

    Unlike the TestModel variant, this returns the DataLoadDf itself under
    "dataload" (no DataLoader wrapper), since the late-integration prediction
    function iterates it directly.

    NOTE(review): this calls `_load_crnn`, imported from TestModel — but that
    module (as seen in this file set) defines `_load_model`, not `_load_crnn`;
    confirm the import target exists.
    """
    pred_df = gtruth_df.copy()
    # Define dataloader
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    crnn = _load_crnn(state)
    # Note, need to unsqueeze axis 1
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=1)
    # Note, no dataloader here
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    pooling_time_ratio = state["pooling_time_ratio"]
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": crnn,
        "dataload": strong_dataload,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Resolve evaluation inputs from the parsed CLI arguments.

    Returns a 5-tuple: (model path, median window, groundtruth audio dir,
    durations dataframe, keep_sources list or None). Missing meta/audio paths
    are derived from the groundtruth tsv path; the durations file is generated
    from the audio directory when absent or empty; `keep_sources` is split on
    commas when provided.
    """
    model_pth = args.model_path
    median_win = args.median_window
    base_name, ext = osp.splitext(args.groundtruth_tsv)

    meta_gt = args.meta_gt
    if meta_gt is None:
        meta_gt = base_name + "_durations" + ext

    gt_audio_pth = args.groundtruth_audio_dir
    if gt_audio_pth is None:
        gt_audio_pth = meta_path_to_audio_dir(base_name)
        # Useful because of the data format
        if "validation" in gt_audio_pth:
            gt_audio_pth = osp.dirname(gt_audio_pth)

    if not osp.exists(meta_gt):
        meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)
    else:
        meta_dur_df = pd.read_csv(meta_gt, sep='\t')
        if len(meta_dur_df) == 0:
            meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)

    keep_sources = args.keep_sources
    if keep_sources is not None:
        keep_sources = keep_sources.split(",")

    return model_pth, median_win, gt_audio_pth, meta_dur_df, keep_sources
if __name__ == '__main__':
    # CLI entry point: evaluate a trained SED model with late integration of
    # source-separated predictions, then multi-threshold PSDS scoring.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Source separation
    parser.add_argument("-a", '--base_dir_ss', type=str, required=True,
                        help="Base directory of source separation. "
                             "Path where to search subdirectories in which there are isolated events")
    parser.add_argument("-k", "--keep_sources", type=str, default=None,
                        help="The sources to be kept from the sound_separation (each source separated by a comma)."
                             "Example: '1,2' keeps the 2nd and 3rd sources (begins at 0).")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev only
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    f_args = parser.parse_args()
    # Get variables from f_args
    model_path, median_window, gt_audio_dir, durations, keep_sources = get_variables(f_args)
    # map_location="cpu" so GPU-trained checkpoints load on CPU-only hosts.
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"), compute_log=False)
    groundtruth = pd.read_csv(f_args.groundtruth_tsv, sep="\t")
    gt_df_feat_ss = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, f_args.base_dir_ss,
                                                  pattern_ss="_events", nb_files=f_args.nb_files,
                                                  keep_sources=keep_sources)
    params = _load_state_vars(expe_state, gt_df_feat_ss, median_window)
    # alpha = 1 is a plain arithmetic mean of mixture and source predictions.
    alpha_norm = 1
    # Preds with only one value (note that in comparison of TestModel, here we do not use a dataloader)
    single_predictions = get_predictions_ss_late_integration(params["model"], params["dataload"],
                                                             params["many_hot_encoder"].decode_strong,
                                                             params["pooling_time_ratio"],
                                                             median_window=params["median_window"],
                                                             save_predictions=f_args.save_predictions_path,
                                                             alpha=alpha_norm)
    compute_metrics(single_predictions, groundtruth, durations)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions_ss_late_integration(params["model"], params["dataload"],
                                                         params["many_hot_encoder"].decode_strong,
                                                         params["pooling_time_ratio"],
                                                         thresholds=thresholds,
                                                         median_window=params["median_window"],
                                                         save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    # NOTE(review): the line below crashes when --save_predictions_path is omitted
    # (osp.splitext(None)); confirm whether the argument is effectively required.
    psds_score(psds, filename_roc_curves=osp.splitext(f_args.save_predictions_path)[0] + "_roc.png")
| 12,022 | 48.887967 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/evaluation_measures.py | # -*- coding: utf-8 -*-
import os
from os import path as osp
import psds_eval
import scipy
from dcase_util.data import ProbabilityEncoder
import sed_eval
import numpy as np
import pandas as pd
import torch
from psds_eval import plot_psd_roc, PSDSEval
import config as cfg
from utilities.Logger import create_logger
from utilities.utils import to_cuda_if_available
from utilities.ManyHotEncoder import ManyHotEncoder
logger = create_logger(__name__, terminal_level=cfg.terminal_level)
def get_event_list_current_file(df, fname):
"""
Get list of events for a given filename
:param df: pd.DataFrame, the dataframe to search on
:param fname: the filename to extract the value from the dataframe
:return: list of events (dictionaries) for the given filename
"""
event_file = df[df["filename"] == fname]
if len(event_file) == 1:
if pd.isna(event_file["event_label"].iloc[0]):
event_list_for_current_file = [{"filename": fname}]
else:
event_list_for_current_file = event_file.to_dict('records')
else:
event_list_for_current_file = event_file.to_dict('records')
return event_list_for_current_file
def event_based_evaluation_df(reference, estimated, t_collar=0.200, percentage_of_length=0.2):
""" Calculate EventBasedMetric given a reference and estimated dataframe
Args:
reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
reference events
estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
estimated events to be compared with reference
t_collar: float, in seconds, the number of time allowed on onsets and offsets
percentage_of_length: float, between 0 and 1, the percentage of length of the file allowed on the offset
Returns:
sed_eval.sound_event.EventBasedMetrics with the scores
"""
evaluated_files = reference["filename"].unique()
classes = []
classes.extend(reference.event_label.dropna().unique())
classes.extend(estimated.event_label.dropna().unique())
classes = list(set(classes))
event_based_metric = sed_eval.sound_event.EventBasedMetrics(
event_label_list=classes,
t_collar=t_collar,
percentage_of_length=percentage_of_length,
empty_system_output_handling='zero_score'
)
for fname in evaluated_files:
reference_event_list_for_current_file = get_event_list_current_file(reference, fname)
estimated_event_list_for_current_file = get_event_list_current_file(estimated, fname)
event_based_metric.evaluate(
reference_event_list=reference_event_list_for_current_file,
estimated_event_list=estimated_event_list_for_current_file,
)
return event_based_metric
def segment_based_evaluation_df(reference, estimated, time_resolution=1.):
    """ Compute sed_eval segment-based metrics between two event dataframes.
    Args:
        reference: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            reference events
        estimated: pd.DataFrame containing "filename" "onset" "offset" and "event_label" columns which describe the
            estimated events to be compared with reference
        time_resolution: float, the segment length (in seconds) used by the metric
    Returns:
        sed_eval.sound_event.SegmentBasedMetrics with the accumulated scores
    """
    # Union of the labels present in either dataframe.
    label_set = set(reference.event_label.dropna().unique())
    label_set |= set(estimated.event_label.dropna().unique())
    metrics = sed_eval.sound_event.SegmentBasedMetrics(
        event_label_list=list(label_set),
        time_resolution=time_resolution
    )
    # Accumulate scores file by file over every reference filename.
    for audio_file in reference["filename"].unique():
        metrics.evaluate(
            reference_event_list=get_event_list_current_file(reference, audio_file),
            estimated_event_list=get_event_list_current_file(estimated, audio_file)
        )
    return metrics
def get_predictions(model, dataloader, decoder, pooling_time_ratio=1, thresholds=(0.5,),
                    median_window=1, save_predictions=None):
    """ Get the predictions of a trained model on a specific set
    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        dataloader: torch.utils.data.DataLoader, giving ((input_data, label), indexes) but label is not used here
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        thresholds: iterable of float, the decision thresholds to be applied
            (a tuple default avoids the mutable-default-argument pitfall).
        median_window: int, the median window (in number of time steps) to be applied
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
    Returns:
        pd.DataFrame (single threshold) or list of pd.DataFrame (one per threshold)
    """
    # One output dataframe per decision threshold.
    prediction_dfs = {threshold: pd.DataFrame() for threshold in thresholds}

    # Get predictions
    for i, ((input_data, _), indexes) in enumerate(dataloader):
        indexes = indexes.numpy()
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
        pred_strong = pred_strong.cpu().detach().numpy()
        if i == 0:
            logger.debug(pred_strong)

        # Post processing and put predictions in a dataframe
        for j, pred_strong_it in enumerate(pred_strong):
            for threshold in thresholds:
                pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_it,
                                                                    binarization_type="global_threshold",
                                                                    threshold=threshold)
                # scipy.ndimage.filters is a deprecated alias; call scipy.ndimage directly.
                pred_strong_m = scipy.ndimage.median_filter(pred_strong_bin, (median_window, 1))
                pred = decoder(pred_strong_m)
                pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
                # Convert frame indexes to seconds and clip to the clip length.
                pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
                pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
                pred["filename"] = dataloader.dataset.filenames.iloc[indexes[j]]
                # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
                prediction_dfs[threshold] = pd.concat([prediction_dfs[threshold], pred], ignore_index=True)
                if i == 0 and j == 0:
                    logger.debug("predictions: \n{}".format(pred))
                    logger.debug("predictions strong: \n{}".format(pred_strong_it))

    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                # One file per threshold, inside a directory named after the base path.
                base, ext = osp.splitext(save_predictions)
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
            if ind % 10 == 0:
                logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")

    list_predictions = [prediction_dfs[threshold] for threshold in prediction_dfs]
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def get_predictions_v2(model, dataloader, decoder, pooling_time_ratio=1, thresholds=(0.5,),
                       median_window=1, save_dir=None, save_predictions=None):
    """ Get the predictions of a trained model on a specific set
    Args:
        model: torch.Module, a trained pytorch model (you usually want it to be in .eval() mode).
        dataloader: torch.utils.data.DataLoader, giving ((input_data, label), indexes) but label is not used here
        decoder: function, takes a numpy.array of shape (time_steps, n_labels) as input and return a list of lists
            of ("event_label", "onset", "offset") for each label predicted.
        pooling_time_ratio: the division to make between timesteps as input and timesteps as output
        thresholds: iterable of float, the decision thresholds to be applied
            (a tuple default avoids the mutable-default-argument pitfall).
        median_window: int, the median window (in number of time steps) to be applied
        save_dir: str, optional path prefix under which the raw frame-level
            posteriors of each clip are dumped as .npy files
        save_predictions: str or list, the path of the base_filename to save the predictions or a list of names
            corresponding for each thresholds
    Returns:
        pd.DataFrame (single threshold) or list of pd.DataFrame (one per threshold)
    """
    # One output dataframe per decision threshold.
    prediction_dfs = {threshold: pd.DataFrame() for threshold in thresholds}
    if save_dir is not None:
        os.makedirs(save_dir, exist_ok=True)

    # Get predictions
    for i, ((input_data, _), indexes) in enumerate(dataloader):
        indexes = indexes.numpy()
        input_data = to_cuda_if_available(input_data)
        with torch.no_grad():
            pred_strong, _ = model(input_data)
        pred_strong = pred_strong.cpu().detach().numpy()
        if i == 0:
            logger.debug(pred_strong)

        # Post processing and put predictions in a dataframe
        for j, pred_strong_it in enumerate(pred_strong):
            if save_dir is not None:
                save_path = save_dir + dataloader.dataset.filenames.iloc[indexes[j]]
                # BUG FIX: str.replace returns a new string; the original code
                # discarded the result, so posteriors were saved under the
                # ".wav" name. Swap the extension explicitly instead.
                save_path = osp.splitext(save_path)[0] + ".npy"
                np.save(save_path, pred_strong_it)
            for threshold in thresholds:
                pred_strong_bin = ProbabilityEncoder().binarization(pred_strong_it,
                                                                    binarization_type="global_threshold",
                                                                    threshold=threshold)
                # scipy.ndimage.filters is a deprecated alias; call scipy.ndimage directly.
                pred_strong_m = scipy.ndimage.median_filter(pred_strong_bin, (median_window, 1))
                pred = decoder(pred_strong_m)
                pred = pd.DataFrame(pred, columns=["event_label", "onset", "offset"])
                # Convert frame indexes to seconds and clip to the clip length.
                pred.loc[:, ["onset", "offset"]] *= pooling_time_ratio / (cfg.sample_rate / cfg.hop_size)
                pred.loc[:, ["onset", "offset"]] = pred[["onset", "offset"]].clip(0, cfg.max_len_seconds)
                pred["filename"] = dataloader.dataset.filenames.iloc[indexes[j]]
                # DataFrame.append was removed in pandas 2.0; use pd.concat instead.
                prediction_dfs[threshold] = pd.concat([prediction_dfs[threshold], pred], ignore_index=True)
                if i == 0 and j == 0:
                    logger.debug("predictions: \n{}".format(pred))
                    logger.debug("predictions strong: \n{}".format(pred_strong_it))

    # Save predictions
    if save_predictions is not None:
        if isinstance(save_predictions, str):
            if len(thresholds) == 1:
                save_predictions = [save_predictions]
            else:
                # One file per threshold, inside a directory named after the base path.
                base, ext = osp.splitext(save_predictions)
                save_predictions = [osp.join(base, f"{threshold:.3f}{ext}") for threshold in thresholds]
        else:
            assert len(save_predictions) == len(thresholds), \
                f"There should be a prediction file per threshold: len predictions: {len(save_predictions)}\n" \
                f"len thresholds: {len(thresholds)}"
        for ind, threshold in enumerate(thresholds):
            dir_to_create = osp.dirname(save_predictions[ind])
            if dir_to_create != "":
                os.makedirs(dir_to_create, exist_ok=True)
            if ind % 10 == 0:
                logger.info(f"Saving predictions at: {save_predictions[ind]}. {ind + 1} / {len(thresholds)}")
            prediction_dfs[threshold].to_csv(save_predictions[ind], index=False, sep="\t", float_format="%.3f")

    list_predictions = [prediction_dfs[threshold] for threshold in prediction_dfs]
    if len(list_predictions) == 1:
        list_predictions = list_predictions[0]
    return list_predictions
def psds_score(psds, filename_roc_curves=None):
    """ Compute and log PSDS metrics, optionally saving the ROC curves.
    Args:
        psds: psds_eval.PSDSEval object initialized with the groundtruth corresponding to the predictions
        filename_roc_curves: str, the base filename of the roc curves to be saved
    """
    try:
        # Locals renamed so they no longer shadow this function's name.
        base_score = psds.psds(alpha_ct=0, alpha_st=0, max_efpr=100)
        logger.info(f"\nPSD-Score (0, 0, 100): {base_score.value:.5f}")
        ct_score = psds.psds(alpha_ct=1, alpha_st=0, max_efpr=100)
        logger.info(f"\nPSD-Score (1, 0, 100): {ct_score.value:.5f}")
        macro_score = psds.psds(alpha_ct=0, alpha_st=1, max_efpr=100)
        logger.info(f"\nPSD-Score (0, 1, 100): {macro_score.value:.5f}")
        if filename_roc_curves is not None:
            if osp.dirname(filename_roc_curves) != "":
                os.makedirs(osp.dirname(filename_roc_curves), exist_ok=True)
            base, ext = osp.splitext(filename_roc_curves)
            plot_psd_roc(base_score, filename=f"{base}_0_0_100{ext}")
            plot_psd_roc(ct_score, filename=f"{base}_1_0_100{ext}")
            # BUG FIX: the (0, 1, 100) file previously plotted the (0, 0, 100)
            # curve again; plot the macro (alpha_st=1) curve as intended.
            plot_psd_roc(macro_score, filename=f"{base}_0_1_100{ext}")
    except psds_eval.psds.PSDSEvalError as e:
        logger.error("psds score did not work ....")
        logger.error(e)
def compute_sed_eval_metrics(predictions, groundtruth):
    """ Compute, log and return sed_eval metrics for a set of predictions.
    Args:
        predictions: pd.DataFrame of estimated events
        groundtruth: pd.DataFrame of reference events
    Returns:
        sed_eval.sound_event.EventBasedMetrics (the segment-based result is only logged)
    """
    event_res = event_based_evaluation_df(
        groundtruth, predictions, t_collar=0.200, percentage_of_length=0.2)
    segment_res = segment_based_evaluation_df(
        groundtruth, predictions, time_resolution=1.)
    logger.info(event_res)
    logger.info(segment_res)
    return event_res
def format_df(df, mhe):
    """ Make a weak labels dataframe from strongly labeled (join labels)
    Args:
        df: pd.DataFrame, the dataframe strongly labeled with onset and offset columns (+ event_label)
        mhe: ManyHotEncoder object, the many hot encoder object that can encode the weak labels
    Returns:
        weakly labeled dataframe (unchanged if df carries no onset/offset columns)
    """
    def _collapse(group):
        # One row per file: many-hot encoding of its unique, non-NaN labels.
        labels = group["event_label"].drop_duplicates().dropna().tolist()
        return pd.Series(dict(filename=group['filename'].iloc[0],
                              event_label=mhe.encode_weak(labels)))

    has_strong_columns = "onset" in df.columns or "offset" in df.columns
    if has_strong_columns:
        df = df.groupby("filename", as_index=False).apply(_collapse)
    return df
def get_f_measure_by_class(torch_model, nb_tags, dataloader_, thresholds_=None):
    """ get f measure for each class given a model and a generator of data (batch_x, y)
    Args:
        torch_model : Model, model to get predictions, forward should return weak and strong predictions
        nb_tags : int, number of classes which are represented
        dataloader_ : generator, data generator used to get f_measure
        thresholds_ : int or list, thresholds to apply to each class to binarize probabilities
    Returns:
        macro_f_measure : np.array, f measure for each class
    """
    if torch.cuda.is_available():
        torch_model = torch_model.cuda()

    # Per-class accumulators for the whole set.
    true_pos = np.zeros(nb_tags)
    true_neg = np.zeros(nb_tags)
    false_pos = np.zeros(nb_tags)
    false_neg = np.zeros(nb_tags)
    for batch_x, y in dataloader_:
        if torch.cuda.is_available():
            batch_x = batch_x.cuda()
        pred_strong, pred_weak = torch_model(batch_x)
        pred_weak = pred_weak.cpu().data.numpy()
        labels = y.numpy()

        # Models predicting only strong outputs give (batch, time, classes);
        # pool over time to obtain clip-level (weak) values.
        if pred_weak.ndim == 3:
            pred_weak = np.max(pred_weak, axis=1)
        if labels.ndim == 3:
            labels = np.max(labels, axis=1)
            labels = ProbabilityEncoder().binarization(labels,
                                                       binarization_type="global_threshold",
                                                       threshold=0.5)

        if thresholds_ is None:
            binarization_type = 'global_threshold'
            thresh = 0.5
        else:
            binarization_type = "class_threshold"
            assert type(thresholds_) is list
            thresh = thresholds_

        batch_pred = ProbabilityEncoder().binarization(pred_weak,
                                                       binarization_type=binarization_type,
                                                       threshold=thresh,
                                                       time_axis=0
                                                       )
        tp_, fp_, fn_, tn_ = intermediate_at_measures(labels, batch_pred)
        true_pos += tp_
        false_pos += fp_
        false_neg += fn_
        true_neg += tn_

    # Macro F1 per class; classes never seen (denominator 0) score 0.
    denom = 2 * true_pos + false_pos + false_neg
    scores = np.zeros(nb_tags)
    nonzero = denom != 0
    scores[nonzero] = 2 * true_pos[nonzero] / denom[nonzero]
    return scores
def intermediate_at_measures(encoded_ref, encoded_est):
    """ Calculate true/false - positives/negatives.
    Args:
        encoded_ref: np.array, the reference array where a 1 means the label is present, 0 otherwise
        encoded_est: np.array, the estimated array, where a 1 means the label is present, 0 otherwise
    Returns:
        tuple
        number of (true positives, false positives, false negatives, true negatives),
        each summed over axis 0 (i.e. per class)
    """
    ref_on = encoded_ref == 1
    est_on = encoded_est == 1
    tp = np.logical_and(ref_on, est_on).sum(axis=0)
    fp = np.logical_and(~ref_on, est_on).sum(axis=0)
    fn = np.logical_and(ref_on, ~est_on).sum(axis=0)
    tn = np.logical_and(~ref_on, ~est_on).sum(axis=0)
    return tp, fp, fn, tn
def macro_f_measure(tp, fp, fn):
    """ From intermediates measures, give the macro F-measure
    Args:
        tp: np.array, number of true positives per class
        fp: np.array, number of false positives per class
        fn: np.array, number of false negatives per class
    Returns:
        np.array
        The F-measure per class (0 for classes whose denominator is 0)
    """
    denom = 2 * tp + fp + fn
    scores = np.zeros(tp.shape[-1])
    valid = denom != 0
    scores[valid] = 2 * tp[valid] / denom[valid]
    return scores
def audio_tagging_results(reference, estimated):
    """Compute a per-class F-measure for clip-level (weak) audio tagging.

    Args:
        reference: pd.DataFrame, groundtruth; either strongly labeled
            (an "event_label" column, one row per event) or weakly labeled
            (an "event_labels" column of comma-separated labels).
        estimated: pd.DataFrame, predictions in the same format as reference.
    Returns:
        pd.Series of F-measures indexed by class label.
    """
    classes = []
    if "event_label" in reference.columns:
        # Strong-label format: collapse each file's events into one weak row.
        classes.extend(reference.event_label.dropna().unique())
        classes.extend(estimated.event_label.dropna().unique())
        classes = list(set(classes))
        mhe = ManyHotEncoder(classes)
        reference = format_df(reference, mhe)
        estimated = format_df(estimated, mhe)
    else:
        # Weak-label format: labels are comma-joined inside "event_labels".
        classes.extend(reference.event_labels.str.split(',', expand=True).unstack().dropna().unique())
        classes.extend(estimated.event_labels.str.split(',', expand=True).unstack().dropna().unique())
        classes = list(set(classes))
        mhe = ManyHotEncoder(classes)
    # Outer join so files missing from either side still appear (as NaN rows).
    matching = reference.merge(estimated, how='outer', on="filename", suffixes=["_ref", "_pred"])

    def na_values(val):
        # Replace a missing many-hot vector by an all-zeros vector.
        if type(val) is np.ndarray:
            return val
        if pd.isna(val):
            return np.zeros(len(classes))
        return val

    if not estimated.empty:
        matching.event_label_pred = matching.event_label_pred.apply(na_values)
        matching.event_label_ref = matching.event_label_ref.apply(na_values)
        tp, fp, fn, tn = intermediate_at_measures(np.array(matching.event_label_ref.tolist()),
                                                  np.array(matching.event_label_pred.tolist()))
        macro_res = macro_f_measure(tp, fp, fn)
    else:
        # No predictions at all: every class scores 0.
        macro_res = np.zeros(len(classes))
    results_serie = pd.DataFrame(macro_res, index=mhe.labels)
    return results_serie[0]
def compute_psds_from_operating_points(list_predictions, groundtruth_df, meta_df, dtc_threshold=0.5, gtc_threshold=0.5,
                                       cttc_threshold=0.3):
    """ Build a PSDSEval object and register one operating point per prediction set.
    Args:
        list_predictions: list of pd.DataFrame, one prediction dataframe per threshold
        groundtruth_df: pd.DataFrame, reference events
        meta_df: pd.DataFrame, per-file durations metadata
        dtc_threshold: float, detection tolerance criterion threshold
        gtc_threshold: float, groundtruth intersection criterion threshold
        cttc_threshold: float, cross-trigger tolerance criterion threshold
    Returns:
        psds_eval.PSDSEval with all operating points added
    """
    evaluator = PSDSEval(dtc_threshold, gtc_threshold, cttc_threshold,
                         ground_truth=groundtruth_df, metadata=meta_df)
    for preds in list_predictions:
        evaluator.add_operating_point(preds)
    return evaluator
def compute_metrics(predictions, gtruth_df, meta_df):
    """ Compute and log the event-based macro F1 and the psds_eval macro F1.
    Args:
        predictions: pd.DataFrame, estimated events
        gtruth_df: pd.DataFrame, reference events
        meta_df: pd.DataFrame, per-file durations metadata
    Returns:
        tuple (event-based macro F1, psds_eval macro F1)
    """
    event_metrics = compute_sed_eval_metrics(predictions, gtruth_df)
    macro_f1_event = event_metrics.results_class_wise_average_metrics()['f_measure']['f_measure']
    # PSDS criteria thresholds: dtc, gtc, cttc.
    evaluator = PSDSEval(0.5, 0.5, 0.3, ground_truth=gtruth_df, metadata=meta_df)
    psds_macro_f1, _psds_f1_per_class = evaluator.compute_macro_f_score(predictions)
    logger.info(f"F1_score (psds_eval) accounting cross triggers: {psds_macro_f1}")
    return macro_f1_event, psds_macro_f1
| 22,296 | 42.044402 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/TestModel_dual.py | # -*- coding: utf-8 -*-
import argparse
import os.path as osp
import torch
from torch.utils.data import DataLoader
import numpy as np
import pandas as pd
from data_utils.DataLoad import DataLoadDf
from data_utils.Desed import DESED
from evaluation_measures import psds_score, get_predictions_v2, \
compute_psds_from_operating_points, compute_metrics
from utilities.utils import to_cuda_if_available, generate_tsv_wav_durations, meta_path_to_audio_dir
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
from utilities.Logger import create_logger
from utilities.Scaler import Scaler, ScalerPerAudio
from models.CRNN import CRNN
from models.Transformer import Transformer
from models.Conformer_bk import Conformer
import config as cfg
logger = create_logger(__name__)
torch.manual_seed(2020)
def _load_model(state, model_type, model_name="model"):
    """Rebuild a model from a saved checkpoint state and load its weights.

    Args:
        state: dict, checkpoint loaded with torch.load, containing the model's
            constructor args/kwargs and its state_dict under `model_name`.
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.
    Returns:
        the model in eval() mode, moved to GPU if available.
    Raises:
        ValueError: if model_type is not one of the supported types.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # BUG FIX: the original used `is` to compare strings, which relies on
    # CPython string interning and is not guaranteed to hold; use `==`.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        # Previously an unknown type crashed later with UnboundLocalError.
        raise ValueError(f"Unknown model_type: {model_type!r}")
    model.load_state_dict(state[model_name]["state_dict"])
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_model_v2(state, model_id, model_type, model_name="model"):
    """Rebuild one of the two co-trained models from a saved checkpoint state.

    Args:
        state: dict, checkpoint loaded with torch.load, containing the model's
            constructor args/kwargs and two state_dicts under `model_name`.
        model_id: int, 1 or 2, which of the two saved state_dicts to load.
        model_type: str, one of 'crnn', 'transformer', 'conformer'.
        model_name: str, key of the model entry inside `state`.
    Returns:
        the model in eval() mode, moved to GPU if available.
    Raises:
        ValueError: if model_type or model_id is not supported.
    """
    model_args = state[model_name]["args"]
    model_kwargs = state[model_name]["kwargs"]
    # BUG FIX: the original used `is` to compare strings, which relies on
    # CPython string interning and is not guaranteed to hold; use `==`.
    if model_type == 'crnn':
        model = CRNN(*model_args, **model_kwargs)
    elif model_type == 'transformer':
        model = Transformer(*model_args, **model_kwargs)
    elif model_type == 'conformer':
        model = Conformer(*model_args, **model_kwargs)
    else:
        # Previously an unknown type crashed later with UnboundLocalError.
        raise ValueError(f"Unknown model_type: {model_type!r}")
    if model_id == 1:
        model.load_state_dict(state[model_name]["state_dict1"])
    elif model_id == 2:
        model.load_state_dict(state[model_name]["state_dict2"])
    else:
        # Previously an invalid id silently returned an untrained model.
        raise ValueError(f"model_id must be 1 or 2, got {model_id!r}")
    model.eval()
    model = to_cuda_if_available(model)
    logger.info("Model loaded at epoch: {}".format(state["epoch"]))
    logger.info(model)
    return model
def _load_scaler(state):
    """Restore the feature scaler stored in a training checkpoint.

    Args:
        state: dict, checkpoint containing a "scaler" entry with its type,
            constructor args and state_dict.
    Returns:
        a Scaler or ScalerPerAudio instance with its saved state loaded.
    Raises:
        NotImplementedError: if the saved scaler type is unknown.
    """
    scaler_state = state["scaler"]
    saved_type = scaler_state["type"]
    if saved_type == "ScalerPerAudio":
        scaler = ScalerPerAudio(*scaler_state["args"])
    elif saved_type == "Scaler":
        scaler = Scaler()
    else:
        raise NotImplementedError("Not the right type of Scaler has been saved in state")
    scaler.load_state_dict(scaler_state["state_dict"])
    return scaler
def _load_state_vars(state, gtruth_df, median_win=None):
    """Build everything needed to run inference from a training checkpoint.

    Args:
        state: dict, checkpoint loaded with torch.load.
        gtruth_df: pd.DataFrame, the metadata of the set to predict on.
        median_win: int, optional median-filter window; falls back to the
            value stored in the checkpoint when None.
    Returns:
        dict with "model", "dataloader", "pooling_time_ratio",
        "many_hot_encoder" and "median_window".
    """
    pred_df = gtruth_df.copy()
    # Label encoder loaded once (the original loaded it twice).
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    # NOTE(review): model 1 with type 'crnn' is hard-coded here — confirm this
    # matches the checkpoint being evaluated.
    model = _load_model_v2(state, 1, 'crnn')
    # Only the validation-time transforms are needed for inference; the
    # original also built (and never used) the noisy training transforms.
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=0)
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid, return_indexes=True)
    strong_dataloader_ind = DataLoader(strong_dataload, batch_size=cfg.batch_size, drop_last=False)
    pooling_time_ratio = state["pooling_time_ratio"]
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": model,
        "dataloader": strong_dataloader_ind,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win
    }
def get_variables(args):
    """Unpack the command-line arguments and load groundtruth + durations.

    Args:
        args: argparse.Namespace with model_path, groundtruth_tsv,
            median_window, meta_gt and groundtruth_audio_dir attributes.
    Returns:
        tuple: (model path, median window, groundtruth audio dir,
                groundtruth pd.DataFrame, durations pd.DataFrame)
    """
    gt_fname, ext = osp.splitext(args.groundtruth_tsv)

    # Default durations file lives next to the groundtruth tsv.
    meta_gt = args.meta_gt
    if meta_gt is None:
        meta_gt = gt_fname + "_durations" + ext

    gt_audio_pth = args.groundtruth_audio_dir
    if gt_audio_pth is None:
        gt_audio_pth = meta_path_to_audio_dir(gt_fname)
    # Useful because of the data format: validation audio lives one level up.
    if "validation" in gt_audio_pth:
        gt_audio_pth = osp.dirname(gt_audio_pth)

    groundtruth = pd.read_csv(args.groundtruth_tsv, sep="\t")

    # Reuse the durations file when it exists and is non-empty,
    # otherwise (re)generate it from the audio.
    meta_dur_df = pd.read_csv(meta_gt, sep='\t') if osp.exists(meta_gt) else pd.DataFrame()
    if len(meta_dur_df) == 0:
        meta_dur_df = generate_tsv_wav_durations(gt_audio_pth, meta_gt)

    return args.model_path, args.median_window, gt_audio_pth, groundtruth, meta_dur_df
if __name__ == '__main__':
    # Command-line entry point: evaluate a saved dual-model checkpoint on a
    # labelled set and report event-based and PSDS metrics.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-m", '--model_path', type=str, required=True,
                        help="Path of the model to be evaluated")
    parser.add_argument("-g", '--groundtruth_tsv', type=str, required=True,
                        help="Path of the groundtruth tsv file")
    # Not required after that, but recommended to defined
    parser.add_argument("-mw", "--median_window", type=int, default=None,
                        help="Nb of frames for the median window, "
                             "if None the one defined for testing after training is used")
    # Next groundtruth variable could be ommited if same organization than DESED dataset
    parser.add_argument('--meta_gt', type=str, default=None,
                        help="Path of the groundtruth description of feat_filenames and durations")
    parser.add_argument("-ga", '--groundtruth_audio_dir', type=str, default=None,
                        help="Path of the groundtruth filename, (see in config, at dataset folder)")
    parser.add_argument("-s", '--save_predictions_path', type=str, default=None,
                        help="Path for the predictions to be saved (if needed)")
    # Dev
    parser.add_argument("-n", '--nb_files', type=int, default=None,
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-sp", '--save_posterior', type=str, default=None,
                        help="Save path for posterior")
    f_args = parser.parse_args()

    # Get variables from f_args
    model_path, median_window, gt_audio_dir, groundtruth, durations = get_variables(f_args)

    # Model: load the checkpoint on CPU first; _load_state_vars moves it to GPU
    # if one is available.
    expe_state = torch.load(model_path, map_location="cpu")
    dataset = DESED(base_feature_dir=osp.join(cfg.workspace, "dataset", "features"), compute_log=False)
    gt_df_feat = dataset.initialize_and_get_df(f_args.groundtruth_tsv, gt_audio_dir, nb_files=f_args.nb_files)
    params = _load_state_vars(expe_state, gt_df_feat, median_window)

    # Preds with only one value (single decision threshold)
    single_predictions = get_predictions_v2(params["model"], params["dataloader"],
                                            params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                            median_window=params["median_window"], save_dir=f_args.save_posterior,
                                            save_predictions=f_args.save_predictions_path)
    compute_metrics(single_predictions, groundtruth, durations)

    # Disabled multi-threshold PSDS evaluation (kept for reference).
    '''
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(params["model"], params["dataloader"],
                                     params["many_hot_encoder"].decode_strong, params["pooling_time_ratio"],
                                     thresholds=list_thresholds, median_window=params["median_window"],
                                     save_predictions=f_args.save_predictions_path)
    psds = compute_psds_from_operating_points(pred_ss_thresh, groundtruth, durations)
    fname_roc = None
    if f_args.save_predictions_path is not None:
        fname_roc = osp.splitext(f_args.save_predictions_path)[0] + "_roc.png"
    psds_score(psds, filename_roc_curves=fname_roc)
    '''
| 8,284 | 40.633166 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_CRST_model_v2.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel_dual import _load_model_v2
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, JSD, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms, get_transforms_v2
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """ Set the learning rate of every parameter group of the optimizer.

    The new rate is rampup_value * rampdown_value * cfg.max_learning_rate
    (LR warm-up for large minibatches, after https://arxiv.org/abs/1706.02677).
    Beta/weight-decay ramping was deliberately disabled to match the 2nd
    system of last year from Orange.

    Args:
        optimizer: torch.optim.Optimizer, the optimizer to be updated
        rampup_value: float, value between 0 and 1 that should increase linearly
        rampdown_value: float, value between 1 and 0 that should decrease linearly
    """
    new_lr = rampup_value * rampdown_value * cfg.max_learning_rate
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponentially average the student weights into the teacher (EMA) model.

    Args:
        model: torch.nn.Module, the student whose weights are averaged in.
        ema_model: torch.nn.Module, the teacher updated in place.
        alpha: float, EMA decay; capped so early steps use the true average.
        global_step: int, number of optimizer steps taken so far.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # ema = alpha * ema + (1 - alpha) * param
        # (the positional add_(scalar, tensor) overload is deprecated in torch;
        # pass the scaling factor via the `alpha` keyword instead).
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model1, model2, optimizer1, optimizer2, c_epoch, ema_model1=None, ema_model2=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of co-training with two student/teacher (EMA) model pairs.
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: (((input, augmented input), labels2), labels)
        model1: torch.Module, first student model, should return a strong and a weak prediction
        model2: torch.Module, second student model, fed with the augmented input
        optimizer1: torch.optim.Optimizer, optimizer used to train model1
        optimizer2: torch.optim.Optimizer, optimizer used to train model2
        c_epoch: int, the current epoch of training
        ema_model1: torch.Module, EMA (teacher) counterpart of model1
        ema_model2: torch.Module, EMA (teacher) counterpart of model2
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calculate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)
    Returns:
        tuple, the losses (loss1, loss2) of the LAST batch of the epoch
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    # NOTE(review): reliability_criterion is never used below.
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')
    jsd = JSD()
    softmax = nn.Softmax(dim=1)
    # Lookup table of label combinations used to turn sorted combination
    # probabilities back into per-class targets (see pseudo-labelling below).
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    #plabel = []
    for i, (((batch_input, batch_input_ema), target2), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))

        if adjust_lr:
            # Model 2 uses a slightly smaller rate (rampdown 0.9 vs 1.0).
            adjust_learning_rate(optimizer1, rampup_value, rampdown_value=1.0)
            adjust_learning_rate(optimizer2, rampup_value, rampdown_value=0.9)
        meters.update('lr', optimizer1.param_groups[0]['lr'])
        target2 = target2.type(torch.FloatTensor)
        batch_input, batch_input_ema, target, target2 = to_cuda_if_available(batch_input, batch_input_ema, target, target2)
        # Outputs: each student sees one view, each teacher sees the other view.
        strong_pred1, weak_pred1 = model1(batch_input)
        strong_predict1, weak_predict1 = ema_model1(batch_input_ema)
        strong_predict1 = strong_predict1.detach()
        weak_predict1 = weak_predict1.detach()
        # data augmentation
        strong_pred2, weak_pred2 = model2(batch_input_ema)
        strong_predict2, weak_predict2 = ema_model2(batch_input)
        strong_predict2 = strong_predict2.detach()
        weak_predict2 = weak_predict2.detach()

        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        target2_weak = target2.max(-2)[0]
        if mask_weak is not None:
            weak_class_loss1 = class_criterion(weak_pred1[mask_weak], target_weak[mask_weak]).mean()
            weak_class_loss2 = class_criterion(weak_pred2[mask_weak], target2_weak[mask_weak]).mean()
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss1: {weak_class_loss1} \t rampup_value: {rampup_value}"
                          f"weak loss2: {weak_class_loss2} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss1', weak_class_loss1.item())
            meters.update('weak_class_loss2', weak_class_loss2.item())

        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss1 = class_criterion(strong_pred1[mask_strong], target[mask_strong]).mean()
            strong_class_loss2 = class_criterion(strong_pred2[mask_strong], target2[mask_strong]).mean()
            meters.update('Strong loss1', strong_class_loss1.item())
            meters.update('Strong loss2', strong_class_loss2.item())

        # Teacher-student consistency cost
        if ema_model1 is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)

            # Self-labeling: derive frame-level pseudo labels from the two
            # teachers' posteriors by scoring label combinations (K active
            # classes, K in {0, 1, 2}) in log-space per frame.
            # NOTE(review): 157 appears to be the number of output frames per
            # clip — confirm it matches the network/pooling configuration.
            n_unlabeled = int(3*cfg.batch_size/4)
            est_strong_target1 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            est_strong_target2 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            for bter in range(cfg.batch_size):
                # Per-class log-probabilities of "active" (h1) / "inactive" (h0),
                # clamped away from 0 and 1 for numerical stability.
                sp1 = strong_predict1[bter]
                sp1 = torch.clamp(sp1, 1.0e-4, 1-1.0e-4)
                p1_h1 = torch.log(sp1)
                p1_h0 = torch.log(1-sp1)

                sp2 = strong_predict2[bter]
                sp2 = torch.clamp(sp2, 1.0e-4, 1-1.0e-4)
                p2_h1 = torch.log(sp2)
                p2_h0 = torch.log(1-sp2)

                # Stack both teachers' frames so they are scored in one pass.
                p_h0 = torch.cat((p1_h0, p2_h0), 0)
                p_h1 = torch.cat((p1_h1, p2_h1), 0)

                # K = 0
                P0 = p_h0.sum(1)
                # K = 1
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2
                P2 = []
                for cter in range(1,cfg.nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                P = torch.cat([P0.reshape(157*2,1), P1, P2], 1)
                # K: up to 3
                #P3 = []
                #for cter1 in range(1,cfg.nClass):
                #    for cter2 in range(1, cfg.nClass-cter1):
                #        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                #P3 = torch.cat(P3,1)
                #P3 = P3 - 2*P0[:,None]
                #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)

                # Normalize the combination scores and take the probability-
                # weighted average of the corresponding many-hot label vectors.
                P = softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                # First 157 frames belong to teacher 1, the rest to teacher 2.
                est_strong_target1[bter,:,:] = torch.squeeze(cl[:157,:])
                est_strong_target2[bter,:,:] = torch.squeeze(cl[157:,:])
            est_weak_target1 = est_strong_target1.mean(1)
            est_weak_target2 = est_strong_target2.mean(1)

            # Reliability of each pseudo label set: 1 - JSD against the true
            # labels, scaled by the ramp-up weight.
            strong_reliability1 = rampup_weight*(1-jsd.apply(est_strong_target1[mask_strong], target2[mask_strong]).mean())
            strong_reliability2 = rampup_weight*(1-jsd.apply(est_strong_target2[mask_strong], target[mask_strong]).mean())
            weak_reliability1 = rampup_weight*(1-jsd.apply(est_weak_target1[mask_weak], target2_weak[mask_weak]).mean())
            weak_reliability2 = rampup_weight*(1-jsd.apply(est_weak_target2[mask_weak], target_weak[mask_weak]).mean())
            meters.update('Reliability of pseudo label1', strong_reliability1.item())
            meters.update('Reliability of pseudo label2', strong_reliability2.item())

            # classification error with pseudo label (cross supervision: each
            # student is trained on the OTHER pair's pseudo labels).
            # NOTE(review): the 6:n_unlabeled slice looks like the unlabeled
            # portion of the batch layout — confirm against the batch sampler.
            pred_strong_loss1 = mse_criterion(strong_pred1[6:n_unlabeled], est_strong_target2[6:n_unlabeled]).mean([1,2])
            pred_weak_loss1 = mse_criterion(strong_pred1[mask_weak], est_strong_target2[mask_weak]).mean([1,2])
            pred_strong_loss2 = mse_criterion(strong_pred2[6:n_unlabeled], est_strong_target1[6:n_unlabeled]).mean([1,2])
            pred_weak_loss2 = mse_criterion(strong_pred2[mask_weak], est_strong_target1[mask_weak]).mean([1,2])
            expect_loss1 = strong_reliability2*pred_strong_loss1.mean() + weak_reliability2*pred_weak_loss1.mean()
            expect_loss2 = strong_reliability1*pred_strong_loss2.mean() + weak_reliability1*pred_weak_loss2.mean()
            meters.update('Expectation of predict loss1', expect_loss1.item())
            meters.update('Expectation of predict loss2', expect_loss2.item())

        loss1 = weak_class_loss1 + strong_class_loss1 + expect_loss1
        loss2 = weak_class_loss2 + strong_class_loss2 + expect_loss2
        meters.update('Loss1', loss1.item())
        meters.update('Loss2', loss2.item())

        # Skip the update when the loss diverges (NaN or exploding).
        if (np.isnan(loss1.item()) or loss1.item() > 1e5):
            print(loss1)
            print(loss2)
        else:
            # compute gradient and do optimizer step
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()
            optimizer2.zero_grad()
            loss2.backward()
            optimizer2.step()
        global_step += 1
        if ema_model1 is not None:
            update_ema_variables(model1, ema_model1, 0.999, global_step)
        if ema_model2 is not None:
            update_ema_variables(model2, ema_model2, 0.999, global_step)

    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss1, loss2
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Load and split the DESED DataFrames used for training and validation.

    Args:
        desed_dataset: DESED, dataset wrapper used to build the DataFrames.
        nb_files: int, optional, restrict each subset to this many files
            (useful for quick debugging runs).
        separated_sources: bool, whether to use the source-separated audio
            directories from config instead of the original mixtures.

    Returns:
        dict of pandas.DataFrame with keys: "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation".

    Note:
        Relies on the script-level global ``pooling_time_ratio`` to convert
        synthetic onset/offset times from seconds to output frames, so it must
        be called after that variable is defined.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss

    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even if synthetic data are not used for training, they are used for validation.
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (80/20 split on unique filenames,
    # fixed random_state for reproducibility).
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    # .copy() avoids pandas chained-assignment (SettingWithCopyWarning and
    # potentially silent no-op writes) when onset/offset are rewritten below.
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)].copy()
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not done for valid, because labels are unused at prediction time and the
    # event-based metric expects seconds.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())

    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
    # Reproducibility: fix RNG seeds for both torch and numpy.
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")

    # Command-line arguments.
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))

    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic

    # Output directories. Training resumes from a checkpoint when the model
    # directory already exists (see load_flag).
    store_dir = os.path.join("stored_data", "MeanTeacher_with_dual_v3_mixup6")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # os.makedirs creates intermediate directories, so probing saved_model_dir
    # alone is equivalent to the previous nested os.path.exists checks.
    load_flag = os.path.exists(saved_model_dir)
    os.makedirs(saved_model_dir, exist_ok=True)
    os.makedirs(saved_pred_dir, exist_ok=True)

    n_channel = 1
    add_axis_conv = 0

    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2

    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")

    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)

    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)

    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df

    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)

    # Training transforms include a time-shift augmentation; validation uses
    # plain (scaled) features only.
    transforms = get_transforms_v2(cfg.max_frames, scaler, add_axis_conv,
                                   shift_dict_params={"net_pooling": 4})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)

    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")

    # Batch composition: 1/4 weak, 1/2 unlabeled, 1/4 synthetic (strong);
    # the slices below select the corresponding parts of each batch.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one

    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)

    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume from the most recent checkpoint found in saved_model_dir.
        # NOTE(review): os.listdir order is arbitrary and a "baseline_best"
        # file would make int(...) fail below -- this assumes only
        # "baseline_epoch_<n>" files exist and the last listed one is the
        # latest; verify.
        mlist = os.listdir(saved_model_dir)
        modelName = mlist[-1]
        # int() instead of np.int(): np.int is deprecated and removed in numpy >= 1.24.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn1 = _load_model_v2(state, 1, 'crnn')
        crnn2 = _load_model_v2(state, 2, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn1_ema = _load_model_v2(state, 1, 'crnn')
        for param in crnn1_ema.parameters():
            # In-place detach_(), matching the fresh-initialisation branch:
            # the plain detach() used before returned a new tensor and had no
            # effect on the EMA parameters.
            param.detach_()
        crnn2_ema = _load_model_v2(state, 2, 'crnn')
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn1 = CRNN(**crnn_kwargs)
        crnn2 = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn1.parameters() if p.requires_grad)
        logger.info(crnn1)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn1.apply(weights_init)
        crnn2.apply(weights_init)
        # Teacher (EMA) models: same architecture, parameters kept out of autograd.
        crnn1_ema = CRNN(**crnn_kwargs)
        crnn2_ema = CRNN(**crnn_kwargs)
        crnn1_ema.apply(weights_init)
        crnn2_ema.apply(weights_init)
        for param in crnn1_ema.parameters():
            param.detach_()
        for param in crnn2_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
        optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)

    # Everything needed to reload/resume training lives in this checkpoint dict.
    state = {
        'model': {"name1": crnn1.__class__.__name__,
                  "name2": crnn2.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict1': crnn1.state_dict(),
                  'state_dict2': crnn2.state_dict()},
        'model_ema': {"name1": crnn1_ema.__class__.__name__,
                      "name2": crnn2_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict1': crnn1_ema.state_dict(),
                      'state_dict2': crnn2_ema.state_dict()},
        'optimizer': {"name1": optim1.__class__.__name__,
                      "name2": optim2.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict1': optim1.state_dict(),
                      'state_dict2': optim2.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }

    # "sup": a higher validation F1 is better.
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)

    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(n_epoch, n_epoch + cfg.n_epoch):
        crnn1.train()
        crnn2.train()
        crnn1_ema.train()
        crnn2_ema.train()
        crnn1, crnn2, crnn1_ema, crnn2_ema = to_cuda_if_available(crnn1, crnn2, crnn1_ema, crnn2_ema)

        loss_value, loss_value2 = train(training_loader, crnn1, crnn2, optim1, optim2, epoch,
                                        ema_model1=crnn1_ema, ema_model2=crnn2_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)

        # Validation: only model 1 is evaluated on the synthetic validation set.
        crnn1 = crnn1.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn1, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)

        # Update the checkpoint state with the latest weights and metrics.
        state['model']['state_dict1'] = crnn1.state_dict()
        state['model']['state_dict2'] = crnn2.state_dict()
        state['model_ema']['state_dict1'] = crnn1_ema.state_dict()
        state['model_ema']['state_dict2'] = crnn2_ema.state_dict()
        state['optimizer']['state_dict1'] = optim1.state_dict()
        state['optimizer']['state_dict2'] = optim2.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1

        # Callbacks: periodic checkpoints and best-model tracking.
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
            results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1

        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                logger.warn("EARLY STOPPING")
                break

    if cfg.save_best:
        # Reload the best checkpoint for final evaluation.
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model_v2(state, 1, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        # Fix: `crnn` was previously undefined on this path, causing a
        # NameError at crnn.eval() below; evaluate the last-epoch model 1.
        crnn = crnn1
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv returns None, so no binding is kept for its result.
    pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                                 sep="\t", index=False, float_format="%.4f")

    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)

    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    # n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    # list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    # pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
    #                                  pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
    #                                  save_predictions=predicitons_fname)
    # psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    # psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 28,622 | 48.35 | 158 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_CRST_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel_dual import _load_model_v2
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, JSD, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Set the optimizer's learning rate from ramp-up/ramp-down factors.

    The effective rate is ``rampup_value * rampdown_value * cfg.max_learning_rate``
    — an LR warm-up scheme for large minibatches (https://arxiv.org/abs/1706.02677).

    Args:
        optimizer: torch.optim.Optimizer, updated in place (all param groups).
        rampup_value: float, between 0 and 1, increases linearly during warm-up.
        rampdown_value: float, between 1 and 0, decreases linearly; defaults to 1
            (i.e. no ramp-down).
    """
    new_lr = cfg.max_learning_rate * rampup_value * rampdown_value
    for group in optimizer.param_groups:
        group['lr'] = new_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Exponential-moving-average update of the teacher (EMA) parameters.

    Performs ``ema = alpha * ema + (1 - alpha) * student`` in place on every
    parameter, with ``alpha`` clamped so that the EMA tracks the plain average
    during the first optimisation steps.

    Args:
        model: torch.nn.Module, the student model (source of the new weights).
        ema_model: torch.nn.Module, the teacher model, updated in place.
        alpha: float, EMA decay (e.g. 0.999).
        global_step: int, number of optimisation steps taken so far.
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # add_(other, alpha=...) replaces the deprecated positional
        # add_(scalar, tensor) convention removed in current PyTorch.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model1, model2, optimizer1, optimizer2, c_epoch, ema_model1=None, ema_model2=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of the dual Mean Teacher model with cross self-training.

    Two student models are trained jointly: each one is supervised by the
    usual weak/strong BCE losses and additionally by pseudo labels estimated
    from both teachers' predictions, weighted by a JSD-based reliability
    score. Student 1 learns from pseudo label 2 and vice versa.

    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((input, augmented input), labels)
        model1: torch.Module, first student model (fed the plain input), returns a weak and strong prediction
        model2: torch.Module, second student model (fed the augmented input), returns a weak and strong prediction
        optimizer1: torch.Module, optimizer used to train model1
        optimizer2: torch.Module, optimizer used to train model2
        c_epoch: int, the current epoch of training
        ema_model1: torch.Module, teacher (EMA) counterpart of model1
        ema_model2: torch.Module, teacher (EMA) counterpart of model2
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)

    Returns:
        tuple (loss1, loss2): the total loss of each student on the last batch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    # NOTE(review): reliability_criterion is never used below.
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')
    jsd = JSD()
    softmax = nn.Softmax(dim=1)
    # Table of candidate label combinations; requires a CUDA device.
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    for i, ((batch_input, batch_input_ema), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))

        if adjust_lr:
            # Student 2 runs with a slightly lower LR (ramp-down factor 0.9).
            adjust_learning_rate(optimizer1, rampup_value, rampdown_value=1.0)
            adjust_learning_rate(optimizer2, rampup_value, rampdown_value=0.9)
        meters.update('lr', optimizer1.param_groups[0]['lr'])
        batch_input, batch_input_ema, target = to_cuda_if_available(batch_input, batch_input_ema, target)

        # Outputs: student 1 on the plain input, teacher 1 on the augmented one.
        strong_pred1, weak_pred1 = model1(batch_input)
        strong_predict1, weak_predict1 = ema_model1(batch_input_ema)
        strong_predict1 = strong_predict1.detach()
        weak_predict1 = weak_predict1.detach()
        # data augmentation: student 2 / teacher 2 see the inputs swapped.
        strong_pred2, weak_pred2 = model2(batch_input_ema)
        strong_predict2, weak_predict2 = ema_model2(batch_input)
        strong_predict2 = strong_predict2.detach()
        weak_predict2 = weak_predict2.detach()

        # Weak BCE Loss
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            weak_class_loss1 = class_criterion(weak_pred1[mask_weak], target_weak[mask_weak]).mean()
            weak_class_loss2 = class_criterion(weak_pred2[mask_weak], target_weak[mask_weak]).mean()
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss1: {weak_class_loss1} \t rampup_value: {rampup_value}"
                          f"weak loss2: {weak_class_loss2} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss1', weak_class_loss1.item())
            meters.update('weak_class_loss2', weak_class_loss2.item())

        # Strong BCE loss
        if mask_strong is not None:
            strong_class_loss1 = class_criterion(strong_pred1[mask_strong], target[mask_strong]).mean()
            strong_class_loss2 = class_criterion(strong_pred2[mask_strong], target[mask_strong]).mean()
            meters.update('Strong loss1', strong_class_loss1.item())
            meters.update('Strong loss2', strong_class_loss2.item())

        # Teacher-student consistency cost
        if ema_model1 is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)
            # Self-labeling: estimate frame-level pseudo labels by enumerating
            # label combinations with up to two simultaneously active classes
            # (K = 0, 1, 2) from the two teachers' clamped log-probabilities.
            # NOTE(review): 157 appears to be the number of output frames per
            # clip (max_frames / pooling_time_ratio) -- confirm against config.
            n_unlabeled = int(3*cfg.batch_size/4)
            est_strong_target1 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            est_strong_target2 = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
            for bter in range(cfg.batch_size):
                # Per-frame log-probability of each class being active (h1)
                # or inactive (h0), clamped away from 0/1 for numerical safety.
                sp1 = strong_predict1[bter]
                sp1 = torch.clamp(sp1, 1.0e-4, 1-1.0e-4)
                p1_h1 = torch.log(sp1)
                p1_h0 = torch.log(1-sp1)
                sp2 = strong_predict2[bter]
                sp2 = torch.clamp(sp2, 1.0e-4, 1-1.0e-4)
                p2_h1 = torch.log(sp2)
                p2_h0 = torch.log(1-sp2)
                # Stack both teachers' frames (2 * 157 rows).
                p_h0 = torch.cat((p1_h0, p2_h0), 0)
                p_h1 = torch.cat((p1_h1, p2_h1), 0)
                # K = 0: log-prob that no class is active.
                P0 = p_h0.sum(1)
                # K = 1: exactly one class active.
                P1 = P0[:,None] + p_h1 - p_h0
                #P = torch.cat([P0.reshape(157,1), P1], 1)
                # K = 2: every unordered pair of active classes.
                P2 = []
                for cter in range(1,cfg.nClass):
                    P2.append(P1[:,:-cter]+P1[:,cter:])
                P2 = torch.cat(P2, 1)
                P2 = P2 - P0[:,None]
                P = torch.cat([P0.reshape(157*2,1), P1, P2], 1)
                # K: up to 3
                #P3 = []
                #for cter1 in range(1,cfg.nClass):
                #    for cter2 in range(1, cfg.nClass-cter1):
                #        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
                #P3 = torch.cat(P3,1)
                #P3 = P3 - 2*P0[:,None]
                #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)
                # Normalise over all combinations, then form the expected
                # label vector as a probability-weighted sum of the
                # combinations' multi-hot label rows.
                P = softmax(P)
                prob_v, prob_i = torch.sort(P, dim=1, descending=True)
                norm_p = prob_v.sum(1)
                prob_v = prob_v/norm_p[:,None]
                cl = class_label[prob_i.tolist(),:]
                cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
                # First 157 rows: pseudo label from teacher 1; rest: teacher 2.
                est_strong_target1[bter,:,:] = torch.squeeze(cl[:157,:])
                est_strong_target2[bter,:,:] = torch.squeeze(cl[157:,:])
            est_weak_target1 = est_strong_target1.mean(1)
            est_weak_target2 = est_strong_target2.mean(1)
            # Reliability of each pseudo label: ramped (1 - JSD) against the
            # ground truth on the labeled portion of the batch.
            strong_reliability1 = rampup_weight*(1-jsd.apply(est_strong_target1[mask_strong], target[mask_strong]).mean())
            strong_reliability2 = rampup_weight*(1-jsd.apply(est_strong_target2[mask_strong], target[mask_strong]).mean())
            weak_reliability1 = rampup_weight*(1-jsd.apply(est_weak_target1[mask_weak], target_weak[mask_weak]).mean())
            weak_reliability2 = rampup_weight*(1-jsd.apply(est_weak_target2[mask_weak], target_weak[mask_weak]).mean())
            meters.update('Reliability of pseudo label1', strong_reliability1.item())
            meters.update('Reliability of pseudo label2', strong_reliability2.item())
            # classification error with pseudo label (cross supervision:
            # student 1 learns from pseudo label 2 and vice versa).
            # NOTE(review): the 6:n_unlabeled slice presumably selects the
            # unlabeled portion of the batch (weak part is 0:batch_size/4,
            # i.e. 0:6 for batch_size 24) -- confirm against the sampler setup.
            pred_strong_loss1 = mse_criterion(strong_pred1[6:n_unlabeled], est_strong_target2[6:n_unlabeled]).mean([1,2])
            pred_weak_loss1 = mse_criterion(strong_pred1[mask_weak], est_strong_target2[mask_weak]).mean([1,2])
            pred_strong_loss2 = mse_criterion(strong_pred2[6:n_unlabeled], est_strong_target1[6:n_unlabeled]).mean([1,2])
            pred_weak_loss2 = mse_criterion(strong_pred2[mask_weak], est_strong_target1[mask_weak]).mean([1,2])
            expect_loss1 = strong_reliability2*pred_strong_loss1.mean() + weak_reliability2*pred_weak_loss1.mean()
            expect_loss2 = strong_reliability1*pred_strong_loss2.mean() + weak_reliability1*pred_weak_loss2.mean()
            meters.update('Expectation of predict loss1', expect_loss1.item())
            meters.update('Expectation of predict loss2', expect_loss2.item())

        # Total loss per student (assumes weak/strong masks and EMA models
        # were all provided, since the three terms are defined above).
        loss1 = weak_class_loss1 + strong_class_loss1 + expect_loss1
        loss2 = weak_class_loss2 + strong_class_loss2 + expect_loss2
        meters.update('Loss1', loss1.item())
        meters.update('Loss2', loss2.item())

        # Skip the parameter update on divergent batches (NaN/exploding loss).
        if (np.isnan(loss1.item()) or loss1.item() > 1e5):
            print(loss1)
            print(loss2)
        else:
            # compute gradient and do optimizer step
            optimizer1.zero_grad()
            loss1.backward()
            optimizer1.step()
            optimizer2.zero_grad()
            loss2.backward()
            optimizer2.step()

        global_step += 1
        if ema_model1 is not None:
            update_ema_variables(model1, ema_model1, 0.999, global_step)
        if ema_model2 is not None:
            update_ema_variables(model2, ema_model2, 0.999, global_step)

    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss1, loss2
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Load and split the DESED DataFrames used for training and validation.

    Args:
        desed_dataset: DESED, dataset wrapper used to build the DataFrames.
        nb_files: int, optional, restrict each subset to this many files
            (useful for quick debugging runs).
        separated_sources: bool, whether to use the source-separated audio
            directories from config instead of the original mixtures.

    Returns:
        dict of pandas.DataFrame with keys: "weak", "unlabel", "synthetic",
        "train_synthetic", "valid_synthetic", "validation".

    Note:
        Relies on the script-level global ``pooling_time_ratio`` to convert
        synthetic onset/offset times from seconds to output frames, so it must
        be called after that variable is defined.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    audio_weak_ss = None
    audio_unlabel_ss = None
    audio_validation_ss = None
    audio_synthetic_ss = None
    if separated_sources:
        audio_weak_ss = cfg.weak_ss
        audio_unlabel_ss = cfg.unlabel_ss
        audio_validation_ss = cfg.validation_ss
        audio_synthetic_ss = cfg.synthetic_ss

    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even if synthetic data are not used for training, they are used for validation.
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Divide synthetic in train and valid (80/20 split on unique filenames,
    # fixed random_state for reproducibility).
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    # .copy() avoids pandas chained-assignment (SettingWithCopyWarning and
    # potentially silent no-op writes) when onset/offset are rewritten below.
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)].copy()
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Put train_synth in frames so many_hot_encoder can work.
    # Not done for valid, because labels are unused at prediction time and the
    # event-based metric expects seconds.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())

    data_dfs = {"weak": weak_df,
                "unlabel": unlabel_df,
                "synthetic": synthetic_df,
                "train_synthetic": train_synth_df,
                "valid_synthetic": valid_synth_df,
                "validation": validation_df,
                }
    return data_dfs
if __name__ == '__main__':
torch.manual_seed(2020)
np.random.seed(2020)
logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
logger.info("Baseline 2020")
logger.info(f"Starting time: {datetime.datetime.now()}")
parser = argparse.ArgumentParser(description="")
parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
help="Number of files to be used. Useful when testing on small number of files.")
parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
help="Not using synthetic labels during training")
f_args = parser.parse_args()
pprint(vars(f_args))
reduced_number_of_data = f_args.subpart_data
no_synthetic = f_args.no_synthetic
store_dir = os.path.join("stored_data", "MeanTeacher_with_dual_v2_max3_v2_2")
saved_model_dir = os.path.join(store_dir, "model")
saved_pred_dir = os.path.join(store_dir, "predictions")
if os.path.exists(store_dir):
if os.path.exists(saved_model_dir):
load_flag = True
else:
load_flag = False
os.makedirs(saved_model_dir, exist_ok=True)
os.makedirs(saved_pred_dir, exist_ok=True)
else:
load_flag = False
os.makedirs(store_dir, exist_ok=True)
os.makedirs(saved_model_dir, exist_ok=True)
os.makedirs(saved_pred_dir, exist_ok=True)
n_channel = 1
add_axis_conv = 0
# Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
n_layers = 7
crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
"n_layers_RNN": 2,
"activation": "glu",
"dropout": 0.5,
"kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
"nb_filters": [16, 32, 64, 128, 128, 128, 128],
"pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
pooling_time_ratio = 4 # 2 * 2
out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
logger.debug(f"median_window: {median_window}")
# ##############
# DATA
# ##############
dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
compute_log=False)
dfs = get_dfs(dataset, reduced_number_of_data)
# Meta path for psds
durations_synth = get_durations_df(cfg.synthetic)
many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
encod_func = many_hot_encoder.encode_strong_df
# Normalisation per audio or on the full dataset
if cfg.scaler_type == "dataset":
transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
scaler_args = []
scaler = Scaler()
# # Only on real data since that's our final goal and test data are real
scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
logger.debug(f"scaler mean: {scaler.mean_}")
else:
scaler_args = ["global", "min-max"]
scaler = ScalerPerAudio(*scaler_args)
transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
return_indexes=True, in_memory=cfg.in_memory)
logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
if not no_synthetic:
list_dataset = [weak_data, unlabel_data, train_synth_data]
batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
else:
list_dataset = [weak_data, unlabel_data]
batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
strong_mask = None
weak_mask = slice(batch_sizes[0]) # Assume weak data is always the first one
concat_dataset = ConcatDataset(list_dataset)
sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
# ##############
# Model
# ##############
if load_flag:
mlist = os.listdir(saved_model_dir)
modelName = mlist[-1]
n_epoch = np.int(modelName.split('_')[-1]) + 1
model_fname = os.path.join(saved_model_dir, modelName)
state = torch.load(model_fname)
crnn1 = _load_model_v2(state, 1, 'crnn')
crnn2 = _load_model_v2(state, 2, 'crnn')
logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
crnn1_ema = _load_model_v2(state, 1, 'crnn')
for param in crnn1_ema.parameters():
param.detach()
crnn2_ema = _load_model_v2(state, 2, 'crnn')
for param in crnn2_ema.parameters():
param.detach()
optim_kwargs = state['optimizer']["kwargs"]
optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
else:
n_epoch = 0
crnn1 = CRNN(**crnn_kwargs)
crnn2 = CRNN(**crnn_kwargs)
pytorch_total_params = sum(p.numel() for p in crnn1.parameters() if p.requires_grad)
logger.info(crnn1)
logger.info("number of parameters in the model: {}".format(pytorch_total_params))
crnn1.apply(weights_init)
crnn2.apply(weights_init)
crnn1_ema = CRNN(**crnn_kwargs)
crnn2_ema = CRNN(**crnn_kwargs)
crnn1_ema.apply(weights_init)
crnn2_ema.apply(weights_init)
for param in crnn1_ema.parameters():
param.detach_()
for param in crnn2_ema.parameters():
param.detach_()
optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
optim1 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn1.parameters()), **optim_kwargs)
optim2 = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn2.parameters()), **optim_kwargs)
state = {
'model': {"name1": crnn1.__class__.__name__,
"name2": crnn2.__class__.__name__,
'args': '',
"kwargs": crnn_kwargs,
'state_dict1': crnn1.state_dict(),
'state_dict2': crnn2.state_dict()},
'model_ema': {"name1": crnn1_ema.__class__.__name__,
"name2": crnn2_ema.__class__.__name__,
'args': '',
"kwargs": crnn_kwargs,
'state_dict1': crnn1_ema.state_dict(),
'state_dict2': crnn2_ema.state_dict()},
'optimizer': {"name1": optim1.__class__.__name__,
"name2": optim2.__class__.__name__,
'args': '',
"kwargs": optim_kwargs,
'state_dict1': optim1.state_dict(),
'state_dict2': optim2.state_dict()},
"pooling_time_ratio": pooling_time_ratio,
"scaler": {
"type": type(scaler).__name__,
"args": scaler_args,
"state_dict": scaler.state_dict()},
"many_hot_encoder": many_hot_encoder.state_dict(),
"median_window": median_window,
"desed": dataset.state_dict()
}
# save_best_cb = SaveBest("sup")
save_best_cb = SaveBest("inf")
if cfg.early_stopping is not None:
early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
# ##############
# Train
# ##############
results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
for epoch in range(n_epoch, n_epoch+cfg.n_epoch):
crnn1.train()
crnn2.train()
crnn1_ema.train()
crnn2_ema.train()
crnn1, crnn2, crnn1_ema, crnn2_ema = to_cuda_if_available(crnn1, crnn2, crnn1_ema, crnn2_ema)
loss_value, loss_value2 = train(training_loader, crnn1, crnn2, optim1, optim2, epoch,
ema_model1=crnn1_ema, ema_model2=crnn2_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
# Validation
crnn1 = crnn1.eval()
logger.info("\n ### Valid synthetic metric ### \n")
predictions = get_predictions(crnn1, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
median_window=median_window, save_predictions=None)
# Validation with synthetic data (dropping feature_filename for psds)
valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
# Update state
state['model']['state_dict1'] = crnn1.state_dict()
state['model']['state_dict2'] = crnn2.state_dict()
state['model_ema']['state_dict1'] = crnn1_ema.state_dict()
state['model_ema']['state_dict2'] = crnn2_ema.state_dict()
state['optimizer']['state_dict1'] = optim1.state_dict()
state['optimizer']['state_dict2'] = optim2.state_dict()
state['epoch'] = epoch
state['valid_metric'] = valid_synth_f1
state['valid_f1_psds'] = psds_m_f1
# Callbacks
if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
torch.save(state, model_fname)
if cfg.save_best:
stop_criterior = (loss_value.item()+loss_value2.item())/2 + np.abs(loss_value.item()-loss_value2.item())
if save_best_cb.apply(stop_criterior):
model_fname = os.path.join(saved_model_dir, "baseline_best")
torch.save(state, model_fname)
#crnn1.eval()
#transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
#predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
#validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
#validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
#validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
#durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
# Preds with only one value
#valid_predictions = get_predictions(crnn1, validation_dataloader, many_hot_encoder.decode_strong,
# pooling_time_ratio, median_window=median_window,
# save_predictions=predicitons_fname)
#compute_metrics(valid_predictions, validation_labels_df, durations_validation)
results.loc[epoch, "global_valid"] = valid_synth_f1
results.loc[epoch, "loss"] = loss_value.item()
results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
if cfg.early_stopping:
if early_stopping_call.apply(valid_synth_f1):
logger.warn("EARLY STOPPING")
break
if cfg.save_best:
model_fname = os.path.join(saved_model_dir, "baseline_best")
state = torch.load(model_fname)
crnn = _load_model_v2(state, 1, 'crnn')
logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
else:
logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
results_df = pd.DataFrame(results).to_csv(os.path.join(saved_pred_dir, "results.tsv"),
sep="\t", index=False, float_format="%.4f")
# ##############
# Validation
# ##############
crnn.eval()
transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
# Preds with only one value
valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
pooling_time_ratio, median_window=median_window,
save_predictions=predicitons_fname)
compute_metrics(valid_predictions, validation_labels_df, durations_validation)
# ##########
# Optional but recommended
# ##########
# Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
# n_thresholds = 50
# Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
# list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
# pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
# pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
# save_predictions=predicitons_fname)
# psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
# psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 28,492 | 48.295848 | 158 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/main_SRST_model.py | # -*- coding: utf-8 -*-
import argparse
import datetime
import inspect
import os
import time
from pprint import pprint
import pandas as pd
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch import nn
from data_utils.Desed import DESED
from data_utils.DataLoad import DataLoadDf, ConcatDataset, MultiStreamBatchSampler
from TestModel import _load_model
from evaluation_measures import get_predictions, psds_score, compute_psds_from_operating_points, compute_metrics
from models.CRNN import CRNN
import config as cfg
from utilities import ramps
from utilities.Logger import create_logger
from utilities.Scaler import ScalerPerAudio, Scaler
from utilities.utils import SaveBest, to_cuda_if_available, weights_init, AverageMeterSet, EarlyStopping, \
get_durations_df, median_smoothing
from utilities.ManyHotEncoder import ManyHotEncoder
from utilities.Transforms import get_transforms
def adjust_learning_rate(optimizer, rampup_value, rampdown_value=1):
    """Scale the optimizer's learning rate by ramp-up/ramp-down factors.

    Implements the LR warm-up for large minibatches from
    https://arxiv.org/abs/1706.02677. The beta/weight-decay scheduling from
    the original Orange recipe is intentionally not applied here.

    Args:
        optimizer: torch optimizer whose param groups are updated in place.
        rampup_value: float between 0 and 1, linearly increasing warm-up factor.
        rampdown_value: float between 1 and 0, linearly decreasing decay factor.
    Returns:
        None; the learning rate is written into every param group.
    """
    scaled_lr = cfg.max_learning_rate * rampup_value * rampdown_value
    for group in optimizer.param_groups:
        group['lr'] = scaled_lr
def update_ema_variables(model, ema_model, alpha, global_step):
    """Update the teacher (EMA) parameters towards the student parameters.

    ema = alpha * ema + (1 - alpha) * param, with alpha ramped from 0 so the
    EMA tracks the true average during the first steps.

    Args:
        model: torch.nn.Module, the student model being trained.
        ema_model: torch.nn.Module, the exponential-moving-average teacher.
        alpha: float, maximum EMA decay rate.
        global_step: int, current optimisation step (controls the warm-up).
    """
    # Use the true average until the exponential average is more correct.
    alpha = min(1 - 1 / (global_step + 1), alpha)
    for ema_params, params in zip(ema_model.parameters(), model.parameters()):
        # Fix: the positional Tensor.add_(Number, Tensor) overload was
        # deprecated and removed from PyTorch; use the `alpha=` keyword form.
        ema_params.data.mul_(alpha).add_(params.data, alpha=1 - alpha)
def train(train_loader, model, optimizer, c_epoch, ema_model=None, mask_weak=None, mask_strong=None, adjust_lr=False):
    """ One epoch of a Mean Teacher model with ICT mixing and self-labeling.
    Args:
        train_loader: torch.utils.data.DataLoader, iterator of training batches for an epoch.
            Should return a tuple: ((teacher input, student input), labels)
        model: torch.Module, model to be trained, should return a weak and strong prediction
        optimizer: torch.Module, optimizer used to train the model
        c_epoch: int, the current epoch of training
        ema_model: torch.Module, student model, should return a weak and strong prediction
        mask_weak: slice or list, mask the batch to get only the weak labeled data (used to calculate the loss)
        mask_strong: slice or list, mask the batch to get only the strong labeled data (used to calcultate the loss)
        adjust_lr: bool, Whether or not to adjust the learning rate during training (params in config)
    Returns:
        torch.Tensor, the total loss of the LAST batch of the epoch.
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    class_criterion = nn.BCELoss(reduction='none')
    mse_criterion = nn.MSELoss(reduction='none')
    # NOTE(review): reliability_criterion is never used below.
    reliability_criterion = nn.CrossEntropyLoss(reduction='none')
    softmax = nn.Softmax(dim=1)
    # Lookup table mapping a label-combination index to a many-hot class vector.
    class_label = torch.tensor(cfg.class_label).cuda()
    class_criterion, mse_criterion, softmax = to_cuda_if_available(class_criterion, mse_criterion, softmax)
    meters = AverageMeterSet()
    log.debug("Nb batches: {}".format(len(train_loader)))
    start = time.time()
    #plabel = []
    for i, ((batch_input, batch_input_ema), target) in enumerate(train_loader):
        global_step = c_epoch * len(train_loader) + i
        rampup_value = ramps.exp_rampup(global_step, cfg.n_epoch_rampup2*len(train_loader))
        if adjust_lr:
            adjust_learning_rate(optimizer, rampup_value)
        meters.update('lr', optimizer.param_groups[0]['lr'])
        batch_input, batch_input_ema, target = to_cuda_if_available(batch_input, batch_input_ema, target)
        # Outputs: student on the first view, teacher averaged over both views.
        strong_pred, weak_pred = model(batch_input)
        strong_predict1, weak_predict1 = ema_model(batch_input)
        strong_predict2, weak_predict2 = ema_model(batch_input_ema)
        strong_predict = (strong_predict1 + strong_predict2)/2
        weak_predict = (weak_predict1 + weak_predict2)/2
        # Teacher predictions are treated as fixed targets (no gradient).
        strong_predict = strong_predict.detach()
        weak_predict = weak_predict.detach()
        # core for Interpolation Consistency Training (ICT):
        # mix two teacher views and require the student to match the mixed
        # teacher predictions on the mixed input.
        n_unlabeled = int(3*cfg.batch_size/4)  # first 3/4 of batch = unlabeled + weakly labeled data
        unlabeled_data1 = batch_input[:n_unlabeled]
        unlabeled_data2 = batch_input_ema[:n_unlabeled]
        strong_prediction1, weak_prediction1 = ema_model(unlabeled_data1)
        strong_prediction2, weak_prediction2 = ema_model(unlabeled_data2)
        lambda_ = torch.rand(1).cuda()
        mixed_unlabeled_data = lambda_*unlabeled_data1 + (1.0-lambda_)*unlabeled_data2
        mixed_strong_plabel = lambda_*strong_prediction1 + (1.0-lambda_)*strong_prediction2
        mixed_weak_plabel = lambda_*weak_prediction1 + (1.0-lambda_)*weak_prediction2
        strong_prediction_mixed, weak_prediction_mixed = model(mixed_unlabeled_data)
        loss = None
        # Weak BCE Loss on the weakly-labeled slice of the batch.
        target_weak = target.max(-2)[0]  # Take the max in the time axis
        if mask_weak is not None:
            temp = class_criterion(weak_pred[mask_weak], target_weak[mask_weak])
            weak_class_loss = temp.mean()
            if i == 0:
                log.debug(f"target: {target.mean(-2)} \n Target_weak: {target_weak} \n "
                          f"Target weak mask: {target_weak[mask_weak]} \n "
                          f"Target strong mask: {target[mask_strong].sum(-2)}\n"
                          f"weak loss: {weak_class_loss} \t rampup_value: {rampup_value}"
                          f"tensor mean: {batch_input.mean()}")
            meters.update('weak_class_loss', weak_class_loss.item())
            #meters.update('Weak EMA loss', ema_class_loss.mean().item())
        # Strong BCE loss on the strongly-labeled (synthetic) slice.
        if mask_strong is not None:
            temp = class_criterion(strong_pred[mask_strong], target[mask_strong])
            strong_class_loss = temp.mean()
            meters.update('Strong loss', strong_class_loss.item())
        # Teacher-student consistency cost (ICT), weighted by the ramp-up.
        if ema_model is not None:
            rampup_weight = cfg.max_rampup_weight * rampup_value
            meters.update('Rampup weight', rampup_weight)
            # Take consistency about strong predictions (all data)
            consistency_loss_strong = rampup_weight * mse_criterion(strong_prediction_mixed, mixed_strong_plabel).mean()
            meters.update('Consistency strong', consistency_loss_strong.item())
            #if loss is not None:
            #    loss += consistency_loss_strong
            #else:
            #    loss = consistency_loss_strong
            #meters.update('Consistency weight', consistency_cost)
            # Take consistency about weak predictions (all data)
            consistency_loss_weak = rampup_weight * mse_criterion(weak_prediction_mixed, mixed_weak_plabel).mean()
            meters.update('Consistency weak', consistency_loss_weak.item())
            #if loss is not None:
            #    loss += consistency_loss_weak
            #else:
            #    loss = consistency_loss_weak
        # Self-labeling: from the teacher's frame-wise probabilities, build a
        # per-frame pseudo label restricted to at most 2 simultaneously active
        # classes, via log-likelihoods of each K-active-class hypothesis.
        # NOTE(review): 157 looks like the pooled frame count
        # (cfg.max_frames // pooling_time_ratio) -- confirm before changing
        # cfg.max_frames; this code assumes ema_model is not None (uses
        # rampup_weight below) and full batches of cfg.batch_size.
        est_strong_target = torch.zeros(cfg.batch_size,157,cfg.nClass).cuda()
        for bter in range(cfg.batch_size):
            sp = strong_predict[bter]
            sp = torch.clamp(sp, 0.0001, 0.9999)
            p_h1 = torch.log(sp)
            p_h0 = torch.log(1-sp)
            # K = 0: log-likelihood that no class is active.
            P0 = p_h0.sum(1)
            # K = 1: one active class per frame.
            P1 = P0[:,None] + p_h1 - p_h0
            #P = torch.cat([P0.reshape(157,1), P1], 1)
            # K = 2: every unordered pair of active classes.
            P2 = []
            for cter in range(1,cfg.nClass):
                P2.append(P1[:,:-cter]+P1[:,cter:])
            P2 = torch.cat(P2, 1)
            P2 = P2 - P0[:,None]
            P = torch.cat([P0.reshape(157,1), P1, P2], 1)
            # K: up to 3 (disabled)
            #P3 = []
            #for cter1 in range(1,cfg.nClass):
            #    for cter2 in range(1, cfg.nClass-cter1):
            #        P3.append(P1[:,:-(cter1+cter2)]+P1[:,cter1:-cter2]+P1[:,(cter1+cter2):])
            #P3 = torch.cat(P3,1)
            #P3 = P3 - 2*P0[:,None]
            #P = torch.cat([P0.reshape(157,1), P1, P2, P3], 1)
            # Soft pseudo label: probability-weighted sum of the many-hot
            # vectors of every hypothesis, renormalised.
            P = softmax(P)
            prob_v, prob_i = torch.sort(P, dim=1, descending=True)
            norm_p = prob_v.sum(1)
            prob_v = prob_v/norm_p[:,None]
            cl = class_label[prob_i.tolist(),:]
            cl = torch.mul(cl, prob_v[:,:,None]).sum(1)
            est_strong_target[bter,:,:] = torch.squeeze(cl)
        est_weak_target = est_strong_target.mean(1)
        # Reliability: how well the pseudo labels agree with the ground truth
        # on the strongly-labeled slice; caps the pseudo-label loss weight.
        reliability = rampup_weight/class_criterion(est_strong_target[mask_strong], target[mask_strong]).mean()
        reliability = torch.clamp(reliability, 0, 2*rampup_weight)
        meters.update('Reliability of pseudo label', reliability.item())
        # classification error with pseudo label on the unlabeled slice
        pred_strong_loss = mse_criterion(strong_pred[:n_unlabeled], est_strong_target[:n_unlabeled]).mean([1,2])
        pred_weak_loss = mse_criterion(weak_pred[:n_unlabeled], est_weak_target[:n_unlabeled]).mean(1)
        pred_loss = pred_strong_loss + pred_weak_loss
        expect_loss = reliability * pred_loss.mean()
        meters.update('Expectation of predict loss', expect_loss.item())
        loss = weak_class_loss + strong_class_loss + consistency_loss_strong + consistency_loss_weak + expect_loss
        meters.update('Loss', loss.item())
        # Skip the update on divergent batches instead of propagating NaNs.
        if (np.isnan(loss.item()) or loss.item() > 1e5):
            print(loss)
        else:
            # compute gradient and do optimizer step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        global_step += 1
        if ema_model is not None:
            update_ema_variables(model, ema_model, 0.999, global_step)
    epoch_time = time.time() - start
    log.info(f"Epoch: {c_epoch}\t Time {epoch_time:.2f}\t {meters}")
    return loss
def get_dfs(desed_dataset, nb_files=None, separated_sources=False):
    """Build the training/validation dataframes from the DESED dataset.

    Args:
        desed_dataset: DESED instance used to load/initialize each subset.
        nb_files: int or None, optional cap on the number of files per subset
            (useful for quick debugging runs).
        separated_sources: bool, whether to also point at source-separated
            audio directories from the config.
    Returns:
        dict of pandas.DataFrame keyed by subset name ("weak", "unlabel",
        "synthetic", "train_synthetic", "valid_synthetic", "validation").
    """
    log = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    if separated_sources:
        audio_weak_ss, audio_unlabel_ss = cfg.weak_ss, cfg.unlabel_ss
        audio_validation_ss, audio_synthetic_ss = cfg.validation_ss, cfg.synthetic_ss
    else:
        audio_weak_ss = audio_unlabel_ss = audio_validation_ss = audio_synthetic_ss = None
    weak_df = desed_dataset.initialize_and_get_df(cfg.weak, audio_dir_ss=audio_weak_ss, nb_files=nb_files)
    unlabel_df = desed_dataset.initialize_and_get_df(cfg.unlabel, audio_dir_ss=audio_unlabel_ss, nb_files=nb_files)
    # Even when synthetic data is not used for training, it is needed for validation.
    synthetic_df = desed_dataset.initialize_and_get_df(cfg.synthetic, audio_dir_ss=audio_synthetic_ss,
                                                       nb_files=nb_files, download=False)
    log.debug(f"synthetic: {synthetic_df.head()}")
    validation_df = desed_dataset.initialize_and_get_df(cfg.validation, audio_dir=cfg.audio_validation_dir,
                                                        audio_dir_ss=audio_validation_ss, nb_files=nb_files)
    # Split synthetic 80/20 into train/valid (fixed seed for reproducibility).
    filenames_train = synthetic_df.filename.drop_duplicates().sample(frac=0.8, random_state=26)
    train_synth_df = synthetic_df[synthetic_df.filename.isin(filenames_train)]
    valid_synth_df = synthetic_df.drop(train_synth_df.index).reset_index(drop=True)
    # Convert the training annotations from seconds to pooled frame indices so
    # many_hot_encoder can work. Validation stays in seconds because labels are
    # not used for prediction and event-based metrics expect seconds.
    train_synth_df.onset = train_synth_df.onset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    train_synth_df.offset = train_synth_df.offset * cfg.sample_rate // cfg.hop_size // pooling_time_ratio
    log.debug(valid_synth_df.event_label.value_counts())
    return {"weak": weak_df,
            "unlabel": unlabel_df,
            "synthetic": synthetic_df,
            "train_synthetic": train_synth_df,
            "valid_synthetic": valid_synth_df,
            "validation": validation_df,
            }
if __name__ == '__main__':
    torch.manual_seed(2020)
    np.random.seed(2020)
    logger = create_logger(__name__ + "/" + inspect.currentframe().f_code.co_name, terminal_level=cfg.terminal_level)
    logger.info("Baseline 2020")
    logger.info(f"Starting time: {datetime.datetime.now()}")
    parser = argparse.ArgumentParser(description="")
    parser.add_argument("-s", '--subpart_data', type=int, default=None, dest="subpart_data",
                        help="Number of files to be used. Useful when testing on small number of files.")
    parser.add_argument("-n", '--no_synthetic', dest='no_synthetic', action='store_true', default=False,
                        help="Not using synthetic labels during training")
    f_args = parser.parse_args()
    pprint(vars(f_args))
    reduced_number_of_data = f_args.subpart_data
    no_synthetic = f_args.no_synthetic
    store_dir = os.path.join("stored_data", "MeanTeacher_with_ICT_plabel")
    saved_model_dir = os.path.join(store_dir, "model")
    saved_pred_dir = os.path.join(store_dir, "predictions")
    # Resume only when a model directory already exists; create everything
    # idempotently (exist_ok) instead of branching on each path.
    load_flag = os.path.exists(saved_model_dir)
    os.makedirs(saved_model_dir, exist_ok=True)
    os.makedirs(saved_pred_dir, exist_ok=True)
    n_channel = 1
    add_axis_conv = 0
    # Model taken from 2nd of dcase19 challenge: see Delphin-Poulat2019 in the results.
    n_layers = 7
    crnn_kwargs = {"n_in_channel": n_channel, "nclass": len(cfg.classes), "attention": True, "n_RNN_cell": 128,
                   "n_layers_RNN": 2,
                   "activation": "glu",
                   "dropout": 0.5,
                   "kernel_size": n_layers * [3], "padding": n_layers * [1], "stride": n_layers * [1],
                   "nb_filters": [16, 32, 64, 128, 128, 128, 128],
                   "pooling": [[2, 2], [2, 2], [1, 2], [1, 2], [1, 2], [1, 2], [1, 2]]}
    pooling_time_ratio = 4  # 2 * 2 (time pooling of the first two CNN stages)
    out_nb_frames_1s = cfg.sample_rate / cfg.hop_size / pooling_time_ratio
    median_window = max(int(cfg.median_window_s * out_nb_frames_1s), 1)
    logger.debug(f"median_window: {median_window}")
    # ##############
    # DATA
    # ##############
    dataset = DESED(base_feature_dir=os.path.join(cfg.workspace, "dataset", "features"),
                    compute_log=False)
    dfs = get_dfs(dataset, reduced_number_of_data)
    # Meta path for psds
    durations_synth = get_durations_df(cfg.synthetic)
    many_hot_encoder = ManyHotEncoder(cfg.classes, n_frames=cfg.max_frames // pooling_time_ratio)
    encod_func = many_hot_encoder.encode_strong_df
    # Normalisation per audio or on the full dataset
    if cfg.scaler_type == "dataset":
        transforms = get_transforms(cfg.max_frames, add_axis=add_axis_conv)
        weak_data = DataLoadDf(dfs["weak"], encod_func, transforms)
        unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms)
        train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms)
        scaler_args = []
        scaler = Scaler()
        # Only on real data since that's our final goal and test data are real
        scaler.calculate_scaler(ConcatDataset([weak_data, unlabel_data, train_synth_data]))
        logger.debug(f"scaler mean: {scaler.mean_}")
    else:
        scaler_args = ["global", "min-max"]
        scaler = ScalerPerAudio(*scaler_args)
    transforms = get_transforms(cfg.max_frames, scaler, add_axis_conv,
                                noise_dict_params={"mean": 0., "snr": cfg.noise_snr})
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    weak_data = DataLoadDf(dfs["weak"], encod_func, transforms, in_memory=cfg.in_memory)
    unlabel_data = DataLoadDf(dfs["unlabel"], encod_func, transforms, in_memory=cfg.in_memory_unlab)
    train_synth_data = DataLoadDf(dfs["train_synthetic"], encod_func, transforms, in_memory=cfg.in_memory)
    valid_synth_data = DataLoadDf(dfs["valid_synthetic"], encod_func, transforms_valid,
                                  return_indexes=True, in_memory=cfg.in_memory)
    logger.debug(f"len synth: {len(train_synth_data)}, len_unlab: {len(unlabel_data)}, len weak: {len(weak_data)}")
    # Batch layout: weak | unlabeled | (strong synthetic); masks must match it.
    if not no_synthetic:
        list_dataset = [weak_data, unlabel_data, train_synth_data]
        batch_sizes = [cfg.batch_size//4, cfg.batch_size//2, cfg.batch_size//4]
        strong_mask = slice((3*cfg.batch_size)//4, cfg.batch_size)
    else:
        list_dataset = [weak_data, unlabel_data]
        batch_sizes = [cfg.batch_size // 4, 3 * cfg.batch_size // 4]
        strong_mask = None
    weak_mask = slice(batch_sizes[0])  # Assume weak data is always the first one
    concat_dataset = ConcatDataset(list_dataset)
    sampler = MultiStreamBatchSampler(concat_dataset, batch_sizes=batch_sizes)
    training_loader = DataLoader(concat_dataset, batch_sampler=sampler)
    valid_synth_loader = DataLoader(valid_synth_data, batch_size=cfg.batch_size)
    # ##############
    # Model
    # ##############
    if load_flag:
        # Resume from an existing checkpoint.
        mlist = os.listdir(saved_model_dir)
        # NOTE(review): os.listdir order is arbitrary; mlist[-1] picks an
        # effectively arbitrary checkpoint -- confirm the intended selection.
        modelName = mlist[-1]
        # Fix: np.int was removed from NumPy; the builtin int is equivalent here.
        n_epoch = int(modelName.split('_')[-1]) + 1
        model_fname = os.path.join(saved_model_dir, modelName)
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"training model: {model_fname}, epoch: {state['epoch']}")
        crnn_ema = _load_model(state, 'crnn')
        for param in crnn_ema.parameters():
            # Fix: detach_() (in-place) -- detach() returns a new tensor and
            # leaves the EMA parameters attached to the graph.
            param.detach_()
        optim_kwargs = state['optimizer']["kwargs"]
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    else:
        n_epoch = 0
        crnn = CRNN(**crnn_kwargs)
        pytorch_total_params = sum(p.numel() for p in crnn.parameters() if p.requires_grad)
        logger.info(crnn)
        logger.info("number of parameters in the model: {}".format(pytorch_total_params))
        crnn.apply(weights_init)
        # The teacher (EMA) model is never optimised directly.
        crnn_ema = CRNN(**crnn_kwargs)
        crnn_ema.apply(weights_init)
        for param in crnn_ema.parameters():
            param.detach_()
        optim_kwargs = {"lr": cfg.default_learning_rate, "betas": (0.9, 0.999)}
        optim = torch.optim.Adam(filter(lambda p: p.requires_grad, crnn.parameters()), **optim_kwargs)
    # Everything needed to reload or inspect a checkpoint later.
    state = {
        'model': {"name": crnn.__class__.__name__,
                  'args': '',
                  "kwargs": crnn_kwargs,
                  'state_dict': crnn.state_dict()},
        'model_ema': {"name": crnn_ema.__class__.__name__,
                      'args': '',
                      "kwargs": crnn_kwargs,
                      'state_dict': crnn_ema.state_dict()},
        'optimizer': {"name": optim.__class__.__name__,
                      'args': '',
                      "kwargs": optim_kwargs,
                      'state_dict': optim.state_dict()},
        "pooling_time_ratio": pooling_time_ratio,
        "scaler": {
            "type": type(scaler).__name__,
            "args": scaler_args,
            "state_dict": scaler.state_dict()},
        "many_hot_encoder": many_hot_encoder.state_dict(),
        "median_window": median_window,
        "desed": dataset.state_dict()
    }
    save_best_cb = SaveBest("sup")
    if cfg.early_stopping is not None:
        early_stopping_call = EarlyStopping(patience=cfg.early_stopping, val_comp="sup", init_patience=cfg.es_init_wait)
    # ##############
    # Train
    # ##############
    results = pd.DataFrame(columns=["loss", "valid_synth_f1", "weak_metric", "global_valid"])
    for epoch in range(n_epoch, n_epoch+cfg.n_epoch):
        crnn.train()
        crnn_ema.train()
        crnn, crnn_ema = to_cuda_if_available(crnn, crnn_ema)
        loss_value = train(training_loader, crnn, optim, epoch,
                           ema_model=crnn_ema, mask_weak=weak_mask, mask_strong=strong_mask, adjust_lr=cfg.adjust_lr)
        # Validation
        crnn = crnn.eval()
        logger.info("\n ### Valid synthetic metric ### \n")
        predictions = get_predictions(crnn, valid_synth_loader, many_hot_encoder.decode_strong, pooling_time_ratio,
                                      median_window=median_window, save_predictions=None)
        # Validation with synthetic data (dropping feature_filename for psds)
        valid_synth = dfs["valid_synthetic"].drop("feature_filename", axis=1)
        valid_synth_f1, psds_m_f1 = compute_metrics(predictions, valid_synth, durations_synth)
        # Update state
        state['model']['state_dict'] = crnn.state_dict()
        state['model_ema']['state_dict'] = crnn_ema.state_dict()
        state['optimizer']['state_dict'] = optim.state_dict()
        state['epoch'] = epoch
        state['valid_metric'] = valid_synth_f1
        state['valid_f1_psds'] = psds_m_f1
        # Callbacks
        if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
            model_fname = os.path.join(saved_model_dir, "baseline_epoch_" + str(epoch))
            torch.save(state, model_fname)
        if cfg.save_best:
            if save_best_cb.apply(valid_synth_f1):
                model_fname = os.path.join(saved_model_dir, "baseline_best")
                torch.save(state, model_fname)
        results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1
        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                # Fix: logger.warn is deprecated in favour of logger.warning.
                logger.warning("EARLY STOPPING")
                break
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_model(state, 'crnn')
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))
    # to_csv returns None, so no point binding its result to a variable.
    results.to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                   sep="\t", index=False, float_format="%.4f")
    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
    validation_data = DataLoadDf(dfs["validation"], encod_func, transform=transforms_valid, return_indexes=True)
    validation_dataloader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=False, drop_last=False)
    validation_labels_df = dfs["validation"].drop("feature_filename", axis=1)
    durations_validation = get_durations_df(cfg.validation, cfg.audio_validation_dir)
    # Preds with only one value
    valid_predictions = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                        pooling_time_ratio, median_window=median_window,
                                        save_predictions=predicitons_fname)
    compute_metrics(valid_predictions, validation_labels_df, durations_validation)
    # ##########
    # Optional but recommended
    # ##########
    # Compute psds scores with multiple thresholds (more accurate). n_thresholds could be increased.
    n_thresholds = 50
    # Example of 5 thresholds: 0.1, 0.3, 0.5, 0.7, 0.9
    list_thresholds = np.arange(1 / (n_thresholds * 2), 1, 1 / n_thresholds)
    pred_ss_thresh = get_predictions(crnn, validation_dataloader, many_hot_encoder.decode_strong,
                                     pooling_time_ratio, thresholds=list_thresholds, median_window=median_window,
                                     save_predictions=predicitons_fname)
    psds = compute_psds_from_operating_points(pred_ss_thresh, validation_labels_df, durations_validation)
    psds_score(psds, filename_roc_curves=os.path.join(saved_pred_dir, "figures/psds_roc.png"))
| 25,288 | 46.535714 | 120 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/CNN.py | import torch.nn as nn
import torch
class GLU(nn.Module):
    """Gated Linear Unit acting on the channel axis of (B, C, H, W) inputs.

    Computes ``linear(x) * sigmoid(x)``, where the linear projection mixes
    channels (applied with channels moved to the last axis).
    """

    def __init__(self, input_num):
        super(GLU, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Channels-last so the Linear maps across channels, then restore layout.
        channels_last = x.permute(0, 2, 3, 1)
        projected = self.linear(channels_last).permute(0, 3, 1, 2)
        return projected * self.sigmoid(x)
class ContextGating(nn.Module):
    """Context gating over the channel axis of (B, C, H, W) inputs.

    Computes ``x * sigmoid(linear(x))``: the input modulated by a learned,
    channel-mixed gate (sigmoid applied to the projection, unlike GLU).
    """

    def __init__(self, input_num):
        super(ContextGating, self).__init__()
        self.sigmoid = nn.Sigmoid()
        self.linear = nn.Linear(input_num, input_num)

    def forward(self, x):
        # Project across channels (channels-last), gate, and restore layout.
        gate = self.linear(x.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)
        return x * self.sigmoid(gate)
class CNN(nn.Module):
    """Stack of Conv2d -> BatchNorm -> activation -> dropout -> avg-pool stages.

    One stage is built per entry of ``nb_filters``; module names
    (conv{i}, batchnorm{i}, relu{i}/glu{i}/cg{i}, dropout{i}, pooling{i})
    are kept stable so saved state_dicts remain loadable.
    """

    def __init__(self, n_in_channel, activation="Relu", conv_dropout=0,
                 kernel_size=[3, 3, 3], padding=[1, 1, 1], stride=[1, 1, 1], nb_filters=[64, 64, 64],
                 pooling=[(1, 4), (1, 4), (1, 4)]
                 ):
        super(CNN, self).__init__()
        self.nb_filters = nb_filters
        layers = nn.Sequential()
        activ = activation.lower()
        for idx, n_out in enumerate(nb_filters):
            n_in = n_in_channel if idx == 0 else nb_filters[idx - 1]
            layers.add_module('conv{0}'.format(idx),
                              nn.Conv2d(n_in, n_out, kernel_size[idx], stride[idx], padding[idx]))
            # Batch-norm is always enabled (the reference loop passed batch_norm=True).
            layers.add_module('batchnorm{0}'.format(idx), nn.BatchNorm2d(n_out, eps=0.001, momentum=0.99))
            # Unknown activation strings silently add no activation module.
            if activ == "leakyrelu":
                layers.add_module('relu{0}'.format(idx), nn.LeakyReLU(0.2))
            elif activ == "relu":
                layers.add_module('relu{0}'.format(idx), nn.ReLU())
            elif activ == "glu":
                layers.add_module('glu{0}'.format(idx), GLU(n_out))
            elif activ == "cg":
                layers.add_module('cg{0}'.format(idx), ContextGating(n_out))
            if conv_dropout is not None:
                layers.add_module('dropout{0}'.format(idx), nn.Dropout(conv_dropout))
            layers.add_module('pooling{0}'.format(idx), nn.AvgPool2d(pooling[idx]))  # bs x tframe x mels
        self.cnn = layers

    def load_state_dict(self, state_dict, strict=True):
        """Load parameters; the dict is the inner Sequential's state_dict."""
        self.cnn.load_state_dict(state_dict)

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Return the inner Sequential's state_dict (matches load_state_dict)."""
        return self.cnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)

    def save(self, filename):
        """Serialize the convolutional stack's parameters to ``filename``."""
        torch.save(self.cnn.state_dict(), filename)

    def forward(self, x):
        # input size : (batch_size, n_channels, n_frames, n_freq)
        return self.cnn(x)
| 4,002 | 37.12381 | 105 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/CRNN.py | import warnings
import torch.nn as nn
import torch
from models.RNN import BidirectionalGRU
from models.CNN import CNN
class CRNN(nn.Module):
    """CNN feature extractor followed by a bidirectional GRU and a frame-wise
    sigmoid classifier.

    ``forward`` returns ``(strong, weak)``: per-frame class probabilities
    [bs, frames, nclass] and clip-level probabilities [bs, nclass]
    (attention-pooled when ``attention=True``, otherwise time-averaged).
    """

    def __init__(self, n_in_channel, nclass, attention=False, activation="Relu", dropout=0,
                 train_cnn=True, rnn_type='BGRU', n_RNN_cell=64, n_layers_RNN=1, dropout_recurrent=0,
                 cnn_integration=False, **kwargs):
        super(CRNN, self).__init__()
        self.n_in_channel = n_in_channel
        self.attention = attention
        self.cnn_integration = cnn_integration
        n_in_cnn = n_in_channel
        # With cnn_integration, each input channel is run through the CNN
        # separately and the channels are re-merged before the RNN.
        if cnn_integration:
            n_in_cnn = 1
        self.cnn = CNN(n_in_cnn, activation, dropout, **kwargs)
        if not train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False
        self.train_cnn = train_cnn
        if rnn_type == 'BGRU':
            nb_in = self.cnn.nb_filters[-1]
            if self.cnn_integration:
                # self.fc = nn.Linear(nb_in * n_in_channel, nb_in)
                nb_in = nb_in * n_in_channel
            self.rnn = BidirectionalGRU(nb_in,
                                        n_RNN_cell, dropout=dropout_recurrent, num_layers=n_layers_RNN)
        else:
            # Fix: the exception was instantiated but never raised, so an
            # unsupported rnn_type silently produced a model without self.rnn.
            raise NotImplementedError("Only BGRU supported for CRNN for now")
        self.dropout = nn.Dropout(dropout)
        self.dense = nn.Linear(n_RNN_cell*2, nclass)
        self.sigmoid = nn.Sigmoid()
        if self.attention:
            self.dense_softmax = nn.Linear(n_RNN_cell*2, nclass)
            self.softmax = nn.Softmax(dim=-1)

    def load_cnn(self, state_dict):
        """Load only the CNN part, re-freezing it if train_cnn is False."""
        self.cnn.load_state_dict(state_dict)
        if not self.train_cnn:
            for param in self.cnn.parameters():
                param.requires_grad = False

    def load_state_dict(self, state_dict, strict=True):
        """Load from the nested {"cnn", "rnn", "dense"} dict produced by state_dict()."""
        self.cnn.load_state_dict(state_dict["cnn"])
        self.rnn.load_state_dict(state_dict["rnn"])
        self.dense.load_state_dict(state_dict["dense"])

    def state_dict(self, destination=None, prefix='', keep_vars=False):
        """Return a nested dict of the three trainable sub-modules."""
        state_dict = {"cnn": self.cnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars),
                      "rnn": self.rnn.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars),
                      'dense': self.dense.state_dict(destination=destination, prefix=prefix, keep_vars=keep_vars)}
        return state_dict

    def save(self, filename):
        """Serialize the nested state dict to ``filename``."""
        parameters = {'cnn': self.cnn.state_dict(), 'rnn': self.rnn.state_dict(), 'dense': self.dense.state_dict()}
        torch.save(parameters, filename)

    def forward(self, x):
        # input size : (batch_size, n_channels, n_frames, n_freq)
        if self.cnn_integration:
            bs_in, nc_in = x.size(0), x.size(1)
            x = x.view(bs_in * nc_in, 1, *x.shape[2:])
        # conv features
        x = self.cnn(x)
        bs, chan, frames, freq = x.size()
        if self.cnn_integration:
            x = x.reshape(bs_in, chan * nc_in, frames, freq)
        if freq != 1:
            # Frequency axis not fully pooled: fold it into the channel axis.
            warnings.warn(f"Output shape is: {(bs, frames, chan * freq)}, from {freq} staying freq")
            x = x.permute(0, 2, 1, 3)
            x = x.contiguous().view(bs, frames, chan * freq)
        else:
            x = x.squeeze(-1)
            x = x.permute(0, 2, 1)  # [bs, frames, chan]
        # rnn features
        x = self.rnn(x)
        x = self.dropout(x)
        strong = self.dense(x)  # [bs, frames, nclass]
        strong = self.sigmoid(strong)
        if self.attention:
            sof = self.dense_softmax(x)  # [bs, frames, nclass]
            sof = self.softmax(sof)
            sof = torch.clamp(sof, min=1e-7, max=1)
            # Attention pooling: frame probabilities weighted by the softmax.
            weak = (strong * sof).sum(1) / sof.sum(1)  # [bs, nclass]
        else:
            weak = strong.mean(1)
        return strong, weak
if __name__ == '__main__':
    # Smoke test: build a CRNN with a 3-stage CNN configuration to check that
    # the module graph can be constructed (result intentionally discarded).
    CRNN(64, 10, kernel_size=[3, 3, 3], padding=[1, 1, 1], stride=[1, 1, 1], nb_filters=[64, 64, 64],
         pooling=[(1, 4), (1, 4), (1, 4)])
| 4,037 | 38.588235 | 115 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/models/RNN.py | import warnings
import torch
from torch import nn as nn
class BidirectionalGRU(nn.Module):
    """Thin wrapper around a bidirectional, batch-first ``nn.GRU``.

    ``forward`` returns only the output sequence [bs, frames, 2 * n_hidden];
    the final hidden state is discarded.
    """

    def __init__(self, n_in, n_hidden, dropout=0, num_layers=1):
        super(BidirectionalGRU, self).__init__()
        self.rnn = nn.GRU(n_in, n_hidden, bidirectional=True, dropout=dropout, batch_first=True,
                          num_layers=num_layers)

    def forward(self, input_feat):
        outputs, _hidden = self.rnn(input_feat)
        return outputs
class BidirectionalLSTM(nn.Module):
    """Bidirectional LSTM followed by a per-frame linear projection.

    The LSTM uses ``nHidden // 2`` units per direction, so its bidirectional
    output carries ``2 * (nHidden // 2)`` features per frame. The embedding
    layer must consume exactly that many; the previous ``nHidden * 2`` input
    size made ``forward`` fail with a matmul shape mismatch.
    """

    def __init__(self, nIn, nHidden, nOut, dropout=0, num_layers=1):
        super(BidirectionalLSTM, self).__init__()
        self.rnn = nn.LSTM(nIn, nHidden // 2, bidirectional=True, batch_first=True,
                           dropout=dropout, num_layers=num_layers)
        # Bug fix: match the LSTM's actual output width (2 directions each of
        # nHidden // 2 units), not nHidden * 2.
        self.embedding = nn.Linear(2 * (nHidden // 2), nOut)

    def save(self, filename):
        """Serialize the module parameters to ``filename``."""
        torch.save(self.state_dict(), filename)

    def load(self, filename=None, parameters=None):
        """Load parameters from a file path or from an in-memory state_dict."""
        if filename is not None:
            self.load_state_dict(torch.load(filename))
        elif parameters is not None:
            self.load_state_dict(parameters)
        else:
            raise NotImplementedError("load is a filename or a list of parameters (state_dict)")

    def forward(self, input_feat):
        recurrent, _ = self.rnn(input_feat)
        b, T, h = recurrent.size()
        # Flatten (batch, time) so the Linear applies per frame.
        t_rec = recurrent.contiguous().view(b * T, h)
        output = self.embedding(t_rec)  # [b * T, nOut]
        output = output.view(b, T, -1)
        return output
| 1,498 | 31.586957 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/utils.py | from __future__ import print_function
import glob
import warnings
import numpy as np
import pandas as pd
import soundfile
import os
import os.path as osp
import librosa
import torch
from desed.utils import create_folder
from torch import nn
import config as cfg
def median_smoothing(input_tensor, win_length):
    """Temporal median filtering of per-class score curves.

    Args:
        input_tensor: 2-D tensor [n_frames, n_classes] of per-frame scores.
        win_length: int, length of the median window in frames (odd expected,
            since the pad length is (win_length - 1) // 2).

    Returns:
        torch.Tensor [n_frames, n_classes], allocated on the GPU — this
        function requires CUDA.
    """
    nFrms, nClass = input_tensor.shape[0], input_tensor.shape[1]
    pad_length = (win_length-1) // 2
    # Output buffer lives on the GPU.
    output_tensor = torch.zeros(nFrms, nClass).cuda()
    for cter in range(nClass):
        # One class (column) at a time.
        tensor1D = input_tensor[:,cter]
        # Zero-pad both ends first...
        indices = torch.nn.functional.pad(tensor1D, (pad_length,0), mode="constant", value=0.)
        indices = torch.nn.functional.pad(indices, (0,pad_length), mode="constant", value=0.)
        # ...then overwrite the left padding with the first real value
        # (edge replication on the left; the right edge keeps zero padding).
        indices[..., :pad_length] = torch.cat(pad_length*[indices[..., pad_length].unsqueeze(-1)], dim=-1)
        # Sliding windows of win_length with stride 1; median over each window.
        roll = indices.unfold(-1, win_length, 1)
        values, _ = torch.median(roll, -1)
        output_tensor[:, cter] = values[:nFrms]
    return output_tensor
def read_audio(path, target_fs=None):
    """Load a wav file as a mono signal, optionally resampled.

    Args:
        path: str, path of the audio file.
        target_fs: int, (Default value = None) sampling rate of the returned audio;
            if None, the file's native rate is kept.

    Returns:
        tuple
        (numpy.array, sampling rate) of the (possibly resampled) mono audio.
    """
    audio, fs = soundfile.read(path)
    # Down-mix multi-channel recordings by averaging the channels.
    if audio.ndim > 1:
        audio = np.mean(audio, axis=1)
    # Resample only when a different target rate was requested.
    if target_fs is not None and fs != target_fs:
        audio = librosa.resample(audio, orig_sr=fs, target_sr=target_fs)
        fs = target_fs
    return audio, fs
def weights_init(m):
    """Initialize the weights of Conv2d, BatchNorm, GRU and Linear layers.

    Based on the work of Xavier Glorot.

    Args:
        m: the module whose parameters should be (re-)initialized.
    """
    layer_name = m.__class__.__name__
    if "Conv2d" in layer_name:
        nn.init.xavier_uniform_(m.weight, gain=np.sqrt(2))
        m.bias.data.fill_(0)
    elif "BatchNorm" in layer_name:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)
    elif "GRU" in layer_name:
        # Orthogonal init for every weight matrix; 1-D biases are left untouched.
        for weight in m.parameters():
            if len(weight.size()) > 1:
                nn.init.orthogonal_(weight.data)
    elif "Linear" in layer_name:
        m.weight.data.normal_(0, 0.01)
        m.bias.data.zero_()
def to_cuda_if_available(*args):
    """Move the given torch objects to the GPU when one is available.

    Args:
        args: torch objects to put on cuda if available (each needs a .cuda() method).

    Returns:
        The same objects, on GPU if GPUs are available.  A single argument is
        returned bare; several arguments are returned as a list.
    """
    moved = [obj.cuda() for obj in args] if torch.cuda.is_available() else list(args)
    return moved[0] if len(moved) == 1 else moved
class SaveBest:
    """Callback tracking the best value of a metric and the epoch it occurred.

    Args:
        val_comp: str, (Default value = "inf") "inf" (aliases "lt"/"desc") when a
            lower value is better, "sup" (aliases "gt"/"asc") when higher is better.

    Attributes:
        comp: str, the comparison mode given at construction.
        best_val: float, best metric value seen so far.
        best_epoch: int, epoch at which the best value occurred.
        current_epoch: int, number of times apply() has been called.
    """
    def __init__(self, val_comp="inf"):
        self.comp = val_comp
        if val_comp in ["inf", "lt", "desc"]:
            self.best_val = np.inf
        elif val_comp in ["sup", "gt", "asc"]:
            self.best_val = 0
        else:
            raise NotImplementedError("value comparison is only 'inf' or 'sup'")
        self.best_epoch = 0
        self.current_epoch = 0
    def apply(self, value):
        """Record `value` for the current epoch.

        Args:
            value: float, the value of the tracked metric.

        Returns:
            bool, True when the model should be saved (first epoch or new best).
        """
        # The very first epoch always triggers a save, even when the value does
        # not beat best_val (relevant for "sup" with a negative first value).
        is_best = self.current_epoch == 0
        improved = (self.comp == "inf" and value < self.best_val) or \
                   (self.comp == "sup" and value > self.best_val)
        if improved:
            self.best_epoch = self.current_epoch
            self.best_val = value
            is_best = True
        self.current_epoch += 1
        return is_best
class JSD(nn.Module):
    """Jensen-Shannon-style divergence built from two KL terms.

    NOTE(review): the returned value is negated, -0.5 * (KL(p||m) + KL(q||m)),
    so this is the *negative* of the usual JSD — presumably used as a
    similarity/agreement term; confirm against the loss that consumes it.
    NOTE(review): nn.KLDivLoss expects log-probabilities as its first argument,
    but p and q are passed directly here — verify callers provide log-probs.
    """
    def __init__(self):
        super(JSD, self).__init__()
        # Instantiated directly on the GPU; this class therefore requires CUDA.
        self.kld = nn.KLDivLoss().cuda()
    def apply(self, p, q):
        # m is the mid-point distribution between p and q.
        m = 0.5*(p+q)
        return -0.5*(self.kld(p,m)+self.kld(q,m))
class Entropy(nn.Module):
    """Shannon entropy (natural log) of a probability tensor along one dimension."""
    def __init__(self):
        super(Entropy, self).__init__()
    def forward(self, x, dim):
        """Return -sum(x * log(x)) over `dim`.

        Args:
            x: tensor of probabilities (assumed strictly positive — no epsilon here).
            dim: int, dimension over which to sum.
        """
        plogp = torch.log(x) * x
        return -1.0 * plogp.sum(dim)
class EarlyStopping:
    """Callback to stop training when the metric stops improving.

    Args:
        patience: int, number of epochs without improvement tolerated before stopping.
        val_comp: str, (Default value = "inf") "inf" when a lower value is better,
            "sup" when higher is better.
        init_patience: int, minimum number of epochs before stopping is allowed.

    Attributes:
        patience: int, allowed epochs without improvement.
        first_early_wait: int, warm-up epochs during which stopping is disabled.
        val_comp: str, comparison mode ("inf" or "sup").
        best_val: float, best metric value seen so far.
        best_epoch: int, epoch at which the best value occurred.
        current_epoch: int, epochs seen since construction (or since the last stop).
    """
    def __init__(self, patience, val_comp="inf", init_patience=0):
        self.patience = patience
        self.first_early_wait = init_patience
        self.val_comp = val_comp
        if val_comp == "inf":
            self.best_val = np.inf
        elif val_comp == "sup":
            self.best_val = 0
        else:
            raise NotImplementedError("value comparison is only 'inf' or 'sup'")
        self.current_epoch = 0
        self.best_epoch = 0
    def apply(self, value):
        """Record `value`; return True when training should stop.

        Args:
            value: the value of the tracked metric.
        """
        improved = (self.val_comp == "inf" and value < self.best_val) or \
                   (self.val_comp == "sup" and value > self.best_val)
        if improved:
            self.best_val = value
            self.best_epoch = self.current_epoch
        elif self.current_epoch - self.best_epoch > self.patience and self.current_epoch > self.first_early_wait:
            # Patience exhausted (and past the warm-up window): signal a stop
            # and reset the epoch counter so the callback can be reused.
            self.current_epoch = 0
            return True
        self.current_epoch += 1
        return False
class AverageMeterSet:
    """A named collection of AverageMeter objects, keyed by metric name."""
    def __init__(self):
        self.meters = {}
    def __getitem__(self, key):
        return self.meters[key]
    def update(self, name, value, n=1):
        """Update (creating on first use) the meter called `name` with `value` (weight n)."""
        meter = self.meters.get(name)
        if meter is None:
            meter = AverageMeter()
            self.meters[name] = meter
        meter.update(value, n)
    def reset(self):
        """Reset every meter in the set."""
        for meter in self.meters.values():
            meter.reset()
    def values(self, postfix=''):
        """Mapping of name+postfix -> latest value."""
        return {name + postfix: meter.val for name, meter in self.meters.items()}
    def averages(self, postfix='/avg'):
        """Mapping of name+postfix -> running average."""
        return {name + postfix: meter.avg for name, meter in self.meters.items()}
    def sums(self, postfix='/sum'):
        """Mapping of name+postfix -> running sum."""
        return {name + postfix: meter.sum for name, meter in self.meters.items()}
    def counts(self, postfix='/count'):
        """Mapping of name+postfix -> number of (weighted) updates."""
        return {name + postfix: meter.count for name, meter in self.meters.items()}
    def __str__(self):
        # Small values switch to scientific notation for readability.
        parts = []
        for name, meter in self.meters.items():
            fmat = ".2E" if meter.val < 0.01 else ".4f"
            parts.append("{} {:{format}} \t".format(name, meter.val, format=fmat))
        return "".join(parts)
class AverageMeter:
    """Computes and stores the average, sum, count and latest value of a metric."""
    def __init__(self):
        self.reset()
    def reset(self):
        """Clear all statistics."""
        self.val, self.avg, self.sum, self.count = 0, 0, 0, 0
    def update(self, val, n=1):
        """Add `val` with weight `n` to the running statistics."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
    def __format__(self, spec):
        # Formatting a meter formats its running average.
        return format(self.avg, spec)
def generate_tsv_wav_durations(audio_dir, out_tsv):
    """Build a dataframe of (filename, duration) for every wav file in a folder.

    Args:
        audio_dir: str, folder searched for "*.wav" files (non-recursive, via glob).
        out_tsv: str, path of the output tsv file; skipped when None.

    Returns:
        pd.DataFrame with columns ["filename", "duration"].
    """
    rows = [
        [os.path.basename(wav_path), soundfile.info(wav_path).duration]
        for wav_path in glob.glob(os.path.join(audio_dir, "*.wav"))
    ]
    meta_df = pd.DataFrame(rows, columns=["filename", "duration"])
    if out_tsv is not None:
        meta_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.1f")
    return meta_df
def generate_tsv_from_isolated_events(wav_folder, out_tsv=None):
    """ Generate list of separated wav files in a folder and export them in a tsv file
    Separated audio files considered are all wav files in 'subdirectories' of the 'wav_folder'
    Args:
        wav_folder: str, path of the folder containing subdirectories (one for each mixture separated)
        out_tsv: str, path of the tsv in which to save the list of files; when the
            file already exists it is read back instead of re-scanning the folder.
    Returns:
        pd.DataFrame, having only one column with the filename considered
    """
    if out_tsv is not None and os.path.exists(out_tsv):
        # Cached result from a previous run: just reload it.
        source_sep_df = pd.read_csv(out_tsv, sep="\t")
    else:
        source_sep_df = pd.DataFrame()
        list_dirs = [d for d in os.listdir(wav_folder) if osp.isdir(osp.join(wav_folder, d))]
        for dirname in list_dirs:
            list_isolated_files = []
            for directory, subdir, fnames in os.walk(osp.join(wav_folder, dirname)):
                for fname in fnames:
                    if osp.splitext(fname)[1] in [".wav"]:
                        # Get the level folders and keep it in the tsv
                        subfolder = directory.split(dirname + os.sep)[1:]
                        if len(subfolder) > 0:
                            subdirs = osp.join(*subfolder)
                        else:
                            subdirs = ""
                        # Append the subfolders and name in the list of files
                        list_isolated_files.append(osp.join(dirname, subdirs, fname))
                    else:
                        warnings.warn(f"Not only wav audio files in the separated source folder,"
                                      f"{fname} not added to the .tsv file")
            # BUG FIX: pd.DataFrame.append was deprecated and removed in pandas 2.0;
            # pd.concat reproduces the old behavior (indexes kept, no sorting).
            source_sep_df = pd.concat([source_sep_df, pd.DataFrame(list_isolated_files, columns=["filename"])])
        if out_tsv is not None:
            create_folder(os.path.dirname(out_tsv))
            source_sep_df.to_csv(out_tsv, sep="\t", index=False, float_format="%.3f")
    return source_sep_df
def meta_path_to_audio_dir(tsv_path):
    """Map a metadata tsv path to its matching audio directory (extension dropped)."""
    base, _ext = os.path.splitext(tsv_path.replace("metadata", "audio"))
    return base
def audio_dir_to_meta_path(audio_dir):
    """Map an audio directory path to the matching metadata tsv path."""
    return "{}.tsv".format(audio_dir.replace("audio", "metadata"))
def get_durations_df(gtruth_path, audio_dir=None):
    """Return (computing and caching if needed) the durations dataframe for a ground-truth file.

    Args:
        gtruth_path: str, ground-truth file whose extension decides the cache file name.
        audio_dir: str, folder with the audio; defaults to the synthetic set from config.
    """
    if audio_dir is None:
        audio_dir = meta_path_to_audio_dir(cfg.synthetic)
    _path, ext = os.path.splitext(gtruth_path)
    cache_path = "./validation_durations" + ext
    # Compute durations once and cache them in the working directory.
    if os.path.exists(cache_path):
        durations_df = pd.read_csv(cache_path, sep="\t")
    else:
        durations_df = generate_tsv_wav_durations(audio_dir, cache_path)
    return durations_df
| 11,860 | 33.988201 | 119 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/Scaler.py | import time
import warnings
import numpy as np
import torch
import json
from utilities.Logger import create_logger
logger = create_logger(__name__)
class Scaler:
    """Standard-score scaler computed incrementally over a dataset.

    Accumulates the per-sample mean and mean-of-squares (means()), derives the
    variance as E[x^2] - E[x]^2, and normalizes batches with (x - mean) / std.
    State can be saved/loaded as JSON via state_dict()/save()/load().
    """
    def __init__(self):
        # Running statistics; populated by means() / calculate_scaler().
        self.mean_ = None
        self.mean_of_square_ = None
        self.std_ = None
    # compute the mean incrementaly
    def mean(self, data, axis=-1):
        # -1 means have at the end a mean vector of the last dimension
        if axis == -1:
            mean = data
            # Repeatedly average over axis 0 until a 1-D vector remains.
            while len(mean.shape) != 1:
                mean = np.mean(mean, axis=0, dtype=np.float64)
        else:
            mean = np.mean(data, axis=axis, dtype=np.float64)
        return mean
    # compute variance thanks to mean and mean of square
    def variance(self, mean, mean_of_square):
        # Var[x] = E[x^2] - E[x]^2
        return mean_of_square - mean**2
    def means(self, dataset):
        """Accumulate the mean and mean-of-squares over all samples of a dataset.

        :param dataset: dataset, from DataLoad class, each sample is an (X, y) tuple
            (bare samples without labels are also accepted).
        :return: self, with mean_ and mean_of_square_ populated.
        """
        logger.info('computing mean')
        start = time.time()
        shape = None
        counter = 0
        for sample in dataset:
            # Accept either (X, y) pairs or bare feature arrays.
            if type(sample) in [tuple, list] and len(sample) == 2:
                batch_x, _ = sample
            else:
                batch_x = sample
            if type(batch_x) is torch.Tensor:
                batch_x_arr = batch_x.numpy()
            else:
                batch_x_arr = batch_x
            data_square = batch_x_arr ** 2
            counter += 1
            # All samples must share one shape for this incremental scheme.
            if shape is None:
                shape = batch_x_arr.shape
            else:
                if not batch_x_arr.shape == shape:
                    raise NotImplementedError("Not possible to add data with different shape in mean calculation yet")
            # assume first item will have shape info
            if self.mean_ is None:
                self.mean_ = self.mean(batch_x_arr, axis=-1)
            else:
                self.mean_ += self.mean(batch_x_arr, axis=-1)
            if self.mean_of_square_ is None:
                self.mean_of_square_ = self.mean(data_square, axis=-1)
            else:
                self.mean_of_square_ += self.mean(data_square, axis=-1)
        # Sums of per-sample means -> mean over the dataset.
        self.mean_ /= counter
        self.mean_of_square_ /= counter
        # ### To be used if data different shape, but need to stop the iteration before.
        # rest = len(dataset) - i
        # if rest != 0:
        #     weight = rest / float(i + rest)
        #     X, y = dataset[-1]
        #     data_square = X ** 2
        #     mean = mean * (1 - weight) + self.mean(X, axis=-1) * weight
        #     mean_of_square = mean_of_square * (1 - weight) + self.mean(data_square, axis=-1) * weight
        logger.debug('time to compute means: ' + str(time.time() - start))
        return self
    def std(self, variance):
        """Standard deviation from a variance."""
        return np.sqrt(variance)
    def calculate_scaler(self, dataset):
        """Compute mean_ and std_ over `dataset`; returns (mean_, std_)."""
        self.means(dataset)
        variance = self.variance(self.mean_, self.mean_of_square_)
        self.std_ = self.std(variance)
        return self.mean_, self.std_
    def normalize(self, batch):
        """Standard-score normalize a batch; torch tensors are returned as tensors."""
        if type(batch) is torch.Tensor:
            batch_ = batch.numpy()
            batch_ = (batch_ - self.mean_) / self.std_
            return torch.Tensor(batch_)
        else:
            return (batch - self.mean_) / self.std_
    def state_dict(self):
        """JSON-serializable dict of the accumulated statistics (numpy only)."""
        if type(self.mean_) is not np.ndarray:
            raise NotImplementedError("Save scaler only implemented for numpy array means_")
        dict_save = {"mean_": self.mean_.tolist(),
                     "mean_of_square_": self.mean_of_square_.tolist()}
        return dict_save
    def save(self, path):
        """Dump state_dict() as JSON to `path`."""
        dict_save = self.state_dict()
        with open(path, "w") as f:
            json.dump(dict_save, f)
    def load(self, path):
        """Load a JSON state previously written by save()."""
        with open(path, "r") as f:
            dict_save = json.load(f)
        self.load_state_dict(dict_save)
    def load_state_dict(self, state_dict):
        # std_ is re-derived from the stored mean / mean-of-squares.
        self.mean_ = np.array(state_dict["mean_"])
        self.mean_of_square_ = np.array(state_dict["mean_of_square_"])
        variance = self.variance(self.mean_, self.mean_of_square_)
        self.std_ = self.std(variance)
class ScalerPerAudio:
    """Normalize inputs one by one (no dataset-level statistics kept).

    Args:
        normalization: str, in {"global", "per_band"}.
        type_norm: str, in {"standard", "max", "min-max"}.

    NOTE(review): the original docstring said {"global", "per_channel"} and
    {"mean", "max"}, but the code checks "per_band" and
    {"standard", "max", "min-max"} — the code is authoritative here.
    """
    def __init__(self, normalization="global", type_norm="mean"):
        self.normalization = normalization
        self.type_norm = type_norm
    def normalize(self, spectrogram):
        """ Apply the transformation on data
        Args:
            spectrogram: np.array or torch.Tensor, the data to be modified,
                assumed to have 3 dimensions (leading channel axis).
        Returns:
            np.array or torch.Tensor (same family as the input)
            The transformed data
        """
        # Remember whether to convert back to a tensor at the end.
        if type(spectrogram) is torch.Tensor:
            tensor = True
            spectrogram = spectrogram.numpy()
        else:
            tensor = False
        # axis=None -> statistics over the whole array; axis=0 -> per band.
        if self.normalization == "global":
            axis = None
        elif self.normalization == "per_band":
            axis = 0
        else:
            raise NotImplementedError("normalization is 'global' or 'per_band'")
        # NOTE(review): "standard" and "min-max" subtract stats of channel 0
        # (spectrogram[0]) from the FULL array, while "max" returns only
        # channel 0 — this asymmetry looks unintentional; confirm before reuse.
        if self.type_norm == "standard":
            res_data = (spectrogram - spectrogram[0].mean(axis)) / (spectrogram[0].std(axis) + np.finfo(float).eps)
        elif self.type_norm == "max":
            res_data = spectrogram[0] / (np.abs(spectrogram[0].max(axis)) + np.finfo(float).eps)
        elif self.type_norm == "min-max":
            res_data = (spectrogram - spectrogram[0].min(axis)) / (spectrogram[0].max(axis) - spectrogram[0].min(axis)
                                                                   + np.finfo(float).eps)
        else:
            raise NotImplementedError("No other type_norm implemented except {'standard', 'max', 'min-max'}")
        # Degenerate inputs (constant bands, zeros) can produce NaN/inf: zero them.
        if np.isnan(res_data).any():
            res_data = np.nan_to_num(res_data, posinf=0, neginf=0)
            warnings.warn("Trying to divide by zeros while normalizing spectrogram, replacing nan by 0")
        if tensor:
            res_data = torch.Tensor(res_data)
        return res_data
    # The following are no-ops kept for interface compatibility with Scaler.
    def state_dict(self):
        pass
    def save(self, path):
        pass
    def load(self, path):
        pass
    def load_state_dict(self, state_dict):
        pass
| 6,478 | 31.888325 | 118 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/utilities/Transforms.py | import warnings
import librosa
import random
import numpy as np
import torch
class Transform:
    """Base class for transformations applied to (data, label) samples.

    Subclasses must implement transform_data and may override transform_label.
    Subclasses also define a `flag` attribute: when True, transform_data is
    called with the label as `target` (needed by e.g. mixup-style transforms).
    """
    def transform_data(self, data):
        # Mandatory to be defined by subclasses
        raise NotImplementedError("Abstract object")
    def transform_label(self, label):
        # Identity by default; subclasses may override.
        return label
    def _apply_transform(self, sample_no_index):
        data, label = sample_no_index
        if type(data) is tuple:
            # Several data inputs (duet, triplet, ...): transform each one.
            if type(data[0]) is tuple:
                # Nested case: data is (inner_tuple, inner_label).
                inner, inner_label = data
                transformed = tuple(self.transform_data(item) for item in inner)
                data = transformed, inner_label
            else:
                data = tuple(self.transform_data(item) for item in data)
        elif self.flag:
            data = self.transform_data(data, target=label)
        else:
            data = self.transform_data(data)
        return data, self.transform_label(label)
    def __call__(self, sample):
        """ Apply the transformation
        Args:
            sample: tuple, a sample defined by a DataLoad class
        Returns:
            tuple
            The transformed tuple
        """
        if type(sample[1]) is int:  # an index rides along with the sample data
            sample_data, index = sample
            return self._apply_transform(sample_data), index
        return self._apply_transform(sample)
class GaussianNoise(Transform):
    """ Apply gaussian noise
    Args:
        mean: float, the mean of the gaussian distribution.
        std: float, standard deviation of the gaussian distribution.
    Attributes:
        mean: float, the mean of the gaussian distribution.
        std: float, standard deviation of the gaussian distribution.
    """
    def __init__(self, mean=0, std=0.5):
        # BUG FIX: `flag` is read by Transform._apply_transform but was never
        # set here, so using this transform raised AttributeError.
        self.flag = False
        self.mean = mean
        self.std = std
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            np.array
            The transformed data
        """
        # BUG FIX: the constructor's mean/std were ignored (0 and 0.5 were
        # hard-coded).  The defaults reproduce the previous behavior exactly
        # (std ** 2 matches the original 0.5 ** 2 scale).
        return data + np.abs(np.random.normal(self.mean, self.std ** 2, data.shape))
class ApplyLog(Transform):
    """Convert an amplitude spectrogram to dB (log) scale."""
    def __init__(self):
        # This transform never needs the label.
        self.flag = False
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            np.array
            The transformed data
        """
        # Transpose before/after so the conversion runs on the transposed layout.
        return librosa.amplitude_to_db(data.T).T
def pad_trunc_seq(x, max_len):
    """Pad (with zeros) or truncate a sequence to a fixed length along axis -2.

    Args:
        x: ndarray, input sequence data.
        max_len: integer, target length for axis -2.

    Returns:
        ndarray, padded or truncated input sequence data.
    """
    current_len = x.shape[-2]
    if current_len > max_len:
        return x[..., :max_len, :]
    pad_amount = max_len - current_len
    # Only the second-to-last axis is padded (at its end).
    pad_spec = ((0, 0),) * (x.ndim - 2) + ((0, pad_amount), (0, 0))
    return np.pad(x, pad_spec, mode="constant")
class PadOrTrunc(Transform):
    """ Pad or truncate a sequence given a number of frames
    Args:
        nb_frames: int, the number of frames to match
        apply_to_label: bool, whether the label must be padded/truncated as well.
    Attributes:
        nb_frames: int, the number of frames to match
    """
    def __init__(self, nb_frames, apply_to_label=False):
        self.flag = False
        self.nb_frames = nb_frames
        self.apply_to_label = apply_to_label
    def transform_label(self, label):
        """Pad/truncate the label too when apply_to_label is set."""
        return pad_trunc_seq(label, self.nb_frames) if self.apply_to_label else label
    def transform_data(self, data):
        """Pad or truncate `data` to exactly nb_frames frames (axis -2)."""
        return pad_trunc_seq(data, self.nb_frames)
class AugmentGaussianNoise(Transform):
    """ Return the original features together with a noisy copy.
    Args:
        mean: float, mean of the Gaussian noise to add (used with `std`).
        std: float, std of the Gaussian noise to add.
        snr: float, alternatively, target signal-to-noise ratio in dB.
    Attributes:
        mean, std, snr: see Args; exactly one of (std, snr) must be provided.
    """
    def __init__(self, mean=0., std=None, snr=None):
        self.flag = False
        self.mean = mean
        self.std = std
        self.snr = snr
    @staticmethod
    def gaussian_noise(features, snr):
        """Apply gaussian noise on each point of the data
        Args:
            features: numpy.array, features to be modified
            snr: float, average snr to be used for data augmentation
        Returns:
            numpy.ndarray
            Modified features
        """
        # If using source separation, using only the first audio (the mixture) to compute the gaussian noise,
        # Otherwise it just removes the first axis if it was an extended one
        if len(features.shape) == 3:
            feat_used = features[0]
        else:
            feat_used = features
        std = np.sqrt(np.mean((feat_used ** 2) * (10 ** (-snr / 10)), axis=-2))
        try:
            noise = np.random.normal(0, std, features.shape)
        except Exception:
            warnings.warn(f"the computed noise did not work std: {std}, using 0.5 for std instead")
            noise = np.random.normal(0, 0.5, features.shape)
        return features + noise
    def transform_data(self, data):
        """ Apply the transformation on data
        Args:
            data: np.array, the data to be modified
        Returns:
            (np.array, np.array)
            (original data, noisy_data (data + noise))
        """
        if self.std is not None:
            # BUG FIX: mean/std were ignored and 0 / 0.5 were hard-coded; the
            # defaults reproduce the previous behavior (std ** 2 keeps the
            # original 0.5 ** 2 scale).
            noisy_data = data + np.abs(np.random.normal(self.mean, self.std ** 2, data.shape))
        elif self.snr is not None:
            noisy_data = self.gaussian_noise(data, self.snr)
        else:
            raise NotImplementedError("Only (mean, std) or snr can be given")
        return data, noisy_data
class ToTensor(Transform):
    """Convert ndarrays in a sample to float Tensors.

    Args:
        unsqueeze_axis: int, (Default value = None) axis at which to insert an
            extra dimension (useful to add a channel axis for CNNs).
    Attributes:
        unsqueeze_axis: int, axis at which the extra dimension is inserted.
    """
    def __init__(self, unsqueeze_axis=None):
        self.flag = False
        self.unsqueeze_axis = unsqueeze_axis
    def transform_data(self, data):
        """Return `data` as a float tensor, optionally unsqueezed."""
        tensor = torch.from_numpy(data).float()
        return tensor if self.unsqueeze_axis is None else tensor.unsqueeze(self.unsqueeze_axis)
    def transform_label(self, label):
        # float otherwise error
        return torch.from_numpy(label).float()
class Normalize(Transform):
    """Normalize inputs with a scaler object.

    Args:
        scaler: Scaler object, provides the normalize() method used on each sample.
    Attributes:
        scaler: Scaler object, the scaler used to normalize the data.
    """
    def __init__(self, scaler):
        self.flag = False
        self.scaler = scaler
    def transform_data(self, data):
        """Return the scaler-normalized version of `data`."""
        return self.scaler.normalize(data)
class Mixup(Transform):
    """Mix each batch element with another random element of the same batch.

    Args:
        alpha, beta: floats, parameters of the Beta distribution the mixing
            coefficient is drawn from.
        mixup_label_type: str, "soft" (convex combination of targets, clipped)
            or "hard" (sum of targets, clipped to [0, 1]).
    """
    def __init__(self, alpha=0.2, beta=0.2, mixup_label_type="soft"):
        self.flag = True  # needs the target: transform_data receives it
        self.alpha = alpha
        self.beta = beta
        self.mixup_label_type = mixup_label_type
    def transform_data(self, data, target=None):
        """Return (data, mixed_data) and, when a target is given, the mixed target.

        Raises:
            NotImplementedError: when mixup_label_type is not "soft" or "hard".
        """
        batch_size = data.shape[0]
        c = np.random.beta(self.alpha, self.beta)
        perm = torch.randperm(batch_size)
        mixed_data = c * data + (1 - c) * data[perm, :]
        if target is None:
            return data, mixed_data
        if self.mixup_label_type == "soft":
            mixed_target = np.clip(
                c * target + (1 - c) * target[perm, :], a_min=0, a_max=1)
        elif self.mixup_label_type == "hard":
            mixed_target = np.clip(target + target[perm, :], a_min=0, a_max=1)
        else:
            # BUG FIX: the message referenced a bare `mixup_label_type`, which
            # raised NameError instead of the intended NotImplementedError.
            raise NotImplementedError(
                f"mixup_label_type: {self.mixup_label_type} not implemented. choise in "
                f"{'soft', 'hard'}"
            )
        return (data, mixed_data), mixed_target
class TemporalShifting(Transform):
    """Roll the features in time by a random (Gaussian, sigma=40) frame count.

    Args:
        net_pooling: int, temporal pooling factor of the network, used to convert
            the frame shift into the label resolution.
    """
    def __init__(self, net_pooling=4):
        self.flag = True  # needs the target to shift the labels consistently
        self.net_pooling = net_pooling
    def transform_data(self, data, target=None):
        """Return (data, shifted_data) and, when given, the matching shifted labels."""
        frames, n_bands = data.shape
        shift = int(random.gauss(0, 40))
        shifted = np.roll(data, shift, axis=0)
        if target is None:
            return data, shifted
        # Convert the feature-frame shift to the (pooled) label resolution.
        label_shift = -abs(shift) // self.net_pooling if shift < 0 else shift // self.net_pooling
        return (data, shifted), np.roll(target, label_shift, axis=0)
class CombineChannels(Transform):
    """ Combine channels when using source separation (to remove the channels with low intensity)
    Args:
        combine_on: str, in {"max", "min"}, the channel into which the
            lowest-energy channels are folded.
        n_channel_mix: int, the number of lowest energy channel to combine in another one
    """
    def __init__(self, combine_on="max", n_channel_mix=2):
        self.flag = False
        self.combine_on = combine_on
        self.n_channel_mix = n_channel_mix
    def transform_data(self, data):
        """Fold the two lowest-energy source channels into another source channel.

        Args:
            data: np.array; the first channel is the mixture, the rest are sources.
        Returns:
            np.array, mixture plus the remaining (combined) source channels.
        """
        mix, sources = data[:1], data[1:]  # :1 keeps the leading axis
        # Per-channel energy, sorted ascending.
        order = (sources ** 2).sum(-1).sum(-1).argsort()
        # NOTE: the two lowest channels are folded in regardless of n_channel_mix.
        folded = sources[order[:2]].sum(0)
        if self.combine_on == "min":
            sources[order[2]] += folded
        elif self.combine_on == "max":
            sources[order[-1]] += folded
        return np.concatenate((mix, sources[order[2:]]))
def get_transforms(frames, scaler=None, add_axis=0, noise_dict_params=None, combine_channels_args=None):
    """Build the standard pipeline: [combine] -> [noise] -> log -> pad -> tensor -> [normalize]."""
    pipeline = []
    if combine_channels_args is not None:
        pipeline.append(CombineChannels(*combine_channels_args))
    if noise_dict_params is not None:
        pipeline.append(AugmentGaussianNoise(**noise_dict_params))
    pipeline.append(ApplyLog())
    pipeline.append(PadOrTrunc(nb_frames=frames))
    # add_axis=None means no channel axis is inserted by ToTensor.
    pipeline.append(ToTensor(unsqueeze_axis=add_axis))
    if scaler is not None:
        pipeline.append(Normalize(scaler=scaler))
    return Compose(pipeline)
def get_transforms_v2(frames, scaler=None, add_axis=0, noise_dict_params=None, mixup_dict_params=None, shift_dict_params=None, combine_channels_args=None):
    """Like get_transforms, with optional mixup and temporal-shift augmentations."""
    pipeline = []
    if combine_channels_args is not None:
        pipeline.append(CombineChannels(*combine_channels_args))
    if noise_dict_params is not None:
        pipeline.append(AugmentGaussianNoise(**noise_dict_params))
    if mixup_dict_params is not None:
        pipeline.append(Mixup(**mixup_dict_params))
    if shift_dict_params is not None:
        pipeline.append(TemporalShifting(**shift_dict_params))
    pipeline.append(ApplyLog())
    pipeline.append(PadOrTrunc(nb_frames=frames))
    # add_axis=None means no channel axis is inserted by ToTensor.
    pipeline.append(ToTensor(unsqueeze_axis=add_axis))
    if scaler is not None:
        pipeline.append(Normalize(scaler=scaler))
    return Compose(pipeline)
class Compose(object):
    """Compose several transforms together and apply them in order.

    Args:
        transforms: list of ``Transform`` objects, list of transforms to compose.
        Example of transform: ToTensor()
    """
    def __init__(self, transforms):
        self.transforms = transforms
    def add_transform(self, transform):
        """Return a new Compose with `transform` appended (self is left untouched)."""
        extended = self.transforms.copy()
        extended.append(transform)
        return Compose(extended)
    def __call__(self, audio):
        for transform in self.transforms:
            audio = transform(audio)
        return audio
    def __repr__(self):
        body = "".join("\n    {0}".format(t) for t in self.transforms)
        return self.__class__.__name__ + "(" + body + "\n)"
| 13,603 | 30.710956 | 155 | py |
CRSTmodel | CRSTmodel-main/DCASE2020_baseline_platform/data_utils/DataLoad.py | import bisect
import numpy as np
import pandas as pd
import torch
import random
import warnings
from torch.utils.data import Dataset
from torch.utils.data.sampler import Sampler
from utilities.Logger import create_logger
import config as cfg
from utilities.Transforms import Compose
torch.manual_seed(0)
random.seed(0)
logger = create_logger(__name__, terminal_level=cfg.terminal_level)
class DataLoadDf(Dataset):
    """ Class derived from pytorch Dataset (DESED)
    Prepare the data to be used in a batch mode
    Args:
        df: pandas.DataFrame, the dataframe containing the set information (feat_filenames, labels),
            it should contain these columns :
            "feature_filename"
            "feature_filename", "event_labels"
            "feature_filename", "onset", "offset", "event_label"
        encode_function: function(), function which encode labels
        transform: function(), (Default value = None), function to be applied to the sample (pytorch transformations)
        return_indexes: bool, (Default value = False) whether or not to return indexes when use __getitem__
        in_memory: bool, (Default value = False) when True, loaded feature arrays are
            cached in RAM and reused on later accesses.
    Attributes:
        df: pandas.DataFrame, the dataframe containing the set information (feat_filenames, labels, ...)
        encode_function: function(), function which encode labels
        transform : function(), function to be applied to the sample (pytorch transformations)
        return_indexes: bool, whether or not to return indexes when use __getitem__
    """
    def __init__(self, df, encode_function=None, transform=None, return_indexes=False, in_memory=False):
        self.df = df
        self.encode_function = encode_function
        self.transform = transform
        self.return_indexes = return_indexes
        # One entry per unique feature file / audio file; these define the
        # dataset length and the index -> file mapping.
        self.feat_filenames = df.feature_filename.drop_duplicates()
        self.filenames = df.filename.drop_duplicates()
        self.in_memory = in_memory
        if self.in_memory:
            # filename -> np.array cache, filled lazily by get_feature_file_func.
            self.features = {}
    def set_return_indexes(self, val):
        """ Set the value of self.return_indexes
        Args:
            val : bool, whether or not to return indexes when use __getitem__
        """
        self.return_indexes = val
    def get_feature_file_func(self, filename):
        """Get a feature file from a filename
        Args:
            filename: str, name of the file to get the feature
        Returns:
            numpy.array
            containing the features computed previously
        """
        if not self.in_memory:
            data = np.load(filename).astype(np.float32)
        else:
            # Lazy in-memory cache: load once, reuse afterwards.
            if self.features.get(filename) is None:
                data = np.load(filename).astype(np.float32)
                self.features[filename] = data
            else:
                data = self.features[filename]
        return data
    def __len__(self):
        """
        Returns:
            int
            Length of the object (number of unique feature files)
        """
        length = len(self.feat_filenames)
        return length
    def get_sample(self, index):
        """From an index, get the features and the labels to create a sample
        Args:
            index: int, Index of the sample desired
        Returns:
            tuple
            Tuple containing the features and the labels (numpy.array, numpy.array)
        """
        features = self.get_feature_file_func(self.feat_filenames.iloc[index])
        # Drop a singleton leading axis if the file stored one.
        if len(features) == 1:
            features = features[0]
        # event_labels means weak labels, event_label means strong labels
        if "event_labels" in self.df.columns or {"onset", "offset", "event_label"}.issubset(self.df.columns):
            if "event_labels" in self.df.columns:
                # Weak labels: a comma-separated string (or NaN) per clip.
                label = self.df.iloc[index]["event_labels"]
                if pd.isna(label):
                    label = []
                if type(label) is str:
                    if label == "":
                        label = []
                    else:
                        label = label.split(",")
            else:
                # Strong labels: all (onset, offset, event_label) rows of this file.
                cols = ["onset", "offset", "event_label"]
                label = self.df[self.df.filename == self.filenames.iloc[index]][cols]
                if label.empty:
                    label = []
        else:
            label = "empty"  # trick to have -1 for unlabeled data and concat them with labeled
        if "filename" not in self.df.columns:
            raise NotImplementedError(
                "Dataframe to be encoded doesn't have specified columns: columns allowed: 'filename' for unlabeled;"
                "'filename', 'event_labels' for weak labels; 'filename' 'onset' 'offset' 'event_label' "
                "for strong labels, yours: {}".format(self.df.columns))
        if index == 0:
            logger.debug("label to encode: {}".format(label))
        if self.encode_function is not None:
            # labels are a list of string or list of list [[label, onset, offset]]
            y = self.encode_function(label)
        else:
            y = label
        sample = features, y
        return sample
    def __getitem__(self, index):
        """ Get a sample and transform it to be used in a ss_model, use the transformations
        Args:
            index : int, index of the sample desired
        Returns:
            tuple
            Tuple containing the features and the labels (numpy.array, numpy.array) or
            Tuple containing the features, the labels and the index (numpy.array, numpy.array, int)
        """
        sample = self.get_sample(index)
        if self.transform:
            sample = self.transform(sample)
        if self.return_indexes:
            sample = (sample, index)
        return sample
    def set_transform(self, transform):
        """Set the transformations used on a sample
        Args:
            transform: function(), the new transformations
        """
        self.transform = transform
    def add_transform(self, transform):
        """Return a copy of this dataset with `transform` appended to the Compose pipeline.

        Raises:
            TypeError: when the current transform is not a Compose.
        """
        if type(self.transform) is not Compose:
            raise TypeError("To add transform, the transform should already be a compose of transforms")
        transforms = self.transform.add_transform(transform)
        return DataLoadDf(self.df, self.encode_function, transforms, self.return_indexes, self.in_memory)
class ConcatDataset(Dataset):
    """Concatenate several datasets into one, resolving indices on the fly.

    Purpose: useful to assemble different existing datasets, possibly
    large-scale, since the concatenation copies nothing.

    Args:
        datasets: sequence, list of datasets to be concatenated.
    """
    @staticmethod
    def cumsum(sequence):
        """Running cumulative lengths of the given datasets."""
        sizes, total = [], 0
        for dataset in sequence:
            total += len(dataset)
            sizes.append(total)
        return sizes
    @property
    def cluster_indices(self):
        """One range of global indices per underlying dataset."""
        ranges, start = [], 0
        for end in self.cumulative_sizes:
            ranges.append(range(start, end))
            start = end
        return ranges
    def __init__(self, datasets):
        assert len(datasets) > 0, 'datasets should not be an empty iterable'
        self.datasets = list(datasets)
        self.cumulative_sizes = self.cumsum(self.datasets)
    def __len__(self):
        return self.cumulative_sizes[-1]
    def __getitem__(self, idx):
        # Locate the dataset containing the global index, then its local offset.
        dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx)
        sample_idx = idx if dataset_idx == 0 else idx - self.cumulative_sizes[dataset_idx - 1]
        return self.datasets[dataset_idx][sample_idx]
    @property
    def cummulative_sizes(self):
        # Misspelled legacy alias kept for backward compatibility.
        warnings.warn("cummulative_sizes attribute is renamed to "
                      "cumulative_sizes", DeprecationWarning, stacklevel=2)
        return self.cumulative_sizes
    @property
    def df(self):
        """Concatenated dataframe of all the underlying datasets."""
        merged = self.datasets[0].df
        for dataset in self.datasets[1:]:
            merged = pd.concat([merged, dataset.df], axis=0, ignore_index=True, sort=False)
        return merged
class MultiStreamBatchSampler(Sampler):
    """Draw fixed-size batches from each cluster of a clustered dataset.

    ``data_source`` must expose a ``cluster_indices`` property (one index
    collection per underlying dataset). Every yielded batch is the
    concatenation of one ``batch_sizes[i]``-sized chunk per cluster; items
    that do not fill a complete chunk are dropped.

    Args:
        data_source : dataset exposing a ``cluster_indices`` property
        batch_sizes : sequence of per-cluster batch sizes (one per cluster)
        shuffle : bool, whether to permute each cluster before chunking
    """
    def __init__(self, data_source, batch_sizes, shuffle=True):
        super(MultiStreamBatchSampler, self).__init__(data_source)
        self.data_source = data_source
        self.batch_sizes = batch_sizes
        n_sizes = len(batch_sizes)
        n_clusters = len(self.data_source.cluster_indices)
        assert n_sizes == n_clusters, "batch_sizes must be the same length as the number of datasets in " \
                                      "the source {} != {}".format(n_sizes, n_clusters)
        self.shuffle = shuffle

    def __iter__(self):
        clusters = self.data_source.cluster_indices
        if self.shuffle:
            clusters = [np.random.permutation(c) for c in clusters]
        # One chunked stream per cluster; zip stops at the shortest stream.
        streams = [grouper(c, size) for c, size in zip(clusters, self.batch_sizes)]
        return (sum(chunks, ()) for chunks in zip(*streams))

    def __len__(self):
        n_batches = np.inf
        for cluster, size in zip(self.data_source.cluster_indices, self.batch_sizes):
            n_batches = min(n_batches, len(cluster) // size)
        return n_batches
def grouper(iterable, n):
    """Collect data into fixed-length chunks, dropping any incomplete tail.

    grouper('ABCDEFG', 3) --> ('A','B','C') ('D','E','F')
    """
    stream = iter(iterable)
    # zip pulls n items at a time from the single shared iterator.
    return zip(*((stream,) * n))
| 10,066 | 35.607273 | 120 | py |
GalaxyDataset | GalaxyDataset-master/GalaxyDataset.py | # -*- coding: utf-8 -*-
import torch
import torch.utils.data as Data
import numpy as np
import argparse
import os
import random
import yaml
import downloadData
import fdata
import preprocess
import mnist_bias
# 1. download dataset 2. split dataset
def make_dataset():
    """Download a dataset (CIFAR10/MNIST) and split it into per-node sub-datasets.

    Builds an argparse configuration (optionally overridden by ./config.yaml
    via readYaml), validates it, then dispatches on split_mode:
    0/1 -> splitDataset, 3 -> fdata.Loader (contrastive CIFAR10C), 4 -> MNIST bias.
    """
    parser = argparse.ArgumentParser('parameters')
    # dataset
    parser.add_argument('--dataset-mode', type=str, default="CIFAR10", help="dataset")
    # node num
    parser.add_argument('--node-num', type=int, default=4,
                        help="Number of node (default n=10) one node corresponding to one dataset")
    # small dataset config
    parser.add_argument('--isaverage-dataset-size', type=bool, default=True, help="if average splits dataset")
    parser.add_argument('--dataset-size-list', type=list, default=[5000, 3000, 2000, 3300],
                        help= "each small dataset size,if isaverage-dataset-size == True, list contain one element")
    # split mode
    parser.add_argument('--split-mode', type=int, default = 1,
                        help="dataset split: randomSplit(0), splitByLabels(1)")
    # each node - label kind
    parser.add_argument('--node-label-num', type=list, default=[4, 3, 2, 1],
                        help="each node consists of label kind, default each node has one kind of label")
    parser.add_argument('--isadd-label', type=bool, default=True,
                        help="whether add error dataset default=False")
    parser.add_argument('--add-label-rate', type=float, default=0.1,
                        help="if split-mode == 2 or 3, add same normal small dataset")
    parser.add_argument('--isadd-error', type=bool, default=True,
                        help="whether add error dataset default=False")
    parser.add_argument('--add-error-rate', type=float, default=0.01,
                        help="if split-mode == 3, add same error dataset")
    parser.add_argument('--isuse-yaml', type= bool, default= True,
                        help='isuse-yaml = True means using yaml file, false means using command line')
    # Augmentation parameters consumed by fdata.cifar_train_transforms.
    parser.add_argument('--RandomResizedCrop', type=list, default=[0.2, 1.],
                        help='RandomResizedCrop')
    parser.add_argument('--GaussianBlur', type=list, default=[0.1, .2],
                        help='GaussianBlur')
    parser.add_argument('--RandomGrayscale', type=float, default=0.2,
                        help='GaussianBlur')
    parser.add_argument('--Normalize-mean', type=list, default=[0.4914, 0.4822, 0.4465],
                        help='Normalize-mean')
    parser.add_argument('--Normalize-std', type=list, default=[0.2023, 0.1994, 0.2010],
                        help='Normalize-std')
    # args.RandomResizedCrop = config["RandomResizedCrop"]
    # args.GaussianBlur = config["GaussianBlur"]
    # args.RandomGrayscale = config["RandomGrayscale"]
    # args.Normalize_mean = config["Normalize_mean"]
    # args.Normalize_std = config["Normalize_std"]
    args = parser.parse_args()
    # YAML values (when present and enabled) take precedence over CLI defaults.
    args = readYaml("./config.yaml", args)
    # valid parameters
    if args.dataset_mode != "CIFAR10" and args.dataset_mode != "MNIST":
        print("currently only for CIFAR10 and MNIST")
        return
    if len(args.dataset_size_list) < args.node_num:
        print("Error: the number of dataset smaller than node num")
        return
    if args.node_num != len(args.dataset_size_list) or args.node_num != len(args.node_label_num):
        print("Error: nodes num is not equal to the length of dataset_size_list or node_label_num ")
        return
    if args.split_mode == 3:
        #file_path, batch_size , sub_num, dataset_ident = 'CIFAR10C' , download = False, train_transform = cifar_train_transforms(), test_transform = cifar_test_transforms(), use_cuda =True
        Xloader = fdata.Loader("./data", batch_size = 32, sub_num=args.sub_num, dataset_ident = 'CIFAR10C', download = False, train_transform=fdata.cifar_train_transforms(args), test_transform=fdata.cifar_test_transforms(), use_cuda=True)
        return
    if args.split_mode == 4:
        mnist_bias.mnist_process(args.datasetpath)
        return
    train_loader, test_loader = downloadData.load_data(args)
    splitDataset(args, train_loader)
def readYaml(path, args):
    """Overwrite *args* fields from a YAML config file when enabled.

    Args:
        path: str, location of the YAML configuration file.
        args: argparse.Namespace-like object holding the current values.

    Returns:
        The same *args* object: updated in place when ``args.isuse_yaml`` is
        true and *path* exists, otherwise returned unchanged.
    """
    if args.isuse_yaml == False:
        return args
    if not os.path.exists(path):
        return args
    # safe_load avoids arbitrary object construction (yaml.load without an
    # explicit Loader is deprecated and unsafe on untrusted input); the
    # context manager guarantees the file handle is closed.
    with open(path) as f:
        config = yaml.safe_load(f)
    args.dataset_mode = config["dataset_mode"]
    args.datasetpath = str(config["datasetpath"])
    args.node_num = int(config["node_num"])
    args.isaverage_dataset_size = config["isaverage_dataset_size"]
    args.dataset_size_list = config["dataset_size_list"]
    args.split_mode = int(config["split_mode"])
    args.node_label_num = config["node_label_num"]
    args.isadd_label = config["isadd_label"]
    args.add_label_rate = float(config["add_label_rate"])
    args.isadd_error = config["isadd_error"]
    args.add_error_rate = float(config["add_error_rate"])
    args.RandomResizedCrop = config["RandomResizedCrop"]
    args.GaussianBlur = config["GaussianBlur"]
    args.RandomGrayscale = config["RandomGrayscale"]
    args.Normalize_mean = config["Normalize_mean"]
    args.Normalize_std = config["Normalize_std"]
    args.sub_num = config["sub_num"]
    return args
def splitDataset(args, train_loader):
    """Dispatch to the configured split strategy and persist the result via savenpy.

    Note: mutates args.add_error_rate / args.add_label_rate (zeroed when the
    corresponding feature is disabled) so the rates appear in the saved filenames.
    """
    # sub_datasets [
    #   [[imgs, label], [imgs, label]....],
    #   [[imgs, label], [imgs, label]....],
    # ]
    # randomSplit : 1. no error dataset 2. add error dataset
    # splitByLabel: 1. just 2. add other dataset, no error 3. add error no other 4. add both
    parent_path = "./" + args.dataset_mode
    if args.split_mode == 0: # 1. Randomly split CIFAR10 into n small datasets
        if args.isadd_error == False:
            args.add_error_rate = 0.0
            sub_datasets = randomSplit(args, train_loader)
            savenpy(parent_path+"/randomSplit/", sub_datasets, args)
        else:
            temp_sub_datasets = randomSplit(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/randomSplitWithError/", sub_datasets, args)
    elif args.split_mode == 1: # 2. Divide CIFAR10 into n small datasets according to dataset labels
        if args.isadd_label == False and args.isadd_error == False:
            args.add_error_rate = 0.0
            args.add_label_rate = 0.0
            sub_datasets = splitByLabels(args, train_loader)
            savenpy(parent_path+"/splitByLabels/", sub_datasets, args)
        elif args.isadd_label == True and args.isadd_error == False:
            args.add_error_rate = 0.0
            # 3. Based on the 2nd method, each dataset adds 10% of the data taken from the other datasets
            sub_datasets = splitByLabelsAnddDataset(args, train_loader)
            savenpy(parent_path+"/splitByLabelsAnddDataset/", sub_datasets, args)
        elif args.isadd_label == False and args.isadd_error == True:
            args.add_label_rate = 0.0
            # 5. get dataset, each dataset adds some error label data to form a new dataset
            temp_sub_datasets = splitByLabels(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/splitByLabelsWithErrorDataset/", sub_datasets, args)
        else:
            temp_sub_datasets = splitByLabelsAnddDataset(args, train_loader)
            sub_datasets = addErrorDataset(args, temp_sub_datasets)
            savenpy(parent_path+"/splitByLabelsWithNormalAndErrorDataset/", sub_datasets, args)
# 1. Randomly split Dataset into n small datasets
def randomSplit(args, loader):
    """Split *loader*'s samples sequentially into args.node_num sub-datasets.

    Returns a list of per-node lists of [image_array, label_array] pairs.
    "Average" mode consumes dataset_size_list[i] samples per node in order;
    otherwise a running cumulative threshold (temp_step) is used.
    Note: mutates args.add_label_rate (set to 0.0 for filename bookkeeping).
    """
    args.add_label_rate = 0.0
    node_num = args.node_num
    sub_datasets = [[] for i in range(node_num)]
    dataset_size_list = args.dataset_size_list
    if args.isaverage_dataset_size == True:
        # Even split: fill node after node with consecutive samples.
        temp_list = []
        node_index = 0
        num = 0
        print(loader.dataset)
        for step, (imgs, label) in enumerate(loader):
            # batch_size is 1, so imgs[0]/label[0] strip the batch dimension.
            temp_list.append([imgs[0].numpy(), label[0].numpy()])
            num += 1
            if (num % (dataset_size_list[node_index])) == 0 and num != 0:
                print("finish average spliting %d dataset" % node_index)
                # TODO(save one small dataset)
                sub_datasets[node_index] = temp_list
                node_index = node_index+1
                if node_index == node_num:
                    break
                temp_list = []
            # Flush the last (partial) chunk when the loader is exhausted.
            if step == len(loader.dataset.data) -1:
                print("finish left spliting %d dataset" % node_index)
                sub_datasets[node_index] = temp_list
    else:
        temp_list = []
        node_index = 0
        temp_step = dataset_size_list[node_index]
        num = 0
        if args.dataset_mode == "CIFAR10":
            for step, (imgs, labels) in enumerate(loader):
                num +=1
                temp_list.append([imgs[0].numpy(), labels[0].numpy()])
                # temp_list.append([imgs.numpy(), labels.numpy()])
                if num == temp_step and num !=0:
                    print("finish spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
                    node_index = node_index + 1
                    if node_index == node_num:
                        break
                    temp_step += dataset_size_list[node_index]
                    temp_list = []
                if step == len(loader.dataset.data) -1:
                    print("finish left spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
        elif args.dataset_mode == "MNIST":
            step = 0
            for (i, data) in enumerate(loader):
                step += 1
                num +=1
                # NOTE(review): unlike the CIFAR10 branch, this stores the whole
                # batch tensors (data[0], not data[0][0]) -- confirm intended.
                temp_list.append([data[0].numpy(), data[1].numpy()])
                # temp_list.append([imgs.numpy(), labels.numpy()])
                if num == temp_step and num !=0:
                    print("finish spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
                    node_index = node_index + 1
                    if node_index == node_num:
                        break
                    temp_step += dataset_size_list[node_index]
                    temp_list = []
                if i == len(loader.dataset.data) -1:
                    print("finish left spliting %d dataset" % node_index)
                    sub_datasets[node_index] = temp_list
    return sub_datasets
# 2. Divide CIFAR10 into n small datasets according to dataset labels
def splitByLabels(args, train_loader):
    """Partition *train_loader* into args.node_num sub-datasets grouped by label.

    Samples are first bucketed by class (10 classes assumed); node *i* then
    receives ``args.node_label_num[i]`` consecutive classes (wrapping modulo
    the class count). When ``args.isaverage_dataset_size`` is true each node
    is shuffled and truncated to ``args.dataset_size_list[i]`` samples.

    Args:
        args: namespace with node_num, node_label_num, isaverage_dataset_size,
            dataset_size_list.
        train_loader: iterable of (imgs, label) batches with batch size 1.

    Returns:
        list of per-node lists of [image_array, label_array] pairs.
    """
    sub_datasets = [[] for i in range(args.node_num)]
    temp_datasets = [[] for i in range(10)]
    # Bucket each sample by its class; per node, the number of classes it
    # gets (and its sample budget) comes from the args lists.
    for step, (imgs, label) in enumerate(train_loader):
        num_label = label.data.item()
        # CIFAR10 Dataset
        # imgs[0].numpy(): <class 'tuple'>: (3, 32, 32)  label[0].numpy() [x] =>
        # temp_datasets [
        #   [[(3, 32, 32) , 0], [(3, 32, 32) , 0], ..],
        #   [[[(3, 32, 32) , 1], [(3, 32, 32) , 1], ..],
        #   ...
        # ]
        temp_datasets[num_label].append(
            [imgs[0].numpy(), label[0].numpy()])
        if step % 5000 == 0:
            print("split dataset step: ", step)
    # Distribute class buckets over nodes.
    # node_label_num [1, 2, 2, 5, 7]
    # (dead locals `node_index` and an unused random.sample call removed)
    all_label_kinds = len(temp_datasets)
    sum_x = 0
    for index, x in enumerate(args.node_label_num):
        temp_list = []
        if x > all_label_kinds:
            x = all_label_kinds
        for y in range(x):
            # temp_list only contain 10 kinds labels
            labels_index = (y + sum_x) % all_label_kinds
            temp_list.extend(temp_datasets[labels_index])
            print("node %d" % index, "| add label-%d dataset" % (labels_index))
        # if we need the part of data, shuffle, split
        if args.isaverage_dataset_size == True:
            random.shuffle(temp_list)
            temp_list = temp_list[:args.dataset_size_list[index]]
        sub_datasets[index] = temp_list
        sum_x += x
    return sub_datasets
# 3. Based on the 2nd method, each dataset adds n% of the data taken from the other datasets
def splitByLabelsAnddDataset(args, train_loader):
    """Label-based split, then pad each node with a fraction of arbitrary samples.

    After splitByLabels, every node additionally receives
    ``args.add_label_rate * len(node)`` samples taken from the front of the
    loader, regardless of their labels.

    Returns:
        list of per-node lists of [image_array, label_array] pairs.
    """
    sub_datasets = splitByLabels(args, train_loader)
    # Per-node count of extra (other-label) samples to mix in.
    extra_counts = [int(args.add_label_rate * len(sub_datasets[n])) for n in range(args.node_num)]
    for node_idx in range(args.node_num):
        for step, (imgs, label) in enumerate(train_loader):
            if step >= extra_counts[node_idx]:
                break
            if step % 100 == 0:
                print("node %d " % node_idx, "| step:%d, adding other label dataset" % step)
            sub_datasets[node_idx].append([imgs[0].numpy(), label[0].numpy()])
    print("adding other data succeed!")
    return sub_datasets
# 4. each dataset adds some error label data
def addErrorDataset(args, array):
    """Append deliberately mislabelled copies of existing samples to each node.

    For each node, the first ``args.add_error_rate * len(node)`` samples are
    duplicated with a random *incorrect* label (any of the 10 classes except
    the true one). The input list is modified in place and returned.

    Args:
        args: namespace with add_error_rate (float) and node_num (int).
        array: list of per-node sample lists, each entry [img, label].

    Returns:
        The same *array* with the noisy duplicates appended.
    """
    error_ratio = args.add_error_rate
    add_error_nums = [int(error_ratio * len(array[i])) for i in range(args.node_num)]
    # add error data
    for node_idx in range(args.node_num):
        for index in range(add_error_nums[node_idx]):
            if index % 5 == 0:
                print("node %d" % node_idx, "| step:%d, adding other error dataset" % index)
            # array [
            #   [[imgs, label], [imgs, label]....],
            #   [[imgs, label], [imgs, label]....],
            # ]
            real_label = array[node_idx][index][1]
            # Bug fix: the original sampled from range(0, 9), so label 9 could
            # never be assigned as a wrong label; use the full 10-class range.
            error_label = random.choice([c for c in range(10) if c != real_label])
            array[node_idx].append([array[node_idx][index][0], error_label])
    print("adds some error label data succeed!")
    return array
# save each small list dataset file
def savenpy(path, array, args):
    '''
    Save each non-empty per-node sample list as a separate .npy file.

    Filenames encode: split strategy, node index, sample count, class name(s),
    and the add_label/add_error rates.
    :param path: destination directory (created if missing)
    :param array: list of per-node [img, label] lists
    :param args: namespace with split_mode, node_label_num, add_label_rate, add_error_rate
    :return: None (writes files as a side effect)
    '''
    if not os.path.exists(path):
        os.makedirs(path)
    # array [[(3, 32, 32), x], [(3, 32, 32), x]]
    # filename pattern: strategy_node_i_size_classname_addlabelrate_errorrate
    # label classes
    classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    for i in range(len(array)):
        if len(array[i]) != 0:
            filename = ''
            if args.split_mode == 0:
                filename = 'randomSplit'+'_node_'+str(i) + '_'+str(len(array[i]))+ '_' + "normal"
            elif args.split_mode == 1:
                # Class name is taken from the node's first sample.
                if int(args.node_label_num[i]) != 1:
                    filename = 'SplitByLabels'+'_node_'+str(i) + '_' + str(len(array[i])) + '_' + classes[array[i][0][1]]+ "andMore"
                else:
                    filename = 'SplitByLabels'+'_node_'+str(i) + '_' + str(len(array[i])) + '_' + classes[array[i][0][1]]
            strings = path + filename +'_' + str(args.add_label_rate) + '_' + str(args.add_error_rate)+'.npy'
            np.save(file=strings, arr=array[i])
            print("index %d saved %s" % (i, strings))
    print("save file succeed !")
def readnpy(path):
    """Load a saved [[img, label], ...] .npy file and wrap it in a DataLoader.

    Args:
        path: str, location of the .npy file (pickled object array).

    Returns:
        torch.utils.data.DataLoader over the stored tensors (batch 64, shuffled).
    """
    # npy file: [[imgs, label], [imgs, label]...., [imgs, label]]
    # allow_pickle=True is required for the object-array layout.
    records = np.load(path, allow_pickle=True)
    imgs, labels = [], []
    for img, lab in records:
        imgs.append(img)
        labels.append(lab)
    torch_dataset = Data.TensorDataset(torch.from_numpy(np.array(imgs)), torch.from_numpy(np.array(labels)))
    dataloader = Data.DataLoader(
        torch_dataset,
        batch_size=64,
        shuffle=True
    )
    print(dataloader)
    return dataloader
if __name__ == "__main__":
    # Entry point: build and save the configured sub-datasets.
    make_dataset()
# preprocess.load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_5555_dog_0.1_0.01.npy")
# readnpy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy") | 16,303 | 42.946092 | 239 | py |
GalaxyDataset | GalaxyDataset-master/downloadData.py | # -*- coding: utf-8 -*-
import argparse
import torch
from torchvision import datasets, transforms
# CIFAR-10,
# mean, [0.5, 0.5, 0.5]
# std, [0.5, 0.5, 0.5]
# CIFAR-100,
# mean, [0.5071, 0.4865, 0.4409]
# std, [0.2673, 0.2564, 0.2762]
def load_data(args):
    """Build train/test DataLoaders for CIFAR10, CIFAR100 or MNIST.

    Note: forces args.batch_size = 1 (the splitting code downstream strips
    the batch dimension per sample). Unknown dataset modes return two empty
    lists.

    Args:
        args: namespace with dataset_mode (str); batch_size is overwritten.

    Returns:
        (train_loader, test_loader) tuple.
    """
    # batch_size 1 so each iteration yields exactly one sample.
    args.batch_size = 1
    train_loader = []
    test_loader = []
    if args.dataset_mode == "CIFAR10":
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=0
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR10('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=1
        )
    elif args.dataset_mode == "CIFAR100":
        # CIFAR-100 statistics differ from CIFAR-10 (see module header comment).
        transform_train = transforms.Compose([
            transforms.RandomCrop(32, padding=4),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2762)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.CIFAR100('data', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=False,
            num_workers=2
        )
    elif args.dataset_mode == "MNIST":
        transform_train = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        transform_test = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,)),
        ])
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=True, download=True, transform=transform_train),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
        test_loader = torch.utils.data.DataLoader(
            datasets.MNIST('data/newMNIST', train=False, transform=transform_test),
            batch_size=args.batch_size,
            shuffle=True,
            num_workers=2
        )
    return train_loader, test_loader
class argsment:
    """Minimal stand-in for argparse results when calling load_data directly.

    Bug fix: the original wrote ``batch_size = 1,`` / ``self.batch_size = batch,``
    with trailing commas, silently turning every attribute into a one-element
    tuple. The commas are removed so the attributes hold plain values.
    """
    # class-level defaults
    batch_size = 1
    dataset_mode = "MNIST"
    # constructor
    def __init__(self, batch, mode):
        self.batch_size = batch
        self.dataset_mode = mode
    # method
    def getBatchSize(self):
        # Debug helper: print the configured batch size.
        print(self.batch_size)
# download data
if __name__ == "__main__":
parser = argparse.ArgumentParser('parameters')
# dataset
parser.add_argument('--dataset-mode', type=str, default="CIFAR100", help="dataset")
args = parser.parse_args()
print(args.dataset_mode)
train_loader, test_loader = load_data(args)
print(train_loader) | 3,725 | 30.310924 | 98 | py |
GalaxyDataset | GalaxyDataset-master/fdata.py | from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms
import torch as t
import numpy as np
import random
from PIL import ImageFilter
from PIL import Image
class GaussianBlur(object):
    """Transform applying Gaussian blur with a radius drawn uniformly from *sigma*.

    Args:
        sigma: (low, high) range for the random blur radius. An immutable
            tuple default replaces the original mutable-list default
            (shared-mutable-default pitfall), preserving the same values.
    """
    def __init__(self, sigma=(0.1, 2.0)):
        self.sigma = sigma

    def __call__(self, x):
        # x is expected to be a PIL image (has .filter).
        sigma = random.uniform(self.sigma[0], self.sigma[1])
        x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
        return x
# Xargs.RandomResizedCrop
def cifar_train_transforms(Xargs):
    """Build the CIFAR training augmentation pipeline from config fields.

    Args:
        Xargs: namespace with RandomResizedCrop ([lo, hi] scale), GaussianBlur
            ([lo, hi] sigma), RandomGrayscale (prob), Normalize_mean,
            Normalize_std.

    Returns:
        torchvision.transforms.Compose of the augmentations.
    """
    all_transforms = transforms.Compose([
        transforms.RandomResizedCrop(32, scale=(Xargs.RandomResizedCrop[0], Xargs.RandomResizedCrop[1])),
        transforms.RandomHorizontalFlip(),
        transforms.RandomApply([GaussianBlur(Xargs.GaussianBlur)], p=0.5),
        transforms.RandomGrayscale(Xargs.RandomGrayscale),
        transforms.ToTensor(),
        transforms.Normalize(Xargs.Normalize_mean, Xargs.Normalize_std)
    ])
    return all_transforms
# def cifar_train_transforms():
# all_transforms = transforms.Compose([
# transforms.RandomResizedCrop(32, scale=(0.2, 1.)),
# transforms.RandomHorizontalFlip(),
# transforms.RandomApply([GaussianBlur([.1, 2.])], p=0.5),
# transforms.RandomGrayscale(p=0.2),
# transforms.ToTensor(),
# transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
# ])
# return all_transforms
def cifar_test_transforms():
    """Return the evaluation pipeline: tensor conversion + CIFAR-10 normalisation."""
    steps = [
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010]),
    ]
    return transforms.Compose(steps)
class CIFAR10C(datasets.CIFAR10):
    """CIFAR-10 variant returning two independently-augmented views per image.

    Each __getitem__ applies the (stochastic) transform twice to the same
    image -- the contrastive-learning "two views" pattern.
    """
    def __init__(self, *args, **kwargs):
        super(CIFAR10C, self).__init__(*args, **kwargs)
    def __getitem__(self, index):
        # Raw numpy image + integer target from the base dataset's storage.
        img, target = self.data[index], self.targets[index]
        # return a PIL Image
        img = Image.fromarray(img)
        if self.transform is not None:
            xi = self.transform(img)
            xj = self.transform(img)
            return xi, xj, target
        # NOTE(review): when self.transform is None this implicitly returns
        # None -- confirm a transform is always supplied by callers.
# Registry: dataset identifier -> Dataset class.
loader_map = {
    'CIFAR10C': CIFAR10C,
    'CIFAR10': datasets.CIFAR10
}
# Number of target classes per registered dataset.
num_class = {
    'CIFAR10C': 10,
    'CIFAR10': 10
}
class Loader(object):
    """Wrap a registered dataset into several train DataLoaders plus one test loader.

    The 50000-sample training set (hard-coded; matches CIFAR train size --
    TODO confirm for other datasets) is cut into sub_num equal contiguous
    subsets of size 50000 // (sub_num + 1); the final (sub_num+1)-th share is
    presumably left unused as a holdout -- verify against callers.
    """
    def __init__(self, file_path, batch_size , sub_num, train_transform, test_transform, dataset_ident = 'CIFAR10C' , download = False, use_cuda =True):
        train_dataset,test_dataset = self.get_dataset_train(loader_map[dataset_ident], file_path, download,
                                                            train_transform, test_transform)
        subsize = int(50000 / (sub_num +1 ))
        subsets_range = [range(i * subsize ,(i+1)*subsize ) for i in range(sub_num)]
        subsets = [self.get_fix_part(train_dataset,i) for i in subsets_range]
        # pin_memory speeds up host-to-GPU copies; only useful with CUDA.
        kwargs = {'num_workers': 0, 'pin_memory': True} if use_cuda else {}
        self.train_loaders = [DataLoader(i, batch_size=batch_size, shuffle=True, **kwargs) for i in subsets]
        self.test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        #tmp_batch = self.train_loader.__iter__().__next__()[0]
        #self.img_shape = list(tmp_batch.size())[1:]
        #self.num_class = num_class[dataset_ident]
    @staticmethod
    def get_dataset_train(dataset, file_path, download, train_transform, test_transform):
        """Instantiate the train and test datasets with their transforms."""
        # Training and Validation datasets
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform)
        return train_dataset,test_dataset
    def get_fix_part(self,trainset,datarange):
        # View of trainset restricted to the given index range (no copy).
        return t.utils.data.Subset(trainset,datarange)
| 3,878 | 33.945946 | 154 | py |
GalaxyDataset | GalaxyDataset-master/autoencoder.py | # Numpy
import numpy as np
# Torch
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Torchvision
import torchvision
import torchvision.transforms as transforms
# Matplotlib
# %matplotlib inline
import matplotlib.pyplot as plt
# OS
import os
import argparse
EPOCH = 100  # number of training epochs
# Set random seed for reproducibility
# SEED = 87
# np.random.seed(SEED)
# torch.manual_seed(SEED)
# if torch.cuda.is_available():
#     torch.cuda.manual_seed(SEED)
# Pin all CUDA work to GPU index 2 (module-level side effect).
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
def print_model(encoder, decoder):
    """Dump the encoder and decoder module summaries to stdout."""
    for title, part in (("Encoder", encoder), ("Decoder", decoder)):
        print("============== {} ==============".format(title))
        print(part)
    print("")
# vgg net
# NOTE: pretrained VGG-19 weights are downloaded and instantiated at import
# time (module-level side effect). vgg19_bn is used as a fixed feature
# extractor in the training loop; vgg19 appears unused in this module.
vgg19 = torchvision.models.vgg19(pretrained=True)
vgg19_bn = torchvision.models.vgg19_bn(pretrained=True)
if torch.cuda.is_available():
    vgg19 = vgg19.cuda()
    vgg19_bn = vgg19_bn.cuda()
# Custom loss: mean of exp(-|x1 - x2|) (a similarity score in (0, 1])
class VGG_loss(nn.Module):
    """Similarity score in (0, 1]: mean of exp(-|x1 - x2|) over all elements.

    Equals 1.0 when the inputs are identical and decays toward 0 as they
    diverge element-wise.
    """
    def __init__(self):
        super(VGG_loss, self).__init__()

    def forward(self, x1, x2):
        gap = torch.abs(x1 - x2)
        return torch.exp(-gap).mean()
loss_vgg = VGG_loss()
def create_model():
    """Instantiate the convolutional autoencoder, moving it to GPU when available."""
    model = Autoencoder()
    print_model(model.encoder, model.decoder)
    if torch.cuda.is_available():
        model = model.cuda()
        print("Model moved to GPU in order to speed up training.")
    return model
def get_torch_vars(x):
    """Wrap *x* in an autograd Variable, placed on GPU when CUDA is available."""
    target = x.cuda() if torch.cuda.is_available() else x
    return Variable(target)
def imshow(img):
    """Display a CHW image tensor and save it to ./imgs/result.png.

    Args:
        img: torch tensor in (C, H, W) layout (e.g. from make_grid).
    """
    npimg = img.cpu().numpy()
    plt.axis('off')
    # Transpose CHW -> HWC for matplotlib.
    plt.imshow(np.transpose(npimg, (1, 2, 0)))
    if not os.path.exists('./imgs'):
        os.mkdir('./imgs')
    plt.savefig("./imgs/result.png")
    plt.show()
class Autoencoder(nn.Module):
    """Three-layer strided convolutional autoencoder for 32x32 RGB images.

    Encoder: [batch, 3, 32, 32] -> [batch, 48, 4, 4]
    Decoder: [batch, 48, 4, 4] -> [batch, 3, 32, 32]
    """
    def __init__(self):
        super(Autoencoder, self).__init__()
        # Each Conv2d (kernel 4, stride 2, pad 1) halves spatial resolution.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 12, 4, stride=2, padding=1),    # -> [batch, 12, 16, 16]
            nn.ReLU(),
            nn.Conv2d(12, 24, 4, stride=2, padding=1),   # -> [batch, 24, 8, 8]
            nn.ReLU(),
            nn.Conv2d(24, 48, 4, stride=2, padding=1),   # -> [batch, 48, 4, 4]
            nn.ReLU(),
        )
        # Mirror of the encoder; Sigmoid keeps outputs in [0, 1].
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1),  # -> [batch, 24, 8, 8]
            nn.ReLU(),
            nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1),  # -> [batch, 12, 16, 16]
            nn.ReLU(),
            nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1),   # -> [batch, 3, 32, 32]
            nn.Sigmoid(),
        )

    def forward(self, x):
        code = self.encoder(x)
        return code, self.decoder(code)
def main():
    """Train (or, with --valid, only evaluate) the CIFAR-10 autoencoder.

    Training pairs consecutive batches: reconstruction loss on each batch plus
    a VGG-feature similarity term between the two batches. Saves weights to
    ./weights/autoencoder.pkl.
    """
    parser = argparse.ArgumentParser(description="Train Autoencoder")
    # parser.add_argument("--valid", action="store_true", default=False,
    #                     help="Perform validation only.")
    parser.add_argument("--valid", type=bool, default=False,
                        help="Perform validation only.")
    args = parser.parse_args()
    # Create model
    autoencoder = create_model()
    # Load data
    transform = transforms.Compose(
        [transforms.RandomCrop(32, padding=4),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=16,
                                             shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat',
               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
    if args.valid:
        print("Loading checkpoint...")
        autoencoder.load_state_dict(torch.load("./weights/autoencoder.pkl"))
        dataiter = iter(testloader)
        # Fix: iterator .next() was removed from Python 3 / newer torch
        # DataLoader iterators; use the builtin next().
        images, labels = next(dataiter)
        print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(16)))
        imshow(torchvision.utils.make_grid(images))
        images = Variable(images.cuda())
        decoded_imgs = autoencoder(images)[1]
        imshow(torchvision.utils.make_grid(decoded_imgs.data))
        exit(0)
    # Define an optimizer and criterion
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(autoencoder.parameters())
    for epoch in range(EPOCH):
        running_loss = 0.0
        x_list = []
        y_list = []
        for i, (inputs, _) in enumerate(trainloader, 0):
            inputs_x = get_torch_vars(inputs)
            inputs_y = get_torch_vars(inputs)
            # Accumulate batches; the update below runs once per pair.
            x_list.append(inputs_x)
            y_list.append(inputs_y)
            if len(x_list) != 2:
                continue
            # ============ Forward ============
            encoded_1, outputs_1 = autoencoder(x_list[0])
            encoded_2, outputs_2 = autoencoder(x_list[1])
            loss1 = criterion(outputs_1, y_list[0])
            loss2 = criterion(outputs_2, y_list[1])
            # VGG runs in eval mode as a frozen feature extractor.
            vgg19_bn.eval()
            x_list_0 = vgg19_bn(x_list[0])
            x_list_1 = vgg19_bn(x_list[1])
            loss3 = loss_vgg(x_list_0, x_list_1)
            loss = loss1 + loss2 + loss3
            # ============ Backward ============
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            x_list = [] # reset the pair buffer
            y_list = [] # reset the pair buffer
            # ============ Logging ============
            running_loss += loss.data
            if i % 2000 == 1999:
                print('[%d, %5d] loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / 2000))
                running_loss = 0.0
    print('Finished Training')
    print('Saving Model...')
    if not os.path.exists('./weights'):
        os.mkdir('./weights')
    torch.save(autoencoder.state_dict(), "./weights/autoencoder.pkl")
if __name__ == '__main__':
    # Entry point: train or validate the autoencoder.
    main()
GalaxyDataset | GalaxyDataset-master/NEI.py | # -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.utils.data as Data
from preprocess import load_npy
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
# Torchvision
import torchvision
import torchvision.transforms as transforms
class Autoencoder(nn.Module):
    """Strided conv autoencoder for 32x32 RGB images (duplicate of autoencoder.py).

    Encoder: [batch, 3, 32, 32] -> [batch, 48, 4, 4]
    Decoder: [batch, 48, 4, 4] -> [batch, 3, 32, 32]
    """
    def __init__(self):
        super(Autoencoder, self).__init__()
        # Every conv (kernel 4, stride 2, pad 1) halves height and width.
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 12, 4, stride=2, padding=1),    # -> [batch, 12, 16, 16]
            nn.ReLU(),
            nn.Conv2d(12, 24, 4, stride=2, padding=1),   # -> [batch, 24, 8, 8]
            nn.ReLU(),
            nn.Conv2d(24, 48, 4, stride=2, padding=1),   # -> [batch, 48, 4, 4]
            nn.ReLU(),
        )
        # Transposed convs mirror the encoder; Sigmoid bounds pixels to [0, 1].
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(48, 24, 4, stride=2, padding=1),  # -> [batch, 24, 8, 8]
            nn.ReLU(),
            nn.ConvTranspose2d(24, 12, 4, stride=2, padding=1),  # -> [batch, 12, 16, 16]
            nn.ReLU(),
            nn.ConvTranspose2d(12, 3, 4, stride=2, padding=1),   # -> [batch, 3, 32, 32]
            nn.Sigmoid(),
        )

    def forward(self, x):
        latent = self.encoder(x)
        reconstruction = self.decoder(latent)
        return latent, reconstruction
def get_torch_vars(x):
    """Return *x* as an autograd Variable, moved to GPU if CUDA is available."""
    device_x = x.cuda() if torch.cuda.is_available() else x
    return Variable(device_x)
EPOCH = 100  # number of training epochs
LR = 0.005   # Adam learning rate
def train_AEnet(dataset):
    """Train an Autoencoder on CIFAR-10 and return the trained model.

    NOTE(review): the *dataset* parameter is never used -- CIFAR-10 is always
    loaded from ./data regardless; confirm this is intended.
    NOTE(review): CrossEntropyLoss is applied to a reconstructed image vs. the
    input image; for reconstruction an MSE-style loss is the usual choice --
    confirm the loss is intentional (autoencoder.py does the same).
    """
    # Load data
    transform = transforms.Compose(
        [transforms.RandomCrop(32, padding=4),
         transforms.RandomHorizontalFlip(),
         transforms.ToTensor(),
         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
         ])
    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                            download=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=16,
                                              shuffle=True, num_workers=2)
    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                           download=True, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=16,
                                             shuffle=False, num_workers=2)
    autoencoder = Autoencoder()
    optimizer = torch.optim.Adam(autoencoder.parameters(), lr=LR)
    loss_func = nn.CrossEntropyLoss()
    for epoch in range(EPOCH):
        for step, (x, b_label) in enumerate(trainloader):
            # Input doubles as reconstruction target.
            inputs_x = get_torch_vars(x)
            inputs_y = get_torch_vars(x)
            encoded, decoded = autoencoder(inputs_x)
            loss = loss_func(decoded, inputs_y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if step % 100 == 0:
                print("Epoch: ", epoch, "| step: ", step, "| train loss: %.4f" % loss.data.numpy())
    return autoencoder
def computing_NI(train_dataset, test_dataset, nums_classes):
    """Compute a non-IID-ness score between train and test features.

    NOTE(review): this function looks broken as written -- it calls the
    autoencoder directly on DataLoader objects (not tensors), and
    ``train_dataset.concat(...)`` is not a Tensor/DataLoader method. Also the
    expression ``mean(a) - mean(b) / normalize_data`` divides before
    subtracting -- confirm the intended precedence. nums_classes is unused.
    """
    # 1. feature extraction  2. first moment  3. L2 norm
    autoencoder = train_AEnet(train_dataset)
    train_encoded, _ = autoencoder(train_dataset)
    test_encoded, _ = autoencoder(test_dataset)
    normalize_data = F.normalize(train_dataset.concat(test_encoded), p=2, dim=1)
    NI = torch.norm(torch.mean(train_encoded) - torch.mean(test_encoded) / (normalize_data), p=2)
    return NI
if __name__ == "__main__":
# 1. 读取数据 2. 创建公式 3. 使用公式
train_loader, test_loader = load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy")
NI = computing_NI(train_loader, test_loader, 10)
pass | 3,661 | 35.62 | 121 | py |
GalaxyDataset | GalaxyDataset-master/mnist_bias.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import datasets, transforms
import random, os, time, argparse, pickle
def mnist_image_raw2bias(image_raw, label, background, digit, id_1, id_2):
    """Colorize a grayscale MNIST image with background/digit color biases.

    Builds 8 base colors (the 3-bit RGB corners); background pixels (value 0)
    take color ``b[background]``, foreground pixels take color ``d[digit]``
    modulated by the pixel intensity.

    Args:
        image_raw: 28x28 tensor of pixel values in [0, 1].
        label: int digit class.
        background, digit: indices in [0, 8) selecting the two colors.
        id_1, id_2: sample identifiers (only used in the commented-out save path).

    Returns:
        (data, label, background, digit) where data is a (3, 28, 28) FloatTensor.
    """
    b = []
    d = []
    for i in range(8):
        # Decompose i into 3 bits -> one of the 8 RGB cube corners.
        i_0 = i//4
        i_1 = (i//2)%2
        i_2 = i%2
        b.append([i_0, i_1, i_2])
        d.append([(i_0+0.5)/2, (i_1+0.5)/2, (i_2+0.5)/2])
    image_bias = []
    for i in image_raw:
        for j in i:
            if j == 0:
                image_bias.append(b[background])
            else:
                j = ((j - 0.5) / 2).numpy().tolist() # [-0.25, 0.25]
                image_bias.append([d[digit][0]+j, d[digit][1]+j, d[digit][2]+j])
    im = torch.FloatTensor(image_bias)
    im = im.reshape([28, 28, 3])
    # HWC -> CHW layout for torch models.
    im = im.permute(2, 0, 1)
    data = im
    # trans = transforms.Compose([
    #     transforms.ToPILImage()
    # ])
    # im = trans(im)
    # path = 'mnist_bias_eval/{}/'.format(label)
    # if not os.path.exists(path):
    #     os.makedirs(path)
    # im.save('mnist_bias_eval/{}/label={}_background={}_digit={}_id_1={}_id_2={}.jpg'.format(label, label, background, digit, id_1, id_2))
    return (data, label, background, digit)
def mnist_process(path):
    """Build the color-biased MNIST training set and pickle it to disk.

    Downloads MNIST into *path*, groups samples by digit, assigns each group
    a rotating (background, digit-color) pair in chunks of l // 56 samples,
    and writes the colorized list to <path>/mnist_bias_train.pkl.

    NOTE(review): if a digit class has fewer than 56 samples, ``l // 56`` is 0
    and ``j % num`` raises ZeroDivisionError -- confirm inputs. The local
    ``mnist_bias`` list also shadows the module name.

    Args:
        path: str, dataset root directory (also the pickle destination).

    Returns:
        list of (data, label, background, digit) tuples.
    """
    mnist_raw = datasets.MNIST(path, train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                   ]))
    mnist_list = []
    mnist_bias = []
    for i in range(10):
        mnist_list.append([])
    for ([image_raw], label) in mnist_raw:
        mnist_list[label].append(([image_raw], label))
    for i in range(10):
        l = len(mnist_list[i])
        num = l // 56
        background_color = 0
        digit_color = 0
        for j in range(l):
            ([image_raw], label) = mnist_list[i][j]
            if j % num == 0:
                # Advance the color pair every `num` samples, skipping the
                # case where background and digit colors coincide.
                digit_color += 1
                cnt = 0
                if background_color == digit_color:
                    digit_color += 1
                if digit_color == 8:
                    digit_color = 0
                    background_color += 1
                    if background_color == 8:
                        background_color = 7
            cnt += 1
            mnist_bias.append(mnist_image_raw2bias(image_raw, label, background_color, digit_color, cnt, j))
            print(i, j)
    print(len(mnist_bias))
    # Bug fix: the original left the pickle file handle open; the context
    # manager guarantees it is flushed and closed.
    with open(path+'/'+'mnist_bias_train.pkl', 'wb') as f:
        pickle.dump(mnist_bias, f)
    return mnist_bias
| 2,547 | 33.432432 | 139 | py |
GalaxyDataset | GalaxyDataset-master/preprocess.py | # -*- coding: utf-8 -*-
import torch
import torch.utils.data as Data
import numpy as np
from torchvision import datasets, transforms
import argparse
import os
import random
import yaml
import downloadData
def load_npy(path):
    """Build DataLoaders from a pickled per-class .npy split file.

    The .npy file holds ``[[img, label], [img, label], ...]``. Returns a
    ``(train_loader, test_loader)`` pair: the train loader serves the .npy
    samples, the test loader serves the local CIFAR-10 test split. When the
    file is missing, a warning is printed and ``None`` is returned.
    """
    if not os.path.isfile(path):
        print("files do not exists!!")
        return
    # allow_pickle is required: entries are object arrays of (img, label).
    records = np.load(path, allow_pickle=True)
    images = [record[0] for record in records]
    labels = [record[1] for record in records]
    train_set = Data.TensorDataset(torch.from_numpy(np.array(images)),
                                   torch.from_numpy(np.array(labels)))
    train_loader = Data.DataLoader(
        train_set,
        batch_size=64,
        shuffle=True,
        num_workers=1
    )
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    # Test split comes from the local CIFAR-10 copy (no download attempted).
    test_loader = torch.utils.data.DataLoader(
        datasets.CIFAR10('data', train=False, transform=transform_test),
        batch_size=32,
        shuffle=True,
        num_workers=1
    )
    print("train_loader, test_loader generated succeed!")
    return train_loader, test_loader
if __name__ == "__main__":
    # Smoke test: build loaders from one per-class split file.
    # (fixes the misspelled ``dataloder`` variable name)
    dataloader = load_npy("./cifar10/splitByLabelsWithNormalAndErrorDataset/SplitByLabels_3666_truck.npy")
GalaxyDataset | GalaxyDataset-master/digitfive/femnist.py | from torchvision.datasets import MNIST, utils
from PIL import Image
import os.path
import torch
class FEMNIST(MNIST):
    """
    This dataset is derived from the Leaf repository
    (https://github.com/TalwalkarLab/leaf) pre-processing of the Extended MNIST
    dataset, grouping examples by writer. Details about Leaf were published in
    "LEAF: A Benchmark for Federated Settings" https://arxiv.org/abs/1812.01097.
    """
    # (url, md5) pairs consumed by download() below.
    resources = [
        ('https://raw.githubusercontent.com/tao-shen/FEMNIST_pytorch/master/femnist.tar.gz',
         '59c65cec646fc57fe92d27d83afdf0ed')]
    def __init__(self, root, train=True, transform=None, target_transform=None,
                 download=False):
        # Deliberately skips MNIST.__init__ (which would load MNIST's own
        # files) by calling MNIST's *parent* initialiser; FEMNIST loads its
        # own processed file further down.
        super(MNIST, self).__init__(root, transform=transform,
                                    target_transform=target_transform)
        self.train = train
        if download:
            self.download()
        if not self._check_exists():
            raise RuntimeError('Dataset not found.' +
                               ' You can use download=True to download it')
        if self.train:
            data_file = self.training_file
        else:
            data_file = self.test_file
        # Processed file stores (images, labels, per-writer sample counts).
        self.data, self.targets, self.users_index = torch.load(os.path.join(self.processed_folder, data_file))
        # print(len(self.data))
        # print(len(self.targets))
        # print(len(self.users_index))
        # Expand users_index (number of samples per writer) into a flat
        # per-sample writer id, so self.uid[i] is the writer of sample i.
        self.uid = []
        i = 0
        for n in self.users_index:
            for j in range(n):
                self.uid.append(i)
            i += 1
        # print(len(self.uid))
    # def __getitem__(self, index):
    # img, target = self.data[index], int(self.targets[index])
    # img = Image.fromarray(img.numpy(), mode='F')
    # if self.transform is not None:
    # img = self.transform(img)
    # if self.target_transform is not None:
    # target = self.target_transform(target)
    # return img, target
    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*.

        NOTE(review): ``uid`` is looked up but not returned -- presumably
        kept for a writer-aware variant; confirm before relying on it.
        """
        img, target, uid = self.data[index], int(self.targets[index]), int(self.uid[index])
        # mode='F': 32-bit floating-point single-channel PIL image.
        img = Image.fromarray(img.numpy(), mode='F')
        if self.transform is not None:
            img = self.transform(img)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return img, target
    def download(self):
        """Download the FEMNIST data if it doesn't exist in processed_folder already."""
        import shutil
        if self._check_exists():
            return
        utils.makedir_exist_ok(self.raw_folder)
        utils.makedir_exist_ok(self.processed_folder)
        # download files
        for url, md5 in self.resources:
            filename = url.rpartition('/')[2]
            utils.download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=md5)
        # process and save as torch files
        print('Processing...')
        # The extracted archive already ships the processed .pt files;
        # relocating them is the only "processing" needed.
        shutil.move(os.path.join(self.raw_folder, self.training_file), self.processed_folder)
        shutil.move(os.path.join(self.raw_folder, self.test_file), self.processed_folder)
| 3,128 | 36.25 | 110 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/usps.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'usps_28x28.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # dataset
# dataset_training = data['dataset'][0]
# dataset_test = data['dataset'][1]
# # a = dataset_training[0] # data
# # print(type(a)) # numpy
# # print(len(a)) # 7438
# # print(len(a[0])) # 1
# # print(len(a[0][0])) # 28
# # print(len(a[0][0][0])) # 28
# # b = dataset_training[1] # targets
# # print(len(b)) # 7438
# # print(len(b[0])) # 1
# training_data = []
# for img in dataset_training[0]:
# img = img * 255
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# # print(img.size()) # 28 28 3
# training_data.append(img)
# # print(len(temp))
# # print(len(temp[0]))
# # print(len(temp[0][0]))
# training_targets = []
# for label in dataset_training[1]:
# training_targets.append(label[0])
# # print(label[0])
# torch.save((training_data, training_targets), 'USPS/processed/training.pt')
# test_data = []
# for img in dataset_test[0]:
# img = img * 255
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# # print(img.size()) # 28 28 3
# test_data.append(img)
# # print(len(temp))
# # print(len(temp[0]))
# # print(len(temp[0][0]))
# test_targets = []
# for label in dataset_test[1]:
# test_targets.append(label[0])
# # print(label[0])
# torch.save((test_data, test_targets), 'USPS/processed/test.pt')
class USPS(MNIST):
    """USPS digits exposed through the torchvision MNIST dataset interface.

    ``data``/``targets`` are loaded by the parent class from processed .pt
    files; ``__getitem__`` rebuilds a PIL RGB image per sample so standard
    torchvision transforms can be applied.
    """

    def __init__(self, *args, **kwargs):
        super(USPS, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        raw, target = self.data[index], int(self.targets[index])
        # Cast to uint8 so PIL accepts the buffer in RGB mode.
        sample = Image.fromarray(raw.numpy().astype('uint8'), mode='RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
def digit_five_train_transforms():
    """Training-time transform pipeline for Digit-Five images.

    Only tensor conversion is active; the augmentation steps (crop, colour
    jitter, grayscale, affine) and normalisation are deliberately disabled.
    """
    return transforms.Compose([transforms.ToTensor()])
def digit_five_test_transforms():
    """Evaluation-time transform pipeline: tensor conversion only
    (normalisation is deliberately disabled)."""
    return transforms.Compose([transforms.ToTensor()])
class Loader(object):
    """Bundle the train/test datasets and DataLoaders for one Digit-Five domain.

    Exposes ``train_dataset``/``test_dataset``, ``train_loader``/``test_loader``,
    the per-sample ``img_shape`` (batch dimension dropped) and ``num_class``.
    """

    def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=None, test_transform=None, target_transform=None, use_cuda=False):
        """Create datasets and loaders for *dataset_ident* (here: ``'USPS'``).

        ``train_transform``/``test_transform`` default to the module's
        standard Digit-Five pipelines; pass explicit transforms to override.
        """
        # Was: defaults of ``digit_five_*_transforms()`` in the signature --
        # those calls run once at class-definition time and the result is
        # shared across instances; resolve them lazily instead.
        if train_transform is None:
            train_transform = digit_five_train_transforms()
        if test_transform is None:
            test_transform = digit_five_test_transforms()
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        loader_map = {
            'USPS': USPS,
        }
        num_class = {
            'USPS': 10,
        }
        # Get the datasets
        self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
                                                                 train_transform, test_transform, target_transform)
        # Set the loaders
        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        # Peek at one batch to record the per-sample shape.
        tmp_batch = next(iter(self.train_loader))[0]
        self.img_shape = list(tmp_batch.size())[1:]
        self.num_class = num_class[dataset_ident]

    @staticmethod
    def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
        """Instantiate the train and test splits of *dataset*."""
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform,
                                target_transform=target_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform,
                               target_transform=target_transform)
        return train_dataset, test_dataset
# loader = Loader('USPS')
# dataset_train = loader.train_dataset
# img = dataset_train[50][0]
# print(dataset_train[50][1])
# img = img * 255
# print(img)
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
| 5,890 | 30.502674 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/svhn.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'svhn_train_32x32.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # X
# # y
# data['X'] = data['X'].transpose(3, 0, 1, 2)
# a = data['X']
# print(len(a)) # 73257
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# training_data = []
# for img in data['X']:
# img = torch.from_numpy(img)
# training_data.append(img)
# training_targets = []
# for label in data['y']:
# if label[0] == 10:
# l = 0
# else:
# l = label[0]
# training_targets.append(l)
# torch.save((training_data, training_targets), 'SVHN/processed/training.pt')
# dataFile = 'svhn_test_32x32.mat'
# data = scio.loadmat(dataFile)
# # for k in data.keys():
# # print(k)
# # __header__
# # __version__
# # __globals__
# # X
# # y
# data['X'] = data['X'].transpose(3, 0, 1, 2)
# a = data['X']
# print(len(a)) # 26032
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# test_data = []
# for img in data['X']:
# img = torch.from_numpy(img)
# test_data.append(img)
# test_targets = []
# for label in data['y']:
# if label[0] == 10:
# l = 0
# else:
# l = label[0]
# test_targets.append(l)
# torch.save((test_data, test_targets), 'SVHN/processed/test.pt')
class SVHN(MNIST):
    """SVHN digits exposed through the torchvision MNIST dataset interface.

    ``data``/``targets`` are loaded by the parent class from processed .pt
    files; ``__getitem__`` rebuilds a PIL RGB image per sample so standard
    torchvision transforms can be applied.
    """

    def __init__(self, *args, **kwargs):
        super(SVHN, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        raw, target = self.data[index], int(self.targets[index])
        # Cast to uint8 so PIL accepts the buffer in RGB mode.
        sample = Image.fromarray(raw.numpy().astype('uint8'), mode='RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
def digit_five_train_transforms():
    """Training-time transform pipeline for Digit-Five images.

    Only tensor conversion is active; the augmentation steps (crop, colour
    jitter, grayscale, affine) and normalisation are deliberately disabled.
    """
    return transforms.Compose([transforms.ToTensor()])
def digit_five_test_transforms():
    """Evaluation-time transform pipeline: tensor conversion only
    (normalisation is deliberately disabled)."""
    return transforms.Compose([transforms.ToTensor()])
class Loader(object):
    """Bundle the train/test datasets and DataLoaders for one Digit-Five domain.

    Exposes ``train_dataset``/``test_dataset``, ``train_loader``/``test_loader``,
    the per-sample ``img_shape`` (batch dimension dropped) and ``num_class``.
    """

    def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=None, test_transform=None, target_transform=None, use_cuda=False):
        """Create datasets and loaders for *dataset_ident* (here: ``'SVHN'``).

        ``train_transform``/``test_transform`` default to the module's
        standard Digit-Five pipelines; pass explicit transforms to override.
        """
        # Was: defaults of ``digit_five_*_transforms()`` in the signature --
        # those calls run once at class-definition time and the result is
        # shared across instances; resolve them lazily instead.
        if train_transform is None:
            train_transform = digit_five_train_transforms()
        if test_transform is None:
            test_transform = digit_five_test_transforms()
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        loader_map = {
            'SVHN': SVHN,
        }
        num_class = {
            'SVHN': 10,
        }
        # Get the datasets
        self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
                                                                 train_transform, test_transform, target_transform)
        # Set the loaders
        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        # Peek at one batch to record the per-sample shape.
        tmp_batch = next(iter(self.train_loader))[0]
        self.img_shape = list(tmp_batch.size())[1:]
        self.num_class = num_class[dataset_ident]

    @staticmethod
    def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
        """Instantiate the train and test splits of *dataset*."""
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform,
                                target_transform=target_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform,
                               target_transform=target_transform)
        return train_dataset, test_dataset
# loader = Loader('SVHN')
# dataset_train = loader.train_dataset
# img = dataset_train[50][0]
# print(dataset_train[50][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,699 | 29.15873 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/syn.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'syn_number.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# test_data
# test_label
# train_data
# train_label
# # data['train_data'] = data['train_data'].transpose(0, 3, 1, 2)
# a = data['train_data']
# print(len(a)) # 25000
# print(len(a[0])) # 32
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 3
# training_data = []
# for img in data['train_data']:
# img = torch.from_numpy(img).int()
# training_data.append(img)
# training_targets = []
# for label in data['train_label']:
# l = label[0]
# training_targets.append(l)
# torch.save((training_data, training_targets), 'SYN/processed/training.pt')
# a = data['test_data']
# print(len(a)) # 9000
# print(len(a[0])) # 3
# print(len(a[0][0])) # 32
# print(len(a[0][0][0])) # 32
# test_data = []
# for img in data['test_data']:
# img = torch.from_numpy(img).int()
# test_data.append(img)
# test_targets = []
# for label in data['test_label']:
# l = label[0]
# test_targets.append(l)
# torch.save((test_data, test_targets), 'SYN/processed/test.pt')
class SYN(MNIST):
    """Synthetic digits exposed through the torchvision MNIST dataset interface.

    ``data``/``targets`` are loaded by the parent class from processed .pt
    files; ``__getitem__`` rebuilds a PIL RGB image per sample so standard
    torchvision transforms can be applied.
    """

    def __init__(self, *args, **kwargs):
        super(SYN, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        raw, target = self.data[index], int(self.targets[index])
        # Cast to uint8 so PIL accepts the buffer in RGB mode.
        sample = Image.fromarray(raw.numpy().astype('uint8'), mode='RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
def digit_five_train_transforms():
    """Training-time transform pipeline for Digit-Five images.

    Only tensor conversion is active; the augmentation steps (crop, colour
    jitter, grayscale, affine) and normalisation are deliberately disabled.
    """
    return transforms.Compose([transforms.ToTensor()])
def digit_five_test_transforms():
    """Evaluation-time transform pipeline: tensor conversion only
    (normalisation is deliberately disabled)."""
    return transforms.Compose([transforms.ToTensor()])
class Loader(object):
    """Bundle the train/test datasets and DataLoaders for one Digit-Five domain.

    Exposes ``train_dataset``/``test_dataset``, ``train_loader``/``test_loader``,
    the per-sample ``img_shape`` (batch dimension dropped) and ``num_class``.
    """

    def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=None, test_transform=None, target_transform=None, use_cuda=False):
        """Create datasets and loaders for *dataset_ident* (here: ``'SYN'``).

        ``train_transform``/``test_transform`` default to the module's
        standard Digit-Five pipelines; pass explicit transforms to override.
        """
        # Was: defaults of ``digit_five_*_transforms()`` in the signature --
        # those calls run once at class-definition time and the result is
        # shared across instances; resolve them lazily instead.
        if train_transform is None:
            train_transform = digit_five_train_transforms()
        if test_transform is None:
            test_transform = digit_five_test_transforms()
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        loader_map = {
            'SYN': SYN,
        }
        num_class = {
            'SYN': 10,
        }
        # Get the datasets
        self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
                                                                 train_transform, test_transform, target_transform)
        # Set the loaders
        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        # Peek at one batch to record the per-sample shape.
        tmp_batch = next(iter(self.train_loader))[0]
        self.img_shape = list(tmp_batch.size())[1:]
        self.num_class = num_class[dataset_ident]

    @staticmethod
    def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
        """Instantiate the train and test splits of *dataset*."""
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform,
                                target_transform=target_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform,
                               target_transform=target_transform)
        return train_dataset, test_dataset
# loader = Loader('SYN')
# dataset_train = loader.train_dataset
# img = dataset_train[40][0]
# print(dataset_train[40][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,471 | 30.630058 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/mnistm.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'mnistm_with_label.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# label_test
# label_train
# test
# train
# img = data['train'][4]
# # img = Image.fromarray(img, mode='RGB')
# # img.show()
# img = torch.from_numpy(img)
# print(img)
# label = np.argmax(data['label_train'][4])
# print(label)
# training_data = []
# for img in data['train']:
# img = torch.from_numpy(img).int()
# # print(img)
# training_data.append(img)
# img = training_data[0]
# print(type(img))
# img = img.numpy().astype('uint8')
# img = Image.fromarray(img, mode='RGB')
# img.show()
# training_targets = []
# for label in data['label_train']:
# l = np.argmax(np.array(label))
# training_targets.append(l)
# print(training_targets[0])
# torch.save((training_data, training_targets), 'MNISTM/processed/training.pt')
# test_data = []
# for img in data['test']:
# img = torch.from_numpy(img).int()
# test_data.append(img)
# test_targets = []
# for label in data['label_test']:
# l = np.argmax(np.array(label))
# test_targets.append(l)
# torch.save((test_data, test_targets), 'MNISTM/processed/test.pt')
class MNISTM(MNIST):
    """MNIST-M digits exposed through the torchvision MNIST dataset interface.

    ``data``/``targets`` are loaded by the parent class from processed .pt
    files; ``__getitem__`` rebuilds a PIL RGB image per sample so standard
    torchvision transforms can be applied.
    """

    def __init__(self, *args, **kwargs):
        super(MNISTM, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        raw, target = self.data[index], int(self.targets[index])
        # Cast to uint8 so PIL accepts the buffer in RGB mode.
        sample = Image.fromarray(raw.numpy().astype('uint8'), mode='RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
def digit_five_train_transforms():
    """Training-time transform pipeline for Digit-Five images.

    Only tensor conversion is active; the augmentation steps (crop, colour
    jitter, grayscale, affine) and normalisation are deliberately disabled.
    """
    return transforms.Compose([transforms.ToTensor()])
def digit_five_test_transforms():
    """Evaluation-time transform pipeline: tensor conversion only
    (normalisation is deliberately disabled)."""
    return transforms.Compose([transforms.ToTensor()])
class Loader(object):
    """Bundle the train/test datasets and DataLoaders for one Digit-Five domain.

    Exposes ``train_dataset``/``test_dataset``, ``train_loader``/``test_loader``,
    the per-sample ``img_shape`` (batch dimension dropped) and ``num_class``.
    """

    def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=None, test_transform=None, target_transform=None, use_cuda=False):
        """Create datasets and loaders for *dataset_ident* (here: ``'MNISTM'``).

        ``train_transform``/``test_transform`` default to the module's
        standard Digit-Five pipelines; pass explicit transforms to override.
        """
        # Was: defaults of ``digit_five_*_transforms()`` in the signature --
        # those calls run once at class-definition time and the result is
        # shared across instances; resolve them lazily instead.
        if train_transform is None:
            train_transform = digit_five_train_transforms()
        if test_transform is None:
            test_transform = digit_five_test_transforms()
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        loader_map = {
            'MNISTM': MNISTM,
        }
        num_class = {
            'MNISTM': 10,
        }
        # Get the datasets
        self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
                                                                 train_transform, test_transform, target_transform)
        # Set the loaders
        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        # Peek at one batch to record the per-sample shape.
        tmp_batch = next(iter(self.train_loader))[0]
        self.img_shape = list(tmp_batch.size())[1:]
        self.num_class = num_class[dataset_ident]

    @staticmethod
    def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
        """Instantiate the train and test splits of *dataset*."""
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform,
                                target_transform=target_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform,
                               target_transform=target_transform)
        return train_dataset, test_dataset
# loader = Loader('MNISTM')
# dataset_train = loader.train_dataset
# img = dataset_train[40][0]
# print(dataset_train[40][1])
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
# dataset_training = torch.load('MNISTM/processed/training.pt')
# img = dataset_training[0][0]
# print(img.size())
# img = Image.fromarray(np.array(img).astype('uint8'), mode='RGB')
# img.show()
| 5,512 | 29.97191 | 215 | py |
GalaxyDataset | GalaxyDataset-master/digitfive/mnist.py | import scipy.io as scio
import numpy as np
from PIL import Image
import os
import os.path
import torch
import torchvision
from torchvision import datasets, transforms
from torchvision.datasets import MNIST, utils
from torch.utils.data import DataLoader, Dataset
# dataFile = 'mnist_data.mat'
# data = scio.loadmat(dataFile)
# for k in data.keys():
# print(k)
# __header__
# __version__
# __globals__
# test_32
# test_28
# label_test
# label_train
# train_32
# train_28
# img = data['train_28'][0]
# img = torch.from_numpy(img)
# print(img) # torch.Size([28, 28, 1])
# training_data = []
# for img in data['train_28']:
# img = img.transpose(2, 0, 1)
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# training_data.append(img)
# img = training_data[0]
# # img = Image.fromarray(img.numpy(), mode='RGB') # black
# img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # astype('uint8')
# img.show()
# training_targets = []
# for label in data['label_train']:
# l = np.argmax(np.array(label))
# training_targets.append(l)
# torch.save((training_data, training_targets), 'MNIST/processed/training.pt')
# test_data = []
# for img in data['test_28']:
# img = img.transpose(2, 0, 1)
# img = img.tolist()
# temp = img[0]
# img.append(temp)
# img.append(temp)
# img = torch.Tensor(img)
# img = img.permute(1, 2, 0)
# test_data.append(img)
# test_targets = []
# for label in data['label_test']:
# l = np.argmax(np.array(label))
# test_targets.append(l)
# torch.save((test_data, test_targets), 'MNIST/processed/test.pt')
# img = test_data[0]
# # img = Image.fromarray(img.numpy(), mode='RGB') # black
# img = Image.fromarray(img.numpy().astype('uint8'), mode='RGB') # astype('uint8')
# img.show()
class MNIST(MNIST):
    """MNIST digits (3-channel variant) via the torchvision MNIST interface.

    NOTE: this deliberately shadows the imported torchvision ``MNIST`` name;
    ``data``/``targets`` are loaded by the parent class from processed .pt
    files and ``__getitem__`` rebuilds a PIL RGB image per sample.
    """

    def __init__(self, *args, **kwargs):
        super(MNIST, self).__init__(*args, **kwargs)

    def __getitem__(self, index):
        """Return the ``(image, target)`` pair at *index*."""
        raw, target = self.data[index], int(self.targets[index])
        # Cast to uint8 so PIL accepts the buffer in RGB mode.
        sample = Image.fromarray(raw.numpy().astype('uint8'), mode='RGB')
        if self.transform is not None:
            sample = self.transform(sample)
        if self.target_transform is not None:
            target = self.target_transform(target)
        return sample, target
def digit_five_train_transforms():
    """Training-time transform pipeline for Digit-Five images.

    Only tensor conversion is active; the augmentation steps (crop, colour
    jitter, grayscale, affine) and normalisation are deliberately disabled.
    """
    return transforms.Compose([transforms.ToTensor()])
def digit_five_test_transforms():
    """Evaluation-time transform pipeline: tensor conversion only
    (normalisation is deliberately disabled)."""
    return transforms.Compose([transforms.ToTensor()])
class Loader(object):
    """Bundle the train/test datasets and DataLoaders for one Digit-Five domain.

    Exposes ``train_dataset``/``test_dataset``, ``train_loader``/``test_loader``,
    the per-sample ``img_shape`` (batch dimension dropped) and ``num_class``.
    """

    def __init__(self, dataset_ident, file_path='', download=False, batch_size=128, train_transform=None, test_transform=None, target_transform=None, use_cuda=False):
        """Create datasets and loaders for *dataset_ident* (here: ``'MNIST'``).

        ``train_transform``/``test_transform`` default to the module's
        standard Digit-Five pipelines; pass explicit transforms to override.
        """
        # Was: defaults of ``digit_five_*_transforms()`` in the signature --
        # those calls run once at class-definition time and the result is
        # shared across instances; resolve them lazily instead.
        if train_transform is None:
            train_transform = digit_five_train_transforms()
        if test_transform is None:
            test_transform = digit_five_test_transforms()
        kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}
        loader_map = {
            'MNIST': MNIST,
        }
        num_class = {
            'MNIST': 10,
        }
        # Get the datasets
        self.train_dataset, self.test_dataset = self.get_dataset(loader_map[dataset_ident], file_path, download,
                                                                 train_transform, test_transform, target_transform)
        # Set the loaders
        self.train_loader = DataLoader(self.train_dataset, batch_size=batch_size, shuffle=True, **kwargs)
        self.test_loader = DataLoader(self.test_dataset, batch_size=batch_size, shuffle=False, **kwargs)
        # Peek at one batch to record the per-sample shape.
        tmp_batch = next(iter(self.train_loader))[0]
        self.img_shape = list(tmp_batch.size())[1:]
        self.num_class = num_class[dataset_ident]

    @staticmethod
    def get_dataset(dataset, file_path, download, train_transform, test_transform, target_transform):
        """Instantiate the train and test splits of *dataset*."""
        train_dataset = dataset(file_path, train=True, download=download,
                                transform=train_transform,
                                target_transform=target_transform)
        test_dataset = dataset(file_path, train=False, download=download,
                               transform=test_transform,
                               target_transform=target_transform)
        return train_dataset, test_dataset
# loader = Loader('MNIST')
# dataset_train = loader.train_dataset
# img = dataset_train[5][0]
# print(dataset_train[5][1])
# print(img)
# img = img * 255
# img = Image.fromarray(np.array(img.permute(1, 2, 0)).astype('uint8'), mode='RGB')
# img.show()
| 5,753 | 30.966667 | 215 | py |
skimulator | skimulator-master/doc/source/conf.py | # -*- coding: utf-8 -*-
#
# S4 documentation build configuration file, created by
# sphinx-quickstart on Thu Jul 10 16:54:19 2014.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
from os.path import expanduser
home = expanduser("~")
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../../'+'skimulator'))
sys.path.insert(0, os.path.abspath('../../'))
#sys.path.append(os.getcwd()+'/../../'+'skimulator')
#sys.path.append(os.getcwd()+'/../../')
#os.path.join(os.path.dirname(__file__), os.path.pardir)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'skimulator'
copyright = u'2017, Lucile Gaultier'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.1'
# The full version, including alpha/beta/rc tags.
release = '1.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
html_theme = 'sphinxdoc'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'skimulator'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'skimulator.tex', 'SKIM Simulator Documentation',
u'Lucile Gaultier', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'SKIM Simulator', u'SKIM Simulator Documentation',
[u'Lucile Gaultier'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'SKIM Simulator', u'SKIM Simulator Documentation',
u'Lucile Gaultier', 'SKIM Simulator', 'Open Source SKIM Simulator.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'skimulator'
epub_author = u'Lucile Gaultier'
epub_publisher = u'Lucile Gaultier'
epub_copyright = u'2017, Lucile Gaultier'
# The basename for the epub file. It defaults to the project name.
#epub_basename = u'SKIM Simulator'
# The HTML theme for the epub output. Since the default themes are not optimized
# for small screen space, using the same theme for HTML and epub output is
# usually not wise. This defaults to 'epub', a theme designed to save visual
# space.
#epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
#epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Choose between 'default' and 'includehidden'.
#epub_tocscope = 'default'
# Fix unsupported image types using the PIL.
#epub_fix_images = False
# Scale large images.
#epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#epub_show_urls = 'inline'
# If false, no index is generated.
#epub_use_index = True
| 10,718 | 30.342105 | 80 | py |
CompositionSpaceNFDI | CompositionSpaceNFDI-main/docs/source/conf.py | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
import os
import sys
import shutil
import glob
sys.path.insert(0, os.path.abspath('../../compositionspace/'))
def skip(app, what, name, obj, would_skip, options):
    """autodoc-skip-member hook: force ``__init__`` methods to be documented.

    Everything else keeps autodoc's default decision (``would_skip``).
    """
    return False if name == '__init__' else would_skip
def setup(app):
    """Sphinx entry point: register the autodoc hook defined above."""
    app.connect('autodoc-skip-member', skip)
# Refresh the local copy of the repository's example/ directory; nbsphinx
# renders the notebooks found there into documentation pages.
if os.path.exists("example"):
    shutil.rmtree("example")
shutil.copytree("../../example", "example")
# -- Project information ---------------------------------------------------
project = 'compositionspace'
copyright = '2022, Alaukik Saxena, Sarath Menon'
author = 'Alaukik Saxena, Sarath Menon'
# autodoc/napoleon build the API reference; m2r2 + sphinx_markdown_tables
# allow Markdown sources; nbsphinx renders Jupyter notebooks.
extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.coverage',
    'sphinx.ext.mathjax',
    'sphinx.ext.viewcode',
    'sphinx.ext.githubpages',
    'sphinx.ext.napoleon',
    'm2r2',
    'sphinx_markdown_tables',
    'nbsphinx',
]
# -- Options for HTML output ------------------------------------------------
html_theme = 'furo'
html_theme_options = {
    #'logo_only' : True,
    #'canonical_url' : 'https://calphy.readthedocs.io/',
}
html_extra_path = ['../_static' ]
source_suffix = ['.rst', '.md']
exclude_patterns = []
fuzzyJoiner | fuzzyJoiner-master/build_model.py | from random import shuffle
import pickle
import numpy as np
# import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=10
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_PRECOMPUTED_SPLIT = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
filepath="weights.best.hdf5"
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    Each index ``i`` describes one triplet: ``positive[i]`` is the
    anchor-positive distance and ``negative[i]`` the anchor-negative
    distance.  A triplet is a true positive when the positive is at least
    as close as the negative; a mis-ranked triplet counts as both a false
    negative and a false positive.

    :param positive: sequence of anchor-positive distances.
    :param negative: sequence of anchor-negative distances (same length).
    :return: F1 score in [0, 1]; 0.0 for empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            # Bug fix: this increment used to sit at loop level and ran on
            # every iteration, inflating the denominator by len(positive).
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    # Guard against empty input (the original raised ZeroDivisionError).
    if not (true_positive or false_positive or false_negative):
        return 0.0
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Each word in the tokenizer's vocabulary is looked up in the
    KazumaCharEmbedding; rows of the weight matrix stay all-zero for words
    beyond MAX_NB_WORDS.  The layer is non-trainable.

    :param tokenizer: fitted Keras Tokenizer providing ``word_index``.
    :return: ``keras.layers.Embedding`` of shape (num_words, EMBEDDING_DIM).
    """
    word_index = tokenizer.word_index
    # Index 0 is reserved by Keras for padding, hence the +1.
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        # NOTE(review): kz.emb appears to always return a vector, so the
        # None check looks vacuous; an all-zero vector signals a miss and is
        # only logged -- confirm against the embeddings package behaviour.
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize and pad the anchor/negative/positive name lists.

    :param texts: dict with 'anchor', 'negative', 'positive' string lists.
    :param tokenizer: fitted Keras Tokenizer.
    :return: dict with the same keys mapped to padded integer sequences.
    """
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        raw = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(raw, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Load the raw entity file, one entity per line (newlines retained)."""
    with open(filepath, 'r', encoding='utf8') as source:
        return list(source)
def read_file(file_path):
    """Parse 'anchor|positive|negative' triplet lines into parallel lists.

    In DEBUG mode, stops after DEBUG_DATA_LENGTH + 1 lines.

    :param file_path: path to a pipe-delimited triplet file.
    :return: dict with parallel 'anchor'/'positive'/'negative' lists.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # Bug fix: the original opened the file without ever closing it; the
    # context manager guarantees the handle is released even on error.
    with open(file_path, 'r', encoding='utf8') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2, val_split = 0.2, base_file=None):
    """Split entities into train/test/validation lists, or load a saved split.

    With USE_PRECOMPUTED_SPLIT the three pickles at ``base_file``.{train,
    test,validation} are loaded; otherwise the (shuffled) list is sliced and
    the split is pickled for later reuse.

    :param entities: full list of raw entity lines.  NOTE: shuffled IN PLACE
        when not in DEBUG mode.
    :param test_split: fraction of entities reserved for test.
    :param val_split: fraction reserved for validation.
    :param base_file: path prefix for the split pickle files.
    :return: (train, test, validation) lists.
    """
    if DEBUG:
        # Debug runs take a deterministic prefix instead of shuffling.
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_test_samples = int(test_split * len(ents))
    num_validation_samples = int(val_split * len(ents))
    # NOTE(review): the pickle file handles below are never explicitly
    # closed; relying on garbage collection -- consider `with` blocks.
    if USE_PRECOMPUTED_SPLIT:
        train = pickle.load(open((base_file + '.train'), "rb" ))
        test = pickle.load(open((base_file + '.test'), "rb" ))
        validation = pickle.load(open((base_file + '.validation'), "rb" ))
    else:
        # Layout: [ train ... | validation | test ] from the end of the list.
        train = ents[:-(num_validation_samples + num_test_samples)]
        test = ents[-num_test_samples:]
        validation = ents[-(num_validation_samples + num_test_samples):-num_test_samples]
        pickle.dump(train, open(base_file + '.train', 'wb'))
        pickle.dump(test, open(base_file + '.test', 'wb'))
        pickle.dump(validation, open(base_file + '.validation', 'wb'))
    return train, test, validation
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss over stacked distances.

    ``y_pred[:, 0, 0]`` is the anchor-positive distance and
    ``y_pred[:, 1, 0]`` the negative-to-centroid distance; ``y_true`` is
    ignored (Keras loss signature only).
    NOTE(review): ALPHA (45) is passed to tf.tan in radians -- confirm
    whether degrees were intended.
    """
    alpha = K.constant(ALPHA)
    ap_dist = y_pred[:, 0, 0]
    nc_dist = y_pred[:, 1, 0]
    bound = K.constant(4) * K.square(tf.tan(alpha)) * K.square(nc_dist)
    return K.mean(K.maximum(K.constant(0), K.square(ap_dist) - bound))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet-style triplet hinge loss with a fixed margin of 0.2.

    ``y_pred`` stacks [anchor-positive dist, anchor-negative dist, ...];
    ``y_true`` is ignored (Keras loss signature only).
    """
    margin = K.constant(0.2)
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), pos_sq - neg_sq + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style triplet loss using the module-level MARGIN."""
    margin = K.constant(MARGIN)
    pull_term = K.square(y_pred[:, 0, 0])
    push_term = K.square(K.maximum(margin - y_pred[:, 1, 0], K.constant(0)))
    return K.mean(pull_term + push_term)
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss: shrink the positive distance, grow the
    negative one (both terms bounded in (0, 1))."""
    pos_term = K.tanh(y_pred[:, 0, 0])
    neg_term = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    return K.mean(pos_term + neg_term)
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss that also pushes the positive-negative pair apart.

    The two "push" terms (anchor-negative and positive-negative) are
    averaged so they carry the same weight as the single pull term.
    """
    pull = K.tanh(y_pred[:, 0, 0])
    push_an = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    push_pn = K.constant(1) - K.tanh(y_pred[:, 2, 0])
    return K.mean(pull + (push_an + push_pn) / K.constant(2))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Loss from 'Deep Metric Learning with Improved Triplet Loss' (face
    clustering in videos).

    ``phi`` ranks the anchor-positive pair against the mean of the two
    negative pairs with a unit margin; ``psi`` softly caps the absolute
    anchor-positive distance at ``threshold``, weighted by ``lambda_p``.
    """
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    ap_sq = K.square(y_pred[:, 0, 0])
    an_sq = K.square(y_pred[:, 1, 0])
    pn_sq = K.square(y_pred[:, 2, 0])
    phi = ap_sq - ((an_sq + pn_sq) / K.constant(2)) + margin
    psi = ap_sq - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets whose anchor-positive distance beats the
    # anchor-negative distance (y_pred stacks [pos_dist, neg_dist, ...]).
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Unit-normalise embeddings along the feature axis (applied when
    # USE_L2_NORM is set in build_model).
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two batched embedding tensors.

    Clamped at K.epsilon() before the sqrt to keep gradients finite.
    """
    left, right = vects
    sq_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor-positive midpoint, as used
    by the angular loss."""
    anchor, positive, negative = vects
    center = (anchor + positive) / K.constant(2)
    sq_sum = K.sum(K.square(negative - center), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance (the negative input is unused but
    kept so the Lambda layer signature matches n_c_angular_distance)."""
    anchor, positive, _negative = vects
    sq_sum = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the alias mapping into a name list plus a name->index map.

    :param entity2same: anchor name -> list of alias names.
    :return: (unique_text, entity2index) where unique_text lists every name
        in encounter order and entity2index maps each name to its position.
    """
    unique_text = []
    entity2index = {}
    for key, aliases in entity2same.items():
        for name in [key] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_semi_hard_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine semi-hard triplets using an Annoy nearest-neighbour index.

    A candidate negative is kept when it appears among the positive's
    nearest neighbours yet lies further from the anchor than the positive
    does (FaceNet-style semi-hard mining).

    :param model: Keras model mapping padded sequences to embeddings.
    :param sequences: padded token sequences aligned with unique_text.
    :param entity2unique: name -> row index into sequences/unique_text.
    :param entity2same: anchor name -> list of alias names.
    :param unique_text: all names, indexed by Annoy item id.
    :param test: True -> TEST_NEIGHBOR_LEN neighbours, else TRAIN_NEIGHBOR_LEN.
    :return: dict of parallel 'anchor'/'positive'/'negative' name lists.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    # Fixed seed keeps the ANN forest deterministic across calls.
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    triplets = {}
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        expected_text = set(entity2same[key])
        expected_ids = [entity2unique[i] for i in expected_text]
        for positive in expected_text:
            k = entity2unique[positive]
            nearest = t.get_nns_by_vector(predictions[k], NNlen)
            dist_k = t.get_distance(index, k)
            semi_hards = []
            for n in nearest:
                # Skip the anchor itself, any true alias, and the positive.
                if n == index or n in expected_ids or n == k:
                    continue
                n_dist = t.get_distance(index, n)
                # Semi-hard: further from the anchor than the positive is.
                if n_dist > dist_k:
                    semi_hards.append(unique_text[n])
            # shuffle(semi_hards)
            # semi_hards = semi_hards[0:20]
            for i in semi_hards:
                triplets['anchor'].append(key)
                triplets['positive'].append(unique_text[k])
                triplets['negative'].append(i)
    return triplets
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Build triplets from ANN neighbourhoods and report retrieval metrics.

    Embeds all names with ``model``, indexes them with Annoy, and for each
    anchor pairs every true alias with every near-neighbour non-alias.
    Prints distance statistics, writes an accuracy summary JSON for HPO,
    and returns the recall (match fraction).

    :param test: when True only the recall is returned; otherwise returns
        (triplets dict, recall).
    NOTE(review): statistics.stdev raises on fewer than two samples and the
    final accuracy divides by ``total`` -- degenerate inputs (no overlaps)
    will crash here; confirm callers always produce overlaps.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    # Fixed seed keeps the ANN forest deterministic across calls.
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    precise = 0
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                # Count pairs already ranked correctly by the current model.
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Track the closest negative to measure precision at 1 below.
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # Emit every (alias, near-neighbour negative) pair for this anchor.
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    # Summary consumed by the hyper-parameter-optimisation harness.
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity lines into a canonical-name -> aliases mapping.

    :param entities: raw entity lines.
    :param people: True for person names (NameDataCleanser, needs 4 name
        variants), False for companies (CompanyDataCleanser, needs 2).
    :param limit_pairs: forwarded to the cleanser.
    :return: dict mapping the first cleansed name to its remaining aliases.
    """
    if people:
        min_names = 4
        cleanser = NameDataCleanser(0, min_names, limit_pairs=limit_pairs)
    else:
        min_names = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for raw in entities:
        cleaned = cleanser.cleanse_data(raw)
        # Keep only entities that produced enough usable name variants.
        if cleaned and len(cleaned) >= min_names:
            entity2same[cleaned[0]] = cleaned[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the (frozen) embedding layer in a flattening Sequential model,
    giving a raw pre-training embedding used to bootstrap triplet mining."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    """Assemble the triple-Siamese GRU network and its helper models.

    A shared GRU tower embeds anchor/positive/negative sequences; distance
    Lambda layers are stacked so the loss can slice them by position.

    :param embedder: frozen Embedding layer from get_embedding_layer.
    :return: (trainable triple-Siamese model, anchor-positive distance
        model, anchor-negative distance model, single-input embedding model).
    NOTE(review): ``str(i+1)`` below reuses the loop variable after the
    loop -- if NUM_LAYERS < 1 this raises NameError; confirm NUM_LAYERS is
    always >= 1 (the CLI enforces num_layers >= 2).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # The same tower (shared weights) embeds all three triplet members.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                   # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    # Side models for evaluation: raw pair distances and the embedding alone.
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# -- Command-line interface and module-level configuration -------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
parser.add_argument('--use_precomputed_split', action='store_true', help='load precomputed split for test and validation data')
parser.add_argument('--data_path', type=str, help='location to store/load data from')
args = parser.parse_args()
filepath = args.model
# Map the CLI loss name onto the loss callables defined above.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
USE_PRECOMPUTED_SPLIT = args.use_precomputed_split
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# --use_l2_norm is a free-form string; any of yes/true/t/1 enables it.
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# build_model adds one final GRU after the loop, hence the -1 here.
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
# -- Data loading, splitting, and vectorization ------------------------------
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test, validation = split(entities, test_split = .20, val_split = 0.20 , base_file=args.data_path)
print("TRAIN")
print(str(train).encode('utf-8'))
print(len(train))
print("TEST")
print(str(test).encode('utf-8'))
print(len(test))
print("validation")
print(str(validation).encode('utf-8'))
print(len(validation))
#pickle.dump(test, open(filepath + '.test_data.pickle', 'wb'))
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
entity2same_validation = generate_names(validation, people)
print(str(entity2same_train).encode('utf-8'))
print(str(entity2same_test).encode('utf-8'))
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
unique_text_validation, entity2unique_validation = build_unique_entities(entity2same_validation)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
if not USE_PRECOMPUTED_SPLIT:
    # change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
    # clues for entity names
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
    tokenizer.fit_on_texts(unique_text + unique_text_test + unique_text_validation)
    pickle.dump( tokenizer, open(args.data_path + '.tokenizer' , "wb" ))
else:
    tokenizer = pickle.load(open(args.data_path + '.tokenizer', 'rb'))
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
sequences_validation = tokenizer.texts_to_sequences(unique_text_validation)
sequences_validation = pad_sequences(sequences_validation, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    triplets = generate_triplets_from_ANN(embedder_model, sequences, entity2unique, entity2same_train, unique_text, True)
    print(len(triplets['anchor']))
    sys.exit()
# Mine initial test/validation triplets with the raw (untrained) embeddings.
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
validation_data, val_match_stats = generate_triplets_from_ANN(embedder_model, sequences_validation, entity2unique_validation, entity2same_validation, unique_text_validation, False)
validation_seq = get_sequences(validation_data, tokenizer)
print("Test stats:" + str(test_match_stats))
# -- Training and final evaluation -------------------------------------------
# NOTE(review): counter and prev_match_stats are never used below --
# presumably remnants of an earlier iterative-retraining loop.
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, train_match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Dummy labels: every loss above ignores y_true, Keras just needs a shape.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
Y_val = np.random.randint(2, size=(1,2,len(validation_data['anchor']))).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=2, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_data=([validation_seq['anchor'], validation_seq['positive'], validation_seq['negative']],Y_val))
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 25,195 | 37.118003 | 249 | py |
fuzzyJoiner | fuzzyJoiner-master/preloaded_runner.py | import pickle
import numpy as np
import tensorflow as tf
import random as random
import json
from keras import backend as K
#from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Input, Lambda, GRU
from keras.layers import Embedding
from keras.models import Model
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import statistics
import argparse
#must fix
MAX_NB_WORDS = 140000
EMBEDDING_DIM = 100
MAX_SEQUENCE_LENGTH = 10
MARGIN=10
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
filepath="weights.best.hdf5"
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma char embeddings.

    Duplicate of the builder in build_model.py: fills a weight matrix from
    KazumaCharEmbedding lookups; rows for words beyond MAX_NB_WORDS remain
    all-zero.  The returned layer is non-trainable.

    :param tokenizer: fitted Keras Tokenizer providing ``word_index``.
    :return: ``keras.layers.Embedding`` of shape (num_words, EMBEDDING_DIM).
    """
    word_index = tokenizer.word_index
    # Index 0 is reserved by Keras for padding, hence the +1.
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        # NOTE(review): kz.emb appears to always return a vector, so the
        # None check looks vacuous; an all-zero vector signals a miss and is
        # only logged -- confirm against the embeddings package behaviour.
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print(str("failed to find embedding for:" + word).encode('utf-8'))
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad each leg of a triplet batch.

    Args:
        texts: dict with 'anchor', 'positive', 'negative' lists of strings.
        tokenizer: fitted Keras Tokenizer.

    Returns:
        dict with the same keys, each holding sequences padded to
        MAX_SEQUENCE_LENGTH.
    """
    # The original repeated the same two lines for each leg; loop instead.
    sequences = {}
    for leg in ('anchor', 'negative', 'positive'):
        seqs = tokenizer.texts_to_sequences(texts[leg])
        sequences[leg] = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read one raw entity record per line (UTF-8).

    Lines keep their trailing newline, exactly as iteration yields them.
    """
    with open(filepath, 'r', encoding='utf8') as source:
        return [record for record in source]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular-loss objective over the stacked distance tensor.

    y_pred comes from the model's 'stacked_dists' layer in the angular
    branch: y_pred[:, 0, 0] is the anchor-positive distance (a_p) and
    y_pred[:, 1, 0] the distance from the negative to the anchor/positive
    midpoint (n_c). y_true is ignored (Keras requires the argument).

    NOTE(review): ALPHA is fed straight to tf.tan, which expects radians;
    ALPHA=45 reads like degrees — confirm the intended unit.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss (see paper link above) with a fixed 0.2 margin.

    mean(max(0, d(a,p)^2 - d(a,n)^2 + margin)), where y_pred[:, 0, 0] is
    the anchor-positive and y_pred[:, 1, 0] the anchor-negative distance.
    y_true is ignored.
    """
    margin = K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style objective: squared anchor-positive distance plus a
    squared hinge pushing the anchor-negative distance up to MARGIN.
    y_true is ignored.
    """
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(K.maximum(margin - y_pred[:,1,0], K.constant(0))))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed objective: drive tanh(d(a,p)) toward 0 and
    tanh(d(a,n)) toward 1. y_true is ignored."""
    return K.mean(K.tanh(y_pred[:,0,0]) + (K.constant(1) - K.tanh(y_pred[:,1,0])))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Like triplet_tanh_loss, but the repulsive term averages the
    anchor-negative distance with the positive-negative exemplar distance
    (y_pred[:, 2, 0], third tensor in 'stacked_dists'). y_true is ignored."""
    return K.mean(K.tanh(y_pred[:,0,0]) +
                  ((K.constant(1) - K.tanh(y_pred[:,1,0])) +
                   (K.constant(1) - K.tanh(y_pred[:,2,0]))) / K.constant(2));
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Loss from 'Deep Metric Learning with Improved Triplet Loss for Face
    Clustering in Videos' (see comment above).

    phi hinges the anchor-positive distance against the mean of the
    anchor-negative and positive-negative distances plus a margin; psi
    additionally (weighted by lambda_p) pulls the anchor-positive distance
    under a small threshold. y_true is ignored.
    """
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / K.constant(2)) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Metric: fraction of triplets whose anchor-positive distance is
    smaller than their anchor-negative distance. y_true is ignored."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize x along its last axis (used as a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors.

    Clamped below by K.epsilon() so the sqrt gradient stays finite.
    """
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint — the
    'n_c' term of the angular loss."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive distance for the angular loss; the negative input
    is accepted but unused so the Lambda layer signatures match."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the canonical-name -> aliases map into parallel structures.

    Returns:
        unique_text: every name (canonical first, then its aliases) in
            insertion order.
        entity2index: name -> position of that name in unique_text.
    """
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Index the model's embeddings with Annoy, score retrieval, mine triplets.

    Args:
        model: encoder whose .predict maps padded sequences to vectors.
        sequences: padded token sequences, row-aligned with unique_text.
        entity2unique: name -> row index into sequences/unique_text.
        entity2same: canonical name -> list of same-entity aliases.
        unique_text: all names, row-aligned with sequences.
        test: when True only the match ratio is returned; otherwise a
            (triplets, match_ratio) pair for further training.

    Side effect: writes {'accuracy', 'steps'} as JSON to the module-level
    output_file_name_for_hpo (apparently for an HPO harness — the name
    suggests this; confirm with the consumer).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)  # deterministic forest so runs are comparable
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0   # (pos, neg) pairs where the positive is closer
    total = 0
    precise = 0        # queries with >=1 positive closer than every negative
    triplets = {}
    closest_positive_counts = []
    pos_distances = []
    neg_distances = []
    all_pos_distances = []
    all_neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # distance from the query to its closest retrieved negative
        min_neg_distance = 1000000
        for i in negatives:
            dist_neg = t.get_distance(index, entity2unique[i])
            all_neg_distances.append(dist_neg)
            if dist_neg < min_neg_distance:
                min_neg_distance = dist_neg
        for j in expected_text:
            dist_pos = t.get_distance(index, entity2unique[j])
            all_pos_distances.append(dist_pos)
        # count retrieved positives that beat the closest negative
        closest_pos_count = 0
        for p in overlap:
            dist_pos = t.get_distance(index, entity2unique[p])
            if dist_pos < min_neg_distance:
                closest_pos_count+=1
        if closest_pos_count > 0:
            precise+=1
        closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
        # every (expected-positive, retrieved-negative) pair becomes a training triplet
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
    print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
    print("max all positive distance:" + str(max(all_pos_distances)))
    print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
    print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
    print("max all neg distance:" + str(max(all_neg_distances)))
    # NOTE(review): raises ZeroDivisionError when no (pos, neg) pair was observed
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    print("Precision at 1: " + str(precise / len(entity2same)))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w', encoding='utf8') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity records into a name -> same-entity-aliases map.

    Args:
        entities: raw records (one entity per item).
        people: True for person names (4 variants required), False for
            company names (2 variants required).
        limit_pairs: forwarded to the cleanser.

    Records that cleanse to fewer than the required number of variants
    are dropped.
    """
    if people:
        num_names = 4
        cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        cleanser = CompanyDataCleanser(limit_pairs)
        num_names = 2
    same_names = {}
    for record in entities:
        cleaned = cleanser.cleanse_data(record)
        if cleaned and len(cleaned) >= num_names:
            same_names[cleaned[0]] = cleaned[1:]
    return same_names
def build_model_from_weights(weights_file, embbeding_dimensions):
    """Rebuild the triplet-siamese network and load saved weights.

    Args:
        weights_file: path to an HDF5 weights file saved at training time.
        embbeding_dimensions: vocabulary size for the placeholder Embedding
            layer; must match the saved weights.

    If load_weights raises ValueError (shape mismatch), the expected
    vocabulary size is scraped out of the exception text and the function
    retries itself with that size.

    Returns:
        (model, test_positive_model, test_negative_model, inter_model).
    """
    # The real embedding weights come from the file; this layer is a frozen placeholder.
    embedder = Embedding(embbeding_dimensions, EMBEDDING_DIM, input_length=MAX_SEQUENCE_LENGTH, trainable=False)
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    for i in range(0, 2):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # One shared encoder applied to each leg of the triplet.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        # NOTE(review): only two tensors are stacked here yet output_shape says (3, 1) — confirm.
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        try:
            model.load_weights(weights_file)
        except ValueError as e:
            full = str(e)
            #https://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
            new_emb = [int(num) for num in full.replace('.', ' ').split() if num.isdigit()]
            # Retry with the vocabulary size reported in the mismatch message.
            return build_model_from_weights(weights_file, new_emb[2])
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        try:
            model.load_weights(weights_file)
        except ValueError as e:
            full = str(e)
            #https://stackoverflow.com/questions/4289331/python-extract-numbers-from-a-string
            new_emb = [int(num) for num in full.replace('.', ' ').split() if num.isdigit()]
            # Retry with the vocabulary size reported in the mismatch message.
            return build_model_from_weights(weights_file, new_emb[2])
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Script entry: parse CLI args, load saved artifacts, evaluate on test data ----
output_file_name_for_hpo = "val_dict_list.json"  # generate_triplets_from_ANN writes its accuracy here
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: triplet-loss, improved-loss, angular-loss, adapted-loss')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
parser.add_argument('--tokenizer', type=str, help='location of tokenizer file')
parser.add_argument('--previous_test', type=str, help='use previous test data')
args = parser.parse_args()
filepath = args.model
# Map the CLI loss name onto the corresponding loss callable.
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'adapted-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
people = 'people' in args.entity_type
# Load test data if specified (pickled list from a prior run) or read raw entities.
if args.previous_test and args.previous_test.lower() in ("yes", "true", "t", "1"):
    test = pickle.load(open(args.input, 'rb'))  # NOTE(review): file handle is never closed
else:
    test = read_entities(args.input)
print("TEST")
print(str(test).encode('utf-8'))
#encode test data for annoy
entity2same_test = generate_names(test, people, limit_pairs=True)
print(str(entity2same_test).encode('utf-8'))
#load tokenizer (pickled at training time; commented line shows how it was built)
tokenizer = pickle.load(open(args.tokenizer, 'rb'))
#Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("test text len:" + str(len(unique_text_test)))
#use tokenizer to convert to sequences
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build model so we can load weights into it
word_index = tokenizer.word_index
num_words = len(word_index) + 1
model, test_positive_model, test_negative_model, inter_model = build_model_from_weights(args.model, num_words)
current_model = inter_model
# print some statistics
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 17,465 | 35.848101 | 139 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM.py | import numpy as np
import random as random
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
# random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
# session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
import tensorflow as tf
# from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph())
# """
from keras import backend as K
K.set_session(sess)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Lambda, GRU, Activation
from keras.layers import Embedding
from keras.models import Model, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
# NOTE(review): author left "must fix" here. DEBUG is hard-coded True, so
# split() below truncates the data to DEBUG_DATA_LENGTH entities.
MAX_NB_WORDS = 140000      # cap on vocabulary entries given embeddings
EMBEDDING_DIM = 100        # Kazuma character-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10   # token length each name is padded/truncated to
MARGIN=10                  # margin for the triplet losses below
ALPHA=30                   # angle used by angular_loss (passed to tf.tan)
USE_GRU=True               # GRU layers vs Dense in get_hidden_layer()
DEBUG = True
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20      # ANN neighbourhood size during training
TEST_NEIGHBOR_LEN=20       # ANN neighbourhood size during testing
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
def f1score(positive, negative):
    """Compute an F1-style score over paired triplet distances.

    For each index i, the pair counts as a true positive when the
    anchor-positive distance positive[i] is <= the anchor-negative
    distance negative[i]; otherwise it counts as both a false positive
    and a false negative (the ordering is wrong in both directions),
    giving f1 = 2*tp / (2*tp + fn + fp).

    Raises ZeroDivisionError on empty input, as before.
    """
    # Fixes: removed the unused, misspelled `fsocre` local and renamed
    # `false_negitive`; iterate the pairs directly instead of by index.
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for pos, neg in zip(positive, negative):
        if pos <= neg:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negative + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Create a frozen Keras Embedding layer from Kazuma character embeddings.

    Each vocabulary word (up to MAX_NB_WORDS) fills one row of the weight
    matrix; all-zero embeddings are reported as lookup failures but kept.
    """
    index_by_word = tokenizer.word_index
    vocab_size = len(index_by_word) + 1  # row 0 is reserved for padding
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, row in index_by_word.items():
        if row >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is None:
            continue
        if sum(vector) == 0:
            print("failed to find embedding for:" + word)
        weights[row] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenize and pad each leg of a triplet batch.

    Args:
        texts: dict with 'anchor', 'positive', 'negative' lists of strings.
        tokenizer: fitted Keras Tokenizer.

    Returns:
        dict with the same keys, padded to MAX_SEQUENCE_LENGTH.
    """
    # The original repeated the same two lines for each leg; loop instead.
    sequences = {}
    for leg in ('anchor', 'negative', 'positive'):
        seqs = tokenizer.texts_to_sequences(texts[leg])
        sequences[leg] = pad_sequences(seqs, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read one raw entity record per line; lines keep their trailing newline."""
    with open(filepath) as handle:
        return [record for record in handle]
def read_file(file_path):
    """Parse 'anchor|positive|negative' triplet lines into parallel lists.

    The negative field keeps whatever trailing newline split() leaves on
    it, matching the previous behavior. In DEBUG mode reading stops after
    DEBUG_DATA_LENGTH + 1 lines.

    Fix: the file handle was previously opened and never closed; use a
    context manager so it is closed deterministically.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, validation) lists.

    In DEBUG mode the first DEBUG_DATA_LENGTH records are used verbatim;
    otherwise the list is shuffled first. NOTE: random.shuffle mutates the
    caller's list in place.
    """
    if DEBUG:
        pool = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        pool = entities
    holdout = int(test_split * len(pool))
    return pool[:-holdout], pool[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular-loss objective over the stacked distance tensor.

    y_pred[:, 0, 0] is the anchor-positive distance (a_p) and
    y_pred[:, 1, 0] the negative-to-midpoint distance (n_c), as stacked
    by build_model's angular branch. y_true is ignored.

    NOTE(review): ALPHA goes straight to tf.tan, which expects radians;
    ALPHA=30 reads like degrees — confirm the intended unit.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss (see paper link above) using the module MARGIN:
    mean(max(0, d(a,p)^2 - d(a,n)^2 + margin)). y_true is ignored."""
    margin = K.constant(MARGIN)
    return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style objective: squared anchor-positive distance plus
    squared (MARGIN - anchor-negative distance). y_true is ignored.

    NOTE(review): the (margin - d_neg) term is not clamped at zero here,
    so negatives farther than MARGIN are also penalized — confirm intent.
    """
    # margin = K.constant(MARGIN)
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    margin = K.constant(MARGIN)
    return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]))
    # return K.mean(K.square(y_pred[:,0,0]) + K.square(margin - y_pred[:,1,0]) + K.square(margin - y_pred[:,2,0]))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Loss from 'Deep Metric Learning with Improved Triplet Loss for Face
    Clustering in Videos' (see comment above).

    phi hinges the anchor-positive distance against the mean of the
    anchor-negative and positive-negative distances plus MARGIN; psi
    (weighted by lambda_p) pulls the anchor-positive distance under a
    small threshold. y_true is ignored.
    """
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    a_p_distance = y_pred[:,0,0]
    a_n_distance = y_pred[:,1,0]
    p_n_distance = y_pred[:,2,0]
    phi = a_p_distance - ((a_n_distance + p_n_distance) / 2) + margin
    psi = a_p_distance - threshold
    return K.maximum(K.constant(0), phi) + lambda_p * K.maximum(K.constant(0), psi)
def accuracy(y_true, y_pred):
    """Metric: fraction of triplets whose anchor-positive distance is
    smaller than their anchor-negative distance. y_true is ignored."""
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    """L2-normalize x along its last axis (used as a Lambda layer)."""
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Rescale each row of x so its L2 norm becomes tanh(norm).

    Keeps the direction while squashing the length into (0, 1); the norm
    is clamped below by K.epsilon() to avoid division by zero.
    """
    square_sum = K.sum(K.square(x), axis=-1, keepdims=True)
    dist = K.sqrt(K.maximum(square_sum, K.epsilon()))
    tanh = K.tanh(dist)
    scale = tanh / dist
    return x * scale
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors,
    clamped below by K.epsilon() so the sqrt gradient stays finite."""
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint — the
    'n_c' term of the angular loss."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive distance for the angular loss; the negative input
    is accepted but unused so the Lambda layer signatures match."""
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten the canonical-name -> aliases map into parallel structures.

    Returns:
        unique_text: every name (canonical first, then its aliases) in
            insertion order.
        entity2index: name -> position of that name in unique_text.
    """
    unique_text = []
    entity2index = {}
    for canonical, aliases in entity2same.items():
        for name in [canonical] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Index embeddings with Annoy, score retrieval quality, mine triplets.

    Args:
        model: encoder whose .predict maps padded sequences to vectors.
        sequences: padded token sequences, row-aligned with unique_text.
        entity2unique: name -> row index into sequences/unique_text.
        entity2same: canonical name -> list of same-entity aliases.
        unique_text: all names, row-aligned with sequences.
        test: when True only the match ratio is returned; otherwise a
            (triplets, match_ratio) pair for further training.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)  # deterministic forest so runs are comparable
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    accuracy = 0  # NOTE(review): shadows the module-level accuracy() metric inside this function
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Every (expected-positive, retrieved-negative) pair becomes a triplet.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    # NOTE(review): raises ZeroDivisionError when no (pos, neg) pair was observed
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw person records into a name -> same-entity-aliases map.

    Records that cleanse to fewer than four name variants are dropped.
    """
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    same_names = {}
    for record in entities:
        cleaned = cleanser.cleanse_data(record)
        if cleaned and len(cleaned) >= num_names:
            same_names[cleaned[0]] = cleaned[1:]
    return same_names
def embedded_representation_model(embedding_layer):
    """Wrap the embedding layer in a tiny Sequential model that flattens
    its output — the untrained baseline encoder for the first ANN pass."""
    wrapper = Sequential()
    wrapper.add(embedding_layer)
    wrapper.add(Flatten())
    return wrapper
def get_hidden_layer(name, net, is_last):
    """Append one hidden layer to `net` and return the new tensor.

    With USE_GRU a GRU(128) is used, returning sequences on every layer
    except the last; otherwise a Dense(128) is used regardless of is_last.
    """
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    # return_sequences=False on the final layer collapses the sequence.
    return GRU(128, return_sequences=not is_last, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-siamese network around one shared base encoder.

    Args:
        embedder: the (frozen) Embedding layer from get_embedding_layer.

    Returns:
        (model, test_positive_model, test_negative_model, inter_model):
        the compiled triplet model, two helper models exposing the raw
        positive/negative distances, and the bare anchor encoder.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Alternative deeper stacks, left disabled by the author:
    # net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    #net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    #net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    """
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # One shared encoder applied to each leg of the triplet.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        # NOTE(review): only two tensors are stacked here yet output_shape says (3, 1) — confirm.
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- Script entry: parse args, prepare data, alternate ANN mining and training ----
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
                    help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
                    help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
                    help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
                    help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
# NOTE(review): the entire argument-handling section below is disabled by the
# triple-quoted string, so only --input is actually honoured.
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
    LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = true
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# fit on train + test together so both share one vocabulary
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# initial test triplets come from the raw (untrained) embedding space
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Alternate between mining triplets with the current encoder and fitting on them.
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # Dummy labels: the custom losses ignore y_true entirely.
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
K.clear_session()
| 21,712 | 37.227113 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-8.20.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
from scipy.stats.mstats import gmean
import argparse
#must fix
# Tokenizer vocabulary cap.
MAX_NB_WORDS = 140000
# Dimensionality of the Kazuma character embeddings.
EMBEDDING_DIM = 100
# Names are padded/truncated to this many tokens.
MAX_SEQUENCE_LENGTH = 10
# Margin used by triplet_loss.
MARGIN=10
# Angle used by angular_loss; passed to tan() as-is
# (NOTE(review): tf.tan expects radians — confirm intended units).
ALPHA=45
DEBUG = False
DEBUG_DATA_LENGTH = 100
# When True, only run the ANN sanity check and exit.
DEBUG_ANN = False
USE_ANGULAR_LOSS=False
# Resolved from the --loss_function CLI argument at startup.
LOSS_FUNCTION=None
# How many approximate nearest neighbours to mine per anchor.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = False
# Model checkpoint path; overwritten by the --model CLI argument.
filepath="weights.best.hdf5"
# Hyper-parameter-optimisation hook: ANN accuracy is dumped here as JSON.
output_file_name_for_hpo = "val_dict_list.json"
def f1score(positive, negative):
    """Compute an F1 score over paired triplet distance predictions.

    A triplet counts as a true positive when its anchor-positive distance is
    no greater than its anchor-negative distance; otherwise it counts as both
    a false negative and a false positive (the ranking is wrong in both
    directions), which is what the original accounting did.

    Args:
        positive: anchor-positive distances, one entry per triplet.
        negative: anchor-negative distances, aligned with ``positive``.

    Returns:
        The F1 score as a float.

    Fix: removed the unused, misspelled local ``fsocre`` that shadowed the
    real ``fscore`` accumulator.
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word in the tokenizer's vocabulary (up to MAX_NB_WORDS) is looked
    up in KazumaCharEmbedding; an all-zero returned vector is reported as a
    lookup failure but still stored.  The layer is not trainable.
    """
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, idx in word_index.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive name lists in *texts*.

    Returns a dict with the same three keys, each holding sequences padded
    to MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath* (trailing newline included) as a list."""
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Read "anchor|positive|negative" triplets from *file_path*.

    Each line must contain at least three pipe-separated fields.  In DEBUG
    mode only the first DEBUG_DATA_LENGTH + 1 lines are consumed (the
    original off-by-one check is preserved).

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists of equal length
        (the third field keeps its trailing newline, as before).

    Fix: the file handle was opened and never closed; a ``with`` block now
    guarantees it is released even if a line is malformed.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split *entities* into (train, test) lists.

    Outside DEBUG mode the input list is shuffled in place first; in DEBUG
    mode the first DEBUG_DATA_LENGTH entities are used unshuffled.
    """
    if DEBUG:
        pool = entities[:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)  # in-place, same side effect as before
        pool = entities
    holdout = int(test_split * len(pool))
    return pool[:-holdout], pool[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss with a fixed angle ALPHA; y_true is ignored.

    ``y_pred[:, 0, 0]`` is the anchor-positive distance and
    ``y_pred[:, 1, 0]`` the distance from the negative to the
    anchor/positive midpoint.
    """
    a_p = y_pred[:, 0, 0]
    n_c = y_pred[:, 1, 0]
    tan_term = K.constant(4) * K.square(tf.tan(K.constant(ALPHA)))
    hinge = K.square(a_p) - tan_term * K.square(n_c)
    return K.mean(K.maximum(K.constant(0), hinge))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet hinge loss (Schroff et al., 2015), fixed margin 0.2.

    ``y_pred[:, 0, 0]`` holds anchor-positive distances, ``y_pred[:, 1, 0]``
    anchor-negative distances; ``y_true`` is ignored.
    """
    pos_dist = y_pred[:, 0, 0]
    neg_dist = y_pred[:, 1, 0]
    hinge = K.square(pos_dist) - K.square(neg_dist) + K.constant(0.2)
    return K.mean(K.maximum(K.constant(0), hinge))
def triplet_loss(y_true, y_pred):
    """Contrastive-style triplet loss: pull positives to zero distance and
    hinge negatives out to MARGIN; y_true is ignored."""
    pos = y_pred[:, 0, 0]
    neg = y_pred[:, 1, 0]
    repulsion = K.maximum(K.constant(MARGIN) - neg, K.constant(0))
    return K.mean(K.square(pos) + K.square(repulsion))
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed triplet loss: minimise tanh(pos) and maximise tanh(neg)."""
    pos = K.tanh(y_pred[:, 0, 0])
    neg = K.tanh(y_pred[:, 1, 0])
    return K.mean(pos + (K.constant(1) - neg))
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh triplet loss that also pushes the positive away from the negative.

    The anchor-negative and positive-negative repulsion terms are averaged
    so both contribute equally.
    """
    a_p = K.tanh(y_pred[:, 0, 0])
    a_n = K.tanh(y_pred[:, 1, 0])
    p_n = K.tanh(y_pred[:, 2, 0])
    repulsion = ((K.constant(1) - a_n) + (K.constant(1) - p_n)) / K.constant(2)
    return K.mean(a_p + repulsion)
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss ("Deep Metric Learning with Improved Triplet
    Loss for Face Clustering in Videos"): a margin term plus a small
    penalty that pulls the anchor-positive distance under a threshold."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    a_p = y_pred[:, 0, 0]
    a_n = y_pred[:, 1, 0]
    p_n = y_pred[:, 2, 0]
    push_term = a_p - ((a_n + p_n) / K.constant(2)) + margin
    pull_term = a_p - threshold
    return K.maximum(K.constant(0), push_term) + lambda_p * K.maximum(K.constant(0), pull_term)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive distance beats the negative one."""
    pos = y_pred[:, 0, 0]
    neg = y_pred[:, 1, 0]
    return K.mean(pos < neg)
def l2Norm(x):
    """L2-normalise *x* along its last axis."""
    normalised = K.l2_normalize(x, axis=-1)
    return normalised
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two batched tensors.

    Clipped below by K.epsilon() so the sqrt gradient stays finite.
    """
    left, right = vects
    sq_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive centroid."""
    anchor, positive, negative = vects
    centroid = (anchor + positive) / K.constant(2)
    sq_sum = K.sum(K.square(negative - centroid), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive distance; the negative input is accepted but unused
    (it keeps the signature symmetric with n_c_angular_distance)."""
    anchor, positive, _negative = vects
    sq_sum = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases mapping into a list of name strings.

    Returns ``(unique_text, entity2index)``: ``unique_text`` holds every key
    followed by its aliases in mapping order, and ``entity2index`` maps each
    string back to its position in ``unique_text``.
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
predictions = model.predict(sequences)
t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
t.set_seed(123)
for i in range(len(predictions)):
# print(predictions[i])
v = predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
match = 0
no_match = 0
ann_accuracy = 0
total = 0
precise = 0
triplets = {}
closest_positive_counts = []
pos_distances = []
neg_distances = []
all_pos_distances = []
all_neg_distances = []
triplets['anchor'] = []
triplets['positive'] = []
triplets['negative'] = []
if test:
NNlen = TEST_NEIGHBOR_LEN
else:
NNlen = TRAIN_NEIGHBOR_LEN
for key in entity2same:
index = entity2unique[key]
nearest = t.get_nns_by_vector(predictions[index], NNlen)
nearest_text = set([unique_text[i] for i in nearest])
expected_text = set(entity2same[key])
# annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
if key in nearest_text:
nearest_text.remove(key)
# print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
overlap = expected_text.intersection(nearest_text)
# collect up some statistics on how well we did on the match
m = len(overlap)
match += m
# since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
# make sure we adjust our estimate of no match appropriately
no_match += min(len(expected_text), NNlen - 1) - m
# sample only the negatives that are true negatives
# that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
# positives = expected_text - nearest_text
positives = overlap
negatives = nearest_text - expected_text
# print(key + str(expected_text) + str(nearest_text))
for i in negatives:
for j in positives:
dist_pos = t.get_distance(index, entity2unique[j])
pos_distances.append(dist_pos)
dist_neg = t.get_distance(index, entity2unique[i])
neg_distances.append(dist_neg)
if dist_pos < dist_neg:
ann_accuracy += 1
total += 1
# print(key + "|" + j + "|" + i)
# print(dist_pos)
# print(dist_neg)
min_neg_distance = 1000000
for i in negatives:
dist_neg = t.get_distance(index, entity2unique[i])
all_neg_distances.append(dist_neg)
if dist_neg < min_neg_distance:
min_neg_distance = dist_neg
for j in expected_text:
dist_pos = t.get_distance(index, entity2unique[j])
all_pos_distances.append(dist_pos)
closest_pos_count = 0
for p in overlap:
dist_pos = t.get_distance(index, entity2unique[p])
if dist_pos < min_neg_distance:
closest_pos_count+=1
if closest_pos_count > 0:
precise+=1
closest_positive_counts.append(closest_pos_count / min(len(expected_text), NNlen - 1))
for i in negatives:
for j in expected_text:
triplets['anchor'].append(key)
triplets['positive'].append(j)
triplets['negative'].append(i)
print("mean closest positive count:" + str(statistics.mean(closest_positive_counts)))
print("mean positive distance:" + str(statistics.mean(pos_distances)))
print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
print("max positive distance:" + str(max(pos_distances)))
print("mean neg distance:" + str(statistics.mean(neg_distances)))
print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
print("max neg distance:" + str(max(neg_distances)))
print("mean all positive distance:" + str(statistics.mean(all_pos_distances)))
print("stdev all positive distance:" + str(statistics.stdev(all_pos_distances)))
print("max all positive distance:" + str(max(all_pos_distances)))
print("mean all neg distance:" + str(statistics.mean(all_neg_distances)))
print("stdev all neg distance:" + str(statistics.stdev(all_neg_distances)))
print("max all neg distance:" + str(max(all_neg_distances)))
print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
print("Precision at 1: " + str(precise / len(entity2same)))
obj = {}
obj['accuracy'] = ann_accuracy / total
obj['steps'] = 1
with open(output_file_name_for_hpo, 'w') as out:
json.dump(obj, out)
if test:
return match/(match + no_match)
else:
return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings into a {canonical: aliases} mapping.

    People are cleansed with NameDataCleanser and must yield at least 4 name
    variants; companies use CompanyDataCleanser and need at least 2.
    Entities whose cleansing fails or yields too few variants are dropped.
    """
    if people:
        required = 4
        cleanser = NameDataCleanser(0, required, limit_pairs=limit_pairs)
    else:
        cleanser = CompanyDataCleanser(limit_pairs)
        required = 2
    entity2same = {}
    for raw in entities:
        variants = cleanser.cleanse_data(raw)
        if variants and len(variants) >= required:
            entity2same[variants[0]] = variants[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap *embedding_layer* in a Sequential model that flattens its output.

    Used to embed names with the raw (untrained) character embeddings.
    """
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    """Assemble and compile the triplet-siamese network.

    Builds a shared base model (embedding -> stacked GRUs, optionally
    L2-normalised), applies it to anchor/positive/negative inputs, stacks
    the pairwise distances, and compiles with the configured loss
    (angular_loss when USE_ANGULAR_LOSS, else LOSS_FUNCTION).

    Args:
        embedder: the (frozen) Embedding layer from get_embedding_layer.

    Returns:
        (model, test_positive_model, test_negative_model, inter_model):
        the trainable triplet model, two models exposing the raw
        anchor-positive / anchor-negative distances for evaluation, and the
        single-input anchor embedding model used for ANN indexing.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # NUM_LAYERS GRUs that keep the sequence, then one final GRU that
    # collapses it to a fixed-size 128-dim vector.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # The same base model (shared weights) embeds all three inputs.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # Angular loss consumes (anchor-positive, negative-to-centroid).
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third slot carries the positive-negative distance for losses
        # (e.g. improved_loss) that use it; others simply ignore it.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---------------------------------------------------------------------------
# Command-line interface
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
parser.add_argument('--model', type=str, help='name for model file')
args = parser.parse_args()
# Checkpoints are written to the user-supplied model path.
filepath = args.model
# ---------------------------------------------------------------------------
# Resolve module-level configuration from the parsed arguments
# ---------------------------------------------------------------------------
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
# NOTE(review): if --loss_function is omitted, this concatenation raises a
# TypeError (args.loss_function is None) — confirm the flag is mandatory.
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
print('Use L2Norm: ' + str(args.use_l2_norm))
# The CLI value counts the final (non-return-sequences) GRU as well, so
# build_model's loop runs one fewer iteration.
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# Fit one vocabulary over train + test so indices are consistent everywhere.
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    # Sanity-check the ANN pipeline on raw embeddings, then quit.
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# Initial test triplets come from the raw (untrained) embedding space.
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Single training pass (this variant of the script does not loop): mine
# triplets with the ANN index, fit the model, then report train/test F1
# and matching statistics.
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Dummy labels: the triplet losses ignore y_true, but Keras still requires
# a target whose first dimension matches the batch size.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 21,235 | 36.061082 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/ANNBasedSampleSelection.py | import Named_Entity_Recognition_Modified
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from matcher_functions import connect
import argparse
import numpy as np
from keras.layers import Embedding, Concatenate
from keras.models import Model
import names_cleanser
from random import randint
import keras.backend as K
import sys
# Extra distance a random entity must exceed (beyond the farthest nearest-
# neighbour negative) before it is emitted as an "easy" negative sample.
MARGIN = 2
# When True, only the first 100 alias rows are processed.
DEBUG = False
def process_aliases(con, meta):
    """Load alias rows from the database and group them by entity id.

    Names that appear under more than one entity id are dropped entirely,
    since they would teach the model contradictory distances.

    Args:
        con, meta: open SQLAlchemy connection/metadata (see connect()).

    Returns:
        (entities, entity2names): a flat list of name strings, and a map
        from entity id to the list of indices into *entities* for that
        entity's names.
    """
    aliases = Named_Entity_Recognition_Modified.get_aliases_with_ids(con, meta)
    entity2sames = {}
    namesToIds = {}
    # True when *name* was already seen under a different entity id; also
    # records the id for subsequent lookups.
    def has_difft_id(name, entityid):
        if name in namesToIds:
            ids = namesToIds[name]
            if ids != entityid:
                return True
        namesToIds[name] = entityid
        return False
    i = 0
    for row in aliases:
        i += 1
        if DEBUG and i > 100:
            break
        entityid = row[2]
        # filter out names that are associated with multiple ids, this will confuse the model trying to learn the distance function
        # NOTE(review): row[2] is the entity id itself, so the third
        # has_difft_id call compares the id against itself — confirm whether
        # a different column was intended.
        if has_difft_id(row[0], entityid) or has_difft_id(row[1], entityid) or has_difft_id(row[2], entityid):
            continue
        if entityid not in entity2sames:
            entity2sames[entityid] = [row[0]]
            entity2sames[entityid].append(row[1])
        else:
            entity2sames[entityid].append(row[1])
    # print(entity2sames)
    entities = []
    entity2names = {}
    for e,v in entity2sames.items():
        entity2names[e] = [len(entities) + i for i in range(0, len(v))]
        entities.extend(v)
    print(entities)
    print(entity2names)
    return entities, entity2names
if __name__ == '__main__':
    print('Processing text dataset')
    parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
    parser.add_argument('-u', dest="user", help="username")
    parser.add_argument('-p', dest="password", help="password")
    parser.add_argument('-d', dest="db", help="dbname")
    parser.add_argument('-o', dest="output_file", help="output file name")
    parser.add_argument('-a', dest="num_pairs", help="number of same pairs in db", nargs='?', default=2, type=int)
    args = parser.parse_args()
    #change to get from sql and not read from file
    con, meta = connect(args.user, args.password, args.db)
    # get all names first
    entities, entity2names = process_aliases(con, meta)
    # Vectorize all names with a shared tokenizer.
    tokenizer = Tokenizer(num_words=Named_Entity_Recognition_Modified.MAX_NB_WORDS)
    tokenizer.fit_on_texts(entities)
    sequences = tokenizer.texts_to_sequences(entities)
    print(sequences)
    sequences = pad_sequences(sequences, maxlen=Named_Entity_Recognition_Modified.MAX_SEQUENCE_LENGTH)
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    # Build a frozen Kazuma character-embedding matrix for the vocabulary.
    embedding_matrix = np.zeros((num_words, Named_Entity_Recognition_Modified.EMBEDDING_DIM))
    kz = KazumaCharEmbedding()
    for word, i in word_index.items():
        if i >= Named_Entity_Recognition_Modified.MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    # note that we set trainable = False so as to keep the embeddings fixed
    Named_Entity_Recognition_Modified.check_for_zeroes(embedding_matrix, "here is the first pass")
    embedding_layer = Embedding(num_words,
                                Named_Entity_Recognition_Modified.EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=Named_Entity_Recognition_Modified.MAX_SEQUENCE_LENGTH,
                                trainable=False)
    model = Named_Entity_Recognition_Modified.embedded_representation(embedding_layer)
    embedded_output = model.predict(sequences)
    print(np.shape(embedded_output))
    # NOTE(review): this unconditional exit makes everything below dead code —
    # confirm whether it is a leftover debugging statement.
    sys.exit(0)
    # Index all embeddings with Annoy, then mine triplets per entity.
    t = AnnoyIndex(len(embedded_output[0]), metric='euclidean')
    for i in range(len(embedded_output)):
        v = embedded_output[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    with open(args.output_file, 'w') as f:
        for e, v in entity2names.items():
            index_for_same = entity2names[e]
            anchor_index = index_for_same[0]
            nearest = t.get_nns_by_vector(embedded_output[anchor_index], 10)
            maximum_diff = -1
            minimum_same = 100000
            maximum_same = -1
            same_pair_in_NN_set = False
            # Distance range between the anchor and its known aliases.
            for i in range(1, len(index_for_same)):
                dist = t.get_distance(anchor_index, index_for_same[i])
                print("same pair:" + entities[anchor_index] + "-" + entities[index_for_same[i]] + " distance:" + str(dist))
                minimum_same = min(dist, minimum_same)
                maximum_same = max(dist, maximum_same)
            # Scan nearest neighbours: mark aliases, and emit a triplet for
            # any non-alias neighbour closer than the nearest alias.
            for i in nearest:
                if i == anchor_index:
                    continue
                dist = t.get_distance(anchor_index, i)
                print(entities[anchor_index] + "-" + entities[i] + " distance:" + str(dist))
                if i in index_for_same:
                    same_pair_in_NN_set = True
                else:
                    maximum_diff = max(dist, maximum_diff)
                    if dist > minimum_same:
                        f.write(entities[anchor_index] + "|" + entities[index_for_same[randint(1, len(index_for_same) - 1)]] + "|" + entities[i] + "\n")
            if (maximum_diff < minimum_same):
                print("hard entity because maximum different is less than minimum same")
                continue
            elif same_pair_in_NN_set:
                print("easy entity - same pair is in NN set")
            else:
                print("~hard entity" + entities[anchor_index])
            # write a set of different completely items now
            print(maximum_same)
            j = 0
            while j <= 30:
                k = randint(0, len(entities) - 1)
                if t.get_distance(anchor_index, k) > maximum_diff + MARGIN:
                    f.write((entities[anchor_index] + "|" + entities[index_for_same[randint(1, len(index_for_same) - 1)]] + "|" + entities[k] + "\n"))
                k += 1
                j += 1
    print(len(entity2names))
| 6,038 | 32.181319 | 138 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-angular.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
# Fix the hash seed before anything hash-dependent runs (reproducibility).
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# """
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
# Tokenizer vocabulary cap.
MAX_NB_WORDS = 140000
# Dimensionality of the Kazuma character embeddings.
EMBEDDING_DIM = 100
# Names are padded/truncated to this many tokens.
MAX_SEQUENCE_LENGTH = 10
# Margin used by the triplet losses.
MARGIN=1
# Angle used by angular_loss; passed to tan() as-is
# (NOTE(review): tan expects radians — confirm intended units).
ALPHA=30
USE_GRU=True
DEBUG = False
DEBUG_DATA_LENGTH = 100
# When True, only run the ANN sanity check and exit.
DEBUG_ANN = False
USE_ANGULAR_LOSS=True
LOSS_FUNCTION=None
# How many approximate nearest neighbours to mine per anchor.
TRAIN_NEIGHBOR_LEN=20
TEST_NEIGHBOR_LEN=20
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3
USE_L2_NORM = True
def f1score(positive, negative):
    """Compute an F1 score over paired triplet distance predictions.

    A triplet counts as a true positive when its anchor-positive distance is
    no greater than its anchor-negative distance; otherwise it counts as both
    a false negative and a false positive (the ranking is wrong in both
    directions), which is what the original accounting did.

    Args:
        positive: anchor-positive distances, one entry per triplet.
        negative: anchor-negative distances, aligned with ``positive``.

    Returns:
        The F1 score as a float.

    Fix: removed the unused, misspelled local ``fsocre`` that shadowed the
    real ``fscore`` accumulator.
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    fscore = (2 * true_positive) / ((2 * true_positive) + false_negitive + false_positive)
    return fscore
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word in the tokenizer's vocabulary (up to MAX_NB_WORDS) is looked
    up in KazumaCharEmbedding; an all-zero returned vector is reported as a
    lookup failure but still stored.  The layer is not trainable.
    """
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, idx in word_index.items():
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            weights[idx] = vector
    print("Number of words:" + str(vocab_size))
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive name lists in *texts*.

    Returns a dict with the same three keys, each holding sequences padded
    to MAX_SEQUENCE_LENGTH.
    """
    padded = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_entities(filepath):
    """Return every line of *filepath* (trailing newline included) as a list."""
    with open(filepath) as fl:
        return list(fl)
def read_file(file_path):
    """Read "anchor|positive|negative" triplets from *file_path*.

    Each line must contain at least three pipe-separated fields.  In DEBUG
    mode only the first DEBUG_DATA_LENGTH + 1 lines are consumed (the
    original off-by-one check is preserved).

    Returns:
        dict with 'anchor', 'positive' and 'negative' lists of equal length
        (the third field keeps its trailing newline, as before).

    Fix: the file handle was opened and never closed; a ``with`` block now
    guarantees it is released even if a line is malformed.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split *entities* into (train, test) lists.

    Outside DEBUG mode the input list is shuffled in place first; in DEBUG
    mode the first DEBUG_DATA_LENGTH entities are used unshuffled.
    """
    if DEBUG:
        pool = entities[:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)  # in-place, same side effect as before
        pool = entities
    holdout = int(test_split * len(pool))
    return pool[:-holdout], pool[-holdout:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular triplet loss with a fixed angle ALPHA; y_true is ignored.

    ``y_pred[:, 0, 0]`` is the anchor-positive distance and
    ``y_pred[:, 1, 0]`` the distance from the negative to the
    anchor/positive midpoint.

    Fix: the original called ``T.tensor.tan``, but the ``import theano as T``
    at the top of this file is commented out, so evaluating the loss raised
    a NameError.  TensorFlow (imported as ``tf``) provides the same
    elementwise tangent, and the sibling implementation of this loss in the
    companion script already uses ``tf.tan``.

    NOTE(review): ALPHA is passed to tan() as-is; tf.tan expects radians and
    ALPHA=30 looks like degrees — confirm intended units.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:,0,0]
    n_c = y_pred[:,1,0]
    return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet hinge loss (Schroff et al., 2015) with margin MARGIN.

    ``y_pred[:, 0, 0]`` holds anchor-positive distances, ``y_pred[:, 1, 0]``
    anchor-negative distances; ``y_true`` is ignored.
    """
    pos_dist = y_pred[:, 0, 0]
    neg_dist = y_pred[:, 1, 0]
    hinge = K.square(pos_dist) - K.square(neg_dist) + K.constant(MARGIN)
    return K.mean(K.maximum(K.constant(0), hinge))
def triplet_loss(y_true, y_pred):
    """Contrastive-style triplet loss: squared positive distance plus the
    squared shortfall of the negative distance from MARGIN (no hinge, so a
    negative farther than MARGIN is penalised too); y_true is ignored."""
    pos = y_pred[:, 0, 0]
    neg = y_pred[:, 1, 0]
    return K.mean(K.square(pos) + K.square(K.constant(MARGIN) - neg))
# the following triplet loss function is from: Deep Metric Learning with Improved Triplet Loss for
# Face clustering in Videos
def improved_loss(y_true, y_pred):
    """Improved triplet loss ("Deep Metric Learning with Improved Triplet
    Loss for Face Clustering in Videos"): a MARGIN term plus a small
    penalty pulling the anchor-positive distance under a 0.1 threshold."""
    a_p = y_pred[:, 0, 0]
    a_n = y_pred[:, 1, 0]
    p_n = y_pred[:, 2, 0]
    push_term = a_p - ((a_n + p_n) / 2) + K.constant(MARGIN)
    pull_term = a_p - 0.1
    return K.maximum(K.constant(0), push_term) + 0.02 * K.maximum(K.constant(0), pull_term)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive distance beats the negative one."""
    pos = y_pred[:, 0, 0]
    neg = y_pred[:, 1, 0]
    return K.mean(pos < neg)
def l2Norm(x):
    """L2-normalise *x* along its last axis."""
    normalised = K.l2_normalize(x, axis=-1)
    return normalised
def tanhNorm(x):
    """Rescale each row of *x* so its L2 norm is squashed through tanh.

    Rows keep their direction; a row with norm d is rescaled to norm
    tanh(d), which stays bounded in [0, 1).  The norm is clipped below by
    K.epsilon() to keep the division finite.
    """
    norm = K.sqrt(K.maximum(K.sum(K.square(x), axis=-1, keepdims=True), K.epsilon()))
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between two batched tensors.

    Clipped below by K.epsilon() so the sqrt gradient stays finite.
    """
    left, right = vects
    sq_sum = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive centroid."""
    anchor, positive, negative = vects
    centroid = (anchor + positive) / K.constant(2)
    sq_sum = K.sum(K.square(negative - centroid), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive distance; the negative input is accepted but unused
    (it keeps the signature symmetric with n_c_angular_distance)."""
    anchor, positive, _negative = vects
    sq_sum = K.sum(K.square(anchor - positive), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def build_unique_entities(entity2same):
unique_text = []
entity2index = {}
for key in entity2same:
entity2index[key] = len(unique_text)
unique_text.append(key)
vals = entity2same[key]
for v in vals:
entity2index[v] = len(unique_text)
unique_text.append(v)
return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine training triplets via approximate nearest-neighbour search.

    Embeds every unique name with `model`, indexes the vectors in Annoy, and
    for each anchor pairs its expected aliases (positives) with neighbouring
    non-aliases (negatives).  Returns the recall alone when `test` is True,
    otherwise (triplets dict, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    # NOTE(review): this local shadows the module-level accuracy() metric
    # function inside this body; it is the fraction of (pos, neg) pairs that
    # already satisfy dist_pos < dist_neg in the index.
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): statistics.stdev raises with fewer than two samples and
    # the accuracy print divides by `total` — both assume at least one
    # (positive, negative) pair was mined above.  TODO confirm inputs
    # guarantee that.
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw entity strings into an {anchor: aliases} mapping.

    Entities whose cleansed form yields fewer than four name variants are
    dropped; the first variant becomes the anchor key.
    """
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for raw in entities:
        cleaned = cleanser.cleanse_data(raw)
        if cleaned and len(cleaned) >= num_names:
            anchor, *aliases = cleaned
            entity2same[anchor] = aliases
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the embedding layer plus a Flatten into a standalone encoder model."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def get_hidden_layer(name, net, is_last):
    """Append one 128-unit hidden layer to `net`.

    With USE_GRU a recurrent layer is used, returning full sequences except
    for the last layer; otherwise a plain Dense layer.
    """
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    return GRU(128, return_sequences=not is_last, activation='relu', name=name)(net)
def build_model(embedder):
    """Assemble the triplet-Siamese network.

    Builds a shared GRU encoder on top of `embedder`, applies it to anchor /
    positive / negative inputs, stacks the pairwise distances, and compiles
    either the angular-loss or the default variant.

    Returns (training model, anchor-positive distance model,
    anchor-negative distance model, single-input encoder).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # fixed 4-layer GRU stack; the configurable get_hidden_layer() path below
    # is disabled (string-literal block).
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    # if USE_L2_NORM:
    net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # the three towers share the same weights via base_model
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        # NOTE(review): only two tensors are stacked here but output_shape is
        # declared (3, 1) — looks inconsistent; TODO confirm against the
        # angular_loss indexing before enabling USE_ANGULAR_LOSS.
        stacked_dists = Lambda(
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # third channel: positive-to-negative distance for improved-loss style objectives
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
            # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
            lambda vects: K.stack(vects, axis=1),
            name='stacked_dists', output_shape=(3, 1)
        )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # NOTE(review): `modified_loss` is defined earlier in this file
        # (outside this excerpt) — presumably the default objective.
        model.compile(optimizer="rmsprop", loss=modified_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# Command-line configuration for the fuzzy-join training run.
# NOTE(review): argparse with type=bool does NOT parse "False" — any
# non-empty string is truthy; consider action='store_true' for the boolean
# flags (--use_l2_norm, --use_GRU) if they are ever wired back in.
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
                    help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
                    help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
                    help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
                    help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
# cleanse each split into {anchor: aliases}; test pairs are limited
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# fit on train + test vocabulary so both splits share one index space
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
# DEBUG_ANN: evaluate the raw (untrained) embedding index and quit
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# fixed test triplets, mined once from the raw embeddings
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
# Iterative hard-negative mining loop: mine triplets from the current model's
# ANN index, fit, then re-evaluate on the held-out test split.
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # labels are dummies: the stacked-distance losses ignore y_true
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    # subsequent mining uses the trained anchor encoder
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
| 21,847 | 37.329825 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Triplet_Iteration.py | from sys import argv
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
import numpy as np
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
import Named_Entity_Recognition_Modified
# must fix — module-level knobs shared by the functions below
MAX_NB_WORDS = 140000        # tokenizer vocabulary cap
EMBEDDING_DIM = 100          # Kazuma char-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10     # padded token length of each name
DEBUG = False                # truncate input for quick runs
DEBUG_DATA_LENGTH = 100      # number of lines kept in DEBUG mode
def f1score(positive, negative):
    """F1 score over triplet distance pairs.

    A pair counts as a true positive when positive[i] <= negative[i];
    otherwise it counts as both a false negative and a false positive
    (so precision == recall here).  Removes the unused, misspelled
    `fsocre` accumulator and guards the empty-input case, which previously
    raised ZeroDivisionError.

    Args:
        positive: distances anchor->positive, parallel to `negative`.
        negative: distances anchor->negative.
    Returns:
        F1 in [0, 1]; 0.0 for empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_random_image(img_groups, group_names, gid):
    """Pick a random photo filename ("<group><photo>.jpg") from group gid."""
    group = group_names[gid]
    photos = img_groups[group]
    choice = np.random.choice(np.arange(len(photos)), size=1)[0]
    return group + photos[choice] + ".jpg"
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Rows correspond to the tokenizer's word index; words beyond MAX_NB_WORDS
    (or without an embedding) remain all-zero rows.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, row in word_index.items():
        if row >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            embedding_matrix[row] = vector
    return Embedding(num_words,
                     EMBEDDING_DIM,
                     weights=[embedding_matrix],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_tokenizer(texts):
    """Fit a Keras Tokenizer on all anchor, negative and positive strings."""
    corpus = texts['anchor'] + texts['negative'] + texts['positive']
    tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
    tokenizer.fit_on_texts(corpus)
    return tokenizer
def get_sequences(texts, tokenizer):
    """Tokenise and pad each of the anchor/negative/positive text lists."""
    sequences = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenised = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(tokenised, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_file(file_path):
    """Read 'anchor|positive|negative' triplets, one per line.

    The trailing newline is stripped from the negative field.  In DEBUG mode
    only the first DEBUG_DATA_LENGTH lines are kept.  The file handle is now
    closed deterministically via a context manager (the original leaked it).
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            # removes the new line charecter at the end
            texts['negative'].append(line_array[2][:-1])
            # i was incremented before the check in the original, hence i + 1
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def get_test(texts, sequences, percent):
    """Shuffle the triplet sequences and split off `percent` as a test set.

    Returns (train_sequences, test_sequences, shuffled_texts).  Note that
    `texts` values are converted to numpy arrays in place.
    """
    roles = ('anchor', 'positive', 'negative')
    n_samples = sequences['anchor'].shape[0]
    indices = np.arange(n_samples)
    np.random.shuffle(indices)
    shuffled = {role: sequences[role][indices] for role in roles}
    n_test = int(percent * n_samples)
    ret_train = {role: shuffled[role][:-n_test] for role in roles}
    ret_test = {role: shuffled[role][-n_test:] for role in roles}
    ret_texts = {}
    for role in roles:
        texts[role] = np.array(texts[role])
        ret_texts[role] = texts[role][indices]
    return ret_train, ret_test, ret_texts
def triplet_loss(y_true, y_pred):
    """Hinged triplet loss with a fixed margin of 1 over stacked [pos, neg] distances."""
    margin = K.constant(1)
    gap = K.square(y_pred[:, 0, 0]) - K.square(y_pred[:, 1, 0]) + margin
    return K.mean(K.maximum(K.constant(0), gap))
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive is closer than the negative."""
    d_pos = y_pred[:, 0, 0]
    d_neg = y_pred[:, 1, 0]
    return K.mean(d_pos < d_neg)
def l2Norm(x):
    """L2-normalise along the last axis."""
    unit = K.l2_normalize(x, axis=-1)
    return unit
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors."""
    a, b = vects
    sq_sum = K.sum(K.square(a - b), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def assign_triplets(data, model):
    """Re-mine negatives for each anchor using an Annoy nearest-neighbour index.

    Builds an index over all unique (anchor + positive + negative) sequences,
    then for each anchor pops one of its 5 nearest neighbours as the new
    negative.  Returns a new triplet dict of numpy arrays.
    """
    sequences = np.concatenate((data['positive'], data['negative']))
    unique_sequence = []
    anchor_place = {}
    place_to_item = {}
    unique_set = set([])
    # dedupe anchors first so anchor_place covers every anchor row
    for item in data['anchor']:
        print(item)
        item2 = tuple(item)
        if item2 not in unique_set:
            anchor_place[item2] = len(unique_sequence)
            place_to_item[len(unique_sequence)] = item
            unique_sequence.append(item)
            unique_set.add(item2)
    for item in sequences:
        item2 = tuple(item)
        if item2 not in unique_set:
            place_to_item[len(unique_sequence)] = item
            unique_sequence.append(item)
            unique_set.add(item2)
    #make annoy index
    unique_sequence = np.array(unique_sequence)
    predictions = model.predict(unique_sequence)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    #create nearest neighbors list
    anchor_to_nearest = {}
    new_data = {}
    new_data['anchor'] = []
    new_data['positive'] = []
    new_data['negative'] = []
    index = 0
    # each anchor's neighbour list is fetched lazily on first sight, then
    # consumed one neighbour per occurrence of that anchor
    while index < len(data['anchor']):
        name = data['anchor'][index]
        hash_name = tuple(name)
        if hash_name in anchor_to_nearest:
            if anchor_to_nearest[hash_name]:
                new_data['anchor'].append(name)
                # NOTE(review): the popped neighbour may be the anchor itself
                # or a positive — no filtering is applied here.
                new_data['negative'].append(place_to_item[anchor_to_nearest[hash_name].pop()])
                new_data['positive'].append(data['positive'][index])
            index += 1
        else:
            anchor_to_nearest[hash_name] = t.get_nns_by_item(anchor_place[hash_name], 5)
    new_data['anchor'] = np.array(new_data['anchor'])
    new_data['positive'] = np.array(new_data['positive'])
    new_data['negative'] = np.array(new_data['negative'])
    return new_data
def do_annoy(model, texts, tokenizer, verbose):
    """Evaluate the encoder by nearest-neighbour recall over anchor/positive pairs.

    Builds an Annoy index over the embeddings of all unique names, queries the
    5 nearest neighbours for each anchor and prints per-query matches plus an
    overall match / no-match tally.
    """
    unique_text = []
    entity_idx = []
    entity2same = {}
    # group each anchor with its distinct positives, remembering the anchor's
    # position in unique_text
    for i in range(len(texts['anchor'])):
        if not texts['anchor'][i] in entity2same:
            entity2same[texts['anchor'][i]] = []
            entity_idx.append(len(unique_text))
            unique_text.append(texts['anchor'][i])
        l = entity2same[texts['anchor'][i]]
        if texts['positive'][i] not in l:
            entity2same[texts['anchor'][i]].append(texts['positive'][i])
            unique_text.append(texts['positive'][i])
    print(entity2same)
    print(unique_text)
    sequences = tokenizer.texts_to_sequences(unique_text)
    sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    for index in entity_idx:
        nearest = t.get_nns_by_vector(predictions[index], 5)
        print(nearest)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[unique_text[index]])
        # NOTE(review): set.remove raises KeyError if the query is not among
        # its own 5 nearest neighbours — TODO confirm that cannot happen here.
        nearest_text.remove(unique_text[index])
        print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        if verbose:
            print([t.get_distance(index, i) for i in nearest])
        overlap = expected_text.intersection(nearest_text)
        print(overlap)
        m = len(overlap)
        match += m
        no_match += len(expected_text) - m
    print("match: {} no_match: {}".format(match, no_match))
def print_deb_data(debbuging_data):
    """Dump the captured debug texts and their tokenised sequences side by side."""
    for i in range(debbuging_data['number']):
        texts = debbuging_data['texts']
        seqs = debbuging_data['sequences']
        print('anch: --{}-- pos:--{}-- neg:--{}--'.format(texts['anchor'][i], texts['positive'][i], texts['negative'][i]))
        print('sequences: anch: --{}-- pos:--{}-- neg:--{}--'.format(seqs['anchor'][i], seqs['positive'][i], seqs['negative'][i]))
def debugging_text_and_sequences(reordered_text, training_data, number):
    """Snapshot the first `number` triplets (raw text + padded sequences) for debugging."""
    debbuging_data = {'number': number, 'sequences': {}, 'texts': {}}
    for role in ('anchor', 'positive', 'negative'):
        debbuging_data['texts'][role] = [reordered_text[role][i] for i in range(number)]
        debbuging_data['sequences'][role] = [training_data[role][i] for i in range(number)]
    return debbuging_data
# triples_data = create_triples(IMAGE_DIR)
# Load pre-mined 'anchor|positive|negative' triplets from the file named on
# the command line, tokenise them, and carve out a 20% test split.
texts = read_file(argv[1])
print("anchor: {} positive: {} negative: {}".format(texts['anchor'][0], texts['positive'][0], texts['negative'][0]))
tokenizer = get_tokenizer(texts)
print('got tokenizer')
sequences = get_sequences(texts, tokenizer)
train_data, test_data, reordered_text = get_test(texts, sequences, 0.2)
# keep the first 20 triplets around for debugging printouts at the end
debbuging_data = debugging_text_and_sequences(reordered_text, train_data, 20)
number_of_names = len(train_data['anchor'])
print('sequenced words')
# dummy labels: the stacked-distance loss ignores y_true
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
embedder = get_embedding_layer(tokenizer)
print('got embeddings')
# Shared MLP encoder: frozen embedding -> flatten -> 3 dense layers -> L2 norm.
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = Flatten(name='flatten')(net)
net = Dense(128, activation='relu', name='embed')(net)
net = Dense(128, activation='relu', name='embed2')(net)
net = Dense(128, activation='relu', name='embed3')(net)
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
# Siamese wiring: three inputs through the same weights, distances stacked
# into one output tensor consumed by triplet_loss.
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
positive_dist = Lambda(euclidean_distance, name='pos_dist')([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist')([net_anchor, net_negative])
stacked_dists = Lambda(
    lambda vects: K.stack(vects, axis=1),
    name='stacked_dists'
)([positive_dist, negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
# Three rounds of hard-negative re-mining and fitting.
for x in range(3):
    train_data = assign_triplets(train_data, base_model)
    print(train_data)
    number_of_names = len(train_data['anchor'])
    # dummy labels again; the loss reads only the stacked distances
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    print('fitting round' + str(x))
    model.fit([train_data['anchor'], train_data['positive'], train_data['negative']], Y_train, epochs=5, batch_size=15, validation_split=0.25)
# Distance-probe models share weights with the trained network.
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
print("training data predictions")
positives = test_positive_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
negatives = test_negative_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
negatives = test_negative_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
# model.save('triplet_loss_resnet50.h5')
inter_model = Model(input_anchor, net_anchor)
do_annoy(inter_model, texts, tokenizer, False)
print('annoy on embeddings for debbuging_data')
do_annoy(Named_Entity_Recognition_Modified.embedded_representation(embedder), debbuging_data['texts'], tokenizer, True)
print('annoy on full model for debbuging_data')
do_annoy(inter_model, debbuging_data['texts'], tokenizer, True)
print_deb_data(debbuging_data)
fuzzyJoiner | fuzzyJoiner-master/old/ContrastiveLossLSTM-8.20.18.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
# must fix — module-level defaults; several can be overridden via the
# (currently disabled) argparse block further down.
MAX_NB_WORDS = 140000              # tokenizer vocabulary cap
EMBEDDING_DIM = 100                # Kazuma char-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10           # padded token length of each name
MARGIN=10                          # margin used by the contrastive/triplet losses
ALPHA=45                           # angular-loss angle; NOTE(review): tf.tan takes radians — confirm units
DEBUG = False                      # truncate input for quick runs
DEBUG_DATA_LENGTH = 100            # number of entities kept in DEBUG mode
DEBUG_ANN = False                  # evaluate raw embeddings via ANN and exit
USE_ANGULAR_LOSS=False             # switch build_model to the angular-loss branch
LOSS_FUNCTION=None                 # filled in when the argparse block is enabled
TRAIN_NEIGHBOR_LEN=20              # ANN neighbourhood size when mining training triplets
TEST_NEIGHBOR_LEN=20               # ANN neighbourhood size for evaluation
EMBEDDING_TYPE = 'Kazuma'          # input encoding type
NUM_LAYERS = 3                     # hidden-layer count for get_hidden_layer (disabled path)
USE_L2_NORM = False                # whether to L2-normalise the encoder output
output_file_name_for_hpo = "val_dict_list.json"  # HPO results dump target
def f1score(positive, negative):
    """F1 score over triplet distance pairs.

    A pair counts as a true positive when positive[i] <= negative[i];
    otherwise it counts as both a false negative and a false positive
    (so precision == recall here).  Removes the unused, misspelled
    `fsocre` accumulator and guards the empty-input case, which previously
    raised ZeroDivisionError.

    Args:
        positive: distances anchor->positive, parallel to `negative`.
        negative: distances anchor->negative.
    Returns:
        F1 in [0, 1]; 0.0 for empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    if denominator == 0:
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Rows follow the tokenizer's word index; words beyond MAX_NB_WORDS or
    without an embedding stay as all-zero rows.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, row in word_index.items():
        if row >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            embedding_matrix[row] = vector
    print("Number of words:" + str(num_words))
    return Embedding(num_words,
                     EMBEDDING_DIM,
                     weights=[embedding_matrix],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_sequences(texts, tokenizer):
    """Tokenise and pad each of the anchor/negative/positive text lists."""
    sequences = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenised = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(tokenised, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Return every line of the file (newlines preserved) as a list of strings."""
    with open(filepath) as fl:
        return [line for line in fl]
def read_file(file_path):
    """Read 'anchor|positive|negative' triplets, one per line.

    Note: the negative field keeps its trailing newline here, matching the
    original behaviour of this variant.  In DEBUG mode only the first
    DEBUG_DATA_LENGTH lines are kept.  The file handle is now closed
    deterministically via a context manager (the original leaked it).
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # i was incremented before the check in the original, hence i + 1
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Split entities into (train, test) lists.

    In DEBUG mode a fixed prefix is used (no shuffle) for reproducibility;
    otherwise the list is shuffled in place first.  Uses an explicit cut
    index: with the old `ents[:-n]` / `ents[-n:]` slicing, a zero-sized test
    split (small input or test_split == 0) returned ([], everything) instead
    of (everything, []).
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    cut = len(ents) - num_validation_samples
    return ents[:cut], ents[cut:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style loss: squared positive distance plus the hinged squared
    margin shortfall of the negative distance."""
    margin = K.constant(MARGIN)
    pos_term = K.square(y_pred[:, 0, 0])
    neg_term = K.square(K.maximum(margin - y_pred[:, 1, 0], K.constant(0)))
    return K.mean(pos_term + neg_term)
def triplet_tanh_loss(y_true, y_pred):
    """Tanh-squashed objective: tanh(pos_dist) + (1 - tanh(neg_dist))."""
    pos_term = K.tanh(y_pred[:, 0, 0])
    neg_term = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    return K.mean(pos_term + neg_term)
def triplet_tanh_pn_loss(y_true, y_pred):
    """Tanh objective with both anchor-negative and positive-negative repulsion terms averaged."""
    a_p = K.tanh(y_pred[:, 0, 0])
    a_n = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    p_n = K.constant(1) - K.tanh(y_pred[:, 2, 0])
    return K.mean(a_p + (a_n + p_n) / K.constant(2))
# Triplet loss from "Deep Metric Learning with Improved Triplet Loss for
# Face Clustering in Videos": inter-class hinge plus a small intra-class
# penalty keeping anchor-positive distances under a threshold.
def improved_loss(y_true, y_pred):
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    d_ap = y_pred[:, 0, 0]
    d_an = y_pred[:, 1, 0]
    d_pn = y_pred[:, 2, 0]
    phi = d_ap - ((d_an + d_pn) / K.constant(2)) + margin
    psi = d_ap - threshold
    zero = K.constant(0)
    return K.maximum(zero, phi) + lambda_p * K.maximum(zero, psi)
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive is closer than the negative."""
    d_pos = y_pred[:, 0, 0]
    d_neg = y_pred[:, 1, 0]
    return K.mean(d_pos < d_neg)
def l2Norm(x):
    """L2-normalise along the last axis."""
    unit = K.l2_normalize(x, axis=-1)
    return unit
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of batched vectors."""
    a, b = vects
    sq_sum = K.sum(K.square(a - b), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def n_c_angular_distance(vects):
    """Distance from the negative to the anchor/positive midpoint (angular-loss term)."""
    x_a, x_p, x_n = vects
    midpoint = (x_a + x_p) / K.constant(2)
    sq_sum = K.sum(K.square(x_n - midpoint), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def a_p_angular_distance(vects):
    """Anchor-positive Euclidean distance; the negative input is accepted but unused."""
    x_a, x_p, _ = vects
    sq_sum = K.sum(K.square(x_a - x_p), axis=1, keepdims=True)
    return K.sqrt(K.maximum(sq_sum, K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten an entity->aliases mapping into (ordered name list, name->index map).

    Each anchor key is followed immediately by its aliases; indices refer to
    positions in the returned list.
    """
    unique_text = []
    entity2index = {}
    for anchor, aliases in entity2same.items():
        for name in [anchor] + list(aliases):
            entity2index[name] = len(unique_text)
            unique_text.append(name)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Embed every name with `model`, index the embeddings with Annoy, and
    mine training triplets from each anchor's nearest neighbours.

    When `test` is True, returns only the recall-style match rate.
    Otherwise returns (triplets, match_rate) where `triplets` maps
    'anchor'/'positive'/'negative' to parallel lists of name strings.
    Also writes an accuracy summary to `output_file_name_for_hpo`
    (module-level global) as a side effect.
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # Neighbour-list size differs between train and test runs (module globals).
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Measure how often the index already orders positives before negatives.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Emit one triplet per (expected positive, mined negative) combination.
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): statistics.mean/stdev raise if no positives/negatives were
    # mined at all, and `ann_accuracy / total` divides by zero then — confirm
    # callers guarantee at least one mined pair.
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity strings and keep those with enough name variants.

    Returns a dict mapping the first cleansed name (the anchor) to the list
    of its remaining variants. People require at least 4 variants,
    companies at least 2. Entities that cleanse to fewer are dropped.
    """
    if people:
        num_names = 4
        generator = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        generator = CompanyDataCleanser(limit_pairs)
        num_names = 2
    entity2same = {}
    for entity in entities:
        ret = generator.cleanse_data(entity)
        # ret is falsy when the cleanser rejects the entity outright.
        if ret and len(ret) >= num_names:
            entity2same[ret[0]] = ret[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Wrap the shared embedding layer as a standalone model that maps a
    padded token sequence to a flat embedding vector (used to seed the
    Annoy index before the triplet network has been trained)."""
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    return seq
def build_model(embedder):
    """Assemble the triplet-training graph on top of the shared embedder.

    Returns (model, test_positive_model, test_negative_model, inter_model):
    `model` trains on stacked pairwise distances, the two test models expose
    the anchor-positive / anchor-negative distances for evaluation, and
    `inter_model` maps one input sequence to its embedding for ANN indexing.
    Reads module globals MAX_SEQUENCE_LENGTH, NUM_LAYERS, USE_L2_NORM,
    USE_ANGULAR_LOSS and LOSS_FUNCTION.
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    # Stacked GRU encoder; the final GRU collapses the sequence to a vector.
    # NOTE(review): the post-loop GRU reuses loop variable `i`, so this
    # breaks with NameError if NUM_LAYERS == 0 — confirm num_layers >= 2.
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    # The same base_model (shared weights) embeds all three triplet inputs.
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        # NOTE(review): only two tensors are stacked here yet output_shape
        # claims (3, 1) — confirm angular_loss indexes only [:,0,0]/[:,1,0].
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        # Third stacked distance (positive-negative) feeds the pn-aware losses.
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                    # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# ---- command-line interface for the fuzzy-join training run ----
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# ---- resolve module-level configuration from the parsed arguments ----
# Map the loss name to the corresponding loss function defined above.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    # Angular loss changes the model graph too (see build_model).
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
# NOTE(review): --margin is parsed but never assigned to MARGIN here, so the
# printed MARGIN is whatever default was set earlier in the file — confirm.
print('Margin:' + str(MARGIN))
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
# NOTE(review): duplicate print of the same flag (raw arg form).
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
# ---- load entities, split train/test, and build name-variant maps ----
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train, people)
entity2same_test = generate_names(test, people, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
# Fit the tokenizer on train + test vocab so indices are shared, then pad
# every name to MAX_SEQUENCE_LENGTH tokens.
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
# Debug mode: just measure ANN quality on the raw embeddings and exit.
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
# ---- mine triplets, train the siamese network, and report f1 / match stats ----
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
counter = 0
current_model = embedder_model
prev_match_stats = 0
train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
print("Match stats:" + str(match_stats))
number_of_names = len(train_data['anchor'])
# print(train_data['anchor'])
print("number of names" + str(number_of_names))
# Dummy labels: every loss above ignores y_true, so random labels are fine;
# only the shape (one row per triplet) matters to Keras.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
filepath="weights.best.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
callbacks_list = [checkpoint, early_stop]
train_seq = get_sequences(train_data, tokenizer)
# check just for 5 epochs because this gets called many times
model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
current_model = inter_model
# print some statistics on this epoch
print("training data predictions")
positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
print("f1score for train is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
print("f1score for test is: {}".format(f1score(positives, negatives)))
test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
print("Test stats:" + str(test_match_stats))
| 19,575 | 35.86629 | 163 | py |
fuzzyJoiner | fuzzyJoiner-master/old/seq2seq.py | '''Sequence to sequence example in Keras (character-level).
This script demonstrates how to implement a basic character-level
sequence-to-sequence model. We apply it to translating
short English sentences into short French sentences,
character-by-character. Note that it is fairly unusual to
do character-level machine translation, as word-level
models are more common in this domain.
# Summary of the algorithm
- We start with input sequences from a domain (e.g. English sentences)
and corresponding target sequences from another domain
(e.g. French sentences).
- An encoder LSTM turns input sequences to 2 state vectors
(we keep the last LSTM state and discard the outputs).
- A decoder LSTM is trained to turn the target sequences into
the same sequence but offset by one timestep in the future,
a training process called "teacher forcing" in this context.
It uses as initial state the state vectors from the encoder.
Effectively, the decoder learns to generate `targets[t+1...]`
given `targets[...t]`, conditioned on the input sequence.
- In inference mode, when we want to decode unknown input sequences, we:
- Encode the input sequence into state vectors
- Start with a target sequence of size 1
(just the start-of-sequence character)
- Feed the state vectors and 1-char target sequence
to the decoder to produce predictions for the next character
- Sample the next character using these predictions
(we simply use argmax).
- Append the sampled character to the target sequence
- Repeat until we generate the end-of-sequence character or we
hit the character limit.
# Data download
English to French sentence pairs.
http://www.manythings.org/anki/fra-eng.zip
Lots of neat sentence pairs datasets can be found at:
http://www.manythings.org/anki/
# References
- Sequence to Sequence Learning with Neural Networks
https://arxiv.org/abs/1409.3215
- Learning Phrase Representations using
RNN Encoder-Decoder for Statistical Machine Translation
https://arxiv.org/abs/1406.1078
'''
from __future__ import print_function
from keras.models import Model
from keras.layers import Input, LSTM, Dense
import numpy as np
# ---- hyperparameters ----
batch_size = 64 # Batch size for training.
epochs = 100 # Number of epochs to train for.
latent_dim = 256 # Latent dimensionality of the encoding space.
num_samples = 10000 # Number of samples to train on.
# Path to the data txt file on disk.
data_path = 'fra-eng/fra.txt'
# Vectorize the data. Each line is "english<TAB>french".
input_texts = []
target_texts = []
input_characters = set()
target_characters = set()
with open(data_path, 'r', encoding='utf-8') as f:
    lines = f.read().split('\n')
for line in lines[: min(num_samples, len(lines) - 1)]:
    input_text, target_text = line.split('\t')
    # We use "tab" as the "start sequence" character
    # for the targets, and "\n" as "end sequence" character.
    target_text = '\t' + target_text + '\n'
    input_texts.append(input_text)
    target_texts.append(target_text)
    for char in input_text:
        if char not in input_characters:
            input_characters.add(char)
    for char in target_text:
        if char not in target_characters:
            target_characters.add(char)
# Sort the vocabularies so character -> index assignment is deterministic.
input_characters = sorted(list(input_characters))
target_characters = sorted(list(target_characters))
num_encoder_tokens = len(input_characters)
num_decoder_tokens = len(target_characters)
max_encoder_seq_length = max([len(txt) for txt in input_texts])
max_decoder_seq_length = max([len(txt) for txt in target_texts])
print('Number of samples:', len(input_texts))
print('Number of unique input tokens:', num_encoder_tokens)
print('Number of unique output tokens:', num_decoder_tokens)
print('Max sequence length for inputs:', max_encoder_seq_length)
print('Max sequence length for outputs:', max_decoder_seq_length)
input_token_index = dict(
    [(char, i) for i, char in enumerate(input_characters)])
target_token_index = dict(
    [(char, i) for i, char in enumerate(target_characters)])
# One-hot tensors: (samples, timesteps, vocabulary size).
encoder_input_data = np.zeros(
    (len(input_texts), max_encoder_seq_length, num_encoder_tokens),
    dtype='float32')
decoder_input_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
decoder_target_data = np.zeros(
    (len(input_texts), max_decoder_seq_length, num_decoder_tokens),
    dtype='float32')
for i, (input_text, target_text) in enumerate(zip(input_texts, target_texts)):
    for t, char in enumerate(input_text):
        encoder_input_data[i, t, input_token_index[char]] = 1.
    for t, char in enumerate(target_text):
        # decoder_target_data is ahead of decoder_input_data by one timestep
        decoder_input_data[i, t, target_token_index[char]] = 1.
        if t > 0:
            # decoder_target_data will be ahead by one timestep
            # and will not include the start character.
            decoder_target_data[i, t - 1, target_token_index[char]] = 1.
# ---- build and train the teacher-forced encoder-decoder model ----
# Define an input sequence and process it.
encoder_inputs = Input(shape=(None, num_encoder_tokens))
encoder = LSTM(latent_dim, return_state=True)
encoder_outputs, state_h, state_c = encoder(encoder_inputs)
# We discard `encoder_outputs` and only keep the states.
encoder_states = [state_h, state_c]
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(None, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)
decoder_dense = Dense(num_decoder_tokens, activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)
# Define the model that will turn
# `encoder_input_data` & `decoder_input_data` into `decoder_target_data`
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
# Run training
model.compile(optimizer='rmsprop', loss='categorical_crossentropy')
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=batch_size,
          epochs=epochs,
          validation_split=0.2)
# Save model
model.save('s2s.h5')
# Next: inference mode (sampling).
# Here's the drill:
# 1) encode input and retrieve initial decoder state
# 2) run one step of decoder with this initial state
# and a "start of sequence" token as target.
# Output will be the next target token
# 3) Repeat with the current target token and current states
# Define sampling models (they reuse the trained layers' weights).
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(latent_dim,))
decoder_state_input_c = Input(shape=(latent_dim,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
decoder_outputs, state_h, state_c = decoder_lstm(
    decoder_inputs, initial_state=decoder_states_inputs)
decoder_states = [state_h, state_c]
decoder_outputs = decoder_dense(decoder_outputs)
decoder_model = Model(
    [decoder_inputs] + decoder_states_inputs,
    [decoder_outputs] + decoder_states)
# Reverse-lookup token index to decode sequences back to
# something readable.
reverse_input_char_index = dict(
    (i, char) for char, i in input_token_index.items())
reverse_target_char_index = dict(
    (i, char) for char, i in target_token_index.items())
def decode_sequence(input_seq):
    """Greedy-decode one one-hot encoded input sequence into a target string.

    Uses the module-level `encoder_model` / `decoder_model` sampling models;
    decoding stops at the '\\n' end-of-sequence character or at
    max_decoder_seq_length + 1 sampled characters.
    """
    # Encode the input as state vectors.
    states_value = encoder_model.predict(input_seq)
    # Generate empty target sequence of length 1.
    target_seq = np.zeros((1, 1, num_decoder_tokens))
    # Populate the first character of target sequence with the start character.
    target_seq[0, 0, target_token_index['\t']] = 1.
    # Sampling loop for a batch of sequences
    # (to simplify, here we assume a batch of size 1).
    stop_condition = False
    decoded_sentence = ''
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict(
            [target_seq] + states_value)
        # Sample a token (greedy argmax over the softmax output).
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_char = reverse_target_char_index[sampled_token_index]
        decoded_sentence += sampled_char
        # Exit condition: either hit max length
        # or find stop character.
        if (sampled_char == '\n' or
           len(decoded_sentence) > max_decoder_seq_length):
            stop_condition = True
        # Update the target sequence (of length 1).
        target_seq = np.zeros((1, 1, num_decoder_tokens))
        target_seq[0, 0, sampled_token_index] = 1.
        # Update states
        states_value = [h, c]
    return decoded_sentence
# Decode the first 100 training sequences as a qualitative demo.
for seq_index in range(100):
    # Take one sequence (part of the training set)
    # for trying out decoding.
    input_seq = encoder_input_data[seq_index: seq_index + 1]
    decoded_sentence = decode_sequence(input_seq)
    print('-')
    print('Input sentence:', input_texts[seq_index])
print('Decoded sentence:', decoded_sentence) | 9,104 | 40.013514 | 79 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Named_Entity_Recognition_Modified.py | """
This code is modified from
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
and ttps://github.com/fchollet/keras/blob/master/examples/
for our own purposes
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from matcher_functions import connect, get_aliases, load_good_buckets, create_double_num_dicts
from matcher_class import matcher
import os
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Float
import sys
from sys import argv
import numpy as np
from embeddings import KazumaCharEmbedding
import random
from annoy import AnnoyIndex
import random
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras import backend as K
from keras import regularizers
from keras.regularizers import L1L2
from sqlalchemy.sql import select
import random
import argparse
# ---- module configuration ----
BASE_DIR = './Machine_Learning/'
GLOVE_DIR = BASE_DIR + 'glove/'
TEXT_DATA_DIR = BASE_DIR + 'nerData'
# Not referenced in the visible code — presumably a debug switch; verify.
DO_ANN_ON_EMBEDDINGS = False
# number of words an entity is allowed to have
# distribution of number of words in peoples names can be found in peopleNamesDisbn
# distribution of number of words in company names can be found in companyNamesDisbn
# Note most of the names above that are fairly esoteric or just plain noise. Included is
# python code to remove them
MAX_SEQUENCE_LENGTH = 10
# Total number of unique tokens in peoples names is 90K, including a lot of non-English names. To remove those
# we use an egregious hack to check if its UTF-8
# Total number of unique tokens in company names is 37K
# Assuming no overlap between the two we get about 127K. We may need to tweak this parameter as we go
# but according to the Keras documentation, this can even be left unset
MAX_NB_WORDS = 150000
# Size of embeddings from Glove (we will try the 100 dimension encoding to start with)
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.05
def check_for_zeroes(to_check, intro_string):
    """Print every all-zero vector in `to_check` (prefixed by `intro_string`)
    and a summary line: either the zero-vector count or an all-clear."""
    found = 0
    for vector in to_check:
        if sum(vector) != 0:
            continue
        print(intro_string + str(vector))
        found += 1
    if found:
        print(intro_string + ' found this many: '+ str(found))
    else:
        print(intro_string + " no problems found")
def get_diff_names_with_overlap(con, aliases):
    """For each alias row, pick a name from a *different* entity that shares
    at least one word with it — a hard negative for contrastive training.

    `aliases` rows are (alias1, alias2, entityid); the word -> entities map
    is read from the word2entities table via `con`. When no overlapping
    name exists, falls back to the next row's anchor name.
    """
    no_match_texts = []
    texts = []
    # load a mapping from entity id to names
    entityid2names = {}
    for row in aliases:
        if row[2] in entityid2names:
            names = entityid2names[row[2]];
        else:
            names = []
            entityid2names[row[2]] = names
        names.append(row[0])
        names.append(row[1])
        texts.append(row[0])
    print(len(aliases))
    print("getting word2entities")
    # load a mapping from words to entities
    word2entities = {}
    rows = con.execute("select word, entities from word2entities;")
    for row in rows:
        word2entities[row[0]] = row[1]
    for index in range(len(texts)):
        name_arr = texts[index].split()
        new_text = ''
        for n in name_arr:
            # a name part may have been filtered out of word2entities
            if n not in word2entities:
                continue
            # Stop scanning further words once a candidate was found.
            if new_text:
                break
            for e in word2entities[n]:
                if e == texts[index] or e not in entityid2names: # if the entity is the same as this anchor's text skip it
                    continue
                names = entityid2names[e]
                # Take the first name of that entity containing the shared word.
                for x in names:
                    if n in x:
                        new_text = x
                        break
        if new_text:
            no_match_texts.append(new_text)
        else:
            # NOTE(review): this raises IndexError when the *last* anchor has
            # no overlapping candidate (no wrap-around like get_no_match_texts).
            no_match_texts.append(texts[index + 1])
    print("done processing matches with overlap")
    return no_match_texts
def get_diff_names_with_no_overlap(aliases):
    """Return a shuffled pairing of the anchor names with themselves such
    that no name is paired with its own position (a derangement).

    `aliases` rows are indexable; only row[0] (the anchor name) is used.
    Bug fix: the original repaired a fixed point at position i by swapping
    with s[i+1], which raised IndexError whenever the *last* shuffle
    position was a fixed point. We now repair fixed points with a
    wrap-around swap before building the result. With a single-element
    input no derangement exists, so that element maps to itself.
    """
    entitylist = [row[0] for row in aliases]
    n = len(entitylist)
    s = list(range(n))
    random.shuffle(s)
    # Repair fixed points (s[i] == i) by swapping with the next slot,
    # wrapping around at the end. For n > 1 this cannot create a new
    # fixed point: the swapped-in values are distinct from their slots.
    for i in range(n):
        if s[i] == i:
            j = (i + 1) % n
            s[i], s[j] = s[j], s[i]
    return [entitylist[s[i]] for i in range(n)]
# this returns a new set of texts to use as similar non-matches for texts1
def get_no_match_texts(user, password, db, texts1):
    """For each name in texts1, find a bucketed near-miss name that is not a
    known alias pair — falling back to the next name (wrap-around) when the
    bucket lookup yields nothing."""
    def get_non_match(name1, bucket_words, matching_set):
        # Every word of name1 must be bucketed; keep the last word's bucket.
        for word in name1.split(" "):
            if word in bucket_words:
                bucket = bucket_words[word]
            else:
                return None
        # Return the first bucket-mate that is not a known matching pair.
        if len(bucket[1]) > 1:
            for name2 in bucket[1]:
                if (name1, name2[1]) not in matching_set:
                    return name2[1]
        return None
    no_match_texts = []
    # this should not be done here and needs to be fixed up before more work is done
    # it should instead be done by a single function in matcher_functions
    # establish connection to database
    con, meta = connect(user, password, db)
    # load pairs from database
    aliases = get_aliases(con, meta)
    # create dictionaries assigning serial numbers to names and names from serial numbers
    num_to_word, word_to_num = create_double_num_dicts(aliases)
    # load the buckets from the database; bucket_list is arranged as follows:
    # bucket_list[pair_of_buckets][bucket(this must be 0 or 1)][name (this represents a single name)][0 for number and 1 for pre-procced name]
    bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
    for index in range(len(texts1)):
        new_text = get_non_match(texts1[index], bucket_words, aliases)
        if new_text == None:
            # Fall back to the next name, wrapping at the end of the list.
            new_text = texts1[(index + 1) % len(texts1)]
        no_match_texts.append(new_text)
    return no_match_texts
def euclidean_distance(vects):
    # Row-wise Euclidean distance between the two branch outputs;
    # K.epsilon() under the sqrt keeps the gradient finite when x == y.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output_shape helper for the distance Lambda: one scalar per
    batch row, so (batch, 1) regardless of the feature dimension."""
    first, _ = shapes
    return (first[0], 1)
def contrastive_loss(y_true, y_pred):
    '''Contrastive loss from Hadsell-et-al.'06
    http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    Equation 4

    Label convention here (see create_pairs): y_true == 0 marks a matching
    pair (distance is pulled to 0), y_true == 1 a non-matching pair
    (distance is pushed beyond the margin).
    '''
    margin = 1
    return K.mean((1 - y_true) * K.square(y_pred) +
                  y_true * K.square(K.maximum(margin - y_pred, 0)))
# for now will take any bad pairs, will take only relevant ones later
def create_pairs(x, y, z):
    """Interleave (anchor, positive) and (anchor, negative) pairs.

    x/y/z are parallel sequences of anchors, matches and non-matches.
    Returns (pairs, labels) as numpy arrays; label 0 marks a matching
    pair, label 1 a non-matching pair.
    """
    pairs = []
    labels = []
    for idx in range(len(x)):
        pairs.append([x[idx], y[idx]])
        pairs.append([x[idx], z[idx]])
        labels.extend([0, 1])
    return np.array(pairs), np.array(labels)
def create_base_network(input_dim, embedding_layer, reg):
    '''Base network to be shared (eq. to feature extraction).

    Embedding -> Flatten -> three Dense(128, relu) layers with dropout.
    Returns (model, final_dense_layer). `reg` is currently unused — the
    kernel_regularizer lines are commented out below.
    '''
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    seq.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    seq.add(Dropout(0.1))
    seq.add(Dense(128, activation='relu'))
    # kernel_regularizer=reg))
    seq.add(Dropout(0.1))
    final_layer = Dense(128, activation='relu')
    seq.add(final_layer)
    # kernel_regularizer=reg))
    return seq, final_layer
def embedded_representation(embedding_layer):
    # Standalone Embedding -> Flatten model: maps a padded token sequence to
    # a flat embedding vector without any trained layers on top.
    seq = Sequential()
    seq.add(embedding_layer)
    seq.add(Flatten())
    return seq
def compute_accuracy(predictions, labels):
    '''Compute classification accuracy with a fixed threshold on distances.

    A pair counts as correct when its predicted distance is below 0.5 and
    the label is 0 (match), or at/above 0.5 and the label is 1 (non-match).
    '''
    same_correct = 0
    diff_correct = 0
    for i, label in enumerate(labels):
        if predictions[i] < 0.5 and label == 0:
            same_correct += 1
        elif predictions[i] >= 0.5 and label == 1:
            diff_correct += 1
    print("Precision computation: same - " + str(same_correct) + " different: " + str(diff_correct) + " from total: " + str(len(labels)))
    return (same_correct + diff_correct) / len(labels)
def get_aliases_with_ids(con, meta):
    """Fetch every (alias1, alias2, entityid) row from the aliases table,
    ordered by entity id. `meta` is accepted for interface parity with the
    other DB helpers but is not used here."""
    #load pairs from database
    rows = con.execute("select alias1, alias2, entityid from aliases order by entityid;")
    return [(row[0], row[1], row[2]) for row in rows]
def f1score(predictions, labels):
    """F1 score for pair classification at a fixed 0.5 distance threshold.

    `predictions` is an array of distances (anything with .ravel(), e.g. a
    numpy array); label 0 marks a matching pair. A prediction below 0.5
    with label 0 is a true positive, below 0.5 with a nonzero label a
    false positive, and at/above 0.5 with label 0 a false negative.

    Fixes over the original: removed the unused `fsocre` local (typo) and
    guarded the division so an input with no positives at all returns 0.0
    instead of raising ZeroDivisionError.
    """
    predictions = predictions.ravel()
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(labels)):
        if predictions[i] < 0.5:
            if labels[i] == 0:
                true_positive += 1
            else:
                false_positive += 1
        elif labels[i] == 0:
            false_negative += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # No predicted or expected positives: define F1 as 0 rather than crash.
        fscore = 0.0
    else:
        fscore = (2 * true_positive) / denominator
    print(fscore)
    return fscore
#compute accuracy using a rule based matcher
def sequence_to_word(sequence, reverse_word_index):
    """Map a sequence of token ids back to a space-joined string, silently
    skipping ids (e.g. padding zeros) absent from reverse_word_index."""
    known = (reverse_word_index[token] for token in sequence if token in reverse_word_index)
    return " ".join(known)
def sequence_pair_to_word_pair(sequence_pair, reverse_word_index):
    """Decode both halves of a sequence pair back into name strings."""
    left = sequence_to_word(sequence_pair[0], reverse_word_index)
    right = sequence_to_word(sequence_pair[1], reverse_word_index)
    return [left, right]
if __name__ == '__main__':
print('Processing text dataset')
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-u', dest="user", help="username")
parser.add_argument('-p', dest="password", help="password")
parser.add_argument('-d', dest="db", help="dbname")
parser.add_argument('-a', dest="num_pairs", help="number of same pairs in db", nargs='?', default=2, type=int)
args = parser.parse_args()
texts1 = [] # list of text samples in part 1
texts2 = [] # list of text samples in part 2
#change to get from sql and not read from file
con, meta = connect(args.user, args.password, args.db)
aliases = get_aliases_with_ids(con, meta)
unique_aliases = []
# collect up all the anchors that are unique (anchors will get repeated if num_pairs > 2)
prev = int(aliases[0][2])
unique_aliases.append(aliases[0])
for tuple in aliases:
texts1.append(tuple[0])
texts2.append(tuple[1])
if int(tuple[2]) != prev:
unique_aliases.append(tuple)
prev = int(tuple[2])
print('Found %s texts.' % len(texts1))
texts3 = []
print(len(unique_aliases))
print(len(texts1))
print(len(texts2))
# get the different pairs
if args.num_pairs == 2:
print("args num pairs is 2")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
elif args.num_pairs == 3:
print("args num pairs is 3")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
elif args.num_pairs == 4:
print("args num pairs is 4")
texts3.extend(get_diff_names_with_overlap(connect(args.user, args.password, args.db)[0], unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
texts3.extend(get_diff_names_with_no_overlap(unique_aliases))
print(len(texts3))
assert len(texts1) == len(texts2)
assert len(texts2) == len(texts3), str(len(texts3))
# vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
tokenizer.fit_on_texts(texts1 + texts2 + texts3)
# this step should get similar but non-matching items to keep for later matching
# this step creates a sequence of words ids for each word in each label
sequences1 = tokenizer.texts_to_sequences(texts1)
for sc in range(len(texts1)):
if sum(sequences1[sc]) == 0:
print('here is a problem word :' + texts1[sc] + '::')
sequences2 = tokenizer.texts_to_sequences(texts2)
no_match_sequences = tokenizer.texts_to_sequences(texts3)
word_index = tokenizer.word_index
check_for_zeroes(sequences1, " sequences")
print('Found %s unique tokens.' % len(word_index))
annoy_data1 = pad_sequences(sequences1, maxlen=MAX_SEQUENCE_LENGTH)
annoy_data2 = pad_sequences(sequences2, maxlen=MAX_SEQUENCE_LENGTH)
no_match_data = pad_sequences(no_match_sequences, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data1 tensor:', annoy_data1.shape)
print('Shape of data2 tensor:', annoy_data2.shape)
# split the data into a training set and a validation set, shuffling items
indices = np.arange(annoy_data1.shape[0])
np.random.shuffle(indices)
texts1 = np.array(texts1)
texts2 = np.array(texts2)
texts3 = np.array(texts3)
texts1 = texts1[indices]
texts2 = texts2[indices]
texts3 = texts3[indices]
# for i in range(len(texts1)):
# print(texts1[i] + " paired with: " + texts2[i])
# print(texts1[i] + " paired with: " + texts3[i])
data1 = annoy_data1[indices]
data2 = annoy_data2[indices]
no_match_data = no_match_data[indices]
num_validation_samples = int(VALIDATION_SPLIT * data1.shape[0])
x_train = data1[:-num_validation_samples]
y_train = data2[:-num_validation_samples]
z_train = no_match_data[:-num_validation_samples]
x_test = data1[-num_validation_samples:]
y_test = data2[-num_validation_samples:]
z_test = no_match_data[-num_validation_samples:]
print('Preparing embedding matrix.')
# prepare embedding matrix
num_words = len(word_index) + 1 # word_index is indexed from 1-N
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
kz = KazumaCharEmbedding()
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
# note that we set trainable = False so as to keep the embeddings fixed
check_for_zeroes(embedding_matrix, "here is the first pass")
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# the data, shuffled and split between train and test sets
#need to change this not sure how
input_dim = MAX_SEQUENCE_LENGTH
epochs = 10
# create training+test positive and negative pairs
# these next lines also need to change
#digit_indices = [np.where(y_train == i)[0] for i in range(10)]
print("x_train {} , y_train {} , z_train {} ".format(x_train, y_train, z_train))
tr_pairs, tr_y = create_pairs(x_train, y_train, z_train)
#digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(x_test, y_test, z_test)
print (len(tr_y))
# network definition
base_network, final_layer = create_base_network(input_dim, embedding_layer, L1L2(0.0,0.0))
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
print(base_network.summary())
# train
rms = RMSprop()
#change the optimizer (adam)
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs)
# compute final accuracy on training and test sets
#add an LSTM layer (later)
# testpairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
# [lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
# matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_learning = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
# out = model.layers[2].get_output_at(0)
# inp = model.input
# func = K.function([inp], [out]) # evaluation functions
# print("here should be a vector")
# print(func([tr_pairs[:, 0][0], tr_pairs[:, 1][0]]))
# Testing
# print (layer_outs)
tr_acc = compute_accuracy(pred_learning, tr_y)
tr_f1 = f1score(pred_learning, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
pred_learning = np.append(pred_learning, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
x_test_text = texts1[-num_validation_samples:]
y_test_text = texts2[-num_validation_samples:]
z_test_text = texts3[-num_validation_samples:]
text_pairs, text_y = create_pairs(x_test_text, y_test_text, z_test_text)
# for i in range(len(text_pairs)):
# print(str(text_pairs[i]))
# print(pred[i])
# print(model.predict([np.array([te_pairs[i, 0]]), np.array([te_pairs[i, 1]])]))
# from https://github.com/spotify/annoy
f = 128
if DO_ANN_ON_EMBEDDINGS:
inter_model = embedded_representation(embedding_layer)
else:
inter_model = Model(input_a, processed_a)
intermediate_output1 = inter_model.predict(x_test)
intermediate_output2 = inter_model.predict(y_test)
intermediate_output3 = inter_model.predict(z_test)
mid_predictions = np.concatenate((intermediate_output1, intermediate_output2, intermediate_output3))
# print(mid_predictions[0])
# print (len(mid_predictions[0]))
if DO_ANN_ON_EMBEDDINGS:
t = AnnoyIndex(len(mid_predictions[0]), metric='euclidean') # Length of item vector that will be indexed
else:
t = AnnoyIndex(f, metric='euclidean') # Length of item vector that will be indexed
for i in range(len(mid_predictions)):
v = mid_predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
t.save('test.ann')
# ...
all_texts = np.concatenate((x_test_text, y_test_text, z_test_text))
match = 0
no_match = 0
for index in range(len(x_test_text)):
nearest = t.get_nns_by_vector(mid_predictions[index], 5)
# print(nearest)
nearest_text = [all_texts[i] for i in nearest]
# print("query={} names = {} true_match = {} reject= {}".format(x_test_text[index], nearest_text, y_test_text[index], z_test_text[index]))
for i in nearest:
# print(all_texts[i])
if i >= len(x_test_text) and (i < len(x_test_text) + len(y_test_text)):
arr = np.array([y_test[i - len(x_test_text)]])
elif i >= len(x_test_text) + len(y_test_text):
arr = np.array([z_test[i - len(x_test_text) - len(y_test_text)]])
else:
arr = np.array([x_test[i]])
# print(model.predict([np.array([x_test[index]]), arr]))
# print(t.get_distance(index, i))
# print("true match prediction:")
# print(model.predict([np.array([x_test[index]]), np.array([y_test[index]])]))
# print("true match distance:")
# print(t.get_distance(index, index + len(x_test_text)))
# print("true reject prediction:")
# print(model.predict([np.array([x_test[index]]), np.array([z_test[index]])]))
# print("true reject distance:")
# print(t.get_distance(index, index + len(x_test_text) + len(y_test_text)))
if y_test_text[index] in nearest_text:
match += 1
# print("MATCH FOUND")
else:
no_match += 1
print("match: {} no_match: {}".format(match, no_match))
print("Machine Learning Accuracy")
print(tr_acc)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
reverse_word_index = {v: k for k, v in tokenizer.word_index.items()}
print(tr_pairs)
print(sequence_to_word(tr_pairs[0][0], reverse_word_index))
print(sequence_to_word(tr_pairs[1][1], reverse_word_index))
print(tr_y[0])
test_pairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
[lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
matcher = matcher(args.user, args.password, args.db, test_pairs, 1)
pred_rules = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in tr_pairs])
tr_acc = compute_accuracy(pred_rules, tr_y)
tr_f1 = f1score(pred_rules, tr_y)
pred = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in te_pairs])
pred_rules = np.append(pred_rules, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
print("Rule-Based Accuracy")
print('* Accuracy on training set (rules): %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set (rules): %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1score on test set: %0.4f' % (te_f1))
con, meta = connect(args.user, args.password, args.db)
execute_pairs = []
if 'predictions' in meta.tables:
meta.tables['predictions'].drop(con)
predictions = Table('predictions', meta, Column('name1', String), Column('name2', String), Column('rule_predict', Integer), Column('learning_predict', Float), Column('true_pair', Integer), Column('te_or_tr', String), extend_existing=True)
zipping_string = ('name1', 'name2', 'true_pair', 'rule_predict', 'learning_predict', 'te_or_tr')
print(len(tr_y))
print(len(tr_pairs))
print(len(pred_rules))
print(len(pred_learning))
print(len(te_y))
print(len(te_pairs))
for i in range(len(tr_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(tr_pairs[i][1], reverse_word_index), int(tr_y[i]), int(pred_rules[i]), float(pred_learning[i][0].item()), 'tr'))))
offset = len(tr_y)
for i in range(len(te_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(te_pairs[i][1], reverse_word_index), int(te_y[i]), int(pred_rules[offset + i]), float(pred_learning[offset + i][0].item()), 'te'))))
meta.create_all(con)
con.execute(predictions.insert(), execute_pairs)
| 23,976 | 36.289269 | 258 | py |
fuzzyJoiner | fuzzyJoiner-master/old/Named_Entity_Recognition.py | """
This code is modified from
https://github.com/fchollet/keras/blob/master/examples/pretrained_word_embeddings.py
and ttps://github.com/fchollet/keras/blob/master/examples/
for our own purposes
"""
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
from matcher_functions import connect, get_aliases, load_good_buckets, create_double_num_dicts
from matcher_class import matcher
import os
from sqlalchemy import Table, Column, Integer, String, ForeignKey, Float
import sys
from sys import argv
import numpy as np
from embeddings import KazumaCharEmbedding
import random
from annoy import AnnoyIndex
import random
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from keras.utils import to_categorical
from keras.optimizers import RMSprop
from keras import backend as K
from keras import regularizers
from keras.regularizers import L1L2
BASE_DIR = './Machine_Learning/'
# directory containing glove encodings from Wikipedia (we can swap this out for another encoding later)
# Download glove.6B.zip from https://nlp.stanford.edu/projects/glove/
GLOVE_DIR = BASE_DIR + 'glove/'
TEXT_DATA_DIR = BASE_DIR + 'nerData'
# number of words an entity is allowed to have
# distribution of number of words in peoples names can be found in peopleNamesDisbn
# distribution of number of words in company names can be found in companyNamesDisbn
# Note most of the names above that are fairly esoteric or just plain noise. Included is
# python code to remove them
MAX_SEQUENCE_LENGTH = 10
# Total number of unique tokens in peoples names is 90K, including a lot of non-English names. To remove those
# we use an egregious hack to check if its UTF-8
# Total number of unique tokens in company names is 37K
# Assuming no overlap between the two we get about 127K. We may need to tweak this parameter as we go
# but according to the Keras documentation, this can even be left unset
MAX_NB_WORDS = 140000
# Size of embeddings from Glove (we will try the 100 dimension encoding to start with)
EMBEDDING_DIM = 100
VALIDATION_SPLIT = 0.05
def check_for_zeroes(to_check, intro_string):
    """Report every vector in *to_check* whose elements sum to zero.

    Each offending vector is printed prefixed by *intro_string*, followed by
    a summary line giving the total count (or a "no problems found" message
    when every vector is non-zero).
    """
    zero_vectors = [vec for vec in to_check if sum(vec) == 0]
    for vec in zero_vectors:
        print(intro_string + str(vec))
    if zero_vectors:
        print(intro_string + ' found this many: ' + str(len(zero_vectors)))
    else:
        print(intro_string + " no problems found")
# first, build index mapping words in the glove embeddings set
# to their embedding vector. This is a straightforward lookup of
# words in Glove and then their embeddings which should be a 100 sized array of floats
# print('Reading word embeddings: Indexing word vectors.')
# embeddings_index = {}
# f = open(os.path.join(GLOVE_DIR, 'glove.6B.100d.txt'))
# for line in f:
# values = line.split()
# word = values[0]
# coefs = np.asarray(values[1:], dtype='float32')
# embeddings_index[word] = coefs
# f.close()
# print('Found %s word vectors.' % len(embeddings_index))
# second, prepare text samples and their labels
print('Processing text dataset')
texts1 = [] # list of text samples in part 1
texts2 = [] # list of text samples in part 2
labels_index = {} # dictionary mapping label name to numeric id - here the label name is just the name of the file in the data dir
name_list = sorted(os.listdir(TEXT_DATA_DIR))
name = name_list[0]
label_id = len(labels_index)
labels_index[name] = label_id
fpath = os.path.join(TEXT_DATA_DIR, name)
if os.path.isdir(fpath):
raise ValueError('bad data directory')
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
#change to get from sql and not read from file
con, meta = connect(argv[1], argv[2], argv[3])
#load pairs from database
aliases = get_aliases(con, meta)
for pair in aliases:
num_tokens = len(pair[0].strip().split(' ')) + len(pair[1].strip().split(' '))
if 0 < num_tokens < MAX_SEQUENCE_LENGTH:
texts1.append(pair[0])
texts2.append(pair[1])
f.close()
#this returns a new set of texts to use as similar non-matches for texts1
def get_no_match_texts(argv, texts1):
    """Return one hard non-match name for every name in *texts1*.

    For each anchor name it looks for another name that shares the anchor's
    word buckets but is not a known alias pair with it (a "near miss").
    When no such candidate exists, the next anchor name in *texts1* is used
    as a fallback so the result list always has len(texts1) entries.

    argv -- command-line args; argv[1:4] are DB user, password, and db name.
    """
    def get_non_match(name1, bucket_words, matching_set):
        # Walk every word of name1; bail out if any word has no bucket.
        for word in name1.split(" "):
            if word in bucket_words:
                bucket = bucket_words[word]
            else:
                return None
        # 'bucket' is the bucket of the LAST word only — earlier buckets are
        # discarded by the loop above. NOTE(review): possibly intentional,
        # possibly a latent bug — confirm against matcher_functions.
        if len(bucket[1]) > 1:
            for name2 in bucket[1]:
                # Accept the first bucket-mate that is not a true alias pair.
                if (name1, name2[1]) not in matching_set:
                    return name2[1]
        return None
    no_match_texts = []
    # TODO: this DB setup duplicates work done at module level and should be
    # moved into a single helper in matcher_functions.
    # Establish connection to the database.
    con, meta = connect(argv[1], argv[2], argv[3])
    # Load the known alias pairs from the database.
    aliases = get_aliases(con, meta)
    # Create dictionaries mapping names to serial numbers and back.
    num_to_word, word_to_num = create_double_num_dicts(aliases)
    # Load the buckets from the database. bucket_list is arranged as:
    # bucket_list[pair_of_buckets][bucket (0 or 1)][name][0 = number, 1 = pre-processed name]
    bucket_list, bucket_words = load_good_buckets('wordtable1', 'wordtable2', word_to_num, con, meta)
    for index in range(len(texts1)):
        new_text = get_non_match(texts1[index], bucket_words, aliases)
        if new_text == None:
            # No near-miss found: fall back to the next anchor (wraps around).
            new_text = texts1[(index + 1) % len(texts1)]
        no_match_texts.append(new_text)
    return no_match_texts
print('Found %s texts.' % len(texts1))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=MAX_NB_WORDS)
#gets the special no match charectars
texts3 = get_no_match_texts(argv, texts1)
#this removes all non-ascii charectars from the 3 sets of strings
texts1 = [str(item) for item in texts1]
texts2 = [str(item) for item in texts2]
texts3 = [str(item) for item in texts3]
# for i in range(len(texts1)):
print ("1 is {}".format(texts1[1]))
print ("2 is {}".format(texts2[1]))
print ("3 is {}".format(texts3[1]))
tokenizer.fit_on_texts(texts1 + texts2 + texts3)
#this step should get similar but non-matching items to keep for later matching
# this step creates a sequence of words ids for each word in each label
sequences1 = tokenizer.texts_to_sequences(texts1)
for sc in range(len(texts1)):
if sum(sequences1[sc]) == 0:
print('here is a problem word :' + texts1[sc] + '::')
sequences2 = tokenizer.texts_to_sequences(texts2)
no_match_sequences = tokenizer.texts_to_sequences(texts3)
word_index = tokenizer.word_index
check_for_zeroes(sequences1, " sequences")
print('Found %s unique tokens.' % len(word_index))
annoy_data1 = pad_sequences(sequences1, maxlen=MAX_SEQUENCE_LENGTH)
annoy_data2 = pad_sequences(sequences2, maxlen=MAX_SEQUENCE_LENGTH)
no_match_data = pad_sequences(no_match_sequences, maxlen=MAX_SEQUENCE_LENGTH)
# print (data1[0])
# print (data2[0])
# print (texts1[0])
# print (texts2[0])
#labels = to_categorical(np.asarray(labels))
print('Shape of data1 tensor:', annoy_data1.shape)
#print('Shape of label tensor:', labels.shape)
print('Shape of data2 tensor:', annoy_data2.shape)
# split the data into a training set and a validation set
indices = np.arange(annoy_data1.shape[0])
np.random.shuffle(indices)
data1 = annoy_data1[indices]
data2 = annoy_data2[indices]
no_match_data = no_match_data[indices]
num_validation_samples = int(VALIDATION_SPLIT * data1.shape[0])
x_train = data1[:-num_validation_samples]
y_train = data2[:-num_validation_samples]
z_train = no_match_data[:-num_validation_samples]
x_test = data1[-num_validation_samples:]
y_test = data2[-num_validation_samples:]
z_test = no_match_data[-num_validation_samples:]
texts1 = np.array(texts1)
texts2 = np.array(texts2)
texts3 = np.array(texts3)
texts1 = texts1[indices]
texts2 = texts2[indices]
texts3 = texts3[indices]
print('Preparing embedding matrix.')
# prepare embedding matrix
# num_words = min(MAX_NB_WORDS, len(word_index))
num_words = len(word_index) + 1 # word_index is indexed from 1-N
embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
kz = KazumaCharEmbedding()
for word, i in word_index.items():
if i >= MAX_NB_WORDS:
continue
embedding_vector = kz.emb(word)
# i = 0
# while sum(embedding_vector) == 0 and i <= 1000:
# embedding_vector = k.emb(word)
# i++;
# if i == 1000:
# print("fail")
if embedding_vector is not None:
if sum(embedding_vector) == 0:
print("failed to find embedding for:" + word)
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = embedding_vector
#else:
# print(word + )
# note that we set trainable = False so as to keep the embeddings fixed
check_for_zeroes(embedding_matrix, "here is the first pass")
embedding_layer = Embedding(num_words,
EMBEDDING_DIM,
weights=[embedding_matrix],
input_length=MAX_SEQUENCE_LENGTH,
trainable=False)
print('Training model.')
# train a 1D convnet with global maxpooling
#sequence_input = Input(shape=(MAX_SEQUENCE_LENGTH,), dtype='int32')
#need two of these
#embedded_sequences = embedding_layer(sequence_input)
def euclidean_distance(vects):
    """Row-wise Euclidean distance between the two tensors in *vects*.

    The squared distance is clamped at K.epsilon() before the square root so
    the gradient stays finite when the two vectors coincide.
    """
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def eucl_dist_output_shape(shapes):
    """Keras output-shape helper: the distance output is (batch_size, 1)."""
    batch_shape, _ = shapes
    return (batch_shape[0], 1)
def contrastive_loss(y_true, y_pred):
    """Contrastive loss (Hadsell et al. 2006).

    Pulls matching pairs (y_true == 1) toward zero distance and pushes
    non-matching pairs (y_true == 0) at least *margin* apart.
    See http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
    """
    margin = 1
    positive_term = y_true * K.square(y_pred)
    negative_term = (1 - y_true) * K.square(K.maximum(margin - y_pred, 0))
    return K.mean(positive_term + negative_term)
#for now will take any bad pairs, will take only relivent ones later
def create_pairs(x, y, z):
    """Build alternating positive / negative training pairs.

    For every index i the output contains [x[i], y[i]] labelled 1 (a true
    match) immediately followed by [x[i], z[i]] labelled 0 (a non-match).
    Returns (pairs, labels) as numpy arrays.
    """
    pairs = []
    labels = []
    for idx in range(len(x)):
        pairs.append([x[idx], y[idx]])
        pairs.append([x[idx], z[idx]])
        labels.extend([1, 0])
    return np.array(pairs), np.array(labels)
def create_base_network(input_dim, embedding_layer, reg):
    """Build the shared Siamese branch.

    Embedding -> Flatten -> three 128-unit relu Dense layers with 0.1
    dropout between them. Returns (network, final_dense_layer).
    *reg* is currently unused — the kernel_regularizer calls were disabled.
    """
    final_layer = Dense(128, activation='relu')
    network = Sequential()
    network.add(embedding_layer)
    network.add(Flatten())
    network.add(Dense(128, input_shape=(input_dim,), activation='relu'))
    network.add(Dropout(0.1))
    network.add(Dense(128, activation='relu'))
    network.add(Dropout(0.1))
    network.add(final_layer)
    return network, final_layer
def compute_accuracy(predictions, labels):
    """Fraction of pairs whose thresholded distance agrees with the label.

    A distance below 0.5 is interpreted as a predicted match (label 1).
    """
    predicted_match = predictions.ravel() < 0.5
    return np.mean(np.equal(predicted_match, labels))
def f1score(predictions, labels):
    """F1 score treating a distance below 0.5 as a predicted match.

    predictions -- array of pairwise distances (any shape; flattened).
    labels      -- index-aligned ground-truth labels (1 = match).

    Prints the tp/fp/fn counts and the score (preserving the original
    diagnostic output) and returns the F1 score. Fixes in this revision:
    the unused, misspelled `fsocre` variable is gone, and an input with no
    positives at all now returns 0.0 instead of raising ZeroDivisionError.
    """
    predictions = predictions.ravel()
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(labels)):
        if predictions[i] < 0.5:  # predicted "match"
            if labels[i] == 1:
                true_positive += 1
            else:
                false_positive += 1
        elif labels[i] == 1:  # predicted "no match" but truly a match
            false_negitive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    fscore = (2 * true_positive) / denominator if denominator else 0.0
    print(fscore)
    return fscore
# the data, shuffled and split between train and test sets
#need to change this not sure how
input_dim = MAX_SEQUENCE_LENGTH
epochs = 1
# create training+test positive and negative pairs
# these next lines also need to change
#digit_indices = [np.where(y_train == i)[0] for i in range(10)]
print("x_train {} , y_train {} , z_train {} ".format(x_train, y_train, z_train))
tr_pairs, tr_y = create_pairs(x_train, y_train, z_train)
#digit_indices = [np.where(y_test == i)[0] for i in range(10)]
te_pairs, te_y = create_pairs(x_test, y_test, z_test)
print (len(tr_y))
# network definition
base_network, final_layer = create_base_network(input_dim, embedding_layer, L1L2(0.0,0.0))
input_a = Input(shape=(input_dim,))
input_b = Input(shape=(input_dim,))
# because we re-use the same instance `base_network`,
# the weights of the network
# will be shared across the two branches
processed_a = base_network(input_a)
processed_b = base_network(input_b)
distance = Lambda(euclidean_distance,
output_shape=eucl_dist_output_shape)([processed_a, processed_b])
model = Model([input_a, input_b], distance)
print(base_network.summary())
# train
rms = RMSprop()
#change the optimizer (adam)
model.compile(loss=contrastive_loss, optimizer=rms)
model.fit([tr_pairs[:, 0], tr_pairs[:, 1]], tr_y,
batch_size=128,
epochs=epochs,
validation_data=([te_pairs[:, 0], te_pairs[:, 1]], te_y))
# compute final accuracy on training and test sets
#add an LSTM layer (later)
# testpairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
# [lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
# matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_learning = model.predict([tr_pairs[:, 0], tr_pairs[:, 1]])
# out = model.layers[2].get_output_at(0)
# inp = model.input
# func = K.function([inp], [out]) # evaluation functions
# print("here should be a vector")
# print(func([tr_pairs[:, 0][0], tr_pairs[:, 1][0]]))
# Testing
# print (layer_outs)
tr_acc = compute_accuracy(pred_learning, tr_y)
tr_f1 = f1score(pred_learning, tr_y)
pred = model.predict([te_pairs[:, 0], te_pairs[:, 1]])
pred_learning = np.append(pred_learning, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
x_test_text = texts1[-num_validation_samples:]
y_test_text = texts2[-num_validation_samples:]
z_test_text = texts3[-num_validation_samples:]
for i in range(len(x_test_text)):
print(x_test_text[i] + "|" + y_test_text[i])
print(pred[i])
print(x_test_text[i] + "|" + z_test_text[i])
print(pred[i + 1])
inter_model = Model(input_a, processed_a)
intermediate_output1 = inter_model.predict(annoy_data1)
intermediate_output2 = inter_model.predict(annoy_data2)
mid_predictions = np.concatenate((intermediate_output1, intermediate_output2))
# from https://github.com/spotify/annoy
f = 128
# print(mid_predictions[0])
# print (len(mid_predictions[0]))
t = AnnoyIndex(f, metric='euclidean') # Length of item vector that will be indexed
for i in range(len(mid_predictions)):
v = mid_predictions[i]
t.add_item(i, v)
t.build(100) # 100 trees
t.save('test.ann')
# ...
all_texts = np.concatenate((texts1, texts2))
match = 0
no_match = 0
print("shape of annoy data1")
print(annoy_data1[0].shape)
print(tr_pairs[:, 0].shape)
for index in range(len(texts1)):
nearest = t.get_nns_by_vector(mid_predictions[index], 2)
print("query={} names = {} true_match = {}".format(texts1[index], [all_texts[i] for i in nearest], texts2[index]))
for i in nearest:
print(t.get_distance(index, i))
print(model.predict([np.array([annoy_data1[index]]), np.array([annoy_data2[i - len(annoy_data1)]])]))
print(t.get_distance(index, index + len(texts1)))
print(model.predict([np.array([annoy_data1[index]]), np.array([annoy_data2[index]])]))
if (index + len(texts1)) in nearest:
match += 1
else:
no_match += 1
print("match: {} no_match: {}".format(match, no_match))
print("Machine Learning Accuracy")
print(tr_acc)
print('* Accuracy on training set: %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set: %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
#compute accuracy using a rule based matcher
def sequence_to_word(sequence, reverse_word_index):
    """Map token ids back to a space-joined word string.

    Ids missing from *reverse_word_index* (e.g. the 0 padding id) are
    silently skipped.
    """
    known_words = [reverse_word_index[token] for token in sequence if token in reverse_word_index]
    return " ".join(known_words)
def sequence_pair_to_word_pair(sequence_pair, reverse_word_index):
    """Decode both halves of a sequence pair into their word strings.

    Unknown ids (e.g. padding zeros) are dropped, matching
    sequence_to_word's behaviour.
    """
    decoded = []
    for seq in (sequence_pair[0], sequence_pair[1]):
        decoded.append(" ".join(reverse_word_index[t] for t in seq if t in reverse_word_index))
    return decoded
reverse_word_index = {v: k for k, v in tokenizer.word_index.items()}
print(tr_pairs)
print(sequence_to_word(tr_pairs[0][0], reverse_word_index))
print(sequence_to_word(tr_pairs[1][1], reverse_word_index))
print(tr_y[0])
test_pairs = [[lambda x : x.replace(" ", ""), lambda name1, name2 : name1 in name2 or name2 in name1],
[lambda x : set(x.split()), lambda name1, name2 : name1.issubset(name2) or name2.issubset(name1)]]
matcher = matcher(argv[1], argv[2], argv[3], test_pairs, 1)
pred_rules = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in tr_pairs])
tr_acc = compute_accuracy(pred_rules, tr_y)
tr_f1 = f1score(pred_rules, tr_y)
pred = np.asarray([int(not matcher.match(*sequence_pair_to_word_pair(name_pair, reverse_word_index))) for name_pair in te_pairs])
pred_rules = np.append(pred_rules, pred, axis=0)
te_acc = compute_accuracy(pred, te_y)
te_f1 = f1score(pred, te_y)
print("Rule-Based Accuracy")
print('* Accuracy on training set (rules): %0.2f%%' % (100 * tr_acc))
print('* Accuracy on test set (rules): %0.2f%%' % (100 * te_acc))
print('* f1score on the training set: %0.4f' % (tr_f1))
print('* f1socre on test set: %0.4f' % (te_f1))
con, meta = connect(argv[1], argv[2], argv[3])
execute_pairs = []
if 'predictions' in meta.tables:
meta.tables['predictions'].drop(con)
predictions = Table('predictions', meta, Column('name1', String), Column('name2', String), Column('rule_predict', Integer), Column('learning_predict', Float), Column('true_pair', Integer), Column('te_or_tr', String), extend_existing=True)
zipping_string = ('name1', 'name2', 'true_pair', 'rule_predict', 'learning_predict', 'te_or_tr')
print(len(tr_y))
print(len(tr_pairs))
print(len(pred_rules))
print(len(pred_learning))
print(len(te_y))
print(len(te_pairs))
for i in range(len(tr_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(tr_pairs[i][1], reverse_word_index), int(tr_y[i]), int(pred_rules[i]), float(pred_learning[i][0].item()), 'tr'))))
offset = len(tr_y)
for i in range(len(te_y)):
execute_pairs.append(dict(zip(zipping_string, (sequence_to_word(tr_pairs[i][0], reverse_word_index), sequence_to_word(te_pairs[i][1], reverse_word_index), int(te_y[i]), int(pred_rules[offset + i]), float(pred_learning[offset + i][0].item()), 'te'))))
meta.create_all(con)
con.execute(predictions.insert(), execute_pairs)
| 19,852 | 31.176661 | 254 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenetLSTM-modified.py | import numpy as np
import tensorflow as tf
import random as random
# import cntk as C
# """
# The below is necessary in Python 3.2.3 onwards to
# have reproducible behavior for certain hash-based operations.
# See these references for further details:
# https://docs.python.org/3.4/using/cmdline.html#envvar-PYTHONHASHSEED
# https://github.com/keras-team/keras/issues/2280#issuecomment-306959926
import os
os.environ['PYTHONHASHSEED'] = '0'
# The below is necessary for starting Numpy generated random numbers
# in a well-defined initial state.
np.random.seed(42)
# The below is necessary for starting core Python generated random numbers
# in a well-defined state.
random.seed(12345)
# Force TensorFlow to use single thread.
# Multiple threads are a potential source of
# non-reproducible results.
# For further details, see: https://stackoverflow.com/questions/42022950/which-seeds-have-to-be-set-where-to-realize-100-reproducibility-of-training-res
session_conf = tf.ConfigProto(intra_op_parallelism_threads=1, inter_op_parallelism_threads=1)
# import theano as T
from keras import backend as K
# The below tf.set_random_seed() will make random number generation
# in the TensorFlow backend have a well-defined initial state.
# For further details, see: https://www.tensorflow.org/api_docs/python/tf/set_random_seed
tf.set_random_seed(1234)
sess = tf.Session(graph=tf.get_default_graph(), config=session_conf)
K.set_session(sess)
# from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser
import sys
import statistics
import argparse
#must fix
# Cap on the tokenizer vocabulary: word ids at or above this are skipped
# when building the embedding matrix (see get_embedding_layer).
MAX_NB_WORDS = 140000
# Width of the embedding matrix rows (Kazuma character embeddings).
EMBEDDING_DIM = 100
# Every name is padded/truncated to this many tokens (pad_sequences maxlen).
MAX_SEQUENCE_LENGTH = 10
MARGIN=1  # margin used by the triplet losses below
ALPHA=30  # angular-loss cone parameter; passed to tan() as-is — presumably radians intended, TODO confirm
USE_GRU=True  # toggle consumed later in the file (outside this view)
DEBUG = False  # when True, data is truncated to DEBUG_DATA_LENGTH (see read_file / split)
DEBUG_DATA_LENGTH = 100
DEBUG_ANN = False  # consumed later in the file (outside this view)
USE_ANGULAR_LOSS=False  # selects angular_loss vs triplet loss, presumably — used later in the file
LOSS_FUNCTION=None
TRAIN_NEIGHBOR_LEN=20  # Annoy neighbour count during training (generate_triplets_from_ANN)
TEST_NEIGHBOR_LEN=20  # Annoy neighbour count during evaluation (generate_triplets_from_ANN)
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3  # consumed later in the file (outside this view)
USE_L2_NORM = True  # consumed later in the file (outside this view)
def f1score(positive, negative):
    """F1 score for triplet ranking evaluation.

    positive -- anchor-positive distances; negative -- anchor-negative
    distances, index-aligned. A triplet is a true positive when the positive
    distance does not exceed the negative one; otherwise it counts as both a
    false negative and a false positive (one ranking error of each kind),
    matching the original scoring scheme.

    Prints the tp/fp/fn counts (preserving the original diagnostics) and
    returns the score. Fixes in this revision: the unused, misspelled
    `fsocre` variable is removed, and empty input returns 0.0 instead of
    raising ZeroDivisionError.
    """
    true_positive = 0.0
    false_positive = 0
    false_negitive = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negitive += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negitive))
    denominator = (2 * true_positive) + false_negitive + false_positive
    return (2 * true_positive) / denominator if denominator else 0.0
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Every word in the fitted *tokenizer*'s vocabulary with id below
    MAX_NB_WORDS is looked up via KazumaCharEmbedding; row i of the weight
    matrix holds the vector for word id i. Row 0 stays all-zero because
    Keras tokenizer ids start at 1. The layer is created with
    trainable=False so the embeddings stay fixed during training.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1  # +1: tokenizer ids are 1-based
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            if sum(embedding_vector) == 0:
                # an all-zero vector is treated as "embedding not found"
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    embedding_layer = Embedding(num_words,
                                EMBEDDING_DIM,
                                weights=[embedding_matrix],
                                input_length=MAX_SEQUENCE_LENGTH,
                                trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Tokenize and pad the anchor/negative/positive text lists.

    Returns a dict with the same three keys, each holding sequences padded
    to MAX_SEQUENCE_LENGTH.
    """
    sequences = {}
    for role in ('anchor', 'negative', 'positive'):
        tokenized = tokenizer.texts_to_sequences(texts[role])
        sequences[role] = pad_sequences(tokenized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Read *filepath* and return its lines (trailing newlines kept) as a list."""
    with open(filepath) as fl:
        return [line for line in fl]
def read_file(file_path):
    """Parse a triplet file of "anchor|positive|negative" lines.

    Returns a dict of three parallel lists keyed 'anchor', 'positive' and
    'negative'. In DEBUG mode, reading stops once DEBUG_DATA_LENGTH lines
    have been consumed (same cutoff as the original counter logic).

    Fix: the file handle is now closed via a `with` block — the original
    opened the file and never closed it.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            # original kept a manual counter starting at 0 and incremented
            # after appending, so its break test used i+1
            if i + 1 > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Partition *entities* into (train, test) lists.

    In DEBUG mode a fixed prefix of DEBUG_DATA_LENGTH entities is used with
    no shuffle, so runs are reproducible; otherwise *entities* is shuffled
    in place first. The trailing *test_split* fraction becomes the test set.

    Fix: when int(test_split * len(ents)) evaluated to 0, the original
    returned (ents[:-0], ents[-0:]) == ([], everything) — an empty training
    set and the whole data as test. A zero-sized split now yields
    (all entities, []).
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    if num_validation_samples == 0:
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
    """Angular loss ("Deep Metric Learning with Angular Loss", Wang et al.).

    y_pred[:, 0, 0] is the anchor-positive distance and y_pred[:, 1, 0] the
    distance from the negative to the anchor/positive midpoint (see
    a_p_angular_distance / n_c_angular_distance). Penalises triplets whose
    anchor-positive edge is long relative to the cone defined by ALPHA.

    Fix: the original called T.tensor.tan(), but the `import theano as T`
    at the top of the file is commented out, so this function always raised
    NameError when used. tf.tan (TensorFlow is imported as tf and is the
    Keras backend here) computes the same thing. ALPHA is passed to tan()
    unconverted, exactly as before — presumably radians were intended;
    TODO confirm.
    """
    alpha = K.constant(ALPHA)
    a_p = y_pred[:, 0, 0]
    n_c = y_pred[:, 1, 0]
    tan_alpha_sq = K.square(tf.tan(alpha))
    return K.mean(K.maximum(K.constant(0),
                            K.square(a_p) - K.constant(4) * tan_alpha_sq * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
    """FaceNet triplet loss (Schroff et al., https://arxiv.org/pdf/1503.03832.pdf).

    Hinge on squared anchor-positive distance minus squared anchor-negative
    distance, offset by MARGIN.
    """
    margin = K.constant(MARGIN)
    a_p_sq = K.square(y_pred[:, 0, 0])
    a_n_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), a_p_sq - a_n_sq + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style triplet objective.

    Drives the anchor-positive distance (y_pred[:, 0, 0]) toward zero and
    the anchor-negative distance (y_pred[:, 1, 0]) toward MARGIN.
    """
    margin = K.constant(MARGIN)
    positive_part = K.square(y_pred[:, 0, 0])
    negative_part = K.square(margin - y_pred[:, 1, 0])
    return K.mean(positive_part + negative_part)
# Triplet objective from "Deep Metric Learning with Improved Triplet Loss for
# Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    """Hinge on d(a,p) vs. the mean of d(a,n) and d(p,n), plus a small penalty
    keeping d(a,p) under a fixed threshold. Returns per-example values (no
    mean reduction here)."""
    margin = K.constant(MARGIN)
    lambda_p = 0.02
    threshold = 0.1
    d_ap = y_pred[:, 0, 0]
    d_an = y_pred[:, 1, 0]
    d_pn = y_pred[:, 2, 0]
    phi = d_ap - ((d_an + d_pn) / 2) + margin
    psi = d_ap - threshold
    zero = K.constant(0)
    return K.maximum(zero, phi) + lambda_p * K.maximum(zero, psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets where anchor-positive distance < anchor-negative
    # distance (y_pred stacks [pos_dist, neg_dist, ...] along axis 1).
    # NOTE(review): relies on the backend casting the boolean comparison before
    # K.mean — confirm for the Keras backend in use.
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Unit-normalize along the last (feature) axis.
    return K.l2_normalize(x, axis=-1)
def tanhNorm(x):
    """Rescale x so its L2 norm is squashed through tanh (norm bounded in
    (0, 1)); the direction of x is preserved."""
    norm = K.sqrt(K.maximum(K.sum(K.square(x), axis=-1, keepdims=True), K.epsilon()))
    return x * (K.tanh(norm) / norm)
def euclidean_distance(vects):
    # Row-wise Euclidean distance between two batched vectors; clamped at
    # K.epsilon() so the sqrt gradient stays finite when the inputs coincide.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative to the anchor/positive midpoint c = (a+p)/2,
    # as used by the angular loss.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-positive Euclidean distance (negative input is ignored; the tuple
    # shape just mirrors n_c_angular_distance).
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten {name: [aliases...]} into (unique_text, entity2index).

    unique_text lists every name followed by its aliases in insertion order;
    entity2index maps each string back to its position in unique_text.
    """
    unique_text = []
    entity2index = {}
    for name, aliases in entity2same.items():
        entity2index[name] = len(unique_text)
        unique_text.append(name)
        for alias in aliases:
            entity2index[alias] = len(unique_text)
            unique_text.append(alias)
    return unique_text, entity2index
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets via approximate nearest neighbours over model embeddings.

    Embeds every unique name, indexes the embeddings with Annoy, then for each
    canonical entity pairs its expected aliases (positives) against ANN hits
    that are not aliases (negatives). Returns recall when test is True,
    otherwise (triplets, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    # NOTE(review): this local shadows the module-level accuracy() metric.
    accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # Query a larger neighbourhood when evaluating than when mining.
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = expected_text
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Pair every mined negative with every expected positive.
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): the summaries below raise if fewer than 2 distances were
    # collected (statistics.stdev) or total == 0 — assumes non-trivial input.
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(accuracy / total))
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, limit_pairs=False):
    """Cleanse raw entity rows into {canonical_name: [aliases...]}, keeping
    only entities that yield at least four usable name variants."""
    num_names = 4
    cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    entity2same = {}
    for entity in entities:
        names = cleanser.cleanse_data(entity)
        if names and len(names) >= num_names:
            entity2same[names[0]] = names[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Baseline encoder: the given embedding layer flattened to one vector."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def get_hidden_layer(name, net, is_last):
    """Append one 128-unit hidden layer: a GRU when USE_GRU is set (emitting
    sequences unless it is the last layer), otherwise a Dense layer."""
    if not USE_GRU:
        return Dense(128, activation='relu', name=name)(net)
    if is_last:
        return GRU(128, activation='relu', name=name)(net)
    return GRU(128, return_sequences=True, activation='relu', name=name)(net)
def build_model(embedder):
    """Build the triplet-siamese network.

    One shared base encoder (embedding -> 4 GRU layers -> optional L2 norm) is
    applied to anchor/positive/negative inputs; pairwise distances are stacked
    into one output so a single loss can see all of them. Returns
    (training model, anchor-positive dist model, anchor-negative dist model,
    anchor-embedding model).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    net = GRU(128, return_sequences=True, activation='relu', name='embed')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2')(net)
    net = GRU(128, return_sequences=True, activation='relu', name='embed2a')(net)
    net = GRU(128, activation='relu', name='embed3')(net)
    """
    for i in range(0, NUM_LAYERS):
        net = get_hidden_layer('embed' + str(i), net, False)
    net = get_hidden_layer('embed_last', net, True)
    """
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # The same base model (shared weights) encodes all three legs.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                    # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # NOTE(review): the non-angular branch is hard-wired to improved_loss here.
        model.compile(optimizer="rmsprop", loss=improved_loss, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# Command-line interface for the fuzzy-join training run.
# NOTE(review): argparse `type=bool` does not parse "False"/"0" — any
# non-empty string is truthy; these flags are effectively broken on the CLI
# (the block that would consume them below is commented out anyway).
parser = argparse.ArgumentParser(description='Run fuzzy join algorithm')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='loss function type: triplet-loss, improved-triplet-loss, modified-loss, or angular-loss')
parser.add_argument('--use_l2_norm', type=bool,
                    help='whether to add a l2 norm')
parser.add_argument('--test_neighbor_len', type=int,
                    help='size of the neighborhood for testing')
parser.add_argument('--train_neighbor_len', type=int,
                    help='size of the neighborhood for training')
parser.add_argument('--embedding_type', type=str, help='encoding type to use for input: Kazuma (for Kazuma character embedding) or one-hot')
parser.add_argument('--use_GRU', type=bool,
                    help='use GRU or default to MLP')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
args = parser.parse_args()
"""
LOSS_FUNCTION = None
if args.loss_function == 'triplet-loss':
LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved_triplet_loss':
LOSS_FUNCTION=improved_triplet_loss
elif args.loss_function == 'modified_loss':
LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'angular-loss':
USE_ANGULAR_LOSS = true
LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
DEBUG=True
DEBUG_DATA_LENGTH=args.debug_sample_size
print('Debug data length:' + str(DEBUG_DATA_LENGTH))
MARGIN = args.margin
print('Margin:' + str(MARGIN))
TRAIN_NEIGHBOR_LEN = args.train_neighbor_len
TEST_NEIGHBOR_LEN = args.test_neighbor_len
print('Train neighbor length: ' + str(TRAIN_NEIGHBOR_LEN))
print('Test neighbor length: ' + str(TEST_NEIGHBOR_LEN))
USE_L2_NORM = args.use_l2_norm
print('Use L2Norm: ' + str(USE_L2_NORM))
EMBEDDING_TYPE = args.embedding_type
print('Embedding type: ' + EMBEDDING_TYPE)
USE_GRU = args.use_GRU
print('Use GRU: ' + str(args.use_GRU))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
"""
# --- Main flow: load data, build vocab/models, then iteratively mine
# --- triplets with ANN and retrain until the test recall target is met.
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
train, test = split(entities, test_split = .20)
print("TRAIN")
print(train)
print("TEST")
print(test)
entity2same_train = generate_names(train)
entity2same_test = generate_names(test, limit_pairs=True)
print(entity2same_train)
print(entity2same_test)
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same_train)
unique_text_test, entity2unique_test = build_unique_entities(entity2same_test)
print("train text len:" + str(len(unique_text)))
print("test text len:" + str(len(unique_text_test)))
tokenizer.fit_on_texts(unique_text + unique_text_test)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
sequences_test = tokenizer.texts_to_sequences(unique_text_test)
sequences_test = pad_sequences(sequences_test, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
model, test_positive_model, test_negative_model, inter_model = build_model(embedder)
embedder_model = embedded_representation_model(embedder)
if DEBUG_ANN:
    generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    sys.exit()
test_data, test_match_stats = generate_triplets_from_ANN(embedder_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, False)
test_seq = get_sequences(test_data, tokenizer)
print("Test stats:" + str(test_match_stats))
match_stats = 0
# num_iter = 100
num_iter = 1
counter = 0
current_model = embedder_model
prev_match_stats = 0
# Alternate: mine triplets with the current encoder, fit the siamese model,
# then re-evaluate recall on the test entities with the refreshed encoder.
while test_match_stats < .9 and counter < num_iter:
    counter += 1
    train_data, match_stats = generate_triplets_from_ANN(current_model, sequences, entity2unique, entity2same_train, unique_text, False)
    print("Match stats:" + str(match_stats))
    number_of_names = len(train_data['anchor'])
    # print(train_data['anchor'])
    print("number of names" + str(number_of_names))
    # Labels are dummies: the stacked-distance losses ignore y_true entirely.
    Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
    filepath="weights.best.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')
    early_stop = EarlyStopping(monitor='val_accuracy', patience=1, mode='max')
    callbacks_list = [checkpoint, early_stop]
    train_seq = get_sequences(train_data, tokenizer)
    # check just for 5 epochs because this gets called many times
    model.fit([train_seq['anchor'], train_seq['positive'], train_seq['negative']], Y_train, epochs=100, batch_size=40, callbacks=callbacks_list, validation_split=0.2)
    current_model = inter_model
    # print some statistics on this epoch
    print("training data predictions")
    positives = test_positive_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    negatives = test_negative_model.predict([train_seq['anchor'], train_seq['positive'], train_seq['negative']])
    print("f1score for train is: {}".format(f1score(positives, negatives)))
    print("test data predictions")
    positives = test_positive_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    negatives = test_negative_model.predict([test_seq['anchor'], test_seq['positive'], test_seq['negative']])
    print("f1score for test is: {}".format(f1score(positives, negatives)))
    test_match_stats = generate_triplets_from_ANN(current_model, sequences_test, entity2unique_test, entity2same_test, unique_text_test, True)
    print("Test stats:" + str(test_match_stats))
| 21,841 | 37.319298 | 167 | py |
fuzzyJoiner | fuzzyJoiner-master/old/ANNCharacteristics.py | import numpy as np
import pandas
import tensorflow as tf
import random as random
import json
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, GRU, Activation
from keras.layers.wrappers import Bidirectional
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
from keras.callbacks import ModelCheckpoint, EarlyStopping
from names_cleanser import NameDataCleanser, CompanyDataCleanser
import sys
import statistics
import argparse
# Module-level configuration; some values are overridden by CLI args below.
#must fix
MAX_NB_WORDS = 140000  # tokenizer vocabulary cap
EMBEDDING_DIM = 100  # Kazuma character-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10  # names padded/truncated to this many tokens
MARGIN=10  # margin used by triplet_loss
ALPHA=45  # angle parameter for angular_loss
DEBUG = False  # when True, use a fixed data prefix instead of the full set
DEBUG_DATA_LENGTH = 100  # prefix size used in DEBUG mode
DEBUG_ANN = False
USE_ANGULAR_LOSS=False  # set True when --loss_function angular-loss is chosen
LOSS_FUNCTION=None  # resolved from --loss_function below
TRAIN_NEIGHBOR_LEN=20  # ANN neighborhood size when mining training triplets
TEST_NEIGHBOR_LEN=20  # ANN neighborhood size when evaluating
EMBEDDING_TYPE = 'Kazuma'
NUM_LAYERS = 3  # overridden from --num_layers below
USE_L2_NORM = False  # overridden from --use_l2_norm below
output_file_name_for_hpo = "val_dict_list.json"  # HPO metric dump target
def f1score(positive, negative):
    """Compute an F1-style score over paired distance lists.

    A pair counts as a true positive when the positive distance is <= the
    negative distance; otherwise it counts as both a false negative and a
    false positive (the triplet is wrong in both directions).

    Fixes: removed the unused, misspelled `fsocre` accumulator and guarded
    the previously-possible ZeroDivisionError on empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # Empty input: define the score as 0 instead of dividing by zero.
        return 0.0
    return (2 * true_positive) / denominator
def get_embedding_layer(tokenizer):
    """Build a frozen Keras Embedding layer from Kazuma character embeddings.

    Each word in the tokenizer's vocabulary is looked up via
    KazumaCharEmbedding; misses stay all-zero rows in the matrix.
    """
    word_index = tokenizer.word_index
    num_words = len(word_index) + 1  # +1: Keras reserves index 0 for padding
    embedding_matrix = np.zeros((num_words, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, i in word_index.items():
        if i >= MAX_NB_WORDS:
            continue
        embedding_vector = kz.emb(word)
        if embedding_vector is not None:
            # An all-zero vector signals a lookup miss; logged but still stored.
            if sum(embedding_vector) == 0:
                print("failed to find embedding for:" + word)
            # words not found in embedding index will be all-zeros.
            embedding_matrix[i] = embedding_vector
    print("Number of words:" + str(num_words))
    # trainable=False keeps the pretrained character embeddings frozen.
    embedding_layer = Embedding(num_words,
                            EMBEDDING_DIM,
                            weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH,
                            trainable=False)
    return embedding_layer
def get_sequences(texts, tokenizer):
    """Vectorize and pad each leg of the triplet dict (anchor/negative/positive)."""
    sequences = {}
    for leg in ('anchor', 'negative', 'positive'):
        vectorized = tokenizer.texts_to_sequences(texts[leg])
        sequences[leg] = pad_sequences(vectorized, maxlen=MAX_SEQUENCE_LENGTH)
    return sequences
def read_entities(filepath):
    """Load the raw entity rows, one per line (newline characters kept)."""
    with open(filepath) as handle:
        return [row for row in handle]
def read_file(file_path):
    """Read pipe-delimited triplet rows into anchor/positive/negative lists.

    Each line is expected to look like "anchor|positive|negative[|...]".
    In DEBUG mode reading stops once DEBUG_DATA_LENGTH rows have been consumed.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(file_path, 'r') as fl:
        for i, line in enumerate(fl, start=1):
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2])
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def split(entities, test_split = 0.2):
    """Shuffle *entities* in place and split them into (train, test) lists.

    In DEBUG mode a fixed prefix is taken instead of shuffling so runs are
    reproducible. Fix: when int(test_split * len) == 0 the original returned
    ents[:-0] == [] for train; now "no test samples" is handled explicitly.
    """
    if DEBUG:
        ents = entities[0:DEBUG_DATA_LENGTH]
    else:
        random.shuffle(entities)
        ents = entities
    num_validation_samples = int(test_split * len(ents))
    if num_validation_samples == 0:
        # ents[:-0] would evaluate to an empty list, losing all training data.
        return ents, []
    return ents[:-num_validation_samples], ents[-num_validation_samples:]
"""
define a single objective function based on angular loss instead of triplet loss
"""
def angular_loss(y_true, y_pred):
alpha = K.constant(ALPHA)
a_p = y_pred[:,0,0]
n_c = y_pred[:,1,0]
return K.mean(K.maximum(K.constant(0), K.square(a_p) - K.constant(4) * K.square(tf.tan(alpha)) * K.square(n_c)))
"""
Facenet triplet loss function: https://arxiv.org/pdf/1503.03832.pdf
"""
def schroff_triplet_loss(y_true, y_pred):
margin = K.constant(0.2)
return K.mean(K.maximum(K.constant(0), K.square(y_pred[:,0,0]) - K.square(y_pred[:,1,0]) + margin))
def triplet_loss(y_true, y_pred):
    """Contrastive-style objective: minimize d(a,p)^2 plus the squared hinge
    of (MARGIN - d(a,n))."""
    m = K.constant(MARGIN)
    d_ap = y_pred[:, 0, 0]
    hinge = K.maximum(m - y_pred[:, 1, 0], K.constant(0))
    return K.mean(K.square(d_ap) + K.square(hinge))
def triplet_tanh_loss(y_true, y_pred):
    """Bounded objective: tanh of the positive distance plus (1 - tanh) of the
    negative distance."""
    pos_term = K.tanh(y_pred[:, 0, 0])
    neg_term = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    return K.mean(pos_term + neg_term)
def triplet_tanh_pn_loss(y_true, y_pred):
    """tanh loss using both negatives: averages the anchor-negative and
    positive-negative (1 - tanh) terms before adding the positive term."""
    pos_term = K.tanh(y_pred[:, 0, 0])
    an_term = K.constant(1) - K.tanh(y_pred[:, 1, 0])
    pn_term = K.constant(1) - K.tanh(y_pred[:, 2, 0])
    return K.mean(pos_term + (an_term + pn_term) / K.constant(2))
# Triplet objective from "Deep Metric Learning with Improved Triplet Loss for
# Face Clustering in Videos".
def improved_loss(y_true, y_pred):
    """Hinge on d(a,p) vs. the mean of d(a,n) and d(p,n), plus a small penalty
    keeping d(a,p) under a fixed threshold. Returns per-example values (no
    mean reduction here)."""
    margin = K.constant(1)
    lambda_p = K.constant(0.02)
    threshold = K.constant(0.1)
    d_ap = y_pred[:, 0, 0]
    d_an = y_pred[:, 1, 0]
    d_pn = y_pred[:, 2, 0]
    phi = d_ap - ((d_an + d_pn) / K.constant(2)) + margin
    psi = d_ap - threshold
    zero = K.constant(0)
    return K.maximum(zero, phi) + lambda_p * K.maximum(zero, psi)
def accuracy(y_true, y_pred):
    # Fraction of triplets where anchor-positive distance < anchor-negative
    # distance (y_pred stacks [pos_dist, neg_dist, ...] along axis 1).
    # NOTE(review): relies on the backend casting the boolean comparison before
    # K.mean — confirm for the Keras backend in use.
    return K.mean(y_pred[:,0,0] < y_pred[:,1,0])
def l2Norm(x):
    # Unit-normalize along the last (feature) axis.
    return K.l2_normalize(x, axis=-1)
def euclidean_distance(vects):
    # Row-wise Euclidean distance between two batched vectors; clamped at
    # K.epsilon() so the sqrt gradient stays finite when the inputs coincide.
    x, y = vects
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=1, keepdims=True), K.epsilon()))
def n_c_angular_distance(vects):
    # Distance from the negative to the anchor/positive midpoint c = (a+p)/2,
    # as used by the angular loss.
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_n - ((x_a + x_p) / K.constant(2))), axis=1, keepdims=True), K.epsilon()))
def a_p_angular_distance(vects):
    # Anchor-positive Euclidean distance (negative input is ignored; the tuple
    # shape just mirrors n_c_angular_distance).
    x_a, x_p, x_n = vects
    return K.sqrt(K.maximum(K.sum(K.square(x_a - x_p), axis=1, keepdims=True), K.epsilon()))
def build_unique_entities(entity2same):
    """Flatten {name: [aliases...]} into (unique_text, entity2index), where
    unique_text holds each name followed by its aliases in insertion order and
    entity2index maps every string to its position in that list."""
    unique_text = []
    entity2index = {}
    for name in entity2same:
        for text in [name] + list(entity2same[name]):
            entity2index[text] = len(unique_text)
            unique_text.append(text)
    return unique_text, entity2index
def characterize_dataset(model, sequences, entity2unique, entity2same, unique_text, nnlens):
    """Print distance statistics and recall of the ANN index at several
    neighborhood sizes (nnlens), using the given model's embeddings."""
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    for nnlen in nnlens:
        print("Characteristics at neighborhood length:" + str(nnlen))
        pos_distances = []
        neg_distances = []
        match = 0
        no_match = 0
        for key in entity2same:
            index = entity2unique[key]
            nearest = t.get_nns_by_vector(predictions[index], nnlen)
            nearest_text = set([unique_text[i] for i in nearest])
            expected_text = set(entity2same[key])
            overlap = expected_text.intersection(nearest_text)
            m = len(overlap)
            match += m
            # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
            # make sure we adjust our estimate of no match appropriately
            no_match += min(len(expected_text), nnlen - 1) - m
            # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
            if key in nearest_text:
                nearest_text.remove(key)
            # sample only the negatives that are true negatives
            # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
            pos = expected_text
            neg = nearest_text - expected_text
            for i in pos:
                dist_pos = t.get_distance(index, entity2unique[i])
                pos_distances.append(dist_pos)
            for i in neg:
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
        recall = match / (match + no_match)
        # NOTE(review): statistics.stdev raises on fewer than two samples —
        # assumes the dataset yields multiple positive/negative distances.
        print("mean positive distance:" + str(statistics.mean(pos_distances)))
        print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
        print("max positive distance:" + str(max(pos_distances)))
        print("mean neg distance:" + str(statistics.mean(neg_distances)))
        print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
        print("max neg distance:" + str(max(neg_distances)))
        print("recall:" + str(recall))
def generate_triplets_from_ANN(model, sequences, entity2unique, entity2same, unique_text, test):
    """Mine triplets via approximate nearest neighbours over model embeddings.

    Embeds every unique name, indexes the embeddings with Annoy, computes
    distance statistics, and writes the ANN ordering-accuracy to the HPO
    output file. Returns recall when test is True, otherwise (triplets, recall).
    """
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    t.set_seed(123)
    for i in range(len(predictions)):
        # print(predictions[i])
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    ann_accuracy = 0
    total = 0
    triplets = {}
    pos_distances = []
    neg_distances = []
    triplets['anchor'] = []
    triplets['positive'] = []
    triplets['negative'] = []
    # Query a larger neighbourhood when evaluating than when mining.
    if test:
        NNlen = TEST_NEIGHBOR_LEN
    else:
        NNlen = TRAIN_NEIGHBOR_LEN
    for key in entity2same:
        index = entity2unique[key]
        nearest = t.get_nns_by_vector(predictions[index], NNlen)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[key])
        # annoy has this annoying habit of returning the queried item back as a nearest neighbor. Remove it.
        if key in nearest_text:
            nearest_text.remove(key)
        # print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        overlap = expected_text.intersection(nearest_text)
        # collect up some statistics on how well we did on the match
        m = len(overlap)
        match += m
        # since we asked for only x nearest neighbors, and we get at most x-1 neighbors that are not the same as key (!)
        # make sure we adjust our estimate of no match appropriately
        no_match += min(len(expected_text), NNlen - 1) - m
        # sample only the negatives that are true negatives
        # that is, they are not in the expected set - sampling only 'semi-hard negatives is not defined here'
        # positives = expected_text - nearest_text
        positives = overlap
        negatives = nearest_text - expected_text
        # print(key + str(expected_text) + str(nearest_text))
        # Distance stats use only positives the ANN actually retrieved (overlap).
        for i in negatives:
            for j in positives:
                dist_pos = t.get_distance(index, entity2unique[j])
                pos_distances.append(dist_pos)
                dist_neg = t.get_distance(index, entity2unique[i])
                neg_distances.append(dist_neg)
                if dist_pos < dist_neg:
                    ann_accuracy += 1
                total += 1
                # print(key + "|" + j + "|" + i)
                # print(dist_pos)
                # print(dist_neg)
        # Emitted triplets pair every negative with EVERY expected alias,
        # not just the retrieved ones.
        for i in negatives:
            for j in expected_text:
                triplets['anchor'].append(key)
                triplets['positive'].append(j)
                triplets['negative'].append(i)
    # NOTE(review): the summaries below raise if fewer than 2 distances were
    # collected (statistics.stdev) or total == 0 — assumes non-trivial input.
    print("mean positive distance:" + str(statistics.mean(pos_distances)))
    print("stdev positive distance:" + str(statistics.stdev(pos_distances)))
    print("max positive distance:" + str(max(pos_distances)))
    print("mean neg distance:" + str(statistics.mean(neg_distances)))
    print("stdev neg distance:" + str(statistics.stdev(neg_distances)))
    print("max neg distance:" + str(max(neg_distances)))
    print("Accuracy in the ANN for triplets that obey the distance func:" + str(ann_accuracy / total))
    # Dump the metric for hyper-parameter-optimization tooling.
    obj = {}
    obj['accuracy'] = ann_accuracy / total
    obj['steps'] = 1
    with open(output_file_name_for_hpo, 'w') as out:
        json.dump(obj, out)
    if test:
        return match/(match + no_match)
    else:
        return triplets, match/(match + no_match)
def generate_names(entities, people, limit_pairs=False):
    """Cleanse raw entity rows into {canonical_name: [aliases...]} using the
    person or company cleanser; entities yielding too few names are dropped."""
    if people:
        num_names = 4
        cleanser = NameDataCleanser(0, num_names, limit_pairs=limit_pairs)
    else:
        num_names = 2
        cleanser = CompanyDataCleanser(limit_pairs)
    entity2same = {}
    for entity in entities:
        names = cleanser.cleanse_data(entity)
        if names and len(names) >= num_names:
            entity2same[names[0]] = names[1:]
    return entity2same
def embedded_representation_model(embedding_layer):
    """Baseline encoder: the given embedding layer flattened to one vector."""
    model = Sequential()
    model.add(embedding_layer)
    model.add(Flatten())
    return model
def build_model(embedder):
    """Build the triplet-siamese network.

    One shared base encoder (embedding -> NUM_LAYERS+1 GRU layers -> optional
    L2 norm) is applied to anchor/positive/negative inputs; pairwise distances
    are stacked into one output so a single loss can see all of them. Returns
    (training model, anchor-positive dist model, anchor-negative dist model,
    anchor-embedding model).
    """
    main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
    net = embedder(main_input)
    for i in range(0, NUM_LAYERS):
        net = GRU(128, return_sequences=True, activation='relu', name='embed' + str(i))(net)
    # Relies on the loop variable i surviving the loop — requires NUM_LAYERS >= 1.
    net = GRU(128, activation='relu', name='embed' + str(i+1))(net)
    if USE_L2_NORM:
        net = Lambda(l2Norm, output_shape=[128])(net)
    base_model = Model(embedder.input, net, name='triplet_model')
    base_model.summary()
    input_shape=(MAX_SEQUENCE_LENGTH,)
    input_anchor = Input(shape=input_shape, name='input_anchor')
    input_positive = Input(shape=input_shape, name='input_pos')
    input_negative = Input(shape=input_shape, name='input_neg')
    # The same base model (shared weights) encodes all three legs.
    net_anchor = base_model(input_anchor)
    net_positive = base_model(input_positive)
    net_negative = base_model(input_negative)
    positive_dist = Lambda(euclidean_distance, name='pos_dist', output_shape=(1,))([net_anchor, net_positive])
    negative_dist = Lambda(euclidean_distance, name='neg_dist', output_shape=(1,))([net_anchor, net_negative])
    if USE_ANGULAR_LOSS:
        n_c = Lambda(n_c_angular_distance, name='nc_angular_dist')([net_anchor, net_positive, net_negative])
        a_p = Lambda(a_p_angular_distance, name='ap_angular_dist')([net_anchor, net_positive, net_negative])
        stacked_dists = Lambda(
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([a_p, n_c])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        model.compile(optimizer="rmsprop", loss=angular_loss, metrics=[accuracy])
    else:
        exemplar_negative_dist = Lambda(euclidean_distance, name='exemplar_neg_dist', output_shape=(1,))([net_positive, net_negative])
        stacked_dists = Lambda(
                    # lambda vects: C.splice(*vects, axis=C.Axis.new_leading_axis()).eval(vects),
                    lambda vects: K.stack(vects, axis=1),
                    name='stacked_dists', output_shape=(3, 1)
                    )([positive_dist, negative_dist, exemplar_negative_dist])
        model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
        # Uses whichever loss was resolved from --loss_function at module level.
        model.compile(optimizer="rmsprop", loss=LOSS_FUNCTION, metrics=[accuracy])
    test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
    test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
    inter_model = Model(input_anchor, net_anchor)
    print("output_shapes")
    model.summary()
    # print(positive_dist.output_shape)
    # print(negative_dist.output_shape)
    # print(exemplar_negative_dist)
    # print(neg_dist.output_shape)
    return model, test_positive_model, test_negative_model, inter_model
# --- CLI + main flow: parse options, cleanse entities, build the frozen
# --- embedding encoder, and characterize the dataset's ANN behavior.
parser = argparse.ArgumentParser(description='Characterize the dataset')
parser.add_argument('--debug_sample_size', type=int,
                    help='sample size for debug run')
parser.add_argument('--margin', type=int,
                    help='margin')
parser.add_argument('--loss_function', type=str,
                    help='triplet loss function type: schroff-loss, improved-loss, angular-loss, tanh-loss, improved-tanh-loss')
parser.add_argument('--use_l2_norm', type=str,
                    help='whether to add a l2 norm')
parser.add_argument('--num_layers', type=int,
                    help='num_layers to use. Minimum is 2')
parser.add_argument('--input', type=str, help='Input file')
parser.add_argument('--entity_type', type=str, help='people or companies')
args = parser.parse_args()
# Resolve the loss function name to the actual callable.
LOSS_FUNCTION = None
if args.loss_function == 'schroff-loss':
    LOSS_FUNCTION=schroff_triplet_loss
elif args.loss_function == 'improved-loss':
    LOSS_FUNCTION=improved_loss
elif args.loss_function == 'our-loss':
    LOSS_FUNCTION=triplet_loss
elif args.loss_function == 'tanh-loss':
    LOSS_FUNCTION=triplet_tanh_loss
elif args.loss_function == 'improved-tanh-loss':
    LOSS_FUNCTION=triplet_tanh_pn_loss
elif args.loss_function == 'angular-loss':
    USE_ANGULAR_LOSS = True
    LOSS_FUNCTION = angular_loss
print('Loss function: ' + args.loss_function)
if args.debug_sample_size:
    DEBUG=True
    DEBUG_DATA_LENGTH=args.debug_sample_size
    print('Debug data length:' + str(DEBUG_DATA_LENGTH))
print('Margin:' + str(MARGIN))
# --use_l2_norm is a string flag; parse the usual truthy spellings.
USE_L2_NORM = args.use_l2_norm.lower() in ("yes", "true", "t", "1")
print('Use L2Norm: ' + str(USE_L2_NORM))
# NOTE(review): duplicate log line (raw flag value) — intentional?
print('Use L2Norm: ' + str(args.use_l2_norm))
NUM_LAYERS = args.num_layers - 1
print('Num layers: ' + str(NUM_LAYERS))
people = 'people' in args.entity_type
# read all entities and create positive parts of a triplet
entities = read_entities(args.input)
entity2same = generate_names(entities, people)
print("number of entities")
print(len(entity2same))
number_of_names = []
for i in entity2same:
    number_of_names.append(len(entity2same[i]))
print("mean number of names:" + str(statistics.mean(number_of_names)))
print("max number of names:" + str(max(number_of_names)))
# change the default behavior of the tokenizer to ignore all punctuation except , - and . which are important
# clues for entity names
tokenizer = Tokenizer(num_words=MAX_NB_WORDS, lower=True, filters='!"#$%&()*+/:;<=>?@[\]^_`{|}~', split=" ")
# build a set of data structures useful for annoy, the set of unique entities (unique_text),
# a mapping of entities in texts to an index in unique_text, a mapping of entities to other same entities, and the actual
# vectorized representation of the text. These structures will be used iteratively as we build up the model
# so we need to create them once for re-use
unique_text, entity2unique = build_unique_entities(entity2same)
tokenizer.fit_on_texts(unique_text)
sequences = tokenizer.texts_to_sequences(unique_text)
sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
# build models
embedder = get_embedding_layer(tokenizer)
embedder_model = embedded_representation_model(embedder)
characterize_dataset(embedder_model, sequences, entity2unique, entity2same, unique_text, [20, 100, 500, 1500])
| 19,558 | 35.355019 | 134 | py |
fuzzyJoiner | fuzzyJoiner-master/old/TripletLossFacenet.py | from sys import argv
from keras import backend as K
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, Flatten, Dropout, Lambda, LSTM
from keras.layers import Conv1D, MaxPooling1D, Embedding
from keras.models import Model, model_from_json, Sequential
import numpy as np
from embeddings import KazumaCharEmbedding
from annoy import AnnoyIndex
import Named_Entity_Recognition_Modified
# Module-level configuration.
#must fix
MAX_NB_WORDS = 140000  # tokenizer vocabulary cap
EMBEDDING_DIM = 100  # Kazuma character-embedding dimensionality
MAX_SEQUENCE_LENGTH = 10  # names padded/truncated to this many tokens
DEBUG = False  # when True, read_file stops after DEBUG_DATA_LENGTH rows
DEBUG_DATA_LENGTH = 1000000  # row cap used in DEBUG mode
def f1score(positive, negative):
    """Compute an F1-style score over paired distance lists.

    A pair counts as a true positive when the positive distance is <= the
    negative distance; otherwise it counts as both a false negative and a
    false positive (the triplet is wrong in both directions).

    Fixes: removed the unused, misspelled `fsocre` accumulator and guarded
    the previously-possible ZeroDivisionError on empty input.
    """
    true_positive = 0.0
    false_positive = 0
    false_negative = 0
    for i in range(len(positive)):
        if positive[i] <= negative[i]:
            true_positive += 1
        else:
            false_negative += 1
            false_positive += 1
    print('tp' + str(true_positive))
    print('fp' + str(false_positive))
    print('fn' + str(false_negative))
    denominator = (2 * true_positive) + false_negative + false_positive
    if denominator == 0:
        # Empty input: define the score as 0 instead of dividing by zero.
        return 0.0
    return (2 * true_positive) / denominator
def get_random_image(img_groups, group_names, gid):
    """Pick one random photo from the group at index *gid*.

    Returns the concatenated file name ``<group><photo>.jpg``.
    """
    group = group_names[gid]
    members = img_groups[group]
    # Same np.random call as the original so seeded runs draw identically.
    choice = np.random.choice(np.arange(len(members)), size=1)[0]
    return group + members[choice] + ".jpg"
def get_embedding_layer(tokenizer):
    """Build a frozen Keras ``Embedding`` layer from Kazuma char embeddings.

    Row ``i`` of the weight matrix holds the embedding of the word the
    tokenizer mapped to index ``i``; words without an embedding (or beyond
    the MAX_NB_WORDS cap) keep an all-zero row.
    """
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1
    weights = np.zeros((vocab_size, EMBEDDING_DIM))
    print('about to get kz')
    kz = KazumaCharEmbedding()
    print('got kz')
    for word, idx in word_index.items():
        # Skip indices past the configured vocabulary cap.
        if idx >= MAX_NB_WORDS:
            continue
        vector = kz.emb(word)
        if vector is not None:
            if sum(vector) == 0:
                print("failed to find embedding for:" + word)
            weights[idx] = vector
    return Embedding(vocab_size,
                     EMBEDDING_DIM,
                     weights=[weights],
                     input_length=MAX_SEQUENCE_LENGTH,
                     trainable=False)
def get_tokenizer(texts):
    """Fit a Keras Tokenizer on all anchor, negative and positive strings."""
    corpus = texts['anchor'] + texts['negative'] + texts['positive']
    tok = Tokenizer(num_words=MAX_NB_WORDS)
    tok.fit_on_texts(corpus)
    return tok
def get_sequences(texts, tokenizer):
    """Vectorize each role's strings and pad to MAX_SEQUENCE_LENGTH.

    Returns a dict with the same 'anchor'/'negative'/'positive' keys,
    each holding a padded integer-sequence matrix.
    """
    padded = {}
    # Same key order as the original (anchor, negative, positive).
    for role in ('anchor', 'negative', 'positive'):
        raw = tokenizer.texts_to_sequences(texts[role])
        padded[role] = pad_sequences(raw, maxlen=MAX_SEQUENCE_LENGTH)
    return padded
def read_file(file_path):
    """Read '|'-separated triplets (``anchor|positive|negative``) from a file.

    Returns a dict with 'anchor', 'positive' and 'negative' lists, one
    entry per input line.  When DEBUG is set, stops after
    DEBUG_DATA_LENGTH lines.

    Fixes over the original:
    - the file handle is closed via a context manager (it leaked before);
    - the trailing newline is removed with ``rstrip('\\n')`` instead of
      ``[:-1]``, which chopped the last *data* character of the final line
      when the file did not end with a newline.
    """
    texts = {'anchor': [], 'negative': [], 'positive': []}
    with open(file_path, 'r') as fl:
        i = 0
        for line in fl:
            line_array = line.split("|")
            texts['anchor'].append(line_array[0])
            texts['positive'].append(line_array[1])
            texts['negative'].append(line_array[2].rstrip('\n'))
            i += 1
            if i > DEBUG_DATA_LENGTH and DEBUG:
                break
    return texts
def get_test(texts, sequences, percent):
    """Shuffle the triplets and split them into train and test sets.

    Parameters:
        texts: dict of 'anchor'/'positive'/'negative' string lists.
            NOTE: mutated in place — its lists are converted to numpy
            arrays, exactly as the original did (downstream code sees
            the converted values).
        sequences: dict of matching padded-sequence arrays.
        percent: fraction of rows held out for testing.

    Returns (train_sequences, test_sequences, reordered_texts), all
    reordered by the same random permutation.

    Fix: the split now uses an explicit split index.  The original used
    ``[:-n]`` / ``[-n:]``, so when ``int(percent * size)`` rounded to 0 the
    training set became EMPTY and the whole dataset landed in the test set.
    """
    total = sequences['anchor'].shape[0]
    indices = np.arange(total)
    np.random.shuffle(indices)
    shuffled = {role: sequences[role][indices]
                for role in ('anchor', 'positive', 'negative')}
    num_validation_samples = int(percent * total)
    split = total - num_validation_samples
    ret_train = {role: shuffled[role][:split] for role in shuffled}
    ret_test = {role: shuffled[role][split:] for role in shuffled}
    ret_texts = {}
    for role in ('anchor', 'positive', 'negative'):
        texts[role] = np.array(texts[role])
        ret_texts[role] = texts[role][indices]
    return ret_train, ret_test, ret_texts
def triplet_loss(y_true, y_pred):
    """Margin-based triplet loss on stacked distances.

    ``y_pred[:, 0, 0]`` is the anchor-positive distance and
    ``y_pred[:, 1, 0]`` the anchor-negative distance; ``y_true`` is unused.
    """
    margin = K.constant(1)
    pos_sq = K.square(y_pred[:, 0, 0])
    neg_sq = K.square(y_pred[:, 1, 0])
    return K.mean(K.maximum(K.constant(0), pos_sq - neg_sq + margin))
def accuracy(y_true, y_pred):
    """Fraction of triplets whose positive distance beats the negative one."""
    pos_dist = y_pred[:, 0, 0]
    neg_dist = y_pred[:, 1, 0]
    return K.mean(pos_dist < neg_dist)
def l2Norm(x):
    """L2-normalize *x* along its last axis."""
    normalized = K.l2_normalize(x, axis=-1)
    return normalized
def euclidean_distance(vects):
    """Row-wise Euclidean distance between a pair of tensors.

    Clamped away from zero with ``K.epsilon()`` so the sqrt gradient
    stays finite.
    """
    left, right = vects
    squared = K.sum(K.square(left - right), axis=1, keepdims=True)
    return K.sqrt(K.maximum(squared, K.epsilon()))
def do_annoy(model, texts, tokenizer, verbose):
    """Evaluate *model* embeddings with an Annoy nearest-neighbor index.

    Builds the deduplicated entity list, embeds every string with *model*,
    indexes the embeddings, then for each anchor checks how many of its
    known same-entity aliases appear among its 5 nearest neighbors.
    Prints per-query results plus a final match / no_match tally.
    """
    # unique_text: deduplicated strings, in insertion order.
    # entity_idx: positions in unique_text that are ANCHOR strings (queries).
    # entity2same: anchor string -> list of its known-equivalent positives.
    unique_text = []
    entity_idx = []
    entity2same = {}
    for i in range(len(texts['anchor'])):
        if not texts['anchor'][i] in entity2same:
            entity2same[texts['anchor'][i]] = []
            entity_idx.append(len(unique_text))
            unique_text.append(texts['anchor'][i])
        l = entity2same[texts['anchor'][i]]
        # Only record each positive alias once per anchor.  NOTE(review):
        # the same positive reappearing under a different anchor would be
        # appended to unique_text again — presumed rare; verify upstream data.
        if texts['positive'][i] not in l:
            entity2same[texts['anchor'][i]].append(texts['positive'][i])
            unique_text.append(texts['positive'][i])
    print(entity2same)
    print(unique_text)
    # Embed every unique string; predictions[i] corresponds to unique_text[i].
    sequences = tokenizer.texts_to_sequences(unique_text)
    sequences = pad_sequences(sequences, maxlen=MAX_SEQUENCE_LENGTH)
    predictions = model.predict(sequences)
    t = AnnoyIndex(len(predictions[0]), metric='euclidean') # Length of item vector that will be indexed
    for i in range(len(predictions)):
        v = predictions[i]
        t.add_item(i, v)
    t.build(100) # 100 trees
    match = 0
    no_match = 0
    for index in entity_idx:
        # 5 nearest neighbors of the anchor's own embedding (includes itself).
        nearest = t.get_nns_by_vector(predictions[index], 5)
        print(nearest)
        nearest_text = set([unique_text[i] for i in nearest])
        expected_text = set(entity2same[unique_text[index]])
        # Drop the query itself from the neighbor set before scoring.
        if unique_text[index] in nearest_text:
            nearest_text.remove(unique_text[index])
        print("query={} names = {} true_match = {}".format(unique_text[index], nearest_text, expected_text))
        if verbose:
            print([t.get_distance(index, i) for i in nearest])
        # Score: aliases found among the neighbors vs. aliases missed.
        overlap = expected_text.intersection(nearest_text)
        print(overlap)
        m = len(overlap)
        match += m
        no_match += len(expected_text) - m
    print("match: {} no_match: {}".format(match, no_match))
def print_deb_data(debbuging_data):
    """Print each kept debugging triplet alongside its padded sequences."""
    kept_texts = debbuging_data['texts']
    kept_seqs = debbuging_data['sequences']
    for idx in range(debbuging_data['number']):
        print('anch: --{}-- pos:--{}-- neg:--{}--'.format(kept_texts['anchor'][idx], kept_texts['positive'][idx], kept_texts['negative'][idx]))
        print('sequences: anch: --{}-- pos:--{}-- neg:--{}--'.format(kept_seqs['anchor'][idx], kept_seqs['positive'][idx], kept_seqs['negative'][idx]))
def debugging_text_and_sequences(reordered_text, training_data, number):
    """Keep the first *number* triplets (raw text plus padded sequences).

    Returns a dict with 'number', 'texts' and 'sequences' entries, each of
    the latter two holding 'anchor'/'positive'/'negative' lists of length
    *number*, for later inspection with print_deb_data.
    """
    roles = ('anchor', 'positive', 'negative')
    return {
        'number': number,
        'sequences': {role: [training_data[role][i] for i in range(number)]
                      for role in roles},
        'texts': {role: [reordered_text[role][i] for i in range(number)]
                  for role in roles},
    }
# ---- Top-level training script ----
# Loads '|'-separated triplets from argv[1], trains a triplet-loss siamese
# model over frozen character embeddings, reports F1 on train/test splits,
# then sanity-checks the learned embedding space with Annoy.
# triples_data = create_triples(IMAGE_DIR)
texts = read_file(argv[1])
print("anchor: {} positive: {} negative: {}".format(texts['anchor'][0], texts['positive'][0], texts['negative'][0]))
tokenizer = get_tokenizer(texts)
print('got tokenizer')
sequences = get_sequences(texts, tokenizer)
# 5% held out for testing; get_test also converts texts' lists to np arrays.
train_data, test_data, reordered_text = get_test(texts, sequences, 0.05)
# Keep the first 20 triplets around for debugging printouts at the end.
debbuging_data = debugging_text_and_sequences(reordered_text, train_data, 20)
number_of_names = len(train_data['anchor'])
print('sequenced words')
# Dummy labels: triplet_loss only reads y_pred, so the values never matter.
Y_train = np.random.randint(2, size=(1,2,number_of_names)).T
embedder = get_embedding_layer(tokenizer)
print('got embeddings')
# Shared base network: frozen embedding -> flatten -> 3x Dense(128, relu)
# -> L2 normalization, so all three triplet branches reuse the same weights.
main_input = Input(shape=(MAX_SEQUENCE_LENGTH,))
net = embedder(main_input)
net = Flatten(name='flatten')(net)
net = Dense(128, activation='relu', name='embed')(net)
net = Dense(128, activation='relu', name='embed2')(net)
net = Dense(128, activation='relu', name='embed3')(net)
net = Lambda(l2Norm, output_shape=[128])(net)
base_model = Model(embedder.input, net, name='triplet_model')
print(base_model.summary())
input_shape=(MAX_SEQUENCE_LENGTH,)
input_anchor = Input(shape=input_shape, name='input_anchor')
input_positive = Input(shape=input_shape, name='input_pos')
input_negative = Input(shape=input_shape, name='input_neg')
net_anchor = base_model(input_anchor)
net_positive = base_model(input_positive)
net_negative = base_model(input_negative)
# Distances between the anchor embedding and each of the pair embeddings,
# stacked so triplet_loss/accuracy can index pos as [:,0,0], neg as [:,1,0].
positive_dist = Lambda(euclidean_distance, name='pos_dist')([net_anchor, net_positive])
negative_dist = Lambda(euclidean_distance, name='neg_dist')([net_anchor, net_negative])
stacked_dists = Lambda(
    lambda vects: K.stack(vects, axis=1),
    name='stacked_dists'
)([positive_dist, negative_dist])
model = Model([input_anchor, input_positive, input_negative], stacked_dists, name='triple_siamese')
model.compile(optimizer="rmsprop", loss=triplet_loss, metrics=[accuracy])
model.fit([train_data['anchor'], train_data['positive'], train_data['negative']], Y_train, epochs=10, batch_size=15, validation_split=0.2)
# Single-distance views of the trained network, used only for evaluation.
test_positive_model = Model([input_anchor, input_positive, input_negative], positive_dist)
test_negative_model = Model([input_anchor, input_positive, input_negative], negative_dist)
print("training data predictions")
positives = test_positive_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
negatives = test_negative_model.predict([train_data['anchor'], train_data['positive'], train_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
print("test data predictions")
positives = test_positive_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
negatives = test_negative_model.predict([test_data['anchor'], test_data['positive'], test_data['negative']])
print("f1score is: {}".format(f1score(positives, negatives)))
# model.save('triplet_loss_resnet50.h5')
# Nearest-neighbor sanity checks: full dataset, then the 20 debugging
# triplets through raw embeddings and through the trained anchor branch.
inter_model = Model(input_anchor, net_anchor)
do_annoy(inter_model, texts, tokenizer, False)
print('annoy on embeddings for debbuging_data')
do_annoy(Named_Entity_Recognition_Modified.embedded_representation(embedder), debbuging_data['texts'], tokenizer, True)
print('annoy on full model for debbuging_data')
do_annoy(inter_model, debbuging_data['texts'], tokenizer, True)
print_deb_data(debbuging_data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.