| repo (string) | file (string) | code (string) | file_length (int64) | avg_line_length (float64) | max_line_length (int64) | extension_type (string) |
|---|---|---|---|---|---|---|
interpretability | interpretability-master/text-dream/python/dream/tokenize_sentence.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Used to tokenize a sentence."""
from absl import app
from absl import flags
from pytorch_pretrained_bert import tokenization
import sys
sys.path.insert(1, 'helpers')
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
'the sentence to be tokenized')
flags.DEFINE_string('sentence2', u'',
'the optional second sentence')
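# Example invocation (illustrative; assumes the script is run from the
# python/ directory so the relative 'helpers' import path above resolves):
#   python dream/tokenize_sentence.py --sentence="i hate kickshaws"
# This prints the word-piece tokens and their vocabulary ids.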
def main(_):
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained('bert-base-uncased')
# Print the tokenized sentence and its vocabulary ids
tokens = tokenization_helper.tokenize_input_sentence(
tokenizer, FLAGS.sentence, FLAGS.sentence2)
print(tokens)
print(tokenizer.convert_tokens_to_ids(tokens))
if __name__ == '__main__':
app.run(main)
| 1,506 | 34.046512 | 80 | py |
interpretability | interpretability-master/text-dream/python/dream/similar_embedding_activation.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check the activation for embeddings similar to the one of the token."""
import json
import os
from absl import app
from absl import flags
from pytorch_pretrained_bert import modeling
from pytorch_pretrained_bert import tokenization
import torch
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import embeddings_helper
import inference_helper
import one_hots_helper
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
'the sentence to start with')
flags.DEFINE_string('output_dir', None,
'the output directory where the results will be '
'written.')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'The name of the model '
'configuration to load')
flags.DEFINE_integer('num_iterations', 1000, 'number of optimization steps')
flags.DEFINE_integer('layer_id', None, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', None, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', None, 'neuron to optimize activation for')
flags.DEFINE_integer('change_word', None, 'token to be changed to similar '
'embeddings')
flags.DEFINE_integer('top_k', 100, 'how many embeddings to look at')
flags.DEFINE_bool('cosine', False, 'use cosine similarity instead of p2 distance')
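# Example invocation (flag values and paths are illustrative; assumes the
# script is run from the python/ directory so 'helpers' resolves):
#   python dream/similar_embedding_activation.py --sentence="i hate kickshaws" \
#     --output_dir=/tmp/dreams --layer_id=5 --word_id=2 --neuron_id=414 \
#     --change_word=2 --top_k=100
# Results are written to <output_dir>/closest_to_<sentence>_<change_word>.json.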
def write_closest_activations(closest, distances, activations, tokenizer,
tokens, furthest):
"""Write a file that contains the results of this run.
Args:
closest: The closest tokens that have been found.
distances: Distances to the initial token for the closest embeddings.
activations: The activations that these tokens produced.
tokenizer: The tokenizer to convert the closest parameter to real tokens.
tokens: The tokens of the input sentence.
furthest: The distance for the furthest embedding, for normalization.
"""
results = {
'type': 'similar_embeddings',
'sentence': FLAGS.sentence,
'layer_id': FLAGS.layer_id,
'neuron_id': FLAGS.neuron_id,
'word_id': FLAGS.word_id,
'change_word': FLAGS.change_word,
'cosine': FLAGS.cosine,
'tokens': tokens,
'tops': [],
'furthest': furthest.item()
}
closest_tokens = tokenizer.convert_ids_to_tokens(closest.data.cpu().numpy())
for i in range(len(closest)):
results['tops'].append({
'token': closest_tokens[i],
'activation': activations[i],
'distance': distances[i].item()
})
results_path = os.path.join(FLAGS.output_dir,
'closest_to_{}_{}.json'.format(FLAGS.sentence,
FLAGS.change_word))
results_file = open(results_path, 'w')
json.dump(results, results_file)
results_file.close()
def try_similar_embeddings(tokenizer, model, device):
"""Get the activation values for similar embeddings to a given embedding.
Args:
tokenizer: Tokenizer used to tokenize the input sentence.
model: Model to retrieve the activation from.
device: Device on which the variables are stored.
Returns:
closest: Closest tokens to the one that has been passed to modify.
distances: Distances of these tokens to the original embedding.
activations: Activations of these close tokens.
tokens: The tokens of the input sentence.
furthest: Distance of the furthest embedding, used for normalization.
"""
embedding_map = embeddings_helper.EmbeddingMap(device, model)
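# What follows: embed the token at position change_word, look up its top_k
# nearest vocabulary embeddings (plus the single furthest one, used to
# normalize distances), then substitute each candidate id into the sentence
# and record the activation of the selected layer/word/neuron.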
tokens = tokenization_helper.tokenize_input_sentence(
tokenizer, FLAGS.sentence, '')
idx_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
tokenizer, tokens, device)
_, modify, _ = one_hots_helper.get_one_hots(
idx_tensor.data.cpu().numpy(), FLAGS.change_word, FLAGS.change_word,
device, grad=False)
modify_embedding = torch.matmul(modify, embedding_map.embedding_map)
distances, closest = embeddings_helper.get_closest_embedding(
modify_embedding[0], embedding_map, cosine=FLAGS.cosine,
top_k=FLAGS.top_k)
furthest, _ = embeddings_helper.get_closest_embedding(
modify_embedding[0], embedding_map, cosine=FLAGS.cosine,
top_k=1, furthest=True)
idx_array = idx_tensor.data.cpu().numpy()
activations = []
for i in range(len(closest)):
idx_array[0][FLAGS.change_word] = closest[i]
tensor = torch.tensor(idx_array)
if device.type == 'cuda':
tensor = tensor.to(device)
layers_act = inference_helper.run_inference_vanilla(tensor, segments_tensor,
model)
activation = activation_helper.get_activation(
layers_act, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, True)
activations.append(activation.item())
print(f'{i}/{len(closest)}\r', end="")
print()
return closest, distances, activations, tokens, furthest
def main(_):
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(FLAGS.model_config)
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained(FLAGS.model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device: ', device)
model = model.to(device)
closest, distances, activations, tokens, furthest = try_similar_embeddings(
tokenizer, model, device)
write_closest_activations(closest, distances, activations, tokenizer, tokens,
furthest)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| 6,200 | 39.529412 | 80 | py |
interpretability | interpretability-master/text-dream/python/dream/reconstruct_activation.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""This tries to dream so that an activation vector gets reproduced."""
from absl import app
from absl import flags
import torch
import torch.nn.functional as F
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import attention_mask_helper
import embeddings_helper
import folder_helper
import inference_helper
import one_hots_helper
import optimization_helper
import output_helper
import setup_helper
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
'the sentence to start with')
flags.DEFINE_string('sentence2', u'', 'an optional second sentence')
flags.DEFINE_string('output_dir', None,
'the output directory where the results will be '
'written.')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'The name of the model '
'configuration to load')
flags.DEFINE_integer('num_iterations', 10000, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', None, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', None, 'neuron to optimize activation for')
flags.DEFINE_integer('dream_start', 1, 'first token that is to be changed in '
'the sentence')
flags.DEFINE_integer('dream_end', 0, 'last token that is to be changed in the '
'sentence')
flags.DEFINE_integer('warmup', 200, 'how long before the temperature of the '
'softmax gets adjusted')
flags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')
flags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')
flags.DEFINE_float('anneal', 0.9995, 'annealing factor for the temperature')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')
flags.DEFINE_bool('write_top_k', False, 'write top words for each iteration')
flags.DEFINE_integer('k', 10, 'number of top ranked words to store for each '
'iteration')
flags.DEFINE_integer('metrics_frequency', 1000, 'frequency at which results are '
'saved')
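# Example invocation (flag values and paths are illustrative; assumes the
# script is run from the python/ directory so 'helpers' resolves):
#   python dream/reconstruct_activation.py --sentence="i hate kickshaws" \
#     --output_dir=/tmp/dreams --layer_id=5 --num_iterations=10000
# A timestamped 'reconstruct*' subdirectory containing results.json is created
# inside output_dir.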
def deep_dream(data, results, params, device, tokenizer, embedding_map, model):
"""Deep dream to a target activation.
Args:
data: The data placeholder for top_k processing.
results: Holds the results of the run.
params: Holds the parameters of the run.
device: Where to place new variables.
tokenizer: Used to convert between ids and tokens.
embedding_map: Holding all BERT token embeddings.
model: The model used for this dream.
"""
# An embedding for the tokens is obtained
tokens = tokenization_helper.tokenize_input_sentence(
tokenizer, FLAGS.sentence, FLAGS.sentence2)
tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
tokenizer, tokens, device)
_, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(
tokens_tensor, segments_tensor, model)
# Correct the end of the dream if necessary
if FLAGS.dream_end == 0:
FLAGS.dream_end = len(tokens) - 2
# Write the parameters to a file
output_helper.get_params(params, FLAGS, tokens)
# Get the smooth one-hot vector that is to be optimized, split into static and
# modifiable parts
before, modify, after = one_hots_helper.get_one_hots(
tokens_tensor.data.cpu().numpy(), FLAGS.dream_start, FLAGS.dream_end,
device)
modify = torch.randn(modify.shape, device=device, requires_grad=True)
# Obtain the default attention mask to be able to run the model
att_mask = attention_mask_helper.get_attention_mask(tokens_tensor)
# The optimizer used to modify the input embedding
optimizer = torch.optim.Adam([modify], lr=FLAGS.learning_rate)
# Init temperature for Gumbel
temperature = torch.tensor(FLAGS.start_temp, device=device,
requires_grad=False)
# Obtain the target activation we try to optimize towards.
target_ids = tokens_tensor.data.cpu().numpy()[0]
target_activation = activation_helper.get_ids_activation(
target_ids, pos_embeddings, sentence_embeddings, att_mask,
FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,
FLAGS.layer_id, False, embedding_map, model, device)
target_activation = target_activation.clone().detach().requires_grad_(False)
# Obtain the properties of the initial embedding
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
FLAGS.gumbel)
max_values, tokens_ids = one_hots_helper.get_tokens_from_one_hots(
torch.cat([before, one_hots_sm, after], dim=1))
numpy_max_values = max_values.data.cpu().numpy()
ids = tokens_ids.data.cpu().numpy()[0]
tokens = tokenizer.convert_ids_to_tokens(ids)
ids_activation = activation_helper.get_ids_activation(
ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
embedding_map, model, device)
# Write the initial stuff for the results file
output_helper.init_results(results)
# Optimize the embedding for i iterations and update the properties to
# evaluate the result in each step
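# Each step softmaxes the relaxed token distribution (optionally with Gumbel
# noise), runs BERT on the resulting soft word embeddings, and minimizes the
# MSE between the selected activation and the target activation; after the
# warmup the temperature is annealed so the distribution converges towards
# hard tokens.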
for i in range(FLAGS.num_iterations):
# Do an optimization step
max_vals, tokens_ids, loss = optimization_helper.step_towards_activation(
optimizer, before, modify, after, pos_embeddings,
sentence_embeddings, att_mask, temperature, i, FLAGS.gumbel,
FLAGS.write_top_k, FLAGS.k, data, FLAGS.word_id, FLAGS.neuron_id,
FLAGS.layer_id, FLAGS.dream_start, FLAGS.dream_end, tokenizer,
embedding_map, model, target_activation)
# Write the properties of the last step
ids_loss = F.mse_loss(ids_activation, target_activation)
if (i % FLAGS.metrics_frequency) == 0:
output_helper.get_metrics(
tokens, i, temperature, numpy_max_values, results,
loss=loss, ids_loss=ids_loss)
# Set the numpy max values
numpy_max_values = max_vals.data.cpu().numpy()
# Obtain the activation property for the id-array that would result from the
# optimization
ids = tokens_ids.data.cpu().numpy()[0]
tokens = tokenizer.convert_ids_to_tokens(ids)
# Calculate the activation using the highest scoring words
ids_activation = activation_helper.get_ids_activation(
ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
embedding_map, model, device)
# Check if the temperature needs to decrease
if i > FLAGS.warmup:
temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)
# Calculate the final activation just as before, but without backprop
if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:
with torch.no_grad():
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
FLAGS.gumbel)
fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
if FLAGS.write_top_k:
output_helper.write_top_ks(fused_one_hots, FLAGS.k,
FLAGS.num_iterations, data,
FLAGS.dream_start, FLAGS.dream_end,
tokenizer)
layer_activations = inference_helper.run_inference(
before, one_hots_sm, after, pos_embeddings, sentence_embeddings,
att_mask, embedding_map, model)
activation = activation_helper.get_activations(
layer_activations, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id)
loss = F.mse_loss(activation, target_activation)
ids_loss = F.mse_loss(ids_activation, target_activation)
output_helper.get_metrics(
tokens, FLAGS.num_iterations, temperature, numpy_max_values, results,
loss=loss, ids_loss=ids_loss)
def reconstruct_activation(device, tokenizer, embedding_map, model, base_path):
"""Reconstruct the activation for a given sentence.
Args:
device: The device to use for training the model.
tokenizer: Used to convert between sentences, tokens, and ids.
embedding_map: Map containing all the pretrained embeddings of the model.
model: BERT model used for the dreaming process.
base_path: Location of where to write the results.
"""
data = []
results = {}
params = {}
# Actually do the optimization
deep_dream(data, results, params, device, tokenizer, embedding_map, model)
# If the top k file is to be written, write it
if FLAGS.write_top_k:
output_helper.write_top_ks(base_path, data, FLAGS.dream_start, params)
output_helper.write_results(base_path, results, params, 'reconstruct')
def main(_):
tokenizer, model, device, emb_map = setup_helper.setup_uncased(
FLAGS.model_config)
# Make a directory for the current run
folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
base_path = folder_helper.make_timestamp_directory(FLAGS.output_dir,
prefix='reconstruct')
# Start the run
reconstruct_activation(device, tokenizer, emb_map, model, base_path)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| 10,106 | 46.228972 | 80 | py |
interpretability | interpretability-master/text-dream/python/dream/reconstruct_shifted_activation.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Reconstructs the activation of a sentence shifted towards a different context."""
import json
import os
from absl import app
from absl import flags
import numpy as np
import torch
import torch.nn.functional as F
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import attention_mask_helper
import embeddings_helper
import folder_helper
import inference_helper
import one_hots_helper
import optimization_helper
import output_helper
import setup_helper
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
'the sentence to start with')
flags.DEFINE_string('sentence2', u'', 'an optional second sentence')
flags.DEFINE_string('output_dir', None,
'the output directory where the results will be '
'written.')
flags.DEFINE_string('shift_vector', None, 'the file that holds the vector '
'indicating which direction to shift to')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'the name of the model '
'configuration to load')
flags.DEFINE_string('target', None, 'target of the shifted activation process')
flags.DEFINE_integer('num_iterations', 10, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', None, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', None, 'neuron to optimize activation for')
flags.DEFINE_integer('shift_start', 1, 'first token that is to be shifted in '
'the sentence')
flags.DEFINE_integer('shift_end', 1, 'last token that is to be shifted in the '
'sentence')
flags.DEFINE_integer('dream_start', 1, 'first token that is to be changed in '
'the sentence')
flags.DEFINE_integer('dream_end', 0, 'last token that is to be changed in the '
'sentence')
flags.DEFINE_integer('warmup', 200, 'how long before the temperature of the '
'softmax gets adjusted')
flags.DEFINE_integer('k', 10, 'number of top ranked words to store for each '
'iteration')
flags.DEFINE_integer('metrics_frequency', 250, 'frequency at which results are '
'saved')
flags.DEFINE_float('start_temp', 2.0, 'start-temperature of the softmax')
flags.DEFINE_float('end_temp', 0.1, 'end-temperature of the softmax')
flags.DEFINE_float('anneal', 0.9995, 'annealing factor for the temperature')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_float('shift_magnitude', 0.1, 'the magnitude with which to '
'change the activation vector')
flags.DEFINE_bool('gumbel', False, 'use gumbel noise with the softmax')
flags.DEFINE_bool('write_top_k', False, 'write top words for each iteration')
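# Example invocation (flag values and paths are illustrative; assumes the
# script is run from the python/ directory so 'helpers' resolves):
#   python dream/reconstruct_shifted_activation.py --sentence="i hate kickshaws" \
#     --output_dir=/tmp/dreams --shift_vector=/tmp/shift --target=shifted \
#     --layer_id=5 --shift_magnitude=0.1
# Results are written to <output_dir>/<layer_id>/<shift_magnitude>/, and the
# shift_vector directory is expected to contain <layer_id>/final_weights.np.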
def shift_target_activation(target_activation, device):
"""Shift the target activation in the desired direction.
Args:
target_activation: The old target activation to be changed.
device: Device to load variables to.
Returns:
target_activation: The new, changed target activation to optimize for.
"""
shift_path = os.path.join(FLAGS.shift_vector, str(FLAGS.layer_id),
'final_weights.np')
shift_file = open(shift_path, 'rb')
shift = np.load(shift_file)
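# The loaded vector is rescaled so that its largest component is 1, multiplied
# by shift_magnitude, and then added to the target activation of every token
# position in [shift_start, shift_end].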
shift_tensor = torch.tensor(shift)
shift_tensor = shift_tensor.to(device)
shift_tensor = shift_tensor.reshape(-1)
shift_tensor = shift_tensor / torch.max(shift_tensor)
shift_tensor = shift_tensor * FLAGS.shift_magnitude
for i in range(FLAGS.shift_start, FLAGS.shift_end+1):
target_activation[i] = target_activation[i] + shift_tensor
return target_activation
def deep_dream(data, results, params, device, tokenizer, embedding_map, model):
"""Deep dream to a target activation.
Args:
data: Holds the top-k values.
results: Holds the results of the run.
params: Holds the parameters of the run.
device: Where to place new variables.
tokenizer: Used to convert between ids and tokens.
embedding_map: Holding all BERT token embeddings.
model: The model used for this dream.
"""
# An embedding for the tokens is obtained
tokens = tokenization_helper.tokenize_input_sentence(
tokenizer, FLAGS.sentence, FLAGS.sentence2)
tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
tokenizer, tokens, device)
_, pos_embeddings, sentence_embeddings = embeddings_helper.get_embeddings(
tokens_tensor, segments_tensor, model)
# Correct the end of the dream if necessary
if FLAGS.dream_end == 0:
FLAGS.dream_end = len(tokens) - 2
# Write the parameters to a file
output_helper.get_params(params, FLAGS, tokens)
# Get the smooth one-hot vector that is to be optimized, split into static and
# modifiable parts
before, modify, after = one_hots_helper.get_one_hots(
tokens_tensor.data.cpu().numpy(), FLAGS.dream_start, FLAGS.dream_end,
device)
modify = torch.randn(modify.shape, device=device, requires_grad=True)
# Obtain the default attention mask to be able to run the model
att_mask = attention_mask_helper.get_attention_mask(tokens_tensor)
# The optimizer used to modify the input embedding
optimizer = torch.optim.Adam([modify], lr=FLAGS.learning_rate)
# Init temperature for Gumbel
temperature = torch.tensor(FLAGS.start_temp, device=device,
requires_grad=False)
# Obtain the target activation we try to optimize towards.
target_ids = tokens_tensor.data.cpu().numpy()[0]
target_activation = activation_helper.get_ids_activation(
target_ids, pos_embeddings, sentence_embeddings, att_mask,
FLAGS.dream_start, FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id,
FLAGS.layer_id, False, embedding_map, model, device)
target_activation = shift_target_activation(target_activation, device)
target_activation = target_activation.clone().detach().requires_grad_(False)
# Obtain the properties of the initial embedding
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
FLAGS.gumbel)
max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(
torch.cat([before, one_hots_sm, after], dim=1))
numpy_max_values = max_values.data.cpu().numpy()
ids = token_ids.data.cpu().numpy()[0]
tokens = tokenizer.convert_ids_to_tokens(ids)
ids_activation = activation_helper.get_ids_activation(
ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
embedding_map, model, device)
# Write the initial stuff for the results file
output_helper.init_results(results)
# Optimize the embedding for i iterations and update the properties to
# evaluate the result in each step
for i in range(FLAGS.num_iterations):
# Do an optimization step
max_vals, token_ids, loss = optimization_helper.step_towards_activation(
optimizer, before, modify, after, pos_embeddings,
sentence_embeddings, att_mask, temperature, i, FLAGS.gumbel,
FLAGS.write_top_k, FLAGS.k, data, FLAGS.word_id, FLAGS.neuron_id,
FLAGS.layer_id, FLAGS.dream_start, FLAGS.dream_end, tokenizer,
embedding_map, model, target_activation)
# Write the properties of the last step
ids_loss = F.mse_loss(ids_activation, target_activation)
if (i % FLAGS.metrics_frequency) == 0:
output_helper.get_metrics(
tokens, i, temperature, numpy_max_values, results,
loss=loss, ids_loss=ids_loss)
# Set the numpy max values
numpy_max_values = max_vals.data.cpu().numpy()
# Obtain the activation property for the id-array that would result from the
# optimization
ids = token_ids.data.cpu().numpy()[0]
tokens = tokenizer.convert_ids_to_tokens(ids)
# Calculate the activation using the highest scoring words
ids_activation = activation_helper.get_ids_activation(
ids, pos_embeddings, sentence_embeddings, att_mask, FLAGS.dream_start,
FLAGS.dream_end, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, False,
embedding_map, model, device)
# Check if the temperature needs to decrease
if i > FLAGS.warmup:
temperature = torch.clamp(temperature * FLAGS.anneal, FLAGS.end_temp)
# Calculate the final activation just as before, but without backprop
if (FLAGS.num_iterations % FLAGS.metrics_frequency) == 0:
with torch.no_grad():
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature,
FLAGS.gumbel)
fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
if FLAGS.write_top_k:
output_helper.write_top_ks(fused_one_hots, FLAGS.k,
FLAGS.num_iterations, data,
FLAGS.dream_start, FLAGS.dream_end,
tokenizer)
layers = inference_helper.run_inference(before, one_hots_sm, after,
pos_embeddings,
sentence_embeddings, att_mask,
embedding_map, model)
activation = activation_helper.get_activations(
layers, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id)
loss = F.mse_loss(activation, target_activation)
ids_loss = F.mse_loss(ids_activation, target_activation)
output_helper.get_metrics(
tokens, FLAGS.num_iterations, temperature, numpy_max_values, results,
loss=loss, ids_loss=ids_loss)
def reconstruct_shifted_activation(device, tokenizer, emb_map, model):
"""Reconstruct the activation for a given sentence after it has been shifted.
Args:
device: The device to use for training the model.
tokenizer: Used to convert between sentences, tokens, and ids.
emb_map: Map containing all the pretrained embeddings of the model.
model: BERT model used for the dreaming process.
"""
data = []
results = {}
params = {}
# Create a folder for this experiment
layer_dir = os.path.join(FLAGS.output_dir, str(FLAGS.layer_id))
folder_helper.make_folder_if_not_exists(layer_dir)
magnitude_dir = os.path.join(layer_dir, str(FLAGS.shift_magnitude))
folder_helper.make_folder_if_not_exists(magnitude_dir)
# Actually do the optimization
deep_dream(data, results, params, device, tokenizer, emb_map, model)
# If the top k file is to be written, write it
if FLAGS.write_top_k:
for i in range(len(data)):
top_k_path = os.path.join(magnitude_dir, 'top_k' + str(i) + '.json')
top_k_file = open(top_k_path, 'w')
json.dump(data[i], top_k_file)
top_k_file.close()
output_helper.write_results(magnitude_dir, results, params,
'reconstruct_shifted')
def main(_):
tokenizer, model, device, emb_map = setup_helper.setup_uncased(
FLAGS.model_config)
# Make a directory for the current run
folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
# Start the run
reconstruct_shifted_activation(device, tokenizer, emb_map, model)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
flags.mark_flag_as_required('shift_vector')
flags.mark_flag_as_required('target')
app.run(main)
| 12,102 | 45.55 | 82 | py |
interpretability | interpretability-master/text-dream/python/dream/token_search.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Searches the token that maximally activates a sentence/layer/neuron/word combination."""
import json
import os
from absl import app
from absl import flags
from pytorch_pretrained_bert import modeling
from pytorch_pretrained_bert import tokenization
import torch
import sys
sys.path.insert(1, 'helpers')
import activation_helper
import embeddings_config
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'i hate kickshaws',
'the sentence to start with')
flags.DEFINE_string('output_dir', None,
'the output directory where the results will be '
'written.')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', 2, 'word to optimize activation for')
flags.DEFINE_integer('neuron_id', 414, 'neuron to optimize activation for')
flags.DEFINE_integer('change_word', 2, 'the word to be replaced')
flags.DEFINE_integer('top_k', 100, 'how many embeddings to look at')
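# Example invocation (flag values and paths are illustrative; assumes the
# script is run from the python/ directory so 'helpers' resolves):
#   python dream/token_search.py --sentence="i hate kickshaws" \
#     --output_dir=/tmp/dreams --layer_id=5 --word_id=2 --neuron_id=414 \
#     --change_word=2
# Results are written to
# <output_dir>/top_tokens_in_<sentence>_<change_word>.json.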
def write_best_tokens(ids, activations, tokenizer, tokens):
"""Write a file that contains the results of this run.
Args:
ids: The ids of the highest-activation tokens that have been found.
activations: The activations that these tokens produced.
tokenizer: The tokenizer to convert the closest parameter to real tokens.
tokens: The tokens of the input sentence.
"""
results = {
'type': 'token_search',
'sentence': FLAGS.sentence,
'layer_id': FLAGS.layer_id,
'neuron_id': FLAGS.neuron_id,
'word_id': FLAGS.word_id,
'change_word': FLAGS.change_word,
'tokens': tokens,
'tops': [],
}
tokens = tokenizer.convert_ids_to_tokens(ids.data.cpu().numpy())
for i in range(len(tokens)):
results['tops'].append({
'token': tokens[i],
'activation': activations[i].item(),
})
results_path = os.path.join(FLAGS.output_dir,
'top_tokens_in_{}_{}.json'.format(
FLAGS.sentence, FLAGS.change_word))
results_file = open(results_path, 'w')
json.dump(results, results_file)
results_file.close()
def run_inference(tokens_tensor, segments_tensor, model):
"""Run inference on the model.
Args:
tokens_tensor: The tokens to infer the activation from.
segments_tensor: Segments of the sequence to retrieve the activation for.
model: The model to run inference on.
Returns:
activation: Activation of the model given the parameters.
"""
with torch.no_grad():
layers_act, _ = model(tokens_tensor, segments_tensor)
layers_act = torch.stack(layers_act).permute(1, 0, 2, 3)
return activation_helper.get_activation(
layers_act, FLAGS.word_id, FLAGS.neuron_id, FLAGS.layer_id, True)
def try_tokens(tokenizer, device, model):
"""Try all possible tokens for one word.
Args:
tokenizer: Tokenizer to convert the input.
device: Where variables will be stored.
model: The model to run inference on.
The top_k activations and the corresponding tokens are written to a file via
write_best_tokens rather than returned.
"""
# An embedding for the tokens is obtained
tokens = tokenization_helper.tokenize_input_sentence(
tokenizer, FLAGS.sentence, '')
tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
tokenizer, tokens, device)
idx_array = tokens_tensor.data.cpu().numpy()
activations = []
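# Brute-force search: substitute every vocabulary id at position change_word,
# run inference, and record the resulting activation; the top_k ids by
# activation are written out below.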
for i in range(embeddings_config.NUM_EMBEDDED_TOKENS):
idx_array[0][FLAGS.change_word] = i
tensor = torch.tensor(idx_array)
if device.type == 'cuda':
tensor = tensor.to(device)
activation = run_inference(tensor, segments_tensor, model)
activations.append(activation)
print(f'{i}/{embeddings_config.NUM_EMBEDDED_TOKENS}\r', end="")
print()
activations_tensor = torch.stack(activations)
token_activations, ids = torch.topk(activations_tensor, FLAGS.top_k)
write_best_tokens(ids, token_activations, tokenizer, tokens)
def main(_):
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained('bert-base-uncased')
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained('bert-base-uncased')
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
model = model.to(device)
try_tokens(tokenizer, device, model)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
app.run(main)
| 5,144 | 35.489362 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/one_hots_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides functions related to one-hot handling and conversion of tokens."""
import numpy as np
import torch
import embeddings_config
def get_one_hots(indices, modify_start, modify_end, device, grad=True):
"""Get one-hot vectors for each relevant part of the input.
Args:
indices: The indices that should be hot.
modify_start: The start index of words that are to be modified.
modify_end: The end index of words that are to be modified.
device: Where to store the newly created variables.
grad: Does the modifiable part require a gradient?
Returns:
before: One-hot tensor for anything before the word to be changed.
change: One-hot tensor for the word to be changed.
after: One-hot tensor for anything after the word to be changed.
"""
# Numpy array of zeros with correct shape
one_hot_numpy = np.zeros((1, indices.size,
embeddings_config.NUM_EMBEDDED_TOKENS))
# Set the word representative to one
one_hot_numpy[0, np.arange(indices.size), indices] = 1
# Convert the numpy array to a tensor
before = torch.tensor(one_hot_numpy[[0], 0:modify_start], device=device,
dtype=torch.float, requires_grad=False)
change = torch.tensor([one_hot_numpy[0, modify_start:modify_end+1, :]],
device=device, dtype=torch.float, requires_grad=grad)
after = torch.tensor(one_hot_numpy[[0], (modify_end+1):], device=device,
dtype=torch.float, requires_grad=False)
return before, change, after
# Return the tokens that correspond to the
# maximally activated element in the one-hot tensor
def get_tokens_from_one_hots(one_hots):
return torch.max(one_hots, 2)
# Force the word vector to be capped at one and sum to one
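# With gumbel=True this is the Gumbel-softmax trick: noise g = -log(-log(u)),
# u ~ Uniform(0, 1), is added to the log-probabilities so that
# softmax((log(p) + g) / temperature) approximates sampling from the
# categorical distribution; lowering the temperature pushes the output towards
# a hard one-hot vector.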
def softmax_one_hots(one_hots, temperature, gumbel=True):
if gumbel:
gumbel_noise = -torch.log(
-torch.log(torch.rand_like(one_hots, requires_grad=False)))
return torch.nn.functional.softmax((torch.log(
torch.clamp_min(one_hots, 1e-10)) + gumbel_noise) / temperature, -1)
else:
return torch.nn.functional.softmax(one_hots / temperature, -1)
def get_one_hots_mlm(indices, cbs, cbe, cas, cae, device, grad=True):
"""Get the one_hot vectors that are used for inference and optimization.
Args:
indices: The indices array to get the one_hots from.
cbs: The index of the start of the changeable part before maximize_word.
cbe: The index of the end of the changeable part before maximize_word.
cas: The index of the start of the changeable part after maximize_word.
cae: The index of the end of the changeable part after maximize_word.
device: The device to store new tensors on.
grad: Whether the changeable parts should be differentiable.
Returns:
before: Everything that can't be changed before the maximize_word.
change1: Everything that can be changed before the maximize_word.
max_part: Everything that can't be changed around the maximize_word.
change2: Everything that can be changed after the maximize_word.
after: Everything that can't be changed after the maximize_word.
"""
# Numpy array of zeros with correct shape
one_hot_numpy = np.zeros((1, indices.size,
embeddings_config.NUM_EMBEDDED_TOKENS))
# Set the word representative to one
one_hot_numpy[0, np.arange(indices.size), indices] = 1
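# The sequence is sliced into [before | change1 | max_part | change2 | after],
# with change1 covering indices [cbs, cbe] and change2 covering [cas, cae].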
# Convert the numpy array to a tensor
before = torch.tensor(one_hot_numpy[[0], 0:cbs], device=device,
dtype=torch.float, requires_grad=False)
max_part = torch.tensor(one_hot_numpy[[0], (cbe+1):cas], device=device,
dtype=torch.float, requires_grad=False)
after = torch.tensor(one_hot_numpy[[0], (cae+1):], device=device,
dtype=torch.float, requires_grad=False)
if grad:
change1 = torch.tensor(one_hot_numpy[[0], cbs:(cbe+1)], device=device,
dtype=torch.float, requires_grad=True)
change2 = torch.tensor(one_hot_numpy[[0], cas:(cae+1)], device=device,
dtype=torch.float, requires_grad=True)
else:
change1 = torch.tensor(one_hot_numpy[[0], cbs:(cbe+1)], device=device,
dtype=torch.float, requires_grad=False)
change2 = torch.tensor(one_hot_numpy[[0], cas:(cae+1)], device=device,
dtype=torch.float, requires_grad=False)
return before, change1, max_part, change2, after
| 5,144 | 45.351351 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/embeddings_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles all functionality related to embeddings."""
import os
import numpy as np
import torch
import activation_helper
import embeddings_config
class EmbeddingMap(object):
"""Holds and handles embeddings for all tokens."""
def __init__(self, device, model):
# Initialize an empty embedding map
self.embedding_map_dict = []
# Obtain a word embedding for each token in the vocabulary
for i in range(embeddings_config.NUM_EMBEDDED_TOKENS):
tokens_tensor = torch.tensor([[i]], device=device, requires_grad=False)
self.embedding_map_dict.append(
model.embeddings.word_embeddings(tokens_tensor)[0][0])
# We want this to be a leaf node that has nothing to backprop to.
# This improves backprop speed by a lot! Thus, detach.
self.embedding_map = torch.stack(
self.embedding_map_dict).clone().detach().requires_grad_(False)
self.mean_distance = torch.mean(torch.norm(self.embedding_map, dim=1))
# Get the closest embeddings to a given embedding vector
def get_closest_embedding(embedding, embedding_map, cosine=False, top_k=100,
furthest=False):
"""Get the closest embeddings to a given embedding.
Args:
embedding: The embedding to search other close embeddings for.
embedding_map: Contains all embeddings.
cosine: Whether to use cosine similarity.
top_k: How many of the top similar embeddings to get.
furthest: Whether to get the furthest embedding instead of the closest.
Returns:
topk: Top candidates for closest activations.
"""
reshaped_embedding = embedding.repeat(embeddings_config.NUM_EMBEDDED_TOKENS,
1)
if cosine:
pairwise = torch.cosine_similarity(embedding_map.embedding_map,
reshaped_embedding)
return torch.topk(pairwise, top_k, largest=furthest)
else:
pairwise = torch.pairwise_distance(embedding_map.embedding_map,
reshaped_embedding)
return torch.topk(pairwise, top_k, largest=furthest)
def get_closest_embeddings(embeddings, embedding_map, cosine=False):
"""Get the closest embeddings for the current word representations.
Args:
embeddings: The current token embeddings.
embedding_map: The embedding map to lookup similar embeddings from.
cosine: Whether to use cosine similarity.
Returns:
embedding_ids: Ids for realistic embeddings similar to current embeddings.
distances: The distances of these embeddings to the actual embedding.
"""
# Placeholders for tokens and distances to the actual embedding
embedding_ids = []
distances = []
# Do this for each word individually
for i in range(embeddings.size()[1]):
# Get the token and distance for the current word, and append them to the
# list of tokens and distances
distance, embedding = get_closest_embedding(embeddings[0][i], embedding_map,
cosine=cosine, top_k=1)
embedding_ids.append(embedding[0].item())
distances.append(distance[0].item())
return embedding_ids, distances
def analyze_current_embedding(fused_one_hots, embedding_map, modify_start,
modify_end, device, position_embeddings,
sentence_embeddings, attention_mask, model,
word_id, neuron_id, layer_id, normalize,
tokenizer, cosine=False):
"""Analyze the activation and closest tokens of the current embedding.
Args:
fused_one_hots: The one hots tensor representing all current tokens.
embedding_map: The embedding map holding all embeddings.
modify_start: The start index of the modifiable content.
modify_end: The end index of the modifiable content.
device: The device on which to store the variables.
position_embeddings: The positional embeddings of the tokens.
sentence_embeddings: Sentence embedding for the sequence.
attention_mask: The attention mask used for inference.
model: The model used for inference runs.
word_id: The id of the word to get activations for.
neuron_id: The id of the neuron to get activations for.
layer_id: The id of the layer to get activations for.
normalize: Whether to normalize the activations.
tokenizer: The tokenizer used to convert between ids and tokens.
cosine: Whether to use cosine similarity for finding similar embeddings.
Returns:
tokens: The tokens for the closest embedding.
activation: The activation of this snapped embedding.
"""
# Get the word embedding from the smooth one_hots vector
words_embeddings = torch.matmul(fused_one_hots, embedding_map.embedding_map)
indices, _ = get_closest_embeddings(words_embeddings, embedding_map,
cosine=cosine)
# Get the activation
activation = activation_helper.get_ids_activation(
np.asarray(indices), position_embeddings, sentence_embeddings,
attention_mask, modify_start, modify_end, word_id, neuron_id, layer_id,
normalize, embedding_map, model, device)
tokens = tokenizer.convert_ids_to_tokens(indices)
return tokens, activation
def get_embeddings(tokens_tensor, segments_tensor, model):
"""Obtain embeddings for word, position, and sequence.
adapted from:
https://github.com/huggingface/pytorch-pretrained-BERT/blob/
2a329c61868b20faee115a78bdcaf660ff74cf41/pytorch_pretrained_bert/
modeling.py#L264-L277)
Args:
tokens_tensor: Tokens for which to get the tokens embedding.
segments_tensor: Used to generate the segments embedding.
model: The model used for inference.
Returns:
words_embeddings: Word embeddings for the given tokens.
position_embeddings: Positional embeddings for the tokens.
sentence_embeddings: Sentence embeddings for the tokens.
"""
seq_length = tokens_tensor.size(1)
position_ids = torch.arange(seq_length, dtype=torch.long,
device=tokens_tensor.device)
position_ids = position_ids.unsqueeze(0).expand_as(tokens_tensor)
if segments_tensor is None:
segments_tensor = torch.zeros_like(tokens_tensor)
# Get the three token types (words, positions, and sentences individually)
words_embeddings = model.embeddings.word_embeddings(tokens_tensor)
position_embeddings = model.embeddings.position_embeddings(position_ids)
sentence_embeddings = model.embeddings.token_type_embeddings(segments_tensor)
return words_embeddings, position_embeddings, sentence_embeddings
def write_embedding(layers_act, emb_pos, tokens, folder):
"""Write the activations for the specified token per layer to a folder.
Args:
layers_act: The activations of each layer in BERT.
emb_pos: The position of the written embedding in the sentence.
tokens: The tokens in the currently inspected sentence.
folder: Where to write the activations to.
"""
sent = u'_'.join(tokens)
for layer in range(layers_act.shape[1]):
file_name = str(emb_pos) + u'_V_' + str(len(tokens)) + u'_' + sent + u'.np'
file_name = file_name.replace('[', '')
file_name = file_name.replace(']', '')
file_name = file_name.replace('/', '')
path = os.path.join(folder, str(layer), file_name)
try:
activation_file = open(path, 'wb')
np.save(activation_file,
layers_act[0][layer][emb_pos].data.cpu().numpy())
except:
print('Path invalid: {}'.format(path.encode('utf8')))
| 8,157 | 42.164021 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/optimization_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Used for optimization steps during dreaming."""
import torch
import torch.nn.functional as F
import activation_helper
import inference_helper
import one_hots_helper
import output_helper
def step_towards_activation(optimizer, before, modify, after, pos_embeddings,
sentence_embeddings, att_mask, temperature,
iteration, gumbel, write_top_k, k_value, data,
word_id, neuron_id, layer_id, modify_start,
modify_end, tokenizer, embedding_map, model,
target_activation):
"""Optimize the sentence towards the target activation.
Args:
optimizer: The optimizer to be used.
before: The tensor for everything before the modifyable content.
modify: The tensor of the modifyable content.
after: The tensor for everything after the modifiable content.
pos_embeddings: The positional embeddings used for inference.
sentence_embeddings: The sentence embeddings for inference.
att_mask: The attention mask used for inference.
temperature: The temperature used for making the softmax spike.
iteration: Current iteration number of the optimization process.
gumbel: Whether to use gumbel noise.
write_top_k: Whether to write the top-rated tokens per iteration.
k_value: How many tokens to write to top_k.
data: Placeholder for the top_k data.
word_id: Word to get the activation for.
neuron_id: Neuron to get the activation for.
layer_id: Layer to get the activation for.
modify_start: The start index of the modifiable content.
modify_end: The end index of the modifiable content.
tokenizer: Used for converting between tokens and ids.
embedding_map: Holding all the token embeddings for BERT.
model: Model to run inference on.
target_activation: The activation we are aiming towards.
Returns:
max_values: The maximal values for the current token representations.
token_ids: The token ids of the current representation.
loss: The current loss towards the target activation.
"""
# Reset the gradient
optimizer.zero_grad()
# Softmax over the one-hots
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature, gumbel)
fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
# Check if top_k should be written
if write_top_k:
output_helper.write_top_ks(fused_one_hots, k_value, iteration, data,
modify_start, modify_end, tokenizer)
# Get the activation
layer_activations = inference_helper.run_inference(
before, one_hots_sm, after, pos_embeddings, sentence_embeddings, att_mask,
embedding_map, model)
activation = activation_helper.get_activations(layer_activations, word_id,
neuron_id, layer_id)
# Calculate the loss as the mean squared error between the current activation
# and the target activation (adam minimizes this loss, pulling the current
# activation towards the target)
loss = F.mse_loss(activation, target_activation)
# Backpropagate the loss
loss.backward(retain_graph=True)
# Optimize the word vector based on that loss
optimizer.step()
# Get the actual tokens and distances to the embedding for this modified
# embedding
one_hots_sm = one_hots_helper.softmax_one_hots(modify, temperature, gumbel)
fused_one_hots = torch.cat([before, one_hots_sm, after], dim=1)
max_values, token_ids = one_hots_helper.get_tokens_from_one_hots(
fused_one_hots)
return max_values, token_ids, loss
| 4,249 | 45.195652 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/activation_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles activations for a given model result."""
import torch
import inference_helper
import one_hots_helper
def get_activation(activations, word_id, neuron_id, layer_id, normalize,
sample=0):
"""Get the average activation value for a parametrization.
Args:
activations: The embedding values for all layers of the model.
word_id: The index of the word for which to get the activation for.
neuron_id: The index of the neuron for which to get the activation for.
layer_id: The index of the layer for which to get the activation for.
normalize: Whether to normalize the activation across all others.
sample: Which sample to use in batched settings.
Returns:
activation: The activation that has been requested.
"""
lid, wid, nid = null_index_check(layer_id, word_id, neuron_id)
sample_activations = activations[sample] # Activations for one batch element
if wid is None: # Activation for the whole sentence
activations = torch.mean(sample_activations, dim=1) # Mean across all words
if nid is None: # Activation for the whole layer
activations = torch.mean(activations, dim=1) # Mean across all neurons
activation = activations[lid] # Select layer
else: # Activation for a specific neuron
activation = activations[lid][nid] # Select layer and neuron
# Normalization to relate the activation to other layers/neurons
# for the whole sentence
if normalize:
activation = activation / torch.norm(sample_activations)
else: # Activation for a specific Word
if nid is None: # Activation for the whole layer
activations = torch.mean(sample_activations, dim=2) # Mean across neurons
activation = activations[lid][wid] # Select word and layer
else: # Activation for only one neuron
activation = sample_activations[lid][wid][nid] # Sel. word, layer, neuron
# Normalization to relate the activation to other layers/neurons
# for that word
if normalize:
activation = activation / torch.norm(sample_activations[:, wid, :])
return activation
def get_activations(activations, word_id, neuron_id, layer_id, sample=0):
"""Get the activations from a inference result.
Args:
activations: The inference result for all layers across batches.
word_id: The word for which to fetch the activation.
neuron_id: The neuron for which to fetch the activation.
layer_id: The layer for which to fetch the activation.
sample: Which training sample to look at for batched training.
Returns:
activations: Activations for the current iteration.
"""
lid, wid, nid = null_index_check(layer_id, word_id, neuron_id)
activations = activations[sample]
if wid is None: # Activations for the whole sentence
if nid is None: # Activations for the whole layer
activations = activations[lid]
else: # Activations for all words in one layer for one neuron
activations = activations[lid, :, nid]
else:
if nid is None: # Activations for one word and layer
activations = activations[lid][wid]
else: # Activations for one layer, word, and neuron
activations = activations[lid][wid][nid]
return activations
def get_ids_activation(ids, pos_embeddings, sentence_embeddings,
att_mask, modify_start, modify_end, word_id,
neuron_id, layer_id, normalize, embedding_map, model,
device, average=False):
"""Get the activations for an id-sequence.
Args:
ids: The ids to get the activations for.
pos_embeddings: Positional embeddings to run inference with.
sentence_embeddings: Sentence embeddings to run inference with.
att_mask: Attention mask used during inference.
modify_start: The start id of the modyfiable sequence.
modify_end: The end id of the modyfiable sequence.
word_id: The word to get the activations for.
neuron_id: The neuron to get the activations for.
layer_id: The layer to get the activations for.
normalize: Whether to normalize the activations.
embedding_map: The embedding map used to get embeddings from one-hots.
model: Model to run inference on.
device: Where to place new variables.
average: Whether to reduce the resulting activation to an average value.
Returns:
activations: Activations for the id sequence.
"""
# Get a one_hot token for these ids
before, within, after = one_hots_helper.get_one_hots(ids, modify_start,
modify_end, device)
# Do not apply a gradient to this model run
with torch.no_grad():
layer_activations = inference_helper.run_inference(
before, within, after, pos_embeddings, sentence_embeddings, att_mask,
embedding_map, model)
if average:
activation = get_activation(layer_activations, word_id, neuron_id, layer_id,
normalize)
else:
activation = get_activations(layer_activations, word_id, neuron_id,
layer_id)
return activation
def null_index_check(layer_id, word_id, neuron_id):
"""Checks if some of the indices should be nulled.
Args:
layer_id: The index for the layer.
word_id: The index for the word.
neuron_id: The index for the neuron.
Returns:
lid: Checked layer id.
wid: Checked word id.
nid: Checked neuron id.
"""
lid = layer_id if layer_id != -1 else None
wid = word_id if word_id != -1 else None
nid = neuron_id if neuron_id != -1 else None
return lid, wid, nid
| 6,274 | 40.833333 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/output_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Writes results and parameters to files."""
import json
import os
import torch
def init_results(results):
"""Init the results variable to be able to hold all iterations.
Args:
results: The variable to be initialized.
"""
results["iterations"] = []
def write_results(base_path, results, params, experiment_type):
"""Write the results to a file.
Args:
base_path: Path where the results get written, has to exist to work.
results: The results of this run.
params: The parameters for this run.
experiment_type: The type of experiment that has been conducted.
"""
results_path = os.path.join(base_path, "results.json")
results_file = open(results_path, "w")
output = {
"params": params,
"results": results,
"type": experiment_type
}
json.dump(output, results_file)
results_file.close()
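# The resulting results.json has the shape (keys illustrative of a
# 'reconstruct' run): {"params": {...}, "results": {"iterations": [...]},
# "type": "reconstruct"}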
def get_metrics(tokens, iteration, temperature, max_values, results, loss=None,
ids_loss=None, activation=None, ids_activation=None,
emb_tokens=None, emb_activation=0.0, emb_ana=0, iterations=0):
"""Return the results of the current iteration.
Args:
tokens: The tokens that currently have the highest one-hot value.
iteration: The current iteration number.
temperature: The current softmax temperature for the one-hot encoding.
max_values: Current value of the maximum token value per token.
results: The file where all this gets written.
loss: The MSE between the current embedding and the target.
ids_loss: The MSE between the tokens embedding and the target.
activation: The activation of the current iteration.
ids_activation: The snapped to tokens activation for the current iteration.
emb_tokens: Optional embedding tokens for closest embedding results.
emb_activation: Optional activation value for closest embedding.
emb_ana: Optional embedding analysis frequency.
iterations: Number of total iterations in this run.
"""
  # Keep the scalar iteration number distinct from the per-iteration record,
  # so the emb_ana check below can still use it.
  iteration_result = {
      "number": iteration,
      "tokens": tokens,
      "temperature": temperature.item(),
      "max_values": max_values[0].tolist()
  }
  if loss is not None:
    iteration_result["loss"] = loss.item()
  if ids_loss is not None:
    iteration_result["ids_loss"] = ids_loss.item()
  if activation is not None:
    iteration_result["activation"] = activation.item()
  if ids_activation is not None:
    iteration_result["ids_activation"] = ids_activation.item()
  if emb_ana != 0:
    if (iteration == iterations) or (iteration % emb_ana == 0):
      iteration_result["emb_tokens"] = str(emb_tokens)
      iteration_result["emb_activation"] = emb_activation.item()
  results["iterations"].append(iteration_result)
def get_params(params, args, tokens, embedding_ana=0):
"""Get the params formatted of the current run.
Args:
params: Variable where the parameters are stored.
args: The arguments to write to the params file.
tokens: The tokens of the input sentence.
embedding_ana: Optional embedding analysis frequency parameter.
"""
params["layer_id"] = args.layer_id
params["word_id"] = args.word_id
params["neuron_id"] = args.neuron_id
params["dream_start"] = args.dream_start
params["dream_end"] = args.dream_end
params["num_iterations"] = args.num_iterations
params["gumbel"] = args.gumbel
params["warmup"] = args.warmup
params["start_temp"] = args.start_temp
params["end_temp"] = args.end_temp
params["anneal"] = args.anneal
params["learning_rate"] = args.learning_rate
params["sentence"] = args.sentence
params["tokens"] = tokens
params["model_config"] = args.model_config
params["metrics_frequency"] = args.metrics_frequency
  if "normalize" in args:
    params["normalize"] = args.normalize
  if "shift_vector" in args:
    params["shift_vector"] = args.shift_vector
  if "shift_start" in args:
    params["shift_start"] = args.shift_start
  if "shift_end" in args:
    params["shift_end"] = args.shift_end
  if "shift_magnitude" in args:
    params["shift_magnitude"] = args.shift_magnitude
  if "target" in args:
    params["target"] = args.target
if embedding_ana != 0:
params["emb_ana"] = embedding_ana
def write_top_ks(base_path, data, dream_start, params):
"""Write the top_k results to a file.
Args:
base_path: Path where the results get written, has to exist to work.
data: The results of the top_k analysis of this run.
dream_start: The start index of the dreaming process.
params: The parameters of this run.
"""
for i in range(len(data)):
data[i]["word_id"] = dream_start + i
data[i]["params"] = params
data[i]["type"] = "top_words"
top_k_path = os.path.join(base_path,
"top_k" + str(i + dream_start) + ".json")
top_k_file = open(top_k_path, "w")
json.dump(data[i], top_k_file)
top_k_file.close()
def get_top_ks(one_hots_sm, k, iteration, data, modify_start, modify_end,
tokenizer, activation=None):
"""Writes the top scores for one word into the data variable.
Args:
one_hots_sm: Softmaxed one hot vector of tokens.
k: Number of top tokens to output.
iteration: The current iteration number of the process.
data: The data object that holds the top values.
    modify_start: The start index of modifiable content.
    modify_end: The end index of modifiable content.
tokenizer: Used to convert between ids and tokens.
activation: Optionally also write the activation to the file.
"""
for i in range(modify_end - modify_start + 1):
if iteration == 0:
data.append({})
data[i]["iterations"] = []
scores, indices = torch.topk(one_hots_sm[0][modify_start + i], k, -1)
ind_np = indices.data.cpu().numpy()
tokens = tokenizer.convert_ids_to_tokens(ind_np)
scores_np = scores.data.cpu().numpy()
data[i]["iterations"].append({
"number": iteration,
"indices": ind_np.tolist(),
"tokens": tokens,
"scores": scores_np.tolist()
})
if activation is not None:
last_iter = len(data[i]["iterations"]) - 1
data[i]["iterations"][last_iter]["activation"] = activation.item()
def get_params_mlm(params, args, tokens):
"""Get the params formatted of the current run.
Args:
params: Variable where the parameters are stored.
args: The arguments to write to the params file.
tokens: The tokens of the input sentence.
"""
params["maximize_word"] = args.maximize_word
params["maximize_id"] = args.maximize_id
params["dream_before_start"] = args.dream_before_start
params["dream_before_end"] = args.dream_before_end
params["dream_after_start"] = args.dream_after_start
params["dream_after_end"] = args.dream_after_end
params["num_iterations"] = args.num_iterations
params["gumbel"] = args.gumbel
params["warmup"] = args.warmup
params["start_temp"] = args.start_temp
params["end_temp"] = args.end_temp
params["anneal"] = args.anneal
params["learning_rate"] = args.learning_rate
params["sentence"] = args.sentence
params["tokens"] = tokens
params["model_config"] = args.model_config
params["metrics_frequency"] = args.metrics_frequency
params["normalize"] = args.normalize
def get_metrics_mlm(tokens, prediction, ids_prediction, iteration, temperature,
max_values, results):
"""Get the metrics of the current iteration for masked language model dream.
Args:
tokens: The current tokens the model has optimized to.
prediction: The current prediction score from the embeddings in use.
ids_prediction: The current prediction score using the top tokens.
iteration: The current iteration of the optimization process.
temperature: The current softmax temperature for the optimization.
    max_values: The current value of certainty for each token.
    results: The dict that collects the per-iteration records.
"""
  iteration_result = {
      "number": iteration,
      "tokens": tokens,
      "temperature": temperature.item(),
      "max_values": max_values[0].tolist()
  }
  iteration_result["prediction"] = prediction.item()
  iteration_result["ids_prediction"] = ids_prediction.item()
  results["iterations"].append(iteration_result)
def write_top_ks_mlm(predictions, iteration, top_k_file, k, tokenizer,
maximize_word, normalize):
"""Write the top predictions of the current iteration to a file.
Args:
predictions: The prediction scores of the last inference run.
iteration: The current iteration number.
top_k_file: The file to write the top predictions to.
k: How many predictions to write for each iteration.
tokenizer: Used to convert between tokens and ids.
maximize_word: The word this run is trying to maximize for.
normalize: If the prediction is to be normalized.
"""
if normalize:
sm = torch.nn.functional.softmax(predictions[0][maximize_word], -1)
scores, indices = torch.topk(sm, k, -1)
else:
scores, indices = torch.topk(predictions[0][maximize_word], k, -1)
ind_np = indices.data.cpu().numpy()
tokens = tokenizer.convert_ids_to_tokens(ind_np)
scores_np = scores.data.cpu().numpy()
top_k_file.write("Iteration: {}".format(iteration))
top_k_file.write("\n")
top_k_file.write("Tokens:")
top_k_file.write("\n")
top_k_file.write(str(tokens))
top_k_file.write("\n")
top_k_file.write("Score:")
top_k_file.write("\n")
top_k_file.write(str(scores_np))
top_k_file.write("\n")
top_k_file.write("\n")
| 10,131 | 36.525926 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/classifier_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Handles common code used in linear classification contexts."""
import os
import numpy as np
import torch
def get_classification_head(device, layer_id, trained_variables_dir):
  """Loads the trained linear classifier weights for a given layer.
  Args:
    device: The device to move the weight tensor to.
    layer_id: The index of the layer the classifier was trained on.
    trained_variables_dir: Directory holding the trained classifier weights.
  Returns:
    shift_tensor: The classification weight tensor, moved to the device.
  """
  shift_path = os.path.join(trained_variables_dir, str(layer_id),
                            'final_weights.np')
  with open(shift_path, 'rb') as shift_file:
    shift = np.load(shift_file)
  shift_tensor = torch.tensor(shift, requires_grad=False)
  shift_tensor = shift_tensor.to(device)
  return shift_tensor
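# Usage sketch (hypothetical directory; mirrors how classify_token.py further
# down in this repo consumes the returned tensor):
#   head = get_classification_head(device, layer_id=6,
#                                  trained_variables_dir='variables/MAE')
#   y = torch.sigmoid(token_activation.matmul(head))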
| 1,161 | 37.733333 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/inference_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Run inference on the BERT model provided."""
import torch
def run_inference(before, within, after, position_embeddings,
sentence_embeddings, attention_mask, embedding_map, model):
"""Run inference on a given one hot embedding split into three parts.
Args:
before: Tensor containing all the one_hot values before the changed part.
within: The changed one_hot part of the sentence.
after: Tensor containing all the one_hot values after the changed part.
position_embeddings: Positional embeddings used by BERT.
sentence_embeddings: Sentence embeddings used by BERT.
attention_mask: Used by BERT for inference.
embedding_map: Containing all the token embeddings for BERT.
model: BERT model to run inference on.
Returns:
layers: The layer activations of the model.
"""
one_hots = torch.cat([before, within, after], dim=1)
# Get the word embedding from the smooth one_hots vector
words_embeddings = torch.matmul(one_hots, embedding_map.embedding_map)
# Assemble the actual embedding
embeddings = words_embeddings + position_embeddings + sentence_embeddings
embeddings = model.embeddings.LayerNorm(embeddings)
# Get the prediction by the model
layers = model.encoder(embeddings, attention_mask)
# Reorder to get more intuitive ordering of batches, layers, words, neurons
layers = torch.stack(layers).permute(1, 0, 2, 3)
return layers
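# Usage sketch: the before/within/after split mirrors one_hots_helper, which
# cuts the one-hot token sequence into a frozen prefix, a modifiable middle,
# and a frozen suffix. After the permute above the result is indexed as
# layers[batch][layer][word][neuron], e.g.
#   layers = run_inference(before, within, after, pos_emb, sent_emb,
#                          att_mask, embedding_map, model)
#   cls_vec = layers[0][11][0]  # e.g. last-layer activation of [CLS]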
def run_inference_vanilla(tokens_tensor, segments_tensor, model):
"""Run inference on the model.
Args:
tokens_tensor: The tokens to infer the activation from.
segments_tensor: Segments of the sequence to retrieve the activation for.
model: The model to run inference on.
Returns:
layers_act: Activation of the model given the parameters.
"""
with torch.no_grad():
layers_act, _ = model(tokens_tensor, segments_tensor)
# Reorder to get more intuitive ordering of batches, layers, words, neurons
layers_act = torch.stack(layers_act).permute(1, 0, 2, 3)
return layers_act
def run_inference_mlm(one_hots, position_embeddings, sentence_embeddings,
attention_mask, embedding_map, model):
"""Run inference on BERT.
Args:
one_hots: Tensor containing the one_hots used for the inference.
position_embeddings: Positional embeddings used by BERT.
sentence_embeddings: Sentence embeddings used by BERT.
attention_mask: Used by BERT for inference.
embedding_map: Containing all the token embeddings for BERT.
model: BERT model to run inference on.
Returns:
prediction_scores: The prediction scores of the model.
"""
# Get the word embedding from the smooth one_hots vector
words_embeddings = torch.matmul(one_hots, embedding_map.embedding_map)
# Assemble the actual embedding
embeddings = words_embeddings + position_embeddings + sentence_embeddings
embeddings = model.bert.embeddings.LayerNorm(embeddings)
# Get the prediction by the model
layers = model.bert.encoder(embeddings, attention_mask)
encoded_layers = layers[-1]
prediction_scores = model.cls(encoded_layers)
return prediction_scores
| 3,806 | 39.935484 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/attention_mask_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Used to retrieve a attention mask for the provided input."""
import torch
def get_attention_mask(input_ids):
"""Get the default attention mask.
Args:
input_ids: Ids representing the input tokens, used for the mask length.
Returns:
extended_attention_mask: The attention mask to be used with the model.
"""
attention_mask = torch.ones_like(input_ids)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length,
# to_seq_length]
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(dtype=torch.float)
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
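# Shape sketch (illustrative): for input_ids of shape (1, 10) the returned
# mask has shape (1, 1, 1, 10) and broadcasts against attention scores of
# shape (batch, num_heads, from_seq_length, to_seq_length).
#   input_ids = torch.ones((1, 10), dtype=torch.long)
#   mask = get_attention_mask(input_ids)
#   assert mask.shape == (1, 1, 1, 10)
#   assert float(mask.sum()) == 0.0  # nothing is masked here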
| 1,836 | 41.72093 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/tokenization_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Used to tokenize sentences correctly."""
import torch
def tokenize_input_sentence(tokenizer, sentence, sentence2, mask_word=-1):
"""Tokenize the input sentence and return tokenized tensors.
Args:
tokenizer: The tokenizer to be used for the conversion.
sentence: The sentence to be tokenized.
sentence2: The optional second part of the sentence.
mask_word: Optional index to be replaced with the MASK token.
Returns:
tokens: The tokens obtained from the sentence.
"""
# Set up the sentence structure if it has not been tokenized yet
if not sentence.startswith(u'[CLS]'):
sentence = u'[CLS] ' + sentence + u' [SEP]'
if sentence2:
sentence = sentence + ' ' + sentence2 + u' [SEP]'
# Tokenized input
tokens = tokenizer.tokenize(sentence)
if mask_word > -1: # Replace a token with MASK
tokens[mask_word] = '[MASK]'
return tokens
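# Example (word pieces depend on the vocabulary of the tokenizer in use):
#   tokens = tokenize_input_sentence(tokenizer, u'i like cats', u'')
#   # -> ['[CLS]', 'i', 'like', 'cats', '[SEP]']
#   tokens = tokenize_input_sentence(tokenizer, u'i like cats', u'', 3)
#   # -> ['[CLS]', 'i', 'like', '[MASK]', '[SEP]']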
def tensors_from_tokens(tokenizer, tokens, device):
"""Obtain tokens and segments tensors from token ids.
Args:
tokenizer: Tokenizer to be used for the conversion to ids.
tokens: Tokens to be converted and served as a tensor.
device: The device to hold the tensors in memory.
Returns:
tokens_tensor: Tensor holding the token representation of the sentence.
segments_tensor: Tensor holding the segment representation of the sentence.
"""
# Convert token to vocabulary indices
indexed_tokens = tokenizer.convert_tokens_to_ids(tokens)
# Define sentence A and B indices associated to 1st and 2nd sentences
sep_idxs = [-1] + [i for i, v in enumerate(tokens) if v == '[SEP]']
segments_ids = []
for i in range(len(sep_idxs) - 1):
segments_ids += [i] * (sep_idxs[i+1] - sep_idxs[i])
# Convert inputs to PyTorch tensors and make them accessible to CUDA
tokens_tensor = torch.tensor([indexed_tokens], device=device)
segments_tensor = torch.tensor([segments_ids], device=device)
return tokens_tensor, segments_tensor
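# Example (illustrative): for a two-part input the segment ids switch from 0
# to 1 after the first [SEP]:
#   tokens = ['[CLS]', 'a', '[SEP]', 'b', '[SEP]']
#   _, segments = tensors_from_tokens(tokenizer, tokens, device)
#   # segments -> tensor([[0, 0, 0, 1, 1]])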
| 2,630 | 38.268657 | 80 | py |
interpretability | interpretability-master/text-dream/python/helpers/setup_helper.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides the setup for the experiments."""
from pytorch_pretrained_bert import modeling
from pytorch_pretrained_bert import tokenization
import torch
import embeddings_helper
def setup_uncased(model_config):
"""Setup the uncased bert model.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
device: The device to be used in this run.
embedding_map: Holding all token embeddings.
"""
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained(model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
# Initialize the embedding map
embedding_map = embeddings_helper.EmbeddingMap(device, model)
return tokenizer, model, device, embedding_map
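# Usage sketch (mirrors how the dream scripts consume this setup):
#   tokenizer, model, device, embedding_map = setup_uncased('bert-base-uncased')
#   ids = tokenizer.convert_tokens_to_ids(['[CLS]', 'i', 'like', 'cats', '[SEP]'])
#   # embedding_map.embedding_map holds one row per vocabulary entry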
def setup_bert_vanilla(model_config):
"""Setup the uncased bert model without embedding maps.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
device: The device to be used in this run.
"""
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
model = modeling.BertModel.from_pretrained(model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
return tokenizer, model, device
def setup_bert_mlm(model_config):
"""Setup the uncased bert model with classification head.
Args:
model_config: The model configuration to be loaded.
Returns:
tokenizer: The tokenizer to be used to convert between tokens and ids.
model: The model that has been initialized.
    device: The device to be used in this run.
    embedding_map: Holding all token embeddings.
  """
# Load pre-trained model tokenizer (vocabulary)
tokenizer = tokenization.BertTokenizer.from_pretrained(model_config)
# Load pre-trained model (weights)
  model = modeling.BertForMaskedLM.from_pretrained(model_config)
_ = model.eval()
# Set up the device in use
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('device : ', device)
model = model.to(device)
# Initialize the embedding map
embedding_map = embeddings_helper.EmbeddingMap(device, model.bert)
return tokenizer, model, device, embedding_map
| 3,457 | 35.4 | 80 | py |
interpretability | interpretability-master/text-dream/python/linear_classifier/classify_token.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classify a single token in a sentence using a trained classifier."""
from absl import app
from absl import flags
import torch
import sys
sys.path.insert(1, 'helpers')
import classifier_helper
import inference_helper
import setup_helper
import tokenization_helper
# Command Line Arguments
FLAGS = flags.FLAGS
flags.DEFINE_string('sentence', u'he was a doctor',
'the sentence to start with')
flags.DEFINE_string('trained_variables_dir', None, 'the location where the '
'classifier variables are stored')
flags.DEFINE_string('model_config', 'bert-base-uncased', 'the name of the model '
'configuration to load')
flags.DEFINE_integer('layer_id', 6, 'layer to optimize activation for')
flags.DEFINE_integer('word_id', 1, 'word to feed into the classification head')
def classify_token(device, tokenizer, model):
"""Classifies a token using a trained classifier on top of BERT.
Args:
device: Where to do the calculations and store variables.
tokenizer: Converts the input sentence into tokens.
model: Used to retrieve the activations from.
"""
tokens = tokenization_helper.tokenize_input_sentence(tokenizer,
FLAGS.sentence, '')
tokens_tensor, segments_tensor = tokenization_helper.tensors_from_tokens(
tokenizer, tokens, device)
layers_act = inference_helper.run_inference_vanilla(tokens_tensor,
segments_tensor, model)
token_act = layers_act[0][FLAGS.layer_id][FLAGS.word_id]
classification_head = classifier_helper.get_classification_head(
device, FLAGS.layer_id, FLAGS.trained_variables_dir)
y = token_act.matmul(classification_head)
y = torch.sigmoid(y)
print('Prediction: {}'.format(y.item()))
def main(_):
tokenizer, model, device = setup_helper.setup_bert_vanilla(
FLAGS.model_config)
# Start the run
classify_token(device, tokenizer, model)
if __name__ == '__main__':
flags.mark_flag_as_required('trained_variables_dir')
app.run(main)
| 2,754 | 38.357143 | 80 | py |
interpretability | interpretability-master/text-dream/python/linear_classifier/train.py | # Copyright 2018 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Train a linear classifier on the activations of BERT."""
import json
import os
import time
from absl import app
from absl import flags
import numpy as np
import torch
import torch.utils.data
import sys
sys.path.insert(1, 'helpers')
import folder_helper
FLAGS = flags.FLAGS
flags.DEFINE_string('output_dir', None,
'the output directory where the results will be written')
flags.DEFINE_string('train_dir', None, 'where to get the training data')
flags.DEFINE_integer('num_epochs', 1, 'number of optimization steps')
flags.DEFINE_integer('layer_id', 5, 'layer to optimize activation for')
flags.DEFINE_integer('batch_size', 32, 'batch size used for training')
flags.DEFINE_integer('random_seed', 42, 'random dataset shuffle seed')
flags.DEFINE_string('concept1', 'he', 'first concept to classify between')
flags.DEFINE_string('concept2', 'she', 'second concept to classify between')
flags.DEFINE_float('learning_rate', 0.1, 'learning rate of the optimizer')
flags.DEFINE_float('val_split', 0.1, 'train/validation split')
flags.DEFINE_bool('verbose', True, 'print the training progress')
flags.DEFINE_bool('mae', True, 'use mean absolute error, otherwise uses mean '
'squared error')
flags.DEFINE_bool('adam', True, 'use adam instead of sgd')
flags.DEFINE_bool('sigmoid', True, 'apply a sigmoid to the classifier output')
def write_params(parent_dir):
"""Write the parameters of this run to the output directory.
Args:
parent_dir: The directory to save the new file to.
"""
params_file = open(os.path.join(parent_dir, 'params.json'), 'w')
params = {
'num_epochs': FLAGS.num_epochs,
'layer_id': FLAGS.layer_id,
'concept1': FLAGS.concept1,
'concept2': FLAGS.concept2,
'learning_rate': FLAGS.learning_rate,
'train_dir': FLAGS.train_dir,
'mae': FLAGS.mae,
'val_split': FLAGS.val_split,
'random_seed': FLAGS.random_seed,
'adam': FLAGS.adam
}
json.dump(params, params_file)
params_file.close()
def write_iteration(parent_dir, y, y_truth, loss):
"""Write the results of the current iteration to a file.
Args:
parent_dir: The directory to save the new file to.
y: The classification result.
y_truth: Ground truth for the classification.
loss: The loss of the current run.
"""
iteration_file = open(os.path.join(parent_dir, 'training.txt'), 'a')
iteration_file.write('Y: {}'.format(y.data.cpu().numpy()))
iteration_file.write('\n')
iteration_file.write('Y_Truth: {}'.format(y_truth.data.cpu().numpy()))
iteration_file.write('\n')
iteration_file.write('Loss: {}'.format(loss.item()))
iteration_file.write('\n')
iteration_file.write('\n')
def write_epoch(parent_dir, accuracy, epoch):
"""Write the results of the current iteration to a file.
Args:
parent_dir: The directory to save the new file to.
accuracy: The accuracy for this epoch on the test data.
epoch: The current epoch number.
"""
iteration_file = open(os.path.join(parent_dir, 'epochs.txt'), 'a')
epoch_result = 'Epoch {}, Accuracy {}'.format(epoch, accuracy)
iteration_file.write(epoch_result)
if FLAGS.verbose:
print(epoch_result)
iteration_file.write('\n')
class Data(torch.utils.data.Dataset):
"""Represents the training dataset for the concept embedding classifier."""
def __init__(self):
start_setup = time.time()
self.init_with_files()
print('Setup Time: {}'.format(time.time() - start_setup))
def __len__(self):
return len(self.concept_classes)
def __getitem__(self, index):
get_start = time.time()
# Get the file from the path that this dataset refers to for a concept
    with open(self.items[index], 'rb') as embeddings_file:
      np_item = np.load(embeddings_file)
# Convert the training elements to tensors
torch_item = torch.tensor(np_item, dtype=torch.float)
torch_class = torch.tensor(self.concept_classes[index])
get_time = time.time() - get_start
return torch_item, torch_class, get_time
def init_with_files(self):
# Get all files belonging to concept 1
paths_concept1 = os.listdir(
os.path.join(FLAGS.train_dir, FLAGS.concept1, str(FLAGS.layer_id)))
paths_concept1 = [os.path.join(
FLAGS.train_dir, FLAGS.concept1, str(FLAGS.layer_id),
x) for x in paths_concept1]
# Get all files belonging to concept 2
paths_concept2 = os.listdir(
os.path.join(FLAGS.train_dir, FLAGS.concept2, str(FLAGS.layer_id)))
paths_concept2 = [os.path.join(
FLAGS.train_dir, FLAGS.concept2, str(FLAGS.layer_id),
x) for x in paths_concept2]
self.setup_classes_and_items(paths_concept1, paths_concept2)
def setup_classes_and_items(self, concept_1_items, concept_2_items):
# Set up the classes belonging to the concepts
concept1_classes = np.zeros(len(concept_1_items))
concept2_classes = np.ones(len(concept_2_items))
print('Found {} examples for concept "{}".'.format(len(concept_1_items),
FLAGS.concept1))
print('Found {} examples for concept "{}".'.format(len(concept_2_items),
FLAGS.concept2))
# Store the paths to the concepts files and the classes in this data object
self.items = concept_1_items + concept_2_items
self.concept_classes = np.concatenate((concept1_classes, concept2_classes),
axis=0)
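# Expected layout (hypothetical root directory): train_dir/<concept>/<layer>/
# holds one saved numpy activation vector per example, e.g.
#   activations/he/5/example_0.np
#   activations/she/5/example_0.np
# so __getitem__ can lazily load (activation, class) pairs by index.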
def train_classifier(train_data, device, parent_dir):
"""Train a classifier to distinguish activations between two concepts.
Args:
train_data: The Data object for obtaining the training data.
device: On which device to train the classifier on.
parent_dir: The directory to write training results to.
"""
# Creating data indices for training and validation splits:
dataset_size = len(train_data)
indices = list(range(dataset_size))
split = int(np.floor(FLAGS.val_split * dataset_size))
np.random.seed(FLAGS.random_seed)
np.random.shuffle(indices)
train_indices, val_indices = indices[split:], indices[:split]
# Creating PT data samplers and loaders:
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(train_data,
batch_size=FLAGS.batch_size,
sampler=train_sampler)
validation_loader = torch.utils.data.DataLoader(train_data,
batch_size=FLAGS.batch_size,
sampler=valid_sampler)
# Model setup for this linear classifier
train_item, _, _ = train_data.__getitem__(0)
weights = torch.zeros((train_item.shape[0], 1), device=device,
requires_grad=True)
if FLAGS.adam:
optimizer = torch.optim.Adam([weights], lr=FLAGS.learning_rate)
else:
optimizer = torch.optim.SGD([weights], lr=FLAGS.learning_rate)
# Evaluation Parameters
it_times = []
ep_times = []
get_times = []
# Training loop (num_epochs * num_training_elements)
for epoch in range(FLAGS.num_epochs):
ep_start = time.time()
# Training
for x, y_truth, get_time in train_loader:
get_times.append(np.average(get_time.data.cpu().numpy()))
it_start = time.time()
x, y_truth = x.to(device), y_truth.to(device)
optimizer.zero_grad()
y = x.matmul(weights)
if FLAGS.sigmoid:
y = torch.sigmoid(y)
y = y.reshape(-1)
if FLAGS.mae:
loss = torch.mean(torch.abs(y - y_truth))
else:
loss = torch.mean(torch.pow(y - y_truth, 2))
# Write iteration stats to file if preferred
if FLAGS.verbose:
write_iteration(parent_dir, y, y_truth, loss)
loss.backward()
optimizer.step()
it_times.append(time.time() - it_start)
ep_times.append(time.time() - ep_start)
# Validation
with torch.no_grad():
acc_value = 0
num_acc = 0
for x, y_truth, get_time in validation_loader:
x, y_truth = x.to(device), y_truth.to(device)
y = x.matmul(weights)
if FLAGS.sigmoid:
y = torch.sigmoid(y)
y = y.reshape(-1)
y = torch.where(y > 0.5, torch.ones_like(y), torch.zeros_like(y))
acc = y - y_truth
num_acc = num_acc + acc.shape[0]
acc = torch.sum(torch.abs(acc))
acc_value = acc_value + acc.item()
accuracy = 1.0 - (acc_value / num_acc)
write_epoch(parent_dir, accuracy, epoch)
# Write the final classification vector
  with open(os.path.join(parent_dir, 'final_weights.np'), 'wb') as weights_file:
    np.save(weights_file, weights.data.cpu().numpy())
print('Iteration Time: {}'.format(np.average(it_times)))
print('Item Time: {}'.format(np.average(get_times)))
print('Epoch Time: {}'.format(np.average(ep_times)))
def main(_):
# Setup a directory to write the results to
folder_helper.make_folder_if_not_exists(FLAGS.output_dir)
loss_string = 'MSE'
if FLAGS.mae:
loss_string = 'MAE'
parent_dir = os.path.join(FLAGS.output_dir, loss_string)
folder_helper.make_folder_if_not_exists(parent_dir)
parent_dir = os.path.join(parent_dir, str(FLAGS.layer_id))
folder_helper.make_folder_if_not_exists(parent_dir)
write_params(parent_dir)
# Get the device to work with
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# Initialize the training data
dataset = Data()
# Start the training process
train_classifier(dataset, device, parent_dir)
if __name__ == '__main__':
flags.mark_flag_as_required('output_dir')
flags.mark_flag_as_required('train_dir')
app.run(main)
| 10,428 | 38.059925 | 80 | py |
WeatherBench | WeatherBench-master/src/train_nn.py | from .score import *
import os
import numpy as np
import xarray as xr
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Dropout, Conv2D, Lambda, LeakyReLU
import tensorflow.keras.backend as K
from configargparse import ArgParser
def limit_mem():
"""Limit TF GPU mem usage"""
config = tf.compat.v1.ConfigProto()
config.gpu_options.allow_growth = True
tf.compat.v1.Session(config=config)
class DataGenerator(keras.utils.Sequence):
def __init__(self, ds, var_dict, lead_time, batch_size=32, shuffle=True, load=True, mean=None, std=None):
"""
Data generator for WeatherBench data.
Template from https://stanford.edu/~shervine/blog/keras-how-to-generate-data-on-the-fly
Args:
ds: Dataset containing all variables
var_dict: Dictionary of the form {'var': level}. Use None for level if data is of single level
lead_time: Lead time in hours
batch_size: Batch size
shuffle: bool. If True, data is shuffled.
            load: bool. If True, dataset is loaded into RAM.
mean: If None, compute mean from data.
std: If None, compute standard deviation from data.
"""
self.ds = ds
self.var_dict = var_dict
self.batch_size = batch_size
self.shuffle = shuffle
self.lead_time = lead_time
data = []
generic_level = xr.DataArray([1], coords={'level': [1]}, dims=['level'])
for var, levels in var_dict.items():
try:
data.append(ds[var].sel(level=levels))
except ValueError:
data.append(ds[var].expand_dims({'level': generic_level}, 1))
except KeyError:
data.append(ds[var])
self.data = xr.concat(data, 'level').transpose('time', 'lat', 'lon', 'level')
self.mean = self.data.mean(('time', 'lat', 'lon')).compute() if mean is None else mean
self.std = self.data.std('time').mean(('lat', 'lon')).compute() if std is None else std
# Normalize
self.data = (self.data - self.mean) / self.std
self.n_samples = self.data.isel(time=slice(0, -lead_time)).shape[0]
self.init_time = self.data.isel(time=slice(None, -lead_time)).time
self.valid_time = self.data.isel(time=slice(lead_time, None)).time
self.on_epoch_end()
# For some weird reason calling .load() earlier messes up the mean and std computations
if load: print('Loading data into RAM'); self.data.load()
def __len__(self):
'Denotes the number of batches per epoch'
return int(np.ceil(self.n_samples / self.batch_size))
def __getitem__(self, i):
'Generate one batch of data'
idxs = self.idxs[i * self.batch_size:(i + 1) * self.batch_size]
X = self.data.isel(time=idxs).values
y = self.data.isel(time=idxs + self.lead_time).values
return X, y
def on_epoch_end(self):
'Updates indexes after each epoch'
self.idxs = np.arange(self.n_samples)
        if self.shuffle:
np.random.shuffle(self.idxs)
class PeriodicPadding2D(tf.keras.layers.Layer):
def __init__(self, pad_width, **kwargs):
super().__init__(**kwargs)
self.pad_width = pad_width
def call(self, inputs, **kwargs):
if self.pad_width == 0:
return inputs
inputs_padded = tf.concat(
[inputs[:, :, -self.pad_width:, :], inputs, inputs[:, :, :self.pad_width, :]], axis=2)
# Zero padding in the lat direction
inputs_padded = tf.pad(inputs_padded, [[0, 0], [self.pad_width, self.pad_width], [0, 0], [0, 0]])
return inputs_padded
def get_config(self):
config = super().get_config()
config.update({'pad_width': self.pad_width})
return config
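# Minimal sketch of the padding behaviour (illustrative values): the
# longitude axis wraps around periodically, the latitude axis is zero-padded.
#   x = tf.reshape(tf.range(8, dtype=tf.float32), (1, 2, 4, 1))
#   padded = PeriodicPadding2D(1)(x)
#   # padded.shape == (1, 4, 6, 1): lon grew 4 -> 6 by wrapping,
#   # lat grew 2 -> 4 by zero rows at the poles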
class PeriodicConv2D(tf.keras.layers.Layer):
def __init__(self, filters,
kernel_size,
conv_kwargs={},
**kwargs, ):
super().__init__(**kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.conv_kwargs = conv_kwargs
if type(kernel_size) is not int:
assert kernel_size[0] == kernel_size[1], 'PeriodicConv2D only works for square kernels'
kernel_size = kernel_size[0]
pad_width = (kernel_size - 1) // 2
self.padding = PeriodicPadding2D(pad_width)
self.conv = Conv2D(
filters, kernel_size, padding='valid', **conv_kwargs
)
def call(self, inputs):
return self.conv(self.padding(inputs))
def get_config(self):
config = super().get_config()
config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'conv_kwargs': self.conv_kwargs})
return config
def build_cnn(filters, kernels, input_shape, dr=0):
"""Fully convolutional network"""
x = input = Input(shape=input_shape)
for f, k in zip(filters[:-1], kernels[:-1]):
x = PeriodicConv2D(f, k)(x)
x = keras.activations.elu(x)
if dr > 0: x = Dropout(dr)(x)
output = PeriodicConv2D(filters[-1], kernels[-1])(x)
return keras.models.Model(input, output)
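# Usage sketch (hypothetical hyperparameters): a small fully convolutional
# net mapping the two channels used below in main() (Z500, T850) onto
# themselves on the 32 x 64 grid:
#   model = build_cnn(filters=[64, 2], kernels=[5, 5],
#                     input_shape=(32, 64, 2), dr=0.1)
#   model.compile(keras.optimizers.Adam(1e-4), 'mse')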
def create_predictions(model, dg):
"""Create non-iterative predictions"""
preds = model.predict_generator(dg)
# Unnormalize
preds = preds * dg.std.values + dg.mean.values
das = []
lev_idx = 0
for var, levels in dg.var_dict.items():
if levels is None:
das.append(xr.DataArray(
preds[:, :, :, lev_idx],
dims=['time', 'lat', 'lon'],
coords={'time': dg.valid_time, 'lat': dg.ds.lat, 'lon': dg.ds.lon},
name=var
))
lev_idx += 1
else:
nlevs = len(levels)
das.append(xr.DataArray(
preds[:, :, :, lev_idx:lev_idx+nlevs],
dims=['time', 'lat', 'lon', 'level'],
coords={'time': dg.valid_time, 'lat': dg.ds.lat, 'lon': dg.ds.lon, 'level': levels},
name=var
))
lev_idx += nlevs
return xr.merge(das)
def create_iterative_predictions(model, dg, max_lead_time=5 * 24):
"""Create iterative predictions"""
state = dg.data[:dg.n_samples]
preds = []
for _ in range(max_lead_time // dg.lead_time):
state = model.predict(state)
p = state * dg.std.values + dg.mean.values
preds.append(p)
preds = np.array(preds)
lead_time = np.arange(dg.lead_time, max_lead_time + dg.lead_time, dg.lead_time)
    das = []
lev_idx = 0
for var, levels in dg.var_dict.items():
if levels is None:
das.append(xr.DataArray(
preds[:, :, :, :, lev_idx],
dims=['lead_time', 'time', 'lat', 'lon'],
coords={'lead_time': lead_time, 'time': dg.init_time, 'lat': dg.ds.lat, 'lon': dg.ds.lon},
name=var
))
lev_idx += 1
else:
nlevs = len(levels)
das.append(xr.DataArray(
preds[:, :, :, :, lev_idx:lev_idx + nlevs],
dims=['lead_time', 'time', 'lat', 'lon', 'level'],
coords={'lead_time': lead_time, 'time': dg.init_time, 'lat': dg.ds.lat, 'lon': dg.ds.lon,
'level': levels},
name=var
))
lev_idx += nlevs
return xr.merge(das)
def create_cnn(filters, kernels, dropout=0., activation='elu', periodic=True):
assert len(filters) == len(kernels), 'Requires same number of filters and kernel_sizes.'
input = Input(shape=(None, None, 1,))
x = input
    for f, k in zip(filters[:-1], kernels[:-1]):
        if periodic:
            # PeriodicConv2D pads internally and only accepts extra Conv2D
            # arguments via conv_kwargs, not as constructor keywords.
            x = PeriodicConv2D(f, k, conv_kwargs={'activation': activation})(x)
        else:
            x = Conv2D(f, k, padding='same', activation=activation)(x)
        if dropout > 0:
            x = Dropout(dropout)(x)
    if periodic:
        output = PeriodicConv2D(filters[-1], kernels[-1])(x)
    else:
        output = Conv2D(filters[-1], kernels[-1], padding='same')(x)
model = keras.models.Model(inputs=input, outputs=output)
return model
def main(datadir, vars, filters, kernels, lr, activation, dr, batch_size, patience, model_save_fn, pred_save_fn,
train_years, valid_years, test_years, lead_time, gpu, iterative):
os.environ["CUDA_VISIBLE_DEVICES"]=str(gpu)
# Limit TF memory usage
limit_mem()
# Open dataset and create data generators
# TODO: Flexible input data
z = xr.open_mfdataset(f'{datadir}/geopotential_500/*.nc', combine='by_coords')
t = xr.open_mfdataset(f'{datadir}/temperature_850/*.nc', combine='by_coords')
ds = xr.merge([z, t], compat='override') # Override level. discarded later anyway.
# TODO: Flexible valid split
ds_train = ds.sel(time=slice(*train_years))
ds_valid = ds.sel(time=slice(*valid_years))
ds_test = ds.sel(time=slice(*test_years))
dic = {var: None for var in vars}
dg_train = DataGenerator(ds_train, dic, lead_time, batch_size=batch_size)
dg_valid = DataGenerator(ds_valid, dic, lead_time, batch_size=batch_size, mean=dg_train.mean,
std=dg_train.std, shuffle=False)
dg_test = DataGenerator(ds_test, dic, lead_time, batch_size=batch_size, mean=dg_train.mean,
std=dg_train.std, shuffle=False)
print(f'Mean = {dg_train.mean}; Std = {dg_train.std}')
# Build model
# TODO: Flexible input shapes and optimizer
model = build_cnn(filters, kernels, input_shape=(32, 64, len(vars)), dr=dr)
model.compile(keras.optimizers.Adam(lr), 'mse')
print(model.summary())
# Train model
# TODO: Learning rate schedule
model.fit(dg_train, epochs=100, validation_data=dg_valid,
callbacks=[tf.keras.callbacks.EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=patience,
verbose=1,
mode='auto'
)]
)
print(f'Saving model weights: {model_save_fn}')
model.save_weights(model_save_fn)
# Create predictions
pred = create_iterative_predictions(model, dg_test) if iterative else create_predictions(model, dg_test)
print(f'Saving predictions: {pred_save_fn}')
pred.to_netcdf(pred_save_fn)
# Print score in real units
# TODO: Make flexible for other states
z500_valid = load_test_data(f'{datadir}geopotential_500', 'z')
t850_valid = load_test_data(f'{datadir}temperature_850', 't')
valid = xr.merge([z500_valid, t850_valid], compat='override')
print(evaluate_iterative_forecast(pred, valid).load() if iterative else compute_weighted_rmse(pred, valid).load())
if __name__ == '__main__':
p = ArgParser()
p.add_argument('-c', '--my-config', is_config_file=True, help='config file path')
p.add_argument('--datadir', type=str, required=True, help='Path to data')
p.add_argument('--model_save_fn', type=str, required=True, help='Path to save model')
p.add_argument('--pred_save_fn', type=str, required=True, help='Path to save predictions')
p.add_argument('--vars', type=str, nargs='+', required=True, help='Variables')
p.add_argument('--filters', type=int, nargs='+', required=True, help='Filters for each layer')
p.add_argument('--kernels', type=int, nargs='+', required=True, help='Kernel size for each layer')
p.add_argument('--lead_time', type=int, required=True, help='Forecast lead time')
p.add_argument('--iterative', type=bool, default=False, help='Is iterative forecast')
p.add_argument('--iterative_max_lead_time', type=int, default=5*24, help='Max lead time for iterative forecasts')
p.add_argument('--lr', type=float, default=1e-4, help='Learning rate')
p.add_argument('--activation', type=str, default='elu', help='Activation function')
p.add_argument('--dr', type=float, default=0, help='Dropout rate')
p.add_argument('--batch_size', type=int, default=128, help='batch_size')
p.add_argument('--patience', type=int, default=3, help='Early stopping patience')
p.add_argument('--train_years', type=str, nargs='+', default=('1979', '2015'), help='Start/stop years for training')
p.add_argument('--valid_years', type=str, nargs='+', default=('2016', '2016'), help='Start/stop years for validation')
p.add_argument('--test_years', type=str, nargs='+', default=('2017', '2018'), help='Start/stop years for testing')
p.add_argument('--gpu', type=int, default=0, help='Which GPU')
args = p.parse_args()
main(
datadir=args.datadir,
vars=args.vars,
filters=args.filters,
kernels=args.kernels,
lr=args.lr,
activation=args.activation,
dr=args.dr,
batch_size=args.batch_size,
patience=args.patience,
model_save_fn=args.model_save_fn,
pred_save_fn=args.pred_save_fn,
train_years=args.train_years,
valid_years=args.valid_years,
test_years=args.test_years,
lead_time=args.lead_time,
gpu=args.gpu,
iterative=args.iterative
)
| 13,309 | 40.59375 | 122 | py |
BOSS | BOSS-main/Eval_Codes/GenNNs_BOSS_MNIST_fashion_HCs.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
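# Note: relu_scaler_(x) is equivalent to max(0., x); it is used below to keep
# the dynamically adapted loss weights non-negative, e.g.
#   relu_scaler_(0.7) -> 0.7 and relu_scaler_(-0.3) -> 0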
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
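# Usage sketch (illustrative): SSIM of an image with itself is 1.0, so values
# near 1 mean the generated example stays visually close to the desired one.
#   SSIM_index(X_desired, X_desired)  # -> 1.0 (once X_desired is set below)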
#########################################################################################################################################
##########################################################################################
############################### Jensen-Shannon divergence function ######
##########################################################################################
"""
It is a normalized and stable version of the KL divergence and returns values in [0, 1], where 0 means the two distributions are identical.
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
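# Example (illustrative): identical PMFs give 0, disjoint one-hot PMFs give
# the maximum distance of 1 with base-2 logs.
#   p = np.array([1., 0., 0.])
#   q = np.array([0., 1., 0.])
#   D_JS_PMFs(p, p)  # -> 0.0
#   D_JS_PMFs(p, q)  # -> 1.0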
####################################################################################
################################ some dataset - MNIST fashion
# download mnist data and split into train and test sets
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
y_test = np_utils.to_categorical(test_labels,10)
# ###############################################
## get a trained model (such as the MNIST digits)
#trained_model = load_model("MNIST_digits__avgPool_dense_softmax_together_model.h5") # input is \in [0,1]
#trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
# trained model for the MNIST fashion used for the FHC with 1D input and \in [0,1]
trained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')
# test the CA of the model = 90.09
#X_test = 2*X_test - 1
#results = trained_model.evaluate(X_test, y_test)
#print("test loss, test acc:", results)
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256),name='reshape_gen')
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1),name='reshape_gen_final')
layer.trainable=True
gen_NN.add(layer)
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
### define X_d as the desired
# make the image in [-1,1]
#X_desired = X_test[21]*2 -1
X_desired = X_test[71]
# these two need to be have the same values as of now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping thresholds
delta_s = 0.25
delta_js = 0.20
delta_ssim = 0.8
delta_c = 0.25
training_steps = 20
############################################################
### automated desired for confidence reduction y_d (desired PMF)
################################################################
number_of_classes = 10
target_class = test_labels[559]
desired_confidence = 0.5
#1 code the confidence only
desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
for i in range(number_of_classes):
if i == target_class:
desired_PMF_confidence[:,i] = desired_confidence
else:
desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#2 code the decision boundary examples between class i and class j
desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
class_i = 7
class_j = 6
#below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
desired_confidence_boundary = 0.5
for i in range(number_of_classes):
if i == class_i or i == class_j:
desired_PMF_boundary[:,i] = desired_confidence_boundary
else:
desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
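# Sanity check on the constructions above: both targets are proper PMFs.
#   assert abs(desired_PMF_confidence.sum() - 1.0) < 1e-9
#   assert abs(desired_PMF_boundary.sum() - 1.0) < 1e-9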
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
### Y_train_gen for the gen model (which is the image)
################################################################
# # below is for the 2D image
# Y_train_np_gen = np.zeros(shape=(batch_size_gen,28,28,1))
# Y_val_gen = X_desired.reshape(1,28,28,1)
# for i in range(batch_size_gen):
# Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(28,28,1)
# # convert Y_train to tf eager tensor
# Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
##this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
###for targeted, we need to change Y_train and Y_val:
###let the target lbl be 0, then
Y_train_combined = np.zeros(shape=(batch_size_2,10))
#Y_val_combined = np.array([[0.6,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0]])
Y_val_combined = desired_PMF_confidence
Y_val_combined = np.array([[0,0,0,0,1,0,0,0,0,0]])
# NOTE: the assignments above are overridden; the last Y_val_combined below is
# the desired PMF actually used for training.
Y_val_combined = np.array([[0,0,0.25,0.25,0.25,0,0.25,0,0,0]])
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
print('break')
####################################################################################################################
### defining the combined model such that it is the concatenation of g, then f ==> this is defining model h in the paper
#############################################################################################################################
input = Input(shape=100)
### Calling the gen model and defining the first output (the out of model g)
#this is for the first gen
# x = gen_NN.layers[0](input)
# for lay in range(10):
# layer = gen_NN.layers[lay+1]
# layer.trainable = True
# x = layer(x)
# out_1 = x
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
# # this is for the second gen
# x = gen_NN.layers[0](input)
# for lay in range(5):
# layer = gen_NN.layers[lay+1]
# layer.trainable = True
# x = layer(x)
# out_1 = x
#### Calling the trained model and defining the 2nd output (the output of f or h)
# x_2 = trained_model.layers[0](x)
# for lay in range(5):
# layer = trained_model.layers[lay + 1]
# layer.trainable = False
# x_2 = layer(x_2)
# out_2 = x_2
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
#combined_NN.compile(optimizer = optimizer, loss=['MeanSquaredError','categorical_crossentropy'],loss_weights=[5,0.15])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','KLDivergence'],loss_weights=[5,0.15])
#combined_NN.compile(optimizer = optimizer, loss=['MeanSquaredError','MeanAbsolutePercentageError'],loss_weights=[1,1])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','categorical_crossentropy'],loss_weights=[1.1,5])
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
#loss_2 = tf.keras.losses.MeanSquaredLogarithmicError(name='LOSS_2')
#combined_NN.compile(optimizer = optimizer, loss=[loss_1,loss_2],loss_weights=[5,10])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','MeanAbsoluteError'],loss_weights=[1,5])
# poisson and tanh at gen model does not work
#combined_NN.compile(optimizer = optimizer, loss=['Poisson','KLDivergence'],loss_weights=[5,0.11])
#combined_NN.compile(optimizer = optimizer, loss=['CosineSimilarity','KLDivergence'],loss_weights=[5,0.11])
# ### we need to enforce again in "combined_NN" to freeze weights for the trained model.
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
#combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=10, batch_size=32, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=1)
dynamic_weights_selection = True
# initial losses functions weights
lambda_gen = 1
lambda_pmf = 0.02
############# TRAINING LOOP
for i in range(training_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
    # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# output probabilities at step i ==> this is J in the paper
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
trained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.05 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
#trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
trained_model = tf.keras.models.load_model('my_model_1d_last_dense_activation_seperate')
output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that the vectors above are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=0, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=0, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
print('break')
| 17,240 | 32.938976 | 181 | py |
BOSS | BOSS-main/Eval_Codes/GenNNs_BOSS_GTSRB.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.layers import Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
import pickle
import pandas as pd
import random
import cv2
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
:param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
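# Minimal illustrative check (toy values only): relu_scaler_ behaves exactly like
# the scalar ReLU, i.e. max(x, 0).
assert relu_scaler_(0.3) == max(0.3, 0)
assert relu_scaler_(-0.3) == max(-0.3, 0)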
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(32, 32)
imageB = imageB.reshape(32, 32)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
############################### Jensen-Shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and returns values in [0, 1], where 0 means the two distributions are identical
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
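# Small illustrative check of D_JS_PMFs (toy PMFs assumed purely for demonstration):
# identical PMFs give 0, disjoint PMFs give 1, and the distance is symmetric.
_p_toy = [0.5, 0.5, 0.0, 0.0]
_q_toy = [0.0, 0.0, 0.5, 0.5]
print('JS distances:', D_JS_PMFs(_p_toy, _p_toy), D_JS_PMFs(_p_toy, _q_toy), D_JS_PMFs(_q_toy, _p_toy))  # ~0.0, 1.0, 1.0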
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
#################################################################
########################################## load data set
#################################################################
"""
Download the GTSRB data and load it into train_data, val_data and test_data before running this section.
"""
X_train, y_train = train_data['features'], train_data['labels']
X_val, y_val = val_data['features'], val_data['labels']
X_test, y_test = test_data['features'], test_data['labels']
#################################################################
########################################## process dataset
#################################################################
def grayscale(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
return img
def gray2BGR(img):
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def equalize(img):
img = cv2.equalizeHist(img)
return img
def preprocessing(img):
img = grayscale(img)
img = equalize(img)
# normalize the images, i.e. convert the pixel values to fit between 0 and 1
img = img/255
return img
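# Illustrative run of the preprocessing pipeline on a synthetic BGR image (random
# uint8 values assumed purely for demonstration): the result is a single-channel,
# histogram-equalized image with pixel values in [0, 1].
_demo_img = np.random.randint(0, 256, size=(32, 32, 3), dtype=np.uint8)
_demo_out = preprocessing(_demo_img)
print('preprocessed demo image:', _demo_out.shape, _demo_out.min(), _demo_out.max())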
y_train = to_categorical(y_train, 43)
y_val = to_categorical(y_val, 43)
y_test = to_categorical(y_test, 43)
X_train = np.array(list(map(preprocessing, X_train)))
X_val = np.array(list(map(preprocessing, X_val)))
X_test = np.array(list(map(preprocessing, X_test)))
X_train = X_train.reshape(34799, 32, 32, 1)
X_val = X_val.reshape(4410, 32, 32, 1)
X_test = X_test.reshape(12630, 32, 32, 1)
#################################################################
########################################## load model
#################################################################
"""
The source of this trained model is:
https://github.com/ItsCosmas/Traffic-Sign-Classification/blob/master/Traffic_Sign_Classification.ipynb
"""
trained_model = keras.models.load_model("my_model_GTSRB.h5")
#################################################################
########################################## test model
#################################################################
# # CA = 97.54
# results = trained_model.evaluate(X_test, y_test)
# print("test loss, test acc:", results)
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
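# Quick sanity check (illustrative): after the loop above, the classifier should
# expose no trainable weights, so only the generator g will be updated later on.
print('trainable weights in trained_model:', len(trained_model.trainable_weights))  # expected 0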
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(8 * 8 * 256, use_bias=False, input_shape=(100,), name='dense_gen', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((8, 8, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
# # below is added for the 1D modification
# layer = layers.Reshape((784, 1, 1))
# layer.trainable=True
# gen_NN.add(layer)
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
### define X_d as the desired example
X_desired = X_test[93]
# these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping thresholds
delta_s = 0.25
delta_js = 0.25
delta_ssim = 0.85
delta_c = 0.25
traning_steps = 20
############################################################
### automated construction of the desired PMF y_d for confidence reduction
################################################################
number_of_classes = 43
# target_class = test_labels[21]
# desired_confidence = 0.5
#
# #1 code the confidence only
# desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
# for i in range(number_of_classes):
# if i == target_class:
# desired_PMF_confidence[:,i] = desired_confidence
# else:
# desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#
#
# #2 code the decision boundary examples between class i and class j
# desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
# class_i = 7
# class_j = 6
# #below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
# desired_confidence_boundary = 0.5
# for i in range(number_of_classes):
# if i == class_i or i == class_j:
# desired_PMF_boundary[:,i] = desired_confidence_boundary
# else:
# desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
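# The loop above simply repeats the single latent vector z batch_size_gen times;
# an equivalent, more direct construction (illustrative alternative, same values)
# uses tf.tile.
_X_train_tiled = tf.tile(X_train_one, [batch_size_gen, 1])
print('tiled z equals looped z:', bool(tf.reduce_all(tf.equal(X_train, _X_train_tiled)).numpy()))  # True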
############################################################
### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 2D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,32,32,1))
Y_val_gen = X_desired.reshape(1,32,32,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(32,32,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
# # below is for the 1D image
# Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
# Y_val_gen = X_desired.reshape(1,784,1,1)
# for i in range(batch_size_gen):
# Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# # convert Y_train to tf eager tensor
# Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
###for targeted, we need to change Y_train and Y_val:
###let the target lbl be 0, then
Y_train_combined = np.zeros(shape=(batch_size_2,number_of_classes))
Y_val_combined = np.zeros(shape=(number_of_classes))
Y_val_combined[4] = 0.5 # speed 70
Y_val_combined[13] = 0.5 # yield sign
#Y_val_combined = np.array([[1,0,0,0,0,0,0,0,0,0]])
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
#Y_desired = Y_val_combined[0]
Y_desired = Y_val_combined
print('break')
####################################################################################################################
### defining the combined model as the concatenation of g, then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=(100,))
### Calling the gen model and defining the first output (the output of model g)
#this is for the first gen
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
#combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=10, batch_size=32, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=1)
dynamic_weights_selection = True
# initial loss function weights
lambda_gen = 1
lambda_pmf = 0.01
############# TRAINING LOOP
for i in range(traning_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
# training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined.reshape(1,number_of_classes)]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(32, 32)
# output probabilities at step i ==> this is J in the paper
trained_model = keras.models.load_model("my_model_GTSRB.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 32, 32, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(1024,) - fake_image.reshape(1024,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.01 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.01
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(32,32)
### below is the same thing (just for sanity check)
trained_model = keras.models.load_model("my_model_GTSRB.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1,32,32,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that the vectors above are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
#X_desired = X_test[93]
real_image = X_desired.reshape(32,32)
def bgr2rbg(img):
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
return img
real_image = gray2BGR(np.float32(X_desired).reshape(32,32))
real_image = bgr2rbg(real_image)
fake_image = gray2BGR(fake_image)
fake_image = bgr2rbg(fake_image)
#real_image_rgb = cv2.cvtColor(real_image, cv2.COLOR_RGB2GRAY)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
#plt.imshow(real_image,cmap='gray',vmin=0, vmax=1)
plt.imshow(real_image)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
#plt.imshow(fake_image,cmap='gray',vmin=0, vmax=1)
plt.imshow(fake_image)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined)
plt.ylim(top=1.2)
print('break')
| 16,912 | 31.153992 | 178 | py |
BOSS | BOSS-main/Eval_Codes/GenNNs_BOSS_MNIST_digits.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
:param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
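# Illustrative property check (toy image assumed purely for demonstration): the
# SSIM of an image with itself is 1.0, which is why the stopping rule asks for a
# value close to 1 between the desired and generated images.
_toy_img = np.random.rand(28, 28)
print('SSIM(img, img):', SSIM_index(_toy_img, _toy_img))  # 1.0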
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
############################### Jensen-Shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and returns values in [0, 1], where 0 means the two distributions are identical
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits)
#trained_model = load_model("MNIST_digits__avgPool_dense_softmax_together_model.h5") # input is \in [0,1]
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
#trained_model = load_model("MNIST_digits_trained_model_3.h5") # input is \in [0,1]
#trained_model = load_model("MNIST_digits_trained_model_2.h5") # input is \in [-1,1]
# X_test = 2*X_test - 1
# results = trained_model.evaluate(X_test, y_test)
# print("test loss, test acc:", results)
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
#trained_model = load_model("MNIST_digits_trained_model_gan_like.h5")
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
layer.trainable=True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable=True
gen_NN.add(layer)
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
### define X_d as the desired example
# make the image in [-1,1]
X_desired = X_test[21]*2 -1
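# The loaded classifier expects inputs in [-1, 1], so the [0, 1] test image is
# rescaled with x -> 2*x - 1; the inverse map is (x + 1) / 2 (illustrative check):
print('max round-trip error:', float(np.max(np.abs((X_desired + 1) / 2 - X_test[21]))))  # ~0.0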
# these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping thresholds
delta_s = 0.25
delta_js = 0.25
delta_ssim = 0.85
delta_c = 0.25
traning_steps = 20
############################################################
### automated construction of the desired PMF y_d for confidence reduction (a small worked check follows the construction below)
################################################################
number_of_classes = 10
target_class = test_labels[21]
desired_confidence = 0.5
#1 code the confidence only
desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
for i in range(number_of_classes):
if i == target_class:
desired_PMF_confidence[:,i] = desired_confidence
else:
desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#2 code the decision boundary examples between class i and class j
desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
class_i = 7
class_j = 6
#below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
desired_confidence_boundary = 0.5
for i in range(number_of_classes):
if i == class_i or i == class_j:
desired_PMF_boundary[:,i] = desired_confidence_boundary
else:
desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
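# Worked check of the two constructions above (values follow directly from the
# formulas): with 10 classes and desired_confidence = 0.5, the target class gets
# 0.5 and every other class gets 0.5 / 9 ~ 0.0556; for the boundary PMF, classes
# 6 and 7 each get 0.5 and the remaining classes get 0. Both vectors sum to 1.
print('PMF sums:', desired_PMF_confidence.sum(), desired_PMF_boundary.sum())  # both ~1.0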
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
### Y_train_gen for the gen model (which is the image)
################################################################
# # below is for the 2D image
# Y_train_np_gen = np.zeros(shape=(batch_size_gen,28,28,1))
# Y_val_gen = X_desired.reshape(1,28,28,1)
# for i in range(batch_size_gen):
# Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(28,28,1)
# # convert Y_train to tf eager tensor
# Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
###for targeted, we need to change Y_train and Y_val:
###let the target lbl be 0, then
Y_train_combined = np.zeros(shape=(batch_size_2,10))
#Y_val_combined = np.array([[0.6,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0]])
Y_val_combined = desired_PMF_confidence
#Y_val_combined = np.array([[1,0,0,0,0,0,0,0,0,0]])
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
print('break')
####################################################################################################################
### defining the combined model as the concatenation of g, then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=(100,))
### Calling the gen model and defining the first output (the output of model g)
#this is for the first gen
# x = gen_NN.layers[0](input)
# for lay in range(10):
# layer = gen_NN.layers[lay+1]
# layer.trainable = True
# x = layer(x)
# out_1 = x
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
# # this is for the second gen
# x = gen_NN.layers[0](input)
# for lay in range(5):
# layer = gen_NN.layers[lay+1]
# layer.trainable = True
# x = layer(x)
# out_1 = x
#### Calling the trained model and defining the 2nd output (the output of f or h)
# x_2 = trained_model.layers[0](x)
# for lay in range(5):
# layer = trained_model.layers[lay + 1]
# layer.trainable = False
# x_2 = layer(x_2)
# out_2 = x_2
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
#combined_NN.compile(optimizer = optimizer, loss=['MeanSquaredError','categorical_crossentropy'],loss_weights=[5,0.15])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','KLDivergence'],loss_weights=[5,0.15])
#combined_NN.compile(optimizer = optimizer, loss=['MeanSquaredError','MeanAbsolutePercentageError'],loss_weights=[1,1])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','categorical_crossentropy'],loss_weights=[1.1,5])
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
#loss_2 = tf.keras.losses.MeanSquaredLogarithmicError(name='LOSS_2')
#combined_NN.compile(optimizer = optimizer, loss=[loss_1,loss_2],loss_weights=[5,10])
#combined_NN.compile(optimizer = optimizer, loss=['MeanAbsolutePercentageError','MeanAbsoluteError'],loss_weights=[1,5])
# poisson and tanh at gen model does not work
#combined_NN.compile(optimizer = optimizer, loss=['Poisson','KLDivergence'],loss_weights=[5,0.11])
#combined_NN.compile(optimizer = optimizer, loss=['CosineSimilarity','KLDivergence'],loss_weights=[5,0.11])
# ### we need to enforce again in "combined_NN" to freeze weights for the trained model.
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
#combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=10, batch_size=32, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=1)
dynamic_weights_selection = True
# initial loss function weights
lambda_gen = 1
lambda_pmf = 0.02
############# TRAINING LOOP
for i in range(traning_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
# training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# output probabilities at step i ==> this is J in the paper
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.05 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that the vectors above are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
print('break')
| 17,703 | 31.724584 | 180 | py |
BOSS | BOSS-main/Eval_Codes/GenNNs_cifar_10.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
from keras.datasets import cifar10
from keras.layers import GlobalAveragePooling2D, Lambda, Conv2D, MaxPooling2D, Dropout, Dense, Flatten, Activation
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
from skimage.metrics import structural_similarity
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#from tensorflow.keras.models import load_model
import tensorflow
import keras
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(32, 32, 3)
imageB = imageB.reshape(32, 32, 3)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
:param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
#########################################################################################################################################
##########################################################################################
############################### Jensen-Shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and returns values in [0, 1], where 0 means the two distributions are identical
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# ################################################### below is the MNIST digits
#
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 32,32,3)
# X_test = test_images.reshape(test_images.shape[0], 32,32,3)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# print("")
#
# y_train = np_utils.to_categorical(train_labels,10)
# y_test = np_utils.to_categorical(test_labels,10)
#
# X_test = X_test.astype(np.float32)
#
# ####################################################################################
################################ some dataset - MNIST fashion
# download mnist data
#(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# split into train and test sets
'''
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 32,32,3)
X_test = test_images.reshape(test_images.shape[0], 32,32,3)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
'''
# ###############################################
## load the cifar10 data set
(train_images, train_labels), (test_images, test_labels) = cifar10.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 32, 32, 3)
X_test = test_images.reshape(test_images.shape[0], 32, 32, 3)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
y_test = np_utils.to_categorical(test_labels,10)
## get a trained model (such as the MNIST digits)
#trained_model = load_model("MNIST_digits__avgPool_dense_softmax_together_model.h5") # input is \in [0,1]
#trained_model = load_model("MNIST_digits_trained_model_3.h5") # input is \in [0,1]
#trained_model = load_model("MNIST_digits_trained_model_2.h5") # input is \in [-1,1]
#model = load_model("MNIST_digits_trained_model_3.h5") # input is \in [0,1]
#results = trained_model.evaluate(X_test, y_test)
#print("test loss, test acc:", results)
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
#trained_model = load_model("MNIST_digits_trained_model_gan_like.h5")
########################################################################################
## using CIFAR-10 model
# trained_model = load_model("cifar10-resnet20-30abc31d.pth")
# trained_model = load_model("augmented_best_model.h5", compile=False)
#
#model = cifar10vgg()
#trained_model = tensorflow.keras.models.load_model("cifar10vgg.h5")
#trained_model = load_model("cifar10vgg.h5", compile=True)
#trained_model_1 = tensorflow.keras.models.load_model("MNIST_digits_trained_model_4.h5")
trained_model = load_model("augmented_best_model.h5")
#
#
# results = trained_model.evaluate(X_test, y_test)
# print("test loss, test acc:", results)
for layer in trained_model.layers:
layer.trainable = False
#trained_model = load_model("best_model_improved.h5")
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
trained_model.summary()
# fixing the initial weights:
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)
#initializer = tf.keras.initializers.Ones()
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(8*8*256, use_bias=False, input_shape=(100,), kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((8, 8, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5,5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(3, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
gen_NN.summary()
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
### define X_d as the desired example
X_desired = X_test[82]
print('length x_desired list: ',len(X_desired))
# these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.20
delta_ssim = 0.85
delta_c = 0.25
traning_steps = 20
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=103)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
### Y_train_gen for the gen model (which is the image)
################################################################
Y_train_np_gen = np.zeros(shape=(batch_size_gen,32,32,3))
Y_val_gen = X_desired.reshape(1,32,32,3)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(32,32,3)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
###for targeted, we need to change Y_train and Y_val:
###let the target lbl be 0, then
Y_train_combined = np.zeros(shape=(batch_size_2,10))
#Y_val_combined = np.array([[0,0,0,0,0,0,1,0,0,0]])
#Y_val_combined = np.array([[0.25,0.25,0,0,0,0,0,0,0.25,0.25]])
#Y_val_combined = np.array([[0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1,0.1]])
Y_val_combined = np.array([[0,0,0,0,0,0,0.5,0.5,0,0]])
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
print('break')
###################################################################
##add flatten and dense layers to trained_model?
################################################################
#trained_model.add(GlobalAveragePooling2D())
#trained_model.add(Activation('softmax'))
####################################################################################################################
### defining the combined model as the concatenation of g, then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=(100,))
### Calling the gen model and defining the first output (the output of model g)
x = gen_NN.layers[0](input)
for lay in range(10):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
print(x)
#### Calling the trained model and defining the 2nd output (the output of f or h)
x_2 = trained_model.layers[0](x)
print(len(trained_model.layers))
for lay in range(len(trained_model.layers)-1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
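# Sanity sketch (illustrative): the combined model returns two heads, the generated
# 32x32x3 image from g and the classifier's 10-class PMF for that same image from
# the frozen f.
_probe = combined_NN(tf.zeros((1, 100)))
print('combined_NN output shapes:', _probe[0].shape, _probe[1].shape)  # (1, 32, 32, 3) and (1, 10)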
### defning the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
#
#combined_NN.summary()
dynamic_weights_selection = True
# initial loss function weights
lambda_gen = 1
lambda_pmf = 0.001
############# TRAINING LOOP
for i in range(traning_steps):
# compile at every step so as to update the loss function weights
combined_NN.compile(optimizer=optimizer, loss=['MeanSquaredError', 'categorical_crossentropy'],
loss_weights=[lambda_gen, lambda_pmf])
# training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0)
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(32, 32, 3)
# output probabilities at step i ==> this is J in the paper
'''
The warning: WARNING:tensorflow:Error in loading the saved optimizer state. As a result, your model is starting with a freshly initialized optimizer.
This happens because the saved model's optimizer state was not stored correctly, which is not important in our case.
'''
trained_model = load_model("augmented_best_model.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 32,32,3)).numpy()[0]
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(3072,) - fake_image.reshape(3072,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = structural_similarity(X_desired, fake_image, multichannel = True)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.001 * 0.001 * ((delta_js / D_JS)) * np.sign((delta_js /D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(32,32,3)
### below is the same thing (just for sanity check)
trained_model = load_model("augmented_best_model.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1,32,32,3)).numpy()[0]
predicted_lbl_w_pert = np.argmax(output_vector_probabilities)
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that the vectors above are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(32,32,3)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,vmin=0, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,vmin=0, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
print('break')
| 15,923 | 32.244259 | 158 | py |
BOSS | BOSS-main/Eval_Codes/GenNNs_BOSS_COVID_19.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
import numpy as np
import matplotlib.pyplot as plt
import keras
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.utils.np_utils import to_categorical
from keras.layers import Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
import pickle
import pandas as pd
import random
import cv2
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
:param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
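# Illustrative single step of the dynamic loss-weight update used in the training
# loop below (toy distance values assumed purely for demonstration): while the
# image SSIM is still below its threshold, the sign term is negative, so the
# image-loss weight is nudged upward, and relu_scaler_ keeps it non-negative.
_lambda_gen_demo, _delta_ssim_demo, _D_ssim_demo = 1.0, 0.85, 0.60
_lambda_gen_demo = relu_scaler_(
    _lambda_gen_demo - 0.01 * 1 * (_D_ssim_demo / _delta_ssim_demo) * np.sign((_D_ssim_demo / _delta_ssim_demo) - 1))
print('updated lambda_gen (toy):', _lambda_gen_demo)  # slightly above 1.0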
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(296, 296)
imageB = imageB.reshape(296, 296)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
############################### Jensen-Shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and returns values in [0, 1], where 0 means the two distributions are identical
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
#################################################################
########################################## load data set
#################################################################
"""
dataset is from https://www.kaggle.com/tawsifurrahman/covid19-radiography-database
load X_test and Y_test accordingly
"""
#################################################################
########################################## test model
#################################################################
trained_model = load_model('trained_model_COVID_19_2.h5')
number_of_classes = 4
# # CA = 85.6
# results = trained_model.evaluate(X_test, y_test)
# print("test loss, test acc:", results)
y_test = to_categorical(Y_test , number_of_classes)
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(74 * 74 * 256, use_bias=False, input_shape=(100,), name='dense_gen', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((74, 74, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='sigmoid', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
# # below is added for the 1D modification
# layer = layers.Reshape((784, 1, 1))
# layer.trainable=True
# gen_NN.add(layer)
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
### define X_d as the desired image
X_desired = X_test[21]
# these two need to have the same values for now, since X_train is shared by both models
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping thresholds
delta_s = 0.25
delta_js = 0.25
delta_ssim = 0.85
delta_c = 0.25
training_steps = 20
############################################################
### automated desired for confidence reduction y_d (desired PMF)
################################################################
number_of_classes = 4
# target_class = test_labels[21]
# desired_confidence = 0.5
#
# #1 code the confidence only
# desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
# for i in range(number_of_classes):
# if i == target_class:
# desired_PMF_confidence[:,i] = desired_confidence
# else:
# desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#
#
# #2 code the decision boundary examples between class i and class j
# desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
# class_i = 7
# class_j = 6
# #below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
# desired_confidence_boundary = 0.5
# for i in range(number_of_classes):
# if i == class_i or i == class_j:
# desired_PMF_boundary[:,i] = desired_confidence_boundary
# else:
# desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
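# Equivalent one-liner for the repeat-loop above (a sketch; same values, no loop):
# X_train_np = np.tile(X_train_one_np, (batch_size_gen, 1))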
############################################################
### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 2D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,296,296,1))
Y_val_gen = X_desired.reshape(1,296,296,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(296,296,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
# # below is for the 1D image
# Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
# Y_val_gen = X_desired.reshape(1,784,1,1)
# for i in range(batch_size_gen):
# Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# # convert Y_train to tf eager tensor
# Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
### for a targeted attack, we need to change Y_train and Y_val:
### here the target is class 1 (one-hot), then
Y_train_combined = np.zeros(shape=(batch_size_2,number_of_classes))
#Y_val_combined = np.zeros(shape=(number_of_classes))
#Y_val_combined[0] = 1
Y_val_combined = np.array([[0,1,0,0]])
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
#Y_desired = Y_val_combined
print('break')
####################################################################################################################
### defining the combined model as the concatenation of g then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=100)
### Calling the gen model and defining the first output (the out of model g)
#this is for the first gen
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
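# The effective objective below is a weighted sum over the two heads (a sketch
# of the intent): L(phi) = lambda_gen * MSE(g(z), X_d) + lambda_pmf * CE(f(g(z)), y_d),
# realized by passing loss_weights=[lambda_gen, lambda_pmf] to compile() at each step.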
#combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=10, batch_size=32, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=1)
dynamic_weights_selection = True
# initial loss-function weights
lambda_gen = 1
lambda_pmf = 0.01
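# The in-loop weight updates below follow a single rule, factored out here as a
# sketch for clarity: ratio > 1 means the objective is already satisfied, so its
# weight decays; ratio < 1 grows it; relu_scaler_ keeps the weights non-negative.
def dynamic_weight_update(weight, ratio, step_size):
    return relu_scaler_(weight - step_size * ratio * np.sign(ratio - 1.0))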
############# training loop
for i in range(training_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
    # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(296,296)
# output probabilities at step i ==> this is J in the paper
trained_model = load_model('trained_model_COVID_19_2.h5')
output_vector_probabilities = trained_model(fake_image.reshape(1, 296, 296, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(87616,) - fake_image.reshape(87616,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.01 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.01
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF TRAINING (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(296,296)
### below is the same thing (just for sanity check)
trained_model = load_model('trained_model_COVID_19_2.h5')
output_vector_probabilities = trained_model(fake_image.reshape(1,296,296,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure the two vectors above are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(296,296)
def bgr2rgb(img):
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    return img
# real_image = gray2BGR(np.float32(X_desired).reshape(32,32))
# real_image = bgr2rgb(real_image)
# fake_image = gray2BGR(fake_image)
# fake_image = bgr2rgb(fake_image)
#real_image_rgb = cv2.cvtColor(real_image, cv2.COLOR_RGB2GRAY)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=0, vmax=1)
#plt.imshow(real_image)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=0, vmax=1)
#plt.imshow(fake_image)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
print('break')
| 15,510 | 31.047521 | 158 | py |
BOSS | BOSS-main/Eval_Codes/applications/GenNNs_MNIST_digits_ensemble_two_models_table_diff_target.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### gradient of the model f w.r.t input image function ######
#########################################################################################################################################
def grad_discriminant_sm_wrt_1d_img(input_image, lbl, model):
"""
    :param input_image: a numpy array of shape (28, 28, 1) (one X_test sample)
    :param lbl: the index of the output discriminant function
:param model: sequential keras model trained with 1D image
:return: gradient - same size as the input image
"""
extractor = tf.keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# image processing: convert input image to tf variable
input_image_2 = tf.Variable(input_image, name='input_image_var')
# reshape input tf.variable to 4 dim
input_image_3 = tf.reshape(input_image_2, [1, 784, 1])
with tf.GradientTape(watch_accessed_variables=True) as tape:
tape.watch(input_image_3)
# get the actual outputs
features = extractor(input_image_3)
        # output of the last layer: features[-1] holds the softmax probabilities
        dis_func = features[-1]
func_val = dis_func[0][lbl]
grad = tape.gradient(func_val, input_image_3)
return grad
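# Usage sketch (hypothetical call; models and data are loaded further below):
#   g = grad_discriminant_sm_wrt_1d_img(X_test[0].reshape(784, 1), 7, trained_model_1)
#   g_np = g.numpy().reshape(784,)  # gradient of the class-7 softmax output w.r.t. the image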
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
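# Equivalent one-liner (a sketch; same behavior for scalar inputs):
def relu_scaler_alt(x):
    return max(0.0, x)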
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
############################### jensen shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and return values between [0,1] where 0 is two identical distributions
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
#X_test = 2*X_test - 1
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits classifier)
trained_model_1 = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model has a 1-D input of f, hence the generator NN must produce a matching 1-D output
#trained_model_2 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_1.h5") # THIS IS A DIFFERENT TRAINED CONV MODEL
trained_model_3 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_2.h5") # THIS IS A DIFFERENT TRAINED MODEL WITH ONLY DENSE LAYERS AND A SOFTMAX
######## freeze trained_model_1 and trained_model_3
for layer in trained_model_1.layers:
layer.trainable = False
# for layer in trained_model_2.layers:
# layer.trainable = False
for layer in trained_model_3.layers:
layer.trainable = False
# ########################################################################################
# ###########################################################################
# #################################### BUILDING THE gen model g(z,\phi)
# ###########################################################################
#
# gen_NN = tf.keras.Sequential()
#
# ## ADDING THE GEN MODEL layers that will be trained
#
# layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.BatchNormalization()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.LeakyReLU()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.Reshape((7, 7, 256))
# layer.trainable=True
# gen_NN.add(layer)
# #assert combined_NN.output_shape == (None, 7, 7, 256)
#
# layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
# layer.trainable=True
# gen_NN.add(layer)
# #assert gen_NN.output_shape == (None, 14, 14, 64)
#
# layer = layers.BatchNormalization()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.LeakyReLU()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
# layer.trainable=True
# gen_NN.add(layer)
# #assert gen_NN.output_shape == (None, 14, 14, 64)
#
# layer = layers.BatchNormalization()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.LeakyReLU()
# layer.trainable=True
# gen_NN.add(layer)
#
# layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
# layer.trainable=True
# gen_NN.add(layer)
#
#
# # below is added for the 1D modification
# layer = layers.Reshape((784, 1, 1))
# layer.trainable=True
# gen_NN.add(layer)
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
#X_desired = X_test[4]
# these two need to have the same values for now, since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.20
delta_ssim = 0.85
delta_c = 0.25
training_steps = 50
# #########################################################################################################################################
# ### automated desired for confidence reduction y_d (desired PMF) ##### use this for boundary examples AND confidence reduction attacks
# ############################################################################################################################################
# number_of_classes = 10
# target_class = 6
# desired_confidence = 0.6
#
# #1 code the confidence only
# desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
# for i in range(number_of_classes):
# if i == target_class:
# desired_PMF_confidence[:,i] = desired_confidence
# else:
# desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#
#
# #2 code the decision boundary examples between class i and class j
# desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
# class_i = 7
# class_j = 6
# #below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
# desired_confidence_boundary = 0.5
# for i in range(number_of_classes):
# if i == class_i or i == class_j:
# desired_PMF_boundary[:,i] = desired_confidence_boundary
# else:
# desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
# ############################################################################################################################################
### define X_d as the desired
# make the image in [-1,1]
#X_desired = X_test[21]*2 -1
############## here is what to save:
perturbed_images_ens_diff_tar = []
prob_vectors_ens_diff_tar = []
JSs__ens_diff_tar = []
SSIM_ens_diff_tar = []
confidence__ens_diff_tar = []
#idx_to_pick = [3, 2, 1, 18, 56, 15, 21, 0, 5621, 16]
idx_to_pick = [61]
for idx in idx_to_pick:
X_desired = X_test[idx]
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
    ### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
    ## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
    ### for targeted attacks, we need to change Y_train and Y_val:
    ### the one-hot target PMFs are built from target_labels_1/2 in the loops below
Y_train_combined = np.zeros(shape=(batch_size_2,10))
#Y_val_combined = np.array([[0.6,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0]])
#Y_val_combined = desired_PMF_confidence
target_labels_1 = [0]
#target_labels_1 = [0,5]
target_labels_2 = [9]
#target_labels_2 = [1,3]
#target_labels = [0, 1]
for target_label_1 in target_labels_1:
for target_label_2 in target_labels_2:
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
layer.trainable = True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable = True
gen_NN.add(layer)
# assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
layer.trainable = True
gen_NN.add(layer)
# assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
layer.trainable = True
gen_NN.add(layer)
# assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
layer.trainable = True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable = True
gen_NN.add(layer)
Y_train_combined_1 = np.zeros(shape=(batch_size_2,10))
Y_val_combined_1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
Y_val_combined_1[0][target_label_1] = 1
for i in range(batch_size_2):
Y_train_combined_1[i,:] = Y_val_combined_1
Y_desired_1 = Y_val_combined_1[0]
# Y_train_combined_2 = np.zeros(shape=(batch_size_2,10))
# Y_val_combined_2 = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# for i in range(batch_size_2):
# Y_train_combined_2[i,:] = Y_val_combined_2
# Y_desired_2 = Y_val_combined_2[0]
Y_train_combined_3 = np.zeros(shape=(batch_size_2,10))
Y_val_combined_3 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
Y_val_combined_3[0][target_label_2] = 1
for i in range(batch_size_2):
Y_train_combined_3[i,:] = Y_val_combined_3
Y_desired_3 = Y_val_combined_3[0]
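            # Equivalent construction (a sketch): the same one-hot target via Keras utils,
            # e.g. Y_val_combined_3 = tf.keras.utils.to_categorical([target_label_2], 10)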
print('break')
####################################################################################################################
            ### defining the combined model as the concatenation of g then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=100)
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
# this is for the trained model 1
x_2 = trained_model_1.layers[0](x)
for lay in range(len(trained_model_1.layers) - 1):
layer = trained_model_1.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
            # this is for the second trained model (trained_model_3)
x_4 = trained_model_3.layers[0](x)
for lay in range(len(trained_model_3.layers) - 1):
layer = trained_model_3.layers[lay + 1]
layer.trainable = False
x_4 = layer(x_4)
out_4 = x_4
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2, out_4])
            ### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
#loss_3 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_3', from_logits=False,label_smoothing=0)
loss_4 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_4', from_logits=False,label_smoothing=0)
dynamic_weights_selection = True
            # initial loss-function weights
lambda_gen = 1
lambda_pmf_1 = 0.02
#lambda_pmf_2 = 0.02
lambda_pmf_3 = 0.02
            ############# training loop
            for i in range(training_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2, loss_4], loss_weights=[lambda_gen, lambda_pmf_1, lambda_pmf_3])
                # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined_1,Y_train_combined_3], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined_1,Y_val_combined_3]), verbose=0 )
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
output_vector_probabilities_1 = trained_model_1(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities_2 = trained_model_2(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
output_vector_probabilities_3 = trained_model_3(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
#D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS_1 = D_JS_PMFs(output_vector_probabilities_1, Y_desired_1)
#D_JS_2 = D_JS_PMFs(output_vector_probabilities_2, Y_desired_2)
D_JS_3 = D_JS_PMFs(output_vector_probabilities_3, Y_desired_3)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS_1 <= delta_js and D_JS_3 <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and JS of = ', [D_JS_1,D_JS_3])
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; JS_Distances = ', [D_JS_1,D_JS_3], ' ; current loss weights = ', lambda_gen,' , ', [lambda_pmf_1,lambda_pmf_3 ])
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf_1 = relu_scaler_(lambda_pmf_1 - 0.05 * 0.02 * ((delta_js/D_JS_1)) * np.sign((delta_js/D_JS_1 )-1))
#lambda_pmf_2 = relu_scaler_(lambda_pmf_2 - 0.05 * 0.02 * ((delta_js / D_JS_2)) * np.sign((delta_js / D_JS_2) - 1))
lambda_pmf_3 = relu_scaler_(lambda_pmf_3 - 0.05 * 0.02 * ((delta_js / D_JS_3)) * np.sign((delta_js / D_JS_3) - 1))
                else:
                    lambda_gen = 1
                    lambda_pmf_1 = 0.02
                    lambda_pmf_3 = 0.02
            ### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF TRAINING (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
trained_model_1 = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
#trained_model_2 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_1.h5") # THIS IS A DIFFERENT TRAINED CONV MODEL
trained_model_3 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_2.h5") # THIS IS A DIFFERENT TRAINED MODEL WITH ONLY DENSE LAYERS AND A SOFTMAX
output_vector_probabilities_1 = trained_model_1(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = trained_model_2(fake_image.reshape(1,28,28,1)).numpy()[0]
            output_vector_probabilities_3 = trained_model_3(fake_image.reshape(1,28,28,1)).numpy()[0]
            confidence_1 = np.max(output_vector_probabilities_1)
            confidence_3 = np.max(output_vector_probabilities_3)
print('Finished the image of index = ', idx, ' with target lbls = ' , [target_label_1,target_label_2], ' with SSIM = ', D_ssim_images, ' and JS = ',[D_JS_1,D_JS_3])
            ######## save the results here:
perturbed_images_ens_diff_tar.append(fake_image)
prob_vectors_ens_diff_tar.append([output_vector_probabilities_1,output_vector_probabilities_3])
JSs__ens_diff_tar.append([D_JS_1,D_JS_3])
SSIM_ens_diff_tar.append(D_ssim_images)
confidence__ens_diff_tar.append([confidence_1,confidence_3])
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,3,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,3,4)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,3,5)
plt.title('Generated example PMF 1')
plt.stem(output_vector_probabilities_1)
plt.ylim(top=1.2)
plt.subplot(2,3,3)
plt.title('Desired PMF against model 2')
plt.stem(Y_val_combined_3[0])
plt.ylim(top=1.2)
#plt.axis('off')
plt.subplot(2,3,6)
plt.title('Generated example PMF 2')
plt.stem(output_vector_probabilities_3)
plt.ylim(top=1.2)
plt.subplot(2,3,2)
plt.title('Desired PMF against model 1')
plt.stem(Y_val_combined_1[0])
plt.ylim(top=1.2)
# #########################################################################################################################################
# ################## optimality condition KKT1
# #########################################################################################################################################
# # case 0: define a \mu and make it case_ + \mu (x* - x_d)
# mu = 0.005
# # case 1
# # \sum_{i\in[M]} grad(J_i(X^*)) \log(J_i(X^*) / J_i(X^*)+y_i) = 0
#
# # case 3
# # \sum_{i\in[M]} grad(J_i(X^*)) = 0
#
# # below is gget the gradients
# grad_matrix = np.zeros(shape=(784,10))
# matrix_with_log = np.zeros(shape=(784,10))
# temp = np.zeros(shape=(10,))
# for i in range(10):
# temp[i] = np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i]+Y_desired[i]) )
# np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
# grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# matrix_with_log[:,i] = temp[i] * grad_matrix[:,i]
#
# sum_of_all_grads_case_1 = np.sum(matrix_with_log,1)
# sum_of_all_grads_case_0 = np.zeros(shape=(784,))
# sum_of_all_grads_case_0 = sum_of_all_grads_case_1 + mu*( fake_image.reshape(784,) - X_desired.reshape(784,) )
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_0-np.zeros(shape=(784,)), 2 )
# print("solution for case 0 is of L2 distance from zeros which is = ", opt_ball)
#
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_1-np.zeros(shape=(784,)), 2 )
# print("solution for case 1 is of L2 distance from zeros which is = ", opt_ball)
#
#
# sum_of_all_grads = np.sum(grad_matrix,1)
# # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# print("solution for case 3 is of L2 distance from zeros which is = ", opt_ball)
#
# # #########################################################################################################################################
# # ################## optimality condition KKT1 case 3:
# # #########################################################################################################################################
# # # \sum_{i\in[M]} grad(J_i(X^*)) = 0
# #
# # # for x^* obtained from the above algorithm, get grad(J_i(X^*)) for i \in [M]
# #
# # # for below function to work, we need a model with 1-D input AND seperate last dense and activation
# #
# # grad_matrix = np.zeros(shape=(784,10))
# # for i in range(10):
# # grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# #
# # sum_of_all_grads = np.sum(grad_matrix,1)
# # # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# # opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# # print("solution is of L2 distance from zeros which is = ", opt_ball)
#
#
# print('break')
| 27,324 | 38.316547 | 201 | py |
BOSS | BOSS-main/Eval_Codes/applications/GenNNs_MNIST_digits_confide_reduction.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### gradient of the model f w.r.t input image function ######
#########################################################################################################################################
def grad_discriminant_sm_wrt_1d_img(input_image, lbl, model):
"""
    :param input_image: a numpy array of shape (28, 28, 1) (one X_test sample)
    :param lbl: the index of the output discriminant function
:param model: sequential keras model trained with 1D image
:return: gradient - same size as the input image
"""
extractor = tf.keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# image processing: convert input image to tf variable
input_image_2 = tf.Variable(input_image, name='input_image_var')
# reshape input tf.variable to 4 dim
input_image_3 = tf.reshape(input_image_2, [1, 784, 1])
with tf.GradientTape(watch_accessed_variables=True) as tape:
tape.watch(input_image_3)
# get the actual outputs
features = extractor(input_image_3)
        # output of the last layer: features[-1] holds the softmax probabilities
        dis_func = features[-1]
func_val = dis_func[0][lbl]
grad = tape.gradient(func_val, input_image_3)
return grad
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar
:return: y=relu(x)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
#########################################################################################################################################
#########################################################################################################################################
############################### SSIM function ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
############################### jensen shannon divergence function ######
##########################################################################################
"""
it is a normalized and stable version of the KL divergence and return values between [0,1] where 0 is two identical distributions
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
X_test = 2*X_test - 1
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits classifier)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model has a 1-D input of f, hence the generator NN must produce a matching 1-D output
# ###############################################################################################################################################################################
# ####################################################################################### results for clean enviroment ########################################################################################
# confidence_clean_MNIST = []
# JS_clean_MNIST = []
#
# for idx in range(200):
# sample = X_test[idx,:,:,:]
# prob = trained_model(sample.reshape(1,28,28,1))[0].numpy()
# prediction_conf = np.argmax(prob)
# # if the sample were correctly classified
# if prediction_conf == test_labels[idx]:
# # get the confidence level:
# confidence_clean_MNIST.append(np.max(prob))
# # get the JS distance with the ont hot encoding of the true lbl
# if np.max(prob) >= 0.9999:
# prob = y_test[idx]
#
# JD_dis = D_JS_PMFs(prob, y_test[idx])
# JS_clean_MNIST.append(JD_dis)
# # logger:
# print('idx = ', idx, 'predicted lbl = ',prediction_conf,'tru lbl = ',test_labels[idx], 'confidence_clean_CIFAR = ',np.max(prob), 'JS = ',JD_dis)
#
# print('For clean env, we get [ Avg_confidence,Avg_JS] = ', [np.mean(confidence_clean_MNIST),np.mean(JS_clean_MNIST)])
# ####################################################################################################################
# # ################################ saving predicetd confidence scores of the first 200 images w.r.t the trained model
# ####################################################################################################################
# import pickle
# confidence_1st_200_images = []
#
# clean_200_test_images = []
#
# for ii in range(200):
# X_test_sample = X_test[ii] * 2 - 1
# clean_200_test_images.append(X_test_sample)
# #output_vector_probabilities = trained_model(X_test_sample.reshape(1,28,28,1)).numpy()[0]
# #confidence_1st_200_images.append(np.max(output_vector_probabilities))
#
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
############## here is what to save:
perturbed_images_conf_red = []
prob_vectors_conf_red = []
JS_conf_red = []
SSIM__conf_red = []
JS_conf_red_per_train_step=[]
SSIM_conf_per_train_step=[]
kk_2_per_train_step=[]
kk_i_per_train_step=[]
lambda_g_save = []
lambda_h_save = []
confidence_SCiTaP_C_MNIST = []
JS_SCiTaP_C_MNIST = []
SSIM_SCiTaP_C_MNIST = []
for idx in range(20):
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
layer.trainable=True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable=True
gen_NN.add(layer)
#########################################################################################################################################
    ############################### this is NOT sequential training (training two loss functions from different heads of the NN) ######
#########################################################################################################################################
    ### define X_d as the desired image
# make the image in [-1,1]
X_desired = X_test[idx]
#X_desired = X_test[4]
    # these two need to have the same values for now, since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.15
delta_ssim = 0.90
delta_c = 0.25
    training_steps = 20
############################################################
### automated desired for confidence reduction y_d (desired PMF)
################################################################
number_of_classes = 10
#target_class = 6
true_label = test_labels[idx]
desired_confidence = 0.6
#1 code the confidence only
desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
for i in range(number_of_classes):
if i == true_label:
desired_PMF_confidence[:,i] = desired_confidence
else:
desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
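    # Vectorized equivalent of the loop above (a sketch; same resulting PMF):
    # desired_PMF_confidence = np.full((1, number_of_classes),
    #                                  (1 - desired_confidence) / (number_of_classes - 1))
    # desired_PMF_confidence[0, true_label] = desired_confidence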
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
    ### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
Y_train_combined = np.zeros(shape=(batch_size_2,10))
Y_val_combined = desired_PMF_confidence
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
####################################################################################################################
    ### defining the combined model as the concatenation of g then f ==> this defines model h in the paper
#############################################################################################################################
input = Input(shape=100)
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
    ### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
dynamic_weights_selection = True
# initial losses functions weights
lambda_gen = 1
lambda_pmf = 0.02
    ############# training loop
    for i in range(training_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
        # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# output probabilities at step i ==> this is J in the paper
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
# print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.05 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
#########################################################################################################################################
################## optimality condition KKT1
#########################################################################################################################################
        # case 0: define a mu and form the residual kk = (case-1 gradient term) + 2*mu*(x* - x_d)
mu = 0.025
        # below: get the gradients of each discriminant function w.r.t. the image
grad_matrix = np.zeros(shape=(784, 10))
matrix_with_log = np.zeros(shape=(784, 10))
temp = np.zeros(shape=(10,))
for ii in range(10):
temp[ii] = np.log2(output_vector_probabilities[ii] / (output_vector_probabilities[ii] + Y_desired[ii]))
grad_matrix[:, ii] = grad_discriminant_sm_wrt_1d_img(fake_image, ii, trained_model).numpy()[0, :, :].reshape(
784, )
matrix_with_log[:, ii] = temp[ii] * grad_matrix[:, ii]
sum_of_all_grads_case_1 = np.sum(matrix_with_log, 1)
        kk = sum_of_all_grads_case_1 + 2 * mu * (fake_image.reshape(784, ) - X_desired.reshape(784, ))
opt_ball_2 = LA.norm(kk - np.zeros(shape=(784,)), 2)
opt_ball_i = LA.norm(kk - np.zeros(shape=(784,)), np.inf)
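        # Interpretation (a sketch): kk approximates the gradient of the weighted
        # objective at the current image, so small ||kk|| values above indicate the
        # generated image is close to a first-order (KKT) stationary point.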
        ############## here is what to save per training step:
JS_conf_red_per_train_step.append([idx,i,D_JS])
SSIM_conf_per_train_step.append([idx,i,D_ssim_images])
kk_2_per_train_step.append([idx,i,opt_ball_2])
kk_i_per_train_step.append([idx,i,opt_ball_i])
lambda_g_save.append(lambda_gen)
lambda_h_save.append(lambda_pmf)
#print("KKT1: solution is of distance from zeros [L2,Linf] = ", [opt_ball_2, opt_ball_i])
# fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
#
# # confidence_SCiTaP_C_MNIST = []
# # JS_SCiTaP_C_MNIST = []
# # SSIM_SCiTaP_C_MNIST = []
#
#
# ### below is the same thing (just for sanity check)
# trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
# output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#
# confidence = np.max(output_vector_probabilities)
#
# confidence_SCiTaP_C_MNIST.append(confidence)
# JS_SCiTaP_C_MNIST.append(D_JS)
# SSIM_SCiTaP_C_MNIST.append(D_ssim_images)
#
# print('idx = ', idx, 'is done with [conf,JS,ssim] = ', [confidence, D_JS, D_ssim_images])
#
#
#
# print(np.mean(confidence_SCiTaP_C_MNIST),np.mean(JS_SCiTaP_C_MNIST),np.mean(SSIM_SCiTaP_C_MNIST))
print('Finished the image of index = ', idx,' and traing steps = ',i, ' with SSIM = ', D_ssim_images,
' and JS = ', [D_JS])
# ############## here what to save:
# perturbed_images_conf_red.append(fake_image)
# prob_vectors_conf_red.append([output_vector_probabilities])
# JS_conf_red.append([D_JS])
# SSIM__conf_red.append([D_ssim_images])
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that above vectr are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
# #########################################################################################################################################
# ################## optimality condition KKT1
# #########################################################################################################################################
# # case 0: define a \mu and make it case_ + \mu (x* - x_d)
# mu = 0.005
# # case 1
# # \sum_{i\in[M]} grad(J_i(X^*)) \log(J_i(X^*) / J_i(X^*)+y_i) = 0
#
# # case 3
# # \sum_{i\in[M]} grad(J_i(X^*)) = 0
#
# # below is gget the gradients
# grad_matrix = np.zeros(shape=(784,10))
# matrix_with_log = np.zeros(shape=(784,10))
# temp = np.zeros(shape=(10,))
# for i in range(10):
# temp[i] = np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i]+Y_desired[i]) )
# np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
# grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# matrix_with_log[:,i] = temp[i] * grad_matrix[:,i]
#
# sum_of_all_grads_case_1 = np.sum(matrix_with_log,1)
# sum_of_all_grads_case_0 = np.zeros(shape=(784,))
# sum_of_all_grads_case_0 = sum_of_all_grads_case_1 + mu*( fake_image.reshape(784,) - X_desired.reshape(784,) )
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_0-np.zeros(shape=(784,)), 2 )
# print("solution for case 0 is of L2 distance from zeros which is = ", opt_ball)
#
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_1-np.zeros(shape=(784,)), 2 )
# print("solution for case 1 is of L2 distance from zeros which is = ", opt_ball)
#
#
# sum_of_all_grads = np.sum(grad_matrix,1)
# # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# print("solution for case 3 is of L2 distance from zeros which is = ", opt_ball)
#
# # #########################################################################################################################################
# # ################## optimality condition KKT1 case 3:
# # #########################################################################################################################################
# # # \sum_{i\in[M]} grad(J_i(X^*)) = 0
# #
# # # for x^* obtained from the above algorithm, get grad(J_i(X^*)) for i \in [M]
# #
# # # for below function to work, we need a model with 1-D input AND seperate last dense and activation
# #
# # grad_matrix = np.zeros(shape=(784,10))
# # for i in range(10):
# # grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# #
# # sum_of_all_grads = np.sum(grad_matrix,1)
# # # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# # opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# # print("solution is of L2 distance from zeros which is = ", opt_ball)
#
#
print('break')
#
#
#
| 24,839 | 36.185629 | 207 | py |
BOSS | BOSS-main/Eval_Codes/applications/GenNNs_MNIST_digits_targeted_attack.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### gradient of the model f w.r.t input image function ######
#########################################################################################################################################
def grad_discriminant_sm_wrt_1d_img(input_image, lbl, model):
"""
    :param input_image: a numpy array holding one image with 784 pixels (e.g. shape (28, 28) or (784,))
    :param lbl: index of the output discriminant function (the class index)
    :param model: sequential keras model trained on the flattened (1D) image input
:return: gradient - same size as the input image
"""
extractor = tf.keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# image processing: convert input image to tf variable
input_image_2 = tf.Variable(input_image, name='input_image_var')
    # reshape the input tf.Variable to shape (1, 784, 1)
input_image_3 = tf.reshape(input_image_2, [1, 784, 1])
with tf.GradientTape(watch_accessed_variables=True) as tape:
tape.watch(input_image_3)
# get the actual outputs
features = extractor(input_image_3)
        # output of the last layer: the softmax probability vector
        # dis_func = model1.predict(input_image.reshape(1,784,1))[0]
        dis_func = features[-1]
        # pick the probability (discriminant value) of class `lbl`
func_val = dis_func[0][lbl]
grad = tape.gradient(func_val, input_image_3)
return grad
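# Illustrative usage note (names below are placeholders, not from the original
# script): for an image `img` with 784 pixels and a class index c,
# grad_discriminant_sm_wrt_1d_img(img, c, some_trained_model) returns the
# gradient of the class-c probability with respect to the input, a tensor of
# shape (1, 784, 1); callers below reshape it into a length-784 vector.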
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar input
    :return: y = relu(x), i.e. max(x, 0)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
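# ---------------------------------------------------------------------------
# Illustrative sketch (made-up numbers, not from a real run) of the dynamic
# loss-weight update used later in the training loop: a weight is nudged down
# once its distance target is met, up otherwise, and clipped at zero by
# relu_scaler_.
_lambda_gen_demo = 1.0
_D_ssim_demo, _delta_ssim_demo = 0.95, 0.83  # SSIM already above its threshold
_lambda_gen_demo = relu_scaler_(
    _lambda_gen_demo
    - 0.01 * 1 * (_D_ssim_demo / _delta_ssim_demo) * np.sign((_D_ssim_demo / _delta_ssim_demo) - 1))
print('illustrative updated lambda_gen:', _lambda_gen_demo)
# ---------------------------------------------------------------------------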
#########################################################################################################################################
#########################################################################################################################################
###############################              SSIM function                                             ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
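# ---------------------------------------------------------------------------
# Illustrative sanity check: SSIM of an image with itself is 1.0, and the index
# decreases towards 0 as structural similarity is lost.
_img_demo = np.random.uniform(-1, 1, size=(28, 28))
print('SSIM of an image with itself (should be 1.0):', SSIM_index(_img_demo, _img_demo))
# ---------------------------------------------------------------------------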
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
###############################       Jensen-Shannon divergence function                               ######
##########################################################################################
"""
The Jensen-Shannon divergence is a normalized, symmetric and stable variant of the KL divergence; it returns values in [0, 1], where 0 means the two distributions are identical.
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
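# ---------------------------------------------------------------------------
# Illustrative sanity check: scipy's jensenshannon returns the JS *distance*
# (the square root of the JS divergence); with base=2 it lies in [0, 1], with
# 0 for identical PMFs and 1 for disjoint one-hot PMFs.
_pmf_uniform = np.full(10, 0.1)
print('JS distance of identical PMFs (should be ~0):',
      D_JS_PMFs(_pmf_uniform, _pmf_uniform))
print('JS distance of disjoint one-hot PMFs (should be 1):',
      D_JS_PMFs(np.eye(10)[0], np.eye(10)[1]))
# ---------------------------------------------------------------------------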
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
#X_test = 2*X_test - 1
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits model)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
############## here what to save:
perturbed_images_tar = []
prob_vectors_tar = []
JS_tar = []
SSIM_tar = []
confidence_tar = []
succ_tar=[]
run_time_tar=[]
#target_lbl_save = []
JS_tar_att_per_train_step=[]
SSIM_tar_att_train_step=[]
kk_2_tar_att_per_train_step=[]
kk_i_tar_att_per_train_step=[]
import timeit
number_of_observations = 5
for idx in range(number_of_observations):
for layer in trained_model.layers:
layer.trainable = False
start = timeit.default_timer()
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
initializer = tf.keras.initializers.RandomNormal(mean=0.0, stddev=0.05, seed=102)
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False, kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh', kernel_initializer=initializer)
layer.trainable=True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable=True
gen_NN.add(layer)
#########################################################################################################################################
    ############################### this is NOT sequential training (training two loss functions from different heads of the NN)      ######
#########################################################################################################################################
### define X_d as the desired
# make the image in [-1,1]
X_desired = X_test[idx]*2 -1
#X_desired = X_test[4]
    # these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.30
delta_ssim = 0.83
delta_c = 0.25
traning_steps = 25
############################################################
### automated desired for confidence reduction y_d (desired PMF)
################################################################
number_of_classes = 10
# choices_to_slect_from = np.setdiff1d(np.array([0,1,2,3,4,5,6,7,8,9]), test_labels[idx])
#
# target_class = np.random.choice(choices_to_slect_from)
#
# target_lbl_save.append(target_class)
import pickle
target_class = pickle.load(open("target_lbl_save.p","rb"))
target_class = target_class[idx]
#true_label = test_labels[idx]
desired_confidence = 1
#1 code the confidence only
desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
for i in range(number_of_classes):
if i == target_class:
desired_PMF_confidence[:,i] = desired_confidence
else:
desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
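    # ------------------------------------------------------------------
    # Illustrative note: with desired_confidence = 1 the loop above yields the
    # one-hot PMF of the target class, e.g. target_class = 3 gives
    # [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]; with a lower confidence such as 0.6 the
    # remaining mass is spread uniformly over the other 9 classes. Either way
    # the desired PMF sums to 1:
    assert np.isclose(np.sum(desired_PMF_confidence), 1.0)
    # ------------------------------------------------------------------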
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
    ### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
Y_train_combined = np.zeros(shape=(batch_size_2,10))
Y_val_combined = desired_PMF_confidence
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
####################################################################################################################
    ### defining the combined model such that it is the concatenation of g then f ==> this is defining model h in the paper
#############################################################################################################################
input = Input(shape=100)
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
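    # Note: combined_NN takes the single 100-dim input z and exposes two heads,
    # out_1 = g(z) (the generated image) and out_2 = f(g(z)) (the frozen
    # classifier's PMF), so loss_1 below pulls the image towards X_desired while
    # loss_2 pulls its prediction towards the target PMF.
    assert len(combined_NN.outputs) == 2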
    ### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
dynamic_weights_selection = True
# initial losses functions weights
lambda_gen = 1
lambda_pmf = 0.02
    ############# TRAINING LOOP
for i in range(traning_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
        # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# output probabilities at step i ==> this is J in the paper
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
# print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.05 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
# #########################################################################################################################################
# ################## optimality condition KKT1
# #########################################################################################################################################
# # case 0: define a \mu and make it case_ + \mu (x* - x_d)
# mu = 0.025
#
# # below is gget the gradients
# grad_matrix = np.zeros(shape=(784, 10))
# matrix_with_log = np.zeros(shape=(784, 10))
# temp = np.zeros(shape=(10,))
# for ii in range(10):
# temp[ii] = np.log2(output_vector_probabilities[ii] / (output_vector_probabilities[ii] + Y_desired[ii]))
# # np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
# grad_matrix[:, ii] = grad_discriminant_sm_wrt_1d_img(fake_image, ii, trained_model).numpy()[0, :, :].reshape(
# 784, )
# matrix_with_log[:, ii] = temp[ii] * grad_matrix[:, ii]
#
# sum_of_all_grads_case_1 = np.sum(matrix_with_log, 1)
# kk = np.zeros(shape=(784,))
# kk = sum_of_all_grads_case_1 + 2 * mu * (fake_image.reshape(784, ) - X_desired.reshape(784, ))
#
# opt_ball_2 = LA.norm(kk - np.zeros(shape=(784,)), 2)
# opt_ball_i = LA.norm(kk - np.zeros(shape=(784,)), np.inf)
#
# ############## here what to save per training step:
#
# # JS_tar_att_per_train_step = []
# # SSIM_tar_att_train_step = []
# # kk_2_tar_att_per_train_step = []
# # kk_i_tar_att_per_train_step = []
#
# JS_tar_att_per_train_step.append([idx,i,D_JS])
# SSIM_tar_att_train_step.append([idx,i,D_ssim_images])
# kk_2_tar_att_per_train_step.append([idx,i,opt_ball_2])
# kk_i_tar_att_per_train_step.append([idx,i,opt_ball_i])
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
### below is the same thing (just for sanity check)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
confidence_tar_temp = np.max(output_vector_probabilities)
predicted_perturbed_label = np.argmax(output_vector_probabilities)
if predicted_perturbed_label == target_class:
succ_tar.append(1)
stop = timeit.default_timer()
JS_tar.append([D_JS])
SSIM_tar.append([D_ssim_images])
run_time_tar.append([stop-start])
print('Finished the image of index = ', idx,'succ = ',len(succ_tar), ' with SSIM = ', D_ssim_images,
' and JS = ', [D_JS], 'confidence = ',[confidence_tar_temp])
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# ############## here what to save:
#
#
#
perturbed_images_tar.append(fake_image)
prob_vectors_tar.append([output_vector_probabilities])
JS_tar.append([D_JS])
SSIM_tar.append([D_ssim_images])
confidence_tar.append([confidence_tar_temp])
success_rate = 100*(len(succ_tar) / number_of_observations)
print('[succ,avg ssim,avg JS,avg run time]',[success_rate,np.mean(SSIM_tar),np.mean(JS_tar),np.mean(run_time_tar)],'seconds')
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
# ### below is the same thing (just for sanity check)
# trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
# output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]
# #output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# # this is to make sure that above vectr are identical
# #print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
# #print(output_vector_probabilities_2)
#
# real_image = X_desired.reshape(28,28)
#
# plt.figure()
# plt.subplot(2,2,1)
# plt.title('Desired example')
# plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
# plt.colorbar()
# plt.axis('off')
# plt.subplot(2,2,2)
# plt.title('Generated example')
# plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
# plt.colorbar()
# plt.axis('off')
# plt.subplot(2,2,4)
# plt.title('Generated example PMF')
# plt.stem(output_vector_probabilities)
# plt.ylim(top=1.2)
# plt.subplot(2,2,3)
# plt.title('Desired PMF')
# plt.stem(Y_val_combined[0])
# plt.ylim(top=1.2)
#
# #########################################################################################################################################
# ################## optimality condition KKT1
# #########################################################################################################################################
# # case 0: define a \mu and make it case_ + \mu (x* - x_d)
# mu = 0.005
# # case 1
# # \sum_{i\in[M]} grad(J_i(X^*)) \log(J_i(X^*) / J_i(X^*)+y_i) = 0
#
# # case 3
# # \sum_{i\in[M]} grad(J_i(X^*)) = 0
#
# # below is gget the gradients
# grad_matrix = np.zeros(shape=(784,10))
# matrix_with_log = np.zeros(shape=(784,10))
# temp = np.zeros(shape=(10,))
# for i in range(10):
# temp[i] = np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i]+Y_desired[i]) )
# np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
# grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# matrix_with_log[:,i] = temp[i] * grad_matrix[:,i]
#
# sum_of_all_grads_case_1 = np.sum(matrix_with_log,1)
# sum_of_all_grads_case_0 = np.zeros(shape=(784,))
# sum_of_all_grads_case_0 = sum_of_all_grads_case_1 + mu*( fake_image.reshape(784,) - X_desired.reshape(784,) )
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_0-np.zeros(shape=(784,)), 2 )
# print("solution for case 0 is of L2 distance from zeros which is = ", opt_ball)
#
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_1-np.zeros(shape=(784,)), 2 )
# print("solution for case 1 is of L2 distance from zeros which is = ", opt_ball)
#
#
# sum_of_all_grads = np.sum(grad_matrix,1)
# # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# print("solution for case 3 is of L2 distance from zeros which is = ", opt_ball)
#
# # #########################################################################################################################################
# # ################## optimality condition KKT1 case 3:
# # #########################################################################################################################################
# # # \sum_{i\in[M]} grad(J_i(X^*)) = 0
# #
# # # for x^* obtained from the above algorithm, get grad(J_i(X^*)) for i \in [M]
# #
# # # for below function to work, we need a model with 1-D input AND seperate last dense and activation
# #
# # grad_matrix = np.zeros(shape=(784,10))
# # for i in range(10):
# # grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# #
# # sum_of_all_grads = np.sum(grad_matrix,1)
# # # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# # opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# # print("solution is of L2 distance from zeros which is = ", opt_ball)
#
#
# print('break')
#
#
#
| 23,610 | 35.380586 | 180 | py |
BOSS | BOSS-main/Eval_Codes/applications/GenNNs_MNIST_digits_boudary_examples.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### gradient of the model f w.r.t input image function ######
#########################################################################################################################################
def grad_discriminant_sm_wrt_1d_img(input_image, lbl, model):
"""
    :param input_image: a numpy array holding one image with 784 pixels (e.g. shape (28, 28) or (784,))
    :param lbl: index of the output discriminant function (the class index)
    :param model: sequential keras model trained on the flattened (1D) image input
:return: gradient - same size as the input image
"""
extractor = tf.keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# image processing: convert input image to tf variable
input_image_2 = tf.Variable(input_image, name='input_image_var')
    # reshape the input tf.Variable to shape (1, 784, 1)
input_image_3 = tf.reshape(input_image_2, [1, 784, 1])
with tf.GradientTape(watch_accessed_variables=True) as tape:
tape.watch(input_image_3)
# get the actual outputs
features = extractor(input_image_3)
        # output of the last layer: the softmax probability vector
        # dis_func = model1.predict(input_image.reshape(1,784,1))[0]
        dis_func = features[-1]
        # pick the probability (discriminant value) of class `lbl`
func_val = dis_func[0][lbl]
grad = tape.gradient(func_val, input_image_3)
return grad
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar input
    :return: y = relu(x), i.e. max(x, 0)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
#########################################################################################################################################
#########################################################################################################################################
###############################              SSIM function                                             ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
###############################       Jensen-Shannon divergence function                               ######
##########################################################################################
"""
The Jensen-Shannon divergence is a normalized, symmetric and stable variant of the KL divergence; it returns values in [0, 1], where 0 means the two distributions are identical.
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
#X_test = 2*X_test - 1
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits model)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
######## freeze trained_model
for layer in trained_model.layers:
layer.trainable = False
############## here what to save:
perturbed_images_boundary = []
prob_vectors_boundary = []
JS_boundary = []
SSIM_boundary = []
labels_boundary_save = []
############## here what to save per training step:
JS_bounday_per_train_step=[]
SSIM_bounday_train_step=[]
kk_2_bounday_per_train_step=[]
kk_i_bounday_per_train_step=[]
for idx in range(50):
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
layer.trainable=True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable=True
gen_NN.add(layer)
#assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
layer.trainable=True
gen_NN.add(layer)
#assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable=True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable=True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
layer.trainable=True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable=True
gen_NN.add(layer)
#########################################################################################################################################
    ############################### this is NOT sequential training (training two loss functions from different heads of the NN)      ######
#########################################################################################################################################
### define X_d as the desired
# make the image in [-1,1]
X_desired = X_test[idx]*2 -1
#X_desired = X_test[4]
    # these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.20
delta_ssim = 0.90
delta_c = 0.25
traning_steps = 25
############################################################
### automated desired for confidence reduction y_d (desired PMF)
################################################################
number_of_classes = 10
choices_to_slect_from = np.setdiff1d(np.array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]), test_labels[idx])
class_i = np.random.choice(choices_to_slect_from)
choices_to_slect_from_j = np.setdiff1d(choices_to_slect_from, class_i)
class_j = np.random.choice(choices_to_slect_from_j)
# 2 code the decision boundary examples between class i and class j
desired_PMF_boundary = np.zeros(shape=(1, number_of_classes))
#class_i = 7
#class_j = 6
    # below are the values of PMF[i] and PMF[j] (i.e. a maximum of 0.5 each)
labels_boundary_save.append([idx,class_i,class_j])
desired_confidence_boundary = 0.5
for i in range(number_of_classes):
if i == class_i or i == class_j:
desired_PMF_boundary[:, i] = desired_confidence_boundary
else:
desired_PMF_boundary[:, i] = (1 - (2 * desired_confidence_boundary)) / (number_of_classes - 2)
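    # ------------------------------------------------------------------
    # Illustrative note: with desired_confidence_boundary = 0.5 and, say,
    # class_i = 7 and class_j = 6, the loop above yields a PMF with 0.5 on
    # classes 6 and 7 and 0 elsewhere, i.e. a point on the decision boundary
    # between the two classes. The desired PMF sums to 1:
    assert np.isclose(np.sum(desired_PMF_boundary), 1.0)
    # ------------------------------------------------------------------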
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
    ### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
Y_train_combined = np.zeros(shape=(batch_size_2,10))
Y_val_combined = desired_PMF_boundary
for i in range(batch_size_2):
Y_train_combined[i,:] = Y_val_combined
Y_desired = Y_val_combined[0]
####################################################################################################################
    ### defining the combined model such that it is the concatenation of g then f ==> this is defining model h in the paper
#############################################################################################################################
input = Input(shape=100)
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
x_2 = trained_model.layers[0](x)
for lay in range(len(trained_model.layers) - 1):
layer = trained_model.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2])
    ### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
### define the loss functions for each head
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
dynamic_weights_selection = True
# initial losses functions weights
lambda_gen = 1
lambda_pmf = 0.02
    ############# TRAINING LOOP
for i in range(traning_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2], loss_weights=[lambda_gen, lambda_pmf])
# for lay in range(18):
# if lay >= 12:
# layer = combined_NN.layers[lay]
# layer.trainable = False
        # training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined]), verbose=0 )
#combined_NN.train_on_batch(X_train, [Y_train_gen, Y_train_combined])
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
# output probabilities at step i ==> this is J in the paper
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities = combined_NN(X_val)[1].numpy().reshape(10,)
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS = D_JS_PMFs(output_vector_probabilities, Y_desired)
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS <= delta_js:
# print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and D_JS = ', D_JS)
break
### logger:
#print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; PMF_JS_Distance = ', D_JS, ' ; current loss weights = ', lambda_gen,' , ', lambda_pmf )
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf = relu_scaler_(lambda_pmf - 0.05 * 0.02 * ((delta_js/D_JS)) * np.sign((delta_js/D_JS )-1))
else:
lambda_gen = 1
lambda_pmf = 0.02
#########################################################################################################################################
################## optimality condition KKT1
#########################################################################################################################################
# case 0: define a \mu and make it case_ + \mu (x* - x_d)
mu = 0.025
        # below: get the gradients
grad_matrix = np.zeros(shape=(784, 10))
matrix_with_log = np.zeros(shape=(784, 10))
temp = np.zeros(shape=(10,))
for ii in range(10):
temp[ii] = np.log2(output_vector_probabilities[ii] / (output_vector_probabilities[ii] + Y_desired[ii]))
# np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
grad_matrix[:, ii] = grad_discriminant_sm_wrt_1d_img(fake_image, ii, trained_model).numpy()[0, :, :].reshape(
784, )
matrix_with_log[:, ii] = temp[ii] * grad_matrix[:, ii]
sum_of_all_grads_case_1 = np.sum(matrix_with_log, 1)
kk = np.zeros(shape=(784,))
kk = sum_of_all_grads_case_1 + 2 * mu * (fake_image.reshape(784, ) - X_desired.reshape(784, ))
opt_ball_2 = LA.norm(kk - np.zeros(shape=(784,)), 2)
opt_ball_i = LA.norm(kk - np.zeros(shape=(784,)), np.inf)
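        # Note: opt_ball_2 and opt_ball_i are the L2 and L-infinity norms of the
        # stationarity residual kk of the mu-regularized objective (KKT case 0);
        # smaller values indicate the current fake_image is closer to a
        # stationary point of that objective.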
############# here what to save per training step:
JS_bounday_per_train_step.append([idx,i,D_JS])
SSIM_bounday_train_step.append([idx,i,D_ssim_images])
kk_2_bounday_per_train_step.append([idx,i,opt_ball_2])
kk_i_bounday_per_train_step.append([idx,i,opt_ball_i])
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
### below is the same thing (just for sanity check)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
print('Finished the image of index = ', idx, ' with SSIM = ', D_ssim_images,
' and JS = ', [D_JS])
# ############## here what to save:
# perturbed_images_boundary.append(fake_image)
# prob_vectors_boundary.append([output_vector_probabilities])
# JS_boundary.append([D_JS])
# SSIM_boundary.append([D_ssim_images])
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
trained_model = load_model("MNIST_digits_trained_range_1to1_1d_input.h5")
output_vector_probabilities = trained_model(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = combined_NN(X_val)[1].numpy().reshape(10,)
# this is to make sure that above vectr are identical
#print('This vector MUST be zero',output_vector_probabilities-output_vector_probabilities_2)
#print(output_vector_probabilities_2)
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,2,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,2)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,2,4)
plt.title('Generated example PMF')
plt.stem(output_vector_probabilities)
plt.ylim(top=1.2)
plt.subplot(2,2,3)
plt.title('Desired PMF')
plt.stem(Y_val_combined[0])
plt.ylim(top=1.2)
#########################################################################################################################################
################## optimality condition KKT1
#########################################################################################################################################
# case 0: define a \mu and make it case_ + \mu (x* - x_d)
mu = 0.005
# case 1
# \sum_{i\in[M]} grad(J_i(X^*)) \log(J_i(X^*) / J_i(X^*)+y_i) = 0
# case 3
# \sum_{i\in[M]} grad(J_i(X^*)) = 0
# below: get the gradients
grad_matrix = np.zeros(shape=(784,10))
matrix_with_log = np.zeros(shape=(784,10))
temp = np.zeros(shape=(10,))
for i in range(10):
    temp[i] = np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i]+Y_desired[i]) )
grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
matrix_with_log[:,i] = temp[i] * grad_matrix[:,i]
sum_of_all_grads_case_1 = np.sum(matrix_with_log,1)
sum_of_all_grads_case_0 = np.zeros(shape=(784,))
sum_of_all_grads_case_0 = sum_of_all_grads_case_1 + mu*( fake_image.reshape(784,) - X_desired.reshape(784,) )
opt_ball = LA.norm(sum_of_all_grads_case_0-np.zeros(shape=(784,)), 2 )
print("solution for case 0 is at L2 distance from zero = ", opt_ball)
opt_ball = LA.norm(sum_of_all_grads_case_1-np.zeros(shape=(784,)), 2 )
print("solution for case 1 is at L2 distance from zero = ", opt_ball)
sum_of_all_grads = np.sum(grad_matrix,1)
# hence the solution is off by the lp distance between sum_of_all_grads and the all-zeros vector
opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
print("solution for case 3 is at L2 distance from zero = ", opt_ball)
# #########################################################################################################################################
# ################## optimality condition KKT1 case 3:
# #########################################################################################################################################
# # \sum_{i\in[M]} grad(J_i(X^*)) = 0
#
# # for x^* obtained from the above algorithm, get grad(J_i(X^*)) for i \in [M]
#
# # for below function to work, we need a model with 1-D input AND seperate last dense and activation
#
# grad_matrix = np.zeros(shape=(784,10))
# for i in range(10):
# grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
#
# sum_of_all_grads = np.sum(grad_matrix,1)
# # hence solution is of by the lp distance of sum_of_all_grads and vector of all zeros
# opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# print("solution is of L2 distance from zeros which is = ", opt_ball)
print('break')
| 22,178 | 35.359016 | 180 | py |
BOSS | BOSS-main/Eval_Codes/applications/GenNNs_MNIST_digits_ensemble_two_models_table_same_target.py | import tensorflow as tf
from keras.utils import np_utils
import glob
#import imageio
import matplotlib.pyplot as plt
import numpy as np
import os
#import PIL
from tensorflow.keras import layers
import time
#from skimage.measure import compare_ssim
from skimage.measure import compare_ssim
import pydot
import graphviz
from numpy import linalg as LA
from IPython import display
from tensorflow.keras import Input, Model
from keras.models import load_model
#########################################################################################################################################
############################### gradient of the model f w.r.t input image function ######
#########################################################################################################################################
def grad_discriminant_sm_wrt_1d_img(input_image, lbl, model):
"""
    :param input_image: a numpy array holding one image with 784 pixels (e.g. shape (28, 28) or (784,))
    :param lbl: index of the output discriminant function (the class index)
    :param model: sequential keras model trained on the flattened (1D) image input
:return: gradient - same size as the input image
"""
extractor = tf.keras.Model(inputs=model.inputs,
outputs=[layer.output for layer in model.layers])
# image processing: convert input image to tf variable
input_image_2 = tf.Variable(input_image, name='input_image_var')
    # reshape the input tf.Variable to shape (1, 784, 1)
input_image_3 = tf.reshape(input_image_2, [1, 784, 1])
with tf.GradientTape(watch_accessed_variables=True) as tape:
tape.watch(input_image_3)
# get the actual outputs
features = extractor(input_image_3)
        # output of the last layer: the softmax probability vector
        # dis_func = model1.predict(input_image.reshape(1,784,1))[0]
        dis_func = features[-1]
        # pick the probability (discriminant value) of class `lbl`
func_val = dis_func[0][lbl]
grad = tape.gradient(func_val, input_image_3)
return grad
#########################################################################################################################################
############################### relu_scaler_ ######
#########################################################################################################################################
def relu_scaler_(x):
'''
    :param x: scalar input
    :return: y = relu(x), i.e. max(x, 0)
'''
y=0
if x >= 0:
y=x
else:
y=0
return y
#########################################################################################################################################
#########################################################################################################################################
###############################              SSIM function                                             ######
#########################################################################################################################################
def SSIM_index(imageA, imageB):
imageA = imageA.reshape(28, 28)
imageB = imageB.reshape(28, 28)
# rho_inf = LA.norm(input_image.reshape(784, 1) - X_test_pert[idx].reshape(784, 1) , np.inf)
(D_s, diff) = compare_ssim(imageA, imageB, full=True)
return D_s
#########################################################################################################################################
# calculate the kl divergence
##########################################################################################
###############################       Jensen-Shannon divergence function                               ######
##########################################################################################
"""
The Jensen-Shannon divergence is a normalized, symmetric and stable variant of the KL divergence; it returns values in [0, 1], where 0 means the two distributions are identical.
"""
from scipy.spatial.distance import jensenshannon
from math import log2
def D_JS_PMFs(p, q):
# D_JS_PMFs(p,q) = D_JS_PMFs(q,p)
return jensenshannon(p, q, base=2)
############################################################################################
# from math import log2
# # calculate the kl divergence
# def kl_divergence(p, q):
# return sum(p[i] * log2(p[i]/q[i]) for i in range(len(p)))
#
# # calculate the kl divergence
################################################### below is the MNIST digits
(train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.mnist.load_data()
# reshape data to fit model
X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
X_train, X_test = X_train/255, X_test/255
# normalization:
train_images = train_images / 255
test_images = test_images / 255
print("")
y_train = np_utils.to_categorical(train_labels,10)
y_test = np_utils.to_categorical(test_labels,10)
X_test = X_test.astype(np.float32)
#X_test = 2*X_test - 1
####################################################################################
# ################################ some dataset - MNIST fashion
#
# # download mnist data and split into train and test sets
# (train_images, train_labels), (test_images, test_labels) = tf.keras.datasets.fashion_mnist.load_data()
# # reshape data to fit model
# X_train = train_images.reshape(train_images.shape[0], 28, 28, 1)
# X_test = test_images.reshape(test_images.shape[0], 28, 28, 1)
# X_train, X_test = X_train/255, X_test/255
# # normalization:
# train_images = train_images / 255
# test_images = test_images / 255
# # ###############################################
## get a trained model (such as the MNIST digits model)
trained_model_1 = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
#trained_model_2 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_1.h5") # THIS IS A DIFFERENT TRAINED CONV MODEL
trained_model_3 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_2.h5") # THIS IS A DIFFERENT TRAINED MODEL WITH ONLY DENSE LAYERS AND A SOFTMAX
######## freeze trained_model_1 and trained_model_3 (the two loaded classifiers)
for layer in trained_model_1.layers:
layer.trainable = False
# for layer in trained_model_2.layers:
# layer.trainable = False
for layer in trained_model_3.layers:
layer.trainable = False
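# ---------------------------------------------------------------------------
# Illustrative sanity check: with every layer frozen, neither classifier should
# expose trainable weights; only the generator built inside the loop below is
# updated during the ensemble attack.
print('trainable weights (model 1, model 3):',
      len(trained_model_1.trainable_weights), len(trained_model_3.trainable_weights))
# ---------------------------------------------------------------------------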
#########################################################################################################################################
############################### this is NOT sequential training (training two loss functions from different heads of the NN)      ######
#########################################################################################################################################
#X_desired = X_test[4]
# these two need to have the same values for now since X_train is the same for both
batch_size_gen = 80
batch_size_2 = 80
#################### training steps and stopping criteria
delta_s = 0.25
delta_js = 0.45
delta_ssim = 0.70
delta_c = 0.25
traning_steps = 50
# #########################################################################################################################################
# ### automated desired for confidence reduction y_d (desired PMF) ##### use this for boundary examples AND confidence reduction attacks
# ############################################################################################################################################
# number_of_classes = 10
# target_class = 6
# desired_confidence = 0.6
#
# #1 code the confidence only
# desired_PMF_confidence = np.zeros(shape=(1,number_of_classes))
# for i in range(number_of_classes):
# if i == target_class:
# desired_PMF_confidence[:,i] = desired_confidence
# else:
# desired_PMF_confidence[:,i] = (1-desired_confidence) / (number_of_classes-1)
#
#
# #2 code the decision boundary examples between class i and class j
# desired_PMF_boundary = np.zeros(shape=(1,number_of_classes))
# class_i = 7
# class_j = 6
# #below is the values of PMF[i] and PMF[j] (i.e. maximum of 0.5)
# desired_confidence_boundary = 0.5
# for i in range(number_of_classes):
# if i == class_i or i == class_j:
# desired_PMF_boundary[:,i] = desired_confidence_boundary
# else:
# desired_PMF_boundary[:,i] = (1-(2*desired_confidence_boundary)) / (number_of_classes-2)
# ############################################################################################################################################
### define X_d as the desired
# make the image in [-1,1]
#X_desired = X_test[21]*2 -1
############## here what to save:
perturbed_images_ens_same_tar = []
prob_vectors_ens_same_tar = []
JSs__ens_same_tar = []
SSIM_ens_same_tar = []
confidence__ens_same_tar = []
idx_to_pick = [3, 2, 1, 18, 56, 15, 21, 0, 61, 16]
idx_to_pick = [61]
for idx in idx_to_pick:
X_desired = X_test[idx]
########################################################################################
###########################################################################
#################################### BUILDING THE gen model g(z,\phi)
###########################################################################
gen_NN = tf.keras.Sequential()
## ADDING THE GEN MODEL layers that will be trained
layer = layers.Dense(7 * 7 * 256, use_bias=False, input_shape=(100,), name='dense_gen')
layer.trainable = True
gen_NN.add(layer)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Reshape((7, 7, 256))
layer.trainable = True
gen_NN.add(layer)
# assert combined_NN.output_shape == (None, 7, 7, 256)
layer = layers.Conv2DTranspose(128, (5, 5), strides=(1, 1), padding='same', use_bias=False)
layer.trainable = True
gen_NN.add(layer)
# assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(64, (5, 5), strides=(2, 2), padding='same', use_bias=False)
layer.trainable = True
gen_NN.add(layer)
# assert gen_NN.output_shape == (None, 14, 14, 64)
layer = layers.BatchNormalization()
layer.trainable = True
gen_NN.add(layer)
layer = layers.LeakyReLU()
layer.trainable = True
gen_NN.add(layer)
layer = layers.Conv2DTranspose(1, (5, 5), strides=(2, 2), padding='same', use_bias=False, activation='tanh')
layer.trainable = True
gen_NN.add(layer)
# below is added for the 1D modification
layer = layers.Reshape((784, 1, 1))
layer.trainable = True
gen_NN.add(layer)
################################################################
##### X_train is the same for both gen and combined models #####
################################################################
# build x_train as some random input and y_train to be the desired image
# X_train is the same as z in the paper
# create one vector and repeat
X_train_one = tf.random.uniform(shape=[1,100], minval=0., maxval=1., seed=101)
X_train_one_np = X_train_one.numpy()
X_train_np = np.zeros(shape=(batch_size_gen,100))
for i in range(batch_size_gen):
X_train_np[i,:] = X_train_one_np
X_train = tf.convert_to_tensor(X_train_np, dtype=tf.float32)
X_val_np = X_train_one_np
X_val = tf.convert_to_tensor(X_val_np, dtype=tf.float32)
############################################################
    ### Y_train_gen for the gen model (which is the image)
################################################################
# below is for the 1D image
Y_train_np_gen = np.zeros(shape=(batch_size_gen,784,1,1))
Y_val_gen = X_desired.reshape(1,784,1,1)
for i in range(batch_size_gen):
Y_train_np_gen[i,:,:,:] = Y_val_gen.reshape(784,1,1)
# convert Y_train to tf eager tensor
Y_train_gen = tf.convert_to_tensor(Y_train_np_gen, dtype=tf.float32)
############################################################
### Y_train_combined is the y_d (desired PMF)
################################################################
    ## this is for a uniform distribution
# Y_train_combined = 0.1*np.ones(shape=(batch_size_2,10))
# Y_val_combined = 0.1*np.ones(shape=(1,10))
###for targeted, we need to change Y_train and Y_val:
###let the target lbl be 0, then
Y_train_combined = np.zeros(shape=(batch_size_2,10))
#Y_val_combined = np.array([[0.6,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0.05,0]])
#Y_val_combined = desired_PMF_confidence
#target_labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
target_labels = [5]
for target_label in target_labels:
Y_train_combined_1 = np.zeros(shape=(batch_size_2,10))
Y_val_combined_1 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
Y_val_combined_1[0][target_label] = 1
for i in range(batch_size_2):
Y_train_combined_1[i,:] = Y_val_combined_1
Y_desired_1 = Y_val_combined_1[0]
# Y_train_combined_2 = np.zeros(shape=(batch_size_2,10))
# Y_val_combined_2 = np.array([[1, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
# for i in range(batch_size_2):
# Y_train_combined_2[i,:] = Y_val_combined_2
# Y_desired_2 = Y_val_combined_2[0]
Y_train_combined_3 = np.zeros(shape=(batch_size_2,10))
Y_val_combined_3 = np.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]])
Y_val_combined_3[0][target_label] = 1
for i in range(batch_size_2):
Y_train_combined_3[i,:] = Y_val_combined_3
Y_desired_3 = Y_val_combined_3[0]
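        # ------------------------------------------------------------------
        # Note: Y_desired_1 and Y_desired_3 are the same one-hot PMF of
        # target_label, so the ensemble attack below searches for a single image
        # that BOTH frozen classifiers map to that target class.
        assert np.array_equal(Y_desired_1, Y_desired_3)
        # ------------------------------------------------------------------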
print('break')
####################################################################################################################
    ### defining the combined model such that it is the concatenation of g then f ==> this is defining model h in the paper
#############################################################################################################################
input = Input(shape=100)
x = gen_NN.layers[0](input)
for lay in range(len(gen_NN.layers) - 1):
layer = gen_NN.layers[lay+1]
layer.trainable = True
x = layer(x)
out_1 = x
# this is for the trained model 1
x_2 = trained_model_1.layers[0](x)
for lay in range(len(trained_model_1.layers) - 1):
layer = trained_model_1.layers[lay + 1]
layer.trainable = False
x_2 = layer(x_2)
out_2 = x_2
    # this is for the second trained model (trained_model_3)
x_4 = trained_model_3.layers[0](x)
for lay in range(len(trained_model_3.layers) - 1):
layer = trained_model_3.layers[lay + 1]
layer.trainable = False
x_4 = layer(x_4)
out_4 = x_4
### defining the model: this is h(z,\psi)
combined_NN = Model(input, [out_1, out_2, out_4])
    ### defining the optimizer
optimizer = tf.keras.optimizers.Adam(learning_rate=0.025)
loss_1 = tf.keras.losses.MeanSquaredError(name='LOSS_1')
loss_2 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_2', from_logits=False,label_smoothing=0)
#loss_3 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_3', from_logits=False,label_smoothing=0)
loss_4 = tf.keras.losses.CategoricalCrossentropy(name='LOSS_4', from_logits=False,label_smoothing=0)
dynamic_weights_selection = True
# initial losses functions weights
lambda_gen = 1
lambda_pmf_1 = 0.02
#lambda_pmf_2 = 0.02
lambda_pmf_3 = 0.02
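# The compiled objective below is the weighted sum
# lambda_gen * MSE(g(z), X_desired) + lambda_pmf_1 * CE(f_1(g(z)), y_d) + lambda_pmf_3 * CE(f_3(g(z)), y_d),
# with the lambdas re-balanced after every step when dynamic_weights_selection is True.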
############# TRAINING LOOP
for i in range(traning_steps):
combined_NN.compile(optimizer=optimizer, loss=[loss_1, loss_2, loss_4], loss_weights=[lambda_gen, lambda_pmf_1, lambda_pmf_3])
# training
combined_NN.fit(X_train,[Y_train_gen, Y_train_combined_1,Y_train_combined_3], epochs=1, batch_size=1, validation_data=(X_val, [Y_val_gen,Y_val_combined_1,Y_val_combined_3]), verbose=0 )
# fake image at step i ==> this is X in the paper and X_val is z in the paper
fake_image = combined_NN(X_val)[0].numpy().reshape(28, 28)
trained_model_1 = load_model(
"MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
# trained_model_2 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_1.h5") # THIS IS A DIFFERENT TRAINED CONV MODEL
trained_model_3 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_2.h5")
#trained_model = load_model("MNIST_digits_trained_model_3.h5")
output_vector_probabilities_1 = trained_model_1(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
#output_vector_probabilities_2 = trained_model_2(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
output_vector_probabilities_3 = trained_model_3(fake_image.reshape(1, 28, 28, 1)).numpy()[0]
# D_2 distance between real image and fake image at step i==> this is equation (9)
D_2_s = LA.norm(X_desired.reshape(784,) - fake_image.reshape(784,), 2)
# SSIM distance between real image and fake image at step i ==>
D_ssim_images = SSIM_index(X_desired, fake_image)
# D_2 distance between desired PMF and the PMF returned by the fake image ==> this is equation (3)
#D_2 = LA.norm(output_vector_probabilities-Y_desired, 2 )
# D_JS: JS divergence distance between desired and actual PMFs (it uses KL divergence)
D_JS_1 = D_JS_PMFs(output_vector_probabilities_1, Y_desired_1)
#D_JS_2 = D_JS_PMFs(output_vector_probabilities_2, Y_desired_2)
D_JS_3 = D_JS_PMFs(output_vector_probabilities_3, Y_desired_3)
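# D_JS_PMFs (defined earlier in this script) is assumed to compute the Jensen-Shannon
# divergence; a minimal sketch of that assumption:
#   m = 0.5 * (p + q)
#   js = 0.5 * np.sum(p * np.log2(p / m)) + 0.5 * np.sum(q * np.log2(q / m))
# It is 0 when the classifier PMF matches the target, hence the comparison against delta_js below.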
### THE STOPPING EXIT CRITERIA
if D_ssim_images >= delta_ssim and D_JS_1 <= delta_js and D_JS_3 <= delta_js:
print('BREAKING FOR IS USED with Distance SSIM = ', D_ssim_images, ' and JS of = ', [D_JS_1,D_JS_3])
break
### logger:
print('training step = ', i, '; image SSIM = ', D_ssim_images, ' ; JS_Distances = ', [D_JS_1,D_JS_3], ' ; current loss weights = ', lambda_gen,' , ', [lambda_pmf_1,lambda_pmf_3 ])
##### dynamic weight selection option in training
if dynamic_weights_selection is True:
lambda_gen = relu_scaler_(lambda_gen - 0.01 * 1 * ((D_ssim_images/delta_ssim)) * np.sign((D_ssim_images/delta_ssim)-1))
lambda_pmf_1 = relu_scaler_(lambda_pmf_1 - 0.05 * 0.02 * ((delta_js/D_JS_1)) * np.sign((delta_js/D_JS_1 )-1))
#lambda_pmf_2 = relu_scaler_(lambda_pmf_2 - 0.05 * 0.02 * ((delta_js / D_JS_2)) * np.sign((delta_js / D_JS_2) - 1))
lambda_pmf_3 = relu_scaler_(lambda_pmf_3 - 0.05 * 0.02 * ((delta_js / D_JS_3)) * np.sign((delta_js / D_JS_3) - 1))
else:
lambda_gen = 1
lambda_pmf = 0.02
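# Dynamic weight rule (assuming relu_scaler_ simply keeps the weight non-negative):
# each lambda is decreased while its constraint is already met (SSIM >= delta_ssim,
# JS <= delta_js) and increased while it is violated, trading off image fidelity
# against the two classifier PMF targets.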
### SAVE THE DISTANCE AND PERTURBED IMAGE SO AS TO TAKE THE MINIMUM AT THE END OF THE TRAINING STEP (THIS IS TO OVERCOME OVERFITTING DURING TRAINING)
fake_image = combined_NN(X_val)[0].numpy().reshape(28,28)
### below is the same thing (just for sanity check)
trained_model_1 = load_model("MNIST_digits_trained_range_1to1_1d_input.h5") # input is \in [-1,1]; this model here has 1D input of f, hence we need generator NN to be of output 1,128
#trained_model_2 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_1.h5") # THIS IS A DIFFERENT TRAINED CONV MODEL
trained_model_3 = load_model("MNIST_digits_trained_range_1to1_1d_input_ensemble_2.h5") # THIS IS A DIFFERENT TRAINED MODEL WITH ONLY DENSE LAYERS AND A SOFTMAX
output_vector_probabilities_1 = trained_model_1(fake_image.reshape(1,28,28,1)).numpy()[0]
#output_vector_probabilities_2 = trained_model_2(fake_image.reshape(1,28,28,1)).numpy()[0]
output_vector_probabilities_3 = trained_model_3(fake_image.reshape(1,28,28,1)).numpy()[0]
confidence_1 = np.max(output_vector_probabilities_1)
confidence_3 = np.max(output_vector_probabilities_3)
print('Finished the image of index = ', idx, ' with target lbl = ' , target_label, ' with SSIM = ', D_ssim_images, ' and JS = ',[D_JS_1,D_JS_3])
######## save the results here:
perturbed_images_ens_same_tar.append(fake_image)
prob_vectors_ens_same_tar.append([output_vector_probabilities_1,output_vector_probabilities_3])
JSs__ens_same_tar.append([D_JS_1,D_JS_3])
SSIM_ens_same_tar.append(D_ssim_images)
confidence__ens_same_tar.append([confidence_1,confidence_3])
real_image = X_desired.reshape(28,28)
plt.figure()
plt.subplot(2,3,1)
plt.title('Desired example')
plt.imshow(real_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,3,4)
plt.title('Generated example')
plt.imshow(fake_image,cmap='gray',vmin=-1, vmax=1)
plt.colorbar()
plt.axis('off')
plt.subplot(2,3,5)
plt.title('Generated example PMF 1')
plt.stem(output_vector_probabilities_1)
plt.ylim(top=1.2)
plt.subplot(2,3,3)
plt.title('Desired PMF against model 2')
plt.stem(Y_val_combined_3[0])
plt.ylim(top=1.2)
#plt.axis('off')
plt.subplot(2,3,6)
plt.title('Generated example PMF 2')
plt.stem(output_vector_probabilities_3)
plt.ylim(top=1.2)
plt.subplot(2,3,2)
plt.title('Desired PMF against model 1')
plt.stem(Y_val_combined_1[0])
plt.ylim(top=1.2)
# #########################################################################################################################################
# ################## optimality condition KKT1
# #########################################################################################################################################
# # case 0: define a \mu and make it case_ + \mu (x* - x_d)
# mu = 0.005
# # case 1
# # \sum_{i\in[M]} grad(J_i(X^*)) \log(J_i(X^*) / J_i(X^*)+y_i) = 0
#
# # case 3
# # \sum_{i\in[M]} grad(J_i(X^*)) = 0
#
# # below: get the gradients
# grad_matrix = np.zeros(shape=(784,10))
# matrix_with_log = np.zeros(shape=(784,10))
# temp = np.zeros(shape=(10,))
# for i in range(10):
# temp[i] = np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i]+Y_desired[i]) )
# np.log2(output_vector_probabilities[i] / (output_vector_probabilities[i] + Y_desired[i]))
# grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# matrix_with_log[:,i] = temp[i] * grad_matrix[:,i]
#
# sum_of_all_grads_case_1 = np.sum(matrix_with_log,1)
# sum_of_all_grads_case_0 = np.zeros(shape=(784,))
# sum_of_all_grads_case_0 = sum_of_all_grads_case_1 + mu*( fake_image.reshape(784,) - X_desired.reshape(784,) )
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_0-np.zeros(shape=(784,)), 2 )
# print("solution for case 0 is of L2 distance from zeros which is = ", opt_ball)
#
#
#
# opt_ball = LA.norm(sum_of_all_grads_case_1-np.zeros(shape=(784,)), 2 )
# print("solution for case 1 is of L2 distance from zeros which is = ", opt_ball)
#
#
# sum_of_all_grads = np.sum(grad_matrix,1)
# # hence the solution is off by the lp distance between sum_of_all_grads and the all-zeros vector
# opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# print("solution for case 3 is of L2 distance from zeros which is = ", opt_ball)
#
# # #########################################################################################################################################
# # ################## optimality condition KKT1 case 3:
# # #########################################################################################################################################
# # # \sum_{i\in[M]} grad(J_i(X^*)) = 0
# #
# # # for x^* obtained from the above algorithm, get grad(J_i(X^*)) for i \in [M]
# #
# # # for below function to work, we need a model with 1-D input AND separate last dense and activation
# #
# # grad_matrix = np.zeros(shape=(784,10))
# # for i in range(10):
# # grad_matrix[:,i] = grad_discriminant_sm_wrt_1d_img(fake_image, i, trained_model).numpy()[0,:,:].reshape(784,)
# #
# # sum_of_all_grads = np.sum(grad_matrix,1)
# # # hence the solution is off by the lp distance between sum_of_all_grads and the all-zeros vector
# # opt_ball = LA.norm(sum_of_all_grads-np.zeros(shape=(784,)), 2 )
# # print("solution is of L2 distance from zeros which is = ", opt_ball)
#
#
# print('break')
| 25,026 | 38.227273 | 197 | py |
AGCRN | AGCRN-master/model/AGCRN.py | import torch
import torch.nn as nn
from model.AGCRNCell import AGCRNCell
class AVWDCRNN(nn.Module):
def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim, num_layers=1):
super(AVWDCRNN, self).__init__()
assert num_layers >= 1, 'At least one DCRNN layer in the Encoder.'
self.node_num = node_num
self.input_dim = dim_in
self.num_layers = num_layers
self.dcrnn_cells = nn.ModuleList()
self.dcrnn_cells.append(AGCRNCell(node_num, dim_in, dim_out, cheb_k, embed_dim))
for _ in range(1, num_layers):
self.dcrnn_cells.append(AGCRNCell(node_num, dim_out, dim_out, cheb_k, embed_dim))
def forward(self, x, init_state, node_embeddings):
#shape of x: (B, T, N, D)
#shape of init_state: (num_layers, B, N, hidden_dim)
assert x.shape[2] == self.node_num and x.shape[3] == self.input_dim
seq_length = x.shape[1]
current_inputs = x
output_hidden = []
for i in range(self.num_layers):
state = init_state[i]
inner_states = []
for t in range(seq_length):
state = self.dcrnn_cells[i](current_inputs[:, t, :, :], state, node_embeddings)
inner_states.append(state)
output_hidden.append(state)
current_inputs = torch.stack(inner_states, dim=1)
#current_inputs: the outputs of last layer: (B, T, N, hidden_dim)
#output_hidden: the last state for each layer: (num_layers, B, N, hidden_dim)
#last_state: (B, N, hidden_dim)
return current_inputs, output_hidden
def init_hidden(self, batch_size):
init_states = []
for i in range(self.num_layers):
init_states.append(self.dcrnn_cells[i].init_hidden_state(batch_size))
return torch.stack(init_states, dim=0) #(num_layers, B, N, hidden_dim)
class AGCRN(nn.Module):
def __init__(self, args):
super(AGCRN, self).__init__()
self.num_node = args.num_nodes
self.input_dim = args.input_dim
self.hidden_dim = args.rnn_units
self.output_dim = args.output_dim
self.horizon = args.horizon
self.num_layers = args.num_layers
self.default_graph = args.default_graph
self.node_embeddings = nn.Parameter(torch.randn(self.num_node, args.embed_dim), requires_grad=True)
self.encoder = AVWDCRNN(args.num_nodes, args.input_dim, args.rnn_units, args.cheb_k,
args.embed_dim, args.num_layers)
#predictor
self.end_conv = nn.Conv2d(1, args.horizon * self.output_dim, kernel_size=(1, self.hidden_dim), bias=True)
def forward(self, source, targets, teacher_forcing_ratio=0.5):
#source: B, T_1, N, D
#target: B, T_2, N, D
#supports = F.softmax(F.relu(torch.mm(self.nodevec1, self.nodevec1.transpose(0,1))), dim=1)
init_state = self.encoder.init_hidden(source.shape[0])
output, _ = self.encoder(source, init_state, self.node_embeddings) #B, T, N, hidden
output = output[:, -1:, :, :] #B, 1, N, hidden
#CNN based predictor
output = self.end_conv((output)) #B, T*C, N, 1
output = output.squeeze(-1).reshape(-1, self.horizon, self.output_dim, self.num_node)
output = output.permute(0, 1, 3, 2) #B, T, N, C
return output | 3,454 | 44.460526 | 113 | py |
AGCRN | AGCRN-master/model/Run.py |
import os
import sys
file_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
print(file_dir)
sys.path.append(file_dir)
import torch
import numpy as np
import torch.nn as nn
import argparse
import configparser
from datetime import datetime
from model.AGCRN import AGCRN as Network
from model.BasicTrainer import Trainer
from lib.TrainInits import init_seed
from lib.dataloader import get_dataloader
from lib.TrainInits import print_model_parameters
#*************************************************************************#
Mode = 'Train'
DEBUG = 'True'
DATASET = 'PEMSD4' #PEMSD4 or PEMSD8
DEVICE = 'cuda:0'
MODEL = 'AGCRN'
#get configuration
config_file = './{}_{}.conf'.format(DATASET, MODEL)
#print('Read configuration file: %s' % (config_file))
config = configparser.ConfigParser()
config.read(config_file)
from lib.metrics import MAE_torch
def masked_mae_loss(scaler, mask_value):
def loss(preds, labels):
if scaler:
preds = scaler.inverse_transform(preds)
labels = scaler.inverse_transform(labels)
mae = MAE_torch(pred=preds, true=labels, mask_value=mask_value)
return mae
return loss
#parser
args = argparse.ArgumentParser(description='arguments')
args.add_argument('--dataset', default=DATASET, type=str)
args.add_argument('--mode', default=Mode, type=str)
args.add_argument('--device', default=DEVICE, type=str, help='indices of GPUs')
args.add_argument('--debug', default=DEBUG, type=eval)
args.add_argument('--model', default=MODEL, type=str)
args.add_argument('--cuda', default=True, type=bool)
#data
args.add_argument('--val_ratio', default=config['data']['val_ratio'], type=float)
args.add_argument('--test_ratio', default=config['data']['test_ratio'], type=float)
args.add_argument('--lag', default=config['data']['lag'], type=int)
args.add_argument('--horizon', default=config['data']['horizon'], type=int)
args.add_argument('--num_nodes', default=config['data']['num_nodes'], type=int)
args.add_argument('--tod', default=config['data']['tod'], type=eval)
args.add_argument('--normalizer', default=config['data']['normalizer'], type=str)
args.add_argument('--column_wise', default=config['data']['column_wise'], type=eval)
args.add_argument('--default_graph', default=config['data']['default_graph'], type=eval)
#model
args.add_argument('--input_dim', default=config['model']['input_dim'], type=int)
args.add_argument('--output_dim', default=config['model']['output_dim'], type=int)
args.add_argument('--embed_dim', default=config['model']['embed_dim'], type=int)
args.add_argument('--rnn_units', default=config['model']['rnn_units'], type=int)
args.add_argument('--num_layers', default=config['model']['num_layers'], type=int)
args.add_argument('--cheb_k', default=config['model']['cheb_order'], type=int)
#train
args.add_argument('--loss_func', default=config['train']['loss_func'], type=str)
args.add_argument('--seed', default=config['train']['seed'], type=int)
args.add_argument('--batch_size', default=config['train']['batch_size'], type=int)
args.add_argument('--epochs', default=config['train']['epochs'], type=int)
args.add_argument('--lr_init', default=config['train']['lr_init'], type=float)
args.add_argument('--lr_decay', default=config['train']['lr_decay'], type=eval)
args.add_argument('--lr_decay_rate', default=config['train']['lr_decay_rate'], type=float)
args.add_argument('--lr_decay_step', default=config['train']['lr_decay_step'], type=str)
args.add_argument('--early_stop', default=config['train']['early_stop'], type=eval)
args.add_argument('--early_stop_patience', default=config['train']['early_stop_patience'], type=int)
args.add_argument('--grad_norm', default=config['train']['grad_norm'], type=eval)
args.add_argument('--max_grad_norm', default=config['train']['max_grad_norm'], type=int)
args.add_argument('--teacher_forcing', default=False, type=bool)
#args.add_argument('--tf_decay_steps', default=2000, type=int, help='teacher forcing decay steps')
args.add_argument('--real_value', default=config['train']['real_value'], type=eval, help = 'use real value for loss calculation')
#test
args.add_argument('--mae_thresh', default=config['test']['mae_thresh'], type=eval)
args.add_argument('--mape_thresh', default=config['test']['mape_thresh'], type=float)
#log
args.add_argument('--log_dir', default='./', type=str)
args.add_argument('--log_step', default=config['log']['log_step'], type=int)
args.add_argument('--plot', default=config['log']['plot'], type=eval)
args = args.parse_args()
init_seed(args.seed)
if torch.cuda.is_available():
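    # assumes a device string of the form 'cuda:<digit>'; the character at index 5
    # is the GPU index (e.g. 'cuda:0' -> 0)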
torch.cuda.set_device(int(args.device[5]))
else:
args.device = 'cpu'
#init model
model = Network(args)
model = model.to(args.device)
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
nn.init.uniform_(p)
print_model_parameters(model, only_num=False)
#load dataset
train_loader, val_loader, test_loader, scaler = get_dataloader(args,
normalizer=args.normalizer,
tod=args.tod, dow=False,
weather=False, single=False)
#init loss function, optimizer
if args.loss_func == 'mask_mae':
loss = masked_mae_loss(scaler, mask_value=0.0)
elif args.loss_func == 'mae':
loss = torch.nn.L1Loss().to(args.device)
elif args.loss_func == 'mse':
loss = torch.nn.MSELoss().to(args.device)
else:
raise ValueError
optimizer = torch.optim.Adam(params=model.parameters(), lr=args.lr_init, eps=1.0e-8,
weight_decay=0, amsgrad=False)
#learning rate decay
lr_scheduler = None
if args.lr_decay:
print('Applying learning rate decay.')
lr_decay_steps = [int(i) for i in list(args.lr_decay_step.split(','))]
lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer=optimizer,
milestones=lr_decay_steps,
gamma=args.lr_decay_rate)
#lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer=optimizer, T_max=64)
#config log path
current_time = datetime.now().strftime('%Y%m%d%H%M%S')
current_dir = os.path.dirname(os.path.realpath(__file__))
log_dir = os.path.join(current_dir,'experiments', args.dataset, current_time)
args.log_dir = log_dir
#start training
trainer = Trainer(model, loss, optimizer, train_loader, val_loader, test_loader, scaler,
args, lr_scheduler=lr_scheduler)
if args.mode == 'train':
trainer.train()
elif args.mode == 'test':
model.load_state_dict(torch.load('../pre-trained/{}.pth'.format(args.dataset)))
print("Load saved model")
trainer.test(model, trainer.args, test_loader, scaler, trainer.logger)
else:
raise ValueError
| 6,865 | 43.584416 | 129 | py |
AGCRN | AGCRN-master/model/AGCN.py | import torch
import torch.nn.functional as F
import torch.nn as nn
class AVWGCN(nn.Module):
def __init__(self, dim_in, dim_out, cheb_k, embed_dim):
super(AVWGCN, self).__init__()
self.cheb_k = cheb_k
self.weights_pool = nn.Parameter(torch.FloatTensor(embed_dim, cheb_k, dim_in, dim_out))
self.bias_pool = nn.Parameter(torch.FloatTensor(embed_dim, dim_out))
def forward(self, x, node_embeddings):
#x shaped[B, N, C], node_embeddings shaped [N, D] -> supports shaped [N, N]
#output shape [B, N, C]
node_num = node_embeddings.shape[0]
supports = F.softmax(F.relu(torch.mm(node_embeddings, node_embeddings.transpose(0, 1))), dim=1)
support_set = [torch.eye(node_num).to(supports.device), supports]
#default cheb_k = 3
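        # Chebyshev recursion on the learned support: T_0 = I, T_1 = A and
        # T_k = 2*A*T_{k-1} - T_{k-2}, giving cheb_k hops of graph propagation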
for k in range(2, self.cheb_k):
support_set.append(torch.matmul(2 * supports, support_set[-1]) - support_set[-2])
supports = torch.stack(support_set, dim=0)
weights = torch.einsum('nd,dkio->nkio', node_embeddings, self.weights_pool) #N, cheb_k, dim_in, dim_out
bias = torch.matmul(node_embeddings, self.bias_pool) #N, dim_out
x_g = torch.einsum("knm,bmc->bknc", supports, x) #B, cheb_k, N, dim_in
x_g = x_g.permute(0, 2, 1, 3) # B, N, cheb_k, dim_in
x_gconv = torch.einsum('bnki,nkio->bno', x_g, weights) + bias #b, N, dim_out
return x_gconv | 1,453 | 54.923077 | 112 | py |
AGCRN | AGCRN-master/model/AGCRNCell.py | import torch
import torch.nn as nn
from model.AGCN import AVWGCN
class AGCRNCell(nn.Module):
def __init__(self, node_num, dim_in, dim_out, cheb_k, embed_dim):
super(AGCRNCell, self).__init__()
self.node_num = node_num
self.hidden_dim = dim_out
self.gate = AVWGCN(dim_in+self.hidden_dim, 2*dim_out, cheb_k, embed_dim)
self.update = AVWGCN(dim_in+self.hidden_dim, dim_out, cheb_k, embed_dim)
def forward(self, x, state, node_embeddings):
#x: B, num_nodes, input_dim
#state: B, num_nodes, hidden_dim
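        # GRU-style cell built from graph convolutions: z and r are the two gates,
        # hc is the candidate state and h mixes the previous state with it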
state = state.to(x.device)
input_and_state = torch.cat((x, state), dim=-1)
z_r = torch.sigmoid(self.gate(input_and_state, node_embeddings))
z, r = torch.split(z_r, self.hidden_dim, dim=-1)
candidate = torch.cat((x, z*state), dim=-1)
hc = torch.tanh(self.update(candidate, node_embeddings))
h = r*state + (1-r)*hc
return h
def init_hidden_state(self, batch_size):
return torch.zeros(batch_size, self.node_num, self.hidden_dim) | 1,065 | 40 | 80 | py |
AGCRN | AGCRN-master/model/BasicTrainer.py | import torch
import math
import os
import time
import copy
import numpy as np
from lib.logger import get_logger
from lib.metrics import All_Metrics
class Trainer(object):
def __init__(self, model, loss, optimizer, train_loader, val_loader, test_loader,
scaler, args, lr_scheduler=None):
super(Trainer, self).__init__()
self.model = model
self.loss = loss
self.optimizer = optimizer
self.train_loader = train_loader
self.val_loader = val_loader
self.test_loader = test_loader
self.scaler = scaler
self.args = args
self.lr_scheduler = lr_scheduler
self.train_per_epoch = len(train_loader)
if val_loader != None:
self.val_per_epoch = len(val_loader)
self.best_path = os.path.join(self.args.log_dir, 'best_model.pth')
self.loss_figure_path = os.path.join(self.args.log_dir, 'loss.png')
#log
if os.path.isdir(args.log_dir) == False and not args.debug:
os.makedirs(args.log_dir, exist_ok=True)
self.logger = get_logger(args.log_dir, name=args.model, debug=args.debug)
self.logger.info('Experiment log path in: {}'.format(args.log_dir))
#if not args.debug:
#self.logger.info("Argument: %r", args)
# for arg, value in sorted(vars(args).items()):
# self.logger.info("Argument %s: %r", arg, value)
def val_epoch(self, epoch, val_dataloader):
self.model.eval()
total_val_loss = 0
with torch.no_grad():
for batch_idx, (data, target) in enumerate(val_dataloader):
data = data[..., :self.args.input_dim]
label = target[..., :self.args.output_dim]
output = self.model(data, target, teacher_forcing_ratio=0.)
if self.args.real_value:
label = self.scaler.inverse_transform(label)
loss = self.loss(output.cuda(), label)
#a whole batch of Metr_LA is filtered
if not torch.isnan(loss):
total_val_loss += loss.item()
val_loss = total_val_loss / len(val_dataloader)
self.logger.info('**********Val Epoch {}: average Loss: {:.6f}'.format(epoch, val_loss))
return val_loss
def train_epoch(self, epoch):
self.model.train()
total_loss = 0
for batch_idx, (data, target) in enumerate(self.train_loader):
data = data[..., :self.args.input_dim]
label = target[..., :self.args.output_dim] # (..., 1)
self.optimizer.zero_grad()
#teacher_forcing for RNN encoder-decoder model
#if teacher_forcing_ratio = 1: use label as input in the decoder for all steps
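            # note: this branch needs args.tf_decay_steps, whose flag is commented out
            # in Run.py; with the default teacher_forcing=False it is never taken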
if self.args.teacher_forcing:
global_step = (epoch - 1) * self.train_per_epoch + batch_idx
teacher_forcing_ratio = self._compute_sampling_threshold(global_step, self.args.tf_decay_steps)
else:
teacher_forcing_ratio = 1.
#data and target shape: B, T, N, F; output shape: B, T, N, F
output = self.model(data, target, teacher_forcing_ratio=teacher_forcing_ratio)
if self.args.real_value:
label = self.scaler.inverse_transform(label)
loss = self.loss(output.cuda(), label)
loss.backward()
# add max grad clipping
if self.args.grad_norm:
torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.args.max_grad_norm)
self.optimizer.step()
total_loss += loss.item()
#log information
if batch_idx % self.args.log_step == 0:
self.logger.info('Train Epoch {}: {}/{} Loss: {:.6f}'.format(
epoch, batch_idx, self.train_per_epoch, loss.item()))
train_epoch_loss = total_loss/self.train_per_epoch
self.logger.info('**********Train Epoch {}: averaged Loss: {:.6f}, tf_ratio: {:.6f}'.format(epoch, train_epoch_loss, teacher_forcing_ratio))
#learning rate decay
if self.args.lr_decay:
self.lr_scheduler.step()
return train_epoch_loss
def train(self):
best_model = None
best_loss = float('inf')
not_improved_count = 0
train_loss_list = []
val_loss_list = []
start_time = time.time()
for epoch in range(1, self.args.epochs + 1):
#epoch_time = time.time()
train_epoch_loss = self.train_epoch(epoch)
#print(time.time()-epoch_time)
#exit()
if self.val_loader == None:
val_dataloader = self.test_loader
else:
val_dataloader = self.val_loader
val_epoch_loss = self.val_epoch(epoch, val_dataloader)
#print('LR:', self.optimizer.param_groups[0]['lr'])
train_loss_list.append(train_epoch_loss)
val_loss_list.append(val_epoch_loss)
if train_epoch_loss > 1e6:
self.logger.warning('Gradient explosion detected. Ending...')
break
#if self.val_loader == None:
#val_epoch_loss = train_epoch_loss
if val_epoch_loss < best_loss:
best_loss = val_epoch_loss
not_improved_count = 0
best_state = True
else:
not_improved_count += 1
best_state = False
# early stop
if self.args.early_stop:
if not_improved_count == self.args.early_stop_patience:
self.logger.info("Validation performance didn\'t improve for {} epochs. "
"Training stops.".format(self.args.early_stop_patience))
break
# save the best state
if best_state == True:
self.logger.info('*********************************Current best model saved!')
best_model = copy.deepcopy(self.model.state_dict())
training_time = time.time() - start_time
self.logger.info("Total training time: {:.4f}min, best loss: {:.6f}".format((training_time / 60), best_loss))
#save the best model to file
if not self.args.debug:
torch.save(best_model, self.best_path)
self.logger.info("Saving current best model to " + self.best_path)
#test
self.model.load_state_dict(best_model)
#self.val_epoch(self.args.epochs, self.test_loader)
self.test(self.model, self.args, self.test_loader, self.scaler, self.logger)
def save_checkpoint(self):
state = {
'state_dict': self.model.state_dict(),
'optimizer': self.optimizer.state_dict(),
'config': self.args
}
torch.save(state, self.best_path)
self.logger.info("Saving current best model to " + self.best_path)
@staticmethod
def test(model, args, data_loader, scaler, logger, path=None):
if path != None:
check_point = torch.load(path)
state_dict = check_point['state_dict']
args = check_point['config']
model.load_state_dict(state_dict)
model.to(args.device)
model.eval()
y_pred = []
y_true = []
with torch.no_grad():
for batch_idx, (data, target) in enumerate(data_loader):
data = data[..., :args.input_dim]
label = target[..., :args.output_dim]
output = model(data, target, teacher_forcing_ratio=0)
y_true.append(label)
y_pred.append(output)
y_true = scaler.inverse_transform(torch.cat(y_true, dim=0))
if args.real_value:
y_pred = torch.cat(y_pred, dim=0)
else:
y_pred = scaler.inverse_transform(torch.cat(y_pred, dim=0))
np.save('./{}_true.npy'.format(args.dataset), y_true.cpu().numpy())
np.save('./{}_pred.npy'.format(args.dataset), y_pred.cpu().numpy())
for t in range(y_true.shape[1]):
mae, rmse, mape, _, _ = All_Metrics(y_pred[:, t, ...], y_true[:, t, ...],
args.mae_thresh, args.mape_thresh)
logger.info("Horizon {:02d}, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}%".format(
t + 1, mae, rmse, mape*100))
mae, rmse, mape, _, _ = All_Metrics(y_pred, y_true, args.mae_thresh, args.mape_thresh)
logger.info("Average Horizon, MAE: {:.2f}, RMSE: {:.2f}, MAPE: {:.4f}%".format(
mae, rmse, mape*100))
@staticmethod
def _compute_sampling_threshold(global_step, k):
"""
Computes the sampling probability for scheduled sampling using inverse sigmoid.
:param global_step:
:param k:
:return:
"""
return k / (k + math.exp(global_step / k)) | 8,935 | 42.590244 | 148 | py |
AGCRN | AGCRN-master/lib/TrainInits.py | import torch
import random
import numpy as np
def init_seed(seed):
'''
Disable cudnn to maximize reproducibility
'''
    torch.backends.cudnn.enabled = False
torch.backends.cudnn.deterministic = True
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
def init_device(opt):
if torch.cuda.is_available():
opt.cuda = True
torch.cuda.set_device(int(opt.device[5]))
else:
opt.cuda = False
opt.device = 'cpu'
return opt
def init_optim(model, opt):
'''
Initialize optimizer
'''
return torch.optim.Adam(params=model.parameters(),lr=opt.lr_init)
def init_lr_scheduler(optim, opt):
'''
Initialize the learning rate scheduler
'''
#return torch.optim.lr_scheduler.StepLR(optimizer=optim,gamma=opt.lr_scheduler_rate,step_size=opt.lr_scheduler_step)
return torch.optim.lr_scheduler.MultiStepLR(optimizer=optim, milestones=opt.lr_decay_steps,
gamma = opt.lr_scheduler_rate)
def print_model_parameters(model, only_num = True):
print('*****************Model Parameter*****************')
if not only_num:
for name, param in model.named_parameters():
print(name, param.shape, param.requires_grad)
total_num = sum([param.nelement() for param in model.parameters()])
print('Total params num: {}'.format(total_num))
print('*****************Finish Parameter****************')
def get_memory_usage(device):
allocated_memory = torch.cuda.memory_allocated(device) / (1024*1024.)
cached_memory = torch.cuda.memory_cached(device) / (1024*1024.)
return allocated_memory, cached_memory
#print('Allocated Memory: {:.2f} MB, Cached Memory: {:.2f} MB'.format(allocated_memory, cached_memory)) | 1,818 | 33.980769 | 120 | py |
AGCRN | AGCRN-master/lib/dataloader.py | import torch
import numpy as np
import torch.utils.data
from lib.add_window import Add_Window_Horizon
from lib.load_dataset import load_st_dataset
from lib.normalization import NScaler, MinMax01Scaler, MinMax11Scaler, StandardScaler, ColumnMinMaxScaler
def normalize_dataset(data, normalizer, column_wise=False):
if normalizer == 'max01':
if column_wise:
minimum = data.min(axis=0, keepdims=True)
maximum = data.max(axis=0, keepdims=True)
else:
minimum = data.min()
maximum = data.max()
scaler = MinMax01Scaler(minimum, maximum)
data = scaler.transform(data)
print('Normalize the dataset by MinMax01 Normalization')
elif normalizer == 'max11':
if column_wise:
minimum = data.min(axis=0, keepdims=True)
maximum = data.max(axis=0, keepdims=True)
else:
minimum = data.min()
maximum = data.max()
scaler = MinMax11Scaler(minimum, maximum)
data = scaler.transform(data)
print('Normalize the dataset by MinMax11 Normalization')
elif normalizer == 'std':
if column_wise:
mean = data.mean(axis=0, keepdims=True)
std = data.std(axis=0, keepdims=True)
else:
mean = data.mean()
std = data.std()
scaler = StandardScaler(mean, std)
data = scaler.transform(data)
print('Normalize the dataset by Standard Normalization')
elif normalizer == 'None':
scaler = NScaler()
data = scaler.transform(data)
print('Does not normalize the dataset')
elif normalizer == 'cmax':
        #column min max, to be deprecated
#note: axis must be the spatial dimension, please check !
scaler = ColumnMinMaxScaler(data.min(axis=0), data.max(axis=0))
data = scaler.transform(data)
print('Normalize the dataset by Column Min-Max Normalization')
else:
raise ValueError
return data, scaler
def split_data_by_days(data, val_days, test_days, interval=60):
'''
:param data: [B, *]
:param val_days:
:param test_days:
:param interval: interval (15, 30, 60) minutes
:return:
'''
T = int((24*60)/interval)
test_data = data[-T*test_days:]
val_data = data[-T*(test_days + val_days): -T*test_days]
train_data = data[:-T*(test_days + val_days)]
return train_data, val_data, test_data
def split_data_by_ratio(data, val_ratio, test_ratio):
data_len = data.shape[0]
test_data = data[-int(data_len*test_ratio):]
val_data = data[-int(data_len*(test_ratio+val_ratio)):-int(data_len*test_ratio)]
train_data = data[:-int(data_len*(test_ratio+val_ratio))]
return train_data, val_data, test_data
def data_loader(X, Y, batch_size, shuffle=True, drop_last=True):
cuda = True if torch.cuda.is_available() else False
TensorFloat = torch.cuda.FloatTensor if cuda else torch.FloatTensor
X, Y = TensorFloat(X), TensorFloat(Y)
data = torch.utils.data.TensorDataset(X, Y)
dataloader = torch.utils.data.DataLoader(data, batch_size=batch_size,
shuffle=shuffle, drop_last=drop_last)
return dataloader
def get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True):
#load raw st dataset
data = load_st_dataset(args.dataset) # B, N, D
#normalize st data
data, scaler = normalize_dataset(data, normalizer, args.column_wise)
    #split dataset by days or by ratio
if args.test_ratio > 1:
data_train, data_val, data_test = split_data_by_days(data, args.val_ratio, args.test_ratio)
else:
data_train, data_val, data_test = split_data_by_ratio(data, args.val_ratio, args.test_ratio)
#add time window
x_tra, y_tra = Add_Window_Horizon(data_train, args.lag, args.horizon, single)
x_val, y_val = Add_Window_Horizon(data_val, args.lag, args.horizon, single)
x_test, y_test = Add_Window_Horizon(data_test, args.lag, args.horizon, single)
print('Train: ', x_tra.shape, y_tra.shape)
print('Val: ', x_val.shape, y_val.shape)
print('Test: ', x_test.shape, y_test.shape)
##############get dataloader######################
train_dataloader = data_loader(x_tra, y_tra, args.batch_size, shuffle=True, drop_last=True)
if len(x_val) == 0:
val_dataloader = None
else:
val_dataloader = data_loader(x_val, y_val, args.batch_size, shuffle=False, drop_last=True)
test_dataloader = data_loader(x_test, y_test, args.batch_size, shuffle=False, drop_last=False)
return train_dataloader, val_dataloader, test_dataloader, scaler
if __name__ == '__main__':
import argparse
#MetrLA 207; BikeNYC 128; SIGIR_solar 137; SIGIR_electric 321
DATASET = 'SIGIR_electric'
if DATASET == 'MetrLA':
NODE_NUM = 207
elif DATASET == 'BikeNYC':
NODE_NUM = 128
elif DATASET == 'SIGIR_solar':
NODE_NUM = 137
elif DATASET == 'SIGIR_electric':
NODE_NUM = 321
parser = argparse.ArgumentParser(description='PyTorch dataloader')
parser.add_argument('--dataset', default=DATASET, type=str)
parser.add_argument('--num_nodes', default=NODE_NUM, type=int)
parser.add_argument('--val_ratio', default=0.1, type=float)
parser.add_argument('--test_ratio', default=0.2, type=float)
parser.add_argument('--lag', default=12, type=int)
parser.add_argument('--horizon', default=12, type=int)
parser.add_argument('--batch_size', default=64, type=int)
args = parser.parse_args()
train_dataloader, val_dataloader, test_dataloader, scaler = get_dataloader(args, normalizer = 'std', tod=False, dow=False, weather=False, single=True) | 5,728 | 42.401515 | 154 | py |
AGCRN | AGCRN-master/lib/metrics.py | '''
Always evaluate the model with MAE, RMSE, MAPE, RRSE, PNBI, and oPNBI.
Why add mask to MAE and RMSE?
Filter the 0 that may be caused by error (such as loop sensor)
Why add mask to MAPE and MARE?
Ignore very small values (e.g., 0.5/0.5=100%)
'''
import numpy as np
import torch
def MAE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.mean(torch.abs(true-pred))
def MSE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.mean((pred - true) ** 2)
def RMSE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.sqrt(torch.mean((pred - true) ** 2))
def RRSE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.sqrt(torch.sum((pred - true) ** 2)) / torch.sqrt(torch.sum((pred - true.mean()) ** 2))
def CORR_torch(pred, true, mask_value=None):
#input B, T, N, D or B, N, D or B, N
if len(pred.shape) == 2:
pred = pred.unsqueeze(dim=1).unsqueeze(dim=1)
true = true.unsqueeze(dim=1).unsqueeze(dim=1)
elif len(pred.shape) == 3:
pred = pred.transpose(1, 2).unsqueeze(dim=1)
true = true.transpose(1, 2).unsqueeze(dim=1)
elif len(pred.shape) == 4:
#B, T, N, D -> B, T, D, N
pred = pred.transpose(2, 3)
true = true.transpose(2, 3)
else:
raise ValueError
dims = (0, 1, 2)
pred_mean = pred.mean(dim=dims)
true_mean = true.mean(dim=dims)
pred_std = pred.std(dim=dims)
true_std = true.std(dim=dims)
correlation = ((pred - pred_mean)*(true - true_mean)).mean(dim=dims) / (pred_std*true_std)
index = (true_std != 0)
correlation = (correlation[index]).mean()
return correlation
def MAPE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.mean(torch.abs(torch.div((true - pred), true)))
def PNBI_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
indicator = torch.gt(pred - true, 0).float()
return indicator.mean()
def oPNBI_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
bias = (true+pred) / (2*true)
return bias.mean()
def MARE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.div(torch.sum(torch.abs((true - pred))), torch.sum(true))
def SMAPE_torch(pred, true, mask_value=None):
if mask_value != None:
mask = torch.gt(true, mask_value)
pred = torch.masked_select(pred, mask)
true = torch.masked_select(true, mask)
return torch.mean(torch.abs(true-pred)/(torch.abs(true)+torch.abs(pred)))
def MAE_np(pred, true, mask_value=None):
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
MAE = np.mean(np.absolute(pred-true))
return MAE
def RMSE_np(pred, true, mask_value=None):
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
RMSE = np.sqrt(np.mean(np.square(pred-true)))
return RMSE
#Root Relative Squared Error
def RRSE_np(pred, true, mask_value=None):
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
mean = true.mean()
return np.divide(np.sqrt(np.sum((pred-true) ** 2)), np.sqrt(np.sum((true-mean) ** 2)))
def MAPE_np(pred, true, mask_value=None):
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
return np.mean(np.absolute(np.divide((true - pred), true)))
def PNBI_np(pred, true, mask_value=None):
#if PNBI=0, all pred are smaller than true
#if PNBI=1, all pred are bigger than true
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
bias = pred-true
indicator = np.where(bias>0, True, False)
return indicator.mean()
def oPNBI_np(pred, true, mask_value=None):
#if oPNBI>1, pred are bigger than true
#if oPNBI<1, pred are smaller than true
    #however, this metric is too sensitive to small values. Not good!
if mask_value != None:
mask = np.where(true > (mask_value), True, False)
true = true[mask]
pred = pred[mask]
bias = (true + pred) / (2 * true)
return bias.mean()
def MARE_np(pred, true, mask_value=None):
if mask_value != None:
mask = np.where(true> (mask_value), True, False)
true = true[mask]
pred = pred[mask]
return np.divide(np.sum(np.absolute((true - pred))), np.sum(true))
def CORR_np(pred, true, mask_value=None):
#input B, T, N, D or B, N, D or B, N
if len(pred.shape) == 2:
#B, N
        pred = np.expand_dims(np.expand_dims(pred, axis=1), axis=1)
        true = np.expand_dims(np.expand_dims(true, axis=1), axis=1)
elif len(pred.shape) == 3:
#np.transpose include permute, B, T, N
pred = np.expand_dims(pred.transpose(0, 2, 1), axis=1)
true = np.expand_dims(true.transpose(0, 2, 1), axis=1)
elif len(pred.shape) == 4:
#B, T, N, D -> B, T, D, N
        pred = pred.transpose(0, 1, 3, 2)
        true = true.transpose(0, 1, 3, 2)
else:
raise ValueError
dims = (0, 1, 2)
pred_mean = pred.mean(axis=dims)
true_mean = true.mean(axis=dims)
pred_std = pred.std(axis=dims)
true_std = true.std(axis=dims)
correlation = ((pred - pred_mean)*(true - true_mean)).mean(axis=dims) / (pred_std*true_std)
index = (true_std != 0)
correlation = (correlation[index]).mean()
return correlation
def All_Metrics(pred, true, mask1, mask2):
#mask1 filter the very small value, mask2 filter the value lower than a defined threshold
assert type(pred) == type(true)
if type(pred) == np.ndarray:
mae = MAE_np(pred, true, mask1)
rmse = RMSE_np(pred, true, mask1)
mape = MAPE_np(pred, true, mask2)
rrse = RRSE_np(pred, true, mask1)
corr = 0
#corr = CORR_np(pred, true, mask1)
#pnbi = PNBI_np(pred, true, mask1)
#opnbi = oPNBI_np(pred, true, mask2)
elif type(pred) == torch.Tensor:
mae = MAE_torch(pred, true, mask1)
rmse = RMSE_torch(pred, true, mask1)
mape = MAPE_torch(pred, true, mask2)
rrse = RRSE_torch(pred, true, mask1)
corr = CORR_torch(pred, true, mask1)
#pnbi = PNBI_torch(pred, true, mask1)
#opnbi = oPNBI_torch(pred, true, mask2)
else:
raise TypeError
return mae, rmse, mape, rrse, corr
def SIGIR_Metrics(pred, true, mask1, mask2):
rrse = RRSE_torch(pred, true, mask1)
corr = CORR_torch(pred, true, 0)
return rrse, corr
if __name__ == '__main__':
pred = torch.Tensor([1, 2, 3,4])
true = torch.Tensor([2, 1, 4,5])
print(All_Metrics(pred, true, None, None))
| 7,947 | 34.641256 | 103 | py |
AGCRN | AGCRN-master/lib/normalization.py | import numpy as np
import torch
class NScaler(object):
def transform(self, data):
return data
def inverse_transform(self, data):
return data
class StandardScaler:
"""
    Standardize the input
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def transform(self, data):
return (data - self.mean) / self.std
def inverse_transform(self, data):
if type(data) == torch.Tensor and type(self.mean) == np.ndarray:
self.std = torch.from_numpy(self.std).to(data.device).type(data.dtype)
self.mean = torch.from_numpy(self.mean).to(data.device).type(data.dtype)
return (data * self.std) + self.mean
class MinMax01Scaler:
"""
    Min-max scale the input to the range [0, 1]
"""
def __init__(self, min, max):
self.min = min
self.max = max
def transform(self, data):
return (data - self.min) / (self.max - self.min)
def inverse_transform(self, data):
if type(data) == torch.Tensor and type(self.min) == np.ndarray:
self.min = torch.from_numpy(self.min).to(data.device).type(data.dtype)
self.max = torch.from_numpy(self.max).to(data.device).type(data.dtype)
return (data * (self.max - self.min) + self.min)
class MinMax11Scaler:
"""
    Min-max scale the input to the range [-1, 1]
"""
def __init__(self, min, max):
self.min = min
self.max = max
def transform(self, data):
return ((data - self.min) / (self.max - self.min)) * 2. - 1.
def inverse_transform(self, data):
if type(data) == torch.Tensor and type(self.min) == np.ndarray:
self.min = torch.from_numpy(self.min).to(data.device).type(data.dtype)
self.max = torch.from_numpy(self.max).to(data.device).type(data.dtype)
return ((data + 1.) / 2.) * (self.max - self.min) + self.min
class ColumnMinMaxScaler():
#Note: to use this scale, must init the min and max with column min and column max
def __init__(self, min, max):
self.min = min
self.min_max = max - self.min
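        # guard against constant columns: a zero range would otherwise divide by zero
        # in transform, so such columns are scaled with a range of 1 instead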
self.min_max[self.min_max==0] = 1
def transform(self, data):
print(data.shape, self.min_max.shape)
return (data - self.min) / self.min_max
def inverse_transform(self, data):
if type(data) == torch.Tensor and type(self.min) == np.ndarray:
self.min_max = torch.from_numpy(self.min_max).to(data.device).type(torch.float32)
self.min = torch.from_numpy(self.min).to(data.device).type(torch.float32)
#print(data.dtype, self.min_max.dtype, self.min.dtype)
return (data * self.min_max + self.min)
def one_hot_by_column(data):
#data is a 2D numpy array
len = data.shape[0]
for i in range(data.shape[1]):
column = data[:, i]
max = column.max()
min = column.min()
#print(len, max, min)
zero_matrix = np.zeros((len, max-min+1))
zero_matrix[np.arange(len), column-min] = 1
if i == 0:
encoded = zero_matrix
else:
encoded = np.hstack((encoded, zero_matrix))
return encoded
def minmax_by_column(data):
# data is a 2D numpy array
for i in range(data.shape[1]):
column = data[:, i]
max = column.max()
min = column.min()
column = (column - min) / (max - min)
column = column[:, np.newaxis]
if i == 0:
_normalized = column
else:
_normalized = np.hstack((_normalized, column))
return _normalized
if __name__ == '__main__':
test_data = np.array([[0,0,0, 1], [0, 1, 3, 2], [0, 2, 1, 3]])
print(test_data)
    # keepdims so the (3, 1) result broadcasts against the (3, 4) array below
    minimum = test_data.min(axis=1, keepdims=True)
    print(minimum, minimum.shape, test_data.shape)
    maximum = test_data.max(axis=1, keepdims=True)
print(maximum)
print(test_data-minimum)
test_data = (test_data-minimum) / (maximum-minimum)
print(test_data)
print(0 == 0)
print(0.00 == 0)
print(0 == 0.00)
#print(one_hot_by_column(test_data))
#print(minmax_by_column(test_data)) | 4,047 | 30.138462 | 93 | py |
a3c_continuous | a3c_continuous-master/main.py | from __future__ import print_function, division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
import torch.multiprocessing as mp
from environment import create_env
from model import A3C_MLP, A3C_CONV
from train import train
from test import test
from shared_optim import SharedRMSprop, SharedAdam
import time
parser = argparse.ArgumentParser(description="A3C")
parser.add_argument(
"-l", "--lr", type=float, default=0.0001, help="learning rate (default: 0.0001)"
)
parser.add_argument(
"-ec",
"--entropy-coef",
type=float,
default=0.01,
help="entropy loss coefficient (default: 0.01)",
)
parser.add_argument(
"-vc",
"--value-coef",
type=float,
default=0.5,
help="value loss coefficient (default: 0.5)",
)
parser.add_argument(
"-g",
"--gamma",
type=float,
default=0.99,
help="discount factor for rewards (default: 0.99)",
)
parser.add_argument(
"-t", "--tau", type=float, default=1.00, help="parameter for GAE (default: 1.00)"
)
parser.add_argument(
"-s", "--seed", type=int, default=1, help="random seed (default: 1)"
)
parser.add_argument(
"-w",
"--workers",
type=int,
default=32,
help="how many training processes to use (default: 32)",
)
parser.add_argument(
"-ns",
"--num-steps",
type=int,
default=20,
help="number of forward steps in A3C (default: 20)",
)
parser.add_argument(
"-mel",
"--max-episode-length",
type=int,
default=10000,
help="maximum length of an episode (default: 10000)",
)
parser.add_argument(
"-ev",
"--env",
default="BipedalWalker-v2",
help="environment to train on (default: BipedalWalker-v2)",
)
parser.add_argument(
"-so",
"--shared-optimizer",
default=True,
help="use an optimizer with shared statistics.",
)
parser.add_argument("-ld", "--load", action="store_true", help="load a trained model")
parser.add_argument(
"-sm",
"--save-max",
action="store_true",
help="Save model on every test run high score matched or bested",
)
parser.add_argument(
"-o",
"--optimizer",
default="Adam",
choices=["Adam", "RMSprop"],
help="optimizer choice of Adam or RMSprop",
)
parser.add_argument(
"-lmd",
"--load-model-dir",
default="trained_models/",
help="folder to load trained models from",
)
parser.add_argument(
"-smd",
"--save-model-dir",
default="trained_models/",
help="folder to save trained models",
)
parser.add_argument("-lg", "--log-dir", default="logs/", help="folder to save logs")
parser.add_argument(
"-m", "--model", default="MLP", choices=["MLP", "CONV"], help="Model type to use"
)
parser.add_argument(
"-sf",
"--stack-frames",
type=int,
default=1,
help="Choose number of observations to stack",
)
parser.add_argument(
"-gp",
"--gpu-ids",
type=int,
default=[-1],
nargs="+",
help="GPUs to use [-1 CPU only] (default: -1)",
)
parser.add_argument(
"-a", "--amsgrad", action="store_true", help="Adam optimizer amsgrad parameter"
)
parser.add_argument(
"-hs",
"--hidden-size",
type=int,
default=128,
help="LSTM Cell number of features in the hidden state h",
)
parser.add_argument(
"-tl",
"--tensorboard-logger",
action="store_true",
help="Creates tensorboard logger to see graph of model, view model weights and biases, and monitor test agent reward progress",
)
# Based on
# https://github.com/pytorch/examples/tree/master/mnist_hogwild
# Training settings
# Implemented multiprocessing using locks but was not beneficial. Hogwild
# training was far superior
if __name__ == "__main__":
args = parser.parse_args()
torch.manual_seed(args.seed)
if args.gpu_ids != [-1]:
torch.cuda.manual_seed(args.seed)
mp.set_start_method("spawn")
env = create_env(args.env, args)
if args.model == "MLP":
shared_model = A3C_MLP(env.observation_space.shape[0], env.action_space, args)
if args.model == "CONV":
shared_model = A3C_CONV(args.stack_frames, env.action_space, args)
if args.load:
saved_state = torch.load(
f"{args.load_model_dir}{args.env}.dat",
map_location=lambda storage, loc: storage,
)
shared_model.load_state_dict(saved_state)
shared_model.share_memory()
if args.shared_optimizer:
if args.optimizer == "RMSprop":
optimizer = SharedRMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == "Adam":
optimizer = SharedAdam(
shared_model.parameters(), lr=args.lr, amsgrad=args.amsgrad
)
optimizer.share_memory()
else:
optimizer = None
processes = []
p = mp.Process(target=test, args=(args, shared_model))
p.start()
time.sleep(0.001)
processes.append(p)
for rank in range(0, args.workers):
p = mp.Process(target=train, args=(rank, args, shared_model, optimizer))
p.start()
time.sleep(0.001)
processes.append(p)
for p in processes:
p.join()
time.sleep(0.001)
| 5,128 | 25.713542 | 131 | py |
a3c_continuous | a3c_continuous-master/test.py | from __future__ import division
import os
os.environ["OMP_NUM_THREADS"] = "1"
from setproctitle import setproctitle as ptitle
import numpy as np
import torch
from environment import create_env
from utils import setup_logger
from model import A3C_CONV, A3C_MLP
from player_util import Agent
from torch.autograd import Variable
import time
import logging
import gym
import copy
def test(args, shared_model):
ptitle("Test Agent")
gpu_id = args.gpu_ids[-1]
setup_logger(f"{args.env}_log", rf"{args.log_dir}{args.env}_log")
log = logging.getLogger(f"{args.env}_log")
d_args = vars(args)
for k in d_args.keys():
log.info(f"{k}: {d_args[k]}")
torch.manual_seed(args.seed)
if gpu_id >= 0:
torch.cuda.manual_seed(args.seed)
env = create_env(args.env, args)
reward_sum = 0
start_time = time.time()
num_tests = 0
reward_total_sum = 0
player = Agent(None, env, args, None)
player.gpu_id = gpu_id
if args.model == "MLP":
player.model = A3C_MLP(
player.env.observation_space.shape[0], player.env.action_space, args
)
if args.model == "CONV":
player.model = A3C_CONV(args.stack_frames, player.env.action_space, args)
if args.tensorboard_logger:
from torch.utils.tensorboard import SummaryWriter
if args.model == "CONV":
dummy_input = (
torch.zeros(1, args.stack_frames, 24),
torch.zeros(1, args.hidden_size),
torch.zeros(1, args.hidden_size),
)
if args.model == "MLP":
dummy_input = (
torch.zeros(1, args.stack_frames, 24),
torch.zeros(1, args.hidden_size),
torch.zeros(1, args.hidden_size),
)
writer = SummaryWriter(f"runs/{args.env}_training")
writer.add_graph(player.model, dummy_input, False)
writer.close()
player.state = player.env.reset()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.model = player.model.cuda()
player.state = torch.from_numpy(player.state).float().cuda()
else:
player.state = torch.from_numpy(player.state).float()
player.model.eval()
max_score = 0
try:
while 1:
if player.done:
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.model.load_state_dict(shared_model.state_dict())
else:
player.model.load_state_dict(shared_model.state_dict())
player.action_test()
reward_sum += player.reward
if player.done:
num_tests += 1
reward_total_sum += reward_sum
reward_mean = reward_total_sum / num_tests
log.info(
f'Time {time.strftime("%Hh %Mm %Ss", time.gmtime(time.time() - start_time))}, episode reward {reward_sum}, episode length {player.eps_len}, reward mean {reward_mean:.4f}'
)
if args.tensorboard_logger:
writer.add_scalar(
f"{args.env}_Episode_Rewards", reward_sum, num_tests
)
for name, weight in player.model.named_parameters():
writer.add_histogram(name, weight, num_tests)
if (args.save_max and reward_sum >= max_score) or not args.save_max:
if reward_sum >= max_score:
max_score = reward_sum
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
state_to_save = player.model.state_dict()
torch.save(
state_to_save, f"{args.save_model_dir}{args.env}.dat"
)
else:
state_to_save = player.model.state_dict()
torch.save(
state_to_save, f"{args.save_model_dir}{args.env}.dat"
)
reward_sum = 0
player.eps_len = 0
state = player.env.reset()
time.sleep(60)
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.state = torch.from_numpy(state).float().cuda()
else:
player.state = torch.from_numpy(state).float()
except KeyboardInterrupt:
time.sleep(0.01)
print("KeyboardInterrupt exception is caught")
finally:
print("test agent process finished")
if args.tensorboard_logger:
writer.close()
| 4,727 | 35.091603 | 190 | py |
a3c_continuous | a3c_continuous-master/shared_optim.py | from __future__ import division
import math
import torch
import torch.optim as optim
from collections import defaultdict
from math import sqrt
class SharedRMSprop(optim.Optimizer):
"""Implements RMSprop algorithm with shared states."""
def __init__(
self,
params,
lr=7e-4,
alpha=0.99,
eps=0.1,
weight_decay=0,
momentum=0,
centered=False,
):
defaults = defaultdict(
lr=lr,
alpha=alpha,
eps=eps,
weight_decay=weight_decay,
momentum=momentum,
centered=centered,
)
super(SharedRMSprop, self).__init__(params, defaults)
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"] = torch.zeros(1)
state["grad_avg"] = p.data.new().resize_as_(p.data).zero_()
state["square_avg"] = p.data.new().resize_as_(p.data).zero_()
state["momentum_buffer"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["square_avg"].share_memory_()
state["step"].share_memory_()
state["grad_avg"].share_memory_()
state["momentum_buffer"].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError("RMSprop does not support sparse gradients")
state = self.state[p]
square_avg = state["square_avg"]
alpha = group["alpha"]
state["step"] += 1
if group["weight_decay"] != 0:
grad = grad.add(p, alpha=group["weight_decay"])
square_avg.mul_(alpha).addcmul_(grad, grad, value=1 - alpha)
if group["centered"]:
grad_avg = state["grad_avg"]
grad_avg.mul_(alpha).add_(grad, alpha=1 - alpha)
avg = (
square_avg.addcmul(grad_avg, grad_avg, value=-1)
.sqrt_()
.add_(group["eps"])
)
else:
avg = square_avg.sqrt().add_(group["eps"])
if group["momentum"] > 0:
buf = state["momentum_buffer"]
buf.mul_(group["momentum"]).addcdiv_(grad, avg)
# Need to avoid version tracking for parameter.
p.data.add_(buf, alpha=-group["lr"])
else:
# Need to avoid version tracking for parameter.
p.data.addcdiv_(grad, avg, value=-group["lr"])
return loss
class SharedAdam(optim.Optimizer):
"""Implements Adam algorithm with shared states."""
def __init__(
self,
params,
lr=1e-3,
betas=(0.9, 0.999),
eps=1e-3,
weight_decay=0,
amsgrad=False,
):
defaults = defaultdict(
lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad
)
super(SharedAdam, self).__init__(params, defaults)
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"] = torch.zeros(1)
state["exp_avg"] = p.data.new().resize_as_(p.data).zero_()
state["exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
state["max_exp_avg_sq"] = p.data.new().resize_as_(p.data).zero_()
def share_memory(self):
for group in self.param_groups:
for p in group["params"]:
state = self.state[p]
state["step"].share_memory_()
state["exp_avg"].share_memory_()
state["exp_avg_sq"].share_memory_()
state["max_exp_avg_sq"].share_memory_()
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group["params"]:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError(
"Adam does not support sparse gradients, please consider SparseAdam instead"
)
amsgrad = group["amsgrad"]
state = self.state[p]
exp_avg, exp_avg_sq = state["exp_avg"], state["exp_avg_sq"]
if amsgrad:
max_exp_avg_sq = state["max_exp_avg_sq"]
beta1, beta2 = group["betas"]
state["step"] += 1
if group["weight_decay"] != 0:
                    grad = grad.add(p.data, alpha=group["weight_decay"])
# Decay the first and second moment running average coefficient
exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
exp_avg_sq.mul_(beta2).addcmul_(grad, grad.conj(), value=1 - beta2)
step_t = state["step"].item()
bias_correction1 = 1 - beta1**step_t
bias_correction2 = 1 - beta2**step_t
step_size = group["lr"] / bias_correction1
bias_correction2_sqrt = sqrt(bias_correction2)
if amsgrad:
# Maintains the maximum of all 2nd moment running avg. till now
torch.maximum(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq)
# Use the max. for normalizing running avg. of gradient
denom = (max_exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(
group["eps"]
)
else:
denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(
group["eps"]
)
p.data.addcdiv_(exp_avg, denom, value=-step_size)
return loss
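# Usage sketch (illustrative only, not part of the original file; `train`,
# `shared_model` and `args.workers` are assumed names): a shared optimizer is
# created once over the shared model's parameters, its state is moved to shared
# memory, and the same instance is handed to every worker process, e.g.
# optimizer = SharedAdam(shared_model.parameters(), lr=args.lr)
# optimizer.share_memory()
# for rank in range(args.workers):
# p = mp.Process(target=train, args=(rank, args, shared_model, optimizer))
# p.start()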
| 6,802 | 34.06701 | 100 | py |
a3c_continuous | a3c_continuous-master/player_util.py | from __future__ import division
import os
os.environ["OMP_NUM_THREADS"] = "1"
from math import pi as PI
import numpy as np
from numpy import fromiter, float32
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from utils import normal # , pi
class Agent(object):
def __init__(self, model, env, args, state):
self.model = model
self.env = env
self.state = state
self.hx = None
self.cx = None
self.eps_len = 0
self.args = args
self.values = []
self.log_probs = []
self.rewards = []
self.entropies = []
self.done = True
self.info = None
self.reward = 0
self.gpu_id = -1
self.hidden_size = args.hidden_size
def action_train(self):
value, mu, sigma, self.hx, self.cx = self.model(
self.state.unsqueeze(0), self.hx, self.cx
)
mu = torch.clamp(mu, -1.0, 1.0)
sigma = F.softplus(sigma) + 1e-5
pi = np.array([PI])
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
eps = torch.randn(mu.size()).float().cuda()
pi = torch.from_numpy(pi).float().cuda()
else:
eps = torch.randn(mu.size()).float()
pi = torch.from_numpy(pi).float()
action = (mu + sigma.sqrt() * eps).data
act = action
prob = normal(act, mu, sigma, self.gpu_id, gpu=self.gpu_id >= 0)
action = torch.clamp(action, -1.0, 1.0)
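# `sigma` holds the variance of the policy Gaussian (samples are drawn with
# sigma.sqrt() above), so the line below is the per-dimension differential
# entropy 0.5 * (log(2 * pi * sigma) + 1) of that Gaussian.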
entropy = 0.5 * ((sigma * 2 * pi.expand_as(sigma)).log() + 1)
self.entropies.append(entropy)
log_prob = (prob + 1e-6).log()
self.log_probs.append(log_prob)
state, reward, self.done, self.info = self.env.step(
fromiter(action.tolist()[0], dtype=float32)
) # faster than action.cpu().numpy()[0])
reward = max(min(float(reward), 1.0), -1.0)
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self.state = torch.from_numpy(state).float().cuda()
else:
self.state = torch.from_numpy(state).float()
self.eps_len += 1
self.done = self.done or self.eps_len >= self.args.max_episode_length
self.values.append(value)
self.rewards.append(reward)
return self
def action_test(self):
with torch.no_grad():
if self.done:
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self.cx = torch.zeros(1, self.hidden_size).cuda()
self.hx = torch.zeros(1, self.hidden_size).cuda()
else:
self.cx = torch.zeros(1, self.hidden_size)
self.hx = torch.zeros(1, self.hidden_size)
value, mu, sigma, self.hx, self.cx = self.model(
self.state.unsqueeze(0), self.hx, self.cx
)
mu = torch.clamp(mu, -1.0, 1.0)
state, self.reward, self.done, self.info = self.env.step(
fromiter(mu.tolist()[0], dtype=float32)
)
if self.gpu_id >= 0:
with torch.cuda.device(self.gpu_id):
self.state = torch.from_numpy(state).float().cuda()
else:
self.state = torch.from_numpy(state).float()
self.eps_len += 1
self.done = self.done or self.eps_len >= self.args.max_episode_length
return self
def clear_actions(self):
self.values = []
self.log_probs = []
self.rewards = []
self.entropies = []
return self
| 3,619 | 33.807692 | 77 | py |
a3c_continuous | a3c_continuous-master/utils.py | from __future__ import division
from math import pi as PI
import numpy as np
import torch
from torch.autograd import Variable
import json
import logging
def setup_logger(logger_name, log_file, level=logging.INFO):
l = logging.getLogger(logger_name)
formatter = logging.Formatter("%(asctime)s : %(message)s")
fileHandler = logging.FileHandler(log_file, mode="w")
fileHandler.setFormatter(formatter)
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
l.setLevel(level)
l.addHandler(fileHandler)
l.addHandler(streamHandler)
def read_config(file_path):
"""Read JSON config."""
json_object = json.load(open(file_path, "r"))
return json_object
def norm_col_init(weights, std=1.0):
x = torch.randn(weights.size())
x *= std / x.square().sum(1, keepdim=True).sqrt()
return x
def ensure_shared_grads(model, shared_model, gpu=False):
for param, shared_param in zip(model.parameters(), shared_model.parameters()):
if shared_param.grad is not None and not gpu:
return
elif not gpu:
shared_param._grad = param.grad
else:
shared_param._grad = param.grad.cpu()
def weights_init(m):
classname = m.__class__.__name__
if classname.find("Conv") != -1:
weight_shape = list(m.weight.data.size())
fan_in = np.prod(weight_shape[1:4])
fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
elif classname.find("Linear") != -1:
weight_shape = list(m.weight.data.size())
fan_in = weight_shape[1]
fan_out = weight_shape[0]
w_bound = np.sqrt(6.0 / (fan_in + fan_out))
m.weight.data.uniform_(-w_bound, w_bound)
m.bias.data.fill_(0)
def weights_init_mlp(m):
classname = m.__class__.__name__
if classname.find("Linear") != -1:
m.weight.data.normal_(0, 1)
m.weight.data *= 1 / torch.sqrt(m.weight.data.pow(2).sum(1, keepdim=True))
if m.bias is not None:
m.bias.data.fill_(0)
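# normal() below evaluates the Gaussian density N(x; mu, sigma) where `sigma` is
# the variance (not the standard deviation): exp(-(x - mu)^2 / (2 * sigma)) /
# sqrt(2 * pi * sigma), matching how sigma is used in player_util.action_train.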
def normal(x, mu, sigma, gpu_id, gpu=False):
pi = np.array([PI])
if gpu:
with torch.cuda.device(gpu_id):
pi = torch.from_numpy(pi).float().cuda()
else:
pi = torch.from_numpy(pi).float()
a = (-1 * (x - mu).pow(2) / (2 * sigma)).exp()
b = 1 / (2 * sigma * pi.expand_as(sigma)).sqrt()
return a * b
| 2,514 | 29.670732 | 82 | py |
a3c_continuous | a3c_continuous-master/model.py | from __future__ import division
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.autograd import Variable
from utils import norm_col_init, weights_init, weights_init_mlp
class A3C_CONV(torch.nn.Module):
def __init__(self, num_inputs, action_space, args):
super(A3C_CONV, self).__init__()
self.hidden_size = args.hidden_size
self.conv1 = nn.Conv1d(num_inputs, 32, 3, stride=1, padding=1)
self.lrelu1 = nn.LeakyReLU(0.1)
self.conv2 = nn.Conv1d(32, 32, 3, stride=1, padding=1)
self.lrelu2 = nn.LeakyReLU(0.1)
self.conv3 = nn.Conv1d(32, 64, 2, stride=1, padding=1)
self.lrelu3 = nn.LeakyReLU(0.1)
self.conv4 = nn.Conv1d(64, 64, 1, stride=1)
self.lrelu4 = nn.LeakyReLU(0.1)
self.lstm = nn.LSTMCell(1600, self.hidden_size)
num_outputs = action_space.shape[0]
self.critic_linear = nn.Linear(self.hidden_size, 1)
self.actor_linear = nn.Linear(self.hidden_size, num_outputs)
self.actor_linear2 = nn.Linear(self.hidden_size, num_outputs)
self.apply(weights_init)
lrelu_gain = nn.init.calculate_gain("leaky_relu")
self.conv1.weight.data.mul_(lrelu_gain)
self.conv2.weight.data.mul_(lrelu_gain)
self.conv3.weight.data.mul_(lrelu_gain)
self.conv4.weight.data.mul_(lrelu_gain)
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
self.actor_linear2.weight.data = norm_col_init(
self.actor_linear2.weight.data, 0.01
)
self.actor_linear2.bias.data.fill_(0)
self.critic_linear.weight.data = norm_col_init(
self.critic_linear.weight.data, 1.0
)
self.critic_linear.bias.data.fill_(0)
for name, p in self.named_parameters():
if "lstm" in name:
if "weight_ih" in name:
nn.init.xavier_uniform_(p.data)
elif "weight_hh" in name:
nn.init.orthogonal_(p.data)
elif "bias_ih" in name:
p.data.fill_(0)
# Set forget-gate bias to 1
n = p.size(0)
p.data[(n // 4) : (n // 2)].fill_(1)
elif "bias_hh" in name:
p.data.fill_(0)
self.train()
def forward(self, input, hx, cx):
x = self.lrelu1(self.conv1(input))
x = self.lrelu2(self.conv2(x))
x = self.lrelu3(self.conv3(x))
x = self.lrelu4(self.conv4(x))
x = x.view(x.size(0), -1)
hx, cx = self.lstm(x, (hx, cx))
x = hx
return (
self.critic_linear(x),
F.softsign(self.actor_linear(x)),
self.actor_linear2(x),
hx,
cx,
)
class A3C_MLP(torch.nn.Module):
def __init__(self, num_inputs, action_space, args):
super(A3C_MLP, self).__init__()
self.hidden_size = args.hidden_size
self.fc1 = nn.Linear(num_inputs, 256)
self.lrelu1 = nn.LeakyReLU(0.1)
self.fc2 = nn.Linear(256, 256)
self.lrelu2 = nn.LeakyReLU(0.1)
self.fc3 = nn.Linear(256, 128)
self.lrelu3 = nn.LeakyReLU(0.1)
self.fc4 = nn.Linear(128, 128)
self.lrelu4 = nn.LeakyReLU(0.1)
self.m1 = args.stack_frames * 128
self.lstm = nn.LSTMCell(self.m1, self.hidden_size)
num_outputs = action_space.shape[0]
self.critic_linear = nn.Linear(self.hidden_size, 1)
self.actor_linear = nn.Linear(self.hidden_size, num_outputs)
self.actor_linear2 = nn.Linear(self.hidden_size, num_outputs)
self.apply(weights_init_mlp)
lrelu = nn.init.calculate_gain("leaky_relu")
self.fc1.weight.data.mul_(lrelu)
self.fc2.weight.data.mul_(lrelu)
self.fc3.weight.data.mul_(lrelu)
self.fc4.weight.data.mul_(lrelu)
self.actor_linear.weight.data = norm_col_init(
self.actor_linear.weight.data, 0.01
)
self.actor_linear.bias.data.fill_(0)
self.actor_linear2.weight.data = norm_col_init(
self.actor_linear2.weight.data, 0.01
)
self.actor_linear2.bias.data.fill_(0)
self.critic_linear.weight.data = norm_col_init(
self.critic_linear.weight.data, 1.0
)
self.critic_linear.bias.data.fill_(0)
for name, p in self.named_parameters():
if "lstm" in name:
if "weight_ih" in name:
nn.init.xavier_uniform_(p.data)
elif "weight_hh" in name:
nn.init.orthogonal_(p.data)
elif "bias_ih" in name:
p.data.fill_(0)
# Set forget-gate bias to 1
n = p.size(0)
p.data[(n // 4) : (n // 2)].fill_(1)
elif "bias_hh" in name:
p.data.fill_(0)
self.train()
def forward(self, input, hx, cx):
x = self.lrelu1(self.fc1(input))
x = self.lrelu2(self.fc2(x))
x = self.lrelu3(self.fc3(x))
x = self.lrelu4(self.fc4(x))
x = x.view(x.size(0), -1)
hx, cx = self.lstm(x, (hx, cx))
x = hx
return (
self.critic_linear(x),
F.softsign(self.actor_linear(x)),
self.actor_linear2(x),
hx,
cx,
)
| 5,526 | 34.203822 | 70 | py |
a3c_continuous | a3c_continuous-master/gym_eval.py | from __future__ import division
import os
os.environ["OMP_NUM_THREADS"] = "1"
import argparse
import torch
from environment import create_env
from utils import setup_logger
from model import A3C_CONV, A3C_MLP
from player_util import Agent
from torch.autograd import Variable
import gym
import logging
import time
gym.logger.set_level(40)
parser = argparse.ArgumentParser(description="A3C_EVAL")
parser.add_argument(
"-ev",
"--env",
default="BipedalWalker-v3",
help="environment to train on (default: BipedalWalker-v3)",
)
parser.add_argument(
"-ne",
"--num-episodes",
type=int,
default=100,
help="how many episodes in evaluation (default: 100)",
)
parser.add_argument(
"-lmd",
"--load-model-dir",
default="trained_models/",
help="folder to load trained models from",
)
parser.add_argument("-lgd", "--log-dir", default="logs/", help="folder to save logs")
parser.add_argument(
"-r", "--render", action="store_true", help="Watch game as it being played"
)
parser.add_argument(
"-rf",
"--render-freq",
type=int,
default=1,
help="Frequency to watch rendered game play",
)
parser.add_argument(
"-mel",
"--max-episode-length",
type=int,
default=100000,
help="maximum length of an episode (default: 100000)",
)
parser.add_argument(
"-m", "--model", default="MLP", choices=["MLP", "CONV"], help="Model type to use"
)
parser.add_argument(
"-sf",
"--stack-frames",
type=int,
default=1,
help="Choose whether to stack observations",
)
parser.add_argument(
"-nge",
"--new-gym-eval",
action="store_true",
help="Create a gym evaluation for upload",
)
parser.add_argument(
"-s", "--seed", type=int, default=1, help="random seed (default: 1)"
)
parser.add_argument(
"-gid",
"--gpu-id",
type=int,
default=-1,
help="GPU to use [-1 CPU only] (default: -1)",
)
parser.add_argument(
"-hs",
"--hidden-size",
type=int,
default=128,
help="LSTM Cell number of features in the hidden state h",
)
args = parser.parse_args()
torch.set_default_tensor_type("torch.FloatTensor")
saved_state = torch.load(
f"{args.load_model_dir}{args.env}.dat", map_location=lambda storage, loc: storage
)
setup_logger(f"{args.env}_mon_log", rf"{args.log_dir}{args.env}_mon_log")
log = logging.getLogger(f"{args.env}_mon_log")
gpu_id = args.gpu_id
torch.manual_seed(args.seed)
if gpu_id >= 0:
torch.cuda.manual_seed(args.seed)
d_args = vars(args)
for k in d_args.keys():
log.info(f"{k}: {d_args[k]}")
env = create_env(args.env, args)
num_tests = 0
reward_total_sum = 0
player = Agent(None, env, args, None)
if args.model == "MLP":
player.model = A3C_MLP(env.observation_space.shape[0], env.action_space, args)
if args.model == "CONV":
player.model = A3C_CONV(args.stack_frames, env.action_space, args)
player.gpu_id = gpu_id
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.model = player.model.cuda()
if args.new_gym_eval:
player.env = gym.wrappers.Monitor(player.env, f"{args.env}_monitor", force=True)
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.model.load_state_dict(saved_state)
else:
player.model.load_state_dict(saved_state)
player.model.eval()
start_time = time.time()
try:
for i_episode in range(args.num_episodes):
player.state = player.env.reset()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.state = torch.from_numpy(player.state).float().cuda()
else:
player.state = torch.from_numpy(player.state).float()
player.eps_len = 0
reward_sum = 0
while 1:
if args.render:
if i_episode % args.render_freq == 0:
player.env.render()
player.action_test()
reward_sum += player.reward
if player.done:
num_tests += 1
reward_total_sum += reward_sum
reward_mean = reward_total_sum / num_tests
log.info(
f"Time {time.strftime('%Hh %Mm %Ss', time.gmtime(time.time() - start_time))}, episode reward {reward_sum}, episode length {player.eps_len}, reward mean {reward_mean:.4f}"
)
break
except KeyboardInterrupt:
print("KeyboardInterrupt exception is caught")
finally:
print("gym evaluation process finished")
player.env.close()
| 4,450 | 25.337278 | 190 | py |
a3c_continuous | a3c_continuous-master/train.py | from __future__ import division
import os
os.environ["OMP_NUM_THREADS"] = "1"
from setproctitle import setproctitle as ptitle
import numpy as np
import torch
import torch.optim as optim
from environment import create_env
from utils import ensure_shared_grads
from model import A3C_CONV, A3C_MLP
from player_util import Agent
from torch.autograd import Variable
import gym
import time
from marshal import dumps
def train(rank, args, shared_model, optimizer):
ptitle(f"Train Agent: {rank}")
gpu_id = args.gpu_ids[rank % len(args.gpu_ids)]
torch.manual_seed(args.seed + rank)
if gpu_id >= 0:
torch.cuda.manual_seed(args.seed + rank)
hidden_size = args.hidden_size
env = create_env(args.env, args)
if optimizer is None:
if args.optimizer == "RMSprop":
optimizer = optim.RMSprop(shared_model.parameters(), lr=args.lr)
if args.optimizer == "Adam":
optimizer = optim.Adam(shared_model.parameters(), lr=args.lr)
env.seed(args.seed + rank)
player = Agent(None, env, args, None)
player.gpu_id = gpu_id
if args.model == "MLP":
player.model = A3C_MLP(
player.env.observation_space.shape[0], player.env.action_space, args
)
if args.model == "CONV":
player.model = A3C_CONV(args.stack_frames, player.env.action_space, args)
player.state = player.env.reset()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.state = torch.from_numpy(player.state).float().cuda()
player.model = player.model.cuda()
else:
player.state = torch.from_numpy(player.state).float()
player.model.train()
try:
while 1:
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.model.load_state_dict(shared_model.state_dict())
else:
player.model.load_state_dict(shared_model.state_dict())
if player.done:
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.cx = torch.zeros(1, hidden_size).cuda()
player.hx = torch.zeros(1, hidden_size).cuda()
else:
player.cx = torch.zeros(1, hidden_size)
player.hx = torch.zeros(1, hidden_size)
else:
player.cx = player.cx.data
player.hx = player.hx.data
for step in range(args.num_steps):
player.action_train()
if player.done:
break
if player.done:
player.eps_len = 0
state = player.env.reset()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
player.state = torch.from_numpy(state).float().cuda()
else:
player.state = torch.from_numpy(state).float()
if gpu_id >= 0:
with torch.cuda.device(gpu_id):
R = torch.zeros(1, 1).cuda()
gae = torch.zeros(1, 1).cuda()
else:
R = torch.zeros(1, 1)
gae = torch.zeros(1, 1)
if not player.done:
state = player.state
value, _, _, _, _ = player.model(
state.unsqueeze(0), player.hx, player.cx
)
R = value.detach()
player.values.append(R)
policy_loss = 0
value_loss = 0
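# Backward pass over the collected rollout: R is the bootstrapped discounted
# return used for the 0.5 * advantage^2 value loss, while gae accumulates
# delta_t = r_t + gamma * V(s_{t+1}) - V(s_t) with decay gamma * tau
# (Generalized Advantage Estimation) to weight the policy-gradient term.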
for i in reversed(range(len(player.rewards))):
R = args.gamma * R + player.rewards[i]
advantage = R - player.values[i]
value_loss = value_loss + 0.5 * advantage.pow(2)
# Generalized Advantage Estimation
delta_t = (
player.rewards[i]
+ args.gamma * player.values[i + 1].data
- player.values[i].data
)
gae = gae * args.gamma * args.tau + delta_t
policy_loss = (
policy_loss
- (player.log_probs[i].sum() * gae)
- (args.entropy_coef * player.entropies[i].sum())
)
player.model.zero_grad()
(policy_loss + 0.5 * value_loss).sum().backward()
ensure_shared_grads(player.model, shared_model, gpu=gpu_id >= 0)
optimizer.step()
player.clear_actions()
except KeyboardInterrupt:
time.sleep(0.01)
print("KeyboardInterrupt exception is caught")
finally:
print(f"train agent {rank} process finished")
| 4,686 | 35.333333 | 81 | py |
wikiworkshop2023_imgaccessibility | wikiworkshop2023_imgaccessibility-main/human_alt_quality_exp/study/trial_info/sample_data.py | import pandas as pd
import json
import numpy as np
import random
import shutil
import random
import os
import torch
import csv
import sys
sys.path.insert(0, '../../../models/03_xu2015/code')
import caption
gpu_id = 0
os.environ['KMP_DUPLICATE_LIB_OK']='True'
device = torch.device("cuda:"+str(gpu_id) if torch.cuda.is_available() else "cpu")
def load_model(label_cond):
if label_cond == "description":
run_id = "20210505_150530_descriptioncaptionnoneFalseFalseFalserevised"
epoch = 29 # epoch 23
else:
run_id = "20210505_150623_captiondescriptionnoneFalseFalseFalserevised"
epoch = 26 # epoch 20
# Load specs
specs = json.load(open('../../../models/03_xu2015/runs/run_' + run_id + '/' + 'specs.json', 'r'))
data_location = specs['data_folder'].replace("../../../../../..","")
# print(data_location)
# Load model
print("Loading model")
checkpoint = torch.load("/mnt/fs5/ekreiss/qud_captioning/03_xu2015/runs/run_" + run_id + "/checkpoint_wikipedia_1_min_word_freq_epoch" + str(epoch) + ".pth.tar", map_location=str(device))
decoder = checkpoint['decoder']
decoder = decoder.to(device)
decoder.eval()
encoder = checkpoint['encoder']
encoder = encoder.to(device)
encoder.eval()
# Load word map (word2ix)
print("Loading word map")
with open(data_location + "/WORDMAP_wikipedia_1_min_word_freq.json", 'r') as j:
word_map = json.load(j)
rev_word_map = {v: k for k, v in word_map.items()} # ix2word
return([encoder, decoder, word_map, rev_word_map])
def generate(model, img_filename, context):
[encoder, decoder, word_map, rev_word_map] = model
img = "/mnt/fs5/ekreiss/datasets/Wikipedia/wikicommons/resized/" + img_filename
# Generate model output
# print("Generating model output")
# try:
seq = caption.labelwcontext_image_beam_search(encoder, decoder, img, context, word_map, 5, gpu_id=str(gpu_id))
generated_label = " ".join([rev_word_map[ind] for ind in seq])
generated_label = generated_label.replace('<start> ', '')
generated_label = generated_label.replace(' <end>', '')
return(generated_label)
# except Exception as error_msg:
# print("An exception occurred.")
# print(error_msg)
# return("NA")
with open('/mnt/fs5/ekreiss/datasets/Wikipedia/wiki_split.json') as f:
js_obj = json.load(f)
to_pd = js_obj['images']
data = pd.DataFrame.from_dict(to_pd)
print(len(data))
clean_data = data[(data['split']=="val")].copy(deep=True)
# print(len(clean_data))
randomlist = random.sample(range(0, len(clean_data)), 300)
sample = clean_data.iloc[randomlist].copy(deep=True)
# print(len(sample))
descr_model = load_model("description")
caption_model = load_model("caption")
# descr_encoder, descr_decoder, descr_wordmap, descr_rev_word_map = load_model("description")
# caption_encoder, caption_decoder, caption_wordmap, caption_rev_word_map = load_model("caption")
generated_descriptions = []
generated_captions = []
s_id = 0
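# For every sampled validation entry, generate a description conditioned on the
# existing caption and a caption conditioned on the existing description, and
# copy the corresponding image into the study's image folder.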
for idx, row in sample.iterrows():
gen_description = generate(descr_model, row['filename'], row['caption']['raw'])
gen_caption = generate(caption_model, row['filename'], row['description']['raw'])
# if (gen_description == "NA") or (gen_caption == "NA"):
# print(kasjdfhk)
generated_descriptions.append(gen_description)
generated_captions.append(gen_caption)
shutil.copyfile('/mnt/fs5/ekreiss/datasets/Wikipedia/wikicommons/resized/' + row['filename'],
'../images/' + row['filename'])
sample['generated_descr'] = generated_descriptions
sample['generated_capt'] = generated_captions
# revert to original format
data_sample = {'images': sample.to_dict('records')}
with open('exp_sample.json', 'w') as outfile:
json.dump(data_sample, outfile, indent=4) | 3,722 | 33.155963 | 189 | py |
Meta-RegGNN | Meta-RegGNN-main/evaluators.py | '''
Functions for k-fold evaluation of models.
'''
import random
import pickle
import numpy as np
from sklearn.model_selection import KFold
import torch
import proposed_method.data_utils as data_utils
from proposed_method.MetaRegGNN import MetaRegGNN
from collections import OrderedDict
from config import Config
def evaluate_MetaRegGNN(shuffle=False, random_state=None,
dropout=0.1, k_list=list(range(2, 16)), lr=1e-3, wd=5e-4,
device=torch.device('cpu'), num_epoch=100):
overall_preds = {k: [] for k in k_list}
overall_scores = {k: [] for k in k_list}
train_mae = {k: [] for k in k_list}
data = data_utils.load_dataset_pytorch()
fold = -1
for train_idx, test_idx in KFold(Config.K_FOLDS, shuffle=shuffle,
random_state=random_state).split(data):
fold += 1
print(f"Cross Validation Fold {fold+1}/{Config.K_FOLDS}")
for k in k_list:
selected_train_data = [data[i] for i in train_idx]
test_data = [data[i] for i in test_idx]
candidate_model = MetaRegGNN(116, 64, 1, dropout).float().to(device)
optimizer = torch.optim.Adam(candidate_model.parameters(), lr=Config.MetaRegGNN.ETA, weight_decay=wd)
train_loader, test_loader = data_utils.get_loaders(selected_train_data, test_data)
candidate_model.train()
for epoch in range(num_epoch):
tgt_data = iter(test_loader)
preds = []
scores = []
outer_loss = torch.tensor(0., device=device)
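# Meta-style inner/outer update: a single gradient step with rate
# Config.MetaRegGNN.GAMMA is taken on the current source batch, the adapted
# weights are loaded back into the model, the loss is then evaluated on a batch
# from the target (test) loader, and that outer loss triggers an Adam update on
# every fifth source batch (i % 5 == 0).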
for i,(batch_src) in enumerate(train_loader):
out_src = candidate_model(batch_src.x.to(device), data_utils.to_dense(batch_src).adj.to(device))
inner_loss = candidate_model.loss(out_src.view(-1, 1), batch_src.y.to(device).view(-1, 1))
candidate_model.zero_grad()
params = OrderedDict(candidate_model.named_parameters())
grads = torch.autograd.grad(inner_loss,
params.values(),
create_graph=True)
updated_params = OrderedDict()
for (name, param), grad in zip(params.items(), grads):
updated_params[name] = param - Config.MetaRegGNN.GAMMA * grad
candidate_model.load_state_dict(updated_params)
try:
batch_tgt = next(tgt_data)
except StopIteration:
tgt_data = iter(test_loader)
batch_tgt = next(tgt_data)
out_tgt = candidate_model(batch_tgt.x.to(device), data_utils.to_dense(batch_tgt).adj.to(device),params)
outer_loss = candidate_model.loss(out_tgt.view(-1, 1), batch_tgt.y.to(device).view(-1, 1))
if i%5==0: #number of shots
outer_loss.backward()
optimizer.step()
preds.append(out_src.cpu().data.numpy())
scores.append(batch_src.y.long().numpy())
preds = np.hstack(preds)
scores = np.hstack(scores)
epoch_mae = np.mean(np.abs(preds.reshape(-1, 1) - scores.reshape(-1, 1)))
train_mae[k].append(epoch_mae)
candidate_model.eval()
with torch.no_grad():
preds = []
scores = []
for batch in test_loader:
out = candidate_model(batch.x.to(device), data_utils.to_dense(batch).adj.to(device))
loss = candidate_model.loss(out.view(-1, 1), batch.y.to(device).view(-1, 1))
preds.append(out.cpu().data.numpy())
scores.append(batch.y.cpu().long().numpy())
preds = np.hstack(preds)
scores = np.hstack(scores)
overall_preds[k].extend(preds)
overall_scores[k].extend(scores)
for k in k_list:
overall_preds[k] = np.vstack(overall_preds[k]).ravel()
overall_scores[k] = np.vstack(overall_scores[k]).ravel()
overall_preds = overall_preds[k_list[0]]
overall_scores = overall_scores[k_list[0]]
return overall_preds, overall_scores, train_mae | 4,454 | 43.108911 | 123 | py |
Meta-RegGNN | Meta-RegGNN-main/demo.py | '''
Main file for creating simulated data or loading real data
and running MetaRegGNN and sample selection methods.
Usage:
For data processing:
python demo.py --mode data
For inferences:
python demo.py --mode infer
For more information:
python demo.py -h
'''
import argparse
import pickle
import torch
import numpy as np
import proposed_method.data_utils as data_utils
import evaluators
from config import Config
parser = argparse.ArgumentParser()
parser.add_argument('--mode', choices=['data', 'infer'],
help="Creates data and topological features OR make inferences on data")
opts = parser.parse_args()
if opts.mode == 'data':
'''
Connectomes and scores are preprocessed and saved to the folder specified in config.py.
'''
data_utils.create_dataset()
print(f"Data and topological features are created and saved at {Config.DATA_FOLDER} successfully.")
elif opts.mode == 'infer':
'''
Cross validation will be used to train and generate inferences
on the data saved in the folder specified in config.py.
Overall MAE and RMSE will be printed and predictions will be saved
in same data folder.
'''
#print(f"{opts.model} will be run on the data.")
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
mae_evaluator = lambda p, s: np.mean(np.abs(p - s))
rmse_evaluator = lambda p, s: np.sqrt(np.mean((p - s) ** 2))
preds, scores, _ = evaluators.evaluate_MetaRegGNN(shuffle=Config.SHUFFLE, random_state=Config.MODEL_SEED,
dropout=Config.MetaRegGNN.DROPOUT,
lr=Config.MetaRegGNN.LR, wd=Config.MetaRegGNN.WD, device=device,
num_epoch=Config.MetaRegGNN.NUM_EPOCH)
print(f"MAE: {mae_evaluator(preds, scores):.3f}")
print(f"RMSE: {rmse_evaluator(preds, scores):.3f}")
with open(f"{Config.RESULT_FOLDER}preds.pkl", 'wb') as f:
pickle.dump(preds, f)
with open(f"{Config.RESULT_FOLDER}scores.pkl", 'wb') as f:
pickle.dump(scores, f)
print(f"Predictions are successfully saved at {Config.RESULT_FOLDER}.")
else:
raise Exception("Unknown argument.")
| 2,264 | 31.357143 | 114 | py |
Meta-RegGNN | Meta-RegGNN-main/proposed_method/MetaRegGNN.py | '''MetaRegGNN regression model architecture.
torch_geometric needs to be installed.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_geometric.nn.dense import DenseGCNConv
from torchmeta.modules import MetaModule
class MetaRegGNN(MetaModule):
'''Regression using a DenseGCNConv layer from pytorch geometric.
Layers in this model are identical to GCNConv.
'''
def __init__(self, nfeat, nhid, nclass, dropout):
super(MetaRegGNN, self).__init__()
self.gc1 = DenseGCNConv(nfeat, nhid)
self.gc2 = DenseGCNConv(nhid, nclass)
self.dropout = dropout
self.LinearLayer = nn.Linear(nfeat, 1)
def forward(self, x, edge_index, params=None):
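# Two dense GCN layers produce one value per node; the final nn.Linear then mixes
# the per-node outputs across the node dimension into a single graph-level
# regression score. `params` is accepted for torchmeta-style calls but is not
# consumed by the layers below.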
x = F.relu(self.gc1(x, edge_index))
x = F.dropout(x, self.dropout, training=self.training)
x = self.gc2(x, edge_index)
x = self.LinearLayer(torch.transpose(x, 2, 1))
return torch.transpose(x, 2, 1)
def loss(self, pred, score):
return F.mse_loss(pred, score)
| 1,053 | 26.736842 | 68 | py |
Meta-RegGNN | Meta-RegGNN-main/proposed_method/data_utils.py | import copy
import scipy.io # to read .mat files
from scipy.sparse import coo_matrix
from sklearn.model_selection import KFold
import numpy as np
import pandas as pd
import torch
import torch_geometric
from config import Config
DATA_PATH = "/home/latis/Documents/RegGNN/IQ_data_clean/" # this should point to the directory of connectomes in .mat format
def load_matrices_from_matfile(pop="NT"):
'''Loads the matrices for given population, for 226 subjects
Args:
pop (string): Population code, "NT" or "ASD"
Returns:
connectomes (np.array): Connectome tensor of shape (116x116x226)
fiq_scores (np.array): FIQ score vector of shape (226x1)
viq_scores (np.array): FIQ score vector of shape (226x1)
Raises:
ValueError: if given population is not "NT" or "ASD"
'''
if pop not in ["NT", "ASD"]:
raise ValueError("Population not found")
connectomes = scipy.io.loadmat(f"{DATA_PATH}/allMatrices_{pop}.mat")[f"allMatrices_{pop}"]
fiq_scores = scipy.io.loadmat(f"{DATA_PATH}/FIQ_{pop}.mat")[f"FIQ_{pop}"]
viq_scores = scipy.io.loadmat(f"{DATA_PATH}/VIQ_{pop}.mat")[f"VIQ_{pop}"]
connectomes = torch.tensor(np.nan_to_num(connectomes))
fiq_scores = torch.tensor(fiq_scores)
viq_scores = torch.tensor(viq_scores)
return connectomes, fiq_scores, viq_scores
def create_dataset():
'''Does preprocessing on matrices in .mat files and saves them as torch tensors
Files will be saved in connectome_{population}.ts, fiq_{population}.ts,
and viq_{population}.ts under Config.DATA_FOLDER
'''
con_n, fiq_n, viq_n = load_matrices_from_matfile("NT")
con_a, fiq_a, viq_a = load_matrices_from_matfile("ASD")
con_n[con_n < 0] = 0
con_a[con_a < 0] = 0
torch.save(con_n, f"{Config.DATA_FOLDER}connectome_NT.ts")
torch.save(fiq_n, f"{Config.DATA_FOLDER}fiq_NT.ts")
torch.save(viq_n, f"{Config.DATA_FOLDER}viq_NT.ts")
torch.save(con_a, f"{Config.DATA_FOLDER}connectome_ASD.ts")
torch.save(fiq_a, f"{Config.DATA_FOLDER}fiq_ASD.ts")
torch.save(viq_a, f"{Config.DATA_FOLDER}viq_ASD.ts")
def load_dataset_pytorch(pop="ASD", score="fiq"):
'''Loads the data for the given population into a list of Pytorch Geometric
Data objects, which then can be used to create DataLoaders.
'''
connectomes = torch.load(f"{Config.DATA_FOLDER}connectome_{pop}.ts")
scores = torch.load(f"{Config.DATA_FOLDER}{score}_{pop}.ts")
pyg_data = []
for subject in range(scores.shape[0]):
sparse_mat = to_sparse(connectomes[:, :, subject])
pyg_data.append(torch_geometric.data.Data(x=torch.eye(116, dtype=torch.float),
y=scores[subject].float(), edge_index=sparse_mat._indices(),
edge_attr=sparse_mat._values().float()))
return pyg_data
def to_sparse(mat):
'''Transforms a square matrix to torch.sparse tensor
Methods ._indices() and ._values() can be used to access to
edge_index and edge_attr while generating Data objects
'''
coo = coo_matrix(mat, dtype='float64')
row = torch.from_numpy(coo.row.astype(np.int64))
col = torch.from_numpy(coo.col.astype(np.int64))
coo_index = torch.stack([row, col], dim=0)
coo_values = torch.from_numpy(coo.data.astype(np.float64).reshape(-1, 1)).reshape(-1)
sparse_mat = torch.sparse.LongTensor(coo_index, coo_values)
return sparse_mat
def load_dataset_cpm(pop="NT"):
'''Loads the data for given population in the upper triangular matrix form
as required by CPM functions.
'''
connectomes = np.array(torch.load(f"connectome_{pop}.ts"))
fiq_scores = np.array(torch.load(f"fiq_{pop}.ts"))
viq_scores = np.array(torch.load(f"viq_{pop}.ts"))
fc_data = {}
behav_data = {}
for subject in range(fiq_scores.shape[0]): # take upper triangular part of each matrix
fc_data[subject] = connectomes[:, :, subject][np.triu_indices_from(connectomes[:, :, subject], k=1)]
behav_data[subject] = {'fiq': fiq_scores[subject].item(), 'viq': viq_scores[subject].item()}
return pd.DataFrame.from_dict(fc_data, orient='index'), pd.DataFrame.from_dict(behav_data, orient='index')
def get_folds(data_list, k_folds=5):
'''Divides a data list into lists
with k elements such that each element
is the data used in that cross validation fold
'''
train_folds, test_folds = [], []
for train_idx, test_idx in KFold(k_folds, shuffle=False, random_state=None).split(data_list):
train_folds.append([data_list[i] for i in train_idx])
test_folds.append([data_list[i] for i in test_idx])
return train_folds, test_folds
def get_loaders(train, test, batch_size=1):
'''Returns data loaders for given data lists
'''
train_loader = torch_geometric.data.DataLoader(train, batch_size=batch_size)
test_loader = torch_geometric.data.DataLoader(test, batch_size=batch_size)
return train_loader, test_loader
def load_dataset_tensor(pop="NT"):
'''Loads dataset as tuple of (tensor of connectomes,
tensor of fiq scores, tensor of viq scores)
'''
connectomes = torch.load(f"connectome_{pop}.ts")
fiq_scores = torch.load(f"fiq_{pop}.ts")
viq_scores = torch.load(f"viq_{pop}.ts")
return connectomes, fiq_scores, viq_scores
def to_dense(data):
'''Returns a copy of the data object in Dense form.
'''
denser = torch_geometric.transforms.ToDense()
copy_data = denser(copy.deepcopy(data))
return copy_data | 5,565 | 37.386207 | 125 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/swd_pac.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import torch
import torch.nn as nn
from torch import optim
import torch.utils.data as data_utils
from utils import rand_projections, TransformNet, Wasserstein_1D, sliced_wasserstein_distance
from tqdm import tqdm
from vmf_utils import hyperspherical_uniform as unif, von_mises_fisher as vmf
class Discriminator(nn.Module):
"""
used as a discriminator for computing KL
"""
def __init__(self, dim,dim_hidden=10):
super(Discriminator, self).__init__()
self.dim = dim
self.dim_hidden = dim_hidden
self.net = nn.Sequential(nn.Linear(self.dim,self.dim_hidden),
nn.Sigmoid(),
#nn.ReLU(),
nn.Linear(self.dim_hidden,self.dim_hidden),
nn.Sigmoid(),
#nn.ReLU(),
nn.Linear(self.dim_hidden,self.dim_hidden),
nn.Sigmoid(),
#nn.ReLU(),
nn.Linear(self.dim_hidden, 1),
#nn.Sigmoid(),
#nn.ReLU(),
)
def forward(self, input):
out =self.net(input)
return out
def create_data_loader(X, batch_size):
#print(X.shape)
data = data_utils.TensorDataset(X)
return data_utils.DataLoader(data, batch_size= batch_size, drop_last = False,sampler = data_utils.sampler.RandomSampler(data))
def loop_iterable(iterable):
while True:
yield from iterable
def set_requires_grad(model, requires_grad=True):
for param in model.parameters():
param.requires_grad = requires_grad
def compute_kl(Xs,Xt,discr,optimizer,optim = True,nb_iter=10,device='cuda'):
"""
Xt are the prior samples
"""
batch_size = 2000
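# Adversarial KL estimate (interpretation, not a formal guarantee): the
# discriminator is trained with a logistic loss to tell Xs apart from the prior
# samples Xt, so its raw logit approximates log(p_Xs / p_Xt); averaging that
# logit over Xs then approximates KL(Xs || Xt).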
# we work with without optimizing sample representation
source_loader = create_data_loader(Xs.detach(), batch_size= batch_size)
target_loader = create_data_loader(Xt, batch_size = batch_size)
if optim == True:
# learning the discriminator function
for epoch in range(nb_iter):
S_batches = loop_iterable(source_loader)
T_batches = loop_iterable(target_loader)
iterations = len(source_loader)
total_loss = 0
for i in range(iterations):
source_s = next(S_batches)[0]
source_t = next(T_batches)[0]
out_s = discr(source_s.to(device))
out_t = discr(source_t.to(device))
loss = - (torch.log(torch.sigmoid(out_s)).mean() + torch.log(1-torch.sigmoid(out_t)).mean())
total_loss += loss.item()
#print(out_s.shape,out_t.shape,loss)
optimizer.zero_grad()
loss.backward()
optimizer.step()
#print('kl',epoch,-total_loss)
# computing the KL with fixed discriminator and optimizable sample
somme = 0
set_requires_grad(discr, requires_grad=False)
source_loader = create_data_loader(Xs, batch_size= batch_size)
for data in source_loader:
somme += discr(data[0]).sum()
return somme/Xs.shape[0]
def PAC_SWD(first_samples, second_samples, num_projections, prior_samples, lr = 0.005, p=2,
max_iter=10, power_p = True, optim_lam = False, device="cuda",
method= "NN", approx_vmf = True, mu_0=None, kappa_0=None):
n = first_samples.size(0)
lam = n**(0.5)
dim = first_samples.size(1)
rho = unif.HypersphericalUniform(dim = dim - 1)
thetas = rho.sample(shape = num_projections)
pro = thetas.to(device)
first_samples_detach = first_samples.detach().to(device)
second_samples_detach = second_samples.detach().to(device)
if method == "NN":
f = TransformNet(dim).to(device)
f_op = optim.Adam(f.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.)
f_op.zero_grad()
discriminator = Discriminator(dim).to(device)
discrim_optim = optim.Adam(discriminator.parameters(), lr=0.01, betas=(0.5, 0.999), weight_decay=0.00001)
if method == "vmf":
mu_t = mu_0
kappa_t = kappa_0
kappa_t.requires_grad = True
kappa_pos = kappa_t
params = [kappa_t]
f_op = optim.Adam(params, lr=lr)
rho_0 = unif.HypersphericalUniform(dim=dim-1,device=device)
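# The loop below maximises the PAC-Bayes-regularised objective over the slice
# distribution rho: E_{theta ~ rho}[W_p^p between the theta-projections of the
# two samples] - KL(rho || rho_0) / lam, where rho is either the pushforward of
# the uniform prior through the network f (method "NN") or a von Mises-Fisher
# distribution (method "vmf"); lam = n^alpha can be learned ("GD") or set in
# closed form ("optimal").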
if optim_lam == 'GD' or optim_lam == 'optimal':
K = torch.cdist(first_samples, second_samples, p = 2.0)
Delta = torch.max(K).detach().cpu().numpy()
if optim_lam == 'GD':
alpha = torch.tensor(0.5, dtype = torch.float, device = device, requires_grad = True)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
f_op.add_param_group({'params': [alpha]})
for i in range(max_iter):
if method == "NN":
thetas = f(pro)
set_requires_grad(discriminator,True)
kl_val = compute_kl(thetas,prior_samples, discriminator, discrim_optim,device=device)
set_requires_grad(discriminator,False)
wasserstein_distance = sliced_wasserstein_distance(first_samples_detach, second_samples_detach, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
elif method == "vmf":
kappa_pos.data.clamp_(min=1e-3)
if kappa_t.data < 1e-3:
print('Kappa is less than 1e-3, be careful!')
rho_t = unif.HypersphericalUniform(dim=dim-1)
thetas = rho_t.sample(shape=num_projections).to(device)
else:
rho_t = vmf.VonMisesFisher(mu_t / torch.norm(mu_t), kappa_pos, approx=approx_vmf)
thetas = rho_t.rsample(shape=num_projections).to(device)
wasserstein_distance = sliced_wasserstein_distance(first_samples_detach, second_samples_detach,
projections=thetas, num_projections=num_projections,
p=p, power_p = power_p, device=device)
kl_val = torch.distributions.kl.kl_divergence(rho_t, rho_0).mean()
if optim_lam == 'GD':
reg = (kl_val + 4.6) / lam + lam * (Delta**(2*p))/(4*n)
else:
reg = kl_val / lam
loss = - wasserstein_distance + reg
if i % 100 == 0:
print("Iteration {}".format(i))
print("\t SW:{}".format(wasserstein_distance))
print("\t Regularization:{}".format(reg))
f_op.zero_grad()
loss.backward()#retain_graph = True)
f_op.step()
if optim_lam =='GD':
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
elif optim_lam == 'optimal':
C_2 = Delta**(2*p)/4
if method =="NN":
K_1 = compute_kl(thetas,prior_samples, discriminator, discrim_optim, optim=False, device=device).detach().cpu().numpy().item()
if method =="vmf":
K_1 = kl_val.detach().cpu().numpy().item()
alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
lam = n**alpha_0
else:
pass
if method == "NN":
thetas = f(pro).detach()
wasserstein_distance = sliced_wasserstein_distance(first_samples, second_samples, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
return wasserstein_distance, thetas #projections
if method == "vmf":
final_mu = (mu_t / torch.norm(mu_t)).detach()
# final_kappa = kappa_pos.detach()
kappa_pos.data.clamp_(min=1e-3)
rho = vmf.VonMisesFisher(final_mu, kappa_pos, approx=approx_vmf)
thetas = rho.rsample(shape=num_projections).to(device)
wasserstein_distance = sliced_wasserstein_distance(first_samples, second_samples, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
kl_val = torch.distributions.kl.kl_divergence(rho, rho_0).mean()
reg = kl_val / lam
bound = wasserstein_distance - kl_val
return wasserstein_distance, bound, (final_mu.detach(), kappa_pos.detach())
def pac_sliced_wasserstein_distance2(
first_samples, second_samples, num_projections, prior_samples, p=2, max_iter=10, lam=1, power_p = True, optim_lam = False, device="cuda"):
if optim_lam == 'GD' or optim_lam == 'optimal':
K = torch.cdist(first_samples, second_samples, p = 2.0)
Delta = torch.max(K).detach().cpu().numpy()
n = first_samples.size(1)
lam = n**(0.5)
if optim_lam == 'GD':
alpha = torch.tensor(0.5, dtype = torch.float, device = device, requires_grad = True)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
f_alpha = optim.Adam([alpha], lr=0.01, betas=(0.5, 0.999),weight_decay=0.)
else:
n = first_samples.size(1)
lam = n**(0.5)
embedding_dim = first_samples.size(1)
pro = rand_projections(embedding_dim, num_projections).to(device)
first_samples_detach = first_samples.detach()
second_samples_detach = second_samples.detach()
f = TransformNet(embedding_dim).to(device)
f_op = optim.Adam(f.parameters(), lr=0.001, betas=(0.5, 0.999),weight_decay=0.)
discriminator = Discriminator(embedding_dim).to(device)
discrim_optim = optim.Adam(discriminator.parameters(), lr=0.001, betas=(0.5, 0.999),weight_decay=0.00001)
for i in range(max_iter):
projections = f(pro)
set_requires_grad(discriminator,True)
kl_val = compute_kl(projections,prior_samples, discriminator, discrim_optim,device=device)
#if optim_lam == 'GD':
# lam_numpy = torch.clone(lam).detach().cpu().numpy().item()
#
#else:
# lam_numpy = lam
reg = kl_val / lam
#reg = kl_val / lam_numpy
set_requires_grad(discriminator,False)
encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
wasserstein_distance = torch.abs((torch.sort(encoded_projections.transpose(0, 1), dim=1)[0]- torch.sort(distribution_projections.transpose(0, 1), dim=1)[0]))
if power_p == True:
wasserstein_distance = torch.mean(torch.pow(wasserstein_distance, p), dim=1).mean()
if power_p == False:
wasserstein_distance = torch.pow(torch.mean(torch.pow(wasserstein_distance, p), dim=1).mean(), 1.0 / p) #added the mean
loss = reg - wasserstein_distance
#print('lamb before', lam)
f_op.zero_grad()
loss.backward(retain_graph = True)
f_op.step()
#print('lamb after', lam)
if optim_lam == 'GD':
f_alpha.zero_grad()
#set_requires_grad(discriminator,False)
KL = compute_kl(projections,prior_samples, discriminator, discrim_optim, optim= False, device=device).detach().cpu().numpy().item()
loss_lamb = lam * (Delta**(2*p))/(4*n) + (KL+4.6)/lam #4.6 = ln(10**2)
#print('alpha before', alpha)
f_alpha.zero_grad()
loss_lamb.backward()
f_alpha.step()
#print('alpha after', alpha)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
if optim_lam == 'optimal':
C_2 = Delta**(2*p)/4
K_1 = compute_kl(projections,prior_samples, discriminator, discrim_optim, optim=False, device=device).detach().cpu().numpy().item()
alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
lam = n**alpha_0
projections = f(pro)
encoded_projections = first_samples.matmul(projections.transpose(0, 1))
distribution_projections = second_samples.matmul(projections.transpose(0, 1))
wasserstein_distance = torch.abs((torch.sort(encoded_projections.transpose(0, 1), dim=1)[0] - torch.sort(distribution_projections.transpose(0, 1), dim=1)[0]))
if power_p == True:
wasserstein_distance = torch.mean(torch.pow(wasserstein_distance, p), dim=1).mean()
if power_p == False:
wasserstein_distance = torch.pow(torch.mean(torch.pow(wasserstein_distance, p), dim=1).mean(), 1.0 / p)
return wasserstein_distance,projections
#def pac_sliced_wasserstein_distance(
# first_samples, second_samples, num_projections, f, f_op, prior_samples, discriminator,discrim_optim,p=2, max_iter=10, lam=1, optim_lam = False, device="cuda"):
# if optim_lam == 'GD' or optim_lam == 'optimal':
# K = torch.cdist(first_samples, second_samples, p = 2.0)
# Delta = torch.max(K).requires_grad_(False)
# #print(Delta)
# n = first_samples.size(1)
# if optim_lam == 'GD':
# lamm = n**(-0.5)
# lamb = torch.tensor(lamm, requires_grad = True).to(device)
#
# f_op.add_param_group({'params':lamb})
# #lamb = nn.Parameter(lamm*torch.ones(1))
# #lamb = torch.ones((1), requires_grad=True, device=device) * lamm
# #lamb_optim = optim.Adam(lamb, lr=0.001, betas=(0.5, 0.999),weight_decay=0.)
# embedding_dim = first_samples.size(1)
# pro = rand_projections(embedding_dim, num_projections).to(device)
# #proj_prior = rand_projections(embedding_dim, num_projections).to(device)
# discriminator=discriminator.to(device)
# first_samples_detach = first_samples.detach()
# second_samples_detach = second_samples.detach()
# # learning the best distribution for max SWD under KL regularization
# # we actually learn a pushforward
# for _ in range(max_iter):
# projections = f(pro)
# # computing KL on the distribution of pushed projections vs projection
# # samples from the prior using an adversarial approach
# #discr = Discriminator(embedding_dim)
# #optimizer = optim.Adam(discr.parameters(),lr = 0.001,weight_decay =0.001)
# set_requires_grad(discriminator,True)
#
# kl_val = compute_kl(projections,prior_samples, discriminator, discrim_optim,device=device)
# if optim_lam == 'GD':
# reg = kl_val / (lamb.cpu().detach().numpy())
# lamb.requires_grad_(False)
# else:
# reg = kl_val/ lam
# set_requires_grad(discriminator,False)
#
# # computing SWD
# #set_requires_grad(projections,True)
# #projections.requires_grad_(True)
# encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
# distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
# wasserstein_distance = torch.abs(
# (
# torch.sort(encoded_projections.transpose(0, 1), dim=1)[0]
# - torch.sort(distribution_projections.transpose(0, 1), dim=1)[0]
# )
# )
# wasserstein_distance = torch.pow(torch.sum(torch.pow(wasserstein_distance, p), dim=1), 1.0 / p)
# wasserstein_distance = torch.pow(torch.pow(wasserstein_distance, p).mean(), 1.0 / p)
# loss = reg - wasserstein_distance
#
# f_op.zero_grad()
# loss.backward()
# f_op.step()
# #projections.requires_grad_(False)
# if optim_lam == 'GD':
# set_requires_grad(discriminator,False)
# lamb.requires_grad_(True)
# KL = np.asscalar(compute_kl(projections,prior_samples, discriminator, discrim_optim,optim= False,device=device).detach().cpu().numpy())
# #KL.requires_grad_(False).detach()
# loss_lamb = lamb * Delta**(2*p)/(4*n) + (KL+4.6)/lamb #4.6 = ln(10**2)
# f_op.zero_grad()
# loss_lamb.backward()
# f_op.step()
# if optim_lam == 'optimal':
# C_2 = Delta**(2*p)/4
# C_2 = C_2.cpu().numpy()
# K_1 = np.asscalar(compute_kl(projections,prior_samples, discriminator, discrim_optim,optim=False,device=device).detach().cpu().numpy())
# alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
# #print('alpha',alpha_0)
# lam = n**alpha_0
# #print('lam',lam)
#
# #print(lamb)
# #print(reg.item(),loss.item())
#
# projections = f(pro)
# encoded_projections = first_samples.matmul(projections.transpose(0, 1))
# distribution_projections = second_samples.matmul(projections.transpose(0, 1))
# wasserstein_distance = torch.abs((torch.sort(encoded_projections.transpose(0, 1), dim=1)[0] - torch.sort(distribution_projections.transpose(0, 1), dim=1)[0]))
# wasserstein_distance = torch.pow(torch.sum(torch.pow(wasserstein_distance, p), dim=1), 1.0 / p)
# wasserstein_distance = torch.pow(torch.pow(wasserstein_distance, p).mean(), 1.0 / p)
#
# return wasserstein_distance,projections
def PAC_SWD_before_adding_vmf(first_samples, second_samples, num_projections, prior_samples, lr = 0.05, p=2, max_iter=10, power_p = True, optim_lam = False, device="cuda", method= "NN"):
embedding_dim = first_samples.size(1)
pro = rand_projections(embedding_dim, num_projections).to(device)
first_samples_detach = first_samples.detach()
second_samples_detach = second_samples.detach()
#print(first_samples.shape,second_samples.shape)
f = TransformNet(embedding_dim).to(device)
f_op = optim.Adam(f.parameters(), lr=lr, betas=(0.5, 0.999),weight_decay=0.)
discriminator = Discriminator(embedding_dim).to(device)
discrim_optim = optim.Adam(discriminator.parameters(), lr=0.01, betas=(0.5, 0.999),weight_decay=0.00001)
if optim_lam == 'GD' or optim_lam == 'optimal':
K = torch.cdist(first_samples, second_samples, p = 2.0)
Delta = torch.max(K).detach().cpu().numpy()
n = first_samples.size(1)
lam = n**(0.5)
if optim_lam == 'GD':
alpha = torch.tensor(0.5, dtype = torch.float, device = device, requires_grad = True)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
#f_alpha = optim.Adam([alpha], lr=0.01, betas=(0.5, 0.999),weight_decay=0.)
f_op.add_param_group({'params': [alpha]})
else:
n = first_samples.size(1)
lam = n**(0.5)
for i in range(max_iter):
projections = f(pro)
set_requires_grad(discriminator,True)
kl_val = compute_kl(projections,prior_samples, discriminator, discrim_optim,device=device)
set_requires_grad(discriminator,False)
if optim_lam == 'GD':
reg = (kl_val+4.6) / lam + lam * (Delta**(2*p))/(4*n)
#print((kl_val + 4.6)/lam)
else:
reg = (kl_val + 4.6) / lam
encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
loss = reg - wasserstein_distance
f_op.zero_grad()
loss.backward(retain_graph = True)
f_op.step()
if optim_lam =='GD':
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
if optim_lam == 'optimal':
C_2 = Delta**(2*p)/4
K_1 = compute_kl(projections,prior_samples, discriminator, discrim_optim, optim=False, device=device).detach().cpu().numpy().item()
alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
lam = n**alpha_0
projections = f(pro).detach()
encoded_projections = first_samples.matmul(projections.transpose(0, 1))
distribution_projections = second_samples.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
return wasserstein_distance, projections
def PAC_SWD_vmf(first_samples, second_samples, num_projections, prior_samples, mu_t,kappa_t,lr = 0.001, p=2, max_iter=10, power_p = True, optim_lam = False, device="cuda", approx_vmf = True):
# if method == "vmf":
# device='cpu'
method = "vmf"
n = first_samples.size(0)
lam = n**(0.5)
dim = first_samples.size(1)
mu = torch.zeros(dim)
kappa = torch.tensor([1])
rho = unif.HypersphericalUniform(dim = dim - 1)
thetas = rho.sample(shape = num_projections)
pro = thetas.to(device)
first_samples_detach = first_samples.detach().to(device)
second_samples_detach = second_samples.detach().to(device)
if method == "NN":
f = TransformNet(dim).to(device)
f_op = optim.Adam(f.parameters(), lr=lr, betas=(0.5, 0.999), weight_decay=0.)
f_op.zero_grad()
discriminator = Discriminator(dim).to(device)
discrim_optim = optim.Adam(discriminator.parameters(), lr=0.01, betas=(0.5, 0.999), weight_decay=0.00001)
if method == "vmf":
#m1 = torch.mean(first_samples_detach.clone().cpu(), dim=0)
#m2 = torch.mean(second_samples_detach.clone().cpu(), dim=0)
#mu_t = torch.tensor(m1-m2, requires_grad=True, dtype=torch.float).to(device)
# mu_t = torch.zeros(dim, requires_grad=True)#,dtype=torch.float )
#mu_t = torch.randn(dim, requires_grad=True,device=device)
#kappa_t = torch.tensor([dim/2], requires_grad=True, dtype=torch.float,device=device) #torch.ones(1, requires_grad=True, dtype=torch.float)
kappa_pos = torch.exp(kappa_t)
params = [mu_t, kappa_t]
f_op = optim.Adam(params, lr=lr)
f_op.zero_grad()
rho_0 = unif.HypersphericalUniform(dim=dim-1,device=device)
#print(kappa_t)
if optim_lam == 'GD' or optim_lam == 'optimal':
K = torch.cdist(first_samples, second_samples, p = 2.0)
Delta = torch.max(K).detach().cpu().numpy()
if optim_lam == 'GD':
alpha = torch.tensor(0.5, dtype = torch.float, device = device, requires_grad = True)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
f_op.add_param_group({'params': [alpha]})
for i in range(max_iter):
if method == "NN":
thetas = f(pro)
set_requires_grad(discriminator,True)
kl_val = compute_kl(thetas,prior_samples, discriminator, discrim_optim,device=device)
set_requires_grad(discriminator,False)
wasserstein_distance = sliced_wasserstein_distance(first_samples_detach, second_samples_detach, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
#encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
#distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
#wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
elif method == "vmf":
#kappa_t = kappa_t.data.clamp_(min=1e-4, max = 1000)
#kappa_t = torch.clamp(kappa_t, min = 1.5e-3)
#if kappa_t.data == 0:
#print(kappa_t)
kappa_t.data.clamp_(min=1e-3)
if kappa_t.data < 1e-3:
print('Kappa is less than 1e-3, be careful!')
#kappa_t.data.clamp_(min=1e-3)
#kappa_t = torch.abs(kappa_t.clone())
rho_t = unif.HypersphericalUniform(dim=dim-1)
thetas = rho_t.sample(shape=num_projections).to(device)
#kappa_t = kappa_t *10
else:
#print('good to be here')
rho_t = vmf.VonMisesFisher(mu_t / torch.norm(mu_t), kappa_pos, approx=approx_vmf) # torch.abs(kappa_t))
thetas = rho_t.rsample(shape=num_projections).to(device)
wasserstein_distance = sliced_wasserstein_distance(first_samples_detach, second_samples_detach,
projections=thetas, num_projections=num_projections,
p=p, power_p = power_p, device=device)
#rho_t = vmf.VonMisesFisher(mu_t, kappa_t)
kl_val = torch.distributions.kl.kl_divergence(rho_t, rho_0).mean()
#print('kl',kl_val)
if optim_lam == 'GD':
reg = (kl_val + 4.6) / lam + lam * (Delta**(2*p))/(4*n)
else:
reg = kl_val / lam
f_op.zero_grad() #to uncomment if issues of graph
loss = - wasserstein_distance + reg
# if i % 50 == 0:
# print('kl',kl_val)
# print("method {}, value of reg {} and wass {}".format(method, reg,wasserstein_distance))
# if method == 'vmf':
# print(kappa_t)
loss.backward(retain_graph = True)
f_op.step()
if optim_lam =='GD':
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
elif optim_lam == 'optimal':
C_2 = Delta**(2*p)/4
if method =="NN":
K_1 = compute_kl(thetas,prior_samples, discriminator, discrim_optim, optim=False, device=device).detach().cpu().numpy().item()
if method =="vmf":
K_1 = kl_val.detach().cpu().numpy().item()
alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
lam = n**alpha_0
else:
pass
if method == "NN":
thetas = f(pro).detach()
wasserstein_distance = sliced_wasserstein_distance(first_samples, second_samples, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
if method == "vmf":
rho = vmf.VonMisesFisher(mu_t.detach(), kappa_pos.detach(), approx = approx_vmf)
thetas = rho.rsample(shape=num_projections).to(device)
wasserstein_distance = sliced_wasserstein_distance(first_samples, second_samples, projections = thetas, num_projections = num_projections, p=p, power_p = power_p, device=device)
#print(kappa_t)
#encoded_projections = first_samples.matmul(projections.transpose(0, 1))
#distribution_projections = second_samples.matmul(projections.transpose(0, 1))
#wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
return wasserstein_distance, thetas #projections
def PAC_SWD_nn(first_samples, second_samples, num_projections, prior_samples, f,f_op
,discriminator,discrim_optim, lr = 0.05, p=2, max_iter=10, power_p = True, optim_lam = False, device="cuda", method= "NN"):
embedding_dim = first_samples.size(1)
pro = rand_projections(embedding_dim, num_projections).to(device)
first_samples_detach = first_samples.detach()
second_samples_detach = second_samples.detach()
#print(first_samples.shape,second_samples.shape)
# #f = TransformNet(embedding_dim).to(device)
# #f_op = optim.Adam(f.parameters(), lr=lr, betas=(0.5, 0.999),weight_decay=0.)
# discriminator = Discriminator(embedding_dim).to(device)
# discrim_optim = optim.Adam(discriminator.parameters(), lr=0.01, betas=(0.5, 0.999),weight_decay=0.00001)
if optim_lam == 'GD' or optim_lam == 'optimal':
K = torch.cdist(first_samples, second_samples, p = 2.0)
#Delta = torch.max(K).detach().cpu().numpy()
Delta = torch.max(K).detach()
n = first_samples.size(1)
lam = n**(0.5)
if optim_lam == 'GD':
alpha = torch.tensor(0.5, dtype = torch.float, device = device, requires_grad = True)
lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
#f_alpha = optim.Adam([alpha], lr=0.01, betas=(0.5, 0.999),weight_decay=0.)
f_op.add_param_group({'params': [alpha]})
else:
n = first_samples.size(1)
lam = n**(0.5)
for i in range(max_iter):
projections = f(pro)
set_requires_grad(discriminator,True)
kl_val = compute_kl(projections,prior_samples, discriminator, discrim_optim,device=device)
set_requires_grad(discriminator,False)
if optim_lam == 'GD':
reg = (kl_val+4.6) / lam + lam * (Delta**(2*p))/(4*n)
#print((kl_val + 4.6)/lam)
else:
reg = (kl_val + 4.6) / lam
encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
loss = reg - wasserstein_distance
#alpha_old = alpha.detach()
f_op.zero_grad()
loss.backward(retain_graph = True)
f_op.step()
if optim_lam =='GD':
#lam = torch.pow(torch.tensor(n, dtype = torch.float, device = device, requires_grad = False), alpha)
lam = torch.pow(n, alpha)
if optim_lam == 'optimal':
C_2 = Delta**(2*p)/4
K_1 = compute_kl(projections,prior_samples, discriminator, discrim_optim, optim=False, device=device).detach().cpu().numpy().item()
alpha_0 = 0.5 + 1/(2*np.log(n)) * np.log(2*(K_1+4.6)/C_2)
lam = n**alpha_0
projections = f(pro).detach()
encoded_projections = first_samples.matmul(projections.transpose(0, 1))
distribution_projections = second_samples.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
return wasserstein_distance, projections
| 30,509 | 47.816 | 203 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/evaluate_vmf.py | import torch
from torch.distributions.multivariate_normal import MultivariateNormal
from utils import distributional_sliced_wasserstein_distance
from swd_pac import PAC_SWD
import matplotlib.pyplot as plt
import pickle
import os
from vmf_utils import hyperspherical_uniform as unif, von_mises_fisher
torch.manual_seed(10)
plt.rcParams.update({'font.size': 18})
def generate_data(n, d, type):
if type == "uniform":
X = torch.FloatTensor(n, d).uniform_(0, 5)
Y = torch.FloatTensor(n, d).uniform_(0, 5)
elif type == "gaussian":
Id = torch.eye(d)
mean = torch.zeros(d)
Sigma = torch.rand(d, d)
Sigma = torch.mm(Sigma, Sigma.t())
Sigma.add_(Id)
X = MultivariateNormal(mean, Sigma).rsample(sample_shape=torch.Size([n]))
Y = MultivariateNormal(mean, Sigma).rsample(sample_shape=torch.Size([n]))
else:
return "Not implemented."
return X, Y
def compute_vmf(kappas, n_samples, dims, n_runs, n_proj, order, datatype, device="cuda"):
vmf_sw = torch.zeros(size=(len(kappas), len(dims), len(n_samples), n_runs))
for ki in range(len(kappas)):
kappa = torch.tensor([kappas[ki]], dtype=torch.float)
for i in range(len(dims)):
d = dims[i]
print("Dimension = {}".format(d))
mu = torch.randn(d)
mu /= torch.norm(mu)
for j in range(len(n_samples)):
for nr in range(n_runs):
print("\t Run {}".format(nr+1))
X, Y = generate_data(n=n_samples[j], d=d, type=datatype)
vmf_sw[ki, i, j, nr] = PAC_SWD(
X, Y, n_proj, prior_samples=None, p=2, max_iter=0, power_p=True,
optim_lam=False, device=device, method="vmf", approx_vmf=False, mu_0=mu, kappa_0=kappa
)
            with open(datatype + "_vmf_sw", "wb") as f:
pickle.dump(vmf_sw, f, pickle.HIGHEST_PROTOCOL)
plt.plot(n_samples, torch.abs(vmf_sw[ki, i]).mean(axis=1), label="d={}, kappa={}".format(d, kappas[ki]))
plt.legend()
plt.show()
def exp_1():
dims = [5, 10, 50, 100]
n_samples = [50, 100, 500, 1000, 2000]
n_proj = 1000
type = "gaussian"
kappas = [0.1, 1, 10, 100]
n_runs = 30
compute_vmf(kappas=kappas, n_samples=n_samples, dims=dims, n_runs=n_runs, n_proj=n_proj, order=2, datatype=type)
file = open(type + "_vmf_sw", "rb")
vmf_sw = pickle.load(file)
# Study influence of kappas (impact on KL)
for di in range(len(dims)):
dim = dims[di]
plt.figure()
for ki in range(len(kappas)):
kappa = kappas[ki]
plt.plot(n_samples, vmf_sw[ki, di].mean(axis=1), label=r"$d={}, \kappa={}$".format(dim, kappa))
plt.fill_between(n_samples, vmf_sw[ki, di].quantile(q=0.1, axis=1), vmf_sw[ki, di].quantile(q=0.9, axis=1),
alpha=0.2)
plt.xlabel(r"$n$")
plt.ylabel(r"$SW_2^2(\mu_n, \nu_n ; vMF_{\kappa})$")
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.savefig(os.path.join("results_exp_synth", str(type) + "_vmf_sw_d=" + str(dim) + ".pdf"))
# Study influence of dimension (impact on diameter)
for ki in range(len(kappas)):
kappa = kappas[ki]
plt.figure()
for di in range(len(dims)):
dim = dims[di]
plt.plot(n_samples, vmf_sw[ki, di].mean(axis=1), label=r"$d={}, \kappa={}$".format(dim, kappa))
plt.fill_between(n_samples, vmf_sw[ki, di].quantile(q=0.1, axis=1), vmf_sw[ki, di].quantile(q=0.9, axis=1),
alpha=0.2)
plt.xlabel(r"$n$")
plt.ylabel(r"$SW_2^2(\mu_n, \nu_n ; vMF_{\kappa})$")
plt.xscale('log')
plt.yscale('log')
plt.legend()
plt.savefig(os.path.join("results_exp_synth", str(type) + "_vmf_sw_kap=" + str(kappa) + ".pdf"))
def exp_2(datatype, factors_y, dims, lbds, folder):
if not os.path.exists(folder):
os.makedirs(folder)
n_samples = 500
n_test = 2000
n_proj = 1000
max_iter = 1500
device = "cuda"
n_runs = 10
dsw_vmf_train = torch.zeros(size=(len(dims), len(factors_y), len(lbds), n_runs))
dsw_vmf_test = torch.zeros(size=(len(dims), len(factors_y), len(lbds), n_runs))
bound_dsw = torch.zeros(size=(len(dims), len(factors_y), len(lbds), n_runs))
pac_sw_vmf_train = torch.zeros(size=(len(dims), len(factors_y), n_runs))
pac_sw_vmf_test = torch.zeros(size=(len(dims), len(factors_y), n_runs))
bound_pac = torch.zeros(size=(len(dims), len(factors_y), n_runs))
for di in range(len(dims)):
dim = dims[di]
print("Dimension: {}".format(dim))
Id = torch.eye(dim)
mean_x = torch.zeros(dim)
mu = torch.randn(dim).to(device)
mu /= torch.norm(mu)
kappa = torch.tensor([1], dtype=torch.float).to(device)
if datatype == "gaussian":
# Generate random covariance matrix
Sigma_k = torch.rand(dim, dim)
Sigma_k = torch.mm(Sigma_k, Sigma_k.t())
Sigma_k.add_(torch.eye(dim))
for fi in range(len(factors_y)):
for nr in range(n_runs):
print("Run {}".format(nr + 1))
if datatype == "uniform":
X = torch.FloatTensor(n_samples, dim).uniform_(-1, 1)
Y = torch.FloatTensor(n_samples, dim).uniform_(-factors_y[fi], factors_y[fi])
Xtest = torch.FloatTensor(n_test, dim).uniform_(-1, 1)
Ytest = torch.FloatTensor(n_test, dim).uniform_(-factors_y[fi], factors_y[fi])
elif datatype == "gaussian":
# Generate data
X = MultivariateNormal(mean_x, Sigma_k).rsample(sample_shape=torch.Size([n_samples]))
Y = MultivariateNormal(factors_y[fi] * torch.ones(dim), Sigma_k).rsample(sample_shape=torch.Size([n_samples]))
Xtest = MultivariateNormal(mean_x, Sigma_k).rsample(sample_shape=torch.Size([n_test]))
Ytest = MultivariateNormal(factors_y[fi] * torch.ones(dim), Sigma_k).rsample(sample_shape=torch.Size([n_test]))
for li in range(len(lbds)):
print("Lambda {}".format(lbds[li]))
dsw_vmf_train[di, fi, li, nr], bound_dsw[di, fi, li, nr], (_, kappa_dsw) = distributional_sliced_wasserstein_distance(
X, Y, n_proj, f=None, f_op=None, p=2, power_p=True, max_iter=max_iter, lam=lbds[li],
device=device, method="vMF", mu_0=mu.clone(), kappa_0=kappa.clone(), approx_vmf=False
)
with open(os.path.join(folder, "dsw_train"), "wb") as f:
pickle.dump(dsw_vmf_train, f, pickle.HIGHEST_PROTOCOL)
with open(os.path.join(folder, "bound_dsw"), "wb") as f:
pickle.dump(bound_dsw, f, pickle.HIGHEST_PROTOCOL)
print("kappa dsw:{}".format(kappa_dsw))
dsw_vmf_test[di, fi, li, nr], _, _ = distributional_sliced_wasserstein_distance(
Xtest, Ytest, n_proj, f=None, f_op=None, p=2, power_p=True,
max_iter=0, lam=lbds[li], device=device, method="vMF", mu_0=mu.clone(), kappa_0=kappa_dsw.clone()
)
with open(os.path.join(folder, "dsw_test"), "wb") as f:
pickle.dump(dsw_vmf_test, f, pickle.HIGHEST_PROTOCOL)
print("DSW test:{}".format(dsw_vmf_test[di, fi, li, nr]))
print("DSW done. PAC-SW begins...")
pac_sw_vmf_train[di, fi, nr], bound_pac[di, fi, nr], (_, kappa_pac) = PAC_SWD(
X, Y, n_proj, prior_samples=None, p=2, max_iter=max_iter, power_p=True,
optim_lam=False, device=device, method="vmf", approx_vmf=False, mu_0=mu.clone(), kappa_0=kappa.clone()
)
print("PAC-SW done.")
print("kappa pac:{}".format(kappa_pac))
with open(os.path.join(folder, "pac_sw_train"), "wb") as f:
pickle.dump(pac_sw_vmf_train, f, pickle.HIGHEST_PROTOCOL)
with open(os.path.join(folder, "bound_pac"), "wb") as f:
pickle.dump(bound_pac, f, pickle.HIGHEST_PROTOCOL)
pac_sw_vmf_test[di, fi, nr], _, _ = PAC_SWD(
Xtest, Ytest, n_proj, prior_samples=None, p=2, max_iter=0, power_p=True,
optim_lam=False, device=device, method="vmf", approx_vmf=False, mu_0=mu.clone(), kappa_0=kappa_pac.clone()
)
print("PAC test:{}".format(pac_sw_vmf_test[di, fi, nr]))
with open(os.path.join(folder, "pac_sw_test"), "wb") as f:
pickle.dump(pac_sw_vmf_test, f, pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
# Generate Figure 1
exp_1()
# Generate Figure 2
folder = "exp_2"
datatype = "gaussian"
factors_y = [2, 3, 4, 5]
dims = [5, 20]
lbds = [1, 10, 100, 1000]
folder = os.path.join(folder, datatype)
exp_2(datatype, factors_y, dims, lbds, folder)
file = open(os.path.join(folder, "dsw_train"), "rb")
dsw_vmf_train = pickle.load(file)
file = open(os.path.join(folder, "dsw_test"), "rb")
dsw_vmf_test = pickle.load(file)
file = open(os.path.join(folder, "pac_sw_train"), "rb")
pac_sw_vmf_train = pickle.load(file)
file = open(os.path.join(folder, "pac_sw_test"), "rb")
pac_sw_vmf_test = pickle.load(file)
file = open(os.path.join(folder, "bound_pac"), "rb")
bound_pac = pickle.load(file)
file = open(os.path.join(folder, "bound_dsw"), "rb")
bound_dsw = pickle.load(file)
for di in range(len(dims)):
plt.figure(figsize=(8,4))
plt.grid()
for li in range(len(lbds)):
# Train set
plt.plot(factors_y, dsw_vmf_train[di, :, li].detach().numpy().mean(axis=1), label=r"DSW train".format(lbds[li]), c="blue") #$\lambda={}$
plt.fill_between(factors_y, dsw_vmf_train[di, :, li].detach().quantile(q=0.1, axis=1), dsw_vmf_train[di, :, li].detach().quantile(q=0.9, axis=1), alpha=0.2, facecolor="blue")
# Test set
plt.plot(factors_y, dsw_vmf_test[di, :, li].detach().numpy().mean(axis=1), label=r"DSW test".format(lbds[li]), ls="--", c="blue")
plt.fill_between(factors_y, dsw_vmf_test[di, :, li].detach().quantile(q=0.1, axis=1), dsw_vmf_test[di, :, li].detach().quantile(q=0.9, axis=1), alpha=0.2, facecolor="blue")
# Plot DSW bound
plt.plot(factors_y, bound_dsw[di, :, li].detach().numpy().mean(axis=1), label=r"DSW bound".format(lbds[li]), ls="-.", c="orange")
plt.fill_between(factors_y, bound_dsw[di, :, li].detach().quantile(q=0.1, axis=1), bound_dsw[di, :, li].detach().quantile(q=0.9, axis=1), alpha=0.2, facecolor="red")
plt.plot(factors_y, pac_sw_vmf_train[di].detach().numpy().mean(axis=1), label="PAC-SW train", c="red")
plt.fill_between(factors_y, pac_sw_vmf_train[di].detach().quantile(q=0.1, axis=1), pac_sw_vmf_train[di].detach().quantile(q=0.9, axis=1), alpha=0.2, facecolor="red")
plt.plot(factors_y, pac_sw_vmf_test[di].detach().numpy().mean(axis=1), label="PAC-SW test", ls="--", c="red")
plt.fill_between(factors_y, pac_sw_vmf_test[di].detach().quantile(q=0.1, axis=1), pac_sw_vmf_test[di].detach().quantile(q=0.9, axis=1), alpha=0.2, color="red")
# Plot PAC Bound
plt.plot(factors_y, bound_pac[di].detach().numpy().mean(axis=1), label="PAC bound", ls="-.", c="green")
plt.fill_between(factors_y, bound_pac[di].detach().quantile(q=0.1, axis=1), bound_pac[di].detach().quantile(q=0.9, axis=1), alpha=0.2, facecolor="green")
plt.xlabel(r"$\gamma$")
if di == 0:
plt.legend()
plt.tight_layout()
plt.savefig(os.path.join(folder, "d=" + str(dims[di]) + ".pdf"))
| 12,216 | 47.868 | 186 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/utils.py | import numpy as np
import ot
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from torch import optim
from vmf_utils import hyperspherical_uniform as unif, von_mises_fisher as vmf
def Wasserstein_1D(mu_n, nu_n, p = 2, power_p = True):
wasserstein_distance = torch.abs((torch.sort(mu_n, dim = 0)[0]* 1.0 - torch.sort(nu_n,dim = 0)[0]* 1.0))
if power_p == True:
wasserstein_distance = torch.mean(torch.pow(wasserstein_distance, p), dim=0).mean()
else:
wasserstein_distance = torch.pow(torch.mean(torch.pow(wasserstein_distance, p), dim=0).mean(), 1.0 / p)
return wasserstein_distance
def rand_projections(dim, num_projections=1000):
projections = torch.randn((num_projections, dim))
projections = projections / torch.sqrt(torch.sum(projections ** 2, dim=1, keepdim=True))
return projections
def sliced_wasserstein_distance(first_samples, second_samples, projections = None, num_projections=1000, p=2, power_p = True, device="cuda"):
first_samples = first_samples.to(device)
second_samples = second_samples.to(device)
dim = second_samples.size(1)
    if projections is None:
        projections = rand_projections(dim, num_projections).to(device)
first_projections = torch.matmul(first_samples,projections.transpose(0, 1))
second_projections = torch.matmul(second_samples, projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(first_projections, second_projections, p = p, power_p = power_p)
return wasserstein_distance
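# Hedged example (an addition, not part of the original file): the sliced distance between a
# point cloud and itself is zero, while shifting one cloud by a constant vector increases it.
# Only functions defined above are used, and nothing runs at import time.
def _example_sliced_wasserstein(n=200, d=5):
    x = torch.randn(n, d)
    same = sliced_wasserstein_distance(x, x, num_projections=100, device="cpu")
    shifted = sliced_wasserstein_distance(x, x + 1.0, num_projections=100, device="cpu")
    return same.item(), shifted.item()  # ~0.0 and a strictly positive value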
def max_sliced_wasserstein_distance(first_samples, second_samples, lr = 0.01, p=2, max_iter=100, power_p = True, device="cuda"):
theta = torch.randn((1, first_samples.shape[1]), device=device, requires_grad=True)
theta.data = theta.data / torch.sqrt(torch.sum(theta.data ** 2, dim=1))
opt = torch.optim.Adam([theta], lr=lr)
for _ in range(max_iter):
encoded_projections = torch.matmul(first_samples, theta.transpose(0, 1))
distribution_projections = torch.matmul(second_samples, theta.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
l = -wasserstein_distance
opt.zero_grad()
l.backward(retain_graph=True)
opt.step()
theta.data = theta.data / torch.sqrt(torch.sum(theta.data ** 2, dim=1))
return wasserstein_distance,theta
def distributional_sliced_wasserstein_distance(first_samples, second_samples, num_projections, f, f_op, p=2, power_p=True, max_iter=10, lam=1, device="cpu", method="NN", mu_0=None, kappa_0=None, approx_vmf=False):
first_samples = first_samples.to(device)
second_samples = second_samples.to(device)
first_samples_detach = first_samples.detach()
second_samples_detach = second_samples.detach()
embedding_dim = first_samples.size(1)
if method == "NN":
pro = rand_projections(embedding_dim, num_projections).to(device)
f = f.to(device)
elif method == "vMF":
mu_t = mu_0
# mu_t.requires_grad = True
kappa_t = kappa_0
kappa_t.requires_grad = True
f_op = optim.Adam([kappa_t], lr=0.001)
for i in range(max_iter):
if method == "NN":
projections = f(pro)
elif method == "vMF":
vmf_t = vmf.VonMisesFisher(loc=mu_t/torch.norm(mu_t), scale=kappa_t, approx=approx_vmf)
projections = vmf_t.rsample(shape=num_projections).to(device)
cos = cosine_distance_torch(projections, projections)
reg = lam * cos
encoded_projections = first_samples_detach.matmul(projections.transpose(0, 1))
distribution_projections = second_samples_detach.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p=p, power_p=power_p)
loss = reg - wasserstein_distance
if i % 100 == 0:
print("Iteration {}".format(i))
print("\t SW:{}".format(wasserstein_distance))
print("\t Regularization:{}".format(reg))
f_op.zero_grad()
loss.backward(retain_graph=True)
f_op.step()
if method == "NN":
projections = f(pro)
elif method == "vMF":
vmf_t = vmf.VonMisesFisher(loc=mu_t/torch.norm(mu_t), scale=kappa_t, approx=approx_vmf)
projections = vmf_t.rsample(shape=num_projections).to(device)
encoded_projections = first_samples.matmul(projections.transpose(0, 1))
distribution_projections = second_samples.matmul(projections.transpose(0, 1))
wasserstein_distance = Wasserstein_1D(encoded_projections, distribution_projections, p = p, power_p = power_p)
cos = cosine_distance_torch(projections, projections)
reg = lam * cos
objective = wasserstein_distance - reg
if method == "NN":
return wasserstein_distance, projections
elif method == "vMF":
final_mu = (mu_t/torch.norm(mu_t)).detach()
final_kappa = kappa_t.detach()
return wasserstein_distance, objective, (final_mu, final_kappa)
return wasserstein_distance, projections
def cosine_distance_torch(x1, x2=None, eps=1e-8):
x2 = x1 if x2 is None else x2
w1 = x1.norm(p=2, dim=1, keepdim=True)
w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
return torch.mean(torch.abs(torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps)))
def cosine_sum_distance_torch(x1, x2=None, eps=1e-8):
x2 = x1 if x2 is None else x2
w1 = x1.norm(p=2, dim=1, keepdim=True)
w2 = w1 if x2 is x1 else x2.norm(p=2, dim=1, keepdim=True)
return torch.mean(torch.mm(x1, x2.t()) / (w1 * w2.t()).clamp(min=eps))
def cost_matrix(encoded_samples, distribution_samples, p=2):
    n = encoded_samples.size(0)
    m = distribution_samples.size(0)
    d = encoded_samples.size(1)
    x = encoded_samples.unsqueeze(1).expand(n, m, d)
y = distribution_samples.unsqueeze(0).expand(n, m, d)
C = torch.pow(torch.abs(x - y), p).sum(2)
return C
def cost_matrix_slow(x, y):
"""
    Input: x is an Nxd matrix
           y is an optional Mxd matrix
    Output: dist is an NxM matrix where dist[i,j] is the squared norm between x[i,:] and y[j,:];
            if y is not given then 'y=x' is used,
            i.e. dist[i,j] = ||x[i,:]-y[j,:]||^2
"""
x_norm = (x ** 2).sum(1).view(-1, 1)
if y is not None:
y_t = torch.transpose(y, 0, 1)
y_norm = (y ** 2).sum(1).view(1, -1)
else:
y_t = torch.transpose(x, 0, 1)
y_norm = x_norm.view(1, -1)
dist = x_norm + y_norm - 2.0 * torch.mm(x, y_t)
# Ensure diagonal is zero if x=y
# if y is None:
# dist = dist - torch.diag(dist.diag)
return torch.clamp(dist, 0.0, np.inf)
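# Hedged sanity check (an addition, not part of the original repository): cost_matrix_slow
# should agree with the squared pairwise Euclidean distances computed by torch.cdist.
def _check_cost_matrix_slow(n=4, m=3, d=5):
    x, y = torch.randn(n, d), torch.randn(m, d)
    return torch.allclose(cost_matrix_slow(x, y), torch.cdist(x, y, p=2) ** 2, atol=1e-5)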
def compute_true_Wasserstein(X, Y, p=2):
M = ot.dist(X, Y)
a = np.ones((X.shape[0],)) / X.shape[0]
b = np.ones((Y.shape[0],)) / Y.shape[0]
return ot.emd2(a, b, M)
def save_dmodel(model, optimizer, dis, disoptimizer, tnet, optnet, epoch, folder):
dictionary = {}
dictionary["epoch"] = epoch
dictionary["model"] = model.state_dict()
dictionary["optimizer"] = optimizer.state_dict()
if not (disoptimizer is None):
dictionary["dis"] = dis.state_dict()
dictionary["disoptimizer"] = disoptimizer.state_dict()
else:
dictionary["dis"] = None
dictionary["disoptimizer"] = None
if not (tnet is None):
dictionary["tnet"] = tnet.state_dict()
dictionary["optnet"] = optnet.state_dict()
else:
dictionary["tnet"] = None
dictionary["optnet"] = None
torch.save(dictionary, folder + "/model.pth")
def load_dmodel(folder):
dictionary = torch.load(folder + "/model.pth")
return (
dictionary["epoch"],
dictionary["model"],
dictionary["optimizer"],
dictionary["tnet"],
dictionary["optnet"],
dictionary["dis"],
dictionary["disoptimizer"],
)
def compute_Wasserstein(x, y, device, p=2):
M = cost_matrix(x, y, p)
pi = ot.emd([], [], M.cpu().detach().numpy())
pi = torch.from_numpy(pi).to(device)
return torch.sum(pi * M)
def make_spiral(n_samples, noise=.5):
n = np.sqrt(np.random.rand(n_samples,1)) * 780 * (2*np.pi)/360
d1x = -np.cos(n)*n + np.random.rand(n_samples,1) * noise
d1y = np.sin(n)*n + np.random.rand(n_samples,1) * noise
return np.array(np.hstack((d1x,d1y)))
get_rot= lambda theta : np.array([[np.cos(theta), -np.sin(theta)],[np.sin(theta),np.cos(theta)]])
def get_data(n_samples,theta,scale=1,transla=0):
Xs = make_spiral(n_samples=n_samples, noise=1)-transla
Xt = make_spiral(n_samples=n_samples, noise=1)
A=get_rot(theta)
Xt = (np.dot(Xt,A))*scale+transla
return Xs,Xt
def spiral(N=100,phi=0):
    theta = np.sqrt(np.random.rand(N))*4*np.pi # np.linspace(0,2*np.pi,100)
    r_a = theta/2 + np.pi
data_a = np.array([np.cos(theta + phi)*r_a, np.sin(theta+phi)*r_a]).T
x_a = data_a + np.random.randn(N,2)*0.3
return torch.from_numpy(x_a).float()
def prior_similarity_vector(maxi,dim,nb_samples,tau, device):
prior_samples = rand_projections(dim, nb_samples*10).to(device)
inner_prod = prior_samples@maxi.detach().T
ind = torch.where(torch.abs(inner_prod)> 1 - tau)[0]
return prior_samples[ind]
class TransformNet(nn.Module):
"""
    Usually used for changing the distribution of the random projection directions;
    the output is re-normalized onto the unit sphere. (An illustrative usage sketch
    follows the class definition.)
"""
def __init__(self, size):
super(TransformNet, self).__init__()
self.size = size
self.net = nn.Sequential(nn.Linear(self.size,self.size),
nn.LeakyReLU(),
nn.Linear(self.size,self.size),
nn.LeakyReLU(),
nn.Linear(self.size,self.size))
def forward(self, input):
out =self.net(input)
return out/torch.sqrt(torch.sum(out**2,dim=1,keepdim=True))
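# Illustrative sketch (an addition, not from the original file): TransformNet re-parameterizes
# a batch of random directions while keeping every output row on the unit sphere, so its output
# can be fed to sliced_wasserstein_distance through the `projections` argument.
def _example_transform_net(dim=10, num_projections=8):
    net = TransformNet(dim)
    dirs = net(rand_projections(dim, num_projections))
    return dirs.norm(dim=1)  # approximately a vector of ones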
class TransformLatenttoOrig(nn.Module):
"""
    Used for mapping the random projection vector into the ambient space
    of the distribution. (An illustrative usage sketch follows the class.)
"""
def __init__(self, dim_latent,dim_orig,dim_hidden=10):
super(TransformLatenttoOrig, self).__init__()
self.dim_latent = dim_latent
self.dim_orig = dim_orig
self.dim_hidden = dim_hidden
self.net = nn.Sequential(nn.Linear(self.dim_latent,self.dim_hidden),
#nn.Sigmoid(),
nn.ReLU(),
nn.Linear(self.dim_hidden,self.dim_hidden),
#nn.Sigmoid(),
nn.ReLU(),
nn.Linear(self.dim_hidden,self.dim_hidden),
#nn.Sigmoid(),
nn.ReLU(),
nn.Linear(self.dim_hidden, self.dim_orig),
nn.Sigmoid(),
#nn.ReLU(),
)
def forward(self, input):
out =self.net(input)
return out/torch.sqrt(torch.sum(out**2,dim=1,keepdim=True))
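# Illustrative sketch (an addition, not from the original file): TransformLatenttoOrig lifts
# low-dimensional latent directions into the (unit-normalized) ambient space of the data.
def _example_latent_to_orig(dim_latent=3, dim_orig=784, n=8):
    net = TransformLatenttoOrig(dim_latent, dim_orig, dim_hidden=16)
    z = rand_projections(dim_latent, n)
    return net(z).shape  # torch.Size([8, 784])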
def circular_function(x1, x2, theta, r, p):
cost_matrix_1 = torch.sqrt(cost_matrix_slow(x1, theta * r))
cost_matrix_2 = torch.sqrt(cost_matrix_slow(x2, theta * r))
wasserstein_distance = torch.abs(
(torch.sort(cost_matrix_1.transpose(0, 1), dim=1)[0] - torch.sort(cost_matrix_2.transpose(0, 1), dim=1)[0])
)
wasserstein_distance = torch.pow(torch.mean(torch.pow(wasserstein_distance, p), dim=1).mean(), 1.0 / p)
return wasserstein_distance
def rescale(X):
maxx = torch.max(X, dim = 1)[0].reshape(-1,1)
minn = torch.min(X, dim = 1)[0].reshape(-1,1)
return (X - minn) / (maxx - minn)
def generateFMNIST(class_1 = 4, class_2 = 5, scale = True, device = "cuda"):
transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,),(0.5,),)])
training_set = datasets.FashionMNIST(root="data",train=True,download=True,transform=transform)
test_set = datasets.FashionMNIST(root="data",train=False,download=True,transform=transform)
class_1 = class_1
class_2 = class_2
Xt_train = training_set.data[(training_set.targets==class_1)]
Xt_train = Xt_train.reshape(Xt_train.shape[0], Xt_train.shape[1]*Xt_train.shape[2])
Xs_train = training_set.data[(training_set.targets==class_2)]
Xs_train = Xs_train.reshape(Xs_train.shape[0], Xs_train.shape[1]*Xs_train.shape[2])
Xt_test= test_set.data[(test_set.targets==class_1)]
Xt_test = Xt_test.reshape(Xt_test.shape[0], Xt_test.shape[1]*Xt_test.shape[2])
Xs_test = test_set.data[(test_set.targets==class_2)]
Xs_test = Xs_test.reshape(Xs_test.shape[0], Xs_test.shape[1]*Xs_test.shape[2])
if scale is True:
Xt_train = rescale(Xt_train.float().to(device))
Xs_train = rescale(Xs_train.float().to(device))
Xt_test = rescale(Xt_test.float().to(device))
Xs_test = rescale(Xs_test.float().to(device))
return Xt_train, Xs_train, Xt_test, Xs_test
| 13,452 | 39.158209 | 213 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/vmf_utils/von_mises_fisher.py | # Credit: https://github.com/nicola-decao/s-vae-pytorch
import math
import torch
from torch.distributions.kl import register_kl
from vmf_utils.ive import ive, ive_fraction_approx3
from vmf_utils.hyperspherical_uniform import HypersphericalUniform
class VonMisesFisher(torch.distributions.Distribution):
arg_constraints = {
"loc": torch.distributions.constraints.real,
"scale": torch.distributions.constraints.positive,
}
support = torch.distributions.constraints.real
has_rsample = True
_mean_carrier_measure = 0
@property
def mean(self):
# option 1:
return self.loc * (
ive(self.__m / 2, self.scale) / ive(self.__m / 2 - 1, self.scale))
@property
def stddev(self):
return self.scale
def __init__(self, loc, scale, validate_args=None, k=1, approx=False):
self.dtype = loc.dtype
self.loc = loc
self.scale = scale
self.device = loc.device
self.__m = loc.shape[-1]
self.__e1 = (torch.Tensor([1.0] + [0] * (loc.shape[-1] - 1))).to(self.device)
self.k = k
self.approx = approx
super().__init__(self.loc.size(), validate_args=validate_args)
def sample(self, shape=torch.Size()):
with torch.no_grad():
return self.rsample(shape)
def rsample(self, shape=torch.Size()):
shape = shape if isinstance(shape, torch.Size) else torch.Size([shape])
w = (
self.__sample_w3(shape=shape)
if self.__m == 3
else self.__sample_w_rej(shape=shape))
v = (torch.distributions.Normal(0, 1)
.sample(shape + torch.Size(self.loc.shape))
.to(self.device)
.transpose(0, -1)[1:]
).transpose(0, -1)
v = v / v.norm(dim=-1, keepdim=True)
w_ = torch.sqrt(torch.clamp(1 - (w ** 2), 1e-10))
x = torch.cat((w, w_ * v), -1)
z = self.__householder_rotation(x)
return z.type(self.dtype)
def __sample_w3(self, shape):
shape = shape + torch.Size(self.scale.shape)
u = torch.distributions.Uniform(0, 1).sample(shape).to(self.device)
self.__w = (1 + torch.stack(
[torch.log(u), torch.log(1 - u) - 2 * self.scale], dim=0).logsumexp(0)
/ self.scale)
return self.__w
def __sample_w_rej(self, shape):
c = torch.sqrt((4 * (self.scale ** 2)) + (self.__m - 1) ** 2)
b_true = (-2 * self.scale + c) / (self.__m - 1)
        # using Taylor approximation with a smooth switch from 10 < scale < 11
# to avoid numerical errors for large scale
b_app = (self.__m - 1) / (4 * self.scale)
s = torch.min(
torch.max(
torch.tensor([0.0], dtype=self.dtype, device=self.device),
self.scale - 10,),
torch.tensor([1.0], dtype=self.dtype, device=self.device),)
b = b_app * s + b_true * (1 - s)
a = (self.__m - 1 + 2 * self.scale + c) / 4
d = (4 * a * b) / (1 + b) - (self.__m - 1) * math.log(self.__m - 1)
self.__b, (self.__e, self.__w) = b, self.__while_loop(b, a, d, shape, k=self.k)
return self.__w
@staticmethod
def first_nonzero(x, dim, invalid_val=-1):
mask = x > 0
idx = torch.where(
mask.any(dim=dim),
mask.float().argmax(dim=1).squeeze(),
torch.tensor(invalid_val, device=x.device),
)
return idx
def __while_loop(self, b, a, d, shape, k=20, eps=1e-20):
# matrix while loop: samples a matrix of [A, k] samples, to avoid looping all together
b, a, d = [
e.repeat(*shape, *([1] * len(self.scale.shape))).reshape(-1, 1)
for e in (b, a, d)
]
w, e, bool_mask = (
torch.zeros_like(b).to(self.device),
torch.zeros_like(b).to(self.device),
(torch.ones_like(b) == 1).to(self.device),
)
sample_shape = torch.Size([b.shape[0], k])
shape = shape + torch.Size(self.scale.shape)
while bool_mask.sum() != 0:
con1 = torch.tensor((self.__m - 1) / 2, dtype=torch.float64)
con2 = torch.tensor((self.__m - 1) / 2, dtype=torch.float64)
e_ = (
torch.distributions.Beta(con1, con2)
.sample(sample_shape)
.to(self.device)
.type(self.dtype)
)
u = (
torch.distributions.Uniform(0 + eps, 1 - eps)
.sample(sample_shape)
.to(self.device)
.type(self.dtype)
)
w_ = (1 - (1 + b) * e_) / (1 - (1 - b) * e_)
t = (2 * a * b) / (1 - (1 - b) * e_)
accept = ((self.__m - 1.0) * t.log() - t + d) > torch.log(u)
accept_idx = self.first_nonzero(accept, dim=-1, invalid_val=-1).unsqueeze(1)
accept_idx_clamped = accept_idx.clamp(0)
# we use .abs(), in order to not get -1 index issues, the -1 is still used afterwards
w_ = w_.gather(1, accept_idx_clamped.view(-1, 1))
e_ = e_.gather(1, accept_idx_clamped.view(-1, 1))
reject = accept_idx < 0
accept = ~reject if torch.__version__ >= "1.2.0" else 1 - reject
w[bool_mask * accept] = w_[bool_mask * accept]
e[bool_mask * accept] = e_[bool_mask * accept]
bool_mask[bool_mask * accept] = reject[bool_mask * accept]
return e.reshape(shape), w.reshape(shape)
def __householder_rotation(self, x):
u = self.__e1 - self.loc
u = u / (u.norm(dim=-1, keepdim=True) + 1e-5)
z = x - 2 * (x * u).sum(-1, keepdim=True) * u
return z
def entropy(self):
if not self.approx:
# option 1:
output = (
-self.scale
* ive(self.__m / 2, self.scale)
/ ive((self.__m / 2) - 1, self.scale)
)
else:
output = - self.scale * ive_fraction_approx3(torch.tensor(self.__m / 2), self.scale)
return output + self._log_normalization()
def log_prob(self, x):
return self._log_unnormalized_prob(x) - self._log_normalization()
def _log_unnormalized_prob(self, x):
output = self.scale * (self.loc * x).sum(-1, keepdim=True)
return output
def _log_normalization(self):
if not self.approx:
output = -(
(self.__m / 2 - 1) * torch.log(self.scale)
- (self.__m / 2) * math.log(2 * math.pi)
- (torch.log(ive(self.__m / 2 - 1, self.scale)))
)
else:
# Approximation (source: https://arxiv.org/pdf/2103.15718.pdf)
term_1 = torch.sqrt(((self.__m - 1)/2) ** 2 + self.scale ** 2)
term_2 = torch.sqrt(((self.__m + 1) / 2) ** 2 + self.scale ** 2)
log_term_1 = torch.log((self.__m - 1) / 2 + term_1)
log_term_2 = torch.log((self.__m - 1) / 2 + term_2)
output = (self.__m - 1) / 4 * (log_term_1 + log_term_2) - 0.5 * (term_1 + term_2)
return output
@register_kl(VonMisesFisher, HypersphericalUniform)
def _kl_vmf_uniform(vmf, hyu):
return -vmf.entropy() + hyu.entropy()
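# Hedged usage sketch (an addition, not part of the original file): draw reparameterized
# samples from a von Mises-Fisher distribution and check that they lie on the unit sphere.
def _example_vmf_sampling(dim=5, kappa=10.0, n=4):
    mu = torch.randn(dim)
    mu = mu / mu.norm()
    q = VonMisesFisher(loc=mu, scale=torch.tensor([kappa]))
    z = q.rsample(torch.Size([n]))
    return z.norm(dim=-1)  # approximately a vector of ones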
| 7,267 | 34.627451 | 97 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/vmf_utils/hyperspherical_uniform.py | # Credit: https://github.com/nicola-decao/s-vae-pytorch
import math
import torch
class HypersphericalUniform(torch.distributions.Distribution):
arg_constraints = {}
support = torch.distributions.constraints.real
has_rsample = False
_mean_carrier_measure = 0
@property
def dim(self):
return self._dim
@property
def device(self):
return self._device
@device.setter
def device(self, val):
self._device = val if isinstance(val, torch.device) else torch.device(val)
def __init__(self, dim, validate_args=None, device="cpu"):
super(HypersphericalUniform, self).__init__(
torch.Size([dim]), validate_args=validate_args
)
self._dim = dim
self.device = device
def sample(self, shape=torch.Size()):
output = (
torch.distributions.Normal(0, 1)
.sample(
(shape if isinstance(shape, torch.Size) else torch.Size([shape]))
+ torch.Size([self._dim + 1])
)
.to(self.device)
)
return output / output.norm(dim=-1, keepdim=True)
def entropy(self):
return self.__log_surface_area()
def log_prob(self, x):
return -torch.ones(x.shape[:-1], device=self.device) * self.__log_surface_area()
def __log_surface_area(self):
if torch.__version__ >= "1.0.0":
lgamma = torch.lgamma(torch.tensor([(self._dim + 1) / 2]).to(self.device))
else:
lgamma = torch.lgamma(
torch.Tensor([(self._dim + 1) / 2], device=self.device)
)
return math.log(2) + ((self._dim + 1) / 2) * math.log(math.pi) - lgamma | 1,699 | 27.813559 | 88 | py |
PAC-Bayesian_Sliced-Wasserstein | PAC-Bayesian_Sliced-Wasserstein-main/vmf_utils/ive.py | # Credit: https://github.com/nicola-decao/s-vae-pytorch
import torch
import numpy as np
import scipy.special
from numbers import Number
class IveFunction(torch.autograd.Function):
@staticmethod
def forward(self, v, z):
assert isinstance(v, Number), "v must be a scalar"
self.save_for_backward(z)
self.v = v
z_cpu = z.data.cpu().numpy()
if np.isclose(v, 0):
output = scipy.special.i0e(z_cpu, dtype=z_cpu.dtype)
elif np.isclose(v, 1):
output = scipy.special.i1e(z_cpu, dtype=z_cpu.dtype)
else: # v > 0
output = scipy.special.ive(v, z_cpu, dtype=z_cpu.dtype)
return torch.Tensor(output).to(z.device)
@staticmethod
def backward(self, grad_output):
z = self.saved_tensors[-1]
return (
None,
grad_output * (ive(self.v - 1, z) - ive(self.v, z) * (self.v + z) / z),
)
class Ive(torch.nn.Module):
def __init__(self, v):
super(Ive, self).__init__()
self.v = v
def forward(self, z):
return ive(self.v, z)
ive = IveFunction.apply
##########
# The approximations below were provided in the respective source
# papers to improve the numerical stability of Bessel-function
# fractions of the form
# I_(v/2)(k) / I_(v/2 - 1)(k)
# source: https://arxiv.org/pdf/1606.02008.pdf
def ive_fraction_approx(v, z):
return z / (v - 1 + torch.pow(torch.pow(v + 1, 2) + torch.pow(z, 2), 0.5))
# source: https://arxiv.org/pdf/1902.02603.pdf
def ive_fraction_approx2(v, z, eps=1e-20):
def delta_a(a):
lamb = v + (a - 1.0) / 2.0
return (v - 0.5) + lamb / (
2 * torch.sqrt((torch.pow(lamb, 2) + torch.pow(z, 2)).clamp(eps))
)
delta_0 = delta_a(0.0)
delta_2 = delta_a(2.0)
B_0 = z / (
delta_0 + torch.sqrt((torch.pow(delta_0, 2) + torch.pow(z, 2))).clamp(eps)
)
B_2 = z / (
delta_2 + torch.sqrt((torch.pow(delta_2, 2) + torch.pow(z, 2))).clamp(eps)
)
return (B_0 + B_2) / 2.0
# source: https://arxiv.org/pdf/2103.15718.pdf
def ive_fraction_approx3(v, z):
lbound = z / ((v - 1)/2 + torch.pow(torch.pow((v + 1)/2, 2) + torch.pow(z, 2), 0.5))
ubound = z / ((v - 1)/2 + torch.pow(torch.pow((v - 1)/2, 2) + torch.pow(z, 2), 0.5))
return 0.5 * (lbound + ubound)
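# Hedged sanity check (an addition, not part of the original file): for order 0 the custom
# autograd `ive` should match scipy.special.i0e, the exponentially scaled Bessel function
# its forward pass wraps.
def _check_ive_order_zero():
    z = torch.linspace(0.5, 20.0, steps=5)
    ref = torch.from_numpy(scipy.special.i0e(z.numpy()))
    return torch.allclose(ive(0, z), ref.float(), atol=1e-6)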
| 2,327 | 26.388235 | 88 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/knowledge_aware/load_data.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import collections
import os
import numpy as np
import random as rd
import torch
import torch.utils.data
from torch.utils.data import Dataset
import math
class Data(Dataset):
def __init__(self, args, path, batch_style='list'):
super(Data).__init__()
self.batch_styles = {'list':0,'map':1}
assert batch_style in list(self.batch_styles.keys()), f'Error: got {batch_style} but valid batch styles are {list(self.batch_styles.keys())}'
self.path = path
self.args = args
self.batch_style = batch_style
self.batch_style_id = self.batch_styles[self.batch_style]
self.batch_size = args.batch_size
train_file = os.path.join(path,'train.txt')
valid_file = os.path.join(path, 'valid.txt')
test_file = os.path.join(path, 'test.txt')
kg_file = os.path.join(path, 'kg_final.txt')
# ----------get number of users and items & then load rating data from train_file & test_file------------.
self.n_train, self.n_valid, self.n_test = 0, 0, 0
self.n_users, self.n_items = 0, 0
self.train_data, self.train_user_dict = self._load_ratings(train_file)
self.valid_data, self.valid_user_dict = self._load_ratings(valid_file)
self.test_data, self.test_user_dict = self._load_ratings(test_file)
self.exist_users = list(self.train_user_dict.keys())
self.N_exist_users = len(self.exist_users)
self._statistic_ratings()
# ----------get number of entities and relations & then load kg data from kg_file ------------.
self.n_relations, self.n_entities, self.n_triples = 0, 0, 0
self.kg_data, self.kg_dict, self.relation_dict = self._load_kg(kg_file)
# ----------print the basic info about the dataset-------------.
self.batch_size_kg = self.n_triples // (self.n_train // self.batch_size)
self._print_data_info()
# reading train & test interaction data.
def _load_ratings(self, file_name):
user_dict = dict()
inter_mat = list()
lines = open(file_name, 'r').readlines()
for l in lines:
tmps = l.strip()
inters = [int(i) for i in tmps.split(' ')]
u_id, pos_ids = inters[0], inters[1:]
pos_ids = list(set(pos_ids))
for i_id in pos_ids:
inter_mat.append([u_id, i_id])
if len(pos_ids) > 0:
user_dict[u_id] = pos_ids
return np.array(inter_mat), user_dict
def _statistic_ratings(self):
self.n_users = max(max(self.train_data[:, 0]), max(self.test_data[:, 0])) + 1
self.n_items = max(max(max(self.train_data[:, 1]), max(self.valid_data[:, 1]) ),
max(self.test_data[:, 1])) + 1
self.n_train = len(self.train_data)
self.n_valid = len(self.valid_data)
self.n_test = len(self.test_data)
# reading train & test interaction data.
def _load_kg(self, file_name):
def _construct_kg(kg_np):
kg = collections.defaultdict(list)
rd = collections.defaultdict(list)
for head, relation, tail in kg_np:
kg[head].append((tail, relation))
rd[relation].append((head, tail))
return kg, rd
kg_np = np.loadtxt(file_name, dtype=np.int32)
kg_np = np.unique(kg_np, axis=0)
# self.n_relations = len(set(kg_np[:, 1]))
# self.n_entities = len(set(kg_np[:, 0]) | set(kg_np[:, 2]))
self.n_relations = max(kg_np[:, 1]) + 1
self.n_entities = max(max(kg_np[:, 0]), max(kg_np[:, 2])) + 1
self.n_triples = len(kg_np)
kg_dict, relation_dict = _construct_kg(kg_np)
return kg_np, kg_dict, relation_dict
def _print_data_info(self):
print('[n_users, n_items]=[%d, %d]' % (self.n_users, self.n_items))
print('[n_train, n_test]=[%d, %d]' % (self.n_train, self.n_test))
print('[n_entities, n_relations, n_triples]=[%d, %d, %d]' % (self.n_entities, self.n_relations, self.n_triples))
print('[batch_size, batch_size_kg]=[%d, %d]' % (self.batch_size, self.batch_size_kg))
def get_sparsity_split(self):
try:
split_uids, split_state = [], []
lines = open(self.path + '/sparsity.split', 'r').readlines()
for idx, line in enumerate(lines):
if idx % 2 == 0:
split_state.append(line.strip())
print(line.strip())
else:
split_uids.append([int(uid) for uid in line.strip().split(' ')])
print('get sparsity split.')
except Exception:
split_uids, split_state = self.create_sparsity_split()
f = open(self.path + '/sparsity.split', 'w')
for idx in range(len(split_state)):
f.write(split_state[idx] + '\n')
f.write(' '.join([str(uid) for uid in split_uids[idx]]) + '\n')
print('create sparsity split.')
return split_uids, split_state
def create_sparsity_split(self):
all_users_to_test = list(self.test_user_dict.keys())
user_n_iid = dict()
# generate a dictionary to store (key=n_iids, value=a list of uid).
for uid in all_users_to_test:
train_iids = self.train_user_dict[uid]
test_iids = self.test_user_dict[uid]
n_iids = len(train_iids) + len(test_iids)
if n_iids not in user_n_iid.keys():
user_n_iid[n_iids] = [uid]
else:
user_n_iid[n_iids].append(uid)
split_uids = list()
# split the whole user set into four subset.
temp = []
count = 1
fold = 4
n_count = (self.n_train + self.n_test)
n_rates = 0
split_state = []
for idx, n_iids in enumerate(sorted(user_n_iid)):
temp += user_n_iid[n_iids]
n_rates += n_iids * len(user_n_iid[n_iids])
n_count -= n_iids * len(user_n_iid[n_iids])
if n_rates >= count * 0.25 * (self.n_train + self.n_test):
split_uids.append(temp)
state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' %(n_iids, len(temp), n_rates)
split_state.append(state)
print(state)
temp = []
n_rates = 0
fold -= 1
if idx == len(user_n_iid.keys()) - 1 or n_count == 0:
split_uids.append(temp)
state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' % (n_iids, len(temp), n_rates)
split_state.append(state)
print(state)
return split_uids, split_state
def __len__(self):
# number of existing users after the preprocessing described in the paper,
        # determines the length of the training dataset; a positive and a negative item are sampled per user
return len(self.exist_users)
##_generate_train_cf_batch
def __getitem__(self, idx):
"""
if self.batch_size <= self.n_users:
user = rd.sample(self.exist_users, self.batch_size)
else:
users = [rd.choice(self.exist_users) for _ in range(self.batch_size)]
"""
def sample_pos_items_for_u(u, num):
pos_items = self.train_user_dict[u]
n_pos_items = len(pos_items)
pos_batch = []
while True:
if len(pos_batch) == num: break
pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]
pos_i_id = pos_items[pos_id]
if pos_i_id not in pos_batch:
pos_batch.append(pos_i_id)
return pos_batch
def sample_neg_items_for_u(u, num):
neg_items = []
while True:
if len(neg_items) == num: break
neg_i_id = np.random.randint(low=0, high=self.n_items,size=1)[0]
if neg_i_id not in self.train_user_dict[u] and neg_i_id not in neg_items:
neg_items.append(neg_i_id)
return neg_items
"""
pos_items, neg_items = [], []
for u in users:
pos_items += sample_pos_items_for_u(u, 1)
neg_items += sample_neg_items_for_u(u, 1)
"""
u = self.exist_users[idx]
pos_item = sample_pos_items_for_u(u, 1)
neg_item = sample_neg_items_for_u(u, 1)
if len(pos_item) == 1:
pos_item = pos_item[0]
if len(neg_item) == 1:
neg_item = neg_item[0]
if self.batch_style_id == 0:
return u, pos_item, neg_item
else:
return {'users': u, 'pos_items': pos_item, 'neg_items':neg_item}#u, pos_item, neg_item #users, pos_items, neg_items
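    # Illustrative usage note (an addition, not from the original file): with the default
    # 'list' batch style each __getitem__ call yields one (user, positive item, negative item)
    # triple, so a standard DataLoader collates a batch into three index tensors, e.g.
    #   loader = DataLoader(Data(args, path), batch_size=args.batch_size, shuffle=True)
    #   users, pos_items, neg_items = next(iter(loader))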
def as_test_feed_dict(self, model, user_batch, item_batch, drop_flag=True):
feed_dict ={
model.users: user_batch,
model.pos_items: item_batch,
model.mess_dropout: [0.] * len(eval(self.args.layer_size)),
model.node_dropout: [0.] * len(eval(self.args.layer_size)),
}
return feed_dict
def as_train_feed_dict(self, model, batch_data):
if self.batch_style_id == 0:
users, pos_items, neg_items = batch_data
batch_data = {}
batch_data['users'] = users
batch_data['pos_items'] = pos_items
batch_data['neg_items'] = neg_items
feed_dict = {
model.users: batch_data['users'],
model.pos_items: batch_data['pos_items'],
model.neg_items: batch_data['neg_items'],
model.mess_dropout: eval(self.args.mess_dropout),
model.node_dropout: eval(self.args.node_dropout),
}
return feed_dict | 10,100 | 35.204301 | 149 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/knowledge_aware/KGAT/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.knowledge_aware.metrics as metrics
from parser import parse_args
from models.knowledge_aware.load_data import Data
import multiprocessing
import heapq
import numpy as np
import random
from torch.utils.data import DataLoader, RandomSampler
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_kgat import KGAT_loader
from utils import *
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
kgat_a_ds = KGAT_loader(args=args, path=DATA_DIR[args.dataset])
kgat_ds = Data(args=args, path=DATA_DIR[args.dataset])
data_generator['A_dataset'] = kgat_a_ds
data_generator['dataset'] = kgat_ds
data_generator['A_loader'] = DataLoader(kgat_a_ds,
batch_size=kgat_a_ds.batch_size_kg,
sampler=RandomSampler(kgat_a_ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
data_generator['loader'] = DataLoader(kgat_ds,
batch_size=kgat_ds.batch_size,
sampler=RandomSampler(kgat_ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = False
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc,pids
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc, pids
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
# user u's ratings for user u
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
if args.test_flag == 'part':
r, auc, pids = ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True)
else:
r, auc, pids = ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
result_dict = get_performance(user_pos_test, r, auc, Ks)
result_dict['uid'] = u
result_dict['pids'] = pids
return result_dict
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
from collections import defaultdict
user_topk_dict = defaultdict(list)
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
u = re['uid']
pids = re['pids']
user_topk_dict[u] = pids
assert count == n_test_users
pool.close()
return result, user_topk_dict | 8,738 | 32.102273 | 101 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/knowledge_aware/CKE/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.knowledge_aware.metrics as metrics
from parser import parse_args
from models.knowledge_aware.load_data import Data
import multiprocessing
import heapq
import numpy as np
import random
from torch.utils.data import DataLoader, RandomSampler
import torch
from utils import *
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_cke import CKE_loader
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
cke_a_ds = CKE_loader(args=args, path=DATA_DIR[args.dataset])
cke_ds = Data(args=args, path=DATA_DIR[args.dataset])
data_generator['A_dataset'] = cke_a_ds
data_generator['dataset'] = cke_ds
data_generator['A_loader'] = DataLoader(cke_a_ds,
batch_size=cke_a_ds.batch_size_kg,
sampler=RandomSampler(cke_a_ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
data_generator['loader'] = DataLoader(cke_ds,
batch_size=cke_ds.batch_size,
sampler=RandomSampler(cke_ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = False
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc,pids
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc, pids
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
# user u's ratings for user u
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
if args.test_flag == 'part':
r, auc, pids = ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True)
else:
r, auc, pids = ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
result_dict = get_performance(user_pos_test, r, auc, Ks)
result_dict['uid'] = u
result_dict['pids'] = pids
return result_dict
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
from collections import defaultdict
user_topk_dict = defaultdict(list)
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
u = re['uid']
pids = re['pids']
user_topk_dict[u] = pids
assert count == n_test_users
pool.close()
return result, user_topk_dict | 9,203 | 33.863636 | 110 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/knowledge_aware/CFKG/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.knowledge_aware.metrics as metrics
from parser import parse_args
import multiprocessing
import heapq
import numpy as np
import random
from itertools import cycle
from torch.utils.data import DataLoader, RandomSampler
import torch
from utils import *
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_cfkg import CFKG_loader
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
ds = CFKG_loader(args=args, path=DATA_DIR[args.dataset])
data_generator['dataset'] = ds
data_generator['loader'] = DataLoader(ds,
batch_size=ds.batch_size,
sampler=RandomSampler(ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = True
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc,pids
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
pids = []
r = []
for i in K_max_item_score:
pids.append(i)
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc, pids
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
# user u's ratings for user u
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
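    # Candidate set = all items minus those the user already interacted with in the train/validation splits.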
if args.test_flag == 'part':
r, auc, pids = ranklist_by_heapq(user_pos_test, test_items, rating, Ks, save_topk=True)
else:
r, auc, pids = ranklist_by_sorted(user_pos_test, test_items, rating, Ks, save_topk=True)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
result_dict = get_performance(user_pos_test, r, auc, Ks)
result_dict['uid'] = u
result_dict['pids'] = pids
return result_dict
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
from collections import defaultdict
user_topk_dict = defaultdict(list)
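    # Score items user-batch by user-batch (optionally in item sub-batches), then compute per-user
    # ranking metrics in parallel workers and average them over all test users.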
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
u = re['uid']
pids = re['pids']
user_topk_dict[u] = pids
assert count == n_test_users
pool.close()
return result,user_topk_dict | 8,135 | 30.905882 | 101 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/PGPR/train_agent.py | from __future__ import absolute_import, division, print_function
import warnings
import numpy as np
import torch
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
warnings.filterwarnings("ignore", category=DeprecationWarning)
import os
import argparse
from collections import namedtuple
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.distributions import Categorical
#from models.PGPR.pgpr_utils import ML1M, TMP_DIR, get_logger, set_random_seed, USER, LOG_DIR, HPARAMS_FILE
from models.PGPR.pgpr_utils import *
from models.PGPR.kg_env import BatchKGEnvironment
from easydict import EasyDict as edict
from collections import defaultdict
import wandb
import sys
from models.utils import MetricsLogger
logger = None
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class ActorCritic(nn.Module):
def __init__(self, state_dim, act_dim, gamma=0.99, hidden_sizes=[512, 256]):
super(ActorCritic, self).__init__()
self.state_dim = state_dim
self.act_dim = act_dim
self.gamma = gamma
self.l1 = nn.Linear(state_dim, hidden_sizes[0])
self.l2 = nn.Linear(hidden_sizes[0], hidden_sizes[1])
self.actor = nn.Linear(hidden_sizes[1], act_dim)
self.critic = nn.Linear(hidden_sizes[1], 1)
self.saved_actions = []
self.rewards = []
self.entropy = []
def forward(self, inputs):
state, act_mask = inputs # state: [bs, state_dim], act_mask: [bs, act_dim]
x = self.l1(state)
x = F.dropout(F.elu(x), p=0.5)
out = self.l2(x)
x = F.dropout(F.elu(out), p=0.5)
actor_logits = self.actor(x)
        # Mask out invalid actions by pushing their logits to a large negative value before the softmax.
        actor_logits[act_mask == 0] = -999999.0
act_probs = F.softmax(actor_logits, dim=-1) # Tensor of [bs, act_dim]
state_values = self.critic(x) # Tensor of [bs, 1]
return act_probs, state_values
def select_action(self, batch_state, batch_act_mask, device):
state = torch.FloatTensor(batch_state).to(device) # Tensor [bs, state_dim]
act_mask = torch.ByteTensor(batch_act_mask).to(device) # Tensor of [bs, act_dim]
probs, value = self((state, act_mask)) # act_probs: [bs, act_dim], state_value: [bs, 1]
m = Categorical(probs)
acts = m.sample() # Tensor of [bs, ], requires_grad=False
# [CAVEAT] If sampled action is out of action_space, choose the first action in action_space.
valid_idx = act_mask.gather(1, acts.view(-1, 1)).view(-1)
acts[valid_idx == 0] = 0
self.saved_actions.append(SavedAction(m.log_prob(acts), value))
self.entropy.append(m.entropy())
return acts.cpu().numpy().tolist()
def update(self, optimizer, device, ent_weight):
if len(self.rewards) <= 0:
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
            return 0.0, 0.0, 0.0, 0.0  # match the (loss, actor, critic, entropy) arity of the normal return
batch_rewards = np.vstack(self.rewards).T # numpy array of [bs, #steps]
batch_rewards = torch.FloatTensor(batch_rewards).to(device)
num_steps = batch_rewards.shape[1]
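        # Turn per-step rewards into discounted returns, accumulating right-to-left:
        # reward[:, t] += gamma * reward[:, t+1].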
for i in range(1, num_steps):
batch_rewards[:, num_steps - i - 1] += self.gamma * batch_rewards[:, num_steps - i]
actor_loss = 0
critic_loss = 0
entropy_loss = 0
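        # Advantage actor-critic terms: policy gradient weighted by the (detached) advantage,
        # squared advantage for the critic, and a negative-entropy bonus for exploration.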
for i in range(0, num_steps):
log_prob, value = self.saved_actions[i] # log_prob: Tensor of [bs, ], value: Tensor of [bs, 1]
advantage = batch_rewards[:, i] - value.squeeze(1) # Tensor of [bs, ]
actor_loss += -log_prob * advantage.detach() # Tensor of [bs, ]
critic_loss += advantage.pow(2) # Tensor of [bs, ]
entropy_loss += -self.entropy[i] # Tensor of [bs, ]
actor_loss = actor_loss.mean()
critic_loss = critic_loss.mean()
entropy_loss = entropy_loss.mean()
loss = actor_loss + critic_loss + ent_weight * entropy_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
return loss.item(), actor_loss.item(), critic_loss.item(), entropy_loss.item()
class ACDataLoader(object):
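    """Serves shuffled mini-batches of user ids; each batch seeds one batch of rollout episodes."""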
def __init__(self, uids, batch_size):
self.uids = np.array(uids)
self.num_users = len(uids)
self.batch_size = batch_size
self.reset()
def reset(self):
self._rand_perm = np.random.permutation(self.num_users)
self._start_idx = 0
self._has_next = True
def has_next(self):
return self._has_next
def get_batch(self):
if not self._has_next:
return None
# Multiple users per batch
end_idx = min(self._start_idx + self.batch_size, self.num_users)
batch_idx = self._rand_perm[self._start_idx:end_idx]
batch_uids = self.uids[batch_idx]
self._has_next = self._has_next and end_idx < self.num_users
self._start_idx = end_idx
return batch_uids.tolist()
def train(args):
# check how datasets are loaded by BatchKGEnvironment
train_env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len,
state_history=args.state_history)
valid_env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len,
state_history=args.state_history)
train_uids = list(train_env.kg(USER).keys())
valid_uids = list(valid_env.kg(USER).keys())
train_dataloader = ACDataLoader(train_uids, args.batch_size)
valid_dataloader = ACDataLoader(valid_uids, args.batch_size)
model = ActorCritic(train_env.state_dim, train_env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model.load_state_dict(model_sd)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
optimizer = optim.Adam(model.parameters(), lr=args.lr)
metrics = MetricsLogger(args.wandb_entity,
f'pgpr_{args.dataset}',
config=args)
metrics.register('train_loss')
metrics.register('train_ploss')
metrics.register('train_vloss')
metrics.register('train_entropy')
metrics.register('train_reward')
metrics.register('avg_train_loss')
metrics.register('avg_train_ploss')
metrics.register('avg_train_vloss')
metrics.register('avg_train_entropy')
metrics.register('avg_train_reward')
metrics.register('std_train_reward')
metrics.register('valid_loss')
metrics.register('valid_ploss')
metrics.register('valid_vloss')
metrics.register('valid_entropy')
metrics.register('valid_reward')
metrics.register('avg_valid_loss')
metrics.register('avg_valid_ploss')
metrics.register('avg_valid_vloss')
metrics.register('avg_valid_entropy')
metrics.register('avg_valid_reward')
metrics.register('std_valid_reward')
loaders = {'train': train_dataloader,
'valid': valid_dataloader}
envs = {'train': train_env,
'valid':valid_env}
step_counter = {
'train': 0,
'valid':0
}
uids_split = {'train' :train_uids,
'valid':valid_uids}
first_iterate = True
model.train()
start = 0
for epoch in range(1, args.epochs + 1):
splits_to_compute = list(loaders.items())
if first_iterate:
first_iterate = False
splits_to_compute.insert(0, ('valid', valid_dataloader))
for split_name, dataloader in splits_to_compute:
if split_name == 'valid' and epoch%10 != 0:
continue
if split_name == 'valid':
model.eval()
else:
model.train()
dataloader.reset()
env = envs[split_name]
uids = uids_split[split_name]
iter_counter = 0
### Start epoch ###
dataloader.reset()
while dataloader.has_next():
batch_uids = dataloader.get_batch()
### Start batch episodes ###
batch_state = env.reset(batch_uids) # numpy array of [bs, state_dim]
done = False
while not done:
batch_act_mask = env.batch_action_mask(dropout=args.act_dropout) # numpy array of size [bs, act_dim]
batch_act_idx = model.select_action(batch_state, batch_act_mask, args.device) # int
batch_state, batch_reward, done = env.batch_step(batch_act_idx)
model.rewards.append(batch_reward)
### End of episodes ###
if split_name == 'train':
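                    # Linearly anneal the learning rate over the projected number of policy updates.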
lr = args.lr * max(1e-4, 1.0 - float(step_counter[split_name]) / (args.epochs * len(uids) / args.batch_size))
for pg in optimizer.param_groups:
pg['lr'] = lr
# Update policy
total_reward = np.sum(model.rewards)
loss, ploss, vloss, eloss = model.update(optimizer, args.device, args.ent_weight)
cur_metrics = {f'{split_name}_loss':loss,
f'{split_name}_ploss':ploss,
f'{split_name}_vloss':vloss,
f'{split_name}_entropy':eloss,
f'{split_name}_reward':total_reward,
f'{split_name}_iter': step_counter[split_name]}
for k,v in cur_metrics.items():
metrics.log(k, v)
#metrics.push(cur_metrics.keys())
step_counter[split_name] += 1
iter_counter += 1
cur_metrics = [f'{split_name}_epoch']
cur_metrics.extend([f'{split_name}_loss',
f'{split_name}_ploss',
f'{split_name}_vloss',
f'{split_name}_entropy',
f'{split_name}_reward',
])
for k in cur_metrics[1:]:
metrics.log(f'avg_{k}', sum(metrics.history(k, iter_counter))/max(iter_counter,1) )
getattr(metrics, f'avg_{split_name}_reward')[-1] /= args.batch_size
metrics.log(f'{split_name}_epoch', epoch)
cur_metrics.append(f'std_{split_name}_reward')
metrics.log(f'std_{split_name}_reward',np.std(metrics.history( f'{split_name}_reward', iter_counter)) )
info = ""
for k in cur_metrics:
if isinstance(getattr(metrics,k)[-1],float):
x = '{:.5f}'.format(getattr(metrics, k)[-1])
else:
x = '{:d}'.format(getattr(metrics, k)[-1])
info = info + f'| {k}={x} '
metrics.push(cur_metrics)
logger.info(info)
### END of epoch ###
if epoch % 10 == 0:
policy_file = '{}/policy_model_epoch_{}.ckpt'.format(args.log_dir, epoch)
logger.info("Save models to " + policy_file)
torch.save(model.state_dict(), policy_file)
makedirs(args.dataset)
metrics.write(TEST_METRICS_FILE_PATH[args.dataset])
metrics.close_wandb()
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default=ML1M, help='One of {ml1m, lfm1m, cellphones}.')
parser.add_argument('--name', type=str, default='train_agent', help='directory name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
parser.add_argument('--epochs', type=int, default=50, help='Max number of epochs.')
parser.add_argument('--batch_size', type=int, default=32, help='batch size.')
parser.add_argument('--lr', type=float, default=1e-4, help='learning rate.')
parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.')
parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.')
parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.')
parser.add_argument('--ent_weight', type=float, default=1e-3, help='weight factor for entropy loss')
parser.add_argument('--act_dropout', type=float, default=0, help='action dropout rate.')
parser.add_argument('--state_history', type=int, default=1, help='state history length')
    parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='hidden layer sizes of the actor-critic network.')
parser.add_argument('--do_validation', type=bool, default=True, help='Whether to perform validation')
parser.add_argument("--wandb", action="store_true", help="If passed, will log to Weights and Biases.")
parser.add_argument(
"--wandb_entity",
required="--wandb" in sys.argv,
type=str,
help="Entity name to push to the wandb logged data, in case args.wandb is specified.",
)
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
os.makedirs(TMP_DIR[args.dataset], exist_ok=True)
with open(os.path.join(TMP_DIR[args.dataset],HPARAMS_FILE), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
args.log_dir = os.path.join(TMP_DIR[args.dataset], args.name)
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
global logger
logger = get_logger(args.log_dir + '/train_log.txt')
logger.info(args)
set_random_seed(args.seed)
train(args)
if __name__ == '__main__':
main()
| 14,020 | 39.523121 | 129 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/PGPR/transe_model.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.PGPR.pgpr_utils import *
from models.PGPR.data_utils import Dataset
class KnowledgeEmbedding(nn.Module):
def __init__(self, args, dataloader):
super(KnowledgeEmbedding, self).__init__()
self.embed_size = args.embed_size
self.num_neg_samples = args.num_neg_samples
self.device = args.device
self.l2_lambda = args.l2_lambda
self.dataset_name = args.dataset
self.relation_names = dataloader.dataset.other_relation_names
self.entity_names = dataloader.dataset.entity_names
self.relation2entity = dataloader.dataset.relation2entity
# Initialize entity embeddings.
self.initialize_entity_embeddings(dataloader.dataset)
for e in self.entities:
embed = self._entity_embedding(self.entities[e].vocab_size)
setattr(self, e, embed)
# Initialize relation embeddings and relation biases.
self.initialize_relations_embeddings(dataloader.dataset)
for r in self.relations:
embed = self._relation_embedding()
setattr(self, r, embed)
bias = self._relation_bias(len(self.relations[r].et_distrib))
setattr(self, r + '_bias', bias)
def initialize_entity_embeddings(self, dataset):
self.entities = edict()
for entity_name in self.entity_names:
value = edict(vocab_size=getattr(dataset, entity_name).vocab_size)
self.entities[entity_name] = value
def initialize_relations_embeddings(self, dataset):
self.relations = edict()
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et=PRODUCT,
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
for relation_name in dataset.other_relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
def _entity_embedding(self, vocab_size):
"""Create entity embedding of size [vocab_size+1, embed_size].
Note that last dimension is always 0's.
"""
embed = nn.Embedding(vocab_size + 1, self.embed_size, padding_idx=-1, sparse=False)
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(vocab_size + 1, self.embed_size).uniform_(-initrange, initrange)
embed.weight = nn.Parameter(weight)
return embed
def _relation_embedding(self):
"""Create relation vector of size [1, embed_size]."""
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(1, self.embed_size).uniform_(-initrange, initrange)
embed = nn.Parameter(weight)
return embed
def _relation_bias(self, vocab_size):
"""Create relation bias of size [vocab_size+1]."""
bias = nn.Embedding(vocab_size + 1, 1, padding_idx=-1, sparse=False)
bias.weight = nn.Parameter(torch.zeros(vocab_size + 1, 1))
return bias
def _make_distrib(self, distrib):
"""Normalize input numpy vector to distribution."""
        distrib = np.power(np.array(distrib, dtype=float), 0.75)  # np.float alias was removed from recent NumPy releases
distrib = distrib / distrib.sum()
distrib = torch.FloatTensor(distrib).to(self.device)
return distrib
def forward(self, batch_idxs):
loss = self.compute_loss(batch_idxs)
#loss = self.compute_loss_lfm(batch_idxs)
#assert loss == loss2
return loss
def compute_loss(self, batch_idxs):
"""Compute knowledge graph negative sampling loss.
"""
regularizations = []
user_idxs = batch_idxs[:, 0]
product_idxs = batch_idxs[:, 1]
knowledge_relations = get_knowledge_derived_relations(self.dataset_name)
#print(knowledge_relations)
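        # Sum one negative-sampling loss term for the user-product interaction plus one per
        # knowledge-derived (product, relation, entity) column of the batch.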
# user + interaction -> product
up_loss, up_embeds = self.neg_loss(USER, INTERACTION[self.dataset_name], PRODUCT, user_idxs, product_idxs)
regularizations.extend(up_embeds)
loss = up_loss
i = 2
for curr_rel in knowledge_relations:
entity_name, curr_idxs = self.relation2entity[curr_rel], batch_idxs[:, i]
# product + curr_rel -> curr_entity
curr_loss, curr_embeds = self.neg_loss(PRODUCT, curr_rel, entity_name, product_idxs, curr_idxs)
if curr_loss is not None:
regularizations.extend(curr_embeds)
loss += curr_loss
i+=1
# l2 regularization
if self.l2_lambda > 0:
l2_loss = 0.0
for term in regularizations:
l2_loss += torch.norm(term)
loss += self.l2_lambda * l2_loss
return loss
def neg_loss(self, entity_head, relation, entity_tail, entity_head_idxs, entity_tail_idxs):
# Entity tail indices can be -1. Remove these indices. Batch size may be changed!
mask = entity_tail_idxs >= 0
fixed_entity_head_idxs = entity_head_idxs[mask]
fixed_entity_tail_idxs = entity_tail_idxs[mask]
if fixed_entity_head_idxs.size(0) <= 0:
return None, []
entity_head_embedding = getattr(self, entity_head) # nn.Embedding
entity_tail_embedding = getattr(self, entity_tail) # nn.Embedding
relation_vec = getattr(self, relation) # [1, embed_size]
relation_bias_embedding = getattr(self, relation + '_bias') # nn.Embedding
entity_tail_distrib = self.relations[relation].et_distrib # [vocab_size]
return kg_neg_loss(entity_head_embedding, entity_tail_embedding,
fixed_entity_head_idxs, fixed_entity_tail_idxs,
relation_vec, relation_bias_embedding, self.num_neg_samples, entity_tail_distrib)
def kg_neg_loss(entity_head_embed, entity_tail_embed, entity_head_idxs, entity_tail_idxs,
relation_vec, relation_bias_embed, num_samples, distrib):
"""Compute negative sampling loss for triple (entity_head, relation, entity_tail).
Args:
entity_head_embed: Tensor of size [batch_size, embed_size].
entity_tail_embed: Tensor of size [batch_size, embed_size].
entity_head_idxs:
entity_tail_idxs:
relation_vec: Parameter of size [1, embed_size].
relation_bias: Tensor of size [batch_size]
num_samples: An integer.
distrib: Tensor of size [vocab_size].
Returns:
A tensor of [1].
"""
batch_size = entity_head_idxs.size(0)
entity_head_vec = entity_head_embed(entity_head_idxs) # [batch_size, embed_size]
example_vec = entity_head_vec + relation_vec # [batch_size, embed_size]
example_vec = example_vec.unsqueeze(2) # [batch_size, embed_size, 1]
entity_tail_vec = entity_tail_embed(entity_tail_idxs) # [batch_size, embed_size]
pos_vec = entity_tail_vec.unsqueeze(1) # [batch_size, 1, embed_size]
relation_bias = relation_bias_embed(entity_tail_idxs).squeeze(1) # [batch_size]
pos_logits = torch.bmm(pos_vec, example_vec).squeeze() + relation_bias # [batch_size]
pos_loss = -pos_logits.sigmoid().log() # [batch_size]
neg_sample_idx = torch.multinomial(distrib, num_samples, replacement=True).view(-1)
neg_vec = entity_tail_embed(neg_sample_idx) # [num_samples, embed_size]
neg_logits = torch.mm(example_vec.squeeze(2), neg_vec.transpose(1, 0).contiguous())
neg_logits += relation_bias.unsqueeze(1) # [batch_size, num_samples]
neg_loss = -neg_logits.neg().sigmoid().log().sum(1) # [batch_size]
loss = (pos_loss + neg_loss).mean()
return loss, [entity_head_vec, entity_tail_vec, neg_vec]
| 7,960 | 41.345745 | 114 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/PGPR/train_transe_model.py | from __future__ import absolute_import, division, print_function
import os
import argparse
import torch
import torch.optim as optim
from models.PGPR.data_utils import DataLoader
from models.PGPR.pgpr_utils import *
from models.PGPR.transe_model import KnowledgeEmbedding
logger = None
def train(args, dataset):
dataloader = DataLoader(dataset, args.batch_size)
review_to_train = len(dataset.review.data) * args.epochs + 1
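    # Total number of interaction records the loop will consume across all epochs; drives the LR decay below.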
model = KnowledgeEmbedding(args, dataloader).to(args.device)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
optimizer = optim.SGD(model.parameters(), lr=args.lr)
steps = 0
smooth_loss = 0.0
for epoch in range(1, args.epochs + 1):
dataloader.reset()
while dataloader.has_next():
# Set learning rate.
lr = args.lr * max(1e-4, 1.0 - dataloader.finished_review_num / float(review_to_train))
for pg in optimizer.param_groups:
pg['lr'] = lr
# Get training batch.
batch_idxs = dataloader.get_batch()
batch_idxs = torch.from_numpy(batch_idxs).to(args.device)
# Train models.
optimizer.zero_grad()
train_loss = model(batch_idxs)
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
smooth_loss += train_loss.item() / args.steps_per_checkpoint
steps += 1
if steps % args.steps_per_checkpoint == 0:
logger.info('Epoch: {:02d} | '.format(epoch) +
'Review: {:d}/{:d} | '.format(dataloader.finished_review_num, review_to_train) +
'Lr: {:.5f} | '.format(lr) +
'Smooth loss: {:.5f}'.format(smooth_loss))
smooth_loss = 0.0
if epoch % 10 == 0:
torch.save(model.state_dict(), '{}/transe_model_sd_epoch_{}.ckpt'.format(args.log_dir, epoch))
def extract_embeddings(args, dataset):
"""Note that last entity embedding is of size [vocab_size+1, d]."""
dataset_name = args.dataset
model_file = '{}/transe_model_sd_epoch_{}.ckpt'.format(args.log_dir, args.epochs)
print('Load embeddings', model_file)
state_dict = torch.load(model_file, map_location=lambda storage, loc: storage)
embeds = {}
for entity_name in dataset.entity_names:
embeds[entity_name] = state_dict[f'{entity_name}.weight'].cpu().data.numpy()[:-1]
embeds[INTERACTION[dataset_name]] = (
state_dict[INTERACTION[dataset_name]].cpu().data.numpy()[0],
state_dict[f'{INTERACTION[dataset_name]}_bias.weight'].cpu().data.numpy()
)
for relation_name in dataset.other_relation_names:
embeds[relation_name] = (
state_dict[f'{relation_name}'].cpu().data.numpy()[0],
state_dict[f'{relation_name}_bias.weight'].cpu().data.numpy()
)
save_embed(dataset_name, embeds)
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default=ML1M, help='One of {ml1m, lfm1m, cellphones}.')
parser.add_argument('--name', type=str, default='train_transe_model', help='models name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
parser.add_argument('--epochs', type=int, default=30, help='number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=64, help='batch size.')
parser.add_argument('--lr', type=float, default=0.5, help='learning rate.')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay for adam.')
parser.add_argument('--l2_lambda', type=float, default=0, help='l2 lambda')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Clipping gradient.')
parser.add_argument('--embed_size', type=int, default=100, help='knowledge embedding size.')
parser.add_argument('--num_neg_samples', type=int, default=5, help='number of negative samples.')
parser.add_argument('--steps_per_checkpoint', type=int, default=200, help='Number of steps for checkpoint.')
args = parser.parse_args()
os.makedirs(LOG_DATASET_DIR[args.dataset], exist_ok=True)
with open(os.path.join(LOG_DATASET_DIR[args.dataset], f'{TRANSE_HPARAMS_FILE}'), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
print(TMP_DIR[args.dataset])
args.log_dir = os.path.join(TMP_DIR[args.dataset], args.name)
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
global logger
logger = get_logger(args.log_dir + '/train_log.txt')
logger.info(args)
set_random_seed(args.seed)
dataset = load_dataset(args.dataset)
train(args, dataset)
extract_embeddings(args, dataset)
if __name__ == '__main__':
main()
| 5,258 | 41.072 | 112 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/PGPR/test_agent.py | from __future__ import absolute_import, division, print_function
import os
import argparse
from math import log
import numpy as np
import torch
import json
from easydict import EasyDict as edict
from tqdm import tqdm
from functools import reduce
from models.PGPR.kg_env import BatchKGEnvironment
from models.PGPR.train_agent import ActorCritic
from models.PGPR.pgpr_utils import *
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
def evaluate(dataset_name, topk_matches, test_user_products):
"""Compute metrics for predicted recommendations.
Args:
topk_matches: a list or dict of product ids in ascending order.
"""
invalid_users = []
# Compute metrics
metrics = edict(
# ndcg_other=[],
ndcg=[],
hr=[],
precision=[],
recall=[],
)
ndcgs = []
# uid2gender, gender2name = get_user2gender(dataset_name)
test_user_idxs = list(test_user_products.keys())
rel_size = []
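    # For every test user with a full top-10 list, mark which recommended items are test positives
    # and accumulate NDCG, hit rate, precision and recall.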
for uid in test_user_idxs:
if uid not in topk_matches or len(topk_matches[uid]) < 10:
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if len(pred_list) == 0:
continue
rel_size.append(len(rel_set))
k = 0
hit_num = 0.0
hit_list = []
for pid in pred_list:
k += 1
if pid in rel_set:
hit_num += 1
hit_list.append(1)
else:
hit_list.append(0)
ndcg = ndcg_at_k(hit_list, k)
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
# General
# metrics.ndcg_other.append(ndcg_other)
        metrics.ndcg.append(float(ndcg))  # cast from np.float64 so the json.dump below can serialize it
metrics.hr.append(hit)
metrics.recall.append(recall)
metrics.precision.append(precision)
avg_metrics = edict(
ndcg=[],
hr=[],
precision=[],
recall=[],
)
print("Average test set size: ", np.array(rel_size).mean())
for metric, values in metrics.items():
avg_metrics[metric] = np.mean(values)
avg_metric_value = np.mean(values) * 100 if metric == "ndcg_other" else np.mean(values)
n_users = len(values)
print("Overall for noOfUser={}, {}={:.4f}".format(n_users, metric,
avg_metric_value))
print("\n")
makedirs(dataset_name)
with open(RECOM_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump(metrics,f)
def dcg_at_k(r, k, method=1):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=1):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
def batch_beam_search(env, model, uids, device, intrain=None, topk=[25, 5, 1]):
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(uids) # numpy of [bs, dim]
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
model.eval()
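    # Beam search over the KG: at each hop, expand every path with its top-k most probable actions
    # (k per hop given by `topk`, default [25, 5, 1]) and carry the action probabilities along.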
for hop in range(3):
state_tensor = torch.FloatTensor(state_pool).to(device)
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
actmask_tensor = torch.ByteTensor(actmask_pool).to(device)
probs, _ = model((state_tensor, actmask_tensor)) # Tensor of [bs, act_dim]
probs_max, _ = torch.max(probs, 0)
probs_min, _ = torch.min(probs, 0)
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool = [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
next_node_type = KG_RELATION[env.dataset_name][path[-1][1]][
relation] # Changing according to the dataset
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
path_pool = new_path_pool
probs_pool = new_probs_pool
if hop < 2:
state_pool = env._batch_get_state(path_pool)
return path_pool, probs_pool
def predict_paths(policy_file, path_file, args):
print('Predicting paths...')
env = BatchKGEnvironment(args.dataset, args.max_acts, max_path_len=args.max_path_len,
state_history=args.state_history)
pretrain_sd = torch.load(policy_file)
model = ActorCritic(env.state_dim, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
test_labels = load_labels(args.dataset, 'test')
test_uids = list(test_labels.keys())
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
paths, probs = batch_beam_search(env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def save_output(dataset_name, pred_paths):
extracted_path_dir = LOG_DATASET_DIR[dataset_name]#extracted_path_dir + "/pgpr"
if not os.path.isdir(extracted_path_dir):
os.makedirs(extracted_path_dir)
print("Normalizing items scores...")
# Get min and max score to performe normalization between 0 and 1
score_list = []
for uid, pid in pred_paths.items():
for pid, path_list in pred_paths[uid].items():
for path in path_list:
score_list.append(float(path[0]))
min_score = min(score_list)
max_score = max(score_list)
print("Saving pred_paths...")
for uid in pred_paths.keys():
curr_pred_paths = pred_paths[uid]
for pid in curr_pred_paths.keys():
curr_pred_paths_for_pid = curr_pred_paths[pid]
for i, curr_path in enumerate(curr_pred_paths_for_pid):
path_score = pred_paths[uid][pid][i][0]
path_prob = pred_paths[uid][pid][i][1]
path = pred_paths[uid][pid][i][2]
new_path_score = (float(path_score) - min_score) / (max_score - min_score)
pred_paths[uid][pid][i] = (new_path_score, path_prob, path)
with open(extracted_path_dir + "/pred_paths.pkl", 'wb') as pred_paths_file:
pickle.dump(pred_paths, pred_paths_file)
pred_paths_file.close()
def extract_paths(dataset_name, save_paths, path_file, train_labels, valid_labels, test_labels):
embeds = load_embed(args.dataset)
user_embeds = embeds[USER]
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[dataset_name]
product = main_entity
watched_embeds = embeds[main_relation][0]
movie_embeds = embeds[main_entity]
scores = np.dot(user_embeds + watched_embeds, movie_embeds.T)
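    # TransE-style scoring: the user embedding translated by the interaction relation, dotted with every
    # product embedding, yields a dense score matrix used below to rank candidate paths.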
validation_pids = get_validation_pids(dataset_name)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != product:
continue
uid = path[0][2]
if uid not in pred_paths:
continue
pid = path[-1][2]
if uid in valid_labels and pid in valid_labels[uid]:
continue
if pid in train_labels[uid]:
continue
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
if args.save_paths:
save_output(dataset_name, pred_paths)
return pred_paths, scores
def evaluate_paths(dataset_name, pred_paths, emb_scores, train_labels, test_labels):
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
for uid in pred_paths:
if uid in train_labels:
train_pids = set(train_labels[uid])
        else:
            # Fall back to an empty set instead of silently reusing the previous user's train items.
            train_pids = set()
            print("Invalid train_pids")
best_pred_paths[uid] = []
for pid in pred_paths[uid]:
if pid in train_pids:
continue
# if pid in validation_pids[uid]:
# continue
# Get the path with highest probability
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
# save_best_pred_paths(extracted_path_dir, best_pred_paths)
# 3) Compute top 10 recommended products for each user.
sort_by = 'score'
pred_labels = {}
pred_paths_top10 = {}
for uid in best_pred_paths:
if sort_by == 'score':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[0], x[1]), reverse=True)
elif sort_by == 'prob':
sorted_path = sorted(best_pred_paths[uid], key=lambda x: (x[1], x[0]), reverse=True)
top10_pids = [p[-1][2] for _, _, p in sorted_path[:10]] # from largest to smallest
top10_paths = [p for _, _, p in sorted_path[:10]] # paths for the top10
# add up to 10 pids if not enough
if args.add_products and len(top10_pids) < 10:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(emb_scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in top10_pids:
continue
top10_pids.append(cand_pid)
if len(top10_pids) >= 10:
break
# end of add
pred_labels[uid] = top10_pids[::-1] # change order to from smallest to largest!
pred_paths_top10[uid] = top10_paths[::-1]
evaluate(dataset_name, pred_labels, test_labels)
# Path-pattern weight: w(p) = log(2 + (#u-v paths sharing p's pattern / total #u-v paths))
def get_path_pattern_weigth(path_pattern_name, pred_uv_paths):
n_same_path_pattern = 0
total_paths = len(pred_uv_paths)
for path in pred_uv_paths:
if path_pattern_name == get_path_pattern(path):
n_same_path_pattern += 1
return log(2 + (n_same_path_pattern / total_paths))
def test(args):
policy_file = args.log_dir + '/policy_model_epoch_{}.ckpt'.format(args.epochs)
path_file = args.log_dir + '/policy_paths_epoch{}.pkl'.format(args.epochs)
train_labels = load_labels(args.dataset, 'train')
valid_labels = load_labels(args.dataset, 'valid')
test_labels = load_labels(args.dataset, 'test')
# kg = load_kg(args.dataset)
if args.run_path:
predict_paths(policy_file, path_file, args)
if args.save_paths or args.run_eval:
pred_paths, scores = extract_paths(args.dataset, args.save_paths, path_file, train_labels, valid_labels, test_labels)
if args.run_eval:
evaluate_paths(args.dataset, pred_paths, scores, train_labels, test_labels)
if __name__ == '__main__':
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default=ML1M, help='One of {ml1m, lfm1m, cellphones}')
parser.add_argument('--name', type=str, default='train_agent', help='directory name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
parser.add_argument('--epochs', type=int, default=50, help='num of epochs.')
parser.add_argument('--max_acts', type=int, default=250, help='Max number of actions.')
parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.')
parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.')
parser.add_argument('--state_history', type=int, default=1, help='state history length')
    parser.add_argument('--hidden', type=int, nargs='*', default=[512, 256], help='hidden layer sizes of the actor-critic network.')
parser.add_argument('--add_products', type=boolean, default=True, help='Add predicted products up to 10')
    parser.add_argument('--topk', type=list, nargs='*', default=[25, 5, 1], help='beam width per hop during path search.')
parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? (takes long time)')
parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?')
parser.add_argument('--save_paths', type=boolean, default=True, help='Save paths')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
args.log_dir = os.path.join(TMP_DIR[args.dataset], args.name)
test(args)
| 14,342 | 39.516949 | 125 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/PGPR/pgpr_utils.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import os
import sys
import random
import pickle
import logging
import logging.handlers
import numpy as np
import csv
# import scipy.sparse as sp
import torch
from collections import defaultdict
import shutil
# Dataset names.
# from sklearn.feature_extraction.text import TfidfTransformer
ML1M = 'ml1m'
LFM1M = 'lfm1m'
CELL = 'cellphones'
MODEL = 'pgpr'
TRANSE='transe'
ROOT_DIR = os.environ['TREX_DATA_ROOT'] if 'TREX_DATA_ROOT' in os.environ else '../..'
# Dataset directories.
DATASET_DIR = {
ML1M: f'{ROOT_DIR}/data/{ML1M}/preprocessed/{MODEL}',
LFM1M: f'{ROOT_DIR}/data/{LFM1M}/preprocessed/{MODEL}',
CELL: f'{ROOT_DIR}/data/{CELL}/preprocessed/{MODEL}'
}
VALID_METRICS_FILE_NAME = 'valid_metrics.json'
OPTIM_HPARAMS_METRIC = 'valid_reward'
OPTIM_HPARAMS_LAST_K = 100 # last 100 episodes
LOG_DIR = f'{ROOT_DIR}/results'
LOG_DATASET_DIR = {
ML1M: f'{LOG_DIR}/{ML1M}/{MODEL}',
LFM1M: f'{LOG_DIR}/{LFM1M}/{MODEL}',
CELL: f'{LOG_DIR}/{CELL}/{MODEL}',
}
# for compatibility, CFG_DIR, BEST_CFG_DIR have been modified s,t, they are independent from the dataset
CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/hparams_cfg',
}
BEST_CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/best_hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/best_hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/best_hparams_cfg',
}
TEST_METRICS_FILE_NAME = 'test_metrics.json'
RECOM_METRICS_FILE_NAME = 'recommender_metrics.json'
RECOM_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{RECOM_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{RECOM_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{RECOM_METRICS_FILE_NAME}',
}
TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
BEST_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
CONFIG_FILE_NAME = 'config.json'
CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
BEST_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
TRANSE_HPARAMS_FILE = f'transe_{MODEL}_hparams_file.json'
HPARAMS_FILE = f'{MODEL}_hparams_file.json'
# Model result directories.
TMP_DIR = {
ML1M: f'{DATASET_DIR[ML1M]}/tmp',
LFM1M: f'{DATASET_DIR[LFM1M]}/tmp',
CELL: f'{DATASET_DIR[CELL]}/tmp',
}
# Label files.
LABELS = {
ML1M: (TMP_DIR[ML1M] + '/train_label.pkl', TMP_DIR[ML1M] + '/valid_label.pkl', TMP_DIR[ML1M] + '/test_label.pkl'),
LFM1M: (TMP_DIR[LFM1M] + '/train_label.pkl', TMP_DIR[LFM1M] + '/valid_label.pkl', TMP_DIR[LFM1M] + '/test_label.pkl'),
CELL: (TMP_DIR[CELL] + '/train_label.pkl', TMP_DIR[CELL] + '/valid_label.pkl', TMP_DIR[CELL] + '/test_label.pkl')
}
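# Per dataset: paths of the pickled (train, valid, test) user->positive-items label files.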
# ENTITIES/RELATIONS SHARED BY ALL DATASETS
USER = 'user'
PRODUCT = 'product'
INTERACTION = {
ML1M: "watched",
LFM1M: "listened",
CELL: "purchase",
}
SELF_LOOP = 'self_loop'
PRODUCED_BY_PRODUCER = 'produced_by_producer'
PRODUCER = 'producer'
# ML1M ENTITIES
CINEMATOGRAPHER = 'cinematographer'
PRODCOMPANY = 'prodcompany'
COMPOSER = 'composer'
CATEGORY = 'category'
ACTOR = 'actor'
COUNTRY = 'country'
WIKIPAGE = 'wikipage'
EDITOR = 'editor'
WRITTER = 'writter'
DIRECTOR = 'director'
# LASTFM ENTITIES
ARTIST = 'artist'
ENGINEER = 'engineer'
GENRE = 'genre'
# CELL ENTITIES
BRAND = 'brand'
RPRODUCT = 'rproduct'
# ML1M RELATIONS
DIRECTED_BY_DIRECTOR = 'directed_by_director'
PRODUCED_BY_COMPANY = 'produced_by_prodcompany'
STARRED_BY_ACTOR = 'starred_by_actor'
RELATED_TO_WIKIPAGE = 'related_to_wikipage'
EDITED_BY_EDITOR = 'edited_by_editor'
WROTE_BY_WRITTER = 'wrote_by_writter'
CINEMATOGRAPHY_BY_CINEMATOGRAPHER = 'cinematography_by_cinematographer'
COMPOSED_BY_COMPOSER = 'composed_by_composer'
PRODUCED_IN_COUNTRY = 'produced_in_country'
BELONG_TO_CATEGORY = 'belong_to_category'
# LASTFM RELATIONS
MIXED_BY_ENGINEER = 'mixed_by_engineer'
FEATURED_BY_ARTIST = 'featured_by_artist'
BELONG_TO_GENRE = 'belong_to_genre'
# CELL RELATIONS
PURCHASE = 'purchase'
ALSO_BOUGHT_RP = 'also_bought_related_product'
ALSO_VIEWED_RP = 'also_viewed_related_product'
ALSO_BOUGHT_P = 'also_bought_product'
ALSO_VIEWED_P = 'also_viewed_product'
KG_RELATION = {
ML1M: {
USER: {
INTERACTION[ML1M]: PRODUCT,
},
ACTOR: {
STARRED_BY_ACTOR: PRODUCT,
},
DIRECTOR: {
DIRECTED_BY_DIRECTOR: PRODUCT,
},
PRODUCT: {
INTERACTION[ML1M]: USER,
PRODUCED_BY_COMPANY: PRODCOMPANY,
PRODUCED_BY_PRODUCER: PRODUCER,
EDITED_BY_EDITOR: EDITOR,
WROTE_BY_WRITTER: WRITTER,
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: CINEMATOGRAPHER,
BELONG_TO_CATEGORY: CATEGORY,
DIRECTED_BY_DIRECTOR: DIRECTOR,
STARRED_BY_ACTOR: ACTOR,
COMPOSED_BY_COMPOSER: COMPOSER,
PRODUCED_IN_COUNTRY: COUNTRY,
RELATED_TO_WIKIPAGE: WIKIPAGE,
},
PRODCOMPANY: {
PRODUCED_BY_COMPANY: PRODUCT,
},
COMPOSER: {
COMPOSED_BY_COMPOSER: PRODUCT,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
WRITTER: {
WROTE_BY_WRITTER: PRODUCT,
},
EDITOR: {
EDITED_BY_EDITOR: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
CINEMATOGRAPHER: {
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: PRODUCT,
},
COUNTRY: {
PRODUCED_IN_COUNTRY: PRODUCT,
},
WIKIPAGE: {
RELATED_TO_WIKIPAGE: PRODUCT,
}
},
LFM1M: {
USER: {
INTERACTION[LFM1M]: PRODUCT,
},
ARTIST: {
FEATURED_BY_ARTIST: PRODUCT,
},
ENGINEER: {
MIXED_BY_ENGINEER: PRODUCT,
},
PRODUCT: {
INTERACTION[LFM1M]: USER,
PRODUCED_BY_PRODUCER: PRODUCER,
FEATURED_BY_ARTIST: ARTIST,
MIXED_BY_ENGINEER: ENGINEER,
BELONG_TO_GENRE: GENRE,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
GENRE: {
BELONG_TO_GENRE: PRODUCT,
},
},
CELL: {
USER: {
PURCHASE: PRODUCT,
},
PRODUCT: {
PURCHASE: USER,
PRODUCED_BY_COMPANY: BRAND,
BELONG_TO_CATEGORY: CATEGORY,
ALSO_BOUGHT_RP: RPRODUCT,
ALSO_VIEWED_RP: RPRODUCT,
ALSO_BOUGHT_P: PRODUCT,
ALSO_VIEWED_P: PRODUCT,
},
BRAND: {
PRODUCED_BY_COMPANY: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
RPRODUCT: {
ALSO_BOUGHT_RP: PRODUCT,
ALSO_VIEWED_RP: PRODUCT,
}
},
}
# 0 is reserved to the main relation, 1 to mention
PATH_PATTERN = {
ML1M: {
0: ((None, USER), (INTERACTION[ML1M], PRODUCT), (INTERACTION[ML1M], USER), (INTERACTION[ML1M], PRODUCT)),
2: ((None, USER), (INTERACTION[ML1M], PRODUCT), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, CINEMATOGRAPHER), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, PRODUCT)),
3: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_COMPANY, PRODCOMPANY), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (INTERACTION[ML1M], PRODUCT), (COMPOSED_BY_COMPOSER, COMPOSER), (COMPOSED_BY_COMPOSER, PRODUCT)),
5: ((None, USER), (INTERACTION[ML1M], PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
7: ((None, USER), (INTERACTION[ML1M], PRODUCT), (STARRED_BY_ACTOR, ACTOR), (STARRED_BY_ACTOR, PRODUCT)),
8: ((None, USER), (INTERACTION[ML1M], PRODUCT), (EDITED_BY_EDITOR, EDITOR), (EDITED_BY_EDITOR, PRODUCT)),
9: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
10: ((None, USER), (INTERACTION[ML1M], PRODUCT), (WROTE_BY_WRITTER, WRITTER), (WROTE_BY_WRITTER, PRODUCT)),
11: ((None, USER), (INTERACTION[ML1M], PRODUCT), (DIRECTED_BY_DIRECTOR, DIRECTOR), (DIRECTED_BY_DIRECTOR, PRODUCT)),
12: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_IN_COUNTRY, COUNTRY), (PRODUCED_IN_COUNTRY, PRODUCT)),
13: ((None, USER), (INTERACTION[ML1M], PRODUCT), (RELATED_TO_WIKIPAGE, WIKIPAGE), (RELATED_TO_WIKIPAGE, PRODUCT)),
},
LFM1M: {
0: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (INTERACTION[LFM1M], USER), (INTERACTION[LFM1M], PRODUCT)),
2: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (BELONG_TO_GENRE, GENRE), (BELONG_TO_GENRE, PRODUCT)),
4: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (FEATURED_BY_ARTIST, ARTIST), (FEATURED_BY_ARTIST, PRODUCT)),
5: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (MIXED_BY_ENGINEER, ENGINEER), (MIXED_BY_ENGINEER, PRODUCT)),
6: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
},
CELL: {
0: ((None, USER), (PURCHASE, PRODUCT), (PURCHASE, USER), (PURCHASE, PRODUCT)),
2: ((None, USER), (PURCHASE, PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
3: ((None, USER), (PURCHASE, PRODUCT), (PRODUCED_BY_COMPANY, BRAND), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_P, PRODUCT)),
5: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_P, PRODUCT)),
6: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_RP, RPRODUCT), (ALSO_BOUGHT_RP, PRODUCT)),
10: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_RP, RPRODUCT), (ALSO_VIEWED_RP, PRODUCT)),
}
}
MAIN_PRODUCT_INTERACTION = {
ML1M: (PRODUCT, INTERACTION[ML1M]),
LFM1M: (PRODUCT, INTERACTION[LFM1M]),
CELL: (PRODUCT, PURCHASE)
}
def get_entities(dataset_name):
return list(KG_RELATION[dataset_name].keys())
def get_knowledge_derived_relations(dataset_name):
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[dataset_name]
ans = list(KG_RELATION[dataset_name][main_entity].keys())
ans.remove(main_relation)
return ans
def get_dataset_relations(dataset_name, entity_head):
return list(KG_RELATION[dataset_name][entity_head].keys())
def get_entity_tail(dataset_name, relation):
entity_head, _ = MAIN_PRODUCT_INTERACTION[dataset_name]
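    # Relations are resolved from the product side of the KG, e.g. for ml1m the relation
    # 'starred_by_actor' maps a product to an 'actor' entity.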
return KG_RELATION[dataset_name][entity_head][relation]
def get_logger(logname):
logger = logging.getLogger(logname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.handlers.RotatingFileHandler(logname, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def save_dataset(dataset, dataset_obj):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
with open(dataset_file, 'wb') as f:
pickle.dump(dataset_obj, f)
def load_dataset(dataset):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
dataset_obj = pickle.load(open(dataset_file, 'rb'))
return dataset_obj
def save_labels(dataset, labels, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
with open(label_file, 'wb') as f:
pickle.dump(labels, f)
f.close()
def load_labels(dataset, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
user_products = pickle.load(open(label_file, 'rb'))
return user_products
def save_embed(dataset, embed):
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
pickle.dump(embed, open(embed_file, 'wb'))
def load_embed(dataset, embed_model=TRANSE):
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
print('Load embedding:', embed_file)
if not os.path.exists(embed_file):
default_emb_path = os.path.join(ROOT_DIR, 'pretrained', dataset, MODEL, embed_model, 'transe_embed.pkl')
shutil.copyfile(default_emb_path, embed_file)
embed = pickle.load(open(embed_file, 'rb'))
return embed
# Receive paths in form (score, prob, [path]) return the last relationship
def get_path_pattern(path):
return path[-1][-1][0]
def get_pid_to_kgid_mapping(dataset_name):
if dataset_name == "ml1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/movie.txt", "r")
elif dataset_name == "lfm1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/song.txt", "r")
else:
print("Dataset mapping not found!")
exit(-1)
reader = csv.reader(file, delimiter=' ')
dataset_pid2kg_pid = {}
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
dataset_pid2kg_pid[int(row[0])] = int(row[1])
file.close()
return dataset_pid2kg_pid
def get_validation_pids(dataset_name):
if not os.path.isfile(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')):
return []
validation_pids = defaultdict(set)
with open(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')) as valid_file:
reader = csv.reader(valid_file, delimiter=" ")
for row in reader:
uid = int(row[0])
pid = int(row[1])
validation_pids[uid].add(pid)
valid_file.close()
return validation_pids
def get_uid_to_kgid_mapping(dataset_name):
dataset_uid2kg_uid = {}
with open(DATASET_DIR[dataset_name] + "/entities/mappings/user.txt", 'r') as file:
reader = csv.reader(file, delimiter=" ")
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
uid_review = int(row[0])
uid_kg = int(row[1])
dataset_uid2kg_uid[uid_review] = uid_kg
return dataset_uid2kg_uid
def save_kg(dataset, kg):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
pickle.dump(kg, open(kg_file, 'wb'))
def load_kg(dataset):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
# CHANGED
kg = pickle.load(open(kg_file, 'rb'))
return kg
def shuffle(arr):
for i in range(len(arr) - 1, 0, -1):
# Pick a random index from 0 to i
        j = random.randint(0, i)  # randint is inclusive on both ends
# Swap arr[i] with the element at random index
arr[i], arr[j] = arr[j], arr[i]
return arr
def makedirs(dataset_name):
os.makedirs(BEST_CFG_DIR[dataset_name], exist_ok=True)
os.makedirs(CFG_DIR[dataset_name], exist_ok=True)
| 15,735 | 31.715177 | 156 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/embeddings/transe/knowledge_graph.py | from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
from math import log
from tqdm import tqdm
from copy import deepcopy
import pandas as pd
import numpy as np
import gzip
import pickle
import random
from datetime import datetime
# import matplotlib.pyplot as plt
import torch
from easydict import EasyDict as edict
from models.embeddings.transe.utils import *
#get_knowledge_derived_relations, DATASET_DIR, \
# get_pid_to_kgid_mapping, get_uid_to_kgid_mapping, get_entity_edict,\
# MAIN_PRODUCT_INTERACTION, USER,\
# ML1M, LFM1M, CELL, get_entities,get_dataset_relations, get_entity_tail
class KnowledgeGraph(object):
def __init__(self, dataset, verbose=False):
self.G = dict()
self.verbose = verbose
self._load_entities(dataset)
self.dataset = dataset
self.dataset_name = dataset.dataset_name
self._load_reviews(dataset)
self._load_knowledge(dataset)
self._clean()
self.top_matches = None
def _load_entities(self, dataset):
if self.verbose:
print('Load entities...')
num_nodes = 0
entities = get_entities(dataset.dataset_name)
for entity in entities:
self.G[entity] = {}
vocab_size = getattr(dataset, entity).vocab_size
relations = get_dataset_relations(dataset.dataset_name, entity)
for eid in range(vocab_size):
self.G[entity][eid] = {r: [] for r in relations}
num_nodes += vocab_size
if self.verbose:
print('Total {:d} nodes.'.format(num_nodes))
def _load_reviews(self, dataset):
if self.verbose:
print('Load reviews...')
num_edges = 0
for rid, data in enumerate(dataset.review.data):
uid, pid, _, _ = data
# (2) Add edges.
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
self._add_edge(USER, uid, main_interaction, main_product, pid)
num_edges += 2
if self.verbose:
print('Total {:d} review edges.'.format(num_edges))
def _load_knowledge(self, dataset):
relations = get_knowledge_derived_relations(dataset.dataset_name)
main_entity, _ = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
for relation in relations:
if self.verbose:
print('Load knowledge {}...'.format(relation))
data = getattr(dataset, relation).data
num_edges = 0
for pid, eids in enumerate(data):
if len(eids) <= 0:
continue
for eid in set(eids):
et_type = get_entity_tail(dataset.dataset_name, relation)
self._add_edge(main_entity, pid, relation, et_type, eid)
num_edges += 2
if self.verbose:
print('Total {:d} {:s} edges.'.format(num_edges, relation))
def _add_edge(self, etype1, eid1, relation, etype2, eid2):
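        # Store the edge under both endpoints so the graph can be walked in either direction.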
self.G[etype1][eid1][relation].append(eid2)
self.G[etype2][eid2][relation].append(eid1)
def _clean(self):
if self.verbose:
print('Remove duplicates...')
for etype in self.G:
for eid in self.G[etype]:
for r in self.G[etype][eid]:
data = self.G[etype][eid][r]
data = tuple(sorted(set(data)))
self.G[etype][eid][r] = data
def compute_degrees(self):
if self.verbose:
print('Compute node degrees...')
self.degrees = {}
self.max_degree = {}
for etype in self.G:
self.degrees[etype] = {}
for eid in self.G[etype]:
count = 0
for r in self.G[etype][eid]:
count += len(self.G[etype][eid][r])
self.degrees[etype][eid] = count
def get(self, eh_type, eh_id=None, relation=None):
data = self.G
if eh_type is not None:
data = data[eh_type]
if eh_id is not None:
data = data[eh_id]
if relation is not None:
data = data[relation]
return data
def __call__(self, eh_type, eh_id=None, relation=None):
return self.get(eh_type, eh_id, relation)
def get_tails(self, entity_type, entity_id, relation):
return self.G[entity_type][entity_id][relation]
| 4,538 | 32.873134 | 91 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/embeddings/transe/transe_model.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.embeddings.transe.dataset import Dataset
from models.embeddings.transe.utils import *
class KnowledgeEmbedding(nn.Module):
def __init__(self, args, dataloader):
super(KnowledgeEmbedding, self).__init__()
self.embed_size = args.embed_size
self.num_neg_samples = args.num_neg_samples
self.device = args.device
self.l2_lambda = args.l2_lambda
self.dataset_name = args.dataset
#self.relation_names = dataloader.dataset.relation_names
self.relation_names = dataloader.dataset.other_relation_names
self.entity_names = dataloader.dataset.entity_names
self.relation2entity = dataloader.dataset.relation2entity
# Initialize entity embeddings.
self.initialize_entity_embeddings(dataloader.dataset)
for e in self.entities:
embed = self._entity_embedding(self.entities[e].vocab_size)
setattr(self, e, embed)
# Initialize relation embeddings and relation biases.
self.initialize_relations_embeddings(dataloader.dataset)
for r in self.relations:
embed = self._relation_embedding()
setattr(self, r, embed)
bias = self._relation_bias(len(self.relations[r].et_distrib))
setattr(self, r + '_bias', bias)
def initialize_entity_embeddings(self, dataset):
self.entities = edict()
for entity_name in self.entity_names:
value = edict(vocab_size=getattr(dataset, entity_name).vocab_size)
self.entities[entity_name] = value
def initialize_relations_embeddings(self, dataset):
'''
self.relations = edict()
for relation_name in dataset.relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
'''
self.relations = edict()
for relation_name in dataset.other_relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
def _entity_embedding(self, vocab_size):
"""Create entity embedding of size [vocab_size+1, embed_size].
Note that last dimension is always 0's.
"""
embed = nn.Embedding(vocab_size + 1, self.embed_size, padding_idx=-1, sparse=False)
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(vocab_size + 1, self.embed_size).uniform_(-initrange, initrange)
embed.weight = nn.Parameter(weight)
return embed
def _relation_embedding(self):
"""Create relation vector of size [1, embed_size]."""
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(1, self.embed_size).uniform_(-initrange, initrange)
embed = nn.Parameter(weight)
return embed
def _relation_bias(self, vocab_size):
"""Create relation bias of size [vocab_size+1]."""
bias = nn.Embedding(vocab_size + 1, 1, padding_idx=-1, sparse=False)
bias.weight = nn.Parameter(torch.zeros(vocab_size + 1, 1))
return bias
def _make_distrib(self, distrib):
"""Normalize input numpy vector to distribution."""
        distrib = np.power(np.array(distrib, dtype=float), 0.75)
distrib = distrib / distrib.sum()
distrib = torch.FloatTensor(distrib).to(self.device)
return distrib
def forward(self, batch_idxs):
loss = self.compute_loss(batch_idxs)
return loss
def compute_loss(self, batch_idxs):
"""Compute knowledge graph negative sampling loss.
"""
regularizations = []
user_idxs = batch_idxs[:, 0]
product_idxs = batch_idxs[:, 1]
rel2entity_idxs_tuple = {}
i = 2
for rel_name in get_knowledge_derived_relations(self.dataset_name):
rel2entity_idxs_tuple[rel_name] = (self.relation2entity[rel_name], batch_idxs[:, i])
i+=1
# user + interaction -> product
up_loss, up_embeds = self.neg_loss(USER, INTERACTION[self.dataset_name], PRODUCT, user_idxs, product_idxs)
regularizations.extend(up_embeds)
loss = up_loss
for curr_rel in get_knowledge_derived_relations(self.dataset_name):
entity_idxs_tuple = rel2entity_idxs_tuple[curr_rel]
entity_name, curr_idxs = entity_idxs_tuple
# product + curr_rel -> curr_entity
curr_loss, curr_embeds = self.neg_loss(PRODUCT, curr_rel, entity_name, product_idxs, curr_idxs)
if curr_loss is not None:
regularizations.extend(curr_embeds)
loss += curr_loss
# l2 regularization
if self.l2_lambda > 0:
l2_loss = 0.0
for term in regularizations:
l2_loss += torch.norm(term)
loss += self.l2_lambda * l2_loss
return loss
def neg_loss(self, entity_head, relation, entity_tail, entity_head_idxs, entity_tail_idxs):
# Entity tail indices can be -1. Remove these indices. Batch size may be changed!
mask = entity_tail_idxs >= 0
fixed_entity_head_idxs = entity_head_idxs[mask]
fixed_entity_tail_idxs = entity_tail_idxs[mask]
if fixed_entity_head_idxs.size(0) <= 0:
return None, []
entity_head_embedding = getattr(self, entity_head) # nn.Embedding
entity_tail_embedding = getattr(self, entity_tail) # nn.Embedding
relation_vec = getattr(self, relation) # [1, embed_size]
relation_bias_embedding = getattr(self, relation + '_bias') # nn.Embedding
entity_tail_distrib = self.relations[relation].et_distrib # [vocab_size]
return kg_neg_loss(entity_head_embedding, entity_tail_embedding,
fixed_entity_head_idxs, fixed_entity_tail_idxs,
relation_vec, relation_bias_embedding, self.num_neg_samples, entity_tail_distrib)
def kg_neg_loss(entity_head_embed, entity_tail_embed, entity_head_idxs, entity_tail_idxs,
relation_vec, relation_bias_embed, num_samples, distrib):
"""Compute negative sampling loss for triple (entity_head, relation, entity_tail).
Args:
entity_head_embed: Tensor of size [batch_size, embed_size].
entity_tail_embed: Tensor of size [batch_size, embed_size].
entity_head_idxs:
entity_tail_idxs:
relation_vec: Parameter of size [1, embed_size].
relation_bias: Tensor of size [batch_size]
num_samples: An integer.
distrib: Tensor of size [vocab_size].
Returns:
A tensor of [1].
"""
batch_size = entity_head_idxs.size(0)
entity_head_vec = entity_head_embed(entity_head_idxs) # [batch_size, embed_size]
example_vec = entity_head_vec + relation_vec # [batch_size, embed_size]
example_vec = example_vec.unsqueeze(2) # [batch_size, embed_size, 1]
entity_tail_vec = entity_tail_embed(entity_tail_idxs) # [batch_size, embed_size]
pos_vec = entity_tail_vec.unsqueeze(1) # [batch_size, 1, embed_size]
relation_bias = relation_bias_embed(entity_tail_idxs).squeeze(1) # [batch_size]
pos_logits = torch.bmm(pos_vec, example_vec).squeeze() + relation_bias # [batch_size]
pos_loss = -pos_logits.sigmoid().log() # [batch_size]
neg_sample_idx = torch.multinomial(distrib, num_samples, replacement=True).view(-1)
neg_vec = entity_tail_embed(neg_sample_idx) # [num_samples, embed_size]
neg_logits = torch.mm(example_vec.squeeze(2), neg_vec.transpose(1, 0).contiguous())
neg_logits += relation_bias.unsqueeze(1) # [batch_size, num_samples]
neg_loss = -neg_logits.neg().sigmoid().log().sum(1) # [batch_size]
loss = (pos_loss + neg_loss).mean()
return loss, [entity_head_vec, entity_tail_vec, neg_vec]
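# Minimal sketch (illustrative only): exercising kg_neg_loss on toy tensors to show the
# expected shapes. The sizes below are arbitrary assumptions, not tied to any dataset.
def _example_kg_neg_loss():
    embed_size, head_vocab, tail_vocab = 8, 5, 7
    head_embed = nn.Embedding(head_vocab + 1, embed_size, padding_idx=-1)
    tail_embed = nn.Embedding(tail_vocab + 1, embed_size, padding_idx=-1)
    relation_vec = nn.Parameter(torch.zeros(1, embed_size))
    relation_bias = nn.Embedding(tail_vocab + 1, 1, padding_idx=-1)
    distrib = torch.ones(tail_vocab) / tail_vocab  # uniform negative-sampling distribution
    head_idxs = torch.tensor([0, 1, 2])
    tail_idxs = torch.tensor([3, 4, 5])
    loss, _ = kg_neg_loss(head_embed, tail_embed, head_idxs, tail_idxs,
                          relation_vec, relation_bias, num_samples=2, distrib=distrib)
    return loss  # scalar tensor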
| 8,711 | 40.684211 | 114 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/embeddings/transe/utils.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import os
import sys
import random
import pickle
import logging
import logging.handlers
import numpy as np
import csv
# import scipy.sparse as sp
import torch
from collections import defaultdict
# Dataset names.
# from sklearn.feature_extraction.text import TfidfTransformer
ML1M = 'ml1m'
LFM1M = 'lfm1m'
CELL = 'cellphones'
ROOT_DIR = os.environ['TREX_DATA_ROOT'] if 'TREX_DATA_ROOT' in os.environ else '../../..'
# STILL NOT SUPPORTED = beauty, cell, cloth
BEAUTY_CORE ='beauty'
CELL_CORE = CELL
CLOTH_CORE = 'cloth'
# backward compatibility
MOVIE_CORE = 'ml1m'
AZ_BOOK_CORE = 'book'
MODEL = 'transe'
# Dataset directories.
DATASET_DIR = {
ML1M: f'{ROOT_DIR}/data/{ML1M}/preprocessed/{MODEL}',
LFM1M: f'{ROOT_DIR}/data/{LFM1M}/preprocessed/{MODEL}',
CELL: f'{ROOT_DIR}/data/{CELL}/preprocessed/{MODEL}'
}
# Model result directories.
TMP_DIR = {
ML1M: f'{DATASET_DIR[ML1M]}/tmp',
LFM1M: f'{DATASET_DIR[LFM1M]}/tmp',
CELL: f'{DATASET_DIR[CELL]}/tmp',
}
VALID_METRICS_FILE_NAME = 'valid_metrics.json'
TRANSE_OPT_METRIC = 'valid_loss'
#OPTIM_HPARAMS_METRIC = 'avg_valid_reward'
OPTIM_HPARAMS_METRIC = 'valid_loss'
LOG_DIR = f'{ROOT_DIR}/results'
LOG_DATASET_DIR = {
ML1M: f'{LOG_DIR}/{ML1M}/{MODEL}',
LFM1M: f'{LOG_DIR}/{LFM1M}/{MODEL}',
CELL: f'{LOG_DIR}/{CELL}/{MODEL}',
}
# For compatibility, CFG_DIR and BEST_CFG_DIR have been modified so that they are independent of the dataset
CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/hparams_cfg',
}
BEST_CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/best_hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/best_hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/best_hparams_cfg',
}
TEST_METRICS_FILE_NAME = 'test_metrics.json'
RECOM_METRICS_FILE_NAME = 'recommender_metrics.json'
RECOM_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{RECOM_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{RECOM_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{RECOM_METRICS_FILE_NAME}',
}
TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
BEST_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
CONFIG_FILE_NAME = 'config.json'
CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
BEST_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
TRANSE_TEST_METRICS_FILE_NAME = 'test_metrics_transe.json'
TRANSE_CFG_FILE_NAME = 'config_transe.json'
TRANSE_TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TRANSE_TEST_METRICS_FILE_NAME}',
}
BEST_TRANSE_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TRANSE_TEST_METRICS_FILE_NAME}',
}
TRANSE_CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TRANSE_CFG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TRANSE_CFG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TRANSE_CFG_FILE_NAME}',
}
BEST_TRANSE_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TRANSE_CFG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TRANSE_CFG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TRANSE_CFG_FILE_NAME}',
}
TRANSE_HPARAMS_FILE = f'transe_{MODEL}_hparams_file.json'
HPARAMS_FILE = f'{MODEL}_hparams_file.json'
# Label files.
LABELS = {
ML1M: (TMP_DIR[ML1M] + '/train_label.pkl', TMP_DIR[ML1M] + '/valid_label.pkl', TMP_DIR[ML1M] + '/test_label.pkl'),
LFM1M: (TMP_DIR[LFM1M] + '/train_label.pkl', TMP_DIR[LFM1M] + '/valid_label.pkl', TMP_DIR[LFM1M] + '/test_label.pkl'),
CELL: (TMP_DIR[CELL] + '/train_label.pkl', TMP_DIR[CELL] + '/valid_label.pkl', TMP_DIR[CELL] + '/test_label.pkl')
}
# UCPR SPECIFIC RELATIONS
PADDING = 'padding'
SELF_LOOP = 'self_loop'
# ENTITIES/RELATIONS SHARED BY ALL DATASETS
USER = 'user'
PRODUCT = 'product'
INTERACTION = {
ML1M: "watched",
LFM1M: "listened",
CELL: "purchase",
}
SELF_LOOP = 'self_loop'
PRODUCED_BY_PRODUCER = 'produced_by_producer'
PRODUCER = 'producer'
# ML1M ENTITIES
CINEMATOGRAPHER = 'cinematographer'
PRODCOMPANY = 'prodcompany'
COMPOSER = 'composer'
CATEGORY = 'category'
ACTOR = 'actor'
COUNTRY = 'country'
WIKIPAGE = 'wikipage'
EDITOR = 'editor'
WRITTER = 'writter'
DIRECTOR = 'director'
# LASTFM ENTITIES
ARTIST = 'artist'
ENGINEER = 'engineer'
GENRE = 'genre'
# CELL ENTITIES
BRAND = 'brand'
RPRODUCT = 'rproduct'
# ML1M RELATIONS
DIRECTED_BY_DIRECTOR = 'directed_by_director'
PRODUCED_BY_COMPANY = 'produced_by_prodcompany'
STARRED_BY_ACTOR = 'starred_by_actor'
RELATED_TO_WIKIPAGE = 'related_to_wikipage'
EDITED_BY_EDITOR = 'edited_by_editor'
WROTE_BY_WRITTER = 'wrote_by_writter'
CINEMATOGRAPHY_BY_CINEMATOGRAPHER = 'cinematography_by_cinematographer'
COMPOSED_BY_COMPOSER = 'composed_by_composer'
PRODUCED_IN_COUNTRY = 'produced_in_country'
BELONG_TO_CATEGORY = 'belong_to_category'
# LASTFM RELATIONS
MIXED_BY_ENGINEER = 'mixed_by_engineer'
FEATURED_BY_ARTIST = 'featured_by_artist'
BELONG_TO_GENRE = 'belong_to_genre'
# CELL RELATIONS
PURCHASE = 'purchase'
ALSO_BOUGHT_RP = 'also_bought_related_product'
ALSO_VIEWED_RP = 'also_viewed_related_product'
ALSO_BOUGHT_P = 'also_bought_product'
ALSO_VIEWED_P = 'also_viewed_product'
KG_RELATION = {
ML1M: {
USER: {
INTERACTION[ML1M]: PRODUCT,
},
ACTOR: {
STARRED_BY_ACTOR: PRODUCT,
},
DIRECTOR: {
DIRECTED_BY_DIRECTOR: PRODUCT,
},
PRODUCT: {
INTERACTION[ML1M]: USER,
PRODUCED_BY_COMPANY: PRODCOMPANY,
PRODUCED_BY_PRODUCER: PRODUCER,
EDITED_BY_EDITOR: EDITOR,
WROTE_BY_WRITTER: WRITTER,
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: CINEMATOGRAPHER,
BELONG_TO_CATEGORY: CATEGORY,
DIRECTED_BY_DIRECTOR: DIRECTOR,
STARRED_BY_ACTOR: ACTOR,
COMPOSED_BY_COMPOSER: COMPOSER,
PRODUCED_IN_COUNTRY: COUNTRY,
RELATED_TO_WIKIPAGE: WIKIPAGE,
},
PRODCOMPANY: {
PRODUCED_BY_COMPANY: PRODUCT,
},
COMPOSER: {
COMPOSED_BY_COMPOSER: PRODUCT,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
WRITTER: {
WROTE_BY_WRITTER: PRODUCT,
},
EDITOR: {
EDITED_BY_EDITOR: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
CINEMATOGRAPHER: {
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: PRODUCT,
},
COUNTRY: {
PRODUCED_IN_COUNTRY: PRODUCT,
},
WIKIPAGE: {
RELATED_TO_WIKIPAGE: PRODUCT,
}
},
LFM1M: {
USER: {
INTERACTION[LFM1M]: PRODUCT,
},
ARTIST: {
FEATURED_BY_ARTIST: PRODUCT,
},
ENGINEER: {
MIXED_BY_ENGINEER: PRODUCT,
},
PRODUCT: {
INTERACTION[LFM1M]: USER,
PRODUCED_BY_PRODUCER: PRODUCER,
FEATURED_BY_ARTIST: ARTIST,
MIXED_BY_ENGINEER: ENGINEER,
BELONG_TO_GENRE: GENRE,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
GENRE: {
BELONG_TO_GENRE: PRODUCT,
},
},
CELL: {
USER: {
PURCHASE: PRODUCT,
},
PRODUCT: {
PURCHASE: USER,
PRODUCED_BY_COMPANY: BRAND,
BELONG_TO_CATEGORY: CATEGORY,
ALSO_BOUGHT_RP: RPRODUCT,
ALSO_VIEWED_RP: RPRODUCT,
ALSO_BOUGHT_P: PRODUCT,
ALSO_VIEWED_P: PRODUCT,
},
BRAND: {
PRODUCED_BY_COMPANY: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
RPRODUCT: {
ALSO_BOUGHT_RP: PRODUCT,
ALSO_VIEWED_RP: PRODUCT,
}
},
}
# Pattern id 0 is reserved for the main interaction relation, id 1 for the mention relation
PATH_PATTERN = {
ML1M: {
0: ((None, USER), (INTERACTION[ML1M], PRODUCT), (INTERACTION[ML1M], USER), (INTERACTION[ML1M], PRODUCT)),
2: ((None, USER), (INTERACTION[ML1M], PRODUCT), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, CINEMATOGRAPHER), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, PRODUCT)),
3: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_COMPANY, PRODCOMPANY), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (INTERACTION[ML1M], PRODUCT), (COMPOSED_BY_COMPOSER, COMPOSER), (COMPOSED_BY_COMPOSER, PRODUCT)),
5: ((None, USER), (INTERACTION[ML1M], PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
7: ((None, USER), (INTERACTION[ML1M], PRODUCT), (STARRED_BY_ACTOR, ACTOR), (STARRED_BY_ACTOR, PRODUCT)),
8: ((None, USER), (INTERACTION[ML1M], PRODUCT), (EDITED_BY_EDITOR, EDITOR), (EDITED_BY_EDITOR, PRODUCT)),
9: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
10: ((None, USER), (INTERACTION[ML1M], PRODUCT), (WROTE_BY_WRITTER, WRITTER), (WROTE_BY_WRITTER, PRODUCT)),
11: ((None, USER), (INTERACTION[ML1M], PRODUCT), (DIRECTED_BY_DIRECTOR, DIRECTOR), (DIRECTED_BY_DIRECTOR, PRODUCT)),
12: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_IN_COUNTRY, COUNTRY), (PRODUCED_IN_COUNTRY, PRODUCT)),
13: ((None, USER), (INTERACTION[ML1M], PRODUCT), (RELATED_TO_WIKIPAGE, WIKIPAGE), (RELATED_TO_WIKIPAGE, PRODUCT)),
},
LFM1M: {
0: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (INTERACTION[LFM1M], USER), (INTERACTION[LFM1M], PRODUCT)),
2: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (BELONG_TO_GENRE, GENRE), (BELONG_TO_GENRE, PRODUCT)),
4: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (FEATURED_BY_ARTIST, ARTIST), (FEATURED_BY_ARTIST, PRODUCT)),
5: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (MIXED_BY_ENGINEER, ENGINEER), (MIXED_BY_ENGINEER, PRODUCT)),
6: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
},
CELL: {
0: ((None, USER), (PURCHASE, PRODUCT), (PURCHASE, USER), (PURCHASE, PRODUCT)),
2: ((None, USER), (PURCHASE, PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
3: ((None, USER), (PURCHASE, PRODUCT), (PRODUCED_BY_COMPANY, BRAND), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_P, PRODUCT)),
5: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_P, PRODUCT)),
6: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_RP, RPRODUCT), (ALSO_BOUGHT_RP, PRODUCT)),
10: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_RP, RPRODUCT), (ALSO_VIEWED_RP, PRODUCT)),
}
}
MAIN_PRODUCT_INTERACTION = {
ML1M: (PRODUCT, INTERACTION[ML1M]),
LFM1M: (PRODUCT, INTERACTION[LFM1M]),
CELL: (PRODUCT, PURCHASE)
}
def get_entities(dataset_name):
return list(KG_RELATION[dataset_name].keys())
def get_knowledge_derived_relations(dataset_name):
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[dataset_name]
ans = list(KG_RELATION[dataset_name][main_entity].keys())
ans.remove(main_relation)
return ans
def get_dataset_relations(dataset_name, entity_head):
return list(KG_RELATION[dataset_name][entity_head].keys())
def get_entity_tail(dataset_name, relation):
entity_head, _ = MAIN_PRODUCT_INTERACTION[dataset_name]
return KG_RELATION[dataset_name][entity_head][relation]
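# Illustrative check (derived from the KG_RELATION table above): how the schema helpers are
# typically used. Constants such as ML1M, STARRED_BY_ACTOR and ACTOR are defined above.
def _example_schema_helpers():
    assert get_entity_tail(ML1M, STARRED_BY_ACTOR) == ACTOR
    knowledge_rels = get_knowledge_derived_relations(ML1M)
    # The main interaction ('watched' for ML1M) is excluded from the derived relations.
    assert INTERACTION[ML1M] not in knowledge_rels and STARRED_BY_ACTOR in knowledge_rels
    return knowledge_rels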
def get_logger(logname):
logger = logging.getLogger(logname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.handlers.RotatingFileHandler(logname, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def save_dataset(dataset, dataset_obj):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
with open(dataset_file, 'wb') as f:
pickle.dump(dataset_obj, f)
def load_dataset(dataset):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
dataset_obj = pickle.load(open(dataset_file, 'rb'))
return dataset_obj
def save_labels(dataset, labels, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
with open(label_file, 'wb') as f:
pickle.dump(labels, f)
f.close()
def load_labels(dataset, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
user_products = pickle.load(open(label_file, 'rb'))
return user_products
def save_embed(dataset, embed):
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
pickle.dump(embed, open(embed_file, 'wb'))
def load_embed(dataset):
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
print('Load embedding:', embed_file)
embed = pickle.load(open(embed_file, 'rb'))
return embed
# Receives a path entry of the form (score, prob, [path]) and returns the relation of its last hop
def get_path_pattern(path):
return path[-1][-1][0]
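# Illustrative example of the path-entry layout assumed by get_path_pattern: a tuple
# (score, prob, [path]) whose path steps are (relation, entity_type, entity_id) triples.
def _example_path_pattern():
    path_entry = (0.9, 0.5, [(SELF_LOOP, USER, 0),
                             (INTERACTION[ML1M], PRODUCT, 10),
                             (STARRED_BY_ACTOR, ACTOR, 3)])
    return get_path_pattern(path_entry)  # -> STARRED_BY_ACTOR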
def get_pid_to_kgid_mapping(dataset_name):
if dataset_name == "ml1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/movie.txt", "r")
elif dataset_name == "lfm1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/song.txt", "r")
else:
print("Dataset mapping not found!")
exit(-1)
reader = csv.reader(file, delimiter=' ')
dataset_pid2kg_pid = {}
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
dataset_pid2kg_pid[int(row[0])] = int(row[1])
file.close()
return dataset_pid2kg_pid
def get_validation_pids(dataset_name):
if not os.path.isfile(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')):
return []
validation_pids = defaultdict(set)
with open(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')) as valid_file:
reader = csv.reader(valid_file, delimiter=" ")
for row in reader:
uid = int(row[0])
pid = int(row[1])
validation_pids[uid].add(pid)
valid_file.close()
return validation_pids
def get_uid_to_kgid_mapping(dataset_name):
dataset_uid2kg_uid = {}
with open(DATASET_DIR[dataset_name] + "/entities/mappings/user.txt", 'r') as file:
reader = csv.reader(file, delimiter=" ")
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
uid_review = int(row[0])
uid_kg = int(row[1])
dataset_uid2kg_uid[uid_review] = uid_kg
return dataset_uid2kg_uid
def save_kg(dataset, kg):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
pickle.dump(kg, open(kg_file, 'wb'))
def load_kg(dataset):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
kg = pickle.load(open(kg_file, 'rb'))
return kg
def shuffle(arr):
for i in range(len(arr) - 1, 0, -1):
# Pick a random index from 0 to i
        j = random.randint(0, i)  # randint is inclusive on both ends
# Swap arr[i] with the element at random index
arr[i], arr[j] = arr[j], arr[i]
return arr
def makedirs(dataset_name):
os.makedirs(BEST_CFG_DIR[dataset_name], exist_ok=True)
os.makedirs(CFG_DIR[dataset_name], exist_ok=True)
| 16,750 | 30.077922 | 156 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/embeddings/transe/train_transe.py | from __future__ import absolute_import, division, print_function
import os
import argparse
import torch
import torch.optim as optim
from models.embeddings.transe.dataset import DataLoader, Dataset
from models.embeddings.transe.utils import *
from models.embeddings.transe.transe_model import KnowledgeEmbedding
import json
import sys
logger = None
def train(args):
dataset_name = args.dataset
train_set = Dataset(args,set_name='train')
val_set = Dataset(args,set_name='valid')
train_loader = DataLoader(train_set, args.batch_size)
valid_loader = DataLoader(val_set, args.batch_size)
review_to_train = len(train_set.review.data) * args.epochs + 1
model = KnowledgeEmbedding(args, train_loader).to(args.device)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
optimizer = optim.SGD(model.parameters(), lr=args.lr)
steps = 0
smooth_loss = 0.0
best_val_loss = sys.maxsize
train_loss_history = []
val_loss_history = []
for epoch in range(1, args.epochs + 1):
train_loader.reset()
while train_loader.has_next():
# Set learning rate.
lr = args.lr * max(1e-4, 1.0 - train_loader.finished_review_num / float(review_to_train))
for pg in optimizer.param_groups:
pg['lr'] = lr
# Get training batch.
batch_idxs = train_loader.get_batch()
batch_idxs = torch.from_numpy(batch_idxs).to(args.device)
# Train models.
optimizer.zero_grad()
train_loss = model(batch_idxs)
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
smooth_loss += train_loss.item() / args.steps_per_checkpoint
steps += 1
if steps % args.steps_per_checkpoint == 0:
logger.info('Epoch: {:02d} | '.format(epoch) +
'Review: {:d}/{:d} | '.format(train_loader.finished_review_num, review_to_train) +
'Lr: {:.5f} | '.format(lr) +
'Smooth loss: {:.5f}'.format(smooth_loss))
train_loss_history.append(smooth_loss)
smooth_loss = 0.0
if epoch % 10 == 0:
if args.do_validation:
model.eval()
total_val_loss = 0
cnt = 0
valid_loader.reset()
while valid_loader.has_next():
# Get valid batch.
batch_idxs = valid_loader.get_batch()
batch_idxs = torch.from_numpy(batch_idxs).to(args.device)
valid_loss = model(batch_idxs)
total_val_loss += valid_loss.item()
cnt += 1
avg_valid_loss = total_val_loss/max(cnt, 1)
logger.info('Epoch: {:02d} | '.format(epoch) +
'Validation loss: {:.5f}'.format(avg_valid_loss))
val_loss_history.append(avg_valid_loss)
if avg_valid_loss < best_val_loss:
best_val_loss = avg_valid_loss
torch.save(model.state_dict(), '{}/transe_best_model.ckpt'.format(args.log_dir))
model.train()
torch.save(model.state_dict(), '{}/transe_model_sd_epoch_{}.ckpt'.format(args.log_dir, epoch))
makedirs(dataset_name)
with open(TRANSE_TEST_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump( {'valid_loss': best_val_loss,
'valid_loss_history': val_loss_history,
                    'train_loss_history': train_loss_history}, f)
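# Illustrative sketch of the linear learning-rate decay applied inside the training loop above
# (a restatement of the in-loop formula, provided for clarity only; not used by the pipeline).
def _example_lr_schedule(base_lr, finished_reviews, reviews_to_train):
    return base_lr * max(1e-4, 1.0 - finished_reviews / float(reviews_to_train))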
def extract_embeddings(args, dataset):
"""Note that last entity embedding is of size [vocab_size+1, d]."""
dataset_name = args.dataset
os.makedirs(args.log_dir, exist_ok=True)
model_file = '{}/transe_best_model.ckpt'.format(args.log_dir)
print('Load embeddings', model_file)
state_dict = torch.load(model_file, map_location=lambda storage, loc: storage)
embeds = {}
for entity_name in dataset.entity_names:
embeds[entity_name] = state_dict[f'{entity_name}.weight'].cpu().data.numpy()[:-1]
embeds[INTERACTION[dataset_name]] = (
state_dict[INTERACTION[dataset_name]].cpu().data.numpy()[0],
state_dict[f'{INTERACTION[dataset_name]}_bias.weight'].cpu().data.numpy()
)
for relation_name in dataset.other_relation_names:
embeds[relation_name] = (
state_dict[f'{relation_name}'].cpu().data.numpy()[0],
state_dict[f'{relation_name}_bias.weight'].cpu().data.numpy()
)
save_embed(dataset_name, embeds)
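# Illustrative accessor (assumption: mirrors the dict layout written by extract_embeddings
# above, i.e. entity names map to embedding matrices and relation names to
# (relation_vector, bias_weights) tuples).
def _example_read_embeddings(dataset_name):
    embeds = load_embed(dataset_name)
    rel_vec, rel_bias = embeds[INTERACTION[dataset_name]]
    return rel_vec.shape, rel_bias.shape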
def main():
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default=LFM1M, help=f'One of [{ML1M}, {LFM1M}, {CELL}]')
parser.add_argument('--name', type=str, default='train_transe_model', help='models name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
parser.add_argument('--epochs', type=int, default=30, help='number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=64, help='batch size.')
parser.add_argument('--lr', type=float, default=0.5, help='learning rate.')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay for adam.')
parser.add_argument('--l2_lambda', type=float, default=0, help='l2 lambda')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Clipping gradient.')
parser.add_argument('--embed_size', type=int, default=100, help='knowledge embedding size.')
parser.add_argument('--num_neg_samples', type=int, default=5, help='number of negative samples.')
parser.add_argument('--steps_per_checkpoint', type=int, default=200, help='Number of steps for checkpoint.')
parser.add_argument('--do_validation', type=bool, default=True, help='Whether to perform validation')
args = parser.parse_args()
os.makedirs(LOG_DATASET_DIR[args.dataset], exist_ok=True)
with open(os.path.join(LOG_DATASET_DIR[args.dataset], f'{TRANSE_HPARAMS_FILE}'), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print(f'Set to gpu:{args.gpu}')
args.device = torch.device(f'cuda:0') if torch.cuda.is_available() else 'cpu'
print(TMP_DIR[args.dataset])
args.log_dir = os.path.join(TMP_DIR[args.dataset], args.name)
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
global logger
logger = get_logger(args.log_dir + '/train_log.txt')
logger.info(args)
set_random_seed(args.seed)
dataset = load_dataset(args.dataset)
train(args)
extract_embeddings(args, dataset)
if __name__ == '__main__':
main()
| 7,161 | 41.129412 | 123 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/CAFE/train_neural_symbol.py | from __future__ import absolute_import, division, print_function
import os
import sys
import numpy as np
import logging
import logging.handlers
import torch
import torch.optim as optim
#from tensorboardX import SummaryWriter
import time
from models.CAFE.knowledge_graph import *
from models.CAFE.data_utils import OnlinePathLoader, OnlinePathLoaderWithMPSplit, KGMask
from models.CAFE.symbolic_model import EntityEmbeddingModel, SymbolicNetwork, create_symbolic_model
from models.CAFE.cafe_utils import *
from easydict import EasyDict as edict
import wandb
from models.utils import MetricsLogger
logger = None
def set_logger(logname):
global logger
logger = logging.getLogger(logname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.handlers.RotatingFileHandler(logname, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
def train(args):
train_dataloader = OnlinePathLoader(args.dataset, args.batch_size, topk=args.topk_candidates)
valid_dataloader = OnlinePathLoader(args.dataset, args.batch_size, topk=args.topk_candidates)
metapaths = train_dataloader.kg.metapaths
    kg_embeds = load_embed(args.dataset)  # pretrained KG embeddings; the previous `if train` check tested the function object and was always true
model = create_symbolic_model(args, train_dataloader.kg, train=True, pretrain_embeds=kg_embeds)
params = [name for name, param in model.named_parameters() if param.requires_grad]
logger.info(f'Trainable parameters: {params}')
logger.info('==================================')
optimizer = optim.SGD(model.parameters(), lr=args.lr)
total_steps = args.epochs * train_dataloader.total_steps
metrics = MetricsLogger(args.wandb_entity,
f'{MODEL}_{args.dataset}',
config=args)
metrics.register('train_loss')
metrics.register('train_regloss')
metrics.register('train_rankloss')
metrics.register('avg_train_loss')
metrics.register('avg_train_regloss')
metrics.register('avg_train_rankloss')
metrics.register('valid_loss')
metrics.register('valid_regloss')
metrics.register('valid_rankloss')
metrics.register('avg_valid_loss')
metrics.register('avg_valid_regloss')
metrics.register('avg_valid_rankloss')
loaders = {'train': train_dataloader,
'valid': valid_dataloader
}
step_counter = {
'train': 0,
'valid':0
}
first_iterate = True
torch.save(model.state_dict(), '{}/symbolic_model_epoch{}.ckpt'.format(args.log_dir, 0))
start_time = time.time()
model.train()
for epoch in range(1, args.epochs + 1):
splits_to_compute = list(loaders.items())
if first_iterate:
first_iterate = False
splits_to_compute.insert(0, ('valid', valid_dataloader))
for split_name, dataloader in splits_to_compute:
if split_name == 'valid' and epoch%5 == 0:
model.eval()
else:
model.train()
iter_counter = 0
### Start epoch ###
dataloader.reset()
while dataloader.has_next():
# Update learning rate
if split_name == 'train':
lr = args.lr * max(1e-4, 1.0 - step_counter[split_name] / total_steps)
for pg in optimizer.param_groups:
pg['lr'] = lr
                # pos_paths: [bs, path_len]; neg_pids: negative product ids used for the ranking loss
mpid, pos_paths, neg_pids = dataloader.get_batch()
pos_paths = torch.from_numpy(pos_paths).to(args.device)
neg_pids = torch.from_numpy(neg_pids).to(args.device)
optimizer.zero_grad()
reg_loss, rank_loss = model(metapaths[mpid], pos_paths, neg_pids)
loss = reg_loss + args.rank_weight * rank_loss
if split_name == 'train':
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5.0)
optimizer.step()
cur_metrics = {f'{split_name}_loss': loss.item(),
f'{split_name}_regloss':reg_loss.item(),
f'{split_name}_rankloss':rank_loss.item(),
f'{split_name}_iter': step_counter[split_name]}
for k,v in cur_metrics.items():
metrics.log(k, v)
#metrics.push(cur_metrics.keys())
step_counter[split_name] += 1
iter_counter += 1
del pos_paths
del neg_pids
cur_metrics = [f'{split_name}_epoch']
cur_metrics.extend([f'{split_name}_loss',
f'{split_name}_regloss',
f'{split_name}_rankloss'
])
for k in cur_metrics[1:]:
metrics.log(f'avg_{k}', sum(metrics.history(k, iter_counter))/max(iter_counter,1) )
metrics.log(f'{split_name}_epoch', epoch)
#metrics.log(f'std_{split_name}_reward',np.std(metrics.history( f'{split_name}_reward', iter_counter)) )
info = ""
for k in cur_metrics:
if isinstance(getattr(metrics,k)[-1],float):
x = '{:.5f}'.format(getattr(metrics, k)[-1])
else:
x = '{:d}'.format(getattr(metrics, k)[-1])
info = info + f'| {k}={x} '
metrics.push(cur_metrics)
logger.info(info)
if epoch % 10 == 0:
policy_file = '{}/symbolic_model_epoch{}.ckpt'.format(args.log_dir, epoch)
torch.save(model.state_dict(), policy_file)
#metrics.push_model(policy_file, f'{MODEL}_{args.dataset}_{epoch}')
makedirs(args.dataset)
metrics.write(TEST_METRICS_FILE_PATH[args.dataset])#metrics.write(os.path.join(TMP_DIR[args.dataset], VALID_METRICS_FILE_NAME))
metrics.close_wandb()
def main():
args = parse_args()
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
set_logger(args.log_dir + '/train_log.txt')
logger.info(args)
os.makedirs(TMP_DIR[args.dataset], exist_ok=True)
with open(os.path.join(TMP_DIR[args.dataset],HPARAMS_FILE), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
train(args)
if __name__ == '__main__':
main()
| 6,848 | 35.625668 | 131 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/CAFE/cafe_utils.py | from __future__ import absolute_import, division, print_function
import os
import random
import argparse
import pickle
import numpy as np
import gzip
import scipy.sparse as sp
from sklearn.feature_extraction.text import TfidfTransformer
import torch
import sys
import shutil
ML1M = 'ml1m'
LFM1M = 'lfm1m'
CELL = 'cellphones'
MODEL = 'cafe'
ROOT_DIR = os.environ['TREX_DATA_ROOT'] if 'TREX_DATA_ROOT' in os.environ else '../..'
TRANSE='transe'
# Dataset directories.
DATA_DIR = {
ML1M: f'{ROOT_DIR}/data/{ML1M}/preprocessed/{MODEL}',
LFM1M: f'{ROOT_DIR}/data/{LFM1M}/preprocessed/{MODEL}',
CELL: f'{ROOT_DIR}/data/{CELL}/preprocessed/{MODEL}'
}
OPTIM_HPARAMS_METRIC = 'avg_valid_loss'
VALID_METRICS_FILE_NAME = 'valid_metrics.json'
LOG_DIR = f'{ROOT_DIR}/results'
LOG_DATASET_DIR = {
ML1M: f'{LOG_DIR}/{ML1M}/{MODEL}',
LFM1M: f'{LOG_DIR}/{LFM1M}/{MODEL}',
CELL: f'{LOG_DIR}/{CELL}/{MODEL}',
}
# For compatibility, CFG_DIR and BEST_CFG_DIR have been modified so that they are independent of the dataset
CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/hparams_cfg',
}
BEST_CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/best_hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/best_hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/best_hparams_cfg',
}
TEST_METRICS_FILE_NAME = 'test_metrics.json'
RECOM_METRICS_FILE_NAME = 'recommender_metrics.json'
RECOM_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{RECOM_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{RECOM_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{RECOM_METRICS_FILE_NAME}',
}
TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
BEST_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
CONFIG_FILE_NAME = 'config.json'
CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
BEST_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
HPARAMS_FILE = f'{MODEL}_hparams_file.json'
# Model result directories.
TMP_DIR = {
ML1M: f'{DATA_DIR[ML1M]}/tmp',
LFM1M: f'{DATA_DIR[LFM1M]}/tmp',
CELL: f'{DATA_DIR[CELL]}/tmp',
}
LABEL_FILE = {
ML1M: (DATA_DIR[ML1M] + '/train.txt.gz', DATA_DIR[ML1M] + '/valid.txt.gz', DATA_DIR[ML1M] + '/test.txt.gz'),
LFM1M: (DATA_DIR[LFM1M] + '/train.txt.gz', DATA_DIR[LFM1M] + '/valid.txt.gz', DATA_DIR[LFM1M] + '/test.txt.gz'),
CELL: (DATA_DIR[CELL] + '/train.txt.gz', DATA_DIR[CELL] + '/valid.txt.gz', DATA_DIR[CELL] + '/test.txt.gz'),
}
EMBED_FILE = {
ML1M: DATA_DIR[ML1M] + '/kg_embedding.ckpt',
LFM1M: DATA_DIR[LFM1M] + '/kg_embedding.ckpt',
CELL: DATA_DIR[CELL] + '/kg_embedding.ckpt',
}
def parse_args():
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default='ml1m', help=f'dataset name. One of [{ML1M}, {LFM1M}, {CELL}]')
parser.add_argument('--name', type=str, default='neural_symbolic_model', help='model name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=int, default=0, help='gpu device.')
# Hyperparamters for training neural-symbolic model.
parser.add_argument('--epochs', type=int, default=20, help='Number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=64, help='batch size.')
parser.add_argument('--lr', type=float, default=0.1, help='learning rate.')
parser.add_argument('--steps_per_checkpoint', type=int, default=100, help='Number of steps for checkpoint.')
parser.add_argument('--embed_size', type=int, default=200, help='KG embedding size.')
parser.add_argument('--deep_module', type=boolean, default=True, help='Use deep module or not')
parser.add_argument('--use_dropout', type=boolean, default=True, help='use dropout or not.')
parser.add_argument('--rank_weight', type=float, default=1.0, help='weighting factor for ranking loss.')
parser.add_argument('--topk_candidates', type=int, default=10, help='weighting factor for ranking loss.')
# Hyperparameters for execute neural programs (inference).
parser.add_argument('--sample_size', type=int, default=500, help='sample size for model.')
parser.add_argument('--do_infer', type=boolean, default=True, help='whether to infer paths after training.')
parser.add_argument('--do_execute', type=boolean, default=True, help='whether to execute neural programs.')
parser.add_argument('--do_validation', type=bool, default=True, help='Whether to perform validation')
parser.add_argument("--wandb", action="store_true", help="If passed, will log to Weights and Biases.")
parser.add_argument(
"--wandb_entity",
required="--wandb" in sys.argv,
type=str,
help="Entity name to push to the wandb logged data, in case args.wandb is specified.",
)
args = parser.parse_args()
# This is model directory.
args.log_dir = f'{TMP_DIR[args.dataset]}/{args.name}'
# This is the checkpoint name of the trained neural-symbolic model.
args.symbolic_model = f'{args.log_dir}/symbolic_model_epoch{args.epochs}.ckpt'
# This is the filename of the paths inferred by the trained neural-symbolic model.
args.infer_path_data = f'{args.log_dir}/infer_path_data.pkl'
# Set GPU device.
os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu)
args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
torch.backends.cudnn.enabled = False
set_random_seed(args.seed)
return args
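# Example invocation (illustrative only; the script name matches the training entry point in
# this repository, and the flags are the ones defined above):
#   python train_neural_symbol.py --dataset ml1m --epochs 20 --lr 0.1 --sample_size 500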
def load_embed_sd(dataset, embed_model=TRANSE):
print('Load embedding:', EMBED_FILE[dataset])
if not os.path.exists(EMBED_FILE[dataset]):
default_emb_path = os.path.join(ROOT_DIR, 'pretrained', dataset, MODEL, embed_model, os.path.basename(EMBED_FILE[dataset]) )
shutil.copyfile(default_emb_path, EMBED_FILE[dataset])
state_dict = torch.load(EMBED_FILE[dataset], map_location=lambda storage, loc: storage)
return state_dict
def load_embed(dataset, embed_model=TRANSE):
embed_file = '{}/embed.pkl'.format(TMP_DIR[dataset])
print('Load embedding:', embed_file)
if not os.path.exists(embed_file):
default_emb_path = os.path.join(ROOT_DIR, 'pretrained', dataset, MODEL, embed_model, 'embed.pkl')
shutil.copyfile(default_emb_path, embed_file)
embed = pickle.load(open(embed_file, 'rb'))
return embed
def save_embed(dataset, embed):
if not os.path.isdir(TMP_DIR[dataset]):
os.makedirs(TMP_DIR[dataset])
embed_file = TMP_DIR[dataset] + '/embed.pkl'
pickle.dump(embed, open(embed_file, 'wb'))
print(f'File is saved to "{os.path.abspath(embed_file)}".')
def load_kg(dataset):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
kg = pickle.load(open(kg_file, 'rb'))
return kg
def save_kg(dataset, kg):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
pickle.dump(kg, open(kg_file, 'wb'))
print(f'File is saved to "{os.path.abspath(kg_file)}".')
def load_user_products(dataset, up_type='pos'):
up_file = '{}/user_products_{}.npy'.format(TMP_DIR[dataset], up_type)
with open(up_file, 'rb') as f:
up = np.load(f)
return up
def save_user_products(dataset, up, up_type='pos'):
up_file = '{}/user_products_{}.npy'.format(TMP_DIR[dataset], up_type)
with open(up_file, 'wb') as f:
np.save(f, up)
print(f'File is saved to "{os.path.abspath(up_file)}".')
def load_labels(dataset, mode='train'):
if mode == 'train':
label_file = LABEL_FILE[dataset][0]
elif mode == 'valid':
label_file = LABEL_FILE[dataset][1]
elif mode == 'test':
label_file = LABEL_FILE[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
# user_products = pickle.load(open(label_file, 'rb'))
labels = {} # key: user_id, value: list of item IDs.
with gzip.open(label_file, 'rb') as f:
for line in f:
cells = line.decode().strip().split('\t')
labels[int(cells[0])] = [int(x) for x in cells[1:]]
return labels
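# Illustrative sketch of the label-file format assumed by load_labels above: one
# tab-separated line per user, "<uid>\t<pid>\t<pid>...", gzip-compressed on disk.
def _example_parse_label_line(line=b'12\t3\t77\t105\n'):
    cells = line.decode().strip().split('\t')
    return int(cells[0]), [int(x) for x in cells[1:]]  # -> (12, [3, 77, 105])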
def load_path_count(dataset):
count_file = TMP_DIR[dataset] + '/path_count.pkl'
count = pickle.load(open(count_file, 'rb'))
return count
def save_path_count(dataset, count):
count_file = TMP_DIR[dataset] + '/path_count.pkl'
pickle.dump(count, open(count_file, 'wb'))
def set_random_seed(seed):
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def makedirs(dataset_name):
os.makedirs(BEST_CFG_DIR[dataset_name], exist_ok=True)
os.makedirs(CFG_DIR[dataset_name], exist_ok=True)
| 9,304 | 34.788462 | 132 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/CAFE/execute_neural_symbol.py | from __future__ import absolute_import, division, print_function
import csv
import sys
from collections import defaultdict
from functools import reduce
import numpy as np
import pickle
import logging
import logging.handlers
import math
from tqdm import tqdm
import torch
from torch.nn import functional as F
import json
from easydict import EasyDict as edict
from models.CAFE.knowledge_graph import *
from models.CAFE.data_utils import KGMask
from models.CAFE.symbolic_model import SymbolicNetwork, create_symbolic_model
from models.CAFE.cafe_utils import *
logger = None
def set_logger(logname):
global logger
logger = logging.getLogger(logname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.handlers.RotatingFileHandler(logname, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
def infer_paths(args, topk_paths=25):
kg = load_kg(args.dataset)
model = create_symbolic_model(args, kg, train=False)
train_labels = load_labels(args.dataset, 'train')
valid_labels = load_labels(args.dataset, 'valid')
    # Merge train and validation items per user so both can be excluded during inference
    # (zipping keys against the concatenated value lists silently dropped the valid items).
    train_valid_labels = {uid: list(train_labels[uid]) + list(valid_labels.get(uid, []))
                          for uid in train_labels}
train_uids = list(train_labels.keys())
kg_mask = KGMask(kg)
predicts = {}
pbar = tqdm(total=len(train_uids))
for uid in train_uids:
predicts[uid] = {}
for mpid in range(len(kg.metapaths)):
metapath = kg.metapaths[mpid]
paths = model.infer_with_path(metapath, uid, kg_mask,
excluded_pids=train_valid_labels[uid],
topk_paths=topk_paths)
predicts[uid][mpid] = paths
pbar.update(1)
with open(args.infer_path_data, 'wb') as f:
pickle.dump(predicts, f)
def dcg_at_k(r, k, method=1):
"""Score is discounted cumulative gain (dcg)
Relevance is positive real values. Can use binary
as the previous methods.
Returns:
Discounted cumulative gain
"""
    r = np.asarray(r, dtype=float)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=1):
"""Score is normalized discounted cumulative gain (ndcg)
Relevance is positive real values. Can use binary
as the previous methods.
Returns:
Normalized discounted cumulative gain
"""
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
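# Worked example (illustrative): a single relevant item ranked second among three
# predictions gives dcg = 1/log2(3) and idcg = 1/log2(2) = 1, so ndcg ~= 0.63.
def _example_ndcg():
    return ndcg_at_k([0, 1, 0], k=3)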
def evaluate_with_insufficient_pred(topk_matches, test_user_products, dataset_name):
"""Compute metrics for predicted recommendations.
Args:
topk_matches: a list or dict of product ids ordered by largest to smallest scores
"""
# Compute metrics
metrics = edict(
ndcg=[],
hr=[],
precision=[],
recall=[],
hit=[],
)
precisions, recalls, ndcgs, hits, our_ndcgs = [], [], [], [], []
test_user_idxs = list(test_user_products.keys())
for uid in test_user_idxs:
if uid not in topk_matches:
pred_list = []
else:
pred_list = topk_matches[uid]
while len(pred_list) < 10:
pred_list.append(0)
rel_set = test_user_products[uid]
dcg = 0.0
hit_num = 0.0
        binary_rel = []  # per-user binary relevance list (renamed to avoid shadowing the across-user `hits` accumulator)
        for i in range(len(pred_list)):
            if pred_list[i] in rel_set:
                dcg += 1. / (math.log(i + 2) / math.log(2))
                hit_num += 1
                binary_rel.append(1)
            else:
                binary_rel.append(0)
        # idcg
        idcg = 0.0
        for i in range(min(len(rel_set), len(pred_list))):
            idcg += 1. / (math.log(i + 2) / math.log(2))
        ndcg = dcg / idcg
        our_ndcg = ndcg_at_k(binary_rel, len(pred_list))
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
our_ndcgs.append(our_ndcg)
ndcgs.append(ndcg)
recalls.append(recall)
precisions.append(precision)
hits.append(hit)
metrics.ndcg.append(our_ndcg)
metrics.recall.append(recall)
metrics.precision.append(precision)
metrics.hit.append(hit)
our_ndcg = np.mean(our_ndcgs)
our_recall = np.mean(recalls)
makedirs(dataset_name)
with open(RECOM_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump(metrics,f)
print(f"Our ndcg: {our_ndcg}, Our recall: {our_recall}")
avg_precision = np.mean(precisions) * 100
avg_recall = our_recall * 100
avg_ndcg = np.mean(ndcgs) * 100
avg_hit = np.mean(hits) * 100
msg = 'NDCG={:.3f} | Recall={:.3f} | HR={:.3f} | Precision={:.3f}'.format(
avg_ndcg, avg_recall, avg_hit, avg_precision)
print(msg)
return msg
class MetaProgramExecutor(object):
"""This implements the profile-guided reasoning algorithm."""
def __init__(self, symbolic_model, kg_mask, args):
self.symbolic_model = symbolic_model
self.kg_mask = kg_mask
self.device = args.device
def _get_module(self, relation):
return getattr(self.symbolic_model, relation)
def execute(self, program, uid, excluded_pids=None, adaptive_topk=False, manual_topk=5):
"""Execute the program to generate node representations and real nodes.
Args:
program: an instance of MetaProgram.
uid: user ID (integer).
excluded_pids: list of product IDs (list).
"""
uid_tensor = torch.LongTensor([uid]).to(self.device)
user_vec = self.symbolic_model.embedding(USER, uid_tensor) # tensor [1, d]
root = program.root # TreeNode
root.data['vec'] = user_vec # tensor [1, d]
        root.data['paths'] = [([uid], [], [], [])]  # (node_ids, scores, entity_types, relations)
excluded_pids = [] if excluded_pids is None else excluded_pids.copy()
# Run BFS to traverse tree.
queue = root.get_children()
while queue: # queue is not empty
node = queue.pop(0)
child_nodes = np.random.permutation(node.get_children())
queue.extend(child_nodes)
# Compute estimated vector of the node.
x = (node.parent.data['vec'], user_vec)
node.data['vec'] = self._get_module(node.relation)(x) # tensor [1, d]
# Compute scores (log prob) for the node.
entity_vecs = self.symbolic_model.embedding(node.entity) # tensor [vocab, d]
scores = torch.matmul(node.data['vec'], entity_vecs.t()) # tensor [1, vocab]
scores = F.log_softmax(scores[0], dim=0) # tensor [vocab, ]
node.data['paths'] = []
visited_ids = []
for path, value, ep, mp in node.parent.data['paths']:
# Find valid node ids for current path.
valid_ids = self.kg_mask.get_ids(node.parent.entity, path[-1], node.relation)
valid_ids = set(valid_ids).difference(visited_ids)
if not node.has_children() and excluded_pids:
valid_ids = valid_ids.difference(excluded_pids)
if not valid_ids: # empty list
continue
valid_ids = list(valid_ids)
# Compute top k nodes.
valid_ids = torch.LongTensor(valid_ids).to(self.device)
valid_scores = scores.index_select(0, valid_ids)
if adaptive_topk:
k = min(node.sample_size, len(valid_ids))
else:
k = min(manual_topk, len(valid_ids))
topk_scores, topk_idxs = valid_scores.topk(k)
topk_ids = valid_ids.index_select(0, topk_idxs)
# Add nodes and scores to paths.
topk_ids = topk_ids.detach().cpu().numpy()
topk_scores = topk_scores.detach().cpu().numpy()
for j in range(k):
new_path = path + [topk_ids[j]]
new_value = value + [topk_scores[j]]
new_mp = mp + [node.relation]
new_ep = ep + [node.entity]
node.data['paths'].append((new_path, new_value, new_ep, new_mp))
# Remember to add the node to visited list!!!
visited_ids.append(topk_ids[j])
if not node.has_children():
excluded_pids.append(topk_ids[j])
def collect_results(self, program):
entities, results = [], []
# entities.append(program.root.entity)
queue = program.root.get_children()
while len(queue) > 0:
node = queue.pop(0)
# entities.append(node.entity)
queue.extend(node.get_children())
if not node.has_children():
results.extend(node.data['paths'])
return results
class TreeNode(object):
def __init__(self, level, entity, relation):
super(TreeNode, self).__init__()
self.level = level
self.entity = entity # Entity type
self.relation = relation # Relation pointing to this tail entity
self.parent = None
self.children = {} # key = (relation, entity), value = TreeNode
self.sample_size = 0 # number of nodes to sample
self.data = {} # extra information to save
def has_parent(self):
return self.parent is not None
def has_children(self):
return len(self.children) > 0
def get_children(self):
return list(self.children.values())
def __str__(self):
parent = None if not self.has_parent() else self.parent.entity
msg = '({},{},{})'.format(parent, self.relation, self.entity)
return msg
class NeuralProgramLayout(object):
"""This refers to the layout tree in the paper."""
def __init__(self, metapaths):
super(NeuralProgramLayout, self).__init__()
# self.metapaths = metapaths
# print(metapaths)
self.mp2id = {}
for mpid, mp in enumerate(metapaths):
simple_mp = tuple([v[0] for v in mp[1:]])
self.mp2id[simple_mp] = mpid
# self.root = None
# self.initialize()
self.root = TreeNode(0, USER, None)
for mp in metapaths:
node = self.root
for i in range(1, len(mp)):
# child = TreeNode(1, mp[i][1], mp[i][0])
if mp[i] not in node.children:
node.children[mp[i]] = TreeNode(i, mp[i][1], mp[i][0])
node.children[mp[i]].parent = node
node = node.children[mp[i]]
def update_by_path_count(self, path_count):
"""Update sample size of each node by expected number of paths.
Args:
path_count: dict with key=mpid, value=int
"""
def _postorder_update(node, parent_rels):
if not node.has_children():
mpid = self.mp2id[tuple(parent_rels)]
node.sample_size = int(path_count[mpid])
return
min_pos_sample_size, max_sample_size = 99, 0
for child in node.get_children():
_postorder_update(child, parent_rels + [child.relation])
max_sample_size = max(max_sample_size, child.sample_size)
if child.sample_size > 0:
# min_pos_sample_size = min(max_sample_size, child.sample_size)
min_pos_sample_size = min(min_pos_sample_size, child.sample_size)
# Update current node sampling size.
# a) if current node is root, set to 1.
if not node.has_parent():
node.sample_size = 1
# b) if current node is not root, and all children sample sizes are 0, set to 0.
elif max_sample_size == 0:
node.sample_size = 0
# c) if current node is not root, take the minimum and update children.
else:
node.sample_size = min_pos_sample_size
for child in node.get_children():
child.sample_size = int(child.sample_size / node.sample_size)
_postorder_update(self.root, [])
def print_postorder(self, hide_branch=True):
def _postorder(node, msgs):
msg = (node.entity, node.relation, node.sample_size)
new_msgs = msgs + [msg]
if not node.has_children():
if hide_branch and msg[2] == 0:
return
str_msgs = ['({},{},{})'.format(msg[0], msg[1], msg[2]) for msg in new_msgs]
print(' '.join(str_msgs))
return
            for child in node.get_children():
_postorder(child, new_msgs)
_postorder(self.root, [])
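# Illustrative sketch: building a layout tree from a single toy metapath and updating it with
# a path-count budget. The metapath format ((None, USER), (relation, entity), ...) mirrors the
# one consumed by NeuralProgramLayout above; the relation/entity strings are made-up placeholders.
def _example_layout():
    toy_metapaths = [((None, USER), ('watched', 'product'),
                      ('starred_by_actor', 'actor'), ('starred_by_actor', 'product'))]
    layout = NeuralProgramLayout(toy_metapaths)
    layout.update_by_path_count({0: 4})
    return layout.root.sample_size  # the root budget is always normalized to 1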
def create_heuristic_program(metapaths, raw_paths_with_scores, prior_count, sample_size):
    pcount = prior_count.astype(int)
pcount[pcount > 5] = 5
mp_scores = np.ones(len(metapaths)) * -99
for mpid in raw_paths_with_scores:
paths = raw_paths_with_scores[mpid]
if len(paths) <= 0:
continue
scores = np.array([p2[-1] for p1, p2 in paths])
scores[scores < -5.0] = -5.0
mp_scores[mpid] = np.mean(scores)
top_idxs = np.argsort(mp_scores)[::-1]
norm_count = np.zeros(len(metapaths))
rest = sample_size
for mpid in top_idxs:
if pcount[mpid] <= rest:
norm_count[mpid] = pcount[mpid]
else:
norm_count[mpid] = rest
rest -= norm_count[mpid]
program_layout = NeuralProgramLayout(metapaths)
program_layout.update_by_path_count(norm_count)
return program_layout
def save_pred_paths(dataset, pred_paths):
#if not os.path.isdir("../../results/"):
# os.makedirs("../../results/")
#extracted_path_dir = f"../../results/{dataset}"
#if not os.path.isdir(extracted_path_dir):
# os.makedirs(extracted_path_dir)
    extracted_path_dir = LOG_DATASET_DIR[dataset]
if not os.path.isdir(extracted_path_dir):
os.makedirs(extracted_path_dir)
print(f"Saving predicted paths in {extracted_path_dir} + /pred_paths.pkl")
with open(extracted_path_dir + "/pred_paths.pkl", 'wb') as pred_paths_file:
pickle.dump(pred_paths, pred_paths_file)
pred_paths_file.close()
def run_program(args):
dataset_name = args.dataset
kg = load_kg(args.dataset)
kg_mask = KGMask(kg)
train_labels = load_labels(args.dataset, 'train')
valid_labels = load_labels(args.dataset, 'valid')
    # Merge train and validation items per user so both can be excluded from the recommendations.
    train_valid_labels = {uid: list(train_labels[uid]) + list(valid_labels.get(uid, []))
                          for uid in train_labels}
test_labels = load_labels(args.dataset, 'test')
path_counts = load_path_count(args.dataset) # Training path freq
with open(args.infer_path_data, 'rb') as f:
raw_paths = pickle.load(f) # Test path with scores
symbolic_model = create_symbolic_model(args, kg, train=False)
program_exe = MetaProgramExecutor(symbolic_model, kg_mask, args)
pred_labels = {}
pbar = tqdm(total=len(test_labels))
pred_paths_istances = {}
for uid in test_labels:
pred_paths_istances[uid] = {}
program = create_heuristic_program(kg.metapaths, raw_paths[uid], path_counts[uid], args.sample_size)
program_exe.execute(program, uid, train_valid_labels[uid])
paths = program_exe.collect_results(program)
tmp = [(r[0][-1], reduce(lambda x, y: x * y, r[1])) for r in paths]
for r in paths:
path = [("self_loop", 'user', r[0][0])]
for i in range(len(r[-1])):
path.append((r[-1][i], r[2][i], r[0][i + 1]))
if i == len(r[-1]) - 1: continue
pred_paths_istances[r[0][0]][r[0][-1]] = [(reduce(lambda x, y: x * y, r[1]), np.mean(r[1][-1]), path)]
tmp = sorted(tmp, key=lambda x: x[1], reverse=True)[:10]
pred_labels[uid] = [t[0] for t in tmp]
pbar.update(1)
save_pred_paths(args.dataset, pred_paths_istances)
msg = evaluate_with_insufficient_pred(pred_labels, test_labels, dataset_name)
logger.info(msg)
def main():
args = parse_args()
if args.do_infer:
infer_paths(args)
if args.do_execute:
        # Program execution can be repeated several times due to randomness; a single run is used here.
logfile = f'{args.log_dir}/program_exe_heuristic_ss{args.sample_size}.txt'
set_logger(logfile)
logger.info(args)
for i in range(1):
logger.info(i + 1)
run_program(args)
if __name__ == '__main__':
main()
| 16,959 | 35.085106 | 114 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/CAFE/symbolic_model.py | from __future__ import absolute_import, division, print_function
import torch
from torch import nn
from torch.nn import functional as F
from models.CAFE.knowledge_graph import *
from models.CAFE.cafe_utils import *
class EntityEmbeddingModel(nn.Module):
def __init__(self, entity_info, embed_size, init_embed=None):
super(EntityEmbeddingModel, self).__init__()
self.entity_info = entity_info
self.embed_size = embed_size
# initialize embedding
for name in entity_info:
info = entity_info[name]
embed = nn.Embedding(info["vocab_size"] + 1, self.embed_size, padding_idx=-1, sparse=False)
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(info["vocab_size"] + 1, self.embed_size).uniform_(-initrange, initrange)
embed.weight = nn.Parameter(weight)
setattr(self, name, embed)
if init_embed is not None:
for name in entity_info:
weight = torch.from_numpy(init_embed[name])
                # Copy the pretrained rows into the embedding weight (the extra padding row, if any, stays untouched).
                getattr(self, name).weight.data[:weight.size(0)] = weight
def forward(self, entity, ids=None):
if ids is None:
return getattr(self, entity).weight
return getattr(self, entity)(ids)
def vocab_size(self, entity):
return self.entity_info[entity]["vocab_size"] + 1
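# Illustrative usage sketch (toy sizes, not part of the model code): building entity
# embeddings for a minimal schema and looking up a batch of ids.
def _example_entity_embedding():
    toy_info = {'user': {'vocab_size': 10}, 'product': {'vocab_size': 20}}
    model = EntityEmbeddingModel(toy_info, embed_size=16)
    ids = torch.tensor([0, 3, 5])
    return model('user', ids).shape  # torch.Size([3, 16])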
class RelationModule(nn.Module):
def __init__(self, embed_size, relation_info):
super(RelationModule, self).__init__()
self.name = relation_info["name"]
self.eh_name = relation_info["entity_head"]
self.et_name = relation_info["entity_tail"]
self.fc1 = nn.Linear(embed_size * 2, 256)
self.bn1 = nn.BatchNorm1d(256)
self.fc2 = nn.Linear(256, embed_size)
self.dropout = nn.Dropout(0.5)
def forward(self, inputs):
"""Compute log probability of output entity.
Args:
x: a FloatTensor of size [bs, input_size].
Returns:
FloatTensor of log probability of size [bs, output_size].
"""
eh_vec, user_vec = inputs
x = torch.cat([eh_vec, user_vec], dim=-1)
x = self.bn1(self.dropout(F.relu(self.fc1(x))))
out = self.fc2(x) + eh_vec
return out
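# Hedged usage sketch (not part of the original file): illustrates the tensor shapes
# expected by RelationModule; the relation_info values below are made up for illustration.
def _example_relation_module_shapes():
    info = {"name": "watched", "entity_head": "user", "entity_tail": "product"}
    module = RelationModule(embed_size=100, relation_info=info)
    module.eval()  # put BatchNorm/Dropout in eval mode for a tiny batch
    eh_vec = torch.randn(4, 100)    # current node embeddings [bs, d]
    user_vec = torch.randn(4, 100)  # user context embeddings [bs, d]
    out = module((eh_vec, user_vec))
    return out.shape  # torch.Size([4, 100]) -- residual on eh_vec, same size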
class DeepRelationModule(nn.Module):
def __init__(self, embed_size, relation_info, use_dropout=True):
super(DeepRelationModule, self).__init__()
self.name = relation_info["name"]
self.eh_name = relation_info["entity_head"]
self.et_name = relation_info["entity_tail"]
input_size = embed_size * 2
self.fc1 = nn.Linear(input_size, 256)
self.bn1 = nn.BatchNorm1d(256)
self.fc2 = nn.Linear(256, input_size)
self.bn2 = nn.BatchNorm1d(input_size)
self.fc3 = nn.Linear(input_size, embed_size)
if use_dropout:
self.dropout = nn.Dropout(0.5)
else:
self.dropout = None
def forward(self, inputs):
eh_vec, user_vec = inputs
feature = torch.cat([eh_vec, user_vec], dim=-1)
x = F.relu(self.fc1(feature))
if self.dropout is not None:
x = self.dropout(x)
x = self.bn1(x)
x = F.relu(self.fc2(x) + feature)
if self.dropout is not None:
x = self.dropout(x)
x = self.bn2(x)
out = self.fc3(x)
return out
class SymbolicNetwork(nn.Module):
def __init__(self, relation_info, embedding, deep_module, use_dropout, device):
"""Initialize the network.
Args:
entity_info: a dict whose key is entity name and value contains attributes.
relation_info:
embed_size: embedding size.
"""
super(SymbolicNetwork, self).__init__()
self.embedding = embedding
self.embed_size = embedding.embed_size
self.device = device
self._create_modules(relation_info, deep_module, use_dropout)
self.nll_criterion = nn.NLLLoss(reduction="none")
self.ce_loss = nn.CrossEntropyLoss()
def _create_modules(self, relation_info, use_deep=False, use_dropout=True):
"""Create module for each relation."""
for name in relation_info:
info = relation_info[name]
if not use_deep:
module = RelationModule(self.embed_size, info)
else:
module = DeepRelationModule(self.embed_size, info, use_dropout)
setattr(self, name, module)
def _get_modules(self, metapath):
"""Get list of modules by metapath."""
module_seq = [] # seq len = len(metapath)-1
for relation, _ in metapath[1:]:
module = getattr(self, relation)
module_seq.append(module)
return module_seq
def _forward(self, modules, uids):
outputs = []
batch_size = uids.size(0)
user_vec = self.embedding(USER, uids) # [bs, d]
input_vec = user_vec
for module in modules:
out = module((input_vec, user_vec)).view(batch_size, -1) # [bs, d]
outputs.append(out)
input_vec = out
return outputs
def forward(self, metapath, pos_paths, neg_pids):
"""Compute loss.
Args:
metapath: list of relations, e.g. [USER, (r1, e1),..., (r_n, e_n)].
uid: a LongTensor of user ids, with size [bs, ].
target_path: a LongTensor of node ids, with size [bs, len(metapath)],
e.g. each path contains [u, e1,..., e_n].
indicator: an integer value indicating good/bad path.
teacher_forcing: use teacher forcing or not.
Returns:
logprobs: sum of log probabilities of given target node ids, with size [bs, ].
"""
# Note: len(modules) = len(metapath)-1 = len(target_path)-1
modules = self._get_modules(metapath)
outputs = self._forward(modules, pos_paths[:, 0])
# Path regularization loss
reg_loss = 0
scores = 0
for i, module in enumerate(modules):
et_vecs = self.embedding(module.et_name)
scores = torch.matmul(outputs[i], et_vecs.t())
reg_loss += self.ce_loss(scores, pos_paths[:, i + 1])
# Ranking loss
logprobs = F.log_softmax(scores, dim=1) # [bs, vocab_size]
# predict = outputs[-1] # [bs, d]
# pos_products = self.embedding(PRODUCT, pos_paths[:, -1]) # [bs, d]
# pos_score = torch.sum(predict * pos_products, dim=1) # [bs, ]
# neg_products = self.embedding(PRODUCT, neg_pids) # [bs, d]
# neg_score = torch.sum(predict * neg_products, dim=1) # [bs, ]
pos_score = torch.gather(logprobs, 1, pos_paths[:, -1].view(-1, 1))
neg_score = torch.gather(logprobs, 1, neg_pids.view(-1, 1))
rank_loss = torch.sigmoid(neg_score - pos_score).mean()
return reg_loss, rank_loss
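    # Hedged usage sketch (not in the original file): forward() returns the two loss
    # terms separately; how they are combined is up to the training loop, and
    # `rank_weight` here is an assumed hyperparameter.
    def _example_combined_loss(self, metapath, pos_paths, neg_pids, rank_weight=1.0):
        reg_loss, rank_loss = self.forward(metapath, pos_paths, neg_pids)
        return reg_loss + rank_weight * rank_loss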
def forward_simple(self, metapath, uids, pids):
modules = self._get_modules(metapath)
outputs = self._forward(modules, uids)
# Path regularization loss
products = self.embedding(PRODUCT) # [bs, d]
scores = torch.matmul(outputs[-1], products.t()) # [bs, vocab_size]
logprobs = F.log_softmax(scores, dim=1) # [bs, vocab_size]
pid_logprobs = logprobs.gather(1, pids.view(-1, 1)).view(-1)
return pid_logprobs
def infer_direct(self, metapath, uid, pids):
if len(pids) == 0:
return []
modules = self._get_modules(metapath)
uid_tensor = torch.LongTensor([uid]).to(self.device)
outputs = self._forward(modules, uid_tensor) # list of tensor of [1, d]
# Path regularization loss
pids_tensor = torch.LongTensor(pids).to(self.device)
products = self.embedding(PRODUCT) # [bs, d]
scores = torch.matmul(outputs[-1], products.t()) # [1, vocab_size]
logprobs = F.log_softmax(scores, dim=1) # [1, vocab_size]
pid_logprobs = logprobs[0][pids_tensor]
x = pid_logprobs.detach().cpu().numpy().tolist()
del uid_tensor
del pids_tensor
return x
def infer_with_path(self, metapath, uid, kg_mask, excluded_pids=None, topk_paths=5):
"""Reasoning paths over kg."""
modules = self._get_modules(metapath)
uid_tensor = torch.LongTensor([uid]).to(self.device)
outputs = self._forward(modules, uid_tensor) # list of tensor of [1, d]
layer_logprobs = []
for i, module in enumerate(modules):
et_vecs = self.embedding(module.et_name)
scores = torch.matmul(outputs[i], et_vecs.t()) # [1, vocab_size]
logprobs = F.log_softmax(scores[0], dim=0) # [vocab_size, ]
layer_logprobs.append(logprobs)
# Decide adaptive sampling size.
num_valid_ids = len(kg_mask.get_ids(USER, uid, modules[0].name))
if num_valid_ids <= 0:
return []
        sample_sizes = [topk_paths, 5, 1]
        # Alternative adaptive sampling sizes (disabled in the original code):
        # if topk_paths > num_valid_ids:
        #     sample_sizes = [num_valid_ids, int(topk_paths / num_valid_ids) + 1, 1]
result_paths = [([uid], [])] # (list of ids, list of scores)
for i, module in enumerate(modules): # iterate over each level
# If remove excluded item for the last node.
# if i == len(modules) - 1 and excluded_pids is not None:
# excluded_pids = torch.LongTensor(excluded_pids).to(self.device)
# layer_logprobs[i][excluded_pids] = -9999
tmp_paths = []
visited_ids = []
for path, value in result_paths: # both are lists
# Find valid node ids that are unvisited and not excluded pids.
valid_et_ids = kg_mask.get_ids(module.eh_name, path[-1], module.name)
valid_et_ids = set(valid_et_ids).difference(visited_ids)
if i == len(modules) - 1 and excluded_pids is not None:
valid_et_ids = valid_et_ids.difference(excluded_pids)
if len(valid_et_ids) <= 0:
continue
valid_et_ids = list(valid_et_ids)
# Compute top k nodes.
valid_et_ids = torch.LongTensor(valid_et_ids).to(self.device)
valid_et_logprobs = layer_logprobs[i].index_select(0, valid_et_ids)
k = min(sample_sizes[i], len(valid_et_ids))
topk_et_logprobs, topk_idxs = valid_et_logprobs.topk(k)
topk_et_ids = valid_et_ids.index_select(0, topk_idxs)
# layer_logprobs[i][topk_et_ids] = -9999 # prevent the nodes being selected again
# Add nodes to path separately.
topk_et_ids = topk_et_ids.detach().cpu().numpy()
topk_et_logprobs = topk_et_logprobs.detach().cpu().numpy()
for j in range(topk_et_ids.shape[0]):
new_path = path + [topk_et_ids[j]]
new_value = value + [topk_et_logprobs[j]]
tmp_paths.append((new_path, new_value))
# Remember to add the node to visited list!!!
visited_ids.append(topk_et_ids[j])
del valid_et_ids
if len(tmp_paths) <= 0:
return []
result_paths = tmp_paths
del uid_tensor
return result_paths
def create_symbolic_model(args, kg, train=True, pretrain_embeds=None):
"""Create neural symbolic model based on KG.
Args:
args: arguments.
kg (KnowledgeGraph): KG object.
train (bool, optional): is training model. Defaults to True.
Returns:
SymbolicNetwork: model object.
"""
entity_info, relation_info = {}, {}
for entity in kg.G:
entity_info[entity] = {"vocab_size": len(kg.G[entity])}
for rel in kg.relation_info:
relation_info[rel] = {
"name": rel,
"entity_head": kg.relation_info[rel][0],
"entity_tail": kg.relation_info[rel][1],
}
# pretrain_embeds = utils.load_embed(args.dataset) if train else None
entity_embed_model = EntityEmbeddingModel(entity_info, args.embed_size, init_embed=pretrain_embeds)
model = SymbolicNetwork(relation_info, entity_embed_model, args.deep_module, args.use_dropout, args.device)
model = model.to(args.device)
if train:
model.train()
else:
assert hasattr(args, "symbolic_model")
print("Load symbolic model:", args.symbolic_model)
pretrain_sd = torch.load(args.symbolic_model, map_location=lambda storage, loc: storage)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
model.eval()
return model
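# Hedged usage sketch (not part of the original file). It assumes an `args` namespace
# providing embed_size, deep_module, use_dropout and device (plus, for inference, the
# `symbolic_model` checkpoint path checked above) and an already-loaded KG object.
def _example_create_symbolic_model(args, kg):
    train_model = create_symbolic_model(args, kg, train=True)    # fresh trainable model
    # eval_model = create_symbolic_model(args, kg, train=False)  # would load args.symbolic_model and call eval()
    return train_model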
| 12,938 | 40.07619 | 111 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/traditional/load_data.py |
import collections
import os
import numpy as np
import random as rd
import torch
import torch.utils.data
from torch.utils.data import Dataset
import math
class Data(Dataset):
def __init__(self, args, path, batch_style='list'):
super(Data).__init__()
self.batch_styles = {'list': 0, 'map': 1}
assert batch_style in list(
self.batch_styles.keys()), f'Error: got {batch_style} but valid batch styles are {list(self.batch_styles.keys())}'
self.path = path
self.args = args
self.batch_style = batch_style
self.batch_style_id = self.batch_styles[self.batch_style]
self.batch_size = args.batch_size
train_file = os.path.join(path, 'preprocessed/kgat/train.txt')
valid_file = os.path.join(path, 'preprocessed/kgat/valid.txt')
test_file = os.path.join(path, 'preprocessed/kgat/test.txt')
kg_file = os.path.join(path, 'preprocessed/kgat/kg_final.txt')
# ----------get number of users and items & then load rating data from train_file & test_file------------.
self.n_train, self.n_valid, self.n_test = 0, 0, 0
self.n_users, self.n_items = 0, 0
self.train_data, self.train_user_dict = self._load_ratings(train_file)
self.valid_data, self.valid_user_dict = self._load_ratings(valid_file)
self.test_data, self.test_user_dict = self._load_ratings(test_file)
self.exist_users = list(self.train_user_dict.keys())
self.N_exist_users = len(self.exist_users)
self._statistic_ratings()
# ----------get number of entities and relations & then load kg data from kg_file ------------.
self.n_relations, self.n_entities, self.n_triples = 0, 0, 0
self.kg_data, self.kg_dict, self.relation_dict = self._load_kg(kg_file)
# ----------print the basic info about the dataset-------------.
self.batch_size_kg = self.n_triples // (self.n_train // self.batch_size)
self._print_data_info()
# reading train & test interaction data.
def _load_ratings(self, file_name):
user_dict = dict()
inter_mat = list()
lines = open(file_name, 'r').readlines()
for l in lines:
tmps = l.strip()
inters = [int(i) for i in tmps.split(' ')]
u_id, pos_ids = inters[0], inters[1:]
pos_ids = list(set(pos_ids))
for i_id in pos_ids:
inter_mat.append([u_id, i_id])
if len(pos_ids) > 0:
user_dict[u_id] = pos_ids
return np.array(inter_mat), user_dict
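    # Hedged example (not part of the original class) of the ratings file format
    # inferred from _load_ratings above: each line is "<uid> <pid> <pid> ...",
    # all space-separated integer ids, one user per line.
    def _example_ratings_line(self):
        line = "0 12 35 7\n"
        inters = [int(i) for i in line.strip().split(' ')]
        return inters[0], list(set(inters[1:]))  # (uid, positive item ids)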
def _statistic_ratings(self):
self.n_users = max(max(self.train_data[:, 0]), max(self.test_data[:, 0])) + 1
self.n_items = max(max(max(self.train_data[:, 1]), max(self.valid_data[:, 1])),
max(self.test_data[:, 1])) + 1
self.n_train = len(self.train_data)
self.n_valid = len(self.valid_data)
self.n_test = len(self.test_data)
# reading train & test interaction data.
def _load_kg(self, file_name):
def _construct_kg(kg_np):
kg = collections.defaultdict(list)
rd = collections.defaultdict(list)
for head, relation, tail in kg_np:
kg[head].append((tail, relation))
rd[relation].append((head, tail))
return kg, rd
kg_np = np.loadtxt(file_name, dtype=np.int32)
kg_np = np.unique(kg_np, axis=0)
# self.n_relations = len(set(kg_np[:, 1]))
# self.n_entities = len(set(kg_np[:, 0]) | set(kg_np[:, 2]))
self.n_relations = max(kg_np[:, 1]) + 1
self.n_entities = max(max(kg_np[:, 0]), max(kg_np[:, 2])) + 1
self.n_triples = len(kg_np)
kg_dict, relation_dict = _construct_kg(kg_np)
return kg_np, kg_dict, relation_dict
def _print_data_info(self):
print('[n_users, n_items]=[%d, %d]' % (self.n_users, self.n_items))
print('[n_train, n_test]=[%d, %d]' % (self.n_train, self.n_test))
print('[n_entities, n_relations, n_triples]=[%d, %d, %d]' % (self.n_entities, self.n_relations, self.n_triples))
print('[batch_size, batch_size_kg]=[%d, %d]' % (self.batch_size, self.batch_size_kg))
def get_sparsity_split(self):
try:
split_uids, split_state = [], []
lines = open(self.path + '/sparsity.split', 'r').readlines()
for idx, line in enumerate(lines):
if idx % 2 == 0:
split_state.append(line.strip())
print(line.strip())
else:
split_uids.append([int(uid) for uid in line.strip().split(' ')])
print('get sparsity split.')
except Exception:
split_uids, split_state = self.create_sparsity_split()
f = open(self.path + '/sparsity.split', 'w')
for idx in range(len(split_state)):
f.write(split_state[idx] + '\n')
f.write(' '.join([str(uid) for uid in split_uids[idx]]) + '\n')
print('create sparsity split.')
return split_uids, split_state
def create_sparsity_split(self):
all_users_to_test = list(self.test_user_dict.keys())
user_n_iid = dict()
# generate a dictionary to store (key=n_iids, value=a list of uid).
for uid in all_users_to_test:
train_iids = self.train_user_dict[uid]
test_iids = self.test_user_dict[uid]
n_iids = len(train_iids) + len(test_iids)
if n_iids not in user_n_iid.keys():
user_n_iid[n_iids] = [uid]
else:
user_n_iid[n_iids].append(uid)
split_uids = list()
# split the whole user set into four subset.
temp = []
count = 1
fold = 4
n_count = (self.n_train + self.n_test)
n_rates = 0
split_state = []
for idx, n_iids in enumerate(sorted(user_n_iid)):
temp += user_n_iid[n_iids]
n_rates += n_iids * len(user_n_iid[n_iids])
n_count -= n_iids * len(user_n_iid[n_iids])
if n_rates >= count * 0.25 * (self.n_train + self.n_test):
split_uids.append(temp)
state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' % (n_iids, len(temp), n_rates)
split_state.append(state)
print(state)
temp = []
n_rates = 0
fold -= 1
if idx == len(user_n_iid.keys()) - 1 or n_count == 0:
split_uids.append(temp)
state = '#inter per user<=[%d], #users=[%d], #all rates=[%d]' % (n_iids, len(temp), n_rates)
split_state.append(state)
print(state)
return split_uids, split_state
def __len__(self):
        # number of existing users after the preprocessing described in the paper;
        # determines the length of the training dataset, for which a positive and a negative item are sampled
return len(self.exist_users)
##_generate_train_cf_batch
def __getitem__(self, idx):
"""
if self.batch_size <= self.n_users:
user = rd.sample(self.exist_users, self.batch_size)
else:
users = [rd.choice(self.exist_users) for _ in range(self.batch_size)]
"""
def sample_pos_items_for_u(u, num):
pos_items = self.train_user_dict[u]
n_pos_items = len(pos_items)
pos_batch = []
while True:
if len(pos_batch) == num: break
pos_id = np.random.randint(low=0, high=n_pos_items, size=1)[0]
pos_i_id = pos_items[pos_id]
if pos_i_id not in pos_batch:
pos_batch.append(pos_i_id)
return pos_batch
def sample_neg_items_for_u(u, num):
neg_items = []
while True:
if len(neg_items) == num: break
neg_i_id = np.random.randint(low=0, high=self.n_items, size=1)[0]
if neg_i_id not in self.train_user_dict[u] and neg_i_id not in neg_items:
neg_items.append(neg_i_id)
return neg_items
"""
pos_items, neg_items = [], []
for u in users:
pos_items += sample_pos_items_for_u(u, 1)
neg_items += sample_neg_items_for_u(u, 1)
"""
u = self.exist_users[idx]
pos_item = sample_pos_items_for_u(u, 1)
neg_item = sample_neg_items_for_u(u, 1)
if len(pos_item) == 1:
pos_item = pos_item[0]
if len(neg_item) == 1:
neg_item = neg_item[0]
if self.batch_style_id == 0:
return u, pos_item, neg_item
else:
return {'users': u, 'pos_items': pos_item,
'neg_items': neg_item} # u, pos_item, neg_item #users, pos_items, neg_items
def as_test_feed_dict(self, model, user_batch, item_batch, drop_flag=True):
feed_dict = {
model.users: user_batch,
model.pos_items: item_batch,
model.mess_dropout: [0.] * len(eval(self.args.layer_size)),
model.node_dropout: [0.] * len(eval(self.args.layer_size)),
}
return feed_dict
def as_train_feed_dict(self, model, batch_data):
if self.batch_style_id == 0:
users, pos_items, neg_items = batch_data
batch_data = {}
batch_data['users'] = users
batch_data['pos_items'] = pos_items
batch_data['neg_items'] = neg_items
feed_dict = {
model.users: batch_data['users'],
model.pos_items: batch_data['pos_items'],
model.neg_items: batch_data['neg_items'],
model.mess_dropout: eval(self.args.mess_dropout),
model.node_dropout: eval(self.args.node_dropout),
}
return feed_dict
| 9,960 | 35.756458 | 126 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/traditional/NFM/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.traditional.metrics as metrics
from parser import parse_args
import multiprocessing
import heapq
import numpy as np
import random
from torch.utils.data import DataLoader, RandomSampler
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_nfm import NFM_loader
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
ds = NFM_loader(args=args, path=args.data_path + args.dataset)
data_generator['dataset'] = ds
data_generator['loader'] = DataLoader(ds,
batch_size=ds.batch_size,
sampler=RandomSampler(ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = True
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc
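# Hedged worked example (not part of the original file): with the scores below and
# Ks = [3], the top-3 items by score are [4, 0, 2]; items 4 and 0 are in the test set,
# so the binary hit list is [1, 1, 0].
def _example_ranklist():
    rating = [0.9, 0.1, 0.5, 0.2, 0.95]
    r, _ = ranklist_by_heapq(user_pos_test={0, 4}, test_items=[0, 1, 2, 3, 4],
                             rating=rating, Ks=[3])
    return r  # [1, 1, 0]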
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
    # user u's predicted rating scores over all items
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
if args.test_flag == 'part':
r, auc = ranklist_by_heapq(user_pos_test, test_items, rating, Ks)
else:
r, auc = ranklist_by_sorted(user_pos_test, test_items, rating, Ks)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
return get_performance(user_pos_test, r, auc, Ks)
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
assert count == n_test_users
pool.close()
return result | 7,649 | 31.008368 | 101 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/traditional/FM/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.traditional.metrics as metrics
from parser import parse_args
import multiprocessing
import heapq
import numpy as np
import random
from torch.utils.data import DataLoader, RandomSampler
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_fm import FM_loader
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
ds = FM_loader(args=args, path=args.data_path + args.dataset)
data_generator['dataset'] = ds
data_generator['loader'] = DataLoader(ds,
batch_size=ds.batch_size,
sampler=RandomSampler(ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = True
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
    # user u's predicted rating scores over all items
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
if args.test_flag == 'part':
r, auc = ranklist_by_heapq(user_pos_test, test_items, rating, Ks)
else:
r, auc = ranklist_by_sorted(user_pos_test, test_items, rating, Ks)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
return get_performance(user_pos_test, r, auc, Ks)
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
assert count == n_test_users
pool.close()
return result | 7,646 | 30.995816 | 101 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/traditional/BPRMF/batch_test.py | '''
Created on Dec 18, 2018
Tensorflow Implementation of Knowledge Graph Attention Network (KGAT) model in:
Wang Xiang et al. KGAT: Knowledge Graph Attention Network for Recommendation. In KDD 2019.
@author: Xiang Wang (xiangwang@u.nus.edu)
'''
import models.traditional.metrics as metrics
from parser import parse_args
import multiprocessing
import heapq
import numpy as np
import random
from torch.utils.data import DataLoader, RandomSampler
import torch
torch.multiprocessing.set_sharing_strategy('file_system')
from loader_bprmf import BPRMF_loader
train_cores = multiprocessing.cpu_count()
test_cores = multiprocessing.cpu_count()//2
args = parse_args()
Ks = eval(args.Ks)
data_generator = {}
MANUAL_SEED = 2019
torch.manual_seed(MANUAL_SEED)
def seed_worker(worker_id):
torch.manual_seed(MANUAL_SEED)
np.random.seed(MANUAL_SEED)
random.seed(MANUAL_SEED)
g = torch.Generator(device='cpu')
g.manual_seed(MANUAL_SEED)
ds = BPRMF_loader(args=args, path=args.data_path + args.dataset)
data_generator['dataset'] = ds
data_generator['loader'] = DataLoader(ds,
batch_size=ds.batch_size,
sampler=RandomSampler(ds,
replacement=True,
generator=g) if args.with_replacement else None,
shuffle=False if args.with_replacement else True,
num_workers=train_cores,
drop_last=True,
persistent_workers=True
)
batch_test_flag = False
USR_NUM, ITEM_NUM = data_generator['dataset'].n_users, data_generator['dataset'].n_items
N_TRAIN, N_TEST = data_generator['dataset'].n_train, data_generator['dataset'].n_test
BATCH_SIZE = args.batch_size
def get_auc(item_score, user_pos_test):
item_score = sorted(item_score.items(), key=lambda kv: kv[1])
item_score.reverse()
item_sort = [x[0] for x in item_score]
posterior = [x[1] for x in item_score]
r = []
for i in item_sort:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = metrics.auc(ground_truth=r, prediction=posterior)
return auc
def ranklist_by_heapq(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = 0.
return r, auc
def ranklist_by_sorted(user_pos_test, test_items, rating, Ks):
item_score = {}
for i in test_items:
item_score[i] = rating[i]
K_max = max(Ks)
K_max_item_score = heapq.nlargest(K_max, item_score, key=item_score.get)
r = []
for i in K_max_item_score:
if i in user_pos_test:
r.append(1)
else:
r.append(0)
auc = get_auc(item_score, user_pos_test)
return r, auc
def get_performance(user_pos_test, r, auc, Ks):
precision, recall, ndcg, hit_ratio = [], [], [], []
for K in Ks:
precision.append(metrics.precision_at_k(r, K))
recall.append(metrics.recall_at_k(r, K, len(user_pos_test)))
ndcg.append(metrics.ndcg_at_k(r, K))
hit_ratio.append(metrics.hit_at_k(r, K))
return {'recall': np.array(recall), 'precision': np.array(precision),
'ndcg': np.array(ndcg), 'hit_ratio': np.array(hit_ratio), 'auc': auc}
def test_one_user(x):
    # user u's predicted rating scores over all items
rating = x[0]
#uid
u = x[1]
#user u's items in the training set
try:
training_items = data_generator['dataset'].train_user_dict[u]
valid_items = data_generator['dataset'].valid_user_dict[u]
except Exception:
training_items = []
valid_items = []
#user u's items in the test set
user_pos_test = data_generator['dataset'].test_user_dict[u]
all_items = set(range(ITEM_NUM))
test_items = list((all_items - set(training_items)) - set(valid_items) )
if args.test_flag == 'part':
r, auc = ranklist_by_heapq(user_pos_test, test_items, rating, Ks)
else:
r, auc = ranklist_by_sorted(user_pos_test, test_items, rating, Ks)
# # .......checking.......
# try:
# assert len(user_pos_test) != 0
# except Exception:
# print(u)
# print(training_items)
# print(user_pos_test)
# exit()
# # .......checking.......
return get_performance(user_pos_test, r, auc, Ks)
def test(sess, model, users_to_test, drop_flag=False, batch_test_flag=False):
result = {'precision': np.zeros(len(Ks)), 'recall': np.zeros(len(Ks)), 'ndcg': np.zeros(len(Ks)),
'hit_ratio': np.zeros(len(Ks)), 'auc': 0.}
pool = multiprocessing.Pool(test_cores)
if args.model_type in ['ripple']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE // 20
elif args.model_type in ['fm', 'nfm']:
u_batch_size = BATCH_SIZE
i_batch_size = BATCH_SIZE
else:
u_batch_size = BATCH_SIZE * 2
i_batch_size = BATCH_SIZE
test_users = users_to_test
n_test_users = len(test_users)
n_user_batchs = n_test_users // u_batch_size + 1
count = 0
DATASET_KEY = 'A_dataset' if args.model_type == 'cke' else 'dataset'
for u_batch_id in range(n_user_batchs):
start = u_batch_id * u_batch_size
end = (u_batch_id + 1) * u_batch_size
user_batch = test_users[start: end]
if batch_test_flag:
n_item_batchs = ITEM_NUM // i_batch_size + 1
rate_batch = np.zeros(shape=(len(user_batch), ITEM_NUM))
i_count = 0
for i_batch_id in range(n_item_batchs):
i_start = i_batch_id * i_batch_size
i_end = min((i_batch_id + 1) * i_batch_size, ITEM_NUM)
item_batch = range(i_start, i_end)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
i_rate_batch = model.eval(sess, feed_dict=feed_dict)
i_rate_batch = i_rate_batch.reshape((-1, len(item_batch)))
rate_batch[:, i_start: i_end] = i_rate_batch
i_count += i_rate_batch.shape[1]
assert i_count == ITEM_NUM
else:
item_batch = range(ITEM_NUM)
feed_dict = data_generator[DATASET_KEY].as_test_feed_dict(model=model,
user_batch=user_batch,
item_batch=item_batch,
drop_flag=drop_flag)
rate_batch = model.eval(sess, feed_dict=feed_dict)
rate_batch = rate_batch.reshape((-1, len(item_batch)))
user_batch_rating_uid = zip(rate_batch, user_batch)
batch_result = pool.map(test_one_user, user_batch_rating_uid)
count += len(batch_result)
for re in batch_result:
result['precision'] += re['precision']/n_test_users
result['recall'] += re['recall']/n_test_users
result['ndcg'] += re['ndcg']/n_test_users
result['hit_ratio'] += re['hit_ratio']/n_test_users
result['auc'] += re['auc']/n_test_users
assert count == n_test_users
pool.close()
return result | 7,675 | 31.117155 | 101 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/test.py | from __future__ import absolute_import, division, print_function
import os
import argparse
import json
from math import log
from datetime import datetime
from tqdm import tqdm
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
import threading
from functools import reduce
import time
import pickle
import gc
from easydict import EasyDict as edict
import itertools
from models.UCPR.utils import *
from models.UCPR.src.model.get_model.get_model import *
from models.UCPR.src.parser import parse_args
from models.UCPR.src.para_setting import parameter_path, parameter_path_th
import collections
def dcg_at_k(r, k, method=1):
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=1):
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
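# Hedged worked example (not part of the original file): with hits = [1, 0, 1, 0, 0],
# DCG@5 = 1/log2(2) + 1/log2(4) = 1.5 and the ideal DCG@5 = 1/log2(2) + 1/log2(3) ~= 1.631,
# so NDCG@5 ~= 0.92.
def _example_ndcg():
    hits = [1, 0, 1, 0, 0]
    return ndcg_at_k(hits, 5)  # ~0.9197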
def save_output(dataset_name, pred_paths):
extracted_path_dir = LOG_DATASET_DIR[dataset_name]
if not os.path.isdir(extracted_path_dir):
os.makedirs(extracted_path_dir)
print("Normalizing items scores...")
    # Get min and max score to perform normalization between 0 and 1
    score_list = []
    for uid, pid_paths in pred_paths.items():
        for pid, path_list in pid_paths.items():
for path in path_list:
score_list.append(float(path[0]))
min_score = min(score_list)
max_score = max(score_list)
print("Saving pred_paths...")
for uid in pred_paths.keys():
curr_pred_paths = pred_paths[uid]
for pid in curr_pred_paths.keys():
curr_pred_paths_for_pid = curr_pred_paths[pid]
for i, curr_path in enumerate(curr_pred_paths_for_pid):
path_score = pred_paths[uid][pid][i][0]
path_prob = pred_paths[uid][pid][i][1]
path = pred_paths[uid][pid][i][2]
new_path_score = (float(path_score) - min_score) / (max_score - min_score)
pred_paths[uid][pid][i] = (new_path_score, path_prob, path)
with open(extracted_path_dir + "/pred_paths.pkl", 'wb') as pred_paths_file:
pickle.dump(pred_paths, pred_paths_file)
pred_paths_file.close()
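# Hedged worked example of the min-max normalization above (not part of the original
# file): with scores [2.0, 4.0, 10.0], min = 2 and max = 10, so 4.0 maps to (4-2)/(10-2) = 0.25.
def _example_minmax():
    scores = [2.0, 4.0, 10.0]
    lo, hi = min(scores), max(scores)
    return [(s - lo) / (hi - lo) for s in scores]  # [0.0, 0.25, 1.0]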
def evaluate(topk_matches, test_user_products, no_skip_user, dataset_name):
"""Compute metrics for predicted recommendations.
Args:
        topk_matches: dict mapping each uid to its top-k predicted product ids, ordered
            from lowest to highest score (reversed to best-first inside this function).
        test_user_products: dict mapping each uid to its ground-truth test product ids.
        no_skip_user: dict of uids for which at least one path was predicted.
        dataset_name: dataset name, used to locate the metrics output file.
"""
invalid_users = []
# Compute metrics
metrics = edict(
# ndcg_other=[],
ndcg=[],
hr=[],
precision=[],
recall=[],
)
ndcgs = []
test_user_idxs = list(test_user_products.keys())
x = defaultdict(int)
rel_size = []
for uid in test_user_idxs:
if uid not in topk_matches or len(topk_matches[uid]) < 10:
x['a'] +=1
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if uid not in no_skip_user:
x['b'] += 1
continue
if len(pred_list) == 0:
x['c'] +=1
continue
rel_size.append(len(rel_set))
k = 0
hit_num = 0.0
hit_list = []
for pid in pred_list:
k += 1
if pid in rel_set:
hit_num += 1
hit_list.append(1)
else:
hit_list.append(0)
#print(k, len(hit_list), collections.Counter(hit_list))
ndcg = ndcg_at_k(hit_list, k)
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
metrics.ndcg.append(ndcg)
metrics.hr.append(hit)
metrics.recall.append(recall)
metrics.precision.append(precision)
avg_metrics = edict(
ndcg=[],
hr=[],
precision=[],
recall=[],
)
print("Average test set size: ", np.array(rel_size).mean())
for metric, values in metrics.items():
avg_metrics[metric] = np.mean(values)
avg_metric_value = np.mean(values) * 100 if metric == "ndcg_other" else np.mean(values)
n_users = len(values)
print("Overall for noOfUser={}, {}={:.4f}".format(n_users, metric,
avg_metric_value))
print("\n")
makedirs(dataset_name)
with open(RECOM_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump(metrics,f)
return avg_metrics.precision, avg_metrics.recall, avg_metrics.ndcg, avg_metrics.hr,\
invalid_users
def batch_beam_search(args, env, model, uids, device, topk=[25, 5, 1]):
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(args.epochs,uids) # numpy of [bs, dim]
model.reset(uids)
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
index_ori_list = [_ for _ in range(len(uids))]
idx_list = [i for i in range(len(uids))]
# print('idx_list = ', idx_list)
model.eval()
for hop in range(3):
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
state_tensor = model.generate_st_emb(path_pool, up_date_hop = idx_list)
batch_next_action_emb = model.generate_act_emb(path_pool, acts_pool)
actmask_tensor = torch.BoolTensor(actmask_pool).to(device)
try:
next_enti_emb, next_action_emb = batch_next_action_emb[0], batch_next_action_emb[1]
probs, _ = model((state_tensor[0],state_tensor[1], next_enti_emb, next_action_emb, actmask_tensor))
        except Exception:
probs, _ = model((state_tensor, batch_next_action_emb, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
del actmask_tensor
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool, new_index_pool, new_idx = [], [], [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
index_ori = index_ori_list[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
if args.envir == 'p1':
next_node_type = KG_RELATION[args.dataset][path[-1][1]][relation]#KG_RELATION[path[-1][1]][relation]
else:
next_node_type = KG_RELATION[args.dataset][path[-1][1]][relation]#env.et_idx2ty[next_node_id]
# next_node_type = KG_RELATION[path[-1][1]][relation]
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
new_index_pool.append(index_ori)
new_idx.append(row)
print(len(new_path_pool))
print(len(new_idx))
print()
path_pool = new_path_pool
probs_pool = new_probs_pool
index_ori_list = new_index_pool
idx_list = new_idx
gc.collect()
return path_pool, probs_pool
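# Hedged note on the return format of batch_beam_search (inferred from the code above,
# not authoritative): every entry of path_pool is a list of (relation, node_type, node_id)
# triples starting at the user node, and probs_pool[i] holds the per-hop probabilities of
# path_pool[i]. Illustrative values only; relation/type names depend on the dataset.
def _example_beam_output():
    path = [('self_loop', 'user', 42), ('watched', 'product', 7),
            ('watched', 'user', 3), ('watched', 'product', 99)]
    probs = [0.31, 0.12, 0.54]
    return [path], [probs]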
def predict_paths(args, policy_file, path_file, train_labels, test_labels, pretest):
print('Predicting paths...')
env = KG_Env(args, Dataset(args), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
print(policy_file)
print('Loading pretrain')
pretrain_sd = torch.load(policy_file)
print('Loading model')
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index,
env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
print('Model loaded')
test_uids = list(test_labels.keys())
test_uids = [uid for uid in test_uids if uid in train_labels and uid in env.user_list]
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
times = 0
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
# print(' bar state/text_uid = ', start_idx, '/', len(test_uids), end = '\r')
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
print(f'{start_idx}/{ len(test_uids)}')
paths, probs = batch_beam_search(args, env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
times += 1
if times % 50 == 0:
str_batch_uids = [str(st) for st in batch_uids]
fail_uids = ",".join(str_batch_uids)
fail_batch = f"'batch_uids = ', {fail_uids}, {str(start_idx)}, {str(end_idx)}"
args.logger.info(fail_batch)
if pretest == 1 and times >= 100: break
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def get_validation_pids(dataset_name):
if not os.path.isfile(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')):
return []
validation_pids = defaultdict(set)
with open(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')) as valid_file:
reader = csv.reader(valid_file, delimiter=" ")
for row in reader:
uid = int(row[0])
pid = int(row[1])
validation_pids[uid].add(pid)
valid_file.close()
return validation_pids
def extract_paths(dataset_name, path_file, train_labels, valid_labels, test_labels):
embeds = load_embed(dataset_name)
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset_name]
user_embeds = embeds[USER]
purchase_embeds = embeds[main_interaction][0]
product_embeds = embeds[main_product]#[0]
print(user_embeds.shape)
print(purchase_embeds.shape)
print(product_embeds[0].shape)
print(product_embeds.shape)
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
print(scores.shape)
validation_pids = get_validation_pids(dataset_name)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
print(results.keys())
x = defaultdict(int)
for idx, (path, probs) in enumerate(zip(results['paths'], results['probs'])):
if path[-1][1] != main_product:
#print('a')
x['a'] += 1
continue
uid = path[0][2]
if uid not in total_pre_user_num:
total_pre_user_num[uid] = len(total_pre_user_num)
x['b'] += 1
#print('b')
if uid not in pred_paths:
#print('c')
x['c'] += 1
continue
pid = path[-1][2]
if uid in valid_labels and pid in valid_labels[uid]:
#print('d')
x['d'] += 1
continue
if pid in train_labels[uid]:
#print('e')
x['e'] += 1
continue
if pid not in pred_paths[uid]:
#print('f')
x['f'] += 1
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
print(x)
#print(pred_paths)
save_output(dataset_name, pred_paths)
return pred_paths, scores
def evaluate_paths(topk,dataset_name, pred_paths, scores, train_labels,
test_labels, args, path_file, pretest=1):
# train_labels, test_labels, args, path_file, pretest=pretest):
'''
embeds = load_embed(args.dataset)
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[args.dataset]
user_embeds = embeds[USER]
purchase_embeds = embeds[main_interaction][0]
product_embeds = embeds[main_product]#[0]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
validation_pids = get_validation_pids(args.dataset)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels }#if uid in test_labels}#train_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != main_product:
continue
uid = path[0][2]
if uid not in total_pre_user_num:
total_pre_user_num[uid] = len(total_pre_user_num)
if uid not in pred_paths:
continue
pid = path[-1][2]
if uid in valid_labels and pid in valid_labels[uid]:
continue
if pid in train_labels[uid]:
continue
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
save_output(args.dataset, pred_paths)
'''
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
from collections import defaultdict
#best_pred_paths_logging = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
#best_pred_paths_logging[uid] = []#defaultdict(list)
for pid in pred_paths[uid]:
if pid in train_pids:
continue
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
#best_pred_paths_logging[uid].append(sorted_path[0])
def prob_keyget(x):
return (x[1], x[0])
def score_keyget(x):
return (x[0], x[1])
sort_by = 'score'
pred_labels = {}
pred_paths_top10 = {}
total_pro_num = 0
for uid in best_pred_paths:
if sort_by == 'score':
keygetter = score_keyget
elif sort_by == 'prob':
keygetter = prob_keyget
sorted_path = sorted(best_pred_paths[uid], key=keygetter, reverse=True)
top10_pids = [p[-1][2] for _, _, p in sorted_path[:10]]
top10_paths = [p for _, _, p in sorted_path[:10]]
if args.add_products and len(top10_pids) < 10:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in top10_pids:
continue
top10_pids.append(cand_pid)
if len(top10_pids) >= 10:
break
pred_labels[uid] = top10_pids[::-1] # change order to from smallest to largest!
pred_paths_top10[uid] = top10_paths[::-1]
#print(uid, len(pred_labels[uid]), pred_labels[uid])
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
avg_precision, avg_recall, avg_ndcg, avg_hit, invalid_users = evaluate(pred_labels,
test_labels, no_skip_user, dataset_name)
print('precision: ', avg_precision)
print('recall: ', avg_recall)
print('ndcg: ', avg_ndcg)
print('hit: ', avg_hit)
    # Weight w of pattern pi: log(2 + (number of u-v paths sharing the same pattern type / total number of predicted u-v paths))
def get_path_pattern_weigth(path_pattern_name, pred_uv_paths):
n_same_path_pattern = 0
total_paths = len(pred_uv_paths)
for path in pred_uv_paths:
if path_pattern_name == get_path_pattern(path):
n_same_path_pattern += 1
return log(2 + (n_same_path_pattern / total_paths))
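# Hedged worked example of the weighting above (not part of the original file): if 3 of
# the 10 predicted u-v paths share the pattern, the weight is log(2 + 3/10) = log(2.3) ~= 0.83
# (natural log, since `log` comes from the math module).
def _example_path_pattern_weight():
    return log(2 + (3 / 10))  # ~0.8329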
def test(args, train_labels, valid_labels, test_labels, best_recall, pretest = 1):
print('start predict')
policy_file = args.policy_path #args.save_model_dir + '/policy_model_epoch_{}.ckpt'.format(35)#args.eva_epochs)
path_file = os.path.join(TMP_DIR[args.dataset], 'policy_paths_epoch{}_{}.pkl'.format(args.eva_epochs, args.topk_string)) #args.save_model_dir + '/' + 'pre' + str(pretest) + 'policy_paths_epoch{}_{}.pkl'.format(args.eva_epochs, args.topk_string)
#if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
#sort_by_2 = 'prob'
#eva_file_2 = args.log_dir + '/' + 'pre' + str(pretest) + sort_by_2 + '_eva'+ '_' + args.topk_string + '.txt'
TOP_N_LOGGING = 100
if args.run_path or os.path.exists(path_file) == False:
predict_paths(args, policy_file, path_file, train_labels, test_labels, pretest)#predict_paths(policy_file, path_file, args)
    if args.save_paths or args.run_eval:
pred_paths, scores = extract_paths(args.dataset, path_file, train_labels, valid_labels, test_labels)
if args.run_eval:
evaluate_paths(TOP_N_LOGGING,args.dataset, pred_paths, scores,
train_labels, test_labels, args, path_file, pretest=pretest)
if __name__ == '__main__':
args = parse_args()
args.training = 0
args.training = (args.training == 1)
args.att_evaluation = False
if args.envir == 'p1':
para_env = parameter_path_th
KG_Env = KGEnvironment
elif args.envir == 'p2':
para_env = parameter_path
KG_Env = KGEnvironment
para_env(args)
train_labels = load_labels(args.dataset, 'train')
valid_labels = load_labels(args.dataset, 'valid')
test_labels = load_labels(args.dataset, 'test')
best_recall = 0
args.eva_epochs = args.best_model_epoch
test(args, train_labels, valid_labels, test_labels, best_recall, pretest = 0)
| 19,385 | 34.966605 | 248 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/utils.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import os
import sys
import random
import pickle
import logging
import logging.handlers
import numpy as np
import csv
# import scipy.sparse as sp
import torch
from collections import defaultdict
import shutil
# Dataset names.
# from sklearn.feature_extraction.text import TfidfTransformer
ML1M = 'ml1m'
LFM1M = 'lfm1m'
CELL = 'cellphones'
ROOT_DIR = os.environ['TREX_DATA_ROOT'] if 'TREX_DATA_ROOT' in os.environ else '../..'
# STILL NOT SUPPORTED = beauty, cell, cloth
BEAUTY_CORE ='beauty'
CELL_CORE = CELL
CLOTH_CORE = 'cloth'
# retro compatibility
MOVIE_CORE = 'ml1m'
AZ_BOOK_CORE = 'book'
MODEL = 'ucpr'
TRANSE='transe'
# Dataset directories.
DATASET_DIR = {
ML1M: f'{ROOT_DIR}/data/{ML1M}/preprocessed/{MODEL}',
LFM1M: f'{ROOT_DIR}/data/{LFM1M}/preprocessed/{MODEL}',
CELL: f'{ROOT_DIR}/data/{CELL}/preprocessed/{MODEL}'
}
# Model result directories.
TMP_DIR = {
ML1M: f'{DATASET_DIR[ML1M]}/tmp',
LFM1M: f'{DATASET_DIR[LFM1M]}/tmp',
CELL: f'{DATASET_DIR[CELL]}/tmp',
}
VALID_METRICS_FILE_NAME = 'valid_metrics.json'
TRANSE_OPT_METRIC = 'valid_loss'
#OPTIM_HPARAMS_METRIC = 'avg_valid_reward'
OPTIM_HPARAMS_METRIC = 'valid_reward'
OPTIM_HPARAMS_LAST_K = 100 # last 100 episodes
#OPTIM_HPARAMS_METRIC = 'ndcg'
LOG_DIR = f'{ROOT_DIR}/results'
LOG_DATASET_DIR = {
ML1M: f'{LOG_DIR}/{ML1M}/{MODEL}',
LFM1M: f'{LOG_DIR}/{LFM1M}/{MODEL}',
CELL: f'{LOG_DIR}/{CELL}/{MODEL}',
}
# For compatibility, CFG_DIR and BEST_CFG_DIR have been modified s.t. they are independent of the dataset
CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/hparams_cfg',
}
BEST_CFG_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/best_hparams_cfg',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/best_hparams_cfg',
CELL: f'{LOG_DATASET_DIR[CELL]}/best_hparams_cfg',
}
TEST_METRICS_FILE_NAME = 'test_metrics.json'
RECOM_METRICS_FILE_NAME = 'recommender_metrics.json'
RECOM_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{RECOM_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{RECOM_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{RECOM_METRICS_FILE_NAME}',
}
TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
BEST_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TEST_METRICS_FILE_NAME}',
}
CONFIG_FILE_NAME = 'config.json'
CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
BEST_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{CONFIG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{CONFIG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{CONFIG_FILE_NAME}',
}
TRANSE_TEST_METRICS_FILE_NAME = 'test_metrics_transe.json'
TRANSE_CFG_FILE_NAME = 'config_transe.json'
TRANSE_TEST_METRICS_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TRANSE_TEST_METRICS_FILE_NAME}',
}
BEST_TRANSE_TEST_METRICS_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TRANSE_TEST_METRICS_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TRANSE_TEST_METRICS_FILE_NAME}',
}
TRANSE_CFG_FILE_PATH = {
ML1M: f'{CFG_DIR[ML1M]}/{TRANSE_CFG_FILE_NAME}',
LFM1M: f'{CFG_DIR[LFM1M]}/{TRANSE_CFG_FILE_NAME}',
CELL: f'{CFG_DIR[CELL]}/{TRANSE_CFG_FILE_NAME}',
}
BEST_TRANSE_CFG_FILE_PATH = {
ML1M: f'{BEST_CFG_DIR[ML1M]}/{TRANSE_CFG_FILE_NAME}',
LFM1M: f'{BEST_CFG_DIR[LFM1M]}/{TRANSE_CFG_FILE_NAME}',
CELL: f'{BEST_CFG_DIR[CELL]}/{TRANSE_CFG_FILE_NAME}',
}
TRANSE_HPARAMS_FILE = f'transe_{MODEL}_hparams_file.json'
HPARAMS_FILE = f'{MODEL}_hparams_file.json'
SAVE_MODEL_DIR = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/save',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/save',
CELL: f'{LOG_DATASET_DIR[CELL]}/save',
}
EVALUATION = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/eva_pre',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/eva_pre',
CELL: f'{LOG_DATASET_DIR[CELL]}/eva_pre',
}
EVALUATION_2 = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/eval',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/eval',
CELL: f'{LOG_DATASET_DIR[CELL]}/eval',
}
CASE_ST = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/case_st',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/case_st',
CELL: f'{LOG_DATASET_DIR[CELL]}/case_st',
}
TEST = {
ML1M: f'{LOG_DATASET_DIR[ML1M]}/test',
LFM1M: f'{LOG_DATASET_DIR[LFM1M]}/test',
CELL: f'{LOG_DATASET_DIR[CELL]}/test',
}
# Label files.
LABELS = {
ML1M: (TMP_DIR[ML1M] + '/train_label.pkl', TMP_DIR[ML1M] + '/valid_label.pkl', TMP_DIR[ML1M] + '/test_label.pkl'),
LFM1M: (TMP_DIR[LFM1M] + '/train_label.pkl', TMP_DIR[LFM1M] + '/valid_label.pkl', TMP_DIR[LFM1M] + '/test_label.pkl'),
CELL: (TMP_DIR[CELL] + '/train_label.pkl', TMP_DIR[CELL] + '/valid_label.pkl', TMP_DIR[CELL] + '/test_label.pkl')
}
# UCPR SPECIFIC RELATIONS
PADDING = 'padding'
SELF_LOOP = 'self_loop'
# ENTITIES/RELATIONS SHARED BY ALL DATASETS
USER = 'user'
PRODUCT = 'product'
INTERACTION = {
ML1M: "watched",
LFM1M: "listened",
CELL: "purchase",
}
PRODUCED_BY_PRODUCER = 'produced_by_producer'
PRODUCER = 'producer'
# ML1M ENTITIES
CINEMATOGRAPHER = 'cinematographer'
PRODCOMPANY = 'prodcompany'
COMPOSER = 'composer'
CATEGORY = 'category'
ACTOR = 'actor'
COUNTRY = 'country'
WIKIPAGE = 'wikipage'
EDITOR = 'editor'
WRITTER = 'writter'
DIRECTOR = 'director'
# LASTFM ENTITIES
ARTIST = 'artist'
ENGINEER = 'engineer'
GENRE = 'genre'
# CELL ENTITIES
BRAND = 'brand'
RPRODUCT = 'rproduct'
# ML1M RELATIONS
DIRECTED_BY_DIRECTOR = 'directed_by_director'
PRODUCED_BY_COMPANY = 'produced_by_prodcompany'
STARRED_BY_ACTOR = 'starred_by_actor'
RELATED_TO_WIKIPAGE = 'related_to_wikipage'
EDITED_BY_EDITOR = 'edited_by_editor'
WROTE_BY_WRITTER = 'wrote_by_writter'
CINEMATOGRAPHY_BY_CINEMATOGRAPHER = 'cinematography_by_cinematographer'
COMPOSED_BY_COMPOSER = 'composed_by_composer'
PRODUCED_IN_COUNTRY = 'produced_in_country'
BELONG_TO_CATEGORY = 'belong_to_category'
# LASTFM RELATIONS
MIXED_BY_ENGINEER = 'mixed_by_engineer'
FEATURED_BY_ARTIST = 'featured_by_artist'
BELONG_TO_GENRE = 'belong_to_genre'
# CELL RELATIONS
PURCHASE = 'purchase'
ALSO_BOUGHT_RP = 'also_bought_related_product'
ALSO_VIEWED_RP = 'also_viewed_related_product'
ALSO_BOUGHT_P = 'also_bought_product'
ALSO_VIEWED_P = 'also_viewed_product'
KG_RELATION = {
ML1M: {
USER: {
INTERACTION[ML1M]: PRODUCT,
},
ACTOR: {
STARRED_BY_ACTOR: PRODUCT,
},
DIRECTOR: {
DIRECTED_BY_DIRECTOR: PRODUCT,
},
PRODUCT: {
INTERACTION[ML1M]: USER,
PRODUCED_BY_COMPANY: PRODCOMPANY,
PRODUCED_BY_PRODUCER: PRODUCER,
EDITED_BY_EDITOR: EDITOR,
WROTE_BY_WRITTER: WRITTER,
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: CINEMATOGRAPHER,
BELONG_TO_CATEGORY: CATEGORY,
DIRECTED_BY_DIRECTOR: DIRECTOR,
STARRED_BY_ACTOR: ACTOR,
COMPOSED_BY_COMPOSER: COMPOSER,
PRODUCED_IN_COUNTRY: COUNTRY,
RELATED_TO_WIKIPAGE: WIKIPAGE,
},
PRODCOMPANY: {
PRODUCED_BY_COMPANY: PRODUCT,
},
COMPOSER: {
COMPOSED_BY_COMPOSER: PRODUCT,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
WRITTER: {
WROTE_BY_WRITTER: PRODUCT,
},
EDITOR: {
EDITED_BY_EDITOR: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
CINEMATOGRAPHER: {
CINEMATOGRAPHY_BY_CINEMATOGRAPHER: PRODUCT,
},
COUNTRY: {
PRODUCED_IN_COUNTRY: PRODUCT,
},
WIKIPAGE: {
RELATED_TO_WIKIPAGE: PRODUCT,
}
},
LFM1M: {
USER: {
INTERACTION[LFM1M]: PRODUCT,
},
ARTIST: {
FEATURED_BY_ARTIST: PRODUCT,
},
ENGINEER: {
MIXED_BY_ENGINEER: PRODUCT,
},
PRODUCT: {
INTERACTION[LFM1M]: USER,
PRODUCED_BY_PRODUCER: PRODUCER,
FEATURED_BY_ARTIST: ARTIST,
MIXED_BY_ENGINEER: ENGINEER,
BELONG_TO_GENRE: GENRE,
},
PRODUCER: {
PRODUCED_BY_PRODUCER: PRODUCT,
},
GENRE: {
BELONG_TO_GENRE: PRODUCT,
},
},
CELL: {
USER: {
PURCHASE: PRODUCT,
},
PRODUCT: {
PURCHASE: USER,
PRODUCED_BY_COMPANY: BRAND,
BELONG_TO_CATEGORY: CATEGORY,
ALSO_BOUGHT_RP: RPRODUCT,
ALSO_VIEWED_RP: RPRODUCT,
ALSO_BOUGHT_P: PRODUCT,
ALSO_VIEWED_P: PRODUCT,
},
BRAND: {
PRODUCED_BY_COMPANY: PRODUCT,
},
CATEGORY: {
BELONG_TO_CATEGORY: PRODUCT,
},
RPRODUCT: {
ALSO_BOUGHT_RP: PRODUCT,
ALSO_VIEWED_RP: PRODUCT,
}
},
}
# Pattern id 0 is reserved for the main interaction relation, id 1 for the mention relation.
PATH_PATTERN = {
ML1M: {
0: ((None, USER), (INTERACTION[ML1M], PRODUCT), (INTERACTION[ML1M], USER), (INTERACTION[ML1M], PRODUCT)),
2: ((None, USER), (INTERACTION[ML1M], PRODUCT), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, CINEMATOGRAPHER), (CINEMATOGRAPHY_BY_CINEMATOGRAPHER, PRODUCT)),
3: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_COMPANY, PRODCOMPANY), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (INTERACTION[ML1M], PRODUCT), (COMPOSED_BY_COMPOSER, COMPOSER), (COMPOSED_BY_COMPOSER, PRODUCT)),
5: ((None, USER), (INTERACTION[ML1M], PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
7: ((None, USER), (INTERACTION[ML1M], PRODUCT), (STARRED_BY_ACTOR, ACTOR), (STARRED_BY_ACTOR, PRODUCT)),
8: ((None, USER), (INTERACTION[ML1M], PRODUCT), (EDITED_BY_EDITOR, EDITOR), (EDITED_BY_EDITOR, PRODUCT)),
9: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
10: ((None, USER), (INTERACTION[ML1M], PRODUCT), (WROTE_BY_WRITTER, WRITTER), (WROTE_BY_WRITTER, PRODUCT)),
11: ((None, USER), (INTERACTION[ML1M], PRODUCT), (DIRECTED_BY_DIRECTOR, DIRECTOR), (DIRECTED_BY_DIRECTOR, PRODUCT)),
12: ((None, USER), (INTERACTION[ML1M], PRODUCT), (PRODUCED_IN_COUNTRY, COUNTRY), (PRODUCED_IN_COUNTRY, PRODUCT)),
13: ((None, USER), (INTERACTION[ML1M], PRODUCT), (RELATED_TO_WIKIPAGE, WIKIPAGE), (RELATED_TO_WIKIPAGE, PRODUCT)),
},
LFM1M: {
0: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (INTERACTION[LFM1M], USER), (INTERACTION[LFM1M], PRODUCT)),
2: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (BELONG_TO_GENRE, GENRE), (BELONG_TO_GENRE, PRODUCT)),
4: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (FEATURED_BY_ARTIST, ARTIST), (FEATURED_BY_ARTIST, PRODUCT)),
5: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (MIXED_BY_ENGINEER, ENGINEER), (MIXED_BY_ENGINEER, PRODUCT)),
6: ((None, USER), (INTERACTION[LFM1M], PRODUCT), (PRODUCED_BY_PRODUCER, PRODUCER), (PRODUCED_BY_PRODUCER, PRODUCT)),
},
CELL: {
0: ((None, USER), (PURCHASE, PRODUCT), (PURCHASE, USER), (PURCHASE, PRODUCT)),
2: ((None, USER), (PURCHASE, PRODUCT), (BELONG_TO_CATEGORY, CATEGORY), (BELONG_TO_CATEGORY, PRODUCT)),
3: ((None, USER), (PURCHASE, PRODUCT), (PRODUCED_BY_COMPANY, BRAND), (PRODUCED_BY_COMPANY, PRODUCT)),
4: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_P, PRODUCT)),
5: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_P, PRODUCT)),
6: ((None, USER), (PURCHASE, PRODUCT), (ALSO_BOUGHT_RP, RPRODUCT), (ALSO_BOUGHT_RP, PRODUCT)),
10: ((None, USER), (PURCHASE, PRODUCT), (ALSO_VIEWED_RP, RPRODUCT), (ALSO_VIEWED_RP, PRODUCT)),
}
}
MAIN_PRODUCT_INTERACTION = {
ML1M: (PRODUCT, INTERACTION[ML1M]),
LFM1M: (PRODUCT, INTERACTION[LFM1M]),
CELL: (PRODUCT, PURCHASE)
}
def get_entities(dataset_name):
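    """Return the entity types defined in KG_RELATION for the given dataset."""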
return list(KG_RELATION[dataset_name].keys())
def get_knowledge_derived_relations(dataset_name):
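    """Return the relations of the main product entity, excluding the main user-product interaction."""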
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[dataset_name]
ans = list(KG_RELATION[dataset_name][main_entity].keys())
ans.remove(main_relation)
return ans
def get_dataset_relations(dataset_name, entity_head):
return list(KG_RELATION[dataset_name][entity_head].keys())
def get_entity_tail(dataset_name, relation):
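    """Return the tail entity type reached from the main product entity through the given relation."""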
entity_head, _ = MAIN_PRODUCT_INTERACTION[dataset_name]
return KG_RELATION[dataset_name][entity_head][relation]
def get_logger(logname):
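    """Create a DEBUG-level logger that writes both to stdout and to a rotating file named after `logname`."""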
logger = logging.getLogger(logname)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('[%(levelname)s] %(message)s')
ch = logging.StreamHandler(sys.stdout)
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.handlers.RotatingFileHandler(logname, mode='w')
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
def set_random_seed(seed):
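    """Seed the python, numpy and torch (CPU and CUDA) random number generators for reproducibility."""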
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed_all(seed)
def save_dataset(dataset, dataset_obj):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
with open(dataset_file, 'wb') as f:
pickle.dump(dataset_obj, f)
def load_dataset(dataset):
dataset_file = os.path.join(TMP_DIR[dataset], 'dataset.pkl')
dataset_obj = pickle.load(open(dataset_file, 'rb'))
return dataset_obj
def save_labels(dataset, labels, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
with open(label_file, 'wb') as f:
pickle.dump(labels, f)
f.close()
def load_labels(dataset, mode='train'):
if mode == 'train':
label_file = LABELS[dataset][0]
elif mode == 'valid':
label_file = LABELS[dataset][1]
elif mode == 'test':
label_file = LABELS[dataset][2]
else:
        raise Exception('mode should be one of {train, valid, test}.')
user_products = pickle.load(open(label_file, 'rb'))
return user_products
def save_embed(dataset, embed):
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
pickle.dump(embed, open(embed_file, 'wb'))
def load_embed(dataset, embed_model=TRANSE):
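    """Load the pickled TransE embeddings for the dataset, copying the bundled pretrained file into TMP_DIR first if it is missing."""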
embed_file = '{}/transe_embed.pkl'.format(TMP_DIR[dataset])
print('Load embedding:', embed_file)
if not os.path.exists(embed_file):
default_emb_path = os.path.join(ROOT_DIR, 'pretrained', dataset, MODEL, embed_model, 'transe_embed.pkl')
shutil.copyfile(default_emb_path, embed_file)
embed = pickle.load(open(embed_file, 'rb'))
return embed
# Receives paths in the form (score, prob, [path]) and returns the relation of the last hop.
def get_path_pattern(path):
return path[-1][-1][0]
def get_pid_to_kgid_mapping(dataset_name):
if dataset_name == "ml1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/movie.txt", "r")
elif dataset_name == "lfm1m":
file = open(DATASET_DIR[dataset_name] + "/entities/mappings/song.txt", "r")
else:
print("Dataset mapping not found!")
exit(-1)
reader = csv.reader(file, delimiter=' ')
dataset_pid2kg_pid = {}
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
dataset_pid2kg_pid[int(row[0])] = int(row[1])
file.close()
return dataset_pid2kg_pid
def get_validation_pids(dataset_name):
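    """Parse valid.txt of the dataset and return a dict mapping each user id to the set of its validation product ids."""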
if not os.path.isfile(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')):
return []
validation_pids = defaultdict(set)
with open(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')) as valid_file:
reader = csv.reader(valid_file, delimiter=" ")
for row in reader:
uid = int(row[0])
pid = int(row[1])
validation_pids[uid].add(pid)
valid_file.close()
return validation_pids
def get_uid_to_kgid_mapping(dataset_name):
dataset_uid2kg_uid = {}
with open(DATASET_DIR[dataset_name] + "/entities/mappings/user.txt", 'r') as file:
reader = csv.reader(file, delimiter=" ")
next(reader, None)
for row in reader:
if dataset_name == "ml1m" or dataset_name == "lfm1m":
uid_review = int(row[0])
uid_kg = int(row[1])
dataset_uid2kg_uid[uid_review] = uid_kg
return dataset_uid2kg_uid
def save_kg(dataset, kg):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
pickle.dump(kg, open(kg_file, 'wb'))
def load_kg(dataset):
kg_file = TMP_DIR[dataset] + '/kg.pkl'
# CHANGED
kg = pickle.load(open(kg_file, 'rb'))
return kg
def shuffle(arr):
for i in range(len(arr) - 1, 0, -1):
# Pick a random index from 0 to i
        j = random.randint(0, i)
# Swap arr[i] with the element at random index
arr[i], arr[j] = arr[j], arr[i]
return arr
def makedirs(dataset_name):
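    """Create the hparams_cfg and best_hparams_cfg directories of the dataset if they do not exist."""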
os.makedirs(BEST_CFG_DIR[dataset_name], exist_ok=True)
os.makedirs(CFG_DIR[dataset_name], exist_ok=True)
| 17,841 | 29.975694 | 156 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/train.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from models.UCPR.utils import *
from models.UCPR.src.model.get_model.get_model import *
from models.UCPR.src.parser import parse_args
from models.UCPR.src.para_setting import parameter_path, parameter_path_th
from models.UCPR.src.data_loader import ACDataLoader
from models.UCPR.preprocess.dataset import Dataset
from models.UCPR.preprocess.knowledge_graph import KnowledgeGraph
import time
import json
from easydict import EasyDict as edict
import wandb
from models.utils import MetricsLogger
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
def pretrain_set(args, env):
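    """Build the Memory_Model for the environment; when args.load_pretrain_model is set, load the pretrained
    weights referenced by the pretrained-model json and freeze part of the parameters depending on the dataset.
    Returns the model together with the training logger."""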
if args.load_pretrain_model == True:
logger = get_logger(args.log_dir + '/train_log_pretrain.txt')
args.logger = logger
print(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt')
with open(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt') as json_file:
best_model_json = json.load(json_file)
logger.info(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt')
policy_file = best_model_json['pretrained_file']
pretrain_sd = torch.load(policy_file)
logger.info("pretrain_model_load")
logger.info(policy_file)
# input()
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
pretrain_sd = {k: v for k, v in pretrain_sd.items() if k in model_sd}
para_meter = [k.split('.')[0] for k, v in pretrain_sd.items()]
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
param.requires_grad = True
if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
for name, child in model.named_children():
print('name = ', name)
if 'kg' in name:
print('name = ', name)
for param in child.parameters():
param.requires_grad = False
elif args.dataset in [LFM1M,ML1M]:#MOVIE_CORE, AZ_BOOK_CORE]:
for name, child in model.named_children():
print('name = ', name)
if name in para_meter and 'kg' not in name and 'actor' not in name and 'critic' not in name:
print('name = ', name)
for param in child.parameters():
param.requires_grad = False
grad_string = ''
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
print(param.requires_grad)
grad_string += ' name = ' + name + ' ' + str(param.requires_grad)
logger.info(grad_string)
# start_epoch = args.pretrained_st_epoch + 1
else:
logger = get_logger(args.log_dir + '/train_log.txt')
args.logger = logger
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
grad_string = ''
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
print(param.requires_grad)
grad_string += ' name = ' + name + ' ' + str(param.requires_grad)
logger.info(grad_string)
core_user_list = args.core_user_list
#kg_fre_dict = args.kg_fre_dict
sp_user_filter = args.sp_user_filter
try:
kg_user_filter = args.kg_user_filter
args.kg_user_filter = ''
except:
pass
args.core_user_list = ''
#args.kg_fre_dict = ''
args.sp_user_filter = ''
logger.info(args)
args.core_user_list = core_user_list
#args.kg_fre_dict = kg_fre_dict
args.sp_user_filter = sp_user_filter
try:
args.kg_user_filter = kg_user_filter
except:
pass
del core_user_list#, kg_fre_dict
return model, logger
def train(args):
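    """Actor-critic training loop: iterates over the train and valid splits, logs losses and rewards through
    MetricsLogger, and saves a policy checkpoint every 10 epochs."""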
train_env = KG_Env(args, Dataset(args, set_name='train'), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
valid_env = KG_Env(args, Dataset(args, set_name='valid'), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
print('env.output_valid_user() = ', len(train_env.output_valid_user()))
print('args.batch_size = ', args.batch_size)
train_dataloader = ACDataLoader(train_env.output_valid_user(), args.batch_size)
valid_dataloader = ACDataLoader(valid_env.output_valid_user(), args.batch_size)
model, logger = pretrain_set(args, train_env)
logger.info('valid user = ')
# logger.info(env.output_valid_user())
logger.info(len(train_env.output_valid_user()))
optimizer = optim.Adam(model.parameters(), lr=args.lr)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
step=0
metrics = MetricsLogger(args.wandb_entity,
f'ucpr_{args.dataset}',
config=args)
metrics.register('train_loss')
metrics.register('train_ploss')
metrics.register('train_vloss')
metrics.register('train_entropy')
metrics.register('train_reward')
metrics.register('avg_train_loss')
metrics.register('avg_train_ploss')
metrics.register('avg_train_vloss')
metrics.register('avg_train_entropy')
metrics.register('avg_train_reward')
metrics.register('std_train_reward')
metrics.register('valid_loss')
metrics.register('valid_ploss')
metrics.register('valid_vloss')
metrics.register('valid_entropy')
metrics.register('valid_reward')
metrics.register('avg_valid_loss')
metrics.register('avg_valid_ploss')
metrics.register('avg_valid_vloss')
metrics.register('avg_valid_entropy')
metrics.register('avg_valid_reward')
metrics.register('std_valid_reward')
loaders = {'train': train_dataloader,
'valid': valid_dataloader}
envs = {'train': train_env,
'valid':valid_env}
step_counter = {
'train': 0,
'valid':0
}
first_iterate = True
for epoch in range(0, args.epochs + 1):
splits_to_compute = list(loaders.items())
if first_iterate:
first_iterate = False
splits_to_compute.insert(0, ('valid', valid_dataloader))
for split_name, dataloader in splits_to_compute:
if split_name == 'valid' and epoch%10 != 0:
continue
if split_name == 'valid':
model.eval()
else:
model.train()
dataloader.reset()
env = envs[split_name]
iter_counter = 0
dataloader.reset()
while dataloader.has_next():
batch_uids = dataloader.get_batch()
### Start batch episodes ###
env.reset(epoch, batch_uids, training = True) # numpy array of [bs, state_dim]
model.user_triplet_set = env.user_triplet_set
model.reset(batch_uids)
while not env._done:
batch_act_mask = env.batch_action_mask(dropout=args.act_dropout) # numpy array of size [bs, act_dim]
batch_emb_state = model.generate_st_emb(env._batch_path)
batch_next_action_emb = model.generate_act_emb(env._batch_path, env._batch_curr_actions)
batch_act_idx = model.select_action(batch_emb_state, batch_next_action_emb, batch_act_mask, args.device) # int
batch_state, batch_reward = env.batch_step(batch_act_idx)
model.rewards.append(batch_reward)
### End of episodes ###
for pg in optimizer.param_groups:
lr = pg['lr']
total_reward = np.sum(model.rewards)
# Update policy
loss, ploss, vloss, eloss = model.update(optimizer, env, args.device, args.ent_weight, step_counter[split_name])
cur_metrics = {f'{split_name}_loss':loss,
f'{split_name}_ploss':ploss,
f'{split_name}_vloss':vloss,
f'{split_name}_entropy':eloss,
f'{split_name}_reward':total_reward,
f'{split_name}_iter': step_counter[split_name]}
for k,v in cur_metrics.items():
metrics.log(k, v)
#metrics.push(cur_metrics.keys())
step_counter[split_name] += 1
iter_counter += 1
#if step_counter[split_name] > 0 and step_counter[split_name] % 100 == 0:
# #avg_reward = np.mean(total_rewards) / args.batch_size
# dataloader.reset()
cur_metrics = [f'{split_name}_epoch']
cur_metrics.extend([f'{split_name}_loss',
f'{split_name}_ploss',
f'{split_name}_vloss',
f'{split_name}_entropy',
f'{split_name}_reward',
])
for k in cur_metrics[1:]:
metrics.log(f'avg_{k}', sum(metrics.history(k, iter_counter))/max(iter_counter,1) )
getattr(metrics, f'avg_{split_name}_reward')[-1] /= args.batch_size
metrics.log(f'{split_name}_epoch', epoch)
cur_metrics.append(f'std_{split_name}_reward')
metrics.log(f'std_{split_name}_reward',np.std(metrics.history( f'{split_name}_reward', iter_counter)) )
info = ""
for k in cur_metrics:
if isinstance(getattr(metrics,k)[-1],float):
x = '{:.5f}'.format(getattr(metrics, k)[-1])
else:
x = '{:d}'.format(getattr(metrics, k)[-1])
info = info + f'| {k}={x} '
metrics.push(cur_metrics)
logger.info(info)
### END of epoch ###
if epoch % 10 == 0:
policy_file = '{}/policy_model_epoch_{}.ckpt'.format(TMP_DIR[args.dataset], epoch)
logger.info("Save model to " + policy_file)
torch.save(model.state_dict(), policy_file)
#metrics.push_model(policy_file, f'{MODEL}_{args.dataset}_{epoch}')
makedirs(args.dataset)
metrics.write(TEST_METRICS_FILE_PATH[args.dataset])#metrics.write(os.path.join(TMP_DIR[args.dataset], VALID_METRICS_FILE_NAME) )
metrics.close_wandb()
if __name__ == '__main__':
args = parse_args()
os.makedirs(TMP_DIR[args.dataset], exist_ok=True)
with open(os.path.join(TMP_DIR[args.dataset],HPARAMS_FILE), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
args.training = 1
args.training = (args.training == 1)
if args.envir == 'p1':
para_env = parameter_path_th
KG_Env = KGEnvironment
elif args.envir == 'p2':
para_env = parameter_path
KG_Env = KGEnvironment
para_env(args)
train(args)
| 11,947 | 36.930159 | 147 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/parser.py | import os
from models.UCPR.utils import *
import argparse
import random
def parse_args():
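    """Parse the UCPR command-line arguments, normalize the flag types and derive environment-dependent settings."""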
boolean = lambda x: (str(x).lower() == 'true')
parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', type=str, default=LFM1M, help='One of {ml1m, lfm1m, cell}')
parser.add_argument('--name', type=str, default='train_agent_enti_emb', help='directory name.')
parser.add_argument('--model', type=str, default='UCPR', help='directory name.')
parser.add_argument('--seed', type=int, default=52, help='random seed.')
    parser.add_argument('--p_hop', type=int, default=1, help='number of hops used to build the user triplet set.')
parser.add_argument('--gpu', type=int, default=0, help='gpu device.')
parser.add_argument('--epochs', type=int, default=38, help='Max number of epochs.')
parser.add_argument('--batch_size', type=int, default=128, help='batch size.')
parser.add_argument('--sub_batch_size', type=int, default=1, help='sub batch size.')
    parser.add_argument('--n_memory', type=int, default=32, help='number of triplets kept per hop in the user triplet set.')
# parser.add_argument('--user_core', type=int, default=5, help='sub batch size.')
parser.add_argument('--lr', type=float, default=7e-5, help='learning rate.')
parser.add_argument('--l2_lambda', type=float, default=0, help='l2 lambda')
parser.add_argument('--max_acts', type=int, default=50, help='Max number of actions.')
parser.add_argument('--max_path_len', type=int, default=3, help='Max path length.')
parser.add_argument('--gamma', type=float, default=0.99, help='reward discount factor.')
parser.add_argument('--ent_weight', type=float, default=1e-3, help='weight factor for entropy loss')
    parser.add_argument('--reasoning_step', type=int, default=3, help='number of reasoning steps.')
parser.add_argument('--pretrained_st_epoch', type=int, default=0, help='h0_embbed')
parser.add_argument('--att_core', type=int, default=0, help='h0_embbed')
parser.add_argument('--user_core_th', type=int, default=6, help='h0_embbed')
parser.add_argument('--grad_check', type=int, default=0, help='h0_embbed')
parser.add_argument('--embed_size', type=int, default=50, help='knowledge embedding size.')
parser.add_argument('--act_dropout', type=float, default=0.5, help='action dropout rate.')
parser.add_argument('--state_history', type=int, default=1, help='state history length')
parser.add_argument('--hidden', type=int, nargs='*', default=[64, 32], help='number of samples')
parser.add_argument('--gradient_plot', type=str, default='gradient_plot/', help='number of negative samples.')
parser.add_argument('--best_save_model_dir', type=str, default='', help='best_save_model_dir')
    parser.add_argument('--reward_hybrid', type=int, default=0, help='whether to use the hybrid reward (1) or not (0).')
parser.add_argument('--reward_rh', type=str, default='', help='number of negative samples.')
parser.add_argument('--test_lstm_up', type=int, default=1, help='test_lstm_up')
parser.add_argument('--h0_embbed', type=int, default=0, help='h0_embbed')
parser.add_argument('--training', type=int, default=0, help='h0_embbed')
parser.add_argument('--load_pretrain_model', type=int, default=0, help='h0_embbed')
parser.add_argument('--att_evaluation', type=int, default=0, help='att_evaluation')
parser.add_argument('--state_rg', type=int, default=0, help='state_require_gradient')
parser.add_argument('--kg_emb_grad', type=int, default=0, help='if kg_emb_grad')
parser.add_argument('--save_pretrain_model', type=int, default=0, help='save_pretrain_model')
parser.add_argument('--mv_test', type=int, default=0, help='mv_test')
parser.add_argument('--env_old', type=int, default=0, help='env_old')
parser.add_argument('--kg_old', type=int, default=0, help='env_old')
parser.add_argument('--tri_wd_rm', type=int, default=0, help='tri_wd_rm')
parser.add_argument('--tri_pro_rm', type=int, default=0, help='tri_pro_rm')
parser.add_argument('--l2_weight', type=float, default=1e-6, help='weight of the l2 regularization term')
parser.add_argument('--sam_type', type=str, default='alet', help='number of negative samples.')
parser.add_argument('--topk', type=int, nargs='*', default=[25, 5, 1], help='number of samples')
#parser.add_argument('--topk_list', type=int, nargs='*', default=[1, 10, 100, 100], help='number of samples')
parser.add_argument('--run_path', type=boolean, default=True, help='Generate predicted path? (takes long time)')
parser.add_argument('--run_eval', type=boolean, default=True, help='Run evaluation?')
parser.add_argument('--save_paths', type=boolean, default=True, help='Save paths')
parser.add_argument('--pretest', type=int, default=0, help='pretest')
parser.add_argument('--item_core', type=int, default=10, help='core number')
parser.add_argument('--user_core', type=int, default=300, help='core number')
    parser.add_argument('--best_model_epoch', type=int, default=0, help='epoch of the checkpoint used for evaluation.')
    parser.add_argument('--kg_fre_lower', type=int, default=15, help='lower bound on KG entity frequency when sampling triplets.')
    parser.add_argument('--kg_fre_upper', type=int, default=500, help='upper bound on KG entity frequency.')
parser.add_argument('--lambda_num', type=float, default=0.5, help='core number')
parser.add_argument('--non_sampling', type=boolean, default=False, help='core number')
parser.add_argument('--gp_setting', type=str, default='6000_800_15_500_250', help='core number')
parser.add_argument('--kg_no_grad', type=boolean, default=False, help='core number')
parser.add_argument('--sort_by', type=str, default='score', help='score or prob')
parser.add_argument('--eva_epochs', type=int, default=0, help='core number')
parser.add_argument('--KGE_pretrained', type=int, default=0, help='KGE_pretrained')
parser.add_argument('--load_pt_emb_size', type=int, default=0, help='core number')
parser.add_argument('--user_o', type=int, default=0, help='user_o')
parser.add_argument('--add_products', type=boolean, default=True, help='Add predicted products up to 10')
parser.add_argument('--do_validation', type=bool, default=True, help='Whether to perform validation')
parser.add_argument("--wandb", action="store_true", help="If passed, will log to Weights and Biases.")
parser.add_argument(
"--wandb_entity",
required="--wandb" in sys.argv,
type=str,
help="Entity name to push to the wandb logged data, in case args.wandb is specified.",
)
parser.add_argument('--policy_path', type=str, default=None, help='Path to the .pt file of the trained agent ')
args = parser.parse_args()
args.gpu = str(args.gpu)
args.KGE_pretrained = (args.KGE_pretrained == 1)
args.reward_hybrid = (args.reward_hybrid == 1)
args.test_lstm_up = (args.test_lstm_up == 1)
args.load_pretrain_model = (args.load_pretrain_model == 1)
args.att_evaluation = (args.att_evaluation == 1)
args.state_rg = (args.state_rg == 1)
args.load_pt_emb_size = (args.load_pt_emb_size == 1)
args.kg_emb_grad = (args.kg_emb_grad == 1)
args.mv_test = (args.mv_test == 1)
args.env_old = (args.env_old == 1)
args.kg_old = (args.kg_old == 1)
args.user_o = (args.user_o == 1)
args.grad_check = (args.grad_check == 1)
args.pretest = (args.pretest == 1)
args.save_pretrain_model = (args.save_pretrain_model == 1)
args.tri_wd_rm = (args.tri_wd_rm == 1)
args.tri_pro_rm = (args.tri_pro_rm == 1)
# os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
args.device = torch.device('cuda:0') if torch.cuda.is_available() else 'cpu'
if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
args.envir = 'p1'
args.sort_by = 'score'
# if args.KGE_pretrained == True: args.embed_size = 100
else:
args.envir = 'p2'
args.sort_by = 'prob'
print(args.envir)
# if args.KGE_pretrained == True: args.embed_size = 50
'''
if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
args.topk = [10, 15, 1]
args.topk_list = [1, 10, 150, 100]
elif args.dataset == MOVIE_CORE:
if args.mv_test == True:
args.topk = [8, 3, 4]
args.topk_list = [1, 8, 24, 96]
else:
args.topk = [8, 3, 4]
args.topk_list = [1, 8, 24, 96]
# args.topk = [10, 10, 1]
# args.topk_list = [1, 10, 100, 100]
# args.topk = [10, 3, 4]
# args.topk_list = [1, 8, 24, 96]
elif args.dataset == AZ_BOOK_CORE:
args.topk = [8, 2, 6]
args.topk_list = [1, 8, 16, 96]
'''
args.topk = [25, 5, 1]
args.topk_string = ', '.join([str(k) for k in args.topk])
args.topk_string = ""
if args.model in ['lstm', 'state_history']:
args.non_sampling = True
return args | 8,873 | 50 | 116 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/test.py | from __future__ import absolute_import, division, print_function
import os
import argparse
import json
from math import log
from datetime import datetime
from tqdm import tqdm
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
import threading
from functools import reduce
import time
import pickle
import gc
import json
from easydict import EasyDict as edict
import itertools
from models.UCPR.utils import *
from models.UCPR.src.model.get_model.get_model import *
from models.UCPR.src.parser import parse_args
from models.UCPR.src.para_setting import parameter_path, parameter_path_th
import collections
def dcg_at_k(r, k, method=1):
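    """Discounted cumulative gain of the relevance scores `r` at cut-off `k`, with two standard discount variants selected by `method`."""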
r = np.asfarray(r)[:k]
if r.size:
if method == 0:
return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
elif method == 1:
return np.sum(r / np.log2(np.arange(2, r.size + 2)))
else:
raise ValueError('method must be 0 or 1.')
return 0.
def ndcg_at_k(r, k, method=1):
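    """Normalized DCG at cut-off `k`: DCG of `r` divided by the DCG of the ideal (descending) ranking."""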
dcg_max = dcg_at_k(sorted(r, reverse=True), k, method)
if not dcg_max:
return 0.
return dcg_at_k(r, k, method) / dcg_max
def save_output(dataset_name, pred_paths):
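    """Min-max normalize the path scores in `pred_paths` and pickle the result into the dataset log directory."""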
extracted_path_dir = LOG_DATASET_DIR[dataset_name]
if not os.path.isdir(extracted_path_dir):
os.makedirs(extracted_path_dir)
print("Normalizing items scores...")
    # Get min and max scores to perform normalization between 0 and 1
score_list = []
    for uid in pred_paths:
for pid, path_list in pred_paths[uid].items():
for path in path_list:
score_list.append(float(path[0]))
min_score = min(score_list)
max_score = max(score_list)
print("Saving pred_paths...")
for uid in pred_paths.keys():
curr_pred_paths = pred_paths[uid]
for pid in curr_pred_paths.keys():
curr_pred_paths_for_pid = curr_pred_paths[pid]
for i, curr_path in enumerate(curr_pred_paths_for_pid):
path_score = pred_paths[uid][pid][i][0]
path_prob = pred_paths[uid][pid][i][1]
path = pred_paths[uid][pid][i][2]
new_path_score = (float(path_score) - min_score) / (max_score - min_score)
pred_paths[uid][pid][i] = (new_path_score, path_prob, path)
with open(extracted_path_dir + "/pred_paths.pkl", 'wb') as pred_paths_file:
pickle.dump(pred_paths, pred_paths_file)
pred_paths_file.close()
def evaluate(topk_matches, test_user_products, no_skip_user, dataset_name):
"""Compute metrics for predicted recommendations.
Args:
        topk_matches: dict mapping each user id to a list of predicted product ids in ascending order of score.
"""
invalid_users = []
# Compute metrics
metrics = edict(
# ndcg_other=[],
ndcg=[],
hr=[],
precision=[],
recall=[],
)
ndcgs = []
test_user_idxs = list(test_user_products.keys())
x = defaultdict(int)
rel_size = []
for uid in test_user_idxs:
if uid not in topk_matches or len(topk_matches[uid]) < 10:
x['a'] +=1
invalid_users.append(uid)
continue
pred_list, rel_set = topk_matches[uid][::-1], test_user_products[uid]
if uid not in no_skip_user:
x['b'] += 1
continue
if len(pred_list) == 0:
x['c'] +=1
continue
rel_size.append(len(rel_set))
k = 0
hit_num = 0.0
hit_list = []
for pid in pred_list:
k += 1
if pid in rel_set:
hit_num += 1
hit_list.append(1)
else:
hit_list.append(0)
#print(k, len(hit_list), collections.Counter(hit_list))
ndcg = ndcg_at_k(hit_list, k)
recall = hit_num / len(rel_set)
precision = hit_num / len(pred_list)
hit = 1.0 if hit_num > 0.0 else 0.0
metrics.ndcg.append(ndcg)
metrics.hr.append(hit)
metrics.recall.append(recall)
metrics.precision.append(precision)
avg_metrics = edict(
ndcg=[],
hr=[],
precision=[],
recall=[],
)
print("Average test set size: ", np.array(rel_size).mean())
for metric, values in metrics.items():
avg_metrics[metric] = np.mean(values)
avg_metric_value = np.mean(values) * 100 if metric == "ndcg_other" else np.mean(values)
n_users = len(values)
print("Overall for noOfUser={}, {}={:.4f}".format(n_users, metric,
avg_metric_value))
print("\n")
makedirs(dataset_name)
with open(RECOM_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump(metrics,f)
return avg_metrics.precision, avg_metrics.recall, avg_metrics.ndcg, avg_metrics.hr,\
invalid_users
def batch_beam_search(args, env, model, uids, device, topk=[25, 5, 1]):
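    """Beam search over the KG for a batch of users: at each of the 3 hops the policy scores the available
    actions and the top-k of them (k = topk[hop]) are used to extend every candidate path.
    Returns the expanded paths together with their per-hop probabilities."""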
def _batch_acts_to_masks(batch_acts):
batch_masks = []
for acts in batch_acts:
num_acts = len(acts)
act_mask = np.zeros(model.act_dim, dtype=np.uint8)
act_mask[:num_acts] = 1
batch_masks.append(act_mask)
return np.vstack(batch_masks)
state_pool = env.reset(args.epochs,uids) # numpy of [bs, dim]
model.reset(uids)
path_pool = env._batch_path # list of list, size=bs
probs_pool = [[] for _ in uids]
index_ori_list = [_ for _ in range(len(uids))]
idx_list = [i for i in range(len(uids))]
# print('idx_list = ', idx_list)
model.eval()
for hop in range(3):
acts_pool = env._batch_get_actions(path_pool, False) # list of list, size=bs
actmask_pool = _batch_acts_to_masks(acts_pool) # numpy of [bs, dim]
state_tensor = model.generate_st_emb(path_pool, up_date_hop = idx_list)
batch_next_action_emb = model.generate_act_emb(path_pool, acts_pool)
actmask_tensor = torch.BoolTensor(actmask_pool).to(device)
try:
next_enti_emb, next_action_emb = batch_next_action_emb[0], batch_next_action_emb[1]
probs, _ = model((state_tensor[0],state_tensor[1], next_enti_emb, next_action_emb, actmask_tensor))
except:
probs, _ = model((state_tensor, batch_next_action_emb, actmask_tensor)) # Tensor of [bs, act_dim]
probs = probs + actmask_tensor.float() # In order to differ from masked actions
del actmask_tensor
topk_probs, topk_idxs = torch.topk(probs, topk[hop], dim=1) # LongTensor of [bs, k]
topk_idxs = topk_idxs.detach().cpu().numpy()
topk_probs = topk_probs.detach().cpu().numpy()
new_path_pool, new_probs_pool, new_index_pool, new_idx = [], [], [], []
for row in range(topk_idxs.shape[0]):
path = path_pool[row]
probs = probs_pool[row]
index_ori = index_ori_list[row]
for idx, p in zip(topk_idxs[row], topk_probs[row]):
if idx >= len(acts_pool[row]): # act idx is invalid
continue
relation, next_node_id = acts_pool[row][idx] # (relation, next_node_id)
if relation == SELF_LOOP:
next_node_type = path[-1][1]
else:
if args.envir == 'p1':
next_node_type = KG_RELATION[args.dataset][path[-1][1]][relation]#KG_RELATION[path[-1][1]][relation]
else:
next_node_type = KG_RELATION[args.dataset][path[-1][1]][relation]#env.et_idx2ty[next_node_id]
# next_node_type = KG_RELATION[path[-1][1]][relation]
new_path = path + [(relation, next_node_type, next_node_id)]
new_path_pool.append(new_path)
new_probs_pool.append(probs + [p])
new_index_pool.append(index_ori)
new_idx.append(row)
print(len(new_path_pool))
print(len(new_idx))
print()
path_pool = new_path_pool
probs_pool = new_probs_pool
index_ori_list = new_index_pool
idx_list = new_idx
gc.collect()
return path_pool, probs_pool
def predict_paths(args, policy_file, path_file, train_labels, test_labels, pretest):
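    """Load the trained policy from `policy_file`, run batched beam search for every test user and pickle the
    resulting paths and probabilities to `path_file`."""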
print('Predicting paths...')
env = KG_Env(args, Dataset(args), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
print(policy_file)
print('Loading pretrain')
pretrain_sd = torch.load(policy_file)
print('Loading model')
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index,
env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
print('Model loaded')
test_uids = list(test_labels.keys())
test_uids = [uid for uid in test_uids if uid in train_labels and uid in env.user_list]
batch_size = 16
start_idx = 0
all_paths, all_probs = [], []
times = 0
pbar = tqdm(total=len(test_uids))
while start_idx < len(test_uids):
# print(' bar state/text_uid = ', start_idx, '/', len(test_uids), end = '\r')
end_idx = min(start_idx + batch_size, len(test_uids))
batch_uids = test_uids[start_idx:end_idx]
print(f'{start_idx}/{ len(test_uids)}')
paths, probs = batch_beam_search(args, env, model, batch_uids, args.device, topk=args.topk)
all_paths.extend(paths)
all_probs.extend(probs)
start_idx = end_idx
times += 1
if times % 50 == 0:
str_batch_uids = [str(st) for st in batch_uids]
fail_uids = ",".join(str_batch_uids)
fail_batch = f"'batch_uids = ', {fail_uids}, {str(start_idx)}, {str(end_idx)}"
args.logger.info(fail_batch)
if pretest == 1 and times >= 100: break
pbar.update(batch_size)
predicts = {'paths': all_paths, 'probs': all_probs}
pickle.dump(predicts, open(path_file, 'wb'))
def get_validation_pids(dataset_name):
if not os.path.isfile(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')):
return []
validation_pids = defaultdict(set)
with open(os.path.join(DATASET_DIR[dataset_name], 'valid.txt')) as valid_file:
reader = csv.reader(valid_file, delimiter=" ")
for row in reader:
uid = int(row[0])
pid = int(row[1])
validation_pids[uid].add(pid)
valid_file.close()
return validation_pids
def extract_paths(dataset_name, path_file, train_labels, valid_labels, test_labels):
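    """Score every user-product pair with the TransE embeddings and collect, per test user, the predicted paths
    grouped by product id, skipping items that appear in the train or validation sets."""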
embeds = load_embed(dataset_name)
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset_name]
user_embeds = embeds[USER]
purchase_embeds = embeds[main_interaction][0]
product_embeds = embeds[main_product]#[0]
print(user_embeds.shape)
print(purchase_embeds.shape)
print(product_embeds[0].shape)
print(product_embeds.shape)
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
print(scores.shape)
validation_pids = get_validation_pids(dataset_name)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
print(results.keys())
x = defaultdict(int)
for idx, (path, probs) in enumerate(zip(results['paths'], results['probs'])):
if path[-1][1] != main_product:
#print('a')
x['a'] += 1
continue
uid = path[0][2]
if uid not in total_pre_user_num:
total_pre_user_num[uid] = len(total_pre_user_num)
x['b'] += 1
#print('b')
if uid not in pred_paths:
#print('c')
x['c'] += 1
continue
pid = path[-1][2]
if uid in valid_labels and pid in valid_labels[uid]:
#print('d')
x['d'] += 1
continue
if pid in train_labels[uid]:
#print('e')
x['e'] += 1
continue
if pid not in pred_paths[uid]:
#print('f')
x['f'] += 1
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
print(x)
#print(pred_paths)
save_output(dataset_name, pred_paths)
return pred_paths, scores
def evaluate_paths(topk,dataset_name, pred_paths, scores, train_labels,
test_labels, args, path_file, pretest=1):
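    """Pick the best path for each user-product pair, rank the products of every user (optionally padding with
    TransE scores up to 10 items) and compute precision, recall, NDCG and hit rate on the test labels."""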
# train_labels, test_labels, args, path_file, pretest=pretest):
'''
embeds = load_embed(args.dataset)
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[args.dataset]
user_embeds = embeds[USER]
purchase_embeds = embeds[main_interaction][0]
product_embeds = embeds[main_product]#[0]
scores = np.dot(user_embeds + purchase_embeds, product_embeds.T)
validation_pids = get_validation_pids(args.dataset)
# 1) Get all valid paths for each user, compute path score and path probability.
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels }#if uid in test_labels}#train_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
for path, probs in zip(results['paths'], results['probs']):
if path[-1][1] != main_product:
continue
uid = path[0][2]
if uid not in total_pre_user_num:
total_pre_user_num[uid] = len(total_pre_user_num)
if uid not in pred_paths:
continue
pid = path[-1][2]
if uid in valid_labels and pid in valid_labels[uid]:
continue
if pid in train_labels[uid]:
continue
if pid not in pred_paths[uid]:
pred_paths[uid][pid] = []
path_score = scores[uid][pid]
path_prob = reduce(lambda x, y: x * y, probs)
pred_paths[uid][pid].append((path_score, path_prob, path))
save_output(args.dataset, pred_paths)
'''
# 2) Pick best path for each user-product pair, also remove pid if it is in train set.
best_pred_paths = {}
from collections import defaultdict
#best_pred_paths_logging = {}
for uid in pred_paths:
train_pids = set(train_labels[uid])
best_pred_paths[uid] = []
#best_pred_paths_logging[uid] = []#defaultdict(list)
for pid in pred_paths[uid]:
if pid in train_pids:
continue
sorted_path = sorted(pred_paths[uid][pid], key=lambda x: x[1], reverse=True)
best_pred_paths[uid].append(sorted_path[0])
#best_pred_paths_logging[uid].append(sorted_path[0])
def prob_keyget(x):
return (x[1], x[0])
def score_keyget(x):
return (x[0], x[1])
sort_by = 'score'
pred_labels = {}
pred_paths_top10 = {}
total_pro_num = 0
for uid in best_pred_paths:
if sort_by == 'score':
keygetter = score_keyget
elif sort_by == 'prob':
keygetter = prob_keyget
sorted_path = sorted(best_pred_paths[uid], key=keygetter, reverse=True)
top10_pids = [p[-1][2] for _, _, p in sorted_path[:10]]
top10_paths = [p for _, _, p in sorted_path[:10]]
if args.add_products and len(top10_pids) < 10:
train_pids = set(train_labels[uid])
cand_pids = np.argsort(scores[uid])
for cand_pid in cand_pids[::-1]:
if cand_pid in train_pids or cand_pid in top10_pids:
continue
top10_pids.append(cand_pid)
if len(top10_pids) >= 10:
break
        pred_labels[uid] = top10_pids[::-1] # change order from smallest to largest!
pred_paths_top10[uid] = top10_paths[::-1]
#print(uid, len(pred_labels[uid]), pred_labels[uid])
results = pickle.load(open(path_file, 'rb'))
pred_paths = {uid: {} for uid in test_labels}
total_pre_user_num = {}
no_skip_user = {}
for path, probs in zip(results['paths'], results['probs']):
uid = path[0][2]
no_skip_user[uid] = 1
avg_precision, avg_recall, avg_ndcg, avg_hit, invalid_users = evaluate(pred_labels,
test_labels, no_skip_user, dataset_name)
print('precision: ', avg_precision)
print('recall: ', avg_recall)
print('ndcg: ', avg_ndcg)
print('hit: ', avg_hit)
# Path-pattern weight w(pi) = log(2 + (number of u-v paths with the same pattern type / total number of u-v paths))
def get_path_pattern_weigth(path_pattern_name, pred_uv_paths):
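    """Return log(2 + fraction of u-v paths sharing the given path pattern), following the formula in the comment above."""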
n_same_path_pattern = 0
total_paths = len(pred_uv_paths)
for path in pred_uv_paths:
if path_pattern_name == get_path_pattern(path):
n_same_path_pattern += 1
return log(2 + (n_same_path_pattern / total_paths))
def test(args, train_labels, valid_labels, test_labels, best_recall, pretest = 1):
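    """Full evaluation pipeline: predict paths with the trained policy (unless a cached path file exists),
    extract and save them, and compute the recommendation metrics."""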
print('start predict')
policy_file = args.policy_path #args.save_model_dir + '/policy_model_epoch_{}.ckpt'.format(35)#args.eva_epochs)
path_file = os.path.join(TMP_DIR[args.dataset], 'policy_paths_epoch{}_{}.pkl'.format(args.eva_epochs, args.topk_string)) #args.save_model_dir + '/' + 'pre' + str(pretest) + 'policy_paths_epoch{}_{}.pkl'.format(args.eva_epochs, args.topk_string)
#if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
#sort_by_2 = 'prob'
#eva_file_2 = args.log_dir + '/' + 'pre' + str(pretest) + sort_by_2 + '_eva'+ '_' + args.topk_string + '.txt'
TOP_N_LOGGING = 100
    if args.run_path or not os.path.exists(path_file):
predict_paths(args, policy_file, path_file, train_labels, test_labels, pretest)#predict_paths(policy_file, path_file, args)
    if args.save_paths or args.run_eval:
pred_paths, scores = extract_paths(args.dataset, path_file, train_labels, valid_labels, test_labels)
if args.run_eval:
evaluate_paths(TOP_N_LOGGING,args.dataset, pred_paths, scores,
train_labels, test_labels, args, path_file, pretest=pretest)
if __name__ == '__main__':
args = parse_args()
args.training = 0
args.training = (args.training == 1)
args.att_evaluation = False
if args.envir == 'p1':
para_env = parameter_path_th
KG_Env = KGEnvironment
elif args.envir == 'p2':
para_env = parameter_path
KG_Env = KGEnvironment
para_env(args)
train_labels = load_labels(args.dataset, 'train')
valid_labels = load_labels(args.dataset, 'valid')
test_labels = load_labels(args.dataset, 'test')
best_recall = 0
args.eva_epochs = args.best_model_epoch
test(args, train_labels, valid_labels, test_labels, best_recall, pretest = 0)
| 19,385 | 34.966605 | 248 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/train.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from models.UCPR.utils import *
from models.UCPR.src.model.get_model.get_model import *
from models.UCPR.src.parser import parse_args
from models.UCPR.src.para_setting import parameter_path, parameter_path_th
from models.UCPR.src.data_loader import ACDataLoader
from models.UCPR.preprocess.dataset import Dataset
from models.UCPR.preprocess.knowledge_graph import KnowledgeGraph
import time
import json
from easydict import EasyDict as edict
import wandb
from models.utils import MetricsLogger
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
def pretrain_set(args, env):
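    """Build the Memory_Model for the environment; when args.load_pretrain_model is set, load the pretrained
    weights referenced by the pretrained-model json and freeze part of the parameters depending on the dataset.
    Returns the model together with the training logger."""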
if args.load_pretrain_model == True:
logger = get_logger(args.log_dir + '/train_log_pretrain.txt')
args.logger = logger
print(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt')
with open(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt') as json_file:
best_model_json = json.load(json_file)
logger.info(args.pretrained_dir + '/' + args.sort_by + '_pretrained_md_json_' + args.topk_string + '.txt')
policy_file = best_model_json['pretrained_file']
pretrain_sd = torch.load(policy_file)
logger.info("pretrain_model_load")
logger.info(policy_file)
# input()
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
model_sd = model.state_dict()
pretrain_sd = {k: v for k, v in pretrain_sd.items() if k in model_sd}
para_meter = [k.split('.')[0] for k, v in pretrain_sd.items()]
model_sd.update(pretrain_sd)
model.load_state_dict(model_sd)
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
param.requires_grad = True
if args.dataset in [BEAUTY_CORE, CELL_CORE, CLOTH_CORE]:
for name, child in model.named_children():
print('name = ', name)
if 'kg' in name:
print('name = ', name)
for param in child.parameters():
param.requires_grad = False
elif args.dataset in [LFM1M,ML1M]:#MOVIE_CORE, AZ_BOOK_CORE]:
for name, child in model.named_children():
print('name = ', name)
if name in para_meter and 'kg' not in name and 'actor' not in name and 'critic' not in name:
print('name = ', name)
for param in child.parameters():
param.requires_grad = False
grad_string = ''
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
print(param.requires_grad)
grad_string += ' name = ' + name + ' ' + str(param.requires_grad)
logger.info(grad_string)
# start_epoch = args.pretrained_st_epoch + 1
else:
logger = get_logger(args.log_dir + '/train_log.txt')
args.logger = logger
model = Memory_Model(args, env.user_triplet_set, env.rela_2_index, env.act_dim, gamma=args.gamma, hidden_sizes=args.hidden).to(args.device)
grad_string = ''
for name, child in model.named_children():
print('name = ', name)
for param in child.parameters():
print(param.requires_grad)
grad_string += ' name = ' + name + ' ' + str(param.requires_grad)
logger.info(grad_string)
core_user_list = args.core_user_list
#kg_fre_dict = args.kg_fre_dict
sp_user_filter = args.sp_user_filter
try:
kg_user_filter = args.kg_user_filter
args.kg_user_filter = ''
except:
pass
args.core_user_list = ''
#args.kg_fre_dict = ''
args.sp_user_filter = ''
logger.info(args)
args.core_user_list = core_user_list
#args.kg_fre_dict = kg_fre_dict
args.sp_user_filter = sp_user_filter
try:
args.kg_user_filter = kg_user_filter
except:
pass
del core_user_list#, kg_fre_dict
return model, logger
def train(args):
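    """Actor-critic training loop: iterates over the train and valid splits, logs losses and rewards through
    MetricsLogger, and saves a policy checkpoint every 10 epochs."""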
train_env = KG_Env(args, Dataset(args, set_name='train'), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
valid_env = KG_Env(args, Dataset(args, set_name='valid'), args.max_acts, max_path_len=args.max_path_len, state_history=args.state_history)
print('env.output_valid_user() = ', len(train_env.output_valid_user()))
print('args.batch_size = ', args.batch_size)
train_dataloader = ACDataLoader(train_env.output_valid_user(), args.batch_size)
valid_dataloader = ACDataLoader(valid_env.output_valid_user(), args.batch_size)
model, logger = pretrain_set(args, train_env)
logger.info('valid user = ')
# logger.info(env.output_valid_user())
logger.info(len(train_env.output_valid_user()))
optimizer = optim.Adam(model.parameters(), lr=args.lr)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
step=0
metrics = MetricsLogger(args.wandb_entity,
f'ucpr_{args.dataset}',
config=args)
metrics.register('train_loss')
metrics.register('train_ploss')
metrics.register('train_vloss')
metrics.register('train_entropy')
metrics.register('train_reward')
metrics.register('avg_train_loss')
metrics.register('avg_train_ploss')
metrics.register('avg_train_vloss')
metrics.register('avg_train_entropy')
metrics.register('avg_train_reward')
metrics.register('std_train_reward')
metrics.register('valid_loss')
metrics.register('valid_ploss')
metrics.register('valid_vloss')
metrics.register('valid_entropy')
metrics.register('valid_reward')
metrics.register('avg_valid_loss')
metrics.register('avg_valid_ploss')
metrics.register('avg_valid_vloss')
metrics.register('avg_valid_entropy')
metrics.register('avg_valid_reward')
metrics.register('std_valid_reward')
loaders = {'train': train_dataloader,
'valid': valid_dataloader}
envs = {'train': train_env,
'valid':valid_env}
step_counter = {
'train': 0,
'valid':0
}
first_iterate = True
for epoch in range(0, args.epochs + 1):
splits_to_compute = list(loaders.items())
if first_iterate:
first_iterate = False
splits_to_compute.insert(0, ('valid', valid_dataloader))
for split_name, dataloader in splits_to_compute:
if split_name == 'valid' and epoch%10 != 0:
continue
if split_name == 'valid':
model.eval()
else:
model.train()
dataloader.reset()
env = envs[split_name]
iter_counter = 0
dataloader.reset()
while dataloader.has_next():
batch_uids = dataloader.get_batch()
### Start batch episodes ###
env.reset(epoch, batch_uids, training = True) # numpy array of [bs, state_dim]
model.user_triplet_set = env.user_triplet_set
model.reset(batch_uids)
while not env._done:
batch_act_mask = env.batch_action_mask(dropout=args.act_dropout) # numpy array of size [bs, act_dim]
batch_emb_state = model.generate_st_emb(env._batch_path)
batch_next_action_emb = model.generate_act_emb(env._batch_path, env._batch_curr_actions)
batch_act_idx = model.select_action(batch_emb_state, batch_next_action_emb, batch_act_mask, args.device) # int
batch_state, batch_reward = env.batch_step(batch_act_idx)
model.rewards.append(batch_reward)
### End of episodes ###
for pg in optimizer.param_groups:
lr = pg['lr']
total_reward = np.sum(model.rewards)
# Update policy
loss, ploss, vloss, eloss = model.update(optimizer, env, args.device, args.ent_weight, step_counter[split_name])
cur_metrics = {f'{split_name}_loss':loss,
f'{split_name}_ploss':ploss,
f'{split_name}_vloss':vloss,
f'{split_name}_entropy':eloss,
f'{split_name}_reward':total_reward,
f'{split_name}_iter': step_counter[split_name]}
for k,v in cur_metrics.items():
metrics.log(k, v)
#metrics.push(cur_metrics.keys())
step_counter[split_name] += 1
iter_counter += 1
#if step_counter[split_name] > 0 and step_counter[split_name] % 100 == 0:
# #avg_reward = np.mean(total_rewards) / args.batch_size
# dataloader.reset()
cur_metrics = [f'{split_name}_epoch']
cur_metrics.extend([f'{split_name}_loss',
f'{split_name}_ploss',
f'{split_name}_vloss',
f'{split_name}_entropy',
f'{split_name}_reward',
])
for k in cur_metrics[1:]:
metrics.log(f'avg_{k}', sum(metrics.history(k, iter_counter))/max(iter_counter,1) )
getattr(metrics, f'avg_{split_name}_reward')[-1] /= args.batch_size
metrics.log(f'{split_name}_epoch', epoch)
cur_metrics.append(f'std_{split_name}_reward')
metrics.log(f'std_{split_name}_reward',np.std(metrics.history( f'{split_name}_reward', iter_counter)) )
info = ""
for k in cur_metrics:
if isinstance(getattr(metrics,k)[-1],float):
x = '{:.5f}'.format(getattr(metrics, k)[-1])
else:
x = '{:d}'.format(getattr(metrics, k)[-1])
info = info + f'| {k}={x} '
metrics.push(cur_metrics)
logger.info(info)
### END of epoch ###
if epoch % 10 == 0:
policy_file = '{}/policy_model_epoch_{}.ckpt'.format(TMP_DIR[args.dataset], epoch)
logger.info("Save model to " + policy_file)
torch.save(model.state_dict(), policy_file)
#metrics.push_model(policy_file, f'{MODEL}_{args.dataset}_{epoch}')
makedirs(args.dataset)
metrics.write(TEST_METRICS_FILE_PATH[args.dataset])#metrics.write(os.path.join(TMP_DIR[args.dataset], VALID_METRICS_FILE_NAME) )
metrics.close_wandb()
if __name__ == '__main__':
args = parse_args()
os.makedirs(TMP_DIR[args.dataset], exist_ok=True)
with open(os.path.join(TMP_DIR[args.dataset],HPARAMS_FILE), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
args.training = 1
args.training = (args.training == 1)
if args.envir == 'p1':
para_env = parameter_path_th
KG_Env = KGEnvironment
elif args.envir == 'p2':
para_env = parameter_path
KG_Env = KGEnvironment
para_env(args)
train(args)
| 11,947 | 36.930159 | 147 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/env/sp_user_tri_set.py | from __future__ import absolute_import, division, print_function
import os
import sys
from tqdm import tqdm
import pickle
import random
import torch
from datetime import datetime
import numpy as np
import collections
from collections import defaultdict
from collections import Counter
import time
import multiprocessing
import itertools
from multiprocessing import Pool, cpu_count
import multiprocessing as mp
from functools import partial
from models.UCPR.utils import *
def kg_based_get_user_triplet_set(args, kg, user_list, p_hop, n_memory):
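    """Build, in parallel, the p_hop-hop triplet set of every user in `user_list`: a dict mapping each user id
    to one (heads, relations, tails) triple of lists per hop, each sampled down to `n_memory` entries."""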
args_tmp = {'p_hop': p_hop, 'n_memory': n_memory}
# user -> [(hop_0_heads, hop_0_relations, hop_0_tails), (hop_1_heads, hop_1_relations, hop_1_tails), ...]
user_triplet_set = collections.defaultdict(list)
# entity_interaction_dict = collections.defaultdict(list)
user_history_dict = {}
for user in user_list:
if user not in user_history_dict:
user_history_dict[user] = [[USER, user]]
global g_kg, g_args
g_args = args
g_kg = kg
with mp.Pool(processes=min(mp.cpu_count(), 5)) as pool:
job = partial(_kg_based_get_user_triplet_set, p_hop=max(1,args_tmp['p_hop']),
KG_RELATION = KG_RELATION, n_memory=args_tmp['n_memory'], n_neighbor=16)
for u, u_r_set in pool.starmap(job, user_history_dict.items()):
user_triplet_set[u] = u_r_set
#del g_kg, g_et_idx2ty, g_args
del g_kg, g_args
return user_triplet_set
def _kg_based_get_user_triplet_set(user, history, p_hop=2, KG_RELATION = None, n_memory=32, n_neighbor=16):
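    """Worker for a single user: expand the user history hop by hop through the global KG `g_kg`, keeping at
    most `n_neighbor` (relation, entity) pairs per head entity and sampling `n_memory` triplets per hop."""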
ret = []
entity_interaction_list = []
for h in range(max(1,p_hop)):
memories_h = []
memories_r = []
memories_t = []
if h == 0:
tails_of_last_hop = history
else:
tails_of_last_hop = ret[-1][2]
for entity_type, entity in tails_of_last_hop:
tmp_list = []
for k_, v_set in g_kg(entity_type,entity).items():
if k_ == SELF_LOOP: continue
for v_ in v_set:
if v_ in g_kg(USER):
if v_ in g_args.sp_user_filter: tmp_list.append([k_,v_])
else:
# usage g_kg.degrees[etype][eid]
# k_ is the relation, get next entity type for given dataset, ent_type, rel_type
cur_etype = KG_RELATION[g_kg.dataset_name][entity_type][k_]
if g_kg.degrees[cur_etype][v_] >= g_args.kg_fre_lower:
tmp_list.append([k_,v_])
if h != 0 and len(tmp_list) >= 30: break
if len(tmp_list) == 0:
for k_, v_set in g_kg(entity_type,entity).items():
for v_ in v_set:
tmp_list.append([k_,v_])
if h != 0 and len(tmp_list) >= 30: break
for tail_and_relation in random.sample(tmp_list, min(len(tmp_list), n_neighbor)):
memories_h.append([entity_type,entity])
memories_r.append(tail_and_relation[0])
# original name is misleading, as they are stored in reverse order
                # (check KG_based_kg to see that a call to g_kg leads to an output of rel:[entities]. This output is represented at line 65 as k_,v_set)
rel, ent = tail_and_relation[0], tail_and_relation[1]
x_etype = KG_RELATION[g_kg.dataset_name][entity_type][rel]
memories_t.append([x_etype, ent])
if len(memories_h) == 0:
# added condition to avoid out of range when accessing ret[-1]
if len(ret) > 0:
ret.append(ret[-1])
else:
replace = len(memories_h) < n_memory
indices = np.random.choice(len(memories_h), size=n_memory, replace=replace)
memories_h = [memories_h[i] for i in indices]
memories_r = [memories_r[i] for i in indices]
memories_t = [memories_t[i] for i in indices]
ret.append([memories_h, memories_r, memories_t])
return user, ret
'''
def rw_get_user_triplet_set(args, kg, user_list, p_hop, n_memory):
args_tmp = {'p_hop': p_hop, 'n_memory': n_memory}
user_triplet_set = collections.defaultdict(list)
user_history_dict = {}
for user in user_list:
if user not in user_history_dict:
user_history_dict[user] = [[USER, user]]
global g_kg, g_args
g_kg = kg
g_args = args
with mp.Pool(processes=min(mp.cpu_count(), 4)) as pool:
job = partial(_rw_get_user_triplet_set, p_hop=max(1,args_tmp['p_hop']), KG_RELATION =
KG_RELATION[args.dataset], n_memory=args_tmp['n_memory'], n_neighbor=16)
for u, u_r_set in pool.starmap(job, user_history_dict.items()):
# print(' u, u_r_set = ', u, u_r_set)
user_triplet_set[u] = u_r_set
del g_kg
return user_triplet_set
def _rw_get_user_triplet_set(user, history, p_hop=2, KG_RELATION = None, n_memory=32, n_neighbor=16):
def add_list_condition(next_entities, h, entity_type, entity, kg_fre_upper, kg_fre_lower, remove_type, skip_self_loop = True):
for k_, v_set in g_kg(entity_type,entity).items():
if k_ == SELF_LOOP and skip_self_loop == True: continue
next_node_type = KG_RELATION[args.dataset][entity_type][k_]
if next_node_type in remove_type: print('next_node_type remove == ', remove_type, end='\r'); continue
if WORD in remove_type:
if k_ == 'mentions' or k_ == 'described_as' or k_ == 'also_viewed':
print('next_node_type remove == ', k_, end='\r')
continue
if next_node_type == USER:
for v_ in v_set:
if v_ in g_args.sp_user_filter: next_entities.append([k_,v_])
else:
for v_ in v_set:
if g_args.kg_fre_dict[next_node_type][v_] < kg_fre_upper and \
g_args.kg_fre_dict[next_node_type][v_] >= kg_fre_lower:
next_entities.append([k_,v_])
if h != 0 and len(next_entities) >= 20: break
return next_entities
if g_args.tri_wd_rm == True:
remove_type = [WORD]
elif g_args.tri_pro_rm == True:
remove_type = [PRODUCT]
else:
remove_type = []
ret = []
entity_interaction_list = []
for h in range(max(1,p_hop)):
memories_h = []
memories_r = []
memories_t = []
if h == 0:
tails_of_last_hop = history
else:
tails_of_last_hop = ret[-1][2]
for entity_type, entity in tails_of_last_hop:
next_entities = []
next_entities = add_list_condition(next_entities, h, entity_type, entity, g_args.kg_fre_upper, g_args.kg_fre_lower, remove_type, skip_self_loop = True)
if len(next_entities) == 0:
next_entities = add_list_condition(next_entities, h, entity_type, entity, g_args.kg_fre_upper, g_args.kg_fre_lower, [], skip_self_loop = False)
# print('len(next_entities) = ', len(next_entities))
for tail_and_relation in random.sample(next_entities, min(len(next_entities), n_neighbor)):
memories_h.append([entity_type,entity])
memories_r.append(tail_and_relation[0])
memories_t.append([KG_RELATION[args.dataset][entity_type][tail_and_relation[0]],tail_and_relation[1]])
# if the current ripple set of the given user is empty, we simply copy the ripple set of the last hop here
# this won't happen for h = 0, because only the items that appear in the KG have been selected
# this only happens on 154 users in Book-Crossing dataset (since both BX dataset and the KG are sparse)
if len(memories_h) == 0:
ret.append(ret[-1])
else:
# sample a fixed-size 1-hop memory for each user
replace = len(memories_h) < n_memory
indices = np.random.choice(len(memories_h), size=n_memory, replace=replace)
memories_h = [memories_h[i] for i in indices]
memories_r = [memories_r[i] for i in indices]
memories_t = [memories_t[i] for i in indices]
ret.append([memories_h, memories_r, memories_t])
return user, ret
''' | 11,671 | 41.59854 | 164 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/env/env.py | from __future__ import absolute_import, division, print_function
import os
import sys
sys.path.insert(0,'../preprocess')
from tqdm import tqdm
import pickle
import random
import torch
from datetime import datetime
from collections import defaultdict
from models.UCPR.utils import *
# from preprocess.knowledge_graph import RW_based_KG, KG_based_KG
# from preprocess import knowledge_graph
# from preprocess import dataset
# input()
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# import collections
# from collections import defaultdict
# from collections import Counter
from models.UCPR.utils import *
from models.UCPR.src.env.sp_user_tri_set import kg_based_get_user_triplet_set#, rw_get_user_triplet_set
from models.UCPR.preprocess.knowledge_graph import KnowledgeGraph
from models.UCPR.preprocess.dataset import Dataset
# validated wrt pgpr
class PATH_PTN(object):
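    """Valid relation patterns used to decide whether a reasoning path is rewardable."""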
def __init__(self, dataset_name):
self.patterns = []
self.dataset_name = dataset_name
#print(dataset_name)
#print(PATH_PATTERN[dataset_name])
for pattern_id in sorted(PATH_PATTERN[dataset_name].keys()):#[1, 11, 12, 13, 14, 15, 16, 17, 18]:
pattern = PATH_PATTERN[dataset_name][pattern_id]
pattern = [SELF_LOOP] + [v[0] for v in pattern[1:]] # pattern contains all relations
if pattern_id == 1:
pattern.append(SELF_LOOP)
self.patterns.append(tuple(pattern))
# new method
def _has_pattern(self, path):
pattern = tuple([v[0] for v in path])
return pattern in self.patterns
def _rw_has_pattern(self, path):
pattern = tuple([v[0] for v in path])
return pattern in self.patterns
def _kg_has_pattern(self, path):
pattern = tuple([v[0] for v in path])
if pattern[0] == SELF_LOOP and (pattern[1] == INTERACTION[self.dataset_name]) \
and pattern[2] != SELF_LOOP and pattern[3] != SELF_LOOP:
return True
else:
return False
class BatchKGEnvironment(object):
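    """Batched KG environment: keeps one reasoning path per episode, exposes the
    candidate actions at the current node, and computes the terminal reward."""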
def __init__(self, args, dataset, max_acts, max_path_len=3, state_history=1):
super(BatchKGEnvironment, self).__init__()
self.args = args
self.max_acts = max_acts
self.act_dim = max_acts + 1 # Add self-loop action, whose act_idx is always 0.
        self.max_num_nodes = max_path_len + 1  # max number of nodes in a path (= max hops + 1)
self.p_hop = args.p_hop
self.n_memory = args.n_memory
self._done = False
self.dataset_name = dataset.dataset_name
self.dataset = dataset
self.kg = KnowledgeGraph(dataset)
self.select_user_th()
self.load_dataset_emb(args, self.dataset_name)
# P1 means Review dataset environment
if self.args.envir == 'p1':
self._get_reward = self._rw_get_reward
self.next_type = lambda curr_node_type, relation, next_node_id: KG_RELATION[self.dataset_name][curr_node_type][relation]
            self.get_user_triplet = rw_get_user_triplet_set  # NOTE: this name is commented out of the import above, so the 'p1' branch is not usable as-is
# P2 means KnowledgeGraph dataset environment
elif self.args.envir == 'p2':
self._get_reward = self._kg_get_reward
self.next_type = lambda curr_node_type, relation, next_node_id: KG_RELATION[self.dataset_name][curr_node_type][relation]
self.get_user_triplet = kg_based_get_user_triplet_set
self.set_UC_view()
self.PATH_PTN = PATH_PTN(self.dataset_name)
# validated wrt pgpr
def load_dataset_emb(self, args, dataset_str):
#self.dataset = load_dataset(dataset_str)
self.args.core_user_list = self.core_user_list
#self.args.sp_user_filter = self.dataset.total_user_list[:800]
self.user_list = [user for user in list(self.kg(USER).keys()) if user in self.core_user_list]
# taken from original implementation
self.args.sp_user_filter = self.total_user_list[:800]
self.embeds = load_embed(dataset_str)
print(self.embeds.keys())
self.embed_size = self.embeds[USER].shape[1]
self.embeds[SELF_LOOP] = (np.zeros(self.embed_size), 0.0)
# NOTE: by the way embeds are saved in train_transe.py and its original version train_transe_kg and rw,
# self.embeds contains also the entitites embeddings, however these are not actually used in self.rela_2_index
# since the elements of the latter dictionary are accessed only by key, which is relation name
self.rela_2_index = {}
for k, v in self.embeds.items():
if k not in self.rela_2_index:
self.rela_2_index[k] = len(self.rela_2_index)
def set_UC_view(self):
if self.args.non_sampling == True:
self.user_triplet_set = [user for user in list(self.kg(USER).keys())if user in self.core_user_list]
return
user_triplet_path = '{}/triplet_set_{}.pickle'.format(TMP_DIR[self.dataset_name],self.args.name)#self.args.save_model_dir, self.args.name)
if os.path.exists(user_triplet_path):
with open(user_triplet_path, 'rb') as fp:
self.user_triplet_set = pickle.load(fp)
print('load user_triplet_path = ', user_triplet_path)
else:
if self.args.envir == 'p1':
self.user_triplet_set = self.get_user_triplet(self.args, self.kg, self.user_list,
self.p_hop, self.n_memory)
else:
self.user_triplet_set = self.get_user_triplet(self.args, self.kg,
self.user_list, self.p_hop, self.n_memory)
with open(user_triplet_path, 'wb') as fp:
pickle.dump(self.user_triplet_set, fp)
print('user_triplet_set_path save = ', user_triplet_path)
def select_user_th(self):
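        """Count training interactions per user and per item, and derive the core
        user list (users sorted by number of distinct training items)."""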
#LABELS[self.dataset_name][0]
train_labels = load_labels(self.dataset_name, 'train')
self.train_labels = train_labels
user_counter_dict = defaultdict(int)
self.item_freqs = defaultdict(int)
for uid, train_pids in train_labels.items():
# do set(train_pids) if considering only 0/1 interactions
# otherwise keep train_pids and count also multiple interactions with the same item
for pid in set(train_pids):
user_counter_dict[uid] += 1
self.item_freqs[pid] += 1
#test_labels = load_labels(args.dataset, 'test')
#trn_data = pd.read_csv(f'{self.data_dir}/train_pd.csv',index_col=None)
#trn_data = trn_data.drop(trn_data.columns[0], axis=1)
#trn_data = trn_data[['user','item','like']].values
'''
user_counter_dict = {}
self.item_fre = {}
for row in trn_data:
if row[2] == 1:
if row[0] not in user_counter_dict: user_counter_dict[row[0]] = 0
user_counter_dict[row[0]] += 1
if row[1] not in self.item_fre: self.item_fre[row[1]] = 0
self.item_fre[row[1]] += 1
'''
user_counter_dict = sorted(user_counter_dict.items(), key=lambda x: x[1], reverse=True)
self.user_counter_dict = user_counter_dict
self.core_user_list = [user_set[0] for user_set in user_counter_dict] # APPLY FILTERING TOPK IF NEEDED, not used in the original final version of UCPR #[:self.user_top_k]]
self.total_user_list = [user_set[0] for user_set in user_counter_dict]
# validated wrt pgpr
def reset(self, epoch, uids=None, training = False):
if uids is None:
all_uids = list(self.kg(USER).keys())
uids = [random.choice(all_uids)]
self._batch_path = [[(SELF_LOOP, USER, uid)] for uid in uids]
self._done = False
self._batch_curr_actions = self._batch_get_actions(self._batch_path, self._done)
self._batch_curr_reward = self._batch_get_reward(self._batch_path)
# validated wrt pgpr
#def kg_based_next_type(self, curr_node_type, relation, next_node_id):
# return self.et_idx2ty[next_node_id]
# validated wrt pgpr
#def rw_based_next_type(self, curr_node_type, relation, next_node_id):
# return KG_RELATION[curr_node_type][relation]
# validated wrt pgpr
def batch_step(self, batch_act_idx):
"""
Args:
batch_act_idx: list of integers.
Returns:
batch_next_state: numpy array of size [bs, state_dim].
batch_reward: numpy array of size [bs].
done: True/False
"""
assert len(batch_act_idx) == len(self._batch_path)
# Execute batch actions.
for i in range(len(batch_act_idx)):
act_idx = batch_act_idx[i]
_, curr_node_type, curr_node_id = self._batch_path[i][-1]
relation, next_node_id = self._batch_curr_actions[i][act_idx]
if relation == SELF_LOOP:
next_node_type = curr_node_type
else:
next_node_type = self.next_type(curr_node_type, relation, next_node_id)
self._batch_path[i].append((relation, next_node_type, next_node_id))
self._done = self._is_done() # must run before get actions, etc..
self._batch_curr_actions = self._batch_get_actions(self._batch_path, self._done)
self._batch_curr_reward = self._batch_get_reward(self._batch_path)
return None, self._batch_curr_reward
def _is_done(self):
"""Episode ends only if max path length is reached."""
return self._done or len(self._batch_path[0]) >= self.max_num_nodes
# validated wrt pgpr
def _batch_get_actions(self, batch_path, done):
return [self._get_actions(path, done) for path in batch_path]
# validated wrt pgpr
def _get_actions(self, path, done):
"""Compute actions for current node."""
main_product, review_interaction = MAIN_PRODUCT_INTERACTION[self.dataset_name]
_, curr_node_type, curr_node_id = path[-1]
actions = [(SELF_LOOP, curr_node_id)]
# actions = [(SELF_LOOP, curr_node_id)] # self-loop must be included.
# (1) If game is finished, only return self-loop action.
if done:
return actions
#print('tlabels_len', len(self.train_labels.items()) )
#s1 = set([x[0] for x in self.train_labels.items()])
#s2 = set([x for x in self.kg(USER).keys()])
#print(s1-s2 )
#print(s2-s1)
#print('uids len ', len(self.kg(USER).keys()) )
# (2) Get all possible edges from original knowledge graph.
# [CAVEAT] Must remove visited nodes!
relations_nodes = self.kg(curr_node_type, curr_node_id)
candidate_acts = [] # list of tuples of (relation, node_type, node_id)
visited_nodes = set([(v[1], v[2]) for v in path])
for r in relations_nodes:
next_node_ids = relations_nodes[r]
next_node_set = []
for n_id in next_node_ids:
# might need changing since utils.py has been modified
#print(curr_node_type, r, n_id)
next_node_set.append([self.next_type(curr_node_type, r, n_id),
n_id])
next_node_ids = [n_set[1] for n_set in next_node_set if (n_set[0], n_set[1]) not in visited_nodes]
candidate_acts.extend(zip([r] * len(next_node_ids), next_node_ids))
# (3) If candidate action set is empty, only return self-loop action.
if len(candidate_acts) == 0:
return actions
# (4) If number of available actions is smaller than max_acts, return action sets.
if len(candidate_acts) <= self.max_acts:
candidate_acts = sorted(candidate_acts, key=lambda x: (x[0], x[1]))
actions.extend(candidate_acts)
return actions
# (5) If there are too many actions, do some deterministic trimming here!
uid = path[0][-1]
user_embed = self.embeds[USER][uid]
scores = []
for r, next_node_id in candidate_acts:
next_node_type = self.next_type(curr_node_type, r, next_node_id)
# print('curr_node_type, r, next_node_id = ', curr_node_type, r, next_node_id)
# print('next_node_type = ', next_node_type)
#LASTFM ENTITIES
if next_node_type == USER:
src_embed = user_embed
elif next_node_type == main_product:
src_embed = user_embed + self.embeds[review_interaction][0]
else: # BRAND, CATEGORY, RELATED_PRODUCT
src_embed = user_embed + self.embeds[main_product][0] + self.embeds[r][0]
score = np.matmul(src_embed, self.embeds[next_node_type][next_node_id])
# This trimming may filter out target products!
# Manually set the score of target products a very large number.
# if next_node_type == PRODUCT and next_node_id in self._target_pids:
# score = 99999.0
scores.append(score)
candidate_idxs = np.argsort(scores)[-self.max_acts:] # choose actions with larger scores
candidate_acts = sorted([candidate_acts[i] for i in candidate_idxs], key=lambda x: (x[0], x[1]))
actions.extend(candidate_acts)
return actions
def _kg_get_reward(self, path):
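        """Terminal reward: 1.0 if the path matches a valid pattern and ends on a
        product the user interacted with in the training set, else 0.0."""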
# If it is initial state or 1-hop search, reward is 0.
if len(path) <= 3:
return 0.0
if not self.PATH_PTN._kg_has_pattern(path):
return 0.0
target_score = 0.0
_, curr_node_type, curr_node_id = path[-1]
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[self.dataset_name]
if curr_node_type == main_entity:
# Give soft reward for other reached products.
score = 0
uid = path[0][-1]
if curr_node_id in self.kg(USER, uid)[main_relation]: score += 1
else: score += 0.0
target_score = max(score, 0.0)
return target_score
def _rw_get_reward(self, path):
# If it is initial state or 1-hop search, reward is 0.
if len(path) <= 2:
return 0.0
# print('path = ', path)
if not self.PATH_PTN._rw_has_pattern(path):
return 0.0
target_score = 0.0
# print('has type')
_, curr_node_type, curr_node_id = path[-1]
main_entity, main_relation = MAIN_PRODUCT_INTERACTION[self.dataset_name]
if curr_node_type == main_entity:
# Give soft reward for other reached products.
uid = path[0][-1]
score = 0
if curr_node_id in self.kg(USER, uid)[main_relation]: score += 1
else: score += 0.0
target_score = max(score, 0.0)
# print(path, 'target_score = ', target_score)
# input()
return target_score
def _batch_get_reward(self, batch_path):
batch_reward = [self._get_reward(path) for path in batch_path]
return np.array(batch_reward)
def batch_action_mask(self, dropout=0.0):
"""Return action masks of size [bs, act_dim]."""
batch_mask = []
for actions in self._batch_curr_actions:
act_idxs = list(range(len(actions)))
if dropout > 0 and len(act_idxs) >= 5:
keep_size = int(len(act_idxs[1:]) * (1.0 - dropout))
tmp = np.random.choice(act_idxs[1:], keep_size, replace=False).tolist()
act_idxs = [act_idxs[0]] + tmp
# act_mask = np.zeros(self.act_dim, dtype=np.uint8)
act_mask = np.zeros(self.act_dim)
act_mask[act_idxs] = 1
batch_mask.append(act_mask)
return np.vstack(batch_mask)
def output_valid_user(self):
return [user for user in list(self.kg(USER).keys()) if user in self.user_list]
| 16,021 | 39.768448 | 180 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/UCPR.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.src.model.lstm_base.model_lstm_mf_emb import AC_lstm_mf_dummy
from models.UCPR.src.model.lstm_base.model_kg import KG_KGE#, RW_KGE
from models.UCPR.src.model.lstm_base.model_kg_pre import KG_KGE_pretrained#, RW_KGE_pretrained
from models.UCPR.src.model.lstm_base.backbone_lstm import EncoderRNN, EncoderRNN_batch, KGState_LSTM, KGState_LSTM_no_rela
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class UCPR(AC_lstm_mf_dummy):
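    """Actor-critic reasoner that scores candidate actions with both the LSTM path
    state and a multi-step attention ("reasoning") over the user's ripple-set memories."""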
def __init__(self, args, user_triplet_set, rela_2_index, act_dim, gamma=0.99, hidden_sizes=[512, 256]):
super().__init__(args, user_triplet_set, rela_2_index, act_dim, gamma, hidden_sizes)
self.l2_weight = args.l2_weight
self.sub_batch_size = args.sub_batch_size
self.scalar = nn.Parameter(torch.Tensor([args.lambda_num]), requires_grad=True)
print('args.lambda_num = ', args.lambda_num)
self.dummy_rela = torch.ones(max(self.user_triplet_set) * 2 + 1, 1, self.embed_size)
self.dummy_rela = nn.Parameter(self.dummy_rela, requires_grad=True).to(self.device)
self.dummy_rela_emb = nn.Embedding(max(self.user_triplet_set) * 2 + 1, self.embed_size * self.embed_size).to(self.device)
self.dataset_name = args.dataset
self.dataset = load_dataset(args.dataset)
if self.args.envir == 'p1':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: \
KG_RELATION[self.dataset_name][curr_node_type][relation]
if self.args.KGE_pretrained == True: self.kg_emb = RW_KGE_pretrained(args)
else: self.kg_emb = RW_KGE(args)
elif self.args.envir == 'p2':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: \
KG_RELATION[self.dataset_name][curr_node_type][relation]
if self.args.KGE_pretrained == True: self.kg_emb = KG_KGE_pretrained(args)
else: self.kg_emb = KG_KGE(args)
self.bulid_mode_user()
self.bulid_model_rl()
self.bulid_model_reasoning()
def bulid_model_rl(self):
self.state_lstm = KGState_LSTM(self.args, history_len=1)
self.l1 = nn.Linear(2 * self.embed_size, self.hidden_sizes[1])
self.l2 = nn.Linear(self.hidden_sizes[0], self.hidden_sizes[1])
self.actor = nn.Linear(self.hidden_sizes[1], self.act_dim)
self.critic = nn.Linear(self.hidden_sizes[1], 1)
self.saved_actions = []
self.rewards = []
self.entropy = []
def bulid_model_reasoning(self):
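        """Create one set of projection layers per reasoning step; each step queries
        the ripple-set memories and refines the user representation."""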
self.reasoning_step = self.args.reasoning_step
self.rn_state_tr_query = []
self.update_rn_state = []
self.rn_query_st_tr = []
self.rh_query = []
self.o_r_query = []
self.v_query = []
self.t_u_query = []
for i in range(self.reasoning_step):
self.rn_state_tr_query.append(nn.Linear(self.embed_size * 2, self.embed_size).cuda())
self.update_rn_state.append(nn.Linear(self.embed_size * 2, self.embed_size).cuda())
self.rn_query_st_tr.append(nn.Linear(self.embed_size * (self.p_hop), self.embed_size).cuda())
self.rh_query.append(nn.Linear(self.embed_size, self.embed_size, bias=False).cuda())
self.o_r_query.append(nn.Linear(self.embed_size, self.embed_size, bias=False).cuda())
self.v_query.append(nn.Linear(self.embed_size, self.embed_size, bias=False).cuda())
self.t_u_query.append(nn.Linear(self.embed_size, self.embed_size, bias=False).cuda())
self.rn_state_tr_query = nn.ModuleList(self.rn_state_tr_query)
self.update_rn_state = nn.ModuleList(self.update_rn_state)
self.rn_query_st_tr = nn.ModuleList(self.rn_query_st_tr)
self.rh_query = nn.ModuleList(self.rh_query)
self.o_r_query = nn.ModuleList(self.o_r_query)
self.v_query = nn.ModuleList(self.v_query)
self.t_u_query = nn.ModuleList(self.t_u_query)
self.rn_cal_state_prop = nn.Linear(self.embed_size, 1, bias=False).cuda()
def bulid_mode_user(self):
self.relation_emb = nn.Embedding(len(self.rela_2_index), self.embed_size * self.embed_size)
self.update_us_tr = []
for hop in range(self.p_hop):
self.update_us_tr.append(nn.Linear(self.embed_size * 2, self.embed_size).cuda())
self.update_us_tr = nn.ModuleList(self.update_us_tr)
self.cal_state_prop = nn.Linear(self.embed_size * 3, 1)
def reset(self, uids=None):
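        """Start new episodes: replicate each uid sub_batch_size times, materialise the
        head/relation/tail embedding tensors of every user's triplet set, and reset the
        LSTM hidden state."""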
self.lstm_state_cache = []
self.uids = [uid for uid in uids for _ in range(self.sub_batch_size)]
self.memories_h = {}
self.memories_r = {}
self.memories_t = {}
for i in range(max(1,self.p_hop)):
self.memories_h[i] = th.cat([th.cat([self.kg_emb.lookup_emb(u_set[0], type_index = torch.LongTensor([u_set[1]]).to(self.device))
for u_set in self.user_triplet_set[user][i][0]], 0).unsqueeze(0) for user in self.uids], 0)
self.memories_r[i] = th.cat([self.relation_emb(torch.LongTensor([self.rela_2_index[relation]
for relation in self.user_triplet_set[user][i][1]]).to(self.device)).unsqueeze(0) for user in self.uids], 0)
self.memories_r[i] = self.memories_r[i].view(-1, self.n_memory, self.embed_size, self.embed_size)
self.memories_t[i] = th.cat([th.cat([self.kg_emb.lookup_emb(u_set[0], type_index = torch.LongTensor([u_set[1]]).to(self.device))
for u_set in self.user_triplet_set[user][i][2]], 0).unsqueeze(0) for user in self.uids], 0)
self.prev_state_h, self.prev_state_c = self.state_lstm.set_up_hidden_state(len(self.uids))
def forward(self, inputs):
state, res_user_emb, next_enti_emb, next_action_emb, act_mask = inputs # state: [bs, state_dim], act_mask: [bs, act_dim]
state_tr = state.unsqueeze(1).repeat(1, next_action_emb.shape[1], 1)
probs_st = state_tr * next_action_emb
res_user_emb = res_user_emb.unsqueeze(1).repeat(1, next_enti_emb.shape[1], 1)
probs_user = res_user_emb * next_enti_emb
probs_st = probs_st.sum(-1)
probs_user = probs_user.sum(-1)
scalar = self.scalar.unsqueeze(1).repeat(probs_user.shape[0],1)
probs = probs_st + self.scalar * probs_user
probs = probs.masked_fill(~act_mask, value=torch.tensor(-1e10))
act_probs = F.softmax(probs, dim=-1)
x = self.l1(state)
x = F.dropout(F.elu(x), p=0.4)
state_values = self.critic(x) # Tensor of [bs, 1]
return act_probs, state_values
def select_action(self, batch_state, batch_next_action_emb, batch_act_mask, device):
act_mask = torch.BoolTensor(batch_act_mask).to(device) # Tensor of [bs, act_dim]
state_output, res_user_emb = batch_state[0], batch_state[1]
next_enti_emb, next_action_emb = batch_next_action_emb[0], batch_next_action_emb[1]
probs, value = self((state_output, res_user_emb, next_enti_emb, next_action_emb, act_mask)) # act_probs: [bs, act_dim], state_value: [bs, 1]
m = Categorical(probs)
acts = m.sample() # Tensor of [bs, ], requires_grad=False
# [CAVEAT] If sampled action is out of action_space, choose the first action in action_space.
valid_idx = act_mask.gather(1, acts.view(-1, 1)).view(-1)
acts[valid_idx == 0] = 0
self.saved_actions.append(SavedAction(m.log_prob(acts), value))
self.entropy.append(m.entropy())
return acts.cpu().numpy().tolist()
def rn_query_st(self, state, relation_embed_dual, rn_step):
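        """One reasoning step: attention over the p-hop ripple-set memories.

        For every hop, each memory triple is scored from its relation-projected tail
        (Rh), the current state, the dual relation projection and the user embedding;
        the softmax weights then pool the tails into an updated user vector.
        """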
#print('RN QUERY ST')
user_embeddings = self.memories_h[0][:,0]
#print(user_embeddings.shape)
# state = th.cat([state.squeeze(), user_embeddings], -1)
state = th.cat([state.squeeze(1)], -1)
relation_embed_dual = th.cat([relation_embed_dual.squeeze(1)], -1)
#print(state.shape)
#print(relation_embed_dual.shape)
o_list = []
for hop in range(self.p_hop):
h_expanded = torch.unsqueeze(self.memories_t[hop], dim=3)
#print(f'\t hop={hop} ', h_expanded.shape)
#print(f'\t hop={hop} ', self.memories_r[hop].shape)
Rh = torch.squeeze(torch.matmul(self.memories_r[hop], h_expanded))
# [batch_size, dim, 1]
#print(f'\t hop={hop} Rh ', Rh.shape)
#print(f'\t hop={hop} take tail shape', self.memories_t[0].shape)
v = state.unsqueeze(1).repeat(1, self.memories_t[0].shape[1], 1)
#print(f'\t hop={hop} Rh ', v.shape)
r_v = relation_embed_dual.unsqueeze(1).repeat(1, self.memories_t[0].shape[1], 1, 1)
#print(f'\t hop={hop} r_v ', r_v.shape)
r_vh = torch.squeeze(torch.matmul(r_v, h_expanded))
#print(f'\t hop={hop} r_vh', r_vh.shape)
t_u = user_embeddings.unsqueeze(1).repeat(1, self.memories_t[0].shape[1], 1)
#print(f'\t hop={hop} Rh ', t_u.shape)
q_Rh = self.rh_query[rn_step](Rh)
q_v = self.v_query[rn_step](v)
t_u = self.t_u_query[rn_step](t_u)
o_r = self.o_r_query[rn_step](r_vh)
t_state = torch.tanh(q_Rh + q_v + t_u + o_r)
# print('t_state = ', t_state.shape)
#print('rn_cal_prop: ', self.rn_cal_state_prop(t_state).shape)
probs = torch.squeeze(self.rn_cal_state_prop(t_state),-1)
#print('probs: ', probs.shape)
probs_normalized = F.softmax(probs, dim=1)
probs_expanded = torch.unsqueeze(probs_normalized, dim=2)
# [batch_size, dim]
o = (self.memories_t[hop] * probs_expanded).sum(dim=1).unsqueeze(1)
o_list.append(o)
o_list = torch.cat(o_list, 1)
user_o = o_list.sum(1)
return user_o
def update_query_embedding(self, selc_entitiy):
# update before query
selc_entitiy = selc_entitiy.repeat(1, self.memories_t[0].shape[1], 1)
for hop in range(self.p_hop):
tmp_memories_t = th.cat([self.memories_t[hop], selc_entitiy], -1)
self.memories_t[hop] = self.update_us_tr[hop](tmp_memories_t)
def update_path_info_memories(self, up_date_hop):
new_memories_h = {}
new_memories_r = {}
new_memories_t = {}
for i in range(max(1,self.p_hop)):
new_memories_h[i] = []
new_memories_r[i] = []
new_memories_t[i] = []
for row in up_date_hop:
new_memories_h[i].append(self.memories_h[i][row,:,:].unsqueeze(0))
new_memories_r[i].append(self.memories_r[i][row,:,:,:].unsqueeze(0))
new_memories_t[i].append(self.memories_t[i][row,:,:].unsqueeze(0))
self.memories_h[i] = th.cat(new_memories_h[i], 0).to(self.device)
self.memories_r[i] = th.cat(new_memories_r[i], 0).to(self.device)
self.memories_t[i] = th.cat(new_memories_t[i], 0).to(self.device)
def generate_st_emb(self, batch_path, up_date_hop = None):
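        """Encode the current batch of paths: feed the newest (relation, entity) pair
        through the LSTM and run reasoning_step rounds of memory attention to obtain
        the user-side representation."""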
if up_date_hop != None:
self.update_path_info(up_date_hop)
self.update_path_info_memories(up_date_hop)
tmp_state = [self._get_state_update(index, path) for index, path in enumerate(batch_path)]
all_state = th.cat([ts[3].unsqueeze(0) for ts in tmp_state], 0)
#print(all_state.shape)
if len(batch_path[0]) != 1:
selc_entitiy = th.cat([ts[0].unsqueeze(0) for ts in tmp_state], 0)
#print(selc_entitiy.shape)
self.update_query_embedding(selc_entitiy)
#print('Prev state: ', all_state.shape, self.prev_state_h.shape, self.prev_state_c.shape)
state_output, self.prev_state_h, self.prev_state_c = self.state_lstm(all_state,
self.prev_state_h, self.prev_state_c)
#print('New state: ', state_output.shape, self.prev_state_h.shape, self.prev_state_c.shape)
curr_node_embed = th.cat([ts[0].unsqueeze(0) for ts in tmp_state], 0)
relation_embed = th.cat([ts[1].unsqueeze(0) for ts in tmp_state], 0)
relation_embed_dual = th.cat([ts[2].unsqueeze(0) for ts in tmp_state], 0)
#print('Rel: ', curr_node_embed.shape, relation_embed.shape, relation_embed_dual.shape)
#print('Reasoning step')
state_tmp = relation_embed
#print(state_tmp.shape)
for rn_step in range(self.reasoning_step):
query_state = self.rn_query_st(state_tmp, relation_embed_dual, rn_step)
#print(rn_step, ' ', query_state.shape)
if rn_step < self.reasoning_step - 1:
state_tmp_ = th.cat([query_state, state_tmp], -1)
#print('s1.1: ', state_tmp_.shape)
state_tmp = self.update_rn_state[rn_step](state_tmp_)
#print('s1.2: ', state_tmp.shape)
# input()
res_user_emb = query_state
#print('Res user emb: ', query_state.shape)
state_output = state_output.squeeze(1)#state_output.squeeze() #results in crash if batch size is 1,
# furthermore the squeeze is useless, as for normal batches the tensors before squeeze do not have collapsable dimensions
res_user_emb = res_user_emb.squeeze(1)# res_user_emb.squeeze() #results in crash if batch size is 1
# furthermore the squeeze is useless, as for normal batches the tensors before squeeze do not have collapsable dimensions
#print('Squeeze: ', state_output.shape, res_user_emb.shape)
return [state_output, res_user_emb]
def generate_act_emb(self, batch_path, batch_curr_actions):
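        """Look up entity and [entity, relation] embeddings for every candidate action
        of every path in the batch, padded up to act_dim."""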
all_action_set = [self._get_actions(index, actions_sets[0], actions_sets[1])
for index, actions_sets in enumerate(zip(batch_path, batch_curr_actions))]
enti_emb = th.cat([action_set[0].unsqueeze(0) for action_set in all_action_set], 0)
next_action_state = th.cat([action_set[1].unsqueeze(0) for action_set in all_action_set], 0)
return [enti_emb, next_action_state]
def _get_actions(self, index, curr_path, curr_actions):
last_relation, curr_node_type, curr_node_id = curr_path[-1]
entities_embs = []
relation_embs = []
for action_set in curr_actions:
if action_set[0] == SELF_LOOP: next_node_type = curr_node_type
else: next_node_type = self._get_next_node_type(curr_node_type, action_set[0], action_set[1])
enti_emb = self.kg_emb.lookup_emb(next_node_type,
type_index = torch.LongTensor([action_set[1]]).to(self.device))
entities_embs.append(enti_emb)
rela_emb = self.kg_emb.lookup_rela_emb(action_set[0])
relation_embs.append(rela_emb)
pad_emb = self.kg_emb.lookup_rela_emb(PADDING)
for _ in range(self.act_dim - len(entities_embs)):
entities_embs.append(pad_emb)
relation_embs.append(pad_emb)
enti_emb = th.cat(entities_embs, 0)
rela_emb = th.cat(relation_embs, 0)
next_action_state = th.cat([enti_emb, rela_emb], -1)
return [enti_emb, next_action_state]
def _get_state_update(self, index, path):
"""Return state of numpy vector: [user_embed, curr_node_embed, last_node_embed, last_relation]."""
if len(path) == 1:
user_embed = self.kg_emb.lookup_emb(USER, type_index =
torch.LongTensor([path[0][-1]]).to(self.device))[0].unsqueeze(0)
curr_node_embed = user_embed
last_relation_embed = self.dummy_rela[path[0][-1], :, :]
relation_embed_dual = self.dummy_rela_emb(torch.LongTensor([path[0][-1]]).to(self.device))
relation_embed_dual = relation_embed_dual.view(self.embed_size, self.embed_size)
st_emb = self.action_encoder(last_relation_embed, user_embed)
else:
last_relation, curr_node_type, curr_node_id = path[-1]
# print('last_relation, curr_node_type, curr_node_id = ', last_relation, curr_node_type, curr_node_id )
curr_node_embed = self.kg_emb.lookup_emb(curr_node_type,
type_index = torch.LongTensor([curr_node_id]).to(self.device))[0].unsqueeze(0)
last_relation_embed = self.kg_emb.lookup_rela_emb(last_relation)[0].unsqueeze(0)
relation_embed_dual = self.relation_emb(torch.LongTensor([self.rela_2_index[last_relation]]).to(self.device))
relation_embed_dual = relation_embed_dual.view(self.embed_size, self.embed_size)
st_emb = self.action_encoder(last_relation_embed, curr_node_embed)
return [curr_node_embed, last_relation_embed.squeeze(), relation_embed_dual.squeeze(), st_emb]
def update(self, optimizer, env_model, device, ent_weight, step):
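        """Actor-critic update over one batch of episodes: discounted returns,
        advantage-weighted policy loss, squared-advantage critic loss, entropy
        bonus and L2 regularisation."""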
        if len(self.rewards) <= 0:
            del self.rewards[:]
            del self.saved_actions[:]
            del self.entropy[:]
            return 0.0, 0.0, 0.0, 0.0  # match the arity of the normal return below
batch_rewards = np.vstack(self.rewards).T # numpy array of [bs, #steps]
batch_rewards = torch.FloatTensor(batch_rewards).to(device)
num_steps = batch_rewards.shape[1]
for i in range(1, num_steps):
batch_rewards[:, num_steps - i - 1] += self.gamma * batch_rewards[:, num_steps - i]
actor_loss = 0
critic_loss = 0
entropy_loss = 0
for i in range(0, num_steps):
log_prob, value = self.saved_actions[i] # log_prob: Tensor of [bs, ], value: Tensor of [bs, 1]
advantage = batch_rewards[:, i] - value.squeeze(1) # Tensor of [bs, ]
actor_loss += -log_prob * advantage.detach() # Tensor of [bs, ]
critic_loss += advantage.pow(2) # Tensor of [bs, ]
entropy_loss += -self.entropy[i] # Tensor of [bs, ]
l2_reg = 0
for name, param in self.named_parameters():
if 'weight' in name:
l2_reg += torch.norm(param)
l2_loss = self.l2_weight * l2_reg
actor_loss = actor_loss.mean()
critic_loss = critic_loss.mean()
entropy_loss = entropy_loss.mean()
loss = actor_loss + critic_loss + ent_weight * entropy_loss + l2_loss
if self.training:
optimizer.zero_grad()
loss.backward()
# if step % 50 == 0:
# plot_grad_flow_v2(self.named_parameters(), self.args.log_dir, step)
# print('grad_cherck')
optimizer.step()
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
return loss.item(), actor_loss.item(), critic_loss.item(), entropy_loss.item()
| 19,300 | 44.521226 | 149 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/baseline/baseline.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.src.model.lstm_base.model_kg import KG_KGE#, RW_KGE
from models.UCPR.src.model.lstm_base.model_kg_pre import KG_KGE_pretrained#, RW_KGE_pretrained
from models.UCPR.src.model.lstm_base.backbone_lstm import EncoderRNN, EncoderRNN_batch, KGState_LSTM, KGState_LSTM_ERU
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class ActorCritic(nn.Module):
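    """Plain actor-critic baseline: the policy scores actions from a concatenated
    [user, current node, last node, last relation] state, without the reasoning module."""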
def __init__(self, args, user_triplet_set, rela_2_index, act_dim, gamma=0.99, hidden_sizes=[512, 256]):
super(ActorCritic, self).__init__()
self.args = args
self.act_dim = act_dim
self.device = args.device
self.sub_batch_size = args.sub_batch_size
self.gamma = gamma
self.p_hop = args.p_hop
self.hidden_sizes = hidden_sizes
self.n_memory = args.n_memory
self.kg = load_kg(args.dataset)
# self.gradient_plot_save = args.gradient_plot_save
self.embed_size = args.embed_size
self.user_triplet_set = user_triplet_set
self.rela_2_index = rela_2_index
if self.args.envir == 'p1':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: self.kg(curr_node_type, next_node_id, relation) #self._get_next_node_type_meta
if self.args.KGE_pretrained == True:
self.kg_emb = RW_KGE_pretrained(args)
else:
self.kg_emb = RW_KGE(args)
elif self.args.envir == 'p2':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: self.kg(curr_node_type, next_node_id, relation)#self._get_next_node_type_graph
if self.args.KGE_pretrained == True:
self.kg_emb = KG_KGE_pretrained(args)
else:
self.kg_emb = KG_KGE(args)
dataset = load_dataset(args.dataset)
self.bulid_model_rl()
'''
def _get_next_node_type_meta(self, curr_node_type, next_relation, next_entity_id):
return KG_RELATION[curr_node_type][next_relation]
def _get_next_node_type_graph(self, curr_node_type, next_relation, next_entity_id):
return self.et_idx2ty[next_entity_id]
'''
def bulid_model_rl(self):
self.state_lstm = KGState_LSTM(self.args, history_len=1)
self.transfor_state = nn.Linear(2 * self.embed_size, 2 * self.embed_size)
self.state_tr_query = nn.Linear(self.embed_size * 3, self.embed_size)
self.l1 = nn.Linear(4 * self.embed_size, 2 * self.embed_size)
self.l2 = nn.Linear(self.hidden_sizes[0], self.hidden_sizes[1])
self.actor = nn.Linear(2 * self.embed_size, self.act_dim)
self.critic = nn.Linear(2 * self.embed_size, 1)
self.saved_actions = []
self.rewards = []
self.entropy = []
def forward(self, inputs):
# print('inputs = ', inputs)
state, _, act_mask = inputs # state: [bs, state_dim], act_mask: [bs, act_dim]
state = state.squeeze(1)
x = self.l1(state)
actor_logits = self.actor(x)
probs = actor_logits.masked_fill(~act_mask, value=torch.tensor(-1e10))
act_probs = F.softmax(probs, dim=-1)
state_values = self.critic(x) # Tensor of [bs, 1]
return act_probs, state_values
def select_action(self, batch_state, batch_next_action_emb,
batch_act_mask, device):
act_mask = torch.BoolTensor(batch_act_mask).to(device) # Tensor of [bs, act_dim]
probs, value = self((batch_state, batch_next_action_emb, act_mask)) # act_probs: [bs, act_dim], state_value: [bs, 1]
m = Categorical(probs)
acts = m.sample() # Tensor of [bs, ], requires_grad=False
# [CAVEAT] If sampled action is out of action_space, choose the first action in action_space.
valid_idx = act_mask.gather(1, acts.view(-1, 1)).view(-1)
acts[valid_idx == 0] = 0
self.saved_actions.append(SavedAction(m.log_prob(acts), value))
self.entropy.append(m.entropy())
return acts.cpu().numpy().tolist()
def reset(self, uids=None):
self.uids = [uid for uid in uids for _ in range(1)]
def update_path_info(self, up_date_hop):
new_uids = []
for row in up_date_hop:
new_uids.append(self.uids[row])
self.uids = new_uids
def generate_st_emb(self, batch_path, up_date_hop = None):
if up_date_hop != None:
self.update_path_info(up_date_hop)
state_output = th.cat([self._get_state_update(index, path).unsqueeze(0)
for index, path in enumerate(batch_path)], 0)
return state_output
def _get_state_update(self, index, path):
"""Return state of numpy vector: [user_embed, curr_node_embed, last_node_embed, last_relation]."""
user_embed = self.kg_emb.lookup_emb(USER, type_index =
torch.LongTensor([path[0][-1]]).to(self.device))[0].unsqueeze(0)
if len(path) == 1:
curr_node_embed = user_embed
zero_embed = torch.zeros(self.embed_size).to(self.device).unsqueeze(0)
st_emb = th.cat([user_embed, user_embed, zero_embed, zero_embed], -1)
else:
older_relation, last_node_type, last_node_id = path[-2]
last_relation, curr_node_type, curr_node_id = path[-1]
curr_node_embed = self.kg_emb.lookup_emb(curr_node_type,
type_index = torch.LongTensor([curr_node_id]).to(self.device))[0].unsqueeze(0)
last_node_embed = self.kg_emb.lookup_emb(last_node_type,
type_index = torch.LongTensor([last_node_id]).to(self.device))[0].unsqueeze(0)
last_relation_embed = self.kg_emb.lookup_rela_emb(last_relation)[0].unsqueeze(0)
# st_emb = self.action_encoder(last_relation_embed, curr_node_embed)
st_emb = th.cat([user_embed, curr_node_embed, last_node_embed, last_relation_embed], -1)
return st_emb
def action_encoder(self, relation_emb, entitiy_emb):
action_embedding = th.cat([relation_emb, entitiy_emb], -1)
return action_embedding
def generate_act_emb(self, batch_path, batch_curr_actions):
return None
# return th.cat([self._get_actions(index, actions_sets[0],
# actions_sets[1]).unsqueeze(0) for index, actions_sets in enumerate(zip(batch_path, batch_curr_actions))], 0)
# def _get_actions(self, index, curr_path, curr_actions):
# last_relation, curr_node_type, curr_node_id = curr_path[-1]
# entities_embs = []
# relation_embs = []
# for action_set in curr_actions:
# if action_set[0] == SELF_LOOP: next_node_type = curr_node_type
# else: next_node_type = self._get_next_node_type(curr_node_type, action_set[0], action_set[1])
# enti_emb = self.kg_emb.lookup_emb(next_node_type,
# type_index = torch.LongTensor([action_set[1]]).to(self.device))
# entities_embs.append(enti_emb)
# rela_emb = self.kg_emb.lookup_rela_emb(action_set[0])
# relation_embs.append(rela_emb)
# pad_emb = self.kg_emb.lookup_rela_emb(PADDING)
# for _ in range(self.act_dim - len(entities_embs)):
# entities_embs.append(pad_emb)
# relation_embs.append(pad_emb)
# enti_emb = th.cat(entities_embs, 0)
# rela_emb = th.cat(relation_embs, 0)
# next_action_state = th.cat([enti_emb, rela_emb], -1)
# return next_action_state
def _get_actions(self, index, curr_path, curr_actions):
last_relation, curr_node_type, curr_node_id = curr_path[-1]
entities_embs = []
relation_embs = []
for action_set in curr_actions:
if action_set[0] == SELF_LOOP: next_node_type = curr_node_type
else: next_node_type = self._get_next_node_type(curr_node_type, action_set[0], action_set[1])
enti_emb = self.kg_emb.lookup_emb(next_node_type,
type_index = torch.LongTensor([action_set[1]]).to(self.device))
entities_embs.append(enti_emb)
rela_emb = self.kg_emb.lookup_rela_emb(action_set[0])
relation_embs.append(rela_emb)
pad_emb = self.kg_emb.lookup_rela_emb(PADDING)
for _ in range(self.act_dim - len(entities_embs)):
entities_embs.append(pad_emb)
relation_embs.append(pad_emb)
enti_emb = th.cat(entities_embs, 0)
rela_emb = th.cat(relation_embs, 0)
next_action_state = th.cat([enti_emb, rela_emb], -1)
return next_action_state
'''
def _get_next_node_type(self, curr_node_type, next_relation, next_entity_id):
pass
'''
def update(self, optimizer, env_model, device, ent_weight, step):
        if len(self.rewards) <= 0:
            del self.rewards[:]
            del self.saved_actions[:]
            del self.entropy[:]
            return 0.0, 0.0, 0.0, 0.0  # match the arity of the normal return below
batch_rewards = np.vstack(self.rewards).T # numpy array of [bs, #steps]
batch_rewards = torch.FloatTensor(batch_rewards).to(device)
num_steps = batch_rewards.shape[1]
for i in range(1, num_steps):
batch_rewards[:, num_steps - i - 1] += self.gamma * batch_rewards[:, num_steps - i]
actor_loss = 0
critic_loss = 0
entropy_loss = 0
for i in range(0, num_steps):
log_prob, value = self.saved_actions[i] # log_prob: Tensor of [bs, ], value: Tensor of [bs, 1]
advantage = batch_rewards[:, i] - value.squeeze(1) # Tensor of [bs, ]
actor_loss += -log_prob * advantage.detach() # Tensor of [bs, ]
critic_loss += advantage.pow(2) # Tensor of [bs, ]
entropy_loss += -self.entropy[i] # Tensor of [bs, ]
actor_loss = actor_loss.mean()
critic_loss = critic_loss.mean()
entropy_loss = entropy_loss.mean()
loss = actor_loss + critic_loss + ent_weight * entropy_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
return loss.item(), actor_loss.item(), critic_loss.item(), entropy_loss.item()
| 10,812 | 38.463504 | 165 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/lstm_base/model_kg_pre.py |
from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class KG_KGE_pretrained(nn.Module):
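    """Embedding lookup module initialised from pretrained KG embeddings; entity tables
    are trainable only if kg_emb_grad is set, while relation vectors stay trainable."""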
def __init__(self, args):
super(KG_KGE_pretrained, self).__init__()
self.device = args.device
self.l2_lambda = args.l2_lambda
dataset = load_dataset(args.dataset)
self.kg = load_kg(args.dataset)
self.embed_size = args.embed_size
# self.embeds = load_embed(args.dataset)
'''
try:
self.embeds = load_embed_dim(args.dataset, args.embed_size)
print('self.embeds = load_embed(load_embed_dim) = ')
args.logger.info('self.embeds = load_embed(load_embed_dim')
except:
'''
self.embeds = load_embed(args.dataset)
self.requires_grad = args.kg_emb_grad
self.dataset_name = args.dataset
self.relation_names = dataset.other_relation_names
self.entity_names = dataset.entity_names
self.relation2entity = dataset.relation2entity
# Initialize entity embeddings.
self.initialize_entity_embeddings(dataset)
for e in self.entities:
embed = self._entity_embedding(e, self.entities[e].vocab_size)
setattr(self, e, embed)
# Initialize relation embeddings and relation biases.
self.initialize_relations_embeddings(dataset)
for r in self.relations:
embed = self._relation_embedding(r)
setattr(self, r, embed)
bias = self._relation_bias(len(self.relations[r].et_distrib))
setattr(self, r + '_bias', bias)
embed = self._relation_embedding(PADDING)
setattr(self, PADDING, embed)
embed = self._relation_embedding(SELF_LOOP)
setattr(self, SELF_LOOP, embed)
#bias = self._relation_bias(len(self.relations[r].et_distrib))
#setattr(self, PADDING + '_bias', bias)
def initialize_entity_embeddings(self, dataset):
self.entities = edict()
for entity_name in self.entity_names:
value = edict(vocab_size=getattr(dataset, entity_name).vocab_size)
self.entities[entity_name] = value
def initialize_relations_embeddings(self, dataset):
self.relations = edict()
for relation_name in dataset.other_relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
def _entity_embedding(self, key, vocab_size):
"""Create entity embedding of size [vocab_size+1, embed_size].
Note that last dimension is always 0's.
"""
embed = nn.Embedding(vocab_size + 1, self.embed_size, padding_idx=-1, sparse=False)
embed.weight.data = torch.from_numpy(np.concatenate((self.embeds[key], np.zeros_like(self.embeds[key])[:1,:]), axis=0)[:,:self.embed_size])
embed.weight.requires_grad = self.requires_grad
# print('key = ', key)
# print('self.embeds[key] = ', (torch.from_numpy(np.concatenate((self.embeds[key], np.zeros_like(self.embeds[key])[:1,:]), axis=0))[:,:self.embed_size]).shape)
# print('vocab_size + 1 = ', vocab_size + 1)
# print('embed = ', embed.weight.shape)
# print('embed.requires_grad = ', embed.requires_grad)
return embed
def _relation_embedding(self, key):
"""Create relation vector of size [1, embed_size]."""
# initrange = 0.5 / self.embed_size
# embed = nn.Parameter(torch.from_numpy(self.embeds[key][0])[:,:self.embed_size])
# print('torch.from_numpy(self.embeds[key][0:])[:,:self.embed_size] = ', (torch.from_numpy(self.embeds[key][0])[:,:self.embed_size]).shpae)
# embed.requires_grad = self.requires_grad
# return embed
if key != SELF_LOOP and key != 'padding':
# print('self.embeds[key][0]) = ', self.embeds[key][0].shape)
embed = nn.Parameter(torch.from_numpy(np.expand_dims(self.embeds[key][0], axis=0)[:,:self.embed_size]))
# print(key, 'embed = ', embed.shape)
else:
weight = torch.randn(1, self.embed_size, requires_grad=True)
embed = nn.Parameter(weight[:,:self.embed_size])
# print(key, 'embed = ', embed.shape)
embed.requires_grad = True
return embed
def _relation_bias(self, vocab_size):
"""Create relation bias of size [vocab_size+1]."""
bias = nn.Embedding(vocab_size + 1, 1, padding_idx=-1, sparse=False)
bias.weight = nn.Parameter(torch.zeros(vocab_size + 1, 1))
bias.requires_grad = True
# bias.weight.requires_grad = self.requires_grad
return bias
def _make_distrib(self, distrib):
"""Normalize input numpy vector to distribution."""
        distrib = np.power(np.array(distrib, dtype=float), 0.75)  # np.float is removed in recent NumPy versions
distrib = distrib / distrib.sum()
distrib = torch.FloatTensor(distrib).to(self.device)
return distrib
def lookup_emb(self, node_type, type_index):
embedding_file = getattr(self, node_type)
entity_vec = embedding_file(type_index)
return entity_vec
def lookup_rela_emb(self, node_type):
relation_vec = getattr(self, node_type)
return relation_vec
| 6,028 | 36.918239 | 167 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/lstm_base/model_lstm_mf_emb.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.src.model.lstm_base.model_kg import KG_KGE#, RW_KGE
from models.UCPR.src.model.lstm_base.model_kg_pre import KG_KGE_pretrained#, RW_KGE_pretrained
from models.UCPR.src.model.lstm_base.backbone_lstm import EncoderRNN_batch, KGState_LSTM
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class AC_lstm_mf_dummy(nn.Module):
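    """LSTM-based actor-critic backbone shared by UCPR: keeps a per-path LSTM hidden
    state and scores candidate actions by a dot product with the path state."""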
def __init__(self, args, user_triplet_set, rela_2_index, act_dim, gamma=0.99, hidden_sizes=[512, 256]):
super(AC_lstm_mf_dummy, self).__init__()
self.args = args
self.act_dim = act_dim
self.device = args.device
self.gamma = gamma
self.p_hop = args.p_hop
self.hidden_sizes = hidden_sizes
self.n_memory = args.n_memory
self.rela_2_index = rela_2_index
self.l2_weight = args.l2_weight
self.embed_size = args.embed_size
self.user_triplet_set = user_triplet_set
self.kg = load_kg(args.dataset)
self.dataset_name = self.kg.dataset_name
if self.args.envir == 'p1':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: \
KG_RELATION[self.dataset_name][curr_node_type][relation]
if self.args.KGE_pretrained == True:
self.kg_emb = RW_KGE_pretrained(args)
else:
self.kg_emb = RW_KGE(args)
elif self.args.envir == 'p2':
self._get_next_node_type = lambda curr_node_type, relation, next_node_id: \
KG_RELATION[self.dataset_name][curr_node_type][relation]
if self.args.KGE_pretrained == True:
self.kg_emb = KG_KGE_pretrained(args)
else:
self.kg_emb = KG_KGE(args)
dataset = load_dataset(args.dataset)
self.bulid_model_rl()
self.dummy_rela = torch.ones(max(self.user_triplet_set) * 2+ 1, 1, self.embed_size)
self.dummy_rela = nn.Parameter(self.dummy_rela, requires_grad=True).to(self.device)
def bulid_model_rl(self):
self.state_lstm = KGState_LSTM(self.args, history_len=1)
self.transfor_state = nn.Linear(2 * self.embed_size, 2 * self.embed_size)
self.state_tr_query = nn.Linear(self.embed_size * 3, self.embed_size)
self.l1 = nn.Linear(2 * self.embed_size, self.hidden_sizes[1])
self.l2 = nn.Linear(self.hidden_sizes[0], self.hidden_sizes[1])
self.actor = nn.Linear(self.hidden_sizes[1], self.act_dim)
self.critic = nn.Linear(self.hidden_sizes[1], 1)
self.saved_actions = []
self.rewards = []
self.entropy = []
def forward(self, inputs):
state, batch_next_action_emb, act_mask = inputs # state: [bs, state_dim], act_mask: [bs, act_dim]
state = state.squeeze()
state_tr = state.unsqueeze(1).repeat(1, batch_next_action_emb.shape[1], 1)
probs = state_tr * batch_next_action_emb
probs = probs.sum(-1)
probs = probs.masked_fill(~act_mask, value=torch.tensor(-1e10))
act_probs = F.softmax(probs, dim=-1)
x = self.l1(state)
x = F.dropout(F.elu(x), p=0.4)
state_values = self.critic(x) # Tensor of [bs, 1]
return act_probs, state_values
def select_action(self, batch_state, batch_next_action_emb,
batch_act_mask, device):
act_mask = torch.BoolTensor(batch_act_mask).to(device) # Tensor of [bs, act_dim]
probs, value = self((batch_state, batch_next_action_emb, act_mask)) # act_probs: [bs, act_dim], state_value: [bs, 1]
m = Categorical(probs)
acts = m.sample() # Tensor of [bs, ], requires_grad=False
# [CAVEAT] If sampled action is out of action_space, choose the first action in action_space.
valid_idx = act_mask.gather(1, acts.view(-1, 1)).view(-1)
acts[valid_idx == 0] = 0
self.saved_actions.append(SavedAction(m.log_prob(acts), value))
self.entropy.append(m.entropy())
return acts.cpu().numpy().tolist()
def reset(self, uids=None):
self.uids = [uid for uid in uids]
self.prev_state_h, self.prev_state_c = self.state_lstm.set_up_hidden_state(len(self.uids))
# print(' self.uids = ', len(self.uids))
def update_path_info(self, up_date_hop):
new_uids = []
for row in up_date_hop:
new_uids.append(self.uids[row])
self.uids = new_uids
new_prev_state_h = []
new_prev_state_c = []
for row in up_date_hop:
new_prev_state_h.append(self.prev_state_h[:,row,:].unsqueeze(1))
new_prev_state_c.append(self.prev_state_c[:,row,:].unsqueeze(1))
self.prev_state_h = th.cat(new_prev_state_h, 1).to(self.device)
self.prev_state_c = th.cat(new_prev_state_c, 1).to(self.device)
def generate_st_emb(self, batch_path, up_date_hop = None):
if up_date_hop != None:
self.update_path_info(up_date_hop)
all_state = th.cat([self._get_state_update(index, path).unsqueeze(0)
for index, path in enumerate(batch_path)], 0)
# print('all_state = ', all_state.shape)
# print('prev_state_h = ', self.prev_state_h.shape)
# print('prev_state_c = ', self.prev_state_c.shape)
# input()
# print('all_state = ', all_state[0,:])
# print('self.prev_state_h = ', self.prev_state_h[:,0,:])
# print('self.prev_state_c = ', self.prev_state_c[:,0,:])
# input()
state_output, self.prev_state_h, self.prev_state_c = self.state_lstm(all_state, self.prev_state_h, self.prev_state_c)
return state_output
def action_encoder(self, relation_emb, entitiy_emb):
action_embedding = th.cat([relation_emb, entitiy_emb], -1)
return action_embedding
def _get_state_update(self, index, path):
"""Return state of numpy vector: [user_embed, curr_node_embed, last_node_embed, last_relation]."""
if len(path) == 1:
user_embed = self.kg_emb.lookup_emb(USER, type_index =
torch.LongTensor([path[0][-1]]).to(self.device))[0].unsqueeze(0)
curr_node_embed = user_embed
dummy_rela = self.dummy_rela[path[0][-1], :, :]
st_emb = self.action_encoder(dummy_rela, user_embed)
else:
last_relation, curr_node_type, curr_node_id = path[-1]
curr_node_embed = self.kg_emb.lookup_emb(curr_node_type,
type_index = torch.LongTensor([curr_node_id]).to(self.device))[0].unsqueeze(0)
last_relation_embed = self.kg_emb.lookup_rela_emb(last_relation)[0].unsqueeze(0)
st_emb = self.action_encoder(last_relation_embed, curr_node_embed)
return st_emb
def generate_act_emb(self, batch_path, batch_curr_actions):
return th.cat([self._get_actions(index, actions_sets[0],
actions_sets[1]).unsqueeze(0) for index, actions_sets in enumerate(zip(batch_path, batch_curr_actions))], 0)
def _get_actions(self, index, curr_path, curr_actions):
last_relation, curr_node_type, curr_node_id = curr_path[-1]
entities_embs = []
relation_embs = []
for action_set in curr_actions:
if action_set[0] == SELF_LOOP:
next_node_type = curr_node_type
else:
next_node_type = self._get_next_node_type(curr_node_type, action_set[0], action_set[1])
enti_emb = self.kg_emb.lookup_emb(next_node_type,
type_index = torch.LongTensor([action_set[1]]).to(self.device))
entities_embs.append(enti_emb)
rela_emb = self.kg_emb.lookup_rela_emb(action_set[0])
relation_embs.append(rela_emb)
# print('rela_emb = ', rela_emb.shape)
pad_emb = self.kg_emb.lookup_rela_emb(PADDING)
for _ in range(self.act_dim - len(entities_embs)):
entities_embs.append(pad_emb)
relation_embs.append(pad_emb)
# print('pad_emb = ', pad_emb.shape)
# input()
enti_emb = th.cat(entities_embs, 0)
rela_emb = th.cat(relation_embs, 0)
next_action_state = th.cat([enti_emb, rela_emb], -1)
return next_action_state
def _get_next_node_type(self, curr_node_type, next_relation, next_entity_id):
pass
def update(self, optimizer, env_model, device, ent_weight, step):
if len(self.rewards) <= 0:
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
            return 0.0, 0.0, 0.0, 0.0
batch_rewards = np.vstack(self.rewards).T # numpy array of [bs, #steps]
batch_rewards = torch.FloatTensor(batch_rewards).to(device)
num_steps = batch_rewards.shape[1]
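        # Accumulate discounted returns right-to-left: G_t = r_t + gamma * G_{t+1}.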
for i in range(1, num_steps):
batch_rewards[:, num_steps - i - 1] += self.gamma * batch_rewards[:, num_steps - i]
l2_reg = 0
for name, param in self.named_parameters():
if 'weight' in name:
l2_reg += torch.norm(param)
l2_loss = self.l2_weight * l2_reg
actor_loss = 0
critic_loss = 0
entropy_loss = 0
for i in range(0, num_steps):
log_prob, value = self.saved_actions[i] # log_prob: Tensor of [bs, ], value: Tensor of [bs, 1]
advantage = batch_rewards[:, i] - value.squeeze(1) # Tensor of [bs, ]
actor_loss += -log_prob * advantage.detach() # Tensor of [bs, ]
critic_loss += advantage.pow(2) # Tensor of [bs, ]
entropy_loss += -self.entropy[i] # Tensor of [bs, ]
actor_loss = actor_loss.mean()
critic_loss = critic_loss.mean()
entropy_loss = entropy_loss.mean()
loss = actor_loss + critic_loss + ent_weight * entropy_loss + l2_loss
optimizer.zero_grad()
loss.backward()
        if self.args.grad_check and step % 50 == 0:
            plot_grad_flow_v2(self.named_parameters(), self.args.log_dir, step)
            print('grad_check')
optimizer.step()
del self.rewards[:]
del self.saved_actions[:]
del self.entropy[:]
return loss.item(), actor_loss.item(), critic_loss.item(), entropy_loss.item()
| 10,715 | 37.826087 | 126 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/lstm_base/backbone_lstm.py | from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class EncoderRNN(nn.Module):
def __init__(self, args, input_size, hidden_size, device, n_layers=1):
super(EncoderRNN, self).__init__()
self.device = device
self.n_layers = n_layers
self.hidden_size = hidden_size
self.state_rg = args.state_rg
# self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True, bidirectional=True)
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True).to(self.device)
def blank_state(self):
hidden0 = torch.zeros(1, 1, self.hidden_size)
# hidden0 = hidden0.to(self.device)
return nn.Parameter(hidden0, requires_grad = self.state_rg).to(self.device)
def forward(self, input_state, hidden):
output, hidden = self.lstm(input_state, (hidden, hidden))
return output, hidden
class EncoderRNN_batch(nn.Module):
def __init__(self, args, input_size, hidden_size, device, n_layers=1):
super(EncoderRNN_batch, self).__init__()
self.device = device
self.n_layers = n_layers
self.hidden_size = hidden_size
self.state_rg = args.state_rg
# self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True, bidirectional=True)
self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True).to(self.device)
def blank_state(self, batch_size):
hidden0 = torch.zeros(1, batch_size, self.hidden_size)
# hidden0 = hidden0.to(self.device)
return nn.Parameter(hidden0, requires_grad = self.state_rg).to(self.device)
def forward(self, input_state, hm, cm):
output, (hn, cn) = self.lstm(input_state, (hm, cm))
return output, hn, cn
class KGState_LSTM(nn.Module):
def __init__(self, args, history_len=1):
super(KGState_LSTM, self).__init__()
self.policy_lstm = EncoderRNN_batch(args, args.embed_size * 2, args.embed_size * 2, args.device)
def set_up_hidden_state(self, batch_size):
self.zero_hm = self.policy_lstm.blank_state(batch_size)
self.zero_cm = self.policy_lstm.blank_state(batch_size)
return self.zero_hm, self.zero_cm
def __call__(self, history_seq, hm, cm):
# print('hm = ', hm.shape)
# print('cm = ', cm.shape)
# print('history_seq = ', history_seq.shape)
# input()
output, hn, cn = self.policy_lstm(history_seq, hm, cm)
return output, hn, cn
class KGState_LSTM_ERU(nn.Module):
def __init__(self, args, history_len=1):
super(KGState_LSTM_ERU, self).__init__()
self.policy_lstm = EncoderRNN_batch(args, args.embed_size * 3, args.embed_size * 2, args.device)
def set_up_hidden_state(self, batch_size):
self.zero_hm = self.policy_lstm.blank_state(batch_size)
self.zero_cm = self.policy_lstm.blank_state(batch_size)
return self.zero_hm, self.zero_cm
def __call__(self, history_seq, hm, cm):
output, hn, cn = self.policy_lstm(history_seq, hm, cm)
return output, hn, cn
class KGState_LSTM_no_rela(nn.Module):
def __init__(self, args, history_len=1):
super(KGState_LSTM_no_rela, self).__init__()
self.policy_lstm = EncoderRNN_batch(args, args.embed_size, args.embed_size, args.device)
def set_up_hidden_state(self, batch_size):
self.zero_hm = self.policy_lstm.blank_state(batch_size)
self.zero_cm = self.policy_lstm.blank_state(batch_size)
return self.zero_hm, self.zero_cm
def __call__(self, history_seq, hm, cm):
# print('hm = ', hm.shape)
# print('cm = ', cm.shape)
# print('history_seq = ', history_seq.shape)
# input()
output, hn, cn = self.policy_lstm(history_seq, hm, cm)
return output, hn, cn | 4,204 | 36.212389 | 104 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/src/model/lstm_base/model_kg.py |
from __future__ import absolute_import, division, print_function
import sys
import os
import argparse
from collections import namedtuple
import torch
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
from torch.distributions import Categorical
from easydict import EasyDict as edict
from models.UCPR.utils import *
SavedAction = namedtuple('SavedAction', ['log_prob', 'value'])
class KG_KGE(nn.Module):
def __init__(self, args):
super(KG_KGE, self).__init__()
self.embed_size = args.embed_size
self.device = args.device
self.l2_lambda = args.l2_lambda
dataset = load_dataset(args.dataset)
self.kg = load_kg(args.dataset)
self.entities = edict()
self.requires_grad = args.kg_emb_grad
self.dataset_name = args.dataset
self.relation_names = dataset.other_relation_names
self.entity_names = dataset.entity_names
self.relation2entity = dataset.relation2entity
# Initialize entity embeddings.
self.initialize_entity_embeddings(dataset)
for e in self.entities:
embed = self._entity_embedding(e, self.entities[e].vocab_size)
setattr(self, e, embed)
# Initialize relation embeddings and relation biases.
self.initialize_relations_embeddings(dataset)
for r in self.relations:
embed = self._relation_embedding(r)
setattr(self, r, embed)
bias = self._relation_bias(len(self.relations[r].et_distrib))
setattr(self, r + '_bias', bias)
embed = self._relation_embedding(PADDING)
setattr(self, PADDING, embed)
embed = self._relation_embedding(SELF_LOOP)
setattr(self, SELF_LOOP, embed)
# original method to initialize relation embeddings
#self.relations = edict()
#for r in dataset.relation_names:#rela_list:
# self.relations[r] = edict(
# et_distrib=self._make_distrib([float(1) for _ in range(len(self.kg(USER))) ] ))#6000)]))
#for r in dataset.rela_list:
# embed = self._relation_embedding(r)
# setattr(self, r, embed)
# bias = self._relation_bias(188047)
# setattr(self, r + '_bias', bias)
def initialize_entity_embeddings(self, dataset):
self.entities = edict()
for entity_name in self.entity_names:
value = edict(vocab_size=getattr(dataset, entity_name).vocab_size)
self.entities[entity_name] = value
def initialize_relations_embeddings(self, dataset):
self.relations = edict()
for relation_name in dataset.other_relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
def _entity_embedding(self, key, vocab_size):
"""Create entity embedding of size [vocab_size+1, embed_size].
Note that last dimension is always 0's.
"""
embed = nn.Embedding(vocab_size + 1, self.embed_size, padding_idx=-1, sparse=False)
embed.weight.requires_grad = self.requires_grad
# print('key = ', key)
# print(key, 'embed = ', embed.weight.shape)
# print('embed.requires_grad = ', embed.requires_grad)
return embed
def _relation_embedding(self, key):
"""Create relation vector of size [1, embed_size]."""
# initrange = 0.5 / self.embed_size
# embed = nn.Parameter(torch.from_numpy(self.embeds[key][0:])[:,:self.embed_size])
# print('torch.from_numpy(self.embeds[key][0:])[:,:self.embed_size] = ', (torch.from_numpy(self.embeds[key][0:])[:,:self.embed_size]).shpae)
# embed.requires_grad = self.requires_grad
weight = torch.randn(1, self.embed_size, requires_grad=True)
embed = nn.Parameter(weight[:,:self.embed_size])
# print(key, 'embed = ', embed.shape)
embed.requires_grad = self.requires_grad
return embed
def _relation_bias(self, vocab_size):
"""Create relation bias of size [vocab_size+1]."""
bias = nn.Embedding(vocab_size + 1, 1, padding_idx=-1, sparse=False)
bias.weight = nn.Parameter(torch.zeros(vocab_size + 1, 1))
# bias.requires_grad = self.requires_grad
return bias
def _make_distrib(self, distrib):
"""Normalize input numpy vector to distribution."""
        distrib = np.power(np.array(distrib, dtype=float), 0.75)
distrib = distrib / distrib.sum()
distrib = torch.FloatTensor(distrib).to(self.device)
return distrib
def lookup_emb(self, node_type, type_index):
embedding_file = getattr(self, node_type)
entity_vec = embedding_file(type_index)
return entity_vec
def lookup_rela_emb(self, node_type):
relation_vec = getattr(self, node_type)
return relation_vec
| 5,310 | 36.401408 | 148 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/preprocess/knowledge_graph.py | from __future__ import absolute_import, division, print_function
import os
import sys
import argparse
from math import log
from tqdm import tqdm
from copy import deepcopy
import pandas as pd
import numpy as np
import gzip
import pickle
import random
from datetime import datetime
# import matplotlib.pyplot as plt
import torch
import os
import numpy as np
import gzip
from easydict import EasyDict as edict
import random
from models.UCPR.utils import *
#get_knowledge_derived_relations, DATASET_DIR, \
# get_pid_to_kgid_mapping, get_uid_to_kgid_mapping, get_entity_edict,\
# MAIN_PRODUCT_INTERACTION, USER,\
# ML1M, LFM1M, CELL, get_entities,get_dataset_relations, get_entity_tail
class KnowledgeGraph(object):
def __init__(self, dataset, verbose=False):
self.G = dict()
self.verbose = verbose
self._load_entities(dataset)
self.dataset = dataset
self.dataset_name = dataset.dataset_name
self._load_reviews(dataset)
self._load_knowledge(dataset)
self._clean()
self.top_matches = None
self.compute_degrees()
def _load_entities(self, dataset):
if self.verbose:
print('Load entities...')
num_nodes = 0
entities = get_entities(dataset.dataset_name)
for entity in entities:
self.G[entity] = {}
vocab_size = getattr(dataset, entity).vocab_size
relations = get_dataset_relations(dataset.dataset_name, entity)
for eid in range(vocab_size):
self.G[entity][eid] = {r: [] for r in relations}
num_nodes += vocab_size
if self.verbose:
print('Total {:d} nodes.'.format(num_nodes))
def _load_reviews(self, dataset):
if self.verbose:
print('Load reviews...')
num_edges = 0
for rid, data in enumerate(dataset.review.data):
uid, pid, _, _ = data
# (2) Add edges.
main_product, main_interaction = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
self._add_edge(USER, uid, main_interaction, main_product, pid)
num_edges += 2
if self.verbose:
print('Total {:d} review edges.'.format(num_edges))
def _load_knowledge(self, dataset):
relations = get_knowledge_derived_relations(dataset.dataset_name)
main_entity, _ = MAIN_PRODUCT_INTERACTION[dataset.dataset_name]
for relation in relations:
if self.verbose:
print('Load knowledge {}...'.format(relation))
data = getattr(dataset, relation).data
num_edges = 0
for pid, eids in enumerate(data):
if len(eids) <= 0:
continue
for eid in set(eids):
et_type = get_entity_tail(dataset.dataset_name, relation)
self._add_edge(main_entity, pid, relation, et_type, eid)
num_edges += 2
if self.verbose:
print('Total {:d} {:s} edges.'.format(num_edges, relation))
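    # Edges are stored in both directions (head -> tail and tail -> head), which is
    # why each call to _add_edge above is counted as two edges.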
def _add_edge(self, etype1, eid1, relation, etype2, eid2):
self.G[etype1][eid1][relation].append(eid2)
self.G[etype2][eid2][relation].append(eid1)
def _clean(self):
if self.verbose:
print('Remove duplicates...')
for etype in self.G:
for eid in self.G[etype]:
for r in self.G[etype][eid]:
data = self.G[etype][eid][r]
data = tuple(sorted(set(data)))
self.G[etype][eid][r] = data
def compute_degrees(self):
if self.verbose:
print('Compute node degrees...')
self.degrees = {}
self.max_degree = {}
for etype in self.G:
self.degrees[etype] = {}
for eid in self.G[etype]:
count = 0
for r in self.G[etype][eid]:
count += len(self.G[etype][eid][r])
self.degrees[etype][eid] = count
def get(self, eh_type, eh_id=None, relation=None):
data = self.G
if eh_type is not None:
data = data[eh_type]
if eh_id is not None:
data = data[eh_id]
if relation is not None:
data = data[relation]
return data
def __call__(self, eh_type, eh_id=None, relation=None):
return self.get(eh_type, eh_id, relation)
def get_tails(self, entity_type, entity_id, relation):
return self.G[entity_type][entity_id][relation]
| 4,556 | 32.755556 | 91 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/preprocess/transe_model.py | from __future__ import absolute_import, division, print_function
from easydict import EasyDict as edict
import numpy as np
import pickle
import torch
import torch.nn as nn
import torch.nn.functional as F
from models.UCPR.preprocess.dataset import Dataset
from models.UCPR.utils import *
class KnowledgeEmbedding(nn.Module):
def __init__(self, args, dataloader):
super(KnowledgeEmbedding, self).__init__()
self.embed_size = args.embed_size
self.num_neg_samples = args.num_neg_samples
self.device = args.device
self.l2_lambda = args.l2_lambda
self.dataset_name = args.dataset
#self.relation_names = dataloader.dataset.relation_names
self.relation_names = dataloader.dataset.other_relation_names
self.entity_names = dataloader.dataset.entity_names
self.relation2entity = dataloader.dataset.relation2entity
# Initialize entity embeddings.
self.initialize_entity_embeddings(dataloader.dataset)
for e in self.entities:
embed = self._entity_embedding(self.entities[e].vocab_size)
setattr(self, e, embed)
# Initialize relation embeddings and relation biases.
self.initialize_relations_embeddings(dataloader.dataset)
for r in self.relations:
embed = self._relation_embedding()
setattr(self, r, embed)
bias = self._relation_bias(len(self.relations[r].et_distrib))
setattr(self, r + '_bias', bias)
def initialize_entity_embeddings(self, dataset):
self.entities = edict()
for entity_name in self.entity_names:
value = edict(vocab_size=getattr(dataset, entity_name).vocab_size)
self.entities[entity_name] = value
def initialize_relations_embeddings(self, dataset):
'''
self.relations = edict()
for relation_name in dataset.relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
'''
self.relations = edict()
for relation_name in dataset.other_relation_names:
value = edict(
et=dataset.relation2entity[relation_name],
et_distrib=self._make_distrib(getattr(dataset, relation_name).et_distrib)
)
self.relations[relation_name] = value
main_rel = INTERACTION[dataset.dataset_name]
self.relations[main_rel] = edict(
et="product",
et_distrib=self._make_distrib(getattr(dataset, "review").product_uniform_distrib)
)
def _entity_embedding(self, vocab_size):
"""Create entity embedding of size [vocab_size+1, embed_size].
Note that last dimension is always 0's.
"""
embed = nn.Embedding(vocab_size + 1, self.embed_size, padding_idx=-1, sparse=False)
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(vocab_size + 1, self.embed_size).uniform_(-initrange, initrange)
embed.weight = nn.Parameter(weight)
return embed
def _relation_embedding(self):
"""Create relation vector of size [1, embed_size]."""
initrange = 0.5 / self.embed_size
weight = torch.FloatTensor(1, self.embed_size).uniform_(-initrange, initrange)
embed = nn.Parameter(weight)
return embed
def _relation_bias(self, vocab_size):
"""Create relation bias of size [vocab_size+1]."""
bias = nn.Embedding(vocab_size + 1, 1, padding_idx=-1, sparse=False)
bias.weight = nn.Parameter(torch.zeros(vocab_size + 1, 1))
return bias
def _make_distrib(self, distrib):
"""Normalize input numpy vector to distribution."""
        distrib = np.power(np.array(distrib, dtype=float), 0.75)
distrib = distrib / distrib.sum()
distrib = torch.FloatTensor(distrib).to(self.device)
return distrib
def forward(self, batch_idxs):
loss = self.compute_loss(batch_idxs)
return loss
def compute_loss(self, batch_idxs):
"""Compute knowledge graph negative sampling loss.
"""
regularizations = []
user_idxs = batch_idxs[:, 0]
product_idxs = batch_idxs[:, 1]
rel2entity_idxs_tuple = {}
i = 2
for rel_name in get_knowledge_derived_relations(self.dataset_name):
rel2entity_idxs_tuple[rel_name] = (self.relation2entity[rel_name], batch_idxs[:, i])
i+=1
# user + interaction -> product
up_loss, up_embeds = self.neg_loss(USER, INTERACTION[self.dataset_name], PRODUCT, user_idxs, product_idxs)
regularizations.extend(up_embeds)
loss = up_loss
for curr_rel in get_knowledge_derived_relations(self.dataset_name):
entity_idxs_tuple = rel2entity_idxs_tuple[curr_rel]
entity_name, curr_idxs = entity_idxs_tuple
# product + curr_rel -> curr_entity
curr_loss, curr_embeds = self.neg_loss(PRODUCT, curr_rel, entity_name, product_idxs, curr_idxs)
if curr_loss is not None:
regularizations.extend(curr_embeds)
loss += curr_loss
# l2 regularization
if self.l2_lambda > 0:
l2_loss = 0.0
for term in regularizations:
l2_loss += torch.norm(term)
loss += self.l2_lambda * l2_loss
return loss
def neg_loss(self, entity_head, relation, entity_tail, entity_head_idxs, entity_tail_idxs):
# Entity tail indices can be -1. Remove these indices. Batch size may be changed!
mask = entity_tail_idxs >= 0
fixed_entity_head_idxs = entity_head_idxs[mask]
fixed_entity_tail_idxs = entity_tail_idxs[mask]
if fixed_entity_head_idxs.size(0) <= 0:
return None, []
entity_head_embedding = getattr(self, entity_head) # nn.Embedding
entity_tail_embedding = getattr(self, entity_tail) # nn.Embedding
relation_vec = getattr(self, relation) # [1, embed_size]
relation_bias_embedding = getattr(self, relation + '_bias') # nn.Embedding
entity_tail_distrib = self.relations[relation].et_distrib # [vocab_size]
return kg_neg_loss(entity_head_embedding, entity_tail_embedding,
fixed_entity_head_idxs, fixed_entity_tail_idxs,
relation_vec, relation_bias_embedding, self.num_neg_samples, entity_tail_distrib)
def kg_neg_loss(entity_head_embed, entity_tail_embed, entity_head_idxs, entity_tail_idxs,
relation_vec, relation_bias_embed, num_samples, distrib):
"""Compute negative sampling loss for triple (entity_head, relation, entity_tail).
Args:
entity_head_embed: Tensor of size [batch_size, embed_size].
entity_tail_embed: Tensor of size [batch_size, embed_size].
entity_head_idxs:
entity_tail_idxs:
relation_vec: Parameter of size [1, embed_size].
relation_bias: Tensor of size [batch_size]
num_samples: An integer.
distrib: Tensor of size [vocab_size].
Returns:
A tensor of [1].
"""
batch_size = entity_head_idxs.size(0)
entity_head_vec = entity_head_embed(entity_head_idxs) # [batch_size, embed_size]
example_vec = entity_head_vec + relation_vec # [batch_size, embed_size]
example_vec = example_vec.unsqueeze(2) # [batch_size, embed_size, 1]
entity_tail_vec = entity_tail_embed(entity_tail_idxs) # [batch_size, embed_size]
pos_vec = entity_tail_vec.unsqueeze(1) # [batch_size, 1, embed_size]
relation_bias = relation_bias_embed(entity_tail_idxs).squeeze(1) # [batch_size]
pos_logits = torch.bmm(pos_vec, example_vec).squeeze() + relation_bias # [batch_size]
pos_loss = -pos_logits.sigmoid().log() # [batch_size]
neg_sample_idx = torch.multinomial(distrib, num_samples, replacement=True).view(-1)
neg_vec = entity_tail_embed(neg_sample_idx) # [num_samples, embed_size]
neg_logits = torch.mm(example_vec.squeeze(2), neg_vec.transpose(1, 0).contiguous())
neg_logits += relation_bias.unsqueeze(1) # [batch_size, num_samples]
neg_loss = -neg_logits.neg().sigmoid().log().sum(1) # [batch_size]
loss = (pos_loss + neg_loss).mean()
return loss, [entity_head_vec, entity_tail_vec, neg_vec]
| 8,696 | 40.61244 | 114 | py |
rep-path-reasoning-recsys | rep-path-reasoning-recsys-main/models/UCPR/preprocess/train_transe.py | from __future__ import absolute_import, division, print_function
import os
import argparse
import torch
import torch.optim as optim
from models.UCPR.preprocess.dataset import DataLoader, Dataset
from models.UCPR.utils import *
from models.UCPR.preprocess.transe_model import KnowledgeEmbedding
import json
import sys
logger = None
def train(args):
dataset_name = args.dataset
train_set = Dataset(args,set_name='train')
val_set = Dataset(args,set_name='valid')
train_loader = DataLoader(train_set, args.batch_size)
valid_loader = DataLoader(val_set, args.batch_size)
review_to_train = len(train_set.review.data) * args.epochs + 1
model = KnowledgeEmbedding(args, train_loader).to(args.device)
logger.info('Parameters:' + str([i[0] for i in model.named_parameters()]))
optimizer = optim.SGD(model.parameters(), lr=args.lr)
steps = 0
smooth_loss = 0.0
best_val_loss = sys.maxsize
train_loss_history = []
val_loss_history = []
for epoch in range(1, args.epochs + 1):
train_loader.reset()
while train_loader.has_next():
# Set learning rate.
lr = args.lr * max(1e-4, 1.0 - train_loader.finished_review_num / float(review_to_train))
for pg in optimizer.param_groups:
pg['lr'] = lr
# Get training batch.
batch_idxs = train_loader.get_batch()
batch_idxs = torch.from_numpy(batch_idxs).to(args.device)
# Train models.
optimizer.zero_grad()
train_loss = model(batch_idxs)
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
smooth_loss += train_loss.item() / args.steps_per_checkpoint
steps += 1
if steps % args.steps_per_checkpoint == 0:
logger.info('Epoch: {:02d} | '.format(epoch) +
'Review: {:d}/{:d} | '.format(train_loader.finished_review_num, review_to_train) +
'Lr: {:.5f} | '.format(lr) +
'Smooth loss: {:.5f}'.format(smooth_loss))
train_loss_history.append(smooth_loss)
smooth_loss = 0.0
if epoch % 10 == 0:
if args.do_validation:
model.eval()
total_val_loss = 0
cnt = 0
valid_loader.reset()
while valid_loader.has_next():
# Get valid batch.
batch_idxs = valid_loader.get_batch()
batch_idxs = torch.from_numpy(batch_idxs).to(args.device)
valid_loss = model(batch_idxs)
total_val_loss += valid_loss.item()
cnt += 1
avg_valid_loss = total_val_loss/max(cnt, 1)
logger.info('Epoch: {:02d} | '.format(epoch) +
'Validation loss: {:.5f}'.format(avg_valid_loss))
val_loss_history.append(avg_valid_loss)
if avg_valid_loss < best_val_loss:
best_val_loss = avg_valid_loss
torch.save(model.state_dict(), '{}/transe_best_model.ckpt'.format(args.log_dir))
model.train()
torch.save(model.state_dict(), '{}/transe_model_sd_epoch_{}.ckpt'.format(args.log_dir, epoch))
makedirs(dataset_name)
with open(TRANSE_TEST_METRICS_FILE_PATH[dataset_name], 'w') as f:
json.dump( {'valid_loss': best_val_loss,
'valid_loss_history': val_loss_history,
'train_loss_history':train_loss_history } ,f)
def extract_embeddings(args, dataset):
"""Note that last entity embedding is of size [vocab_size+1, d]."""
dataset_name = args.dataset
os.makedirs(args.log_dir, exist_ok=True)
model_file = '{}/transe_best_model.ckpt'.format(args.log_dir)
print('Load embeddings', model_file)
state_dict = torch.load(model_file, map_location=lambda storage, loc: storage)
embeds = {}
for entity_name in dataset.entity_names:
embeds[entity_name] = state_dict[f'{entity_name}.weight'].cpu().data.numpy()[:-1]
embeds[INTERACTION[dataset_name]] = (
state_dict[INTERACTION[dataset_name]].cpu().data.numpy()[0],
state_dict[f'{INTERACTION[dataset_name]}_bias.weight'].cpu().data.numpy()
)
for relation_name in dataset.other_relation_names:
embeds[relation_name] = (
state_dict[f'{relation_name}'].cpu().data.numpy()[0],
state_dict[f'{relation_name}_bias.weight'].cpu().data.numpy()
)
save_embed(dataset_name, embeds)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default=LFM1M, help=f'One of [{ML1M}, {LFM1M}, beauty, cd, cell, clothing]')
parser.add_argument('--name', type=str, default='train_transe_model', help='models name.')
parser.add_argument('--seed', type=int, default=123, help='random seed.')
parser.add_argument('--gpu', type=str, default='0', help='gpu device.')
parser.add_argument('--epochs', type=int, default=30, help='number of epochs to train.')
parser.add_argument('--batch_size', type=int, default=64, help='batch size.')
parser.add_argument('--lr', type=float, default=0.5, help='learning rate.')
parser.add_argument('--weight_decay', type=float, default=0, help='weight decay for adam.')
parser.add_argument('--l2_lambda', type=float, default=0, help='l2 lambda')
parser.add_argument('--max_grad_norm', type=float, default=5.0, help='Clipping gradient.')
parser.add_argument('--embed_size', type=int, default=100, help='knowledge embedding size.')
parser.add_argument('--num_neg_samples', type=int, default=5, help='number of negative samples.')
parser.add_argument('--steps_per_checkpoint', type=int, default=200, help='Number of steps for checkpoint.')
parser.add_argument('--do_validation', type=bool, default=True, help='Whether to perform validation')
args = parser.parse_args()
os.makedirs(LOG_DATASET_DIR[args.dataset], exist_ok=True)
with open(os.path.join(LOG_DATASET_DIR[args.dataset], f'{TRANSE_HPARAMS_FILE}'), 'w') as f:
import json
import copy
args_dict = dict()
for x,y in copy.deepcopy(args._get_kwargs()):
args_dict[x] = y
if 'device' in args_dict:
del args_dict['device']
json.dump(args_dict,f)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
print(f'Set to gpu:{args.gpu}')
args.device = torch.device(f'cuda:0') if torch.cuda.is_available() else 'cpu'
print(TMP_DIR[args.dataset])
args.log_dir = os.path.join(TMP_DIR[args.dataset], args.name)
if not os.path.isdir(args.log_dir):
os.makedirs(args.log_dir)
global logger
logger = get_logger(args.log_dir + '/train_log.txt')
logger.info(args)
set_random_seed(args.seed)
dataset = load_dataset(args.dataset)
train(args)
extract_embeddings(args, dataset)
if __name__ == '__main__':
main()
| 7,144 | 41.029412 | 123 | py |
shapely | shapely-main/docs/conf.py | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# set an environment variable for shapely.decorators.requires_geos to see if we
# are in a doc build
import os
os.environ["SPHINX_DOC_BUILD"] = "1"
# -- Project information -----------------------------------------------------
project = 'Shapely'
copyright = '2011-2023, Sean Gillies and Shapely contributors'
# The full version, including alpha/beta/rc tags.
import shapely
release = shapely.__version__.split("+")[0]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'matplotlib.sphinxext.plot_directive',
'sphinx.ext.intersphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.mathjax',
'numpydoc',
'sphinx_remove_toctrees'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_css_files = [
'custom.css',
]
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Shapely.tex', 'Shapely Documentation',
'Sean Gillies', 'manual'),
]
# --Options for sphinx extensions -----------------------------------------------
# connect docs in other projects
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://numpy.org/doc/stable/', None),
}
plot_rcparams = {
'savefig.bbox': "tight"
}
# -- Automatic generation of API reference pages -----------------------------
numpydoc_show_class_members = False
autosummary_generate = True
remove_from_toctrees = ["reference/*"]
def rstjinja(app, docname, source):
"""
Render our pages as a jinja template for fancy templating goodness.
"""
# https://www.ericholscher.com/blog/2016/jul/25/integrating-jinja-rst-sphinx/
# Make sure we're outputting HTML
if app.builder.format != 'html':
return
source[0] = app.builder.templates.render_string(source[0], app.config.html_context)
def get_module_functions(module, exclude=None):
"""Return a list of function names for the given submodule."""
mod = getattr(shapely, module)
return mod.__all__
html_context = {
'get_module_functions': get_module_functions
}
# write dummy _reference.rst with all functions listed to ensure the reference/
# stub pages are created (the autogeneration of those stub pages by autosummary
# happens before the jinja rendering is done, and thus at that point the
# autosummary directives do not yet contain the final content).
template = """
:orphan:
.. autogenerated file
.. currentmodule:: shapely
.. autosummary::
:toctree: reference/
"""
modules = [
"_geometry", "creation", "constructive", "coordinates", "io", "linear",
"measurement", "predicates", "set_operations"]
functions = [func for mod in modules for func in get_module_functions(mod)]
template += " " + "\n ".join(functions)
with open("_reference.rst", "w") as f:
f.write(template)
def setup(app):
app.connect("source-read", rstjinja)
| 4,366 | 29.326389 | 87 | py |
diffmimic | diffmimic-main/mimic.py | import functools
import numpy as np
import jax.numpy as jnp
from absl import flags, app
import yaml
from brax import envs
from brax.io import metrics
from brax.training.agents.apg import networks as apg_networks
from diffmimic.utils import AttrDict
from diffmimic.mimic_envs import register_mimic_env
import diffmimic.brax_lib.agent_diffmimic as dmm
register_mimic_env()
FLAGS = flags.FLAGS
flags.DEFINE_string('config', 'configs/AMP/backflip.yaml', help='Experiment configuration.')
def main(argv):
with open(FLAGS.config, 'r') as f:
args = AttrDict(yaml.safe_load(f))
logdir = "logs/exp"
for k, v in args.items():
if k == 'ref':
logdir += f"_{v.split('/')[-1].split('.')[0]}"
else:
logdir += f"_{v}"
demo_traj = jnp.array(np.load(args.ref))
demo_len = demo_traj.shape[0]
args.ep_len = min(args.ep_len, demo_len)
args.cycle_len = min(args.get('cycle_len', demo_len), demo_len)
args.ep_len_eval = min(args.get('ep_len_eval', demo_len), demo_len)
train_env = envs.get_environment(
env_name="humanoid_mimic_train",
system_config=args.system_config,
reference_traj=demo_traj,
obs_type=args.get('obs_type', 'timestamp'),
cyc_len=args.cycle_len,
total_length=args.ep_len_eval,
rollout_length=args.ep_len,
early_termination=args.get('early_termination', False),
demo_replay_mode=args.demo_replay_mode,
err_threshold=args.threshold,
replay_rate=args.get('replay_rate', 0.05),
reward_scaling=args.get('reward_scaling', 1.),
rot_weight=args.rot_weight,
vel_weight=args.vel_weight,
ang_weight=args.ang_weight
)
eval_env = envs.get_environment(
env_name="humanoid_mimic",
system_config=args.system_config,
reference_traj=demo_traj,
obs_type=args.get('obs_type', 'timestamp'),
cyc_len=args.cycle_len,
rot_weight=args.rot_weight,
vel_weight=args.vel_weight,
ang_weight=args.ang_weight
)
with metrics.Writer(logdir) as writer:
make_inference_fn, params, _ = dmm.train(
seed=args.seed,
environment=train_env,
eval_environment=eval_env,
episode_length=args.ep_len-1,
eval_episode_length=args.ep_len_eval-1,
num_envs=args.num_envs,
num_eval_envs=args.num_eval_envs,
learning_rate=args.lr,
num_evals=args.max_it+1,
max_gradient_norm=args.max_grad_norm,
network_factory=functools.partial(apg_networks.make_apg_networks, hidden_layer_sizes=(512, 256)),
normalize_observations=args.normalize_observations,
save_dir=logdir,
progress_fn=writer.write_scalars,
use_linear_scheduler=args.use_lr_scheduler,
truncation_length=args.get('truncation_length', None),
)
if __name__ == '__main__':
app.run(main)
| 2,985 | 32.931818 | 109 | py |
diffmimic | diffmimic-main/diffmimic/brax_lib/agent_diffmimic.py | # Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Analytic policy gradient training."""
import functools
import time
from typing import Callable, Optional, Tuple
from absl import logging
from brax import envs
from brax.envs import wrappers
from brax.training import pmap
from brax.training import types
from brax.training.acme import running_statistics
from brax.training.acme import specs
from brax.training.agents.apg import networks as apg_networks
from brax.training.types import Params
from brax.training.types import PRNGKey
from brax.io import model
import flax
import jax
import jax.numpy as jnp
import optax
from diffmimic.brax_lib import acting
from diffmimic.utils.io import serialize_qp
InferenceParams = Tuple[running_statistics.NestedMeanStd, Params]
Metrics = types.Metrics
_PMAP_AXIS_NAME = 'i'
@flax.struct.dataclass
class TrainingState:
"""Contains training state for the learner."""
optimizer_state: optax.OptState
normalizer_params: running_statistics.RunningStatisticsState
policy_params: Params
def _unpmap(v):
return jax.tree_util.tree_map(lambda x: x[0], v)
def train(environment: envs.Env,
episode_length: int,
action_repeat: int = 1,
num_envs: int = 1,
max_devices_per_host: Optional[int] = None,
num_eval_envs: int = 128,
learning_rate: float = 1e-4,
seed: int = 0,
truncation_length: Optional[int] = None,
max_gradient_norm: float = 1e9,
num_evals: int = 1,
normalize_observations: bool = False,
deterministic_eval: bool = False,
network_factory: types.NetworkFactory[
apg_networks.APGNetworks] = apg_networks.make_apg_networks,
progress_fn: Callable[[int, Metrics], None] = lambda *args: None,
eval_environment: Optional[envs.Env] = None,
eval_episode_length: Optional[int] = None,
save_dir: Optional[str] = None,
use_linear_scheduler: Optional[bool] = False,
):
"""Direct trajectory optimization training."""
best_pose_error = 1e8
xt = time.time()
process_count = jax.process_count()
process_id = jax.process_index()
local_device_count = jax.local_device_count()
local_devices_to_use = local_device_count
if max_devices_per_host:
local_devices_to_use = min(local_devices_to_use, max_devices_per_host)
logging.info(
'Device count: %d, process count: %d (id %d), local device count: %d, '
'devices to be used count: %d', jax.device_count(), process_count,
process_id, local_device_count, local_devices_to_use)
device_count = local_devices_to_use * process_count
if truncation_length is not None:
assert truncation_length > 0
num_evals_after_init = max(num_evals - 1, 1)
assert num_envs % device_count == 0
env = environment
env = wrappers.EpisodeWrapper(env, episode_length, action_repeat)
env = wrappers.VmapWrapper(env)
env = wrappers.AutoResetWrapper(env)
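  # Wrapper order matters: episodes are time-limited first, then vmapped across
  # parallel environments, and finally auto-reset so batched rollouts can continue
  # past episode boundaries.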
normalize = lambda x, y: x
if normalize_observations:
normalize = running_statistics.normalize
apg_network = network_factory(
env.observation_size,
env.action_size,
preprocess_observations_fn=normalize)
make_policy = apg_networks.make_inference_fn(apg_network)
if use_linear_scheduler:
lr_scheduler = optax.linear_schedule(init_value=learning_rate, end_value=1e-5, transition_steps=num_evals_after_init)
optimizer = optax.adam(learning_rate=lr_scheduler)
else:
optimizer = optax.adam(learning_rate=learning_rate)
def env_step(carry: Tuple[envs.State, PRNGKey], step_index: int,
policy: types.Policy):
env_state, key = carry
key, key_sample = jax.random.split(key)
actions = policy(env_state.obs, key_sample)[0]
nstate = env.step(env_state, actions)
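    # Truncated backpropagation through time: every `truncation_length` steps the
    # gradient is cut with stop_gradient, limiting how far gradients flow back
    # through long rollouts.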
if truncation_length is not None:
nstate = jax.lax.cond(
jnp.mod(step_index + 1, truncation_length) == 0.,
jax.lax.stop_gradient, lambda x: x, nstate)
return (nstate, key), (nstate.reward, env_state.obs, nstate.metrics)
def loss(policy_params, normalizer_params, key):
key_reset, key_scan = jax.random.split(key)
env_state = env.reset(
jax.random.split(key_reset, num_envs // process_count))
f = functools.partial(
env_step, policy=make_policy((normalizer_params, policy_params)))
(rewards,
obs, metrics) = jax.lax.scan(f, (env_state, key_scan),
(jnp.array(range(episode_length // action_repeat))))[1]
return -jnp.mean(rewards), (rewards, obs, metrics)
loss_grad = jax.grad(loss, has_aux=True)
def clip_by_global_norm(updates):
g_norm = optax.global_norm(updates)
trigger = g_norm < max_gradient_norm
return jax.tree_util.tree_map(
lambda t: jnp.where(trigger, t, (t / g_norm) * max_gradient_norm),
updates)
def training_epoch(training_state: TrainingState, key: PRNGKey):
key, key_grad = jax.random.split(key)
grad_raw, (rewards, obs, metrics) = loss_grad(training_state.policy_params,
training_state.normalizer_params, key_grad)
grad = clip_by_global_norm(grad_raw)
grad = jax.lax.pmean(grad, axis_name='i')
grad_raw = jax.lax.pmean(grad_raw, axis_name='i')
params_update, optimizer_state = optimizer.update(
grad, training_state.optimizer_state)
policy_params = optax.apply_updates(training_state.policy_params,
params_update)
normalizer_params = running_statistics.update(
training_state.normalizer_params, obs, pmap_axis_name=_PMAP_AXIS_NAME)
metrics = {
'grad_norm': optax.global_norm(grad_raw),
'params_norm': optax.global_norm(policy_params),
'loss': -1 * rewards,
**metrics
}
return TrainingState(
optimizer_state=optimizer_state,
normalizer_params=normalizer_params,
policy_params=policy_params), metrics
training_epoch = jax.pmap(training_epoch, axis_name=_PMAP_AXIS_NAME)
training_walltime = 0
# Note that this is NOT a pure jittable method.
def training_epoch_with_timing(training_state: TrainingState,
key: PRNGKey) -> Tuple[TrainingState, Metrics]:
nonlocal training_walltime
t = time.time()
(training_state, metrics) = training_epoch(training_state, key)
metrics = jax.tree_util.tree_map(jnp.mean, metrics)
jax.tree_util.tree_map(lambda x: x.block_until_ready(), metrics)
epoch_training_time = time.time() - t
training_walltime += epoch_training_time
sps = (episode_length * num_envs) / epoch_training_time
metrics = {
'training/sps': sps,
'training/walltime': training_walltime,
**{f'training/{name}': value for name, value in metrics.items()}
}
return training_state, metrics
key = jax.random.PRNGKey(seed)
global_key, local_key = jax.random.split(key)
del key
local_key = jax.random.fold_in(local_key, process_id)
local_key, eval_key = jax.random.split(local_key)
# The network key should be global, so that networks are initialized the same
# way for different processes.
policy_params = apg_network.policy_network.init(global_key)
del global_key
training_state = TrainingState(
optimizer_state=optimizer.init(policy_params),
policy_params=policy_params,
normalizer_params=running_statistics.init_state(
specs.Array((env.observation_size,), jnp.float32)))
training_state = jax.device_put_replicated(
training_state,
jax.local_devices()[:local_devices_to_use])
eval_episode_length = episode_length if not eval_episode_length else eval_episode_length
if not eval_environment:
eval_env = env
else:
eval_env = eval_environment
eval_env = wrappers.EpisodeWrapper(eval_env, eval_episode_length, action_repeat)
eval_env = wrappers.VmapWrapper(eval_env)
eval_env = wrappers.AutoResetWrapper(eval_env)
evaluator = acting.Evaluator(
eval_env,
functools.partial(make_policy, deterministic=deterministic_eval),
num_eval_envs=num_eval_envs,
episode_length=eval_episode_length,
action_repeat=action_repeat,
key=eval_key)
# Run initial eval
if process_id == 0 and num_evals > 1:
metrics, _ = evaluator.run_evaluation(
_unpmap(
(training_state.normalizer_params, training_state.policy_params)),
training_metrics={})
best_pose_error = min(metrics['eval/episode_pose_error'], best_pose_error)
metrics['eval/best_pose_error'] = best_pose_error
progress_fn(0, metrics)
for it in range(num_evals_after_init):
logging.info('starting iteration %s %s', it, time.time() - xt)
# optimization
epoch_key, local_key = jax.random.split(local_key)
epoch_keys = jax.random.split(epoch_key, local_devices_to_use)
(training_state,
training_metrics) = training_epoch_with_timing(training_state, epoch_keys)
if process_id == 0:
# Run evals.
metrics, qp_list = evaluator.run_evaluation(
_unpmap(
(training_state.normalizer_params, training_state.policy_params)),
training_metrics)
best_pose_error = min(metrics['eval/episode_pose_error'], best_pose_error)
metrics['eval/best_pose_error'] = best_pose_error
progress_fn(it + 1, metrics)
if save_dir is not None:
params = _unpmap(
(training_state.normalizer_params, training_state.policy_params))
eval_traj = serialize_qp(qp_list)
if best_pose_error == metrics['eval/episode_pose_error']:
model.save_params(save_dir + '/params_best.pkl', params)
with open(save_dir+'/eval_traj_best.npy', 'wb') as f:
jnp.save(f, eval_traj)
if (it+1) % 10 == 0:
with open(save_dir+f'/eval_traj_{it+1}.npy', 'wb') as f:
jnp.save(f, eval_traj)
  # If there were no mistakes, the training_state should still be identical on
  # all devices.
pmap.assert_is_replicated(training_state)
params = _unpmap(
(training_state.normalizer_params, training_state.policy_params))
pmap.synchronize_hosts()
return (make_policy, params, metrics)
| 10,754 | 36.473868 | 123 | py |
diffmimic | diffmimic-main/diffmimic/brax_lib/acting.py | # Copyright 2022 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Brax training acting functions."""
import time
from typing import Callable, Sequence, Tuple
from brax import envs
from brax.training.types import Metrics
from brax.training.types import Policy
from brax.training.types import PolicyParams
from brax.training.types import PRNGKey
from brax.training.types import Transition
import jax
import numpy as np
import brax
def actor_step(
env: envs.Env,
env_state: envs.State,
policy: Policy,
key: PRNGKey,
extra_fields: Sequence[str] = ()
) -> Tuple[envs.State, brax.QP]:
"""Collect data."""
actions, policy_extras = policy(env_state.obs, key)
nstate = env.step(env_state, actions)
state_extras = {x: nstate.info[x] for x in extra_fields}
return nstate, env_state.qp
def generate_unroll(
env: envs.Env,
env_state: envs.State,
policy: Policy,
key: PRNGKey,
unroll_length: int,
extra_fields: Sequence[str] = ()
) -> Tuple[envs.State, Transition]:
"""Collect trajectories of given unroll_length."""
@jax.jit
def f(carry, unused_t):
state, current_key = carry
current_key, next_key = jax.random.split(current_key)
nstate, qp = actor_step(
env, state, policy, current_key, extra_fields=extra_fields)
return (nstate, next_key), qp
(final_state, _), qp_list = jax.lax.scan(
f, (env_state, key), (), length=unroll_length)
return final_state, qp_list
# TODO: Consider moving this to its own file.
class Evaluator:
"""Class to run evaluations."""
def __init__(self, eval_env: envs.Env,
eval_policy_fn: Callable[[PolicyParams],
Policy], num_eval_envs: int,
episode_length: int, action_repeat: int, key: PRNGKey):
"""Init.
Args:
eval_env: Batched environment to run evals on.
eval_policy_fn: Function returning the policy from the policy parameters.
num_eval_envs: Each env will run 1 episode in parallel for each eval.
episode_length: Maximum length of an episode.
action_repeat: Number of physics steps per env step.
key: RNG key.
"""
self._key = key
self._eval_walltime = 0.
eval_env = envs.wrappers.EvalWrapper(eval_env)
def generate_eval_unroll(policy_params: PolicyParams,
key: PRNGKey) -> (envs.State, brax.QP):
reset_keys = jax.random.split(key, num_eval_envs)
eval_first_state = eval_env.reset(reset_keys)
return generate_unroll(
eval_env,
eval_first_state,
eval_policy_fn(policy_params),
key,
unroll_length=episode_length // action_repeat)
self._generate_eval_unroll = jax.jit(generate_eval_unroll)
self._steps_per_unroll = episode_length * num_eval_envs
def run_evaluation(self,
policy_params: PolicyParams,
training_metrics: Metrics,
aggregate_episodes: bool = True) -> Metrics:
"""Run one epoch of evaluation."""
self._key, unroll_key = jax.random.split(self._key)
t = time.time()
eval_state, qp_list = self._generate_eval_unroll(policy_params, unroll_key)
eval_metrics = eval_state.info['eval_metrics']
eval_metrics.active_episodes.block_until_ready()
epoch_eval_time = time.time() - t
metrics = {
f'eval/episode_{name}': np.mean(value) if aggregate_episodes else value
for name, value in eval_metrics.episode_metrics.items()
}
metrics = {name: value/np.mean(eval_metrics.episode_steps) for name, value in metrics.items()}
metrics['eval/avg_episode_length'] = np.mean(eval_metrics.episode_steps)
metrics['eval/epoch_eval_time'] = epoch_eval_time
metrics['eval/sps'] = self._steps_per_unroll / epoch_eval_time
self._eval_walltime = self._eval_walltime + epoch_eval_time
metrics = {
'eval/walltime': self._eval_walltime,
**training_metrics,
**metrics
}
return metrics, qp_list
| 4,531 | 33.075188 | 98 | py |
diffmimic | diffmimic-main/diffmimic/mimic_envs/humanoid_mimic_train.py | from brax import jumpy as jp
from brax.envs import env
from .humanoid_mimic import HumanoidMimic
from .losses import *
import jax
class HumanoidMimicTrain(HumanoidMimic):
"""Trains a humanoid to mimic reference motion."""
def __init__(self, total_length, rollout_length, early_termination, demo_replay_mode, err_threshold, replay_rate,
**kwargs):
super().__init__(**kwargs)
self.total_length = total_length
self.rollout_length = rollout_length
self.early_termination = early_termination
self.demo_replay_mode = demo_replay_mode
self.err_threshold = err_threshold
self.replay_rate = replay_rate
def reset(self, rng: jp.ndarray) -> env.State:
reward, done, zero = jp.zeros(3)
step_index = jp.randint(rng, high=self.total_length-self.rollout_length+1) # random state initialization (RSI)
qp = self._get_ref_state(step_index)
metrics = {'step_index': step_index, 'pose_error': zero, 'fall': zero}
obs = self._get_obs(qp, step_index=step_index)
state = env.State(qp, obs, reward, done, metrics)
if self.demo_replay_mode != 'none':
state.metrics.update(replay=jp.zeros(1)[0])
if self.demo_replay_mode == 'random':
replay_key, rng = jp.random_split(rng)
state.metrics.update(replay_key=rng)
return state
def step(self, state: env.State, action: jp.ndarray) -> env.State:
state = super(HumanoidMimicTrain, self).step(state, action)
if self.early_termination:
state = state.replace(done=state.metrics['fall'])
if self.demo_replay_mode != 'none':
state = self._demo_replay(state)
return state
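    # Demonstration replay: when the rollout drifts too far from the reference
    # (threshold mode) or at a fixed per-step probability (random mode), the
    # simulation state is snapped back to the reference pose so the training
    # signal stays informative.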
def _demo_replay(self, state) -> env.State:
qp = state.qp
ref_qp = self._get_ref_state(state.metrics['step_index'])
if self.demo_replay_mode == 'threshold':
error = loss_l2_pos(qp, ref_qp)
replay = jp.where(error > self.err_threshold, jp.float32(1), jp.float32(0))
elif self.demo_replay_mode == 'random':
replay_key, key = jax.random.split(state.metrics['replay_key'])
state.metrics.update(replay_key=replay_key)
replay = jp.where(jax.random.bernoulli(key, p=self.replay_rate), jp.float32(1), jp.float32(0))
else:
raise NotImplementedError
qp = jp.tree_map(lambda x: x*(1 - replay), qp) + jp.tree_map(lambda x: x*replay, ref_qp)
obs = self._get_obs(qp, state.metrics['step_index'])
state.metrics.update(replay=replay)
return state.replace(qp=qp, obs=obs)
| 2,647 | 43.881356 | 120 | py |
diffmimic | diffmimic-main/diffmimic/utils/rotation6d.py | import jax.numpy as jnp
def quaternion_to_matrix(quaternions):
r, i, j, k = quaternions[..., 0], quaternions[..., 1], quaternions[..., 2], quaternions[..., 3]
two_s = 2.0 / (quaternions * quaternions).sum(-1)
o = jnp.stack(
(
1 - two_s * (j * j + k * k),
two_s * (i * j - k * r),
two_s * (i * k + j * r),
two_s * (i * j + k * r),
1 - two_s * (i * i + k * k),
two_s * (j * k - i * r),
two_s * (i * k - j * r),
two_s * (j * k + i * r),
1 - two_s * (i * i + j * j),
),
-1,
)
return o.reshape(quaternions.shape[:-1] + (3, 3))
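# The 6D rotation representation keeps only the first two rows of the rotation
# matrix; unlike quaternions it is a continuous representation, which makes it
# friendlier to gradient-based optimization.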
def matrix_to_rotation_6d(matrix):
batch_dim = matrix.shape[:-2]
return matrix[..., :2, :].reshape(batch_dim + (6,))
def quaternion_to_rotation_6d(quaternion):
return matrix_to_rotation_6d(quaternion_to_matrix(quaternion)) | 915 | 28.548387 | 99 | py |