code stringlengths 17 6.64M |
|---|
class WaitPrint(threading.Thread):
    """Background thread that prints *message* (no newline) after roughly *t*
    seconds, unless :meth:`stop` is called first.

    The delay is polled in 0.1-second ticks so a stop request takes effect
    quickly.
    """

    def __init__(self, t, message):
        super().__init__()
        self.t = t                # delay in seconds before printing
        self.message = message    # text to emit if not stopped in time
        self.running = True       # cleared by stop()

    def stop(self):
        """Cancel the pending message."""
        self.running = False

    def run(self):
        remaining = int(self.t // 0.1)
        while remaining > 0:
            time.sleep(0.1)
            remaining -= 1
            if not self.running:
                return
        print(self.message, end='')
|
def show_running(func):
    """Decorator: if *func* runs for more than 2 seconds, print a one-line
    description of the call (name plus repr'd arguments) while it runs, and
    print the elapsed time once it finishes.

    Fix over the previous version: if *func* raised, the WaitPrint timer
    thread was never stopped, so the banner was still printed up to two
    seconds after the failure. The timer is now cancelled before re-raising.
    """
    @wraps(func)
    def g(*args, **kargs):
        arg_strs = [repr(a) for a in args]
        arg_strs += ['{}={}'.format(key, repr(value)) for (key, value) in kargs.items()]
        x = WaitPrint(2, '{}({})... '.format(func.__name__, ', '.join(arg_strs)))
        x.start()
        t = time.perf_counter()
        try:
            r = func(*args, **kargs)
        except BaseException:
            x.stop()  # cancel the pending banner before propagating
            raise
        if x.is_alive():
            # finished before the banner fired: suppress it
            x.stop()
        else:
            # banner already printed: complete the line with the elapsed time
            print('done in {:.0f} seconds'.format(time.perf_counter() - t))
        return r
    return g
|
def cached_dirpklgz(dirname):
    """Cache a function's results on disk under *dirname*.

    Each distinct argument tuple is assigned a gzip-pickled file; the mapping
    from arguments to filenames is kept in ``index.pkl``. Results are also
    memoized in memory via ``lru_cache`` so repeat calls in one process skip
    the disk entirely.

    Note: arguments must be hashable and picklable; results must be picklable.
    """
    def decorator(func):
        """The actual decorator."""
        @lru_cache(maxsize=None)
        @wraps(func)
        def wrapper(*args):
            """The wrapper of the function."""
            # exist_ok replaces the previous try/except FileExistsError dance
            os.makedirs(dirname, exist_ok=True)
            indexfile = os.path.join(dirname, 'index.pkl')
            try:
                with open(indexfile, 'rb') as file:
                    index = pickle.load(file)
            except FileNotFoundError:
                index = {}
            try:
                filename = index[args]
            except KeyError:
                # first time these args are seen: assign the next filename
                # and persist the updated index immediately
                index[args] = filename = '{}.pkl.gz'.format(len(index))
                with open(indexfile, 'wb') as file:
                    pickle.dump(index, file)
            filepath = os.path.join(dirname, filename)
            try:
                with gzip.open(filepath, 'rb') as file:
                    print('load {}... '.format(filename), end='')
                    result = pickle.load(file)
            except FileNotFoundError:
                print('compute {}... '.format(filename), end='')
                sys.stdout.flush()
                result = func(*args)
                print('save {}... '.format(filename), end='')
                with gzip.open(filepath, 'wb') as file:
                    pickle.dump(result, file)
            print('done')
            return result
        return wrapper
    return decorator
|
def test_so3_rfft(b_in, b_out, device):
    """so3_rfft must agree with the dense so3_rft applied to the
    quadrature-weighted signal (relative max error below 1e-4)."""
    from s2cnn.soft.so3_fft import so3_rfft
    from s2cnn import so3_rft, so3_soft_grid
    import lie_learn.spaces.S3 as S3
    signal = torch.randn(2 * b_in, 2 * b_in, 2 * b_in, dtype=torch.float, device=device)
    y1 = so3_rfft(signal, b_out=b_out)
    weights = torch.tensor(S3.quadrature_weights(b_in), dtype=torch.float, device=device)
    weighted = torch.einsum('bac,b->bac', (signal, weights))
    y2 = so3_rft(weighted.view(-1), b_out, so3_soft_grid(b_in))
    assert (y1 - y2).abs().max().item() < 0.0001 * y1.abs().mean().item()
|
def test_inverse(f, g, b_in, b_out, device, complex):
if complex:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), 2, dtype=torch.float, device=device)
else:
x = torch.randn((2 * b_in), (2 * b_in), (2 * b_in), dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def test_inverse2(f, g, b_in, b_out, device):
x = torch.randn(((b_in * ((4 * (b_in ** 2)) - 1)) // 3), 2, dtype=torch.float, device=device)
x = g(f(x, b_out=b_out), b_out=b_in)
y = g(f(x, b_out=b_out), b_out=b_in)
assert ((x - y).abs().max().item() < (0.0001 * y.abs().mean().item()))
|
def compare_cpu_gpu(f, x):
    """Evaluate f on CPU and on CUDA and assert the max absolute deviation,
    normalized by the CPU result's std, stays below 1e-4."""
    cpu_out = f(x.cpu())
    gpu_out = f(x.cuda()).cpu()
    rel_err = (cpu_out - gpu_out).abs().max().item() / cpu_out.std().item()
    assert rel_err < 0.0001
|
class ConveRTModelConfig(NamedTuple):
    """Architecture hyper-parameters for the ConveRT encoder."""
    num_embed_hidden: int = 512  # width of subword/positional embeddings
    feed_forward1_hidden: int = 2048  # inner width of the per-layer feed-forward
    feed_forward2_hidden: int = 1024  # inner width of the final projection head
    num_attention_project: int = 64  # attention projection size
    vocab_size: int = 25000  # sentencepiece vocabulary size
    num_encoder_layers: int = 6
    dropout_rate: float = 0.0
    n: int = 121  # NOTE(review): meaning not visible in this file — confirm against model code
    # NOTE(review): list defaults are shared across instances; harmless while
    # read-only (it is only indexed elsewhere in this file), but do not mutate.
    relative_attns: list = [3, 5, 48, 48, 48, 48]  # per-layer relative attention widths
    num_attention_heads: int = 2
    token_sequence_truncation: int = 60  # max subword tokens kept per example
|
class ConveRTTrainConfig(NamedTuple):
    """Training-run configuration (paths, batching, LR schedule, regularization)."""
    # NOTE(review): `dirname` is a module-level name defined elsewhere in the
    # file (presumably the package directory) — confirm.
    sp_model_path: str = os.path.join(dirname, 'data/en.wiki.bpe.vs25000.model')  # sentencepiece model
    dataset_path: str = os.path.join(dirname, 'data/sample-dataset.json')  # JSON-lines training data
    test_dataset_path: str = 'data/sample-dataset.json'
    model_save_dir: str = 'lightning_logs/checkpoints/'
    log_dir: str = 'lightning_logs'
    device: str = 'cpu'
    use_data_paraller: bool = True  # NOTE(review): typo for "parallel" kept — renaming would break callers
    is_reddit: bool = True
    train_batch_size: int = 64
    test_batch_size: int = 256
    split_size: int = 8
    learning_rate: float = 0.001  # base/floor learning rate
    lr_warmup_start: float = 0.1  # LR at batch 0 of warm-up
    lr_warmup_end: float = 1.0  # LR at the end of warm-up / start of decay
    warmup_batch: float = 10000  # batches of linear warm-up
    final_batch: float = 100000000.0  # batch index where cosine decay ends
    learning_rate_end: float = 0.0001
    epochs: int = 10
    grad_norm_clip: float = 1.0  # applied to subword-embedding params only
    smoothing: float = 0.2
    l2_weight_decay: float = 1e-05
|
class LossFunction(nn.Module):
    """ConveRT contrastive loss over a batch of (context, reply) embeddings.

    The diagonal of the similarity matrix holds the positive pairs; every
    off-diagonal entry is an in-batch negative.
    """

    @staticmethod
    def cosine_similarity_matrix(context_embed: torch.Tensor, reply_embed: torch.Tensor) -> torch.Tensor:
        """Pairwise similarity matrix (dot products) between the two batches."""
        assert context_embed.size(0) == reply_embed.size(0)
        return torch.matmul(context_embed, reply_embed.T)

    def forward(self, context_embed: torch.Tensor, reply_embed: torch.Tensor) -> torch.Tensor:
        sim = self.cosine_similarity_matrix(context_embed, reply_embed)
        batch = sim.size(0)
        # negative sum of positive-pair similarities
        loss = -torch.sum(torch.diagonal(sim))
        # zero the diagonal so the remaining sum covers only negatives
        sim.diagonal().copy_(torch.zeros(batch))
        loss = 0.8 * loss + (0.2 / (batch * (batch - 1))) * torch.sum(sim)
        # softmax-style normalizer over each column
        loss = loss + torch.sum(torch.logsumexp(sim, dim=0))
        return loss
|
@dataclass
class EncoderInputFeature():
    """Per-example encoder inputs (all 1-D tensors except the scalar length)."""
    input_ids: torch.Tensor       # subword token ids
    attention_mask: torch.Tensor  # 1 for real tokens
    position_ids: torch.Tensor    # 0..len-1
    input_lengths: torch.Tensor   # scalar: number of real tokens

    def pad_sequence(self, seq_len: int):
        """Right-pad each sequence field with zeros up to *seq_len*, in place."""
        for name in ('input_ids', 'attention_mask', 'position_ids'):
            tensor = getattr(self, name)
            setattr(self, name, pad(tensor, [0, seq_len - tensor.size(0)], 'constant', 0))
|
@dataclass
class EmbeddingPair():
    """A (context, reply) pair of encoder inputs for contrastive training."""
    context: EncoderInputFeature
    reply: EncoderInputFeature
|
class DataModule(pl.LightningDataModule):
    """Lightning data module that collates EmbeddingPair examples into
    padded, stacked batches."""

    def __init__(self):
        super().__init__()
        # tensor attribute names stacked into a batch
        self.input_attributes = ['input_ids', 'attention_mask', 'position_ids', 'input_lengths']

    def batching_input_features(self, encoder_inputs: List[EncoderInputFeature]) -> EncoderInputFeature:
        """Pad every example to the batch max length and stack each field."""
        max_seq_len = max(int(example.input_lengths.item()) for example in encoder_inputs)
        for example in encoder_inputs:
            example.pad_sequence(max_seq_len)
        stacked = {}
        for feature_name in self.input_attributes:
            stacked[feature_name] = torch.stack(
                [getattr(example, feature_name) for example in encoder_inputs], dim=0)
        return EncoderInputFeature(**stacked)

    def convert_collate_fn(self, features: List[EmbeddingPair]) -> EmbeddingPair:
        """Collate a list of pairs into one batched pair."""
        contexts = self.batching_input_features([feature.context for feature in features])
        replies = self.batching_input_features([feature.reply for feature in features])
        return EmbeddingPair(context=contexts, reply=replies)

    def train_dataloader(self, train_dataset):
        # `config` is a module-level training config defined elsewhere in the file
        return DataLoader(train_dataset, config.train_batch_size, collate_fn=self.convert_collate_fn, drop_last=True)

    def val_dataloader(self):
        pass

    def test_dataloader(self):
        pass
|
class DatasetInstance(NamedTuple):
    """One reddit example: the list of context turns and the gold response."""
    context: List[str]
    response: str
|
def load_instances_from_reddit_json(dataset_path: str) -> List[DatasetInstance]:
    """Parse a JSON-lines reddit dump into DatasetInstance records.

    Every key containing the substring 'context' contributes one context
    turn, ordered by sorted key name; 'response' is the gold reply.
    """
    instances: List[DatasetInstance] = []
    with open(dataset_path) as f:
        for line in f:
            record = json.loads(line)
            context_keys = sorted(key for key in record.keys() if 'context' in key)
            instances.append(DatasetInstance(
                context=[record[key] for key in context_keys],
                response=record['response'],
            ))
    return instances
|
class RedditData(torch.utils.data.Dataset):
    """Dataset wrapping reddit DatasetInstance records; tokenizes with a
    sentencepiece processor and truncates to *truncation_length* tokens."""

    def __init__(self, instances: List[DatasetInstance], sp_processor: SentencePieceProcessor, truncation_length: int):
        self.sp_processor = sp_processor
        self.instances = instances
        self.truncation_length = truncation_length

    def __len__(self):
        return len(self.instances)

    def __getitem__(self, item):
        instance = self.instances[item]
        # only the first context turn is used (single-context model)
        return EmbeddingPair(
            context=self._convert_instance_to_embedding(instance.context[0]),
            reply=self._convert_instance_to_embedding(instance.response),
        )

    def _convert_instance_to_embedding(self, input_str: str) -> EncoderInputFeature:
        """Tokenize one string into padded-free encoder input tensors."""
        token_ids = self.sp_processor.EncodeAsIds(input_str)
        if self.truncation_length:
            token_ids = token_ids[:self.truncation_length]
        length = len(token_ids)
        # `config` is a module-level training config defined elsewhere in the file
        return EncoderInputFeature(
            input_ids=torch.tensor(token_ids).to(config.device),
            attention_mask=torch.tensor([1] * length).to(config.device),
            position_ids=torch.tensor(list(range(length))).to(config.device),
            input_lengths=torch.tensor(length).to(config.device),
        )
|
class LearningRateDecayCallback(pl.Callback):
    """Lightning callback setting the optimizer LR after every train batch:
    linear warm-up from lr_warmup_start to lr_warmup_end over the first
    `warmup_batch` batches, then cosine decay from lr_warmup_end toward
    `learning_rate` (the floor) by `final_batch`.
    """
    def __init__(self, config, lr_decay=True):
        super().__init__()
        self.lr_warmup_end = config.lr_warmup_end      # LR at end of warm-up / decay start
        self.lr_warmup_start = config.lr_warmup_start  # LR at batch 0
        self.learning_rate = config.learning_rate      # floor LR after decay
        self.warmup_batch = config.warmup_batch
        self.final_batch = config.final_batch
        self.lr_decay = lr_decay                       # disable to leave the LR untouched
    def on_train_batch_end(self, trainer, pl_module, batch, batch_idx, dataloader_idx):
        """Recompute the LR for the current batch index and write it into every
        param group of the first optimizer.

        NOTE(review): keyed on batch_idx, which resets each epoch — confirm a
        per-epoch restart of the schedule is intended.
        """
        optimizer = trainer.optimizers[0]
        if self.lr_decay:
            if (batch_idx < self.warmup_batch):
                # linear warm-up phase
                lr_mult = (float(batch_idx) / float(max(1, self.warmup_batch)))
                lr = (self.lr_warmup_start + (lr_mult * (self.lr_warmup_end - self.lr_warmup_start)))
            else:
                # cosine decay phase; outer max() keeps LR at or above the floor
                progress = (float((batch_idx - self.warmup_batch)) / float(max(1, (self.final_batch - self.warmup_batch))))
                lr = max((self.learning_rate + ((0.5 * (1.0 + math.cos((math.pi * progress)))) * (self.lr_warmup_end - self.learning_rate))), self.learning_rate)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
|
def set_seed(seed):
    """Seed the python, numpy and torch (incl. all CUDA devices) RNGs for
    reproducible runs."""
    for seeder in (random.seed, np.random.seed, torch.manual_seed, torch.cuda.manual_seed_all):
        seeder(seed)
|
def find_subword_params(model):
    """Return (params, names) for the subword-embedding parameters of *model*.

    Only these parameters are gradient-clipped in the paper; compute once
    after model instantiation, before training. Names are fully qualified
    ('module.path.param'); params are returned sorted by name.
    """
    prefix = 'transformer_layers.subword_embedding'
    embeds = set()
    for module_name, module in model.named_modules():
        if not module_name.startswith(prefix):
            continue
        for param_name, _ in module.named_parameters():
            embeds.add('{}.{}'.format(module_name, param_name) if module_name else param_name)
    param_dict = dict(model.named_parameters())
    return [param_dict[name] for name in sorted(embeds)], embeds
|
class SingleContextConvert(pl.LightningModule):
    """Single-context ConveRT dual encoder: a shared transformer stack with
    separate FeedForward2 heads for context and reply, trained with the
    in-batch contrastive LossFunction."""

    def __init__(self, model_config: ConveRTModelConfig, train_config: ConveRTTrainConfig):
        super().__init__()
        self.model_config = model_config
        self.train_config = train_config
        self.transformer_layers = TransformerLayers(model_config)
        self.ff2_context = FeedForward2(model_config)
        self.ff2_reply = FeedForward2(model_config)
        self.loss_function = LossFunction()
        self.weight_decay = train_config.l2_weight_decay
        # BUG FIX: previously used ._field_defaults, which records the *default*
        # values of every field and silently discards any overrides actually
        # passed into the configs (configure_optimizers reads learning_rate and
        # l2_weight_decay back out of self.hparams). _asdict() records the real
        # values of these instances.
        self.hparams = dict(self.train_config._asdict())
        self.hparams.update(self.model_config._asdict())
        self.subword_params = None  # filled by register_subword_params()
        logger.info('number of parameters: %e', sum(p.numel() for p in self.parameters()))

    def register_subword_params(self):
        """Cache the subword-embedding parameters for gradient clipping."""
        self.subword_params = find_subword_params(self)[0]

    def forward(self, x):
        return self.transformer_layers(x)

    def backward(self, trainer, loss, optimizer, optimizer_idx):
        """Lightning hook override: clip the gradient norm of ONLY the subword
        embedding parameters after loss.backward(), before the optimizer step."""
        loss.backward()
        torch.nn.utils.clip_grad_norm_(self.subword_params, self.train_config.grad_norm_clip)

    def configure_optimizers(self):
        """AdamW with weight decay on everything except biases and LayerNorm
        weights (the paper does not specify which params to exempt)."""
        no_decay = ['bias', 'LayerNorm.weight']
        params_decay = [p for n, p in self.named_parameters() if not any(nd in n for nd in no_decay)]
        params_nodecay = [p for n, p in self.named_parameters() if any(nd in n for nd in no_decay)]
        optim_groups = [
            {'params': params_decay, 'weight_decay': self.hparams.l2_weight_decay},
            {'params': params_nodecay, 'weight_decay': 0.0},
        ]
        return torch.optim.AdamW(optim_groups, lr=self.hparams.learning_rate)

    def training_step(self, batch, batch_idx):
        batch_context = batch.context
        batch_reply = batch.reply
        rx = self(batch_context)
        ry = self(batch_reply)
        hx = self.ff2_context(rx, batch_context.attention_mask)
        hy = self.ff2_reply(ry, batch_reply.attention_mask)
        loss = self.loss_function(hx, hy)
        tqdm_dict = {'train_loss': loss}
        return OrderedDict({'loss': loss, 'progress_bar': tqdm_dict, 'log': tqdm_dict})

    def validation_step(self, batch, batch_idx):
        output = self.training_step(batch, batch_idx)
        return {'val_loss': output['loss']}
|
def _parse_args():
'Parse command-line arguments.'
parser = argparse.ArgumentParser()
parser.add_argument('--progress_bar_refresh_rate', type=int, default=1)
parser.add_argument('--row_log_interval', type=int, default=1)
args = parser.parse_args()
return args
|
def main(**kwargs):
    """End-to-end training entry point.

    Loads the tokenizer and reddit data, builds the model, and runs the
    Lightning trainer. **kwargs are forwarded to pl.Trainer (e.g.
    fast_dev_run=True in the smoke test).
    """
    set_seed(1)  # fixed seed for reproducibility
    train_config = ConveRTTrainConfig()
    model_config = ConveRTModelConfig()
    tokenizer = SentencePieceProcessor()
    args = _parse_args()
    tokenizer.Load(train_config.sp_model_path)
    train_instances = load_instances_from_reddit_json(train_config.dataset_path)
    # 60 matches ConveRTModelConfig.token_sequence_truncation's default
    RD = RedditData(train_instances, tokenizer, 60)
    dm = DataModule()
    train_loader = dm.train_dataloader(RD)
    model = SingleContextConvert(model_config, train_config)
    lr_decay = LearningRateDecayCallback(train_config)
    model.register_subword_params()  # needed by the model's backward() hook
    trainer = pl.Trainer.from_argparse_args(args, callbacks=[lr_decay], **kwargs)
    # NOTE(review): validation reuses the training loader — confirm intentional
    trainer.fit(model, train_dataloader=train_loader, val_dataloaders=train_loader)
|
@pytest.fixture
def config():
    """Default training configuration fixture."""
    return ConveRTTrainConfig()
|
@pytest.fixture
def tokenizer(config) -> SentencePieceProcessor:
    """Sentencepiece tokenizer loaded from the configured model path.

    BUG FIX: the previous version read `config.sp_model_path` off the
    module-level `config` *fixture function* itself (an AttributeError at
    test time); pytest fixtures must be requested as parameters, which is
    what the `config` argument now does.
    """
    tokenizer = SentencePieceProcessor()
    tokenizer.Load(config.sp_model_path)
    return tokenizer
|
def test_load_instances_from_reddit_json(config):
    """The bundled sample dataset should yield exactly 1000 instances."""
    assert len(load_instances_from_reddit_json(config.dataset_path)) == 1000
|
class TestModelTraining(unittest.TestCase):
    'Check can overfit small batch etc. without issues'

    def test_fast_dev_run(self):
        """A fast_dev_run of main() should complete without error in <10s.

        BUG FIX: the bare `except:` also caught SystemExit/KeyboardInterrupt
        and hid the original traceback; narrowed to Exception.
        """
        start = time()
        try:
            main(fast_dev_run=True)
        except Exception:
            self.fail('Obvious Training Problem!')
        self.assertLess(time() - start, 10)
|
@pytest.fixture
def model_config():
    """Default model architecture configuration fixture."""
    return ConveRTModelConfig()
|
@pytest.fixture
def train_config():
    """Training configuration with small-test overrides (batch 64, lr 2e-5)."""
    return ConveRTTrainConfig(train_batch_size=64, split_size=8, learning_rate=2e-05)
|
def test_circulant_t():
    """circulant_mask sanity checks: known popcount for (50, 47), and no crash
    for widths above, at, and below the sequence length.

    BUG FIX: the previous version caught an undefined name `ExceptionType`
    and called `self.fail` inside a plain function (both NameErrors at
    runtime); it now catches Exception and fails via pytest.
    """
    assert circulant_mask(50, 47).sum().item() == 2494
    try:
        circulant_mask(47, 50)
        circulant_mask(47, 47)
        circulant_mask(47, 45)
    except Exception:
        pytest.fail('circulant_mask failed')
|
def test_SubwordEmbedding(train_config, model_config):
    """Embedding output must be (batch, seq, num_embed_hidden)."""
    embedding = SubwordEmbedding(model_config)
    batch = train_config.train_batch_size
    token_ids = torch.randint(high=model_config.vocab_size, size=(batch, SEQ_LEN))
    position_ids = torch.randint(high=model_config.vocab_size, size=(batch, SEQ_LEN))
    output = embedding(input_ids=token_ids, position_ids=position_ids)
    assert output.size() == (batch, SEQ_LEN, model_config.num_embed_hidden)
|
def test_SelfAttention(model_config, train_config):
    """Self-attention must preserve the (batch, seq, hidden) shape."""
    # `relative_attention` is a module-level value defined elsewhere in the file
    attention = SelfAttention(model_config, relative_attention)
    batch = train_config.train_batch_size
    query = torch.rand(batch, SEQ_LEN, model_config.num_embed_hidden)
    attn_mask = torch.ones(query.size()[:-1], dtype=torch.float)
    assert attention(query, attn_mask).size() == (batch, SEQ_LEN, model_config.num_embed_hidden)
|
def test_FeedForward1(train_config, model_config):
    """FeedForward1 must preserve its input shape."""
    ff1 = FeedForward1(model_config.num_embed_hidden, model_config.feed_forward1_hidden, model_config.dropout_rate)
    batch_input = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    assert ff1(batch_input).size() == batch_input.size()
|
def test_SharedInnerBlock(train_config, model_config):
    """A shared inner block (random relative-attention width) preserves shape."""
    from random import randrange
    block = SharedInnerBlock(model_config, model_config.relative_attns[randrange(6)])
    embed = torch.rand(train_config.train_batch_size, SEQ_LEN, model_config.num_embed_hidden)
    mask = torch.ones(embed.size()[:-1], dtype=torch.float)
    assert block(embed, mask).size() == embed.size()
|
def test_MultiheadAttention(train_config, model_config):
    """MHA output is (batch, seq, hidden * heads); hidden divides by heads."""
    mha = MultiheadAttention(model_config)
    batch = train_config.train_batch_size
    embed = torch.rand(batch, SEQ_LEN, model_config.num_embed_hidden)
    mask = torch.ones(embed.size()[:-1], dtype=torch.float)
    assert model_config.num_embed_hidden % mha.num_attention_heads == 0
    expected = (batch, SEQ_LEN, model_config.num_embed_hidden * model_config.num_attention_heads)
    assert mha(embed, mask).size() == expected
|
def test_TransformerLayers(model_config):
    """Transformer stack output: embedding shape with hidden * heads last dim,
    on a pickled fixture batch."""
    layers = TransformerLayers(model_config)
    pickle_path = str(Path(__file__).parents[1].resolve() / 'data' / 'batch_context.pickle')
    with open(pickle_path, 'rb') as input_file:
        encoder_input = pickle.load(input_file)
    print(type(encoder_input))
    embedding = SubwordEmbedding(model_config)
    emb_output = embedding(encoder_input.input_ids, encoder_input.position_ids)
    expected = emb_output.size()[:-1] + (model_config.num_embed_hidden * model_config.num_attention_heads,)
    assert layers(encoder_input).size() == expected
|
def test_FeedForward2(model_config, train_config):
    """FF2 pools (batch, seq, hidden * heads) down to (batch, hidden)."""
    batch = train_config.train_batch_size
    embed = torch.rand(batch, SEQ_LEN, model_config.num_embed_hidden * model_config.num_attention_heads)
    mask = torch.ones(embed.size()[:-1], dtype=torch.float)
    ff2 = FeedForward2(model_config)
    assert ff2(embed, mask).size() == (batch, model_config.num_embed_hidden)
|
def wave_frontend(x, is_training):
    """Sample-level waveform front-end from Lee et al. 2017.

    Lee, et al. "Sample-level Deep Convolutional Neural Networks for Music
    Auto-tagging Using Raw Waveforms." arXiv:1703.01789 (2017).

    - 'x': placeholder with the raw waveform input.
    - 'is_training': placeholder selecting train/test phase for batch norm.
    """
    initializer = tf.contrib.layers.variance_scaling_initializer()
    # strided "frame" conv, no pooling
    net = tf.layers.conv1d(inputs=x, filters=64, kernel_size=3, strides=3, padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
    net = tf.layers.batch_normalization(net, training=is_training)
    # six conv/bn/pool stages with growing filter counts
    for filters in (64, 64, 128, 128, 128, 256):
        net = tf.layers.conv1d(inputs=net, filters=filters, kernel_size=3, strides=1, padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
        net = tf.layers.batch_normalization(net, training=is_training)
        net = tf.layers.max_pooling1d(net, pool_size=3, strides=3)
    return tf.expand_dims(net, [3])
|
def spec_frontend(x, is_training, config, num_filt):
    """Proposed spectrogram front-end: six timbral branches (2-D convs with
    varying time/frequency kernels) plus four temporal branches (1-D convs
    over the frequency-averaged spectrogram), concatenated on the feature
    axis.

    - 'x': placeholder with the input spectrogram.
    - 'is_training': placeholder selecting train/test phase for batch norm.
    - 'config': dict providing config['setup_params']['yInput'] (number of
      frequency bins).
    - 'num_filt': multiplicative factor controlling the filter count of
      every branch.
    """
    initializer = tf.contrib.layers.variance_scaling_initializer()
    y_input = config['setup_params']['yInput']
    input_layer = tf.expand_dims(x, 3)
    # time-padded variants so 7- and 3-wide kernels keep the time length
    input_pad_7 = tf.pad(input_layer, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    input_pad_3 = tf.pad(input_layer, [[0, 0], [1, 1], [0, 0], [0, 0]], 'CONSTANT')
    branches = []
    # timbral branches: (input, filters, time kernel, frequency fraction)
    timbral_specs = [
        (input_pad_7, num_filt, 7, 0.9),
        (input_pad_3, num_filt * 2, 3, 0.9),
        (input_layer, num_filt * 4, 1, 0.9),
        (input_pad_7, num_filt, 7, 0.4),
        (input_pad_3, num_filt * 2, 3, 0.4),
        (input_layer, num_filt * 4, 1, 0.4),
    ]
    for branch_input, filters, k_time, freq_frac in timbral_specs:
        conv = tf.layers.conv2d(inputs=branch_input, filters=filters, kernel_size=[k_time, int(freq_frac * y_input)], padding='valid', activation=tf.nn.relu, kernel_initializer=initializer)
        bn = tf.layers.batch_normalization(conv, training=is_training)
        pooled = tf.layers.max_pooling2d(inputs=bn, pool_size=[1, bn.shape[2]], strides=[1, bn.shape[2]])
        branches.append(tf.squeeze(pooled, [2]))
    # temporal branches: average over frequency, then 1-D conv over time
    for filters, kernel in [(num_filt, 165), (num_filt * 2, 128), (num_filt * 4, 64), (num_filt * 8, 32)]:
        avg = tf.layers.average_pooling2d(inputs=input_layer, pool_size=[1, y_input], strides=[1, y_input])
        avg = tf.squeeze(avg, [3])
        conv = tf.layers.conv1d(inputs=avg, filters=filters, kernel_size=kernel, padding='same', activation=tf.nn.relu, kernel_initializer=initializer)
        branches.append(tf.layers.batch_normalization(conv, training=is_training))
    pool = tf.concat(branches, 2)
    return tf.expand_dims(pool, 3)
|
def backend(route_out, is_training, config, num_units):
    """Proposed back-end: two residual temporal conv blocks, max+mean pooling
    over time, and two dense layers with dropout.

    - 'route_out': output of the front-end (this function's input).
    - 'is_training': placeholder selecting train/test phase for dropout and
      batch norm.
    - 'config': dict providing config['numOutputNeurons'] output units.
    - 'num_units': units of the penultimate dense layer.
    """
    initializer = tf.contrib.layers.variance_scaling_initializer()
    # first temporal conv spans the whole feature axis
    conv1 = tf.layers.conv2d(inputs=route_out, filters=512, kernel_size=[7, route_out.shape[2]], padding='valid', activation=tf.nn.relu, name='1cnnOut', kernel_initializer=initializer)
    norm1 = tf.layers.batch_normalization(conv1, training=is_training)
    norm1_t = tf.transpose(norm1, [0, 1, 3, 2])
    # residual block 1 (time axis padded so the 7-wide conv keeps its length)
    norm1_pad = tf.pad(norm1_t, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv2 = tf.layers.conv2d(inputs=norm1_pad, filters=512, kernel_size=[7, norm1_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='2cnnOut', kernel_initializer=initializer)
    conv2_t = tf.transpose(conv2, [0, 1, 3, 2])
    norm2 = tf.layers.batch_normalization(conv2_t, training=is_training)
    res2 = tf.add(norm2, norm1_t)
    pool1 = tf.layers.max_pooling2d(inputs=res2, pool_size=[2, 1], strides=[2, 1], name='poolOut')
    # residual block 2
    pool1_pad = tf.pad(pool1, [[0, 0], [3, 3], [0, 0], [0, 0]], 'CONSTANT')
    conv3 = tf.layers.conv2d(inputs=pool1_pad, filters=512, kernel_size=[7, pool1_pad.shape[2]], padding='valid', activation=tf.nn.relu, name='3cnnOut', kernel_initializer=initializer)
    conv3_t = tf.transpose(conv3, [0, 1, 3, 2])
    norm3 = tf.layers.batch_normalization(conv3_t, training=is_training)
    res3 = tf.add(norm3, pool1)
    # temporal pooling: concatenate max and mean over the time axis
    max_pool = tf.reduce_max(res3, axis=1)
    mean_pool, _ = tf.nn.moments(res3, axes=[1])
    pooled = tf.concat([max_pool, mean_pool], 2)
    flat = tf.contrib.layers.flatten(pooled)
    flat_dropout = tf.layers.dropout(flat, rate=0.5, training=is_training)
    dense = tf.layers.dense(inputs=flat_dropout, units=num_units, activation=tf.nn.relu, kernel_initializer=initializer)
    dense_norm = tf.layers.batch_normalization(dense, training=is_training)
    dense_dropout = tf.layers.dropout(dense_norm, rate=0.5, training=is_training)
    return tf.layers.dense(inputs=dense_dropout, activation=tf.sigmoid, units=config['numOutputNeurons'], kernel_initializer=initializer)
|
def build_model(x, is_training, config):
    """Example model assembly: the spectrogram front-end (num_filt=16) feeding
    the proposed back-end (500 dense units).

    - 'x': placeholder with the input.
    - 'is_training': placeholder selecting train/test phase for dropout and
      batch norm.
    - 'config': dict providing config['numOutputNeurons'] and
      config['setup_params']['yInput'].
    """
    frontend_out = spec_frontend(x, is_training, config, 16)
    return backend(frontend_out, is_training, config, 500)
|
def extract_audioset_features(ids, id2audio_path, id2label):
    """Extract VGGish embeddings for a list of audio ids.

    - 'ids': iterable of audio identifiers.
    - 'id2audio_path': maps id -> wav file path.
    - 'id2label': maps id -> label (repeated once per VGGish example frame).

    Returns [features, ground_truth, identifiers], all aligned per frame.
    """
    # PERF FIX: collect per-file chunks and concatenate once at the end —
    # the previous version called np.concatenate inside the loop, which is
    # quadratic in the total number of frames.
    input_chunks = []
    gt_chunks = []
    id_chunks = []
    for i in ids:
        examples = vggish_input.wavfile_to_examples(id2audio_path[i])
        input_chunks.append(examples)
        gt_chunks.append(np.repeat(id2label[i], examples.shape[0], axis=0))
        id_chunks.append(np.repeat(i, examples.shape[0], axis=0))
    input_data = np.concatenate(input_chunks, axis=0)
    ground_truth = np.concatenate(gt_chunks, axis=0)
    identifiers = np.concatenate(id_chunks, axis=0)
    with tf.Graph().as_default(), tf.Session() as sess:
        vggish_slim.define_vggish_slim(training=False)
        vggish_slim.load_vggish_slim_checkpoint(sess, 'vggish_model.ckpt')
        features_tensor = sess.graph.get_tensor_by_name(vggish_params.INPUT_TENSOR_NAME)
        embedding_tensor = sess.graph.get_tensor_by_name(vggish_params.OUTPUT_TENSOR_NAME)
        extracted_feat = sess.run([embedding_tensor], feed_dict={features_tensor: input_data})
        feature = np.squeeze(np.asarray(extracted_feat))
    return [feature, ground_truth, identifiers]
|
def select(x, config, is_training, reuse=False):
    """Dispatch to a model implementation by config['model_number'].

    Model 2 is plain vgg_bn; model 12 applies the learnable log compression
    first. Raises RuntimeError for any other number.
    """
    number = config['model_number']
    if number == 2:
        return vgg_bn(x, config, is_training, 10, reuse)
    if number == 12:
        return vgg_bn(log_learn(x), config, is_training, 10, reuse)
    raise RuntimeError("ERROR: Model {} can't be found!".format(config['model_number']))
|
def vgg_bn(x, config, is_training, output_filters, reuse=False):
    """VGG-style network with batch norm: five conv/bn/maxpool stages of 128
    filters each, then dropout and a linear output layer.

    Side effect: stores the output width in config['embedding_size'].
    Returns [output, config].
    """
    with tf.variable_scope('vggish', reuse=reuse):
        NUMBER_FILTERS = 128
        print('VGG with batchnorm! #filters: ' + str(NUMBER_FILTERS))
        print('Input: ' + str(x.get_shape))
        net = tf.layers.batch_normalization(x, training=is_training, axis=1)
        for stage in range(1, 6):
            net = tf.layers.conv2d(inputs=net, filters=NUMBER_FILTERS, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='{}CNN'.format(stage), kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
            net = tf.layers.batch_normalization(net, training=is_training, axis=(-1))
            net = tf.layers.max_pooling2d(inputs=net, pool_size=[2, 2], strides=[2, 2])
            print(net.get_shape)
        flat = tf.layers.flatten(net)
        dropped = tf.layers.dropout(flat, rate=0.5, training=is_training)
        print(dropped.get_shape)
        output = tf.layers.dense(inputs=dropped, activation=None, units=output_filters, kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
        config['embedding_size'] = output.get_shape().as_list()[1]
        return [output, config]
|
def log_learn(x):
    """Learnable compression: returns log(alpha * x + beta) where alpha and
    beta are positive trainable scalars."""
    with tf.variable_scope('log_learn'):
        raw_alpha = tf.Variable(tf.constant(7, dtype=tf.float32), name='ta', trainable=True)
        raw_beta = tf.Variable(tf.constant(1, dtype=tf.float32), name='ba', trainable=True)
        # exp keeps alpha strictly positive; softplus keeps beta strictly positive.
        alpha = tf.exp(raw_alpha, name='alpha')
        beta = tf.log(1 + tf.exp(raw_beta), name='beta')
        return tf.log(tf.scalar_mul(alpha, x) + beta)
|
def model_number(x, is_training, config):
    """Dispatch on config['model_number'] and build the selected architecture.

    Raises RuntimeError for an unknown model number.
    """
    builders = {
        0: ('SB-CNN', lambda: sb_cnn(x, is_training, config)),
        1: ('SB-CNN | BN input', lambda: sb_cnn_bn(x, is_training, config)),
        2: ('Timbre | BN input', lambda: timbre(x, is_training, config, num_filters=config['num_classes_dataset'])),
        3: ('VGG | BN input', lambda: vgg(x, is_training, config, num_filters=32)),
        11: ('SB-CNN -> Justin | BN input | LOG learn', lambda: sb_cnn_bn(log_learn(x), is_training, config)),
        12: ('Timbre | MP -> direct | BN input | LOG learn', lambda: timbre(log_learn(x), is_training, config, num_filters=config['num_classes_dataset'])),
        13: ('VGG | BN input | LOG learn | 32 filters', lambda: vgg(log_learn(x), is_training, config, num_filters=32)),
        14: ('VGG | BN input | LOG learn | 128 filters', lambda: vgg(log_learn(x), is_training, config, num_filters=128)),
    }
    selection = config['model_number']
    if selection not in builders:
        raise RuntimeError("ERROR: Model {} can't be found!".format(selection))
    description, build = builders[selection]
    print('\nMODEL: {}'.format(description))
    print('-----------------------------------\n')
    return build()
|
def log_learn(x):
    # NOTE(review): byte-identical duplicate of the log_learn defined earlier
    # in this file; consider removing one copy.
    # Learnable compression: log(alpha * x + beta) with trainable scalars.
    with tf.variable_scope('log_learn'):
        ta = tf.Variable(tf.constant(7, dtype=tf.float32), name='ta', trainable=True)
        ba = tf.Variable(tf.constant(1, dtype=tf.float32), name='ba', trainable=True)
        # exp keeps alpha strictly positive (initialised around e^7).
        alpha = tf.exp(ta, name='alpha')
        # softplus keeps beta strictly positive.
        beta = tf.log((1 + tf.exp(ba)), name='beta')
        return tf.log((tf.scalar_mul(alpha, x) + beta))
|
def vgg(x, is_training, config, num_filters):
    """VGG-style trunk: five conv/BN/max-pool blocks plus a dense classifier.

    Args:
        x: input tensor; a trailing channel axis is added internally.
        is_training: flag/tensor controlling batch-norm and dropout behaviour.
        config: dict; 'num_classes_dataset' sets the output width.
        num_filters: number of filters used by every conv layer.

    Returns:
        Un-normalised logits of shape (batch, num_classes_dataset).
    """
    with tf.variable_scope('vggish'):
        # BUGFIX: get_shape is a method — call it; previously the bound method
        # object was printed instead of the tensor shape.
        print('[SMALL FILTERS] Input: ' + str(x.get_shape()))
        net = tf.expand_dims(x, 3)  # add channel axis
        net = tf.layers.batch_normalization(net, training=is_training, axis=(- 1))
        # Five identical conv -> BN -> 2x2 max-pool blocks (names 1CNN..5CNN
        # match the original layer-by-layer construction).
        for i in range(1, 6):
            conv = tf.layers.conv2d(inputs=net, filters=num_filters, kernel_size=[3, 3], padding='same', activation=tf.nn.elu, name='{}CNN'.format(i), kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
            bn = tf.layers.batch_normalization(conv, training=is_training, axis=(- 1))
            net = tf.layers.max_pooling2d(inputs=bn, pool_size=[2, 2], strides=[2, 2])
            print(net.get_shape())
        flat = tf.layers.flatten(net)
        do = tf.layers.dropout(flat, rate=0.5, training=is_training)
        print(do.get_shape())
        output = tf.layers.dense(inputs=do, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
        return output
|
def timbre(x, is_training, config, num_filters):
    """Single wide-kernel conv layer followed by a global max-pool.

    Returns a flattened (batch, num_filters) feature tensor.
    """
    with tf.variable_scope('timbre'):
        # BUGFIX: call get_shape() — the bound method object was printed before.
        print('[CNN SINGLE] Input: ' + str(x.get_shape()))
        input_layer = tf.expand_dims(x, 3)  # add channel axis
        bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
        # 7x108 'valid' kernel — presumably spans the whole frequency axis of
        # the input spectrogram (TODO confirm input width).
        conv1 = tf.layers.conv2d(inputs=bn_input, filters=num_filters, kernel_size=[7, 108], padding='valid', activation=None, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
        # Global max-pool over the remaining spatial extent.
        pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[conv1.shape[1], conv1.shape[2]], strides=[conv1.shape[1], conv1.shape[2]])
        output = tf.layers.flatten(pool1)
        print(conv1.get_shape())
        print(conv1.shape[1])
        print(conv1.shape[2])
        print(pool1.get_shape())
        print(output)
        return output
|
def sb_cnn_core(input_, is_training, config):
    """Shared SB-CNN trunk: three conv layers and two dropout-regularised
    dense layers; returns logits of size config['num_classes_dataset'].
    """
    # BUGFIX throughout: get_shape is a method — call it; previously the bound
    # method object (not the shape) was printed.
    print(input_.get_shape())
    conv1 = tf.layers.conv2d(inputs=input_, filters=24, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='1CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
    print(conv1.get_shape())
    pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[4, 2], strides=[4, 2])
    print(pool1.get_shape())
    conv2 = tf.layers.conv2d(inputs=pool1, filters=48, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='2CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
    print(conv2.get_shape())
    pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[4, 2], strides=[4, 2])
    print(pool2.get_shape())
    conv3 = tf.layers.conv2d(inputs=pool2, filters=48, kernel_size=[5, 5], padding='valid', activation=tf.nn.relu, name='3CNN', kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
    print(conv3.get_shape())
    flat_conv3 = tf.contrib.layers.flatten(conv3)
    print(flat_conv3.get_shape())
    do_pool5 = tf.layers.dropout(flat_conv3, rate=0.5, training=is_training)
    print(do_pool5.get_shape())
    dense_out = tf.layers.dense(inputs=do_pool5, activation=tf.nn.relu, units=64, kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
    do = tf.layers.dropout(dense_out, rate=0.5, training=is_training)
    print(do.get_shape())
    output = tf.layers.dense(inputs=do, activation=None, units=config['num_classes_dataset'], kernel_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_AVG', uniform=True))
    print('output: ' + str(output.get_shape()))
    return output
|
def sb_cnn(x, is_training, config):
    """SB-CNN entry point: adds a channel axis and runs the shared core."""
    # BUGFIX: call get_shape() (was printing the bound method object).
    print('Input: ' + str(x.get_shape()))
    input_layer = tf.expand_dims(x, 3)
    return sb_cnn_core(input_layer, is_training, config)
|
def sb_cnn_bn(x, is_training, config):
    """SB-CNN variant that batch-normalises the input before the shared core."""
    # BUGFIX: call get_shape() (was printing the bound method object).
    print('Input: ' + str(x.get_shape()))
    input_layer = tf.expand_dims(x, 3)
    print(input_layer.get_shape())
    bn_input = tf.layers.batch_normalization(input_layer, training=is_training, axis=(- 1))
    return sb_cnn_core(bn_input, is_training, config)
|
def compute_audio_repr(audio_file, audio_repr_file):
    """Compute the configured audio representation of *audio_file*, pickle it
    to *audio_repr_file*, and return its number of frames.

    Reads the module-level `config` dict for the representation type and its
    parameters.
    """
    if config['type'] == 'audioset':
        audio_repr = vggish_input.wavfile_to_examples(audio_file)
        print(audio_repr.shape)
    else:
        audio, sr = librosa.load(audio_file, sr=config['resample_sr'])
        if config['type'] == 'waveform':
            audio_repr = np.expand_dims(audio, axis=1)  # (samples, 1)
        elif config['spectrogram_type'] == 'cqt':
            audio_repr = librosa.cqt(audio, sr=sr, hop_length=config['hop'], n_bins=config['cqt_bins'], real=False).T
        elif config['spectrogram_type'] == 'mel':
            audio_repr = librosa.feature.melspectrogram(y=audio, sr=sr, hop_length=config['hop'], n_fft=config['n_fft'], n_mels=config['n_mels']).T
        elif config['spectrogram_type'] == 'stft':
            audio_repr = librosa.stft(y=audio, n_fft=config['n_fft']).T
        else:
            # BUGFIX: previously an unknown type fell through and raised a
            # confusing NameError on the next line.
            raise ValueError('Unknown spectrogram_type: {}'.format(config['spectrogram_type']))
    length = audio_repr.shape[0]
    with open(audio_repr_file, 'wb') as f:
        pickle.dump(audio_repr, f)
    return length
|
def do_process(files, index):
    """Compute and store the audio representation for files[index], appending
    the outcome to the machine-local index file (or the errors file on failure).
    """
    # BUGFIX: unpack outside the try block — previously a failure before this
    # assignment made `audio_file` unbound inside the except handler, masking
    # the real error with a NameError.
    [id, audio_file, audio_repr_file] = files[index]
    try:
        # Ensure the destination directory exists.
        if not os.path.exists(audio_repr_file[:(audio_repr_file.rfind('/') + 1)]):
            path = Path(audio_repr_file[:(audio_repr_file.rfind('/') + 1)])
            path.mkdir(parents=True, exist_ok=True)
        compute_audio_repr(audio_file, audio_repr_file)
        # BUGFIX: context managers close the handles even if a write fails.
        with open((((config_file.DATA_FOLDER + config['audio_representation_folder']) + 'index_') + str(config['machine_i'])) + '.tsv', 'a') as fw:
            fw.write('%s\t%s\t%s\n' % (id, audio_repr_file[len(config_file.DATA_FOLDER):], audio_file[len(config_file.DATA_FOLDER):]))
        print(((str(index) + '/') + str(len(files))) + (' Computed: %s' % audio_file))
    except Exception as e:
        with open((((config_file.DATA_FOLDER + config['audio_representation_folder']) + 'errors') + str(config['machine_i'])) + '.txt', 'a') as ferrors:
            ferrors.write(audio_file + '\n')
            ferrors.write(str(e))
        print('Error computing audio representation: ', audio_file)
        print(str(e))
|
def process_files(files):
    """Run do_process over every file entry — serially when DEBUG is set,
    otherwise in parallel with joblib."""
    if DEBUG:
        print('WARNING: Parallelization is not used!')
        for idx in range(len(files)):
            do_process(files, idx)
    else:
        Parallel(n_jobs=config['num_processing_units'])(
            delayed(do_process)(files, idx) for idx in range(len(files)))
|
def eval(config, ids, id2audio_repr_path, support_set, id2gt, id2label, tf_vars, vis_vars):
    # Evaluate the prototypical-network model on `ids`: stream every patch,
    # run the session, aggregate patch predictions per clip, and return the
    # clip-level accuracy. Optionally saves embeddings/prototypes to disk.
    # NOTE(review): shadows the builtin `eval`; this same function is
    # re-defined twice further down this file (the last definition wins).
    [id_string, save_latents, track_accuracies, printing, transfer_learning, model_folder] = vis_vars
    if transfer_learning:
        [sess, x, q, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 1]
        eval_streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    else:
        [sess, x, q, is_train, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 42]
        eval_streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    # Exhaust every stream exactly once, batching patches for the forward pass.
    eval_mux_stream = pescador.ChainMux(eval_streams, mode='exhaustive')
    eval_batch_streamer = pescador.Streamer(pescador.buffer_stream, eval_mux_stream, buffer_size=config['test_batch_size'], partial=True)
    first_eval = True
    count = 0
    for eval_batch in eval_batch_streamer:
        if transfer_learning:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1))})
        else:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1)), is_train: False})
        if first_eval:
            # First batch initialises the accumulators.
            first_eval = False
            pred_array = probabilities
            id_array = eval_batch['ID']
            if save_latents:
                embed_array = embeddings
                gt_array = eval_batch['Y']
        else:
            count = (count + 1)
            pred_array = np.concatenate((pred_array, probabilities), axis=0)
            id_array = np.append(id_array, eval_batch['ID'])
            if save_latents:
                embed_array = np.concatenate((embed_array, embeddings), axis=0)
                gt_array = np.concatenate((gt_array, eval_batch['Y']), axis=0)
    # NOTE(review): if the streamer yields no batches, pred_array is unbound
    # and the next line raises NameError.
    epoch_acc = shared.accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label)
    if printing:
        print(((id_string + ' Number of audios: ') + str(len(ids))))
        print(((id_string + ' Accuracy: ') + str(epoch_acc)))
        print(((id_string + ' Prototypes: ') + str(prototypes.shape)))
    if track_accuracies:
        # NOTE(review): unlike the duplicates below, this version does not
        # prefix the accuracies file with id_string — confirm which is intended.
        fac = open((model_folder + 'epoch_accuracies.tsv'), 'a')
        fac.write((str(epoch_acc) + '\n'))
        fac.close()
    if save_latents:
        print(((id_string + ' Embed_array: ') + str(embed_array.shape)))
        print(((id_string + ' GT: ') + str(gt_array.shape)))
        np.savez((((model_folder + 'embeddings_') + id_string) + '.npz'), embed_array)
        np.savez((model_folder + 'prototypes.npz'), prototypes)
        np.savez((((model_folder + 'gt_') + id_string) + '.npz'), gt_array)
        print('Storing latents for visualization..')
        print('\nPrototypes: ')
        print(prototypes)
    return epoch_acc
|
def fetch_data(classes_vector, label2selectedIDs, id2audio_repr_path, id2gt, config, transfer_learning=False):
    # Load every patch of the selected IDs, grouped per class.
    # Returns [set_dic, gt_dic, id_dic, minimum_number_of_patches,
    # total_number_of_patches] where the first three are keyed by class.
    # NOTE(review): this function is re-defined twice further down the file.
    set_dic = {}
    gt_dic = {}
    id_dic = {}
    minimum_number_of_patches = np.inf
    total_number_of_patches = 0
    for c in classes_vector:
        # Do not buffer more patches than there are IDs for this class.
        preprocess_batch_size = np.min([len(label2selectedIDs[c]), config['preprocess_batch_size']])
        print(('Batch size: ' + str(preprocess_batch_size)))
        print(((('IDs used for computing the category ' + str(c)) + ' prototype: ') + str(label2selectedIDs[c])))
        pack = [config, config['train_sampling'], config['param_train_sampling']]
        if transfer_learning:
            streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
        else:
            streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
        # Exhaust all streams once, accumulating every batch for this class.
        mux_stream = pescador.ChainMux(streams, mode='exhaustive')
        batch_streamer = pescador.Streamer(pescador.buffer_stream, mux_stream, buffer_size=preprocess_batch_size, partial=True)
        first = True
        gt = []
        for batch in batch_streamer:
            if first:
                class_set = batch['X']
                class_gt = batch['Y']
                class_id = batch['ID']
                first = False
            else:
                class_set = np.concatenate((class_set, batch['X']), axis=0)
                class_gt = np.concatenate((class_gt, batch['Y']), axis=0)
                class_id = np.concatenate((class_id, batch['ID']), axis=0)
            print(class_set.shape)
            print(class_gt.shape)
            print(class_id.shape)
        set_dic[c] = class_set
        gt_dic[c] = class_gt
        id_dic[c] = class_id
        minimum_number_of_patches = min(minimum_number_of_patches, class_set.shape[0])
        total_number_of_patches += class_set.shape[0]
    return [set_dic, gt_dic, id_dic, minimum_number_of_patches, total_number_of_patches]
|
def compute_mean_std(index_file, percentage_index_file):
    """Estimate the mean and std of the pre-processed audio representations
    listed in *index_file*, using roughly `percentage_index_file` (a fraction
    in [0, 1]) of its lines.

    Returns:
        (mean, std) computed over the flattened sample.
    """
    index_path = (config_file.DATA_FOLDER + config['audio_representation_folder']) + index_file
    # BUGFIX: the original opened the index file twice (once just to count
    # lines) and never closed either handle; read it once with a context
    # manager instead.
    with open(index_path) as fgt:
        lines = fgt.readlines()
    num_lines = len(lines)
    tmp = np.array([])
    count = 0
    for line in lines:
        id, audio_repr_path, audio_path = line.strip().split('\t')
        with open(config_file.DATA_FOLDER + audio_repr_path, 'rb') as f:
            audio_rep = pickle.load(f)
        print(np.max(audio_rep))
        audio_rep = shared.pre_processing(audio_rep, N_FRAMES, PAD_SHORT, PRE_PROCESSING, AUDIO_REP_TYPE, normalize_mean=None, normalize_std=None)
        print(np.max(audio_rep))
        if count == 0:
            tmp = audio_rep
        else:
            tmp = np.concatenate((tmp, audio_rep), axis=0)
        print(tmp.shape)
        print(str(count) + '/' + str(num_lines))
        count = count + 1
        # Stop once the requested fraction of the index has been sampled.
        if count > (num_lines * percentage_index_file):
            break
    print('Formatting data for computing mean - std!')
    data_sample = tmp.flatten()
    print('Computing mean:')
    mean = np.mean(data_sample)
    print(mean)
    print('Computing std:')
    std = np.std(data_sample)
    print(std)
    return (mean, std)
|
def eval(config, ids, id2audio_repr_path, support_set, id2gt, id2label, tf_vars, vis_vars):
    # Evaluate the prototypical-network model on `ids` and return the
    # clip-level accuracy (patch predictions are averaged per clip).
    # NOTE(review): near-duplicate of the eval defined earlier (this one
    # prefixes the accuracies file with id_string); shadows the builtin eval
    # and is itself shadowed by another identical definition below.
    [id_string, save_latents, track_accuracies, printing, transfer_learning, model_folder] = vis_vars
    if transfer_learning:
        [sess, x, q, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 1]
        eval_streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    else:
        [sess, x, q, is_train, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 42]
        eval_streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    # Exhaust every stream exactly once, batching patches for the forward pass.
    eval_mux_stream = pescador.ChainMux(eval_streams, mode='exhaustive')
    eval_batch_streamer = pescador.Streamer(pescador.buffer_stream, eval_mux_stream, buffer_size=config['test_batch_size'], partial=True)
    first_eval = True
    count = 0
    for eval_batch in eval_batch_streamer:
        if transfer_learning:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1))})
        else:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1)), is_train: False})
        if first_eval:
            # First batch initialises the accumulators.
            first_eval = False
            pred_array = probabilities
            id_array = eval_batch['ID']
            if save_latents:
                embed_array = embeddings
                gt_array = eval_batch['Y']
        else:
            count = (count + 1)
            pred_array = np.concatenate((pred_array, probabilities), axis=0)
            id_array = np.append(id_array, eval_batch['ID'])
            if save_latents:
                embed_array = np.concatenate((embed_array, embeddings), axis=0)
                gt_array = np.concatenate((gt_array, eval_batch['Y']), axis=0)
    # NOTE(review): pred_array is unbound (NameError) if no batches arrived.
    epoch_acc = shared.accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label)
    if printing:
        print(((id_string + ' Number of audios: ') + str(len(ids))))
        print(((id_string + ' Accuracy: ') + str(epoch_acc)))
        print(((id_string + ' Prototypes: ') + str(prototypes.shape)))
    if track_accuracies:
        fac = open(((model_folder + id_string) + 'epoch_accuracies.tsv'), 'a')
        fac.write((str(epoch_acc) + '\n'))
        fac.close()
    if save_latents:
        print(((id_string + ' Embed_array: ') + str(embed_array.shape)))
        print(((id_string + ' GT: ') + str(gt_array.shape)))
        np.savez((((model_folder + 'embeddings_') + id_string) + '.npz'), embed_array)
        np.savez((model_folder + 'prototypes.npz'), prototypes)
        np.savez((((model_folder + 'gt_') + id_string) + '.npz'), gt_array)
        print('Storing latents for visualization..')
        print('\nPrototypes: ')
        print(prototypes)
    return epoch_acc
|
def fetch_data(classes_vector, label2selectedIDs, id2audio_repr_path, id2gt, config, transfer_learning=False):
    # Load every patch of the selected IDs, grouped per class.
    # Returns [set_dic, gt_dic, id_dic, minimum_number_of_patches,
    # total_number_of_patches] where the first three are keyed by class.
    # NOTE(review): duplicate definition — this function appears three times
    # in this file; the last definition is the one in effect.
    set_dic = {}
    gt_dic = {}
    id_dic = {}
    minimum_number_of_patches = np.inf
    total_number_of_patches = 0
    for c in classes_vector:
        # Do not buffer more patches than there are IDs for this class.
        preprocess_batch_size = np.min([len(label2selectedIDs[c]), config['preprocess_batch_size']])
        print(('Batch size: ' + str(preprocess_batch_size)))
        print(((('IDs used for computing the category ' + str(c)) + ' prototype: ') + str(label2selectedIDs[c])))
        pack = [config, config['train_sampling'], config['param_train_sampling']]
        if transfer_learning:
            streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
        else:
            streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
        # Exhaust all streams once, accumulating every batch for this class.
        mux_stream = pescador.ChainMux(streams, mode='exhaustive')
        batch_streamer = pescador.Streamer(pescador.buffer_stream, mux_stream, buffer_size=preprocess_batch_size, partial=True)
        first = True
        gt = []
        for batch in batch_streamer:
            if first:
                class_set = batch['X']
                class_gt = batch['Y']
                class_id = batch['ID']
                first = False
            else:
                class_set = np.concatenate((class_set, batch['X']), axis=0)
                class_gt = np.concatenate((class_gt, batch['Y']), axis=0)
                class_id = np.concatenate((class_id, batch['ID']), axis=0)
            print(class_set.shape)
            print(class_gt.shape)
            print(class_id.shape)
        set_dic[c] = class_set
        gt_dic[c] = class_gt
        id_dic[c] = class_id
        minimum_number_of_patches = min(minimum_number_of_patches, class_set.shape[0])
        total_number_of_patches += class_set.shape[0]
    return [set_dic, gt_dic, id_dic, minimum_number_of_patches, total_number_of_patches]
|
def eval(config, ids, id2audio_repr_path, support_set, id2gt, id2label, tf_vars, vis_vars):
    # Evaluate the prototypical-network model on `ids` and return the
    # clip-level accuracy (patch predictions are averaged per clip).
    # NOTE(review): third definition of `eval` in this file — this is the one
    # in effect at import time; it also shadows the builtin eval.
    [id_string, save_latents, track_accuracies, printing, transfer_learning, model_folder] = vis_vars
    if transfer_learning:
        [sess, x, q, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 1]
        eval_streams = [pescador.Streamer(transfer_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    else:
        [sess, x, q, is_train, log_p_y, emb_q, emb_prototypes] = tf_vars
        pack = [config, 'overlap_sampling', 42]
        eval_streams = [pescador.Streamer(sl_train.data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in ids]
    # Exhaust every stream exactly once, batching patches for the forward pass.
    eval_mux_stream = pescador.ChainMux(eval_streams, mode='exhaustive')
    eval_batch_streamer = pescador.Streamer(pescador.buffer_stream, eval_mux_stream, buffer_size=config['test_batch_size'], partial=True)
    first_eval = True
    count = 0
    for eval_batch in eval_batch_streamer:
        if transfer_learning:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1))})
        else:
            [probabilities, embeddings, prototypes] = sess.run([log_p_y, emb_q, emb_prototypes], feed_dict={x: support_set, q: np.expand_dims(eval_batch['X'], axis=(- 1)), is_train: False})
        if first_eval:
            # First batch initialises the accumulators.
            first_eval = False
            pred_array = probabilities
            id_array = eval_batch['ID']
            if save_latents:
                embed_array = embeddings
                gt_array = eval_batch['Y']
        else:
            count = (count + 1)
            pred_array = np.concatenate((pred_array, probabilities), axis=0)
            id_array = np.append(id_array, eval_batch['ID'])
            if save_latents:
                embed_array = np.concatenate((embed_array, embeddings), axis=0)
                gt_array = np.concatenate((gt_array, eval_batch['Y']), axis=0)
    # NOTE(review): pred_array is unbound (NameError) if no batches arrived.
    epoch_acc = shared.accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label)
    if printing:
        print(((id_string + ' Number of audios: ') + str(len(ids))))
        print(((id_string + ' Accuracy: ') + str(epoch_acc)))
        print(((id_string + ' Prototypes: ') + str(prototypes.shape)))
    if track_accuracies:
        fac = open(((model_folder + id_string) + 'epoch_accuracies.tsv'), 'a')
        fac.write((str(epoch_acc) + '\n'))
        fac.close()
    if save_latents:
        print(((id_string + ' Embed_array: ') + str(embed_array.shape)))
        print(((id_string + ' GT: ') + str(gt_array.shape)))
        np.savez((((model_folder + 'embeddings_') + id_string) + '.npz'), embed_array)
        np.savez((model_folder + 'prototypes.npz'), prototypes)
        np.savez((((model_folder + 'gt_') + id_string) + '.npz'), gt_array)
        print('Storing latents for visualization..')
        print('\nPrototypes: ')
        print(prototypes)
    return epoch_acc
|
def fetch_data(classes_vector, label2selectedIDs, id2audio_repr_path, id2gt, config, transfer_learning=False):
    """Collect every patch of the selected IDs, grouped per class.

    Returns [set_dic, gt_dic, id_dic, minimum_number_of_patches,
    total_number_of_patches], where the first three dicts are keyed by class.
    """
    set_dic = {}
    gt_dic = {}
    id_dic = {}
    minimum_number_of_patches = np.inf
    total_number_of_patches = 0
    for c in classes_vector:
        # Never buffer more patches at once than there are IDs for the class.
        preprocess_batch_size = np.min([len(label2selectedIDs[c]), config['preprocess_batch_size']])
        print('Batch size: ' + str(preprocess_batch_size))
        print('IDs used for computing the category ' + str(c) + ' prototype: ' + str(label2selectedIDs[c]))
        pack = [config, config['train_sampling'], config['param_train_sampling']]
        data_gen = transfer_train.data_gen if transfer_learning else sl_train.data_gen
        streams = [pescador.Streamer(data_gen, id, id2audio_repr_path[id], id2gt[id], pack) for id in label2selectedIDs[c]]
        # Exhaust every stream once, accumulating all batches for this class.
        mux_stream = pescador.ChainMux(streams, mode='exhaustive')
        batch_streamer = pescador.Streamer(pescador.buffer_stream, mux_stream, buffer_size=preprocess_batch_size, partial=True)
        class_set = None
        class_gt = None
        class_id = None
        for batch in batch_streamer:
            if class_set is None:
                class_set = batch['X']
                class_gt = batch['Y']
                class_id = batch['ID']
            else:
                class_set = np.concatenate((class_set, batch['X']), axis=0)
                class_gt = np.concatenate((class_gt, batch['Y']), axis=0)
                class_id = np.concatenate((class_id, batch['ID']), axis=0)
            print(class_set.shape)
            print(class_gt.shape)
            print(class_id.shape)
        set_dic[c] = class_set
        gt_dic[c] = class_gt
        id_dic[c] = class_id
        minimum_number_of_patches = min(minimum_number_of_patches, class_set.shape[0])
        total_number_of_patches += class_set.shape[0]
    return [set_dic, gt_dic, id_dic, minimum_number_of_patches, total_number_of_patches]
|
def euclidean_distance(a, b):
    # Pairwise distance between rows of a (N, D) and rows of b (M, D) -> (N, M).
    # NOTE(review): despite the name, this returns the MEAN of squared
    # per-dimension differences (no sum or sqrt). That is proportional to the
    # squared Euclidean distance, so nearest-prototype ordering is unchanged,
    # but absolute values differ from a true Euclidean distance.
    (N, D) = (tf.shape(a)[0], tf.shape(a)[1])
    M = tf.shape(b)[0]
    # Broadcast both tensors to (N, M, D) via tiling.
    a = tf.tile(tf.expand_dims(a, axis=1), (1, M, 1))
    b = tf.tile(tf.expand_dims(b, axis=0), (N, 1, 1))
    return tf.reduce_mean(tf.square((a - b)), axis=2)
|
def cosine_distance(a, b):
    """Pairwise cosine distance (1 - cosine similarity) between rows of a and b."""
    unit_a = tf.nn.l2_normalize(a, axis=1)
    unit_b = tf.nn.l2_normalize(b, axis=1)
    similarity = tf.matmul(unit_a, unit_b, adjoint_b=True)
    return 1 - similarity
|
def get_epoch_time():
    """Return the current Unix timestamp in whole seconds.

    BUGFIX: the previous implementation subtracted a naive UTC epoch from the
    *local* wall-clock `datetime.now()`, so the result was offset from true
    Unix time by the machine's UTC offset.
    """
    import time  # local import keeps the fix self-contained
    return int(time.time())
|
def label2onehot_exp(label, experiment_classes):
    """One-hot encode *label* against the ordered list *experiment_classes*."""
    class_arr = np.array(experiment_classes)
    vec = np.zeros(len(experiment_classes))
    vec[int(np.squeeze(np.where(label == class_arr)))] = 1
    return vec
|
def label2onehot(label, length):
    """Return a float vector of size *length* with a 1 at index *label*."""
    vec = np.zeros(length)
    vec[label] = 1
    return vec
|
def onehot2label(gt):
    """Return the index of the (unique) maximum of one-hot vector *gt*.

    BUGFIX: `np.int` was removed in NumPy 1.24 (AttributeError on modern
    NumPy); use the builtin int with np.argmax, which is equivalent for a
    well-formed one-hot input.
    """
    return int(np.argmax(np.array(gt)))
|
def count_params(trainable_variables):
    """Total number of scalar parameters across *trainable_variables*."""
    total = 0
    for v in trainable_variables:
        total += np.prod(v.get_shape().as_list())
    return total
|
def load_id2label(gt_file):
    """Read a TSV of (id, one-hot ground truth) rows.

    Returns (ids, id2label): the ids in file order and a dict mapping each id
    to its integer label.
    """
    ids = []
    id2label = dict()
    # BUGFIX: context manager — the original never closed the file handle.
    with open(gt_file) as fgt:
        for line in fgt:
            id, gt = line.strip().split('\t')
            # SECURITY: eval() executes arbitrary code from the file; only use
            # with trusted ground-truth files (ast.literal_eval would be safer).
            id2label[id] = onehot2label(eval(gt))
            ids.append(id)
    return (ids, id2label)
|
def load_id2gt(gt_file):
    """Read a TSV of (id, ground-truth literal) rows.

    Returns (ids, id2gt): the ids in file order and a dict mapping each id to
    its parsed ground-truth value.
    """
    ids = []
    id2gt = dict()
    # BUGFIX: context manager — the original never closed the file handle.
    with open(gt_file) as fgt:
        for line in fgt:
            id, gt = line.strip().split('\t')
            # SECURITY: eval() executes arbitrary code from the file; only use
            # with trusted ground-truth files (ast.literal_eval would be safer).
            id2gt[id] = eval(gt)
            ids.append(id)
    return (ids, id2gt)
|
def load_label2ids(id2label):
    """Invert an id->label mapping into label->[ids] (insertion order kept)."""
    label2ids = {}
    for identifier, label in id2label.items():
        label2ids.setdefault(label, []).append(identifier)
    return label2ids
|
def load_id2audiopath(index_file):
    """Read a TSV of (id, audio path) rows into a dict mapping id -> path."""
    id2audiopath = dict()
    # BUGFIX: context manager — the original never closed the file handle.
    with open(index_file) as f:
        for line in f:
            id, path = line.strip().split('\t')
            id2audiopath[id] = path
    return id2audiopath
|
def load_id2audioReprPath(index_file):
    """Read a TSV of (id, representation path, audio path) rows.

    Returns (audioReprPaths, id2audioReprPath): representation paths in file
    order and a dict mapping each id to its representation path.
    """
    audioReprPaths = []
    id2audioReprPath = dict()
    # BUGFIX: context manager — the original never closed the file handle.
    with open(index_file) as fspec:
        for line in fspec:
            id, path, _ = line.strip().split('\t')
            id2audioReprPath[id] = path
            audioReprPaths.append(path)
    return (audioReprPaths, id2audioReprPath)
|
def load_id2length(index_file):
    """Read a TSV of (id, length) rows into a dict mapping id -> int length."""
    id2length = dict()
    # BUGFIX: context manager — the original never closed the file handle.
    with open(index_file) as f:
        for line in f:
            id, length = line.strip().split('\t')
            id2length[id] = int(length)
    return id2length
|
def accuracy_with_aggergated_predictions(pred_array, id_array, ids, id2label):
    """Clip-level accuracy: average all patch predictions per id, take the
    argmax, and compare against id2label.

    NOTE: the misspelling ('aggergated') is kept because callers use it.
    """
    y_pred = []
    y_true = []
    for id in ids:
        try:
            # Average all patch predictions belonging to this clip.
            avg = np.mean(pred_array[np.where(id_array == id)], axis=0)
            # argmax returns the first maximum, matching the original
            # np.where(avg == max(avg))[0][0] lookup.
            y_pred.append(int(np.argmax(avg)))
            y_true.append(int(id2label[id]))
        except Exception:
            # BUGFIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; report the failing id instead.
            print(id)
    return accuracy_score(y_true, y_pred)
|
def few_shot_data_preparation(all_ids_train, all_ids_test, classes_vector, label2ids_train, label2ids_test, config):
    """Select the train/test ids for an n-shot experiment.

    With config['n_shot'] == np.inf every training id is used; otherwise
    n_shot ids are sampled at random per class.

    Returns:
        [ids_train, ids_test, label2selectedIDs].
    """
    if config['n_shot'] == np.inf:
        ids_train = all_ids_train
        ids_test = all_ids_test
        print('Train IDs: ALL!')
        label2selectedIDs = {c: label2ids_train[c] for c in classes_vector}
    else:
        first = True
        label2selectedIDs = {}
        for c in classes_vector:
            # BUGFIX: removed the redundant inner `n_shot != np.inf` check —
            # this branch is only reachable when n_shot is finite, so the
            # old else-arm was dead code.
            ids_class_train = random.sample(label2ids_train[c], config['n_shot'])
            if first:
                ids_train = ids_class_train
                ids_test = label2ids_test[c]
                first = False
            else:
                ids_train = np.concatenate((ids_train, ids_class_train), axis=0)
                ids_test = np.concatenate((ids_test, label2ids_test[c]), axis=0)
            label2selectedIDs[c] = ids_class_train
        print(('\nTrain IDs: ' + str(ids_train)) + '\n')
    return [ids_train, ids_test, label2selectedIDs]
|
def audioset_model(input_signal, reuse=False):
    # VGGish convolutional trunk (AudioSet layer layout, under the 'vggish'
    # scope so pre-trained checkpoints map onto it) followed by a 10-way
    # linear classification head under the separate 'my_model' scope.
    # Returns the (batch, 10) logits tensor.
    slim = tf.contrib.slim
    with slim.arg_scope([slim.conv2d, slim.fully_connected], weights_initializer=tf.truncated_normal_initializer(stddev=vggish_params.INIT_STDDEV), biases_initializer=tf.zeros_initializer(), activation_fn=tf.nn.relu, trainable=True), slim.arg_scope([slim.conv2d], kernel_size=[3, 3], stride=1, padding='SAME'), slim.arg_scope([slim.max_pool2d], kernel_size=[2, 2], stride=2, padding='SAME'), tf.variable_scope('vggish', reuse=reuse):
        net = slim.conv2d(input_signal, 64, scope='conv1')
        net = slim.max_pool2d(net, scope='pool1')
        net = slim.conv2d(net, 128, scope='conv2')
        net = slim.max_pool2d(net, scope='pool2')
        net = slim.repeat(net, 2, slim.conv2d, 256, scope='conv3')
        net = slim.max_pool2d(net, scope='pool3')
        net = slim.repeat(net, 2, slim.conv2d, 512, scope='conv4')
        net = slim.max_pool2d(net, scope='pool4')
        net = slim.flatten(net)
        net = slim.repeat(net, 2, slim.fully_connected, 4096, scope='fc1')
        # 128-D VGGish embedding (penultimate layer of the published model).
        net = slim.fully_connected(net, vggish_params.EMBEDDING_SIZE, scope='fc2')
        embeddings = tf.identity(net, name='embedding')
    with tf.variable_scope('my_model', reuse=reuse):
        return slim.fully_connected(embeddings, 10, activation_fn=None, scope='logits')
|
def waveform_to_examples(data, sample_rate):
    'Converts audio waveform into an array of examples for VGGish.\n\n  Args:\n    data: np.array of either one dimension (mono) or two dimensions\n      (multi-channel, with the outer dimension representing channels).\n      Each sample is generally expected to lie in the range [-1.0, +1.0],\n      although this is not required.\n    sample_rate: Sample rate of data.\n\n  Returns:\n    3-D np.array of shape [num_examples, num_frames, num_bands] which represents\n    a sequence of examples, each of which contains a patch of log mel\n    spectrogram, covering num_frames frames of audio and num_bands mel frequency\n    bands, where the frame length is vggish_params.STFT_HOP_LENGTH_SECONDS.\n  '
    # Down-mix multi-channel audio to mono.
    if (len(data.shape) > 1):
        data = np.mean(data, axis=1)
    # Resample to the rate VGGish was trained on, if needed.
    if (sample_rate != vggish_params.SAMPLE_RATE):
        data = resampy.resample(data, sample_rate, vggish_params.SAMPLE_RATE)
    log_mel = mel_features.log_mel_spectrogram(data, audio_sample_rate=vggish_params.SAMPLE_RATE, log_offset=vggish_params.LOG_OFFSET, window_length_secs=vggish_params.STFT_WINDOW_LENGTH_SECONDS, hop_length_secs=vggish_params.STFT_HOP_LENGTH_SECONDS, num_mel_bins=vggish_params.NUM_MEL_BINS, lower_edge_hertz=vggish_params.MEL_MIN_HZ, upper_edge_hertz=vggish_params.MEL_MAX_HZ)
    # Frame the spectrogram into fixed-length (possibly overlapping) examples.
    features_sample_rate = (1.0 / vggish_params.STFT_HOP_LENGTH_SECONDS)
    example_window_length = int(round((vggish_params.EXAMPLE_WINDOW_SECONDS * features_sample_rate)))
    example_hop_length = int(round((vggish_params.EXAMPLE_HOP_SECONDS * features_sample_rate)))
    log_mel_examples = mel_features.frame(log_mel, window_length=example_window_length, hop_length=example_hop_length)
    return log_mel_examples
|
def wavfile_to_examples(wav_file):
    """Convenience wrapper around waveform_to_examples() for a common WAV format.

    Args:
        wav_file: String path to a file, or a file-like object, containing
            WAV audio; it is round-tripped through a 16-bit PCM temp file.

    Returns:
        See waveform_to_examples.
    """
    import os
    import tempfile
    import soundfile as sf
    data, samplerate = sf.read(wav_file)
    # BUGFIX: use tempfile.mkstemp instead of a random numeric filename in the
    # current directory (race-prone and could collide with an existing file);
    # the try/finally guarantees cleanup even if a read/write fails.
    fd, tmp_name = tempfile.mkstemp(suffix='.wav')
    os.close(fd)
    try:
        sf.write(tmp_name, data, samplerate, subtype='PCM_16')
        sr, wav_data = wavfile.read(tmp_name)
    finally:
        os.remove(tmp_name)
    assert wav_data.dtype == np.int16, 'Bad sample type: %r' % wav_data.dtype
    samples = wav_data / 32768.0  # int16 PCM -> [-1.0, +1.0)
    # Tile the clip until it is at least one second long, then crop to 1 s.
    src_repeat = samples
    while src_repeat.shape[0] < sr:
        src_repeat = np.concatenate((src_repeat, samples), axis=0)
    samples = src_repeat[:sr]
    return waveform_to_examples(samples, sr)
|
def define_vggish_slim(training=False):
    """Builds the VGGish graph in the current default graph under scope 'vggish/'.

    Creates a float32 placeholder 'vggish/input_features' of shape
    [batch_size, num_frames, num_bands] (batch size variable) that holds
    log-mel-spectrogram patches, and an output op 'vggish/embedding' that
    yields the activations of a 128-D embedding layer (usually the
    penultimate layer of a full model with a classifier head).

    Args:
      training: If true, all parameters are marked trainable.

    Returns:
      The op 'vggish/embeddings'.
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_initializer=tf.truncated_normal_initializer(stddev=params.INIT_STDDEV),
                        biases_initializer=tf.zeros_initializer(),
                        activation_fn=tf.nn.relu,
                        trainable=training), \
         slim.arg_scope([slim.conv2d], kernel_size=[3, 3], stride=1, padding='SAME'), \
         slim.arg_scope([slim.max_pool2d], kernel_size=[2, 2], stride=2, padding='SAME'), \
         tf.variable_scope('vggish'):
        inputs = tf.placeholder(tf.float32, shape=(None, params.NUM_FRAMES, params.NUM_BANDS), name='input_features')
        # Add a trailing channel axis so conv2d sees NHWC input.
        h = tf.reshape(inputs, [-1, params.NUM_FRAMES, params.NUM_BANDS, 1])
        # VGG-style conv stack: conv(+conv) followed by 2x2 max-pooling.
        h = slim.max_pool2d(slim.conv2d(h, 64, scope='conv1'), scope='pool1')
        h = slim.max_pool2d(slim.conv2d(h, 128, scope='conv2'), scope='pool2')
        h = slim.max_pool2d(slim.repeat(h, 2, slim.conv2d, 256, scope='conv3'), scope='pool3')
        h = slim.max_pool2d(slim.repeat(h, 2, slim.conv2d, 512, scope='conv4'), scope='pool4')
        # Fully-connected head producing the embedding.
        h = slim.flatten(h)
        h = slim.repeat(h, 2, slim.fully_connected, 4096, scope='fc1')
        h = slim.fully_connected(h, params.EMBEDDING_SIZE, scope='fc2')
        return tf.identity(h, name='embedding')
|
def load_vggish_slim_checkpoint(session, checkpoint_path):
    """Loads a pre-trained VGGish-compatible checkpoint.

    This function can be used as an initialization function (referred to as
    init_fn in TensorFlow documentation) which is called in a Session after
    initializating all variables. When used as an init_fn, this will load
    a pre-trained checkpoint that is compatible with the VGGish model
    definition. Only variables defined by VGGish will be loaded.

    Args:
      session: an active TensorFlow session.
      checkpoint_path: path to a file containing a checkpoint that is
        compatible with the VGGish model definition.
    """
    # Build a throwaway copy of the VGGish graph purely to collect the names
    # of the variables VGGish defines; the temporary graph is then discarded.
    with tf.Graph().as_default():
        define_vggish_slim(training=False)
        vggish_var_names = [v.name for v in tf.global_variables()]
    # Filter the *caller's* current graph for variables with those names.
    # (Previously this filter and the Saver were created inside the temporary
    # graph, which made the name filter a tautology and placed the Saver's
    # restore ops in the wrong graph for `session`.)
    vggish_vars = [v for v in tf.global_variables() if (v.name in vggish_var_names)]
    saver = tf.train.Saver(vggish_vars, name='vggish_load_pretrained', write_version=1)
    saver.restore(session, checkpoint_path)
|
class BaseAgent(object):
    """Abstract base class for agent objects.

    Subclass this and override the hook methods below to implement a
    concrete agent.
    """

    def __init__(self, env, policy, logger, storage, device, num_checkpoints):
        """
        env: (gym.Env) environment following the openAI Gym API
        """
        # Stash the collaborators; subclasses read these attributes directly.
        self.env = env
        self.policy = policy
        self.logger = logger
        self.storage = storage
        self.device = device
        self.num_checkpoints = num_checkpoints
        # Global environment-step counter, advanced by training loops.
        self.t = 0

    def predict(self, obs):
        """Predict the action for the given input observation."""
        pass

    def update_policy(self):
        """Train the neural network model."""
        pass

    def train(self, num_timesteps):
        """Train the agent while collecting trajectories."""
        pass

    def evaluate(self):
        """Evaluate the agent."""
        pass
|
class PPO(BaseAgent):
    """Proximal Policy Optimization agent (clipped surrogate objective).

    Collects `n_steps` transitions from each of `n_envs` parallel
    environments into `storage`, then runs `epoch` passes of minibatch
    gradient descent on the clipped policy/value losses, accumulating
    gradients when the minibatch is smaller than the per-epoch batch.
    """

    def __init__(self, env, policy, logger, storage, device, n_checkpoints, n_steps=128, n_envs=8, epoch=3, mini_batch_per_epoch=8, mini_batch_size=(32 * 8), gamma=0.99, lmbda=0.95, learning_rate=0.00025, grad_clip_norm=0.5, eps_clip=0.2, value_coef=0.5, entropy_coef=0.01, normalize_adv=True, normalize_rew=True, use_gae=True, **kwargs):
        super(PPO, self).__init__(env, policy, logger, storage, device, n_checkpoints)
        self.n_steps = n_steps  # rollout length per environment
        self.n_envs = n_envs  # number of parallel environments
        self.epoch = epoch  # optimization passes over each rollout
        self.mini_batch_per_epoch = mini_batch_per_epoch
        self.mini_batch_size = mini_batch_size
        self.gamma = gamma  # discount factor
        self.lmbda = lmbda  # GAE lambda
        self.learning_rate = learning_rate
        self.optimizer = optim.Adam(self.policy.parameters(), lr=learning_rate, eps=1e-05)
        self.grad_clip_norm = grad_clip_norm  # global-norm gradient clipping threshold
        self.eps_clip = eps_clip  # PPO clipping radius for ratio and value
        self.value_coef = value_coef  # weight of the value loss
        self.entropy_coef = entropy_coef  # weight of the entropy bonus
        self.normalize_adv = normalize_adv
        self.normalize_rew = normalize_rew
        self.use_gae = use_gae

    def predict(self, obs, hidden_state, done):
        """Sample actions for a batch of observations (no gradient tracking).

        Returns (action, log_prob_act, value, next_hidden_state) as numpy arrays.
        """
        with torch.no_grad():
            obs = torch.FloatTensor(obs).to(device=self.device)
            hidden_state = torch.FloatTensor(hidden_state).to(device=self.device)
            # mask is 0 where the previous step ended an episode, so the
            # policy's recurrent state (if any) can be reset there.
            mask = torch.FloatTensor((1 - done)).to(device=self.device)
            (dist, value, hidden_state) = self.policy(obs, hidden_state, mask)
            act = dist.sample()
            log_prob_act = dist.log_prob(act)
        return (act.cpu().numpy(), log_prob_act.cpu().numpy(), value.cpu().numpy(), hidden_state.cpu().numpy())

    def optimize(self):
        """Run `epoch` passes of minibatch PPO updates over the stored rollout.

        Returns:
            dict: mean policy, value, and entropy losses for logging.
        """
        (pi_loss_list, value_loss_list, entropy_loss_list) = ([], [], [])
        batch_size = ((self.n_steps * self.n_envs) // self.mini_batch_per_epoch)
        # The rollout can't supply a minibatch larger than the batch itself.
        if (batch_size < self.mini_batch_size):
            self.mini_batch_size = batch_size
        # Accumulate gradients over several minibatches before each optimizer
        # step so the effective batch size stays equal to batch_size.
        grad_accumulation_steps = (batch_size / self.mini_batch_size)
        grad_accumulation_cnt = 1
        self.policy.train()
        for e in range(self.epoch):
            recurrent = self.policy.is_recurrent()
            generator = self.storage.fetch_train_generator(mini_batch_size=self.mini_batch_size, recurrent=recurrent)
            for sample in generator:
                (obs_batch, hidden_state_batch, act_batch, done_batch, old_log_prob_act_batch, old_value_batch, return_batch, adv_batch) = sample
                mask_batch = (1 - done_batch)
                (dist_batch, value_batch, _) = self.policy(obs_batch, hidden_state_batch, mask_batch)
                # Clipped surrogate policy objective (PPO).
                log_prob_act_batch = dist_batch.log_prob(act_batch)
                ratio = torch.exp((log_prob_act_batch - old_log_prob_act_batch))
                surr1 = (ratio * adv_batch)
                surr2 = (torch.clamp(ratio, (1.0 - self.eps_clip), (1.0 + self.eps_clip)) * adv_batch)
                pi_loss = (- torch.min(surr1, surr2).mean())
                # PPO2-style clipped value loss against the old value estimates.
                clipped_value_batch = (old_value_batch + (value_batch - old_value_batch).clamp((- self.eps_clip), self.eps_clip))
                v_surr1 = (value_batch - return_batch).pow(2)
                v_surr2 = (clipped_value_batch - return_batch).pow(2)
                value_loss = (0.5 * torch.max(v_surr1, v_surr2).mean())
                # Entropy bonus encourages exploration (subtracted from loss).
                entropy_loss = dist_batch.entropy().mean()
                loss = ((pi_loss + (self.value_coef * value_loss)) - (self.entropy_coef * entropy_loss))
                loss.backward()
                # Step the optimizer only once per accumulation window.
                if ((grad_accumulation_cnt % grad_accumulation_steps) == 0):
                    torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.grad_clip_norm)
                    self.optimizer.step()
                    self.optimizer.zero_grad()
                grad_accumulation_cnt += 1
                pi_loss_list.append(pi_loss.item())
                value_loss_list.append(value_loss.item())
                entropy_loss_list.append(entropy_loss.item())
        summary = {'Loss/pi': np.mean(pi_loss_list), 'Loss/v': np.mean(value_loss_list), 'Loss/entropy': np.mean(entropy_loss_list)}
        return summary

    def train(self, num_timesteps):
        """Main loop: alternate rollout collection and optimization until
        `num_timesteps` environment steps have been consumed, checkpointing
        the policy `num_checkpoints` times along the way."""
        save_every = (num_timesteps // self.num_checkpoints)
        checkpoint_cnt = 0
        obs = self.env.reset()
        hidden_state = np.zeros((self.n_envs, self.storage.hidden_state_size))
        done = np.zeros(self.n_envs)
        while (self.t < num_timesteps):
            # --- collect a rollout of n_steps transitions per env ---
            self.policy.eval()
            for _ in range(self.n_steps):
                (act, log_prob_act, value, next_hidden_state) = self.predict(obs, hidden_state, done)
                (next_obs, rew, done, info) = self.env.step(act)
                self.storage.store(obs, hidden_state, act, rew, done, info, log_prob_act, value)
                obs = next_obs
                hidden_state = next_hidden_state
            # Bootstrap value of the state after the rollout, then compute
            # returns/advantages before optimizing.
            (_, _, last_val, hidden_state) = self.predict(obs, hidden_state, done)
            self.storage.store_last(obs, hidden_state, last_val)
            self.storage.compute_estimates(self.gamma, self.lmbda, self.use_gae, self.normalize_adv)
            # --- optimize on the freshly collected data ---
            summary = self.optimize()
            self.t += (self.n_steps * self.n_envs)
            (rew_batch, done_batch) = self.storage.fetch_log_data()
            self.logger.feed(rew_batch, done_batch)
            self.logger.write_summary(summary)
            self.logger.dump()
            # adjust_lr presumably anneals the learning rate over training
            # (defined elsewhere in the project) — confirm its schedule there.
            self.optimizer = adjust_lr(self.optimizer, self.learning_rate, self.t, num_timesteps)
            # Periodic checkpointing of the policy weights.
            if (self.t > ((checkpoint_cnt + 1) * save_every)):
                torch.save({'state_dict': self.policy.state_dict()}, (((self.logger.logdir + '/model_') + str(self.t)) + '.pth'))
                checkpoint_cnt += 1
        self.env.close()
|
class NoopResetEnv(gym.Wrapper):
    """Sample initial states by taking a random number of no-ops on reset.

    No-op is assumed to be action 0.
    """

    def __init__(self, env, noop_max=30):
        gym.Wrapper.__init__(self, env)
        self.noop_max = noop_max
        # When set, forces the exact number of no-ops instead of sampling.
        self.override_num_noops = None
        self.noop_action = 0
        assert (env.unwrapped.get_action_meanings()[0] == 'NOOP')

    def reset(self, **kwargs):
        """Do no-op action for a number of steps in [1, noop_max]."""
        self.env.reset(**kwargs)
        noops = self.override_num_noops
        if noops is None:
            noops = self.unwrapped.np_random.randint(1, self.noop_max + 1)
        assert noops > 0
        obs = None
        for _ in range(noops):
            obs, _, done, _ = self.env.step(self.noop_action)
            # Re-reset if the no-ops somehow ended the episode.
            if done:
                obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
|
class FireResetEnv(gym.Wrapper):
    """Press FIRE on reset for environments that stay frozen until firing."""

    def __init__(self, env):
        """Take action on reset for environments that are fixed until firing."""
        gym.Wrapper.__init__(self, env)
        assert (env.unwrapped.get_action_meanings()[1] == 'FIRE')
        assert (len(env.unwrapped.get_action_meanings()) >= 3)

    def reset(self, **kwargs):
        """Reset, then take actions 1 (FIRE) and 2 to start the episode.

        If a priming step ends the episode, reset again and use the fresh
        observation. (The original discarded the re-reset observation and
        could return a stale post-done frame.)
        """
        obs = self.env.reset(**kwargs)
        (obs, _, done, _) = self.env.step(1)
        if done:
            obs = self.env.reset(**kwargs)
        (obs, _, done, _) = self.env.step(2)
        if done:
            obs = self.env.reset(**kwargs)
        return obs

    def step(self, ac):
        return self.env.step(ac)
|
class EpisodicLifeEnv(gym.Wrapper):
    """Make end-of-life == end-of-episode, but only reset on true game over.

    Done by DeepMind for the DQN and co. since it helps value estimation.
    """

    def __init__(self, env):
        gym.Wrapper.__init__(self, env)
        self.lives = 0
        self.was_real_done = True

    def step(self, action):
        obs, reward, done, info = self.env.step(action)
        self.was_real_done = done
        # A lost life (while some remain) is reported to the agent as done.
        current_lives = self.env.unwrapped.ale.lives()
        if 0 < current_lives < self.lives:
            done = True
        self.lives = current_lives
        # Expose the true game-over flag for logging/bookkeeping.
        info['env_done'] = self.was_real_done
        return obs, reward, done, info

    def reset(self, **kwargs):
        """Reset only when lives are exhausted.

        Otherwise advance one no-op step, so all states remain reachable even
        though lives are episodic, without the learner needing to know.
        """
        if self.was_real_done:
            obs = self.env.reset(**kwargs)
        else:
            obs, _, _, _ = self.env.step(0)
        self.lives = self.env.unwrapped.ale.lives()
        return obs
|
class MaxAndSkipEnv(gym.Wrapper):
    """Return only every `skip`-th frame, max-pooled over the final two."""

    def __init__(self, env, skip=4):
        gym.Wrapper.__init__(self, env)
        # Buffer holding the last two raw frames for max-pooling.
        self._obs_buffer = np.zeros((2,) + env.observation_space.shape, dtype=np.uint8)
        self._skip = skip

    def step(self, action):
        """Repeat action, sum reward, and max over last observations."""
        total_reward = 0.0
        done = None
        for i in range(self._skip):
            obs, reward, done, info = self.env.step(action)
            # Keep only the two final frames (smooths Atari sprite flicker).
            if i == self._skip - 2:
                self._obs_buffer[0] = obs
            elif i == self._skip - 1:
                self._obs_buffer[1] = obs
            total_reward += reward
            if done:
                break
        max_frame = self._obs_buffer.max(axis=0)
        return max_frame, total_reward, done, info

    def reset(self, **kwargs):
        return self.env.reset(**kwargs)
|
class ClipRewardEnv(gym.RewardWrapper):
    """Clip rewards to their sign, keeping the raw reward available in info."""

    def __init__(self, env):
        gym.RewardWrapper.__init__(self, env)

    def reward(self, reward):
        """Bin reward to {+1, 0, -1} by its sign."""
        return np.sign(reward)

    def step(self, act):
        """Step the env, record the raw reward under info['env_reward'],
        and return the sign-clipped reward.

        (The previous override returned the raw reward, so the clipping this
        wrapper exists for was never applied.)
        """
        (s, rew, done, info) = self.env.step(act)
        info['env_reward'] = rew
        return (s, self.reward(rew), done, info)
|
class WarpFrame(gym.ObservationWrapper):
    def __init__(self, env, width=84, height=84, grayscale=True, dict_space_key=None):
        """Warp frames to 84x84 as done in the Nature paper and later work.

        If the environment uses dictionary observations, `dict_space_key`
        selects which entry gets warped.
        """
        super().__init__(env)
        self._width = width
        self._height = height
        self._grayscale = grayscale
        self._key = dict_space_key
        num_colors = 1 if self._grayscale else 3
        new_space = gym.spaces.Box(low=0, high=255, shape=(self._height, self._width, num_colors), dtype=np.uint8)
        if self._key is None:
            original_space = self.observation_space
            self.observation_space = new_space
        else:
            original_space = self.observation_space.spaces[self._key]
            self.observation_space.spaces[self._key] = new_space
        # Only 8-bit HWC image observations are supported as input.
        assert original_space.dtype == np.uint8 and len(original_space.shape) == 3

    def observation(self, obs):
        frame = obs if self._key is None else obs[self._key]
        if self._grayscale:
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = cv2.resize(frame, (self._width, self._height), interpolation=cv2.INTER_AREA)
        if self._grayscale:
            # cv2 drops the channel axis when converting to grayscale; restore it.
            frame = np.expand_dims(frame, -1)
        if self._key is None:
            return frame
        obs = obs.copy()
        obs[self._key] = frame
        return obs
|
class FrameStack(gym.Wrapper):
    def __init__(self, env, k):
        """Stack the k most recent frames.

        Returns a lazy array, which is much more memory efficient.

        See Also
        --------
        baselines.common.atari_wrappers.LazyFrames
        """
        gym.Wrapper.__init__(self, env)
        self.k = k
        self.frames = deque([], maxlen=k)
        shp = env.observation_space.shape
        # The channel axis grows k-fold; spatial dims are unchanged.
        stacked_shape = shp[:-1] + (shp[-1] * k,)
        self.observation_space = spaces.Box(low=0, high=255, shape=stacked_shape, dtype=env.observation_space.dtype)

    def reset(self):
        first = self.env.reset()
        # Seed the buffer by repeating the initial frame k times.
        for _ in range(self.k):
            self.frames.append(first)
        return self._get_ob()

    def step(self, action):
        ob, reward, done, info = self.env.step(action)
        self.frames.append(ob)
        return self._get_ob(), reward, done, info

    def _get_ob(self):
        assert len(self.frames) == self.k
        return LazyFrames(list(self.frames))
|
class ScaledFloatFrame(gym.ObservationWrapper):
    """Rescale uint8 observations from [0, 255] to float32 in [0, 1]."""

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=env.observation_space.shape, dtype=np.float32)

    def observation(self, observation):
        # Note: this materializes a float32 copy of every frame.
        return np.array(observation).astype(np.float32) / 255.0
|
class LazyFrames(object):
    """Memory-saving view over a list of stacked frames.

    Ensures that frames shared between observations are stored only once,
    which matters for huge replay buffers (e.g. DQN's 1M frames). The
    concatenated array is materialized lazily and cached on first access;
    convert to a numpy array only right before feeding the model.
    """

    def __init__(self, frames):
        self._frames = frames
        self._out = None

    def _force(self):
        # Concatenate along the channel axis once, then drop the frame list
        # so the individual frames can be garbage collected.
        if self._out is None:
            self._out = np.concatenate(self._frames, axis=-1)
            self._frames = None
        return self._out

    def __array__(self, dtype=None):
        out = self._force()
        return out if dtype is None else out.astype(dtype)

    def __len__(self):
        return len(self._force())

    def __getitem__(self, i):
        return self._force()[i]

    def count(self):
        # Number of stacked frames = size of the last (channel) axis.
        stacked = self._force()
        return stacked.shape[stacked.ndim - 1]

    def frame(self, i):
        return self._force()[..., i]
|
class TransposeFrame(gym.ObservationWrapper):
    """Convert HWC observations to CHW (channels-first) layout."""

    def __init__(self, env):
        gym.ObservationWrapper.__init__(self, env)
        h, w, c = self.observation_space.shape
        self.observation_space = gym.spaces.Box(low=0, high=1, shape=(c, h, w), dtype=np.float32)

    def observation(self, observation):
        return observation.transpose(2, 0, 1)
|
def wrap_deepmind(env, episode_life=True, preprocess=True, max_and_skip=True, clip_rewards=True, no_op_reset=True, history_length=4, scale=True, transpose=True):
    """Configure environment for DeepMind-style Atari.

    The wrapper application order below is significant: noop reset ->
    frame skip/max -> episodic life -> fire reset -> 84x84 warp ->
    reward clipping -> frame stacking -> float scaling -> CHW transpose.

    Args:
        env: raw Atari gym environment to wrap.
        episode_life: end episodes on life loss (true reset on game over only).
        preprocess: grayscale and resize frames to 84x84 (WarpFrame).
        max_and_skip: repeat each action 4 frames, maxing the last two.
        clip_rewards: clip rewards to their sign.
        no_op_reset: take a random number of no-ops after reset.
        history_length: number of frames to stack (>1 enables FrameStack).
        scale: rescale pixels to float32 in [0, 1].
        transpose: convert observations from HWC to CHW.

    Returns:
        The wrapped environment.
    """
    if no_op_reset:
        env = NoopResetEnv(env, noop_max=30)
    if max_and_skip:
        env = MaxAndSkipEnv(env, skip=4)
    if episode_life:
        env = EpisodicLifeEnv(env)
    # Some games (e.g. Breakout) stay frozen until FIRE is pressed.
    if ('FIRE' in env.unwrapped.get_action_meanings()):
        env = FireResetEnv(env)
    if preprocess:
        env = WarpFrame(env)
    if clip_rewards:
        env = ClipRewardEnv(env)
    if (history_length > 1):
        env = FrameStack(env, history_length)
    if scale:
        env = ScaledFloatFrame(env)
    if transpose:
        env = TransposeFrame(env)
    return env
|
def worker(worker_id, env, master_end, worker_end):
    """Child-process loop: serve commands arriving on `worker_end` until told
    to close. Recognized commands: 'step', 'seed', 'reset', 'close'."""
    # This process only uses its own pipe end; close the parent's copy.
    master_end.close()
    while True:
        cmd, data = worker_end.recv()
        if cmd == 'close':
            worker_end.close()
            break
        if cmd == 'step':
            ob, reward, done, info = env.step(data)
            # Auto-reset finished episodes so the parent always receives a
            # valid next observation.
            if done:
                ob = env.reset()
            worker_end.send((ob, reward, done, info))
        elif cmd == 'reset':
            worker_end.send(env.reset())
        elif cmd == 'seed':
            worker_end.send(env.seed(data))
        else:
            raise NotImplementedError
|
class ParallelEnv(object):
    """Vectorized environment running `num_processes` copies of `env`, each in
    its own worker process, communicating over multiprocessing Pipes.
    """

    def __init__(self, num_processes, env):
        self.nenvs = num_processes
        self.waiting = False  # True while a 'step' broadcast awaits replies
        self.closed = False
        self.workers = []
        self.observation_space = env.observation_space
        self.action_space = env.action_space
        # One pipe per worker: master_end stays in this process, send_end is
        # handed to the child (which calls it worker_end).
        (self.master_ends, self.send_ends) = zip(*[Pipe() for _ in range(self.nenvs)])
        for (worker_id, (master_end, send_end)) in enumerate(zip(self.master_ends, self.send_ends)):
            # Each worker receives its own deep copy of the template env.
            p = Process(target=worker, args=(worker_id, copy.deepcopy(env), master_end, send_end))
            p.start()
            self.workers.append(p)

    def step(self, actions):
        """Perform a step in each environment and return stacked transitions."""
        # Broadcast one action per worker, then collect replies in order.
        for (master_end, action) in zip(self.master_ends, actions):
            master_end.send(('step', action))
        self.waiting = True
        results = [master_end.recv() for master_end in self.master_ends]
        self.waiting = False
        (obs, rews, dones, infos) = zip(*results)
        return (np.stack(obs), np.stack(rews), np.stack(dones), infos)

    def seed(self, seed=None):
        """Seed each worker env with `seed + idx` so their streams differ."""
        for (idx, master_end) in enumerate(self.master_ends):
            master_end.send(('seed', (seed + idx)))
        return [master_end.recv() for master_end in self.master_ends]

    def reset(self):
        """Reset all worker envs and return stacked initial observations."""
        for master_end in self.master_ends:
            master_end.send(('reset', None))
        results = [master_end.recv() for master_end in self.master_ends]
        return np.stack(results)

    def close(self):
        """Shut down all worker processes; safe to call more than once."""
        if self.closed:
            return
        if self.waiting:
            # Drain any in-flight step replies before asking workers to exit.
            [master_end.recv() for master_end in self.master_ends]
        for master_end in self.master_ends:
            master_end.send(('close', None))
        for worker in self.workers:
            worker.join()
        self.closed = True
|
class Logger(object):
    """Tracks per-episode rewards and lengths across parallel envs and writes
    statistics to TensorBoard, a CSV file, and stdout."""

    def __init__(self, n_envs, logdir):
        self.start_time = time.time()
        self.n_envs = n_envs
        self.logdir = logdir
        # One running reward list per env; flushed when its episode ends.
        self.episode_rewards = []
        for _ in range(n_envs):
            self.episode_rewards.append([])
        # Statistics are computed over the 40 most recently finished episodes.
        self.episode_len_buffer = deque(maxlen=40)
        self.episode_reward_buffer = deque(maxlen=40)
        self.log = pd.DataFrame(columns=['timesteps', 'wall_time', 'num_episodes', 'max_episode_rewards', 'mean_episode_rewards', 'min_episode_rewards', 'max_episode_len', 'mean_episode_len', 'min_episode_len'])
        self.writer = SummaryWriter(logdir)
        self.timesteps = 0
        self.num_episodes = 0

    def feed(self, rew_batch, done_batch):
        """Ingest a rollout of rewards/done flags shaped [steps, n_envs]."""
        steps = rew_batch.shape[0]
        # Transpose to [n_envs, steps] for per-env iteration.
        rew_batch = rew_batch.T
        done_batch = done_batch.T
        for i in range(self.n_envs):
            for j in range(steps):
                self.episode_rewards[i].append(rew_batch[i][j])
                if done_batch[i][j]:
                    # Episode finished: record its length and total reward,
                    # then start accumulating the next episode for this env.
                    self.episode_len_buffer.append(len(self.episode_rewards[i]))
                    self.episode_reward_buffer.append(np.sum(self.episode_rewards[i]))
                    self.episode_rewards[i] = []
                    self.num_episodes += 1
        self.timesteps += (self.n_envs * steps)

    def write_summary(self, summary):
        """Write each scalar in `summary` to TensorBoard at the current step."""
        for (key, value) in summary.items():
            self.writer.add_scalar(key, value, self.timesteps)

    def dump(self):
        """Append one row of statistics to the CSV log and print it."""
        wall_time = (time.time() - self.start_time)
        if (self.num_episodes > 0):
            episode_statistics = self._get_episode_statistics()
            episode_statistics_list = list(episode_statistics.values())
            for (key, value) in episode_statistics.items():
                self.writer.add_scalar(key, value, self.timesteps)
        else:
            # No completed episodes yet: pad the six statistic columns.
            episode_statistics_list = ([None] * 6)
        log = ((([self.timesteps] + [wall_time]) + [self.num_episodes]) + episode_statistics_list)
        self.log.loc[len(self.log)] = log
        # Rewrite the whole CSV on each dump so the file is always complete.
        with open((self.logdir + '/log.csv'), 'w') as f:
            self.log.to_csv(f, index=False)
        print(self.log.loc[(len(self.log) - 1)])

    def _get_episode_statistics(self):
        """Max/mean/min of reward and length over the recent-episode buffers."""
        episode_statistics = {}
        episode_statistics['Rewards/max_episodes'] = np.max(self.episode_reward_buffer)
        episode_statistics['Rewards/mean_episodes'] = np.mean(self.episode_reward_buffer)
        episode_statistics['Rewards/min_episodes'] = np.min(self.episode_reward_buffer)
        episode_statistics['Len/max_episodes'] = np.max(self.episode_len_buffer)
        episode_statistics['Len/mean_episodes'] = np.mean(self.episode_len_buffer)
        episode_statistics['Len/min_episodes'] = np.min(self.episode_len_buffer)
        return episode_statistics
|
def set_global_seeds(seed):
    """Seed every RNG the training stack uses, for reproducible runs.

    Seeds torch (CPU and all CUDA devices), numpy, and Python's `random`
    module, and forces cuDNN into deterministic mode (disabling benchmark
    autotuning, which selects algorithms nondeterministically).

    Args:
        seed: (int) the seed value.
    """
    import random
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # The original seeded only torch; numpy and `random` are seeded too so
    # that e.g. np.random-based environment wrappers are reproducible.
    np.random.seed(seed)
    random.seed(seed)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
|
def set_global_log_levels(level):
    """Set gym's global logger verbosity to `level`."""
    gym.logger.set_level(level)
|
def orthogonal_init(module, gain=nn.init.calculate_gain('relu')):
    """Orthogonally initialize a Linear/Conv2d module's weight (scaled by
    `gain`) and zero its bias; other module types pass through untouched.

    Args:
        module: module to (maybe) initialize in place; returned so the call
            can be chained, e.g. with `model.apply` or inline wrapping.
        gain: scale factor for the orthogonal weight init (defaults to the
            recommended gain for ReLU).

    Returns:
        The same module, initialized in place when applicable.
    """
    if isinstance(module, (nn.Linear, nn.Conv2d)):
        nn.init.orthogonal_(module.weight.data, gain)
        # Modules may be constructed with bias=False; the original code
        # crashed on those with an AttributeError.
        if module.bias is not None:
            nn.init.constant_(module.bias.data, 0)
    return module
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.