id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
8,560 | from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
def lp_gather_features(
    pred,
    target,
    world_size=1,
    use_horovod=False
):
    """Gather prediction/target tensors from every distributed worker.

    Args:
        pred: local prediction tensor.
        target: local target tensor.
        world_size: number of processes in the torch.distributed group.
        use_horovod: gather via horovod instead of torch.distributed.

    Returns:
        (all_preds, all_targets): tensors concatenated across all workers.
    """
    if use_horovod:
        # NOTE(review): `hvd` is assumed to be imported elsewhere in this
        # module when horovod is enabled -- confirm.
        assert hvd is not None, 'Please install horovod'
        with torch.no_grad():
            all_preds = hvd.allgather(pred)
            # BUGFIX: was `hvd.allgath(target)` (typo), which always raised
            # AttributeError on the horovod path.
            all_targets = hvd.allgather(target)
    else:
        gathered_preds = [torch.zeros_like(pred) for _ in range(world_size)]
        gathered_targets = [torch.zeros_like(target) for _ in range(world_size)]
        dist.all_gather(gathered_preds, pred)
        dist.all_gather(gathered_targets, target)
        all_preds = torch.cat(gathered_preds, dim=0)
        all_targets = torch.cat(gathered_targets, dim=0)
    return all_preds, all_targets
8,561 | from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
def get_map(pred, target):
    """Mean average precision over classes for multi-label logits."""
    probs = torch.sigmoid(pred).numpy()
    labels = target.numpy()
    per_class_ap = average_precision_score(labels, probs, average=None)
    return np.mean(per_class_ap)
8,562 | from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
def get_acc(pred, target):
    """Top-1 accuracy from logit predictions and one-hot targets."""
    pred_idx = torch.argmax(pred, 1).numpy()
    target_idx = torch.argmax(target, 1).numpy()
    return accuracy_score(target_idx, pred_idx)
8,563 | from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
def get_mauc(pred, target):
    """Mean ROC-AUC over classes for multi-label logits."""
    probs = torch.sigmoid(pred).numpy()
    labels = target.numpy()
    per_class_auc = roc_auc_score(labels, probs, average=None)
    return np.mean(per_class_auc)
8,564 | from multiprocessing.sharedctypes import Value
import torch
import torch.distributed.nn
from torch import distributed as dist, nn as nn
from torch.nn import functional as F
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score, accuracy_score
def calc_celoss(pred, target):
    """Cross-entropy between logits `pred` and one-hot `target` labels."""
    class_idx = torch.argmax(target, 1).long()
    criterion = nn.CrossEntropyLoss()
    return criterion(pred, class_idx)
8,565 | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
def create_model(
    amodel_name: str,
    tmodel_name: str,
    pretrained: str = "",
    precision: str = "fp32",
    device: torch.device = torch.device("cpu"),
    jit: bool = False,
    force_quick_gelu: bool = False,
    openai_model_cache_dir: str = os.path.expanduser("~/.cache/clip"),
    skip_params=True,
    pretrained_audio: str = "",
    pretrained_text: str = "",
    enable_fusion: bool = False,
    fusion_type: str = 'None'
    # pretrained_image: bool = False,
):
    """Build a CLAP model from a registered config, optionally loading
    pretrained full-model, audio-branch-only, or OpenAI weights.

    Args:
        amodel_name: audio-branch architecture name (key in _MODEL_CONFIGS).
        tmodel_name: text-branch model type written into the config.
        pretrained: "openai", a registered pretrained tag, or a checkpoint path.
        precision: "fp32", "fp16" or "amp"; "fp16" requires a non-CPU device.
        device: target device for the final model.
        jit: if True, torch.jit.script the final model.
        force_quick_gelu: force QuickGELU on non-OpenAI transformer models.
        openai_model_cache_dir: cache directory for downloaded checkpoints.
        skip_params: forwarded to load_state_dict when loading `pretrained`.
        pretrained_audio: path to an audio-branch checkpoint (PANN / HTSAT).
        pretrained_text: unused here; kept for interface compatibility.
        enable_fusion: enable feature fusion in the model config.
        fusion_type: fusion variant name written into the config.

    Returns:
        (model, model_cfg) tuple.
    """
    amodel_name = amodel_name.replace(
        "/", "-"
    )  # for callers using old naming with / in ViT names
    pretrained_orig = pretrained
    pretrained = pretrained.lower()
    if pretrained == "openai":
        if amodel_name in _MODEL_CONFIGS:
            logging.info(f"Loading {amodel_name} model config.")
            model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
        else:
            logging.error(
                f"Model config for {amodel_name} not found; available models {list_models()}."
            )
            raise RuntimeError(f"Model config for {amodel_name} not found.")
        logging.info(f"Loading pretrained ViT-B-16 text encoder from OpenAI.")
        # Hard Code in model name
        model_cfg["text_cfg"]["model_type"] = tmodel_name
        model = load_openai_model(
            "ViT-B-16",
            model_cfg,
            device=device,
            jit=jit,
            cache_dir=openai_model_cache_dir,
            enable_fusion=enable_fusion,
            fusion_type=fusion_type
        )
        # See https://discuss.pytorch.org/t/valueerror-attemting-to-unscale-fp16-gradients/81372
        if precision == "amp" or precision == "fp32":
            model = model.float()
    else:
        if amodel_name in _MODEL_CONFIGS:
            logging.info(f"Loading {amodel_name} model config.")
            model_cfg = deepcopy(_MODEL_CONFIGS[amodel_name])
        else:
            logging.error(
                f"Model config for {amodel_name} not found; available models {list_models()}."
            )
            raise RuntimeError(f"Model config for {amodel_name} not found.")
        if force_quick_gelu:
            # override for use of QuickGELU on non-OpenAI transformer models
            model_cfg["quick_gelu"] = True
        model_cfg["text_cfg"]["model_type"] = tmodel_name
        model_cfg["enable_fusion"] = enable_fusion
        model_cfg["fusion_type"] = fusion_type
        model = CLAP(**model_cfg)
        if pretrained:
            checkpoint_path = ""
            url = get_pretrained_url(amodel_name, pretrained)
            if url:
                checkpoint_path = download_pretrained(url, root=openai_model_cache_dir)
            elif os.path.exists(pretrained_orig):
                checkpoint_path = pretrained_orig
            if checkpoint_path:
                logging.info(f"Loading pretrained {amodel_name}-{tmodel_name} weights ({pretrained}).")
                # BUGFIX: honor the `skip_params` argument; it was hard-coded
                # to True, silently ignoring the caller's choice.
                ckpt = load_state_dict(checkpoint_path, skip_params=skip_params)
                model.load_state_dict(ckpt)
                param_names = [n for n, p in model.named_parameters()]
                for n in param_names:
                    print(n, "\t", "Loaded" if n in ckpt else "Unloaded")
            else:
                logging.warning(
                    f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
                )
                raise RuntimeError(
                    f"Pretrained weights ({pretrained}) not found for model {amodel_name}."
                )
        if pretrained_audio:
            if amodel_name.startswith('PANN'):
                if 'Cnn14_mAP' in pretrained_audio:  # official checkpoint
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                    audio_ckpt = audio_ckpt['model']
                    keys = list(audio_ckpt.keys())
                    # Re-key everything except the (untrained) feature
                    # extractors under the audio_branch namespace.
                    for key in keys:
                        if 'spectrogram_extractor' not in key and 'logmel_extractor' not in key:
                            v = audio_ckpt.pop(key)
                            audio_ckpt['audio_branch.' + key] = v
                elif os.path.basename(pretrained_audio).startswith('PANN'):  # checkpoint trained via HTSAT codebase
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                    audio_ckpt = audio_ckpt['state_dict']
                    keys = list(audio_ckpt.keys())
                    for key in keys:
                        if key.startswith('sed_model'):
                            v = audio_ckpt.pop(key)
                            # len('sed_model.') == 10 -> strip the prefix
                            audio_ckpt['audio_branch.' + key[10:]] = v
                elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                else:
                    raise ValueError('Unknown audio checkpoint')
            elif amodel_name.startswith('HTSAT'):
                if 'HTSAT_AudioSet_Saved' in pretrained_audio:  # official checkpoint
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                    audio_ckpt = audio_ckpt['state_dict']
                    keys = list(audio_ckpt.keys())
                    for key in keys:
                        if key.startswith('sed_model') and ('spectrogram_extractor' not in key
                                                            and 'logmel_extractor' not in key):
                            v = audio_ckpt.pop(key)
                            audio_ckpt['audio_branch.' + key[10:]] = v
                elif os.path.basename(pretrained_audio).startswith('HTSAT'):  # checkpoint trained via HTSAT codebase
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                    audio_ckpt = audio_ckpt['state_dict']
                    keys = list(audio_ckpt.keys())
                    for key in keys:
                        if key.startswith('sed_model'):
                            v = audio_ckpt.pop(key)
                            audio_ckpt['audio_branch.' + key[10:]] = v
                elif os.path.basename(pretrained_audio).startswith('finetuned'):  # checkpoint trained via linear probe codebase
                    audio_ckpt = torch.load(pretrained_audio, map_location='cpu')
                else:
                    raise ValueError('Unknown audio checkpoint')
            else:
                # BUGFIX: was `raise f'...'` -- raising a plain string is a
                # TypeError in Python 3, masking the real error.
                raise ValueError(f'this audio encoder pretrained checkpoint is not support')
            model.load_state_dict(audio_ckpt, strict=False)
            logging.info(f"Loading pretrained {amodel_name} weights ({pretrained_audio}).")
            param_names = [n for n, p in model.named_parameters()]
            for n in param_names:
                print(n, "\t", "Loaded" if n in audio_ckpt else "Unloaded")
        model.to(device=device)
        if precision == "fp16":
            assert device.type != "cpu"
            convert_weights_to_fp16(model)
        if jit:
            model = torch.jit.script(model)
    return model, model_cfg
def image_transform(
    image_size: int,
    is_train: bool,
    mean=(0.48145466, 0.4578275, 0.40821073),
    std=(0.26862954, 0.26130258, 0.27577711)
):
    """Build the CLIP-style torchvision preprocessing pipeline.

    Training uses a mild random resized crop; evaluation uses a
    resize + center crop. Both end with RGB conversion, tensor
    conversion, and normalization with the CLIP mean/std.
    """
    normalize = Normalize(mean=mean, std=std)
    if is_train:
        steps = [
            RandomResizedCrop(image_size, scale=(0.9, 1.0), interpolation=InterpolationMode.BICUBIC),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ]
    else:
        steps = [
            Resize(image_size, interpolation=InterpolationMode.BICUBIC),
            CenterCrop(image_size),
            _convert_to_rgb,
            ToTensor(),
            normalize,
        ]
    return Compose(steps)
def create_model_and_transforms(
    model_name: str,
    pretrained: str = "",
    precision: str = "fp32",
    device: torch.device = torch.device("cpu"),
    jit: bool = False,
    force_quick_gelu: bool = False,
    # pretrained_image: bool = False,
):
    # Build a model plus its train/val image preprocessing transforms.
    # NOTE(review): `create_model` in this file takes
    # (amodel_name, tmodel_name, pretrained, ...) and returns a
    # (model, model_cfg) tuple; this call passes `pretrained` where
    # `tmodel_name` is expected and never unpacks the tuple, so
    # `model.visual` below would fail. This looks copied from the original
    # open_clip factory -- confirm which create_model it should target.
    model = create_model(
        model_name,
        pretrained,
        precision,
        device,
        jit,
        force_quick_gelu=force_quick_gelu,
        # pretrained_image=pretrained_image
    )
    # Separate train/eval transforms sized from the visual tower.
    preprocess_train = image_transform(model.visual.image_size, is_train=True)
    preprocess_val = image_transform(model.visual.image_size, is_train=False)
    return model, preprocess_train, preprocess_val | null |
8,566 | import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
import torch
from .model import CLAP, convert_weights_to_fp16
from .openai import load_openai_model
from .pretrained import get_pretrained_url, download_pretrained
from .transform import image_transform
# Directories (or single files) scanned for model-config JSON files.
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
def _rescan_model_configs():
    # Rebuild the global model-config registry from _MODEL_CONFIG_PATHS.
    # NOTE(review): assumes `_MODEL_CONFIGS` (a dict) and `_natural_key`
    # are defined elsewhere in this module before the first call --
    # otherwise `_MODEL_CONFIGS[cf.stem] = ...` raises NameError. Confirm.
    global _MODEL_CONFIGS
    config_ext = (".json",)
    config_files = []
    # Accept both a direct .json file and a directory of .json files.
    for config_path in _MODEL_CONFIG_PATHS:
        if config_path.is_file() and config_path.suffix in config_ext:
            config_files.append(config_path)
        elif config_path.is_dir():
            for ext in config_ext:
                config_files.extend(config_path.glob(f"*{ext}"))
    for cf in config_files:
        with open(cf, "r") as f:
            model_cfg = json.load(f)
            # Only register configs that define the full CLAP schema.
            if all(a in model_cfg for a in ("embed_dim", "audio_cfg", "text_cfg")):
                _MODEL_CONFIGS[cf.stem] = model_cfg
    # Keep the registry sorted in natural (human) order of config names.
    _MODEL_CONFIGS = {
        k: v
        for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))
    }
_rescan_model_configs()  # populate the registry once at import time
The provided code snippet includes necessary dependencies for implementing the `add_model_config` function. Write a Python function `def add_model_config(path)` to solve the following problem:
add model config path or file and update registry
Here is the function:
def add_model_config(path):
    """Register an extra model-config file or directory and rescan the registry."""
    config_path = path if isinstance(path, Path) else Path(path)
    _MODEL_CONFIG_PATHS.append(config_path)
    _rescan_model_configs()
8,567 | import torch
import torch.nn as nn
from functools import partial
from ldm.modules.x_transformer import Encoder, TransformerWrapper
from torch.utils.checkpoint import checkpoint
from transformers import T5Tokenizer, T5EncoderModel, CLIPTokenizer, CLIPTextModel, AutoTokenizer
from importlib_resources import files
from ldm.modules.encoders.CLAP.utils import read_config_as_args
from ldm.modules.encoders.CLAP.clap import TextEncoder
from ldm.util import default, count_params
import open_clip
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
    """No-op replacement for ``nn.Module.train`` so the train/eval mode
    of a frozen module can never be changed."""
    # `mode` is deliberately ignored; the module keeps its current state.
    return self
8,568 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def log_txt_as_img(wh, xc, size=10):
    """Render a batch of captions as white RGB images for logging.

    Args:
        wh: (width, height) tuple of each rendered image.
        xc: list of caption strings, one per batch element.
        size: font size in points.

    Returns:
        torch.Tensor of shape (b, 3, h, w) with values in [-1, 1].
    """
    b = len(xc)
    # PERF: the font and the wrap width are identical for every caption,
    # so load/compute them once instead of inside the loop.
    font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)
    nc = int(40 * (wh[0] / 256))  # ~40 chars per 256px of width
    txts = list()
    for bi in range(b):
        txt = Image.new("RGB", wh, color="white")
        draw = ImageDraw.Draw(txt)
        lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))
        try:
            draw.text((0, 0), lines, fill="black", font=font)
        except UnicodeEncodeError:
            # Leave the image blank rather than crash logging.
            print("Cant encode string for logging. Skipping.")
        # HWC uint8 [0, 255] -> CHW float [-1, 1]
        txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0
        txts.append(txt)
    txts = np.stack(txts)
    txts = torch.tensor(txts)
    return txts
8,569 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def ismap(x):
    """True for 4-D tensors whose channel dim exceeds 3 (e.g. segmentation maps)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] > 3
8,570 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def isimage(x):
    """True for 4-D tensors with 1 or 3 channels (grayscale/RGB image batches)."""
    if not isinstance(x, torch.Tensor):
        return False
    return x.ndim == 4 and x.shape[1] in (1, 3)
8,571 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def exists(x):
    """Return True unless `x` is None."""
    return not (x is None)
def default(val, d):
    """Return `val` when it is not None; otherwise return `d`,
    calling it first if it is a function (lazy default)."""
    if val is not None:
        return val
    return d() if isfunction(d) else d
8,572 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
The provided code snippet includes necessary dependencies for implementing the `mean_flat` function. Write a Python function `def mean_flat(tensor)` to solve the following problem:
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 Take the mean over all non-batch dimensions.
Here is the function:
def mean_flat(tensor):
    """
    https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86
    Take the mean over all non-batch dimensions.
    """
    non_batch_dims = tuple(range(1, tensor.ndim))
    return tensor.mean(dim=non_batch_dims)
8,573 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def count_params(model, verbose=False):
    """Return the total number of parameters in `model`,
    optionally printing the count in millions."""
    total_params = 0
    for p in model.parameters():
        total_params += p.numel()
    if verbose:
        print(f"{model.__class__.__name__} has {total_params*1.e-6:.2f} M params.")
    return total_params
8,574 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
def get_obj_from_str(string, reload=False):
    """Resolve a dotted path like 'pkg.mod.Class' to the named attribute,
    optionally reloading the module first."""
    module_path, attr_name = string.rsplit(".", 1)
    if reload:
        importlib.reload(importlib.import_module(module_path))
    return getattr(importlib.import_module(module_path, package=None), attr_name)
def instantiate_from_config(config, reload=False):
    """Instantiate `config['target']` with `config['params']` as kwargs."""
    if "target" not in config:
        # Sentinel configs used by latent-diffusion deliberately map to None.
        if config == '__is_first_stage__':
            return None
        if config == "__is_unconditional__":
            return None
        raise KeyError("Expected key `target` to instantiate.")
    params = config.get("params", dict())
    return get_obj_from_str(config["target"], reload=reload)(**params)
8,575 | import importlib
import torch
import numpy as np
from tqdm import tqdm
from inspect import isfunction
from PIL import Image, ImageDraw, ImageFont
import hashlib
import requests
import os
# Download URLs for the pretrained evaluation assets (SpecVQGAN mirrors).
URL_MAP = {
    'vggishish_lpaps': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/vggishish16.pt',
    'vggishish_mean_std_melspec_10s_22050hz': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/train_means_stds_melspec_10s_22050hz.txt',
    'melception': 'https://a3s.fi/swift/v1/AUTH_a235c0f452d648828f745589cde1219a/specvqgan_public/melception-21-05-10T09-28-40.pt',
}
# Local filenames the downloads are stored under (same keys as URL_MAP).
CKPT_MAP = {
    'vggishish_lpaps': 'vggishish16.pt',
    'vggishish_mean_std_melspec_10s_22050hz': 'train_means_stds_melspec_10s_22050hz.txt',
    'melception': 'melception-21-05-10T09-28-40.pt',
}
# Expected MD5 checksums used to verify each downloaded asset.
MD5_MAP = {
    'vggishish_lpaps': '197040c524a07ccacf7715d7080a80bd',
    'vggishish_mean_std_melspec_10s_22050hz': 'f449c6fd0e248936c16f6d22492bb625',
    'melception': 'a71a41041e945b457c7d3d814bbcf72d',
}
def download(url, local_path, chunk_size=1024):
    """Stream `url` to `local_path` with a byte-level progress bar.

    Creates the destination directory if needed. Progress total comes from
    the server's Content-Length header (0 when absent).
    """
    os.makedirs(os.path.split(local_path)[0], exist_ok=True)
    with requests.get(url, stream=True) as r:
        total_size = int(r.headers.get("content-length", 0))
        with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
            with open(local_path, "wb") as f:
                for data in r.iter_content(chunk_size=chunk_size):
                    if data:
                        f.write(data)
                        # BUGFIX: advance by the bytes actually received;
                        # the final chunk is usually smaller than
                        # `chunk_size`, which made the bar overshoot.
                        pbar.update(len(data))
def md5_hash(path):
    """Hex MD5 digest of the file at `path`."""
    digest = hashlib.md5()
    with open(path, "rb") as f:
        digest.update(f.read())
    return digest.hexdigest()
def exists(x):
    """Return True unless `x` is None."""
    return not (x is None)
def get_ckpt_path(name, root, check=False):
    """Return the local path of asset `name` under `root`, downloading it
    (and verifying its MD5) when missing or, with check=True, corrupted."""
    assert name in URL_MAP
    path = os.path.join(root, CKPT_MAP[name])
    needs_fetch = not os.path.exists(path) or (check and md5_hash(path) != MD5_MAP[name])
    if needs_fetch:
        print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
        download(URL_MAP[name], path)
        md5 = md5_hash(path)
        assert md5 == MD5_MAP[name], md5
    return path
8,577 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
The provided code snippet includes necessary dependencies for implementing the `disabled_train` function. Write a Python function `def disabled_train(self, mode=True)` to solve the following problem:
Overwrite model.train with this function to make sure train/eval mode does not change anymore.
Here is the function:
def disabled_train(self, mode=True):
    """Replacement for ``nn.Module.train`` that freezes the train/eval state."""
    del mode  # intentionally unused: mode switching is disabled
    return self
8,578 | import torch
import torch.nn as nn
import numpy as np
import pytorch_lightning as pl
from torch.optim.lr_scheduler import LambdaLR
from einops import rearrange, repeat
from contextlib import contextmanager
from functools import partial
from tqdm import tqdm
from torchvision.utils import make_grid
from pytorch_lightning.utilities.distributed import rank_zero_only
from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config
from ldm.modules.ema import LitEma
from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution
from ldm.models.autoencoder import IdentityFirstStage, AutoencoderKL
from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like
from ldm.models.diffusion.ddim import DDIMSampler
def uniform_on_device(r1, r2, shape, device):
    """Sample `shape` values uniformly between `r2` and `r1` on `device`."""
    u = torch.rand(*shape, device=device)
    return u * (r1 - r2) + r2
8,579 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .film import Film
The provided code snippet includes necessary dependencies for implementing the `init_layer` function. Write a Python function `def init_layer(layer)` to solve the following problem:
Initialize a Linear or Convolutional layer.
Here is the function:
def init_layer(layer):
    """Initialize a Linear or Convolutional layer. """
    # Xavier-uniform weights; zero bias when the layer has one.
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
8,580 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .film import Film
The provided code snippet includes necessary dependencies for implementing the `init_bn` function. Write a Python function `def init_bn(bn)` to solve the following problem:
Initialize a Batchnorm layer.
Here is the function:
def init_bn(bn):
    """Initialize a Batchnorm layer. """
    # Identity affine transform: gamma = 1, beta = 0.
    nn.init.constant_(bn.bias, 0.)
    nn.init.constant_(bn.weight, 1.)
8,581 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .film import Film
The provided code snippet includes necessary dependencies for implementing the `init_gru` function. Write a Python function `def init_gru(rnn)` to solve the following problem:
Initialize a GRU layer.
Here is the function:
def init_gru(rnn):
    """Initialize a GRU layer. """
    def _uniform_by_fan_in(tensor):
        # U(-sqrt(3/fan_in), sqrt(3/fan_in)) uniform init.
        fan_in = nn.init._calculate_correct_fan(tensor, 'fan_in')
        bound = math.sqrt(3 / fan_in)
        nn.init.uniform_(tensor, -bound, bound)

    def _init_stacked(tensor, init_funcs):
        # A GRU packs its three gate matrices into one tensor along dim 0;
        # initialize each slice with its own function.
        length, _ = tensor.shape
        rows_per_gate = length // len(init_funcs)
        for gate, fn in enumerate(init_funcs):
            fn(tensor[gate * rows_per_gate: (gate + 1) * rows_per_gate, :])

    for layer in range(rnn.num_layers):
        _init_stacked(getattr(rnn, f'weight_ih_l{layer}'),
                      [_uniform_by_fan_in, _uniform_by_fan_in, _uniform_by_fan_in])
        torch.nn.init.constant_(getattr(rnn, f'bias_ih_l{layer}'), 0)
        # Hidden-to-hidden: last gate slice gets an orthogonal init.
        _init_stacked(getattr(rnn, f'weight_hh_l{layer}'),
                      [_uniform_by_fan_in, _uniform_by_fan_in, nn.init.orthogonal_])
        torch.nn.init.constant_(getattr(rnn, f'bias_hh_l{layer}'), 0)
8,582 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from .film import Film
def act(x, activation):
    """Apply the named activation to `x`.

    Note: 'relu' and 'leaky_relu' modify `x` in place; 'swish' returns a
    new tensor.

    Raises:
        ValueError: if `activation` is not a recognised name.
    """
    if activation == 'relu':
        return F.relu_(x)
    elif activation == 'leaky_relu':
        return F.leaky_relu_(x, negative_slope=0.2)
    elif activation == 'swish':
        return x * torch.sigmoid(x)
    else:
        # BUGFIX: raise a specific exception type and include the offending
        # value instead of a bare `Exception('Incorrect activation!')`.
        raise ValueError(f'Incorrect activation: {activation!r}')
8,583 | import librosa
import librosa.filters
import math
import numpy as np
import scipy.io.wavfile
def load_wav(path):
    """Load a mono waveform at 32 kHz, cropped/padded to exactly 10 s.

    Returns:
        np.ndarray of shape (320000, 1).
    """
    max_length = 32000 * 10
    wav = librosa.core.load(path, sr=32000)[0]
    if len(wav) > max_length:
        # BUGFIX: the truncated array was previously assigned to an unused
        # local (`audio`), so waveforms longer than 10 s were returned
        # untruncated.
        wav = wav[0:max_length]
    # pad audio to max length, 10s for AudioCaps
    if len(wav) < max_length:
        wav = np.pad(wav, (0, max_length - len(wav)), 'constant')
    wav = wav[..., None]
    return wav
8,584 | import librosa
import librosa.filters
import math
import numpy as np
import scipy.io.wavfile
def save_wav(wav, path):
wav *= 32767 / max(0.01, np.max(np.abs(wav)))
scipy.io.wavfile.write(path, 32000, wav.astype(np.int16)) | null |
8,585 | import torch
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
from scipy.signal import get_window
import librosa.util as librosa_util
from librosa.util import pad_center, tiny
The provided code snippet includes necessary dependencies for implementing the `window_sumsquare` function. Write a Python function `def window_sumsquare(window, n_frames, hop_length=512, win_length=1024, n_fft=1024, dtype=np.float32, norm=None)` to solve the following problem:
# from librosa 0.6 Compute the sum-square envelope of a window function at a given hop length. This is used to estimate modulation effects induced by windowing observations in short-time fourier transforms. Parameters ---------- window : string, tuple, number, callable, or list-like Window specification, as in `get_window` n_frames : int > 0 The number of analysis frames hop_length : int > 0 The number of samples to advance between frames win_length : [optional] The length of the window function. By default, this matches `n_fft`. n_fft : int > 0 The length of each analysis frame. dtype : np.dtype The data type of the output Returns ------- wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))` The sum-squared envelope of the window function
Here is the function:
def window_sumsquare(window, n_frames, hop_length=512, win_length=1024,
                     n_fft=1024, dtype=np.float32, norm=None):
    """
    # from librosa 0.6
    Compute the sum-square envelope of a window function at a given hop length.
    This is used to estimate modulation effects induced by windowing
    observations in short-time fourier transforms.
    Parameters
    ----------
    window : string, tuple, number, callable, or list-like
        Window specification, as in `get_window`
    n_frames : int > 0
        The number of analysis frames
    hop_length : int > 0
        The number of samples to advance between frames
    win_length : [optional]
        The length of the window function.  By default, this matches `n_fft`.
    n_fft : int > 0
        The length of each analysis frame.
    dtype : np.dtype
        The data type of the output
    Returns
    -------
    wss : np.ndarray, shape=`(n_fft + hop_length * (n_frames - 1))`
        The sum-squared envelope of the window function
    """
    if win_length is None:
        win_length = n_fft
    total_len = n_fft + hop_length * (n_frames - 1)
    envelope = np.zeros(total_len, dtype=dtype)
    # Square the (normalized) analysis window once, centered to the FFT size.
    win_sq = librosa_util.normalize(get_window(window, win_length, fftbins=True), norm=norm) ** 2
    win_sq = librosa_util.pad_center(win_sq, n_fft)
    # Accumulate the squared window at every hop position.
    for frame in range(n_frames):
        offset = frame * hop_length
        end = min(total_len, offset + n_fft)
        envelope[offset:end] += win_sq[:max(0, min(n_fft, total_len - offset))]
    return envelope
8,586 | import torch
import numpy as np
def _random_scale(lower=0.3, upper=0.9):
    # Draw a random amplitude scale in [lower, upper].
    return float(uniform_torch(lower, upper))
def _random_noise(clean, noise, snr_l=None, snr_h=None):
    # Attenuate `noise` so the pair has a random SNR drawn from [snr_l, snr_h].
    snr = uniform_torch(snr_l, snr_h)
    attenuation = 10 ** (float(snr) / 20)
    return clean, noise / attenuation, snr
def _to_numpy(wav):
    # [num_samples, 1] tensor -> [num_samples] numpy array
    return np.transpose(wav, (1, 0))[0].numpy()
def normalize_energy_torch(audio, alpha=1):
    '''
    If the signal is almost empty(determined by threshold), if will only be divided by 2**15
    :param audio: 1d waveform, 2**15
    :param alpha: the value of output range from: [-alpha,alpha]
    :return: 1d waveform which value range from: [-alpha,alpha]
    '''
    peak = activelev_torch([audio])
    return (audio / peak) * alpha
def unify_energy_torch(*args):
    # Rescale all signals jointly so their common peak magnitude is 1.
    joint_peak = activelev_torch(args)
    scale = 1.0 / joint_peak
    return [sig * scale for sig in args]
The provided code snippet includes necessary dependencies for implementing the `add_noise_and_scale` function. Write a Python function `def add_noise_and_scale(front, noise, snr_l=0, snr_h=0, scale_lower=1.0, scale_upper=1.0)` to solve the following problem:
:param front: front-head audio, like vocal [samples,channel], will be normlized so any scale will be fine :param noise: noise, [samples,channel], any scale :param snr_l: Optional :param snr_h: Optional :param scale_lower: Optional :param scale_upper: Optional :return: scaled front and noise (noisy = front + noise), all_mel_e2e outputs are noramlized within [-1 , 1]
Here is the function:
def add_noise_and_scale(front, noise, snr_l=0, snr_h=0, scale_lower=1.0, scale_upper=1.0):
    """
    :param front: front-head audio, like vocal [samples,channel], will be normlized so any scale will be fine
    :param noise: noise, [samples,channel], any scale
    :param snr_l: Optional
    :param snr_h: Optional
    :param scale_lower: Optional
    :param scale_upper: Optional
    :return: scaled front and noise (noisy = front + noise), all_mel_e2e outputs are noramlized within [-1 , 1]
    """
    snr = None
    # Bring both signals into the common range [-1, 1] first.
    noise = normalize_energy_torch(noise)
    front = normalize_energy_torch(front)
    if snr_l is not None and snr_h is not None:
        # Remix the pair at a randomly drawn SNR.
        front, noise, snr = _random_noise(front, noise, snr_l=snr_l, snr_h=snr_h)
    # Jointly renormalize so the mixture also stays within [-1, 1].
    noisy, noise, front = unify_energy_torch(noise + front, noise, front)
    # Apply a single random gain to all three signals.
    scale = _random_scale(scale_lower, scale_upper)
    noisy, noise, front = noisy * scale, noise * scale, front * scale
    front, noise = _to_numpy(front), _to_numpy(noise)  # [num_samples]
    mixed_wav = front + noise
    return front, noise, mixed_wav, snr, scale
8,587 | import torch
import numpy as np
def activelev(*args):
    '''
    need to update like matlab
    '''
    # Peak absolute amplitude across every signal passed in.
    return np.abs([*args]).max()
The provided code snippet includes necessary dependencies for implementing the `normalize_energy` function. Write a Python function `def normalize_energy(audio, alpha = 1)` to solve the following problem:
:param audio: 1d waveform, [batchsize, *], :param alpha: the value of output range from: [-alpha,alpha] :return: 1d waveform which value range from: [-alpha,alpha]
Here is the function:
def normalize_energy(audio, alpha=1):
    '''
    :param audio: 1d waveform, [batchsize, *],
    :param alpha: the value of output range from: [-alpha,alpha]
    :return: 1d waveform which value range from: [-alpha,alpha]
    '''
    peak = activelev(audio)
    return (audio / peak) * alpha
8,588 | import torch
import numpy as np
def activelev(*args):
    '''
    need to update like matlab
    '''
    # Peak absolute value over all provided signals.
    return np.max(np.abs(list(args)))
def unify_energy(*args):
    # Scale every signal by the inverse of their joint peak amplitude.
    peak = activelev(args)
    inv = 1.0 / peak
    return [sig * inv for sig in args]
8,589 | import sys
import os
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
import librosa
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
import numpy as np
def cut_dialogue_history(history_memory, keep_last_n_words = 500):
    """Trim a chat transcript to roughly its last `keep_last_n_words` words.

    Whole leading paragraphs (newline-separated) are dropped until the
    remaining word count falls below the limit; short histories pass through.
    """
    n_tokens = len(history_memory.split())
    print(f"history_memory:{history_memory}, n_tokens: {n_tokens}")
    if n_tokens < keep_last_n_words:
        return history_memory
    paragraphs = history_memory.split('\n')
    remaining = n_tokens
    while remaining >= keep_last_n_words:
        # Drop the oldest paragraph and discount its word count.
        remaining -= len(paragraphs[0].split(' '))
        paragraphs = paragraphs[1:]
    return '\n' + '\n'.join(paragraphs)
8,590 | import sys
import os
import gradio as gr
import matplotlib
import librosa
import torch
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms.openai import OpenAI
import re
import uuid
import soundfile
from PIL import Image
import numpy as np
from omegaconf import OmegaConf
from einops import repeat
from ldm.util import instantiate_from_config
from ldm.data.extract_mel_spectrogram import TRANSFORMS_16000
from vocoder.bigvgan.models import VocoderBigVGAN
from ldm.models.diffusion.ddim import DDIMSampler
import whisper
from utils.hparams import set_hparams
from utils.hparams import hparams as hp
import scipy.io.wavfile as wavfile
import librosa
from audio_infer.utils import config as detection_config
from audio_infer.pytorch.models import PVT
import clip
import numpy as np
def merge_audio(audio_path_1, audio_path_2):
    """Concatenate two wav files and write the result under `audio/`.

    Returns the path of the written file. The output uses the second
    file's sample rate; the first file's rate is read but not checked.
    """
    sr_1, signal_1 = wavfile.read(audio_path_1)
    sr_2, signal_2 = wavfile.read(audio_path_2)
    combined = np.asarray(np.hstack([signal_1, signal_2]), dtype=np.int16)
    audio_filename = os.path.join('audio', str(uuid.uuid4())[0:8] + ".wav")
    wavfile.write(audio_filename, sr_2, combined)
    return audio_filename
8,591 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
The provided code snippet includes necessary dependencies for implementing the `parse_config_or_kwargs` function. Write a Python function `def parse_config_or_kwargs(config_file, **kwargs)` to solve the following problem:
parse_config_or_kwargs :param config_file: Config file that has parameters, yaml format :param **kwargs: Other alternative parameters or overwrites for config
Here is the function:
def parse_config_or_kwargs(config_file, **kwargs):
    """parse_config_or_kwargs
    :param config_file: Config file that has parameters, yaml format
    :param **kwargs: Other alternative parameters or overwrites for config
    """
    with open(config_file) as stream:
        file_options = yaml.load(stream, Loader=yaml.FullLoader)
    # Keyword arguments take precedence over values from the yaml file.
    merged = {**file_options, **kwargs}
    return merged
8,592 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
The provided code snippet includes necessary dependencies for implementing the `split_train_cv` function. Write a Python function `def split_train_cv( data_frame: pd.DataFrame, frac: float = 0.9, y=None, # Only for stratified, computes necessary split **kwargs)` to solve the following problem:
split_train_cv :param data_frame: :type data_frame: pd.DataFrame :param frac: :type frac: float
Here is the function:
def split_train_cv(
        data_frame: pd.DataFrame,
        frac: float = 0.9,
        y=None,  # Only for stratified, computes necessary split
        **kwargs):
    """Split a DataFrame into train and cross-validation parts.

    :param data_frame: rows to split
    :param frac: fraction of rows that go to the training split
    :param y: labels, only used by the 'stratified' mode
    :param kwargs: 'mode' selects the strategy ('urbansed', 'stratified',
        or default random sampling)
    :return: (train_data, cv_data)
    """
    mode = kwargs.get('mode', None)
    if mode == 'urbansed':  # Filenames are DATA_-1 DATA_-2 etc
        # Group clips originating from the same file so a file never
        # appears in both splits.
        data_frame.loc[:, 'id'] = data_frame.groupby(
            data_frame['filename'].str.split('_').apply(
                lambda x: '_'.join(x[:-1]))).ngroup()
        shuffled_ids = np.random.permutation(data_frame['id'].nunique())
        n_train = int(frac * len(shuffled_ids))
        train_data = data_frame[data_frame['id'].isin(shuffled_ids[:n_train])]
        cv_data = data_frame[data_frame['id'].isin(shuffled_ids[n_train:])]
        del train_data['id']
        del cv_data['id']
    elif mode == 'stratified':
        # Stratified (layered) sampling via iterative splitting.
        from skmultilearn.model_selection import iterative_train_test_split
        index_train, _, index_cv, _ = iterative_train_test_split(
            data_frame.index.values.reshape(-1, 1), y, test_size=1. - frac)
        train_data = data_frame[data_frame.index.isin(index_train.squeeze())]
        cv_data = data_frame[data_frame.index.isin(index_cv.squeeze())]  # cv --> cross validation
    else:
        # Plain random split with a fixed seed for reproducibility.
        train_data = data_frame.sample(frac=frac, random_state=10)
        cv_data = data_frame[~data_frame.index.isin(train_data.index)]
    return train_data, cv_data
8,593 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
The provided code snippet includes necessary dependencies for implementing the `pprint_dict` function. Write a Python function `def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml')` to solve the following problem:
pprint_dict :param outputfun: function to use, defaults to sys.stdout :param in_dict: dict to print
Here is the function:
def pprint_dict(in_dict, outputfun=sys.stdout.write, formatter='yaml'):
    """pprint_dict
    Pretty-print a dict line by line through `outputfun`.

    :param in_dict: dict to print
    :param outputfun: function to use, defaults to sys.stdout.write
    :param formatter: 'yaml' (yaml.dump) or 'pretty' (pprint.pformat)
    :raises ValueError: if `formatter` is not one of the supported names
        (previously an unknown name crashed later with UnboundLocalError)
    """
    if formatter == 'yaml':
        format_fun = yaml.dump
    elif formatter == 'pretty':
        format_fun = pformat
    else:
        raise ValueError(
            f"Unknown formatter: {formatter!r} (expected 'yaml' or 'pretty')")
    for line in format_fun(in_dict).split('\n'):
        outputfun(line)
8,594 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def getfile_outlogger(outputfile):
    """Configure the loguru logger for stderr and, optionally, a file sink.

    :param outputfile: path of an extra log file; falsy values skip the file sink
    :return: the configured loguru logger
    """
    fmt = "[<green>{time:YYYY-MM-DD HH:mm:ss}</green>] {message}"
    logger.configure(handlers=[{"sink": sys.stderr, "format": fmt}])
    if outputfile:
        # enqueue=True makes the file sink safe for multiprocessing.
        logger.add(outputfile, enqueue=True, format=fmt)
    return logger
8,595 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
The provided code snippet includes necessary dependencies for implementing the `train_labelencoder` function. Write a Python function `def train_labelencoder(labels: pd.Series, sparse=True)` to solve the following problem:
encode_labels Encodes labels :param labels: pd.Series representing the raw labels e.g., Speech, Water :param encoder (optional): Encoder already fitted returns encoded labels (many hot) and the encoder
Here is the function:
def train_labelencoder(labels: pd.Series, sparse=True):
    """Fit a MultiLabelBinarizer on raw labels.

    :param labels: pd.Series representing the raw labels e.g., Speech, Water
        (comma-separated strings, numpy arrays, or other iterables of labels)
    :param sparse: passed through as MultiLabelBinarizer(sparse_output=...)
    :return: the fitted encoder
    :raises ValueError: if the series elements are of an unsupported type
    """
    from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
    assert isinstance(labels, pd.Series), "Labels need to be series"
    # Use positional access: labels[0] breaks when the index lacks label 0.
    instance = labels.iloc[0]
    if isinstance(instance, str):
        # In case of using non processed strings, e.g., Vaccum, Speech
        label_array = labels.str.split(',').values.tolist()  # split label according to ','
    elif isinstance(instance, np.ndarray):
        # Encoder does not like to see numpy array
        label_array = [lab.tolist() for lab in labels]
    elif isinstance(instance, Iterable):
        label_array = labels
    else:
        raise ValueError(
            "Unsupported label element type: {}".format(type(instance)))
    encoder = pre.MultiLabelBinarizer(sparse_output=sparse)
    encoder.fit(label_array)
    return encoder
8,596 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
The provided code snippet includes necessary dependencies for implementing the `encode_labels` function. Write a Python function `def encode_labels(labels: pd.Series, encoder=None, sparse=True)` to solve the following problem:
encode_labels Encodes labels :param labels: pd.Series representing the raw labels e.g., Speech, Water :param encoder (optional): Encoder already fitted returns encoded labels (many hot) and the encoder
Here is the function:
def encode_labels(labels: pd.Series, encoder=None, sparse=True):
    """encode_labels
    Encodes labels into a many-hot representation.

    :param labels: pd.Series representing the raw labels e.g., Speech, Water
        (comma-separated strings, numpy arrays, or other iterables of labels)
    :param encoder: optional already-fitted encoder; when None a
        MultiLabelBinarizer is fitted on the data
    :param sparse: sparse_output flag for a newly created encoder
    :return: (encoded labels (many hot), the encoder)
    :raises ValueError: if the series elements are of an unsupported type
    """
    from collections.abc import Iterable  # collections.Iterable was removed in Python 3.10
    assert isinstance(labels, pd.Series), "Labels need to be series"
    instance = labels.iloc[0]
    if isinstance(instance, str):
        # In case of using non processed strings, e.g., Vaccum, Speech
        label_array = labels.str.split(',').values.tolist()
    elif isinstance(instance, np.ndarray):
        # Encoder does not like to see numpy array
        label_array = [lab.tolist() for lab in labels]
    elif isinstance(instance, Iterable):
        label_array = labels
    else:
        raise ValueError(
            "Unsupported label element type: {}".format(type(instance)))
    if not encoder:
        # No encoder given: fit a fresh one on this data.
        encoder = pre.MultiLabelBinarizer(sparse_output=sparse)
        encoder.fit(label_array)
    labels_encoded = encoder.transform(label_array)  # transform string labels to digits
    return labels_encoded, encoder
8,597 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def _decode_with_timestamps(events, labels):
    """Turn one binary activity vector into (event, onset, offset) triples."""
    segments = find_contiguous_regions(labels)
    return [(events, start_end[0], start_end[1]) for start_end in segments]
The provided code snippet includes necessary dependencies for implementing the `decode_with_timestamps` function. Write a Python function `def decode_with_timestamps(events,labels: np.array)` to solve the following problem:
decode_with_timestamps Decodes the predicted label array (2d) into a list of [(Labelname, onset, offset), ...] :param encoder: Encoder during training :type encoder: pre.MultiLabelBinarizer :param labels: n-dim array :type labels: np.array
Here is the function:
def decode_with_timestamps(events,labels: np.array):
    """decode_with_timestamps
    Decodes the predicted label array (2d) into a list of
    [(Labelname, onset, offset), ...]
    :param events: event name(s) corresponding to the label rows
    :param labels: n-dim binary activity array
    :type labels: np.array
    """
    if labels.ndim != 2:
        return _decode_with_timestamps(events, labels)
    # Batched input: decode one row (one event) at a time.
    return [
        _decode_with_timestamps(events[row], labels[row])
        for row in range(labels.shape[0])
    ]
8,598 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def binarize(pred, threshold=0.5):
    """Threshold probabilities to {0, 1}; 3-d input is handled batch-wise."""
    if pred.ndim != 3:
        return pre.binarize(pred, threshold=threshold)
    return np.array(
        [pre.binarize(sample, threshold=threshold) for sample in pred])
The provided code snippet includes necessary dependencies for implementing the `median_filter` function. Write a Python function `def median_filter(x, window_size, threshold=0.5)` to solve the following problem:
median_filter :param x: input prediction array of shape (B, T, C) or (B, T). Input is a sequence of probabilities 0 <= x <= 1 :param window_size: An integer to use :param threshold: Binary thresholding threshold
Here is the function:
def median_filter(x, window_size, threshold=0.5):
    """median_filter
    :param x: input prediction array of shape (B, T, C) or (B, T).
        Input is a sequence of probabilities 0 <= x <= 1
    :param window_size: An integer to use
    :param threshold: Binary thresholding threshold
    """
    x = binarize(x, threshold=threshold)  # to 0/1 before smoothing
    # Choose the filter footprint so smoothing runs along the time axis only.
    if x.ndim == 3:
        filt_size = (1, window_size, 1)
    elif x.ndim == 2 and x.shape[0] == 1:
        # Class-specific filtering, e.g. Batch x Time [1, 501]
        filt_size = (1, window_size)
    elif x.ndim == 2 and x.shape[0] > 1:
        # Class-independent pooling, e.g. Time x Class [501, 10]
        filt_size = (window_size, 1)
    return scipy.ndimage.median_filter(x, size=filt_size)
8,599 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def inverse_transform_labels(encoder, pred):
    """Map encoded predictions back to labels; 3-d input is decoded per batch."""
    if pred.ndim != 3:
        return encoder.inverse_transform(pred)
    return [encoder.inverse_transform(batch) for batch in pred]
8,600 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def _double_threshold(x, high_thres, low_thres, n_connect=1, return_arr=True):
    """_double_threshold
    Computes a double threshold over the input array: regions above
    `low_thres` are kept only if they contain at least one sample above
    `high_thres` (hysteresis thresholding).

    :param x: input array, needs to be 1d
    :param high_thres: High threshold over the array
    :param low_thres: Low threshold over the array
    :param n_connect: Postprocessing, maximal distance between clusters to connect
    :param return_arr: By default this function returns the filtered indices,
        but if return_arr = True it returns a 0/1 array of the same size as x.
    """
    assert x.ndim == 1, "Input needs to be 1d"
    high_locations = np.where(x > high_thres)[0]
    candidate_regions = find_contiguous_regions(x > low_thres)
    # Keep only low-threshold regions that contain a high-threshold sample.
    kept = [
        pair for pair in candidate_regions
        if ((pair[0] <= high_locations) & (high_locations <= pair[1])).any()
    ]
    # Merge regions that are at most n_connect frames apart.
    kept = connect_(kept, n_connect)
    if not return_arr:
        return kept
    zero_one_arr = np.zeros_like(x, dtype=int)
    for region in kept:
        zero_one_arr[region[0]:region[1]] = 1
    return zero_one_arr
The provided code snippet includes necessary dependencies for implementing the `double_threshold` function. Write a Python function `def double_threshold(x, high_thres, low_thres, n_connect=1)` to solve the following problem:
double_threshold Helper function to calculate double threshold for n-dim arrays :param x: input array :param high_thres: high threshold value :param low_thres: Low threshold value :param n_connect: Distance of <= n clusters will be merged
Here is the function:
def double_threshold(x, high_thres, low_thres, n_connect=1):
    """double_threshold
    Helper function to calculate double threshold for n-dim arrays
    :param x: input array (1d: (time,), 2d: (time, dim), 3d: (batch, time, dim))
    :param high_thres: high threshold value
    :param low_thres: Low threshold value
    :param n_connect: Distance of <= n clusters will be merged
    """
    assert x.ndim <= 3, "Whoops something went wrong with the input ({}), check if its <= 3 dims".format(
        x.shape)
    # The time axis sits at 1 for 3d input and at 0 otherwise.
    time_axis = 1 if x.ndim == 3 else 0
    return np.apply_along_axis(
        lambda row: _double_threshold(
            row, high_thres, low_thres, n_connect=n_connect),
        axis=time_axis,
        arr=x)
8,601 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
# NOTE(review): the body of `connect_clusters_` appears to have been lost
# during extraction — as written it has no statements of its own. Verify
# against the original source before relying on this region.
def connect_clusters_(x, n=1):
def connect_clusters(x, n=1):
    # 1-d input: delegate to the single-sequence implementation.
    if x.ndim == 1:
        return connect_clusters_(x, n)
    # 2-d and higher: apply along the second-to-last axis (the time axis).
    if x.ndim >= 2:
        return np.apply_along_axis(lambda a: connect_clusters_(a, n=n), -2, x)
8,602 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def predictions_to_time(df, ratio):
    """Convert frame-index onset/offset columns to time units in place.

    :param df: DataFrame with 'onset' and 'offset' columns (frame indices)
    :param ratio: seconds per frame
    :return: the same DataFrame, mutated
    """
    df['onset'] = df['onset'] * ratio
    df['offset'] = df['offset'] * ratio
    return df
8,603 | import collections
import sys
from loguru import logger
from pprint import pformat
import numpy as np
import pandas as pd
import scipy
import six
import sklearn.preprocessing as pre
import torch
import tqdm
import yaml
from scipy.interpolate import interp1d
def upgrade_resolution(arr, scale):
    """Linearly interpolate `arr` along axis 0 to `scale`-times resolution.

    :param arr: array whose first axis is time
    :param scale: upsampling factor (output has scale * len(arr) samples)
    :return: interpolated array; values past the last sample are extrapolated

    Fix: removed a leftover debug print of the input shape.
    """
    x = np.arange(0, arr.shape[0])
    f = interp1d(x, arr, kind='linear', axis=0, fill_value='extrapolate')
    scale_x = np.arange(0, arr.shape[0], 1 / scale)
    up_scale = f(scale_x)
    return up_scale
8,604 | from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
import time
from functools import partial
import warnings
from functools import partial
import copy
from collections import OrderedDict
import io
import re
The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[(r'^module\.', '')])` to solve the following problem:
Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. revise_keys (list): A list of customized keywords to modify the state_dict in checkpoint. Each item is a (pattern, replacement) pair of the regular expression operations. Default: strip the prefix 'module.' by [(r'^module\\.', '')]. Returns: dict or OrderedDict: The loaded checkpoint.
Here is the function:
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.
    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].
    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    # Delegates file/URL resolution and torch.load to the project helper.
    checkpoint = _load_checkpoint(filename, map_location, logger)
    '''
    new_proj = torch.nn.Conv2d(1, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
    new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
    checkpoint['patch_embed1.proj.weight'] = new_proj.weight
    new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=2).unsqueeze(2).repeat(1,1,3,1))
    checkpoint['patch_embed1.proj.weight'] = new_proj.weight
    new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=3).unsqueeze(3).repeat(1,1,1,3))
    checkpoint['patch_embed1.proj.weight'] = new_proj.weight
    '''
    # Collapse the first patch-embedding conv's input channels by summing
    # over dim=1, presumably adapting a 3-channel (RGB) checkpoint to
    # 1-channel (spectrogram) input — TODO confirm against the model config.
    # NOTE(review): this unconditionally assumes 'patch_embed1.proj.weight'
    # exists in the checkpoint and will KeyError otherwise.
    new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
    new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
    checkpoint['patch_embed1.proj.weight'] = new_proj.weight
    # OrderedDict is a subclass of dict
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            f'No state_dict found in checkpoint file (unknown)')
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # strip prefix of state_dict (e.g. 'module.' added by DataParallel)
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    # Also drop a possible 'backbone.' prefix from detection-style checkpoints.
    state_dict = OrderedDict({k.replace('backbone.',''):v for k,v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata
    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
8,605 | from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
import time
from functools import partial
import warnings
from functools import partial
import copy
from collections import OrderedDict
import io
import re
def init_weights(m):
    """Kaiming-initialise conv/linear weights and reset batch-norm to identity."""
    if isinstance(m, (nn.Conv2d, nn.Conv1d)):
        nn.init.kaiming_normal_(m.weight)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.BatchNorm2d):
        # Identity transform: unit scale, zero shift.
        nn.init.constant_(m.weight, 1)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
        return
    if isinstance(m, nn.Linear):
        nn.init.kaiming_uniform_(m.weight)
        if m.bias is not None:
            nn.init.constant_(m.bias, 0)
8,606 | from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
import time
from functools import partial
import warnings
from functools import partial
import copy
from collections import OrderedDict
import io
import re
The provided code snippet includes necessary dependencies for implementing the `init_layer` function. Write a Python function `def init_layer(layer)` to solve the following problem:
Initialize a Linear or Convolutional layer.
Here is the function:
def init_layer(layer):
    """Initialize a Linear or Convolutional layer. """
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
8,607 | from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
import time
from functools import partial
import warnings
from functools import partial
import copy
from collections import OrderedDict
import io
import re
The provided code snippet includes necessary dependencies for implementing the `init_bn` function. Write a Python function `def init_bn(bn)` to solve the following problem:
Initialize a Batchnorm layer.
Here is the function:
def init_bn(bn):
    """Initialize a Batchnorm layer. """
    nn.init.zeros_(bn.bias)
    nn.init.ones_(bn.weight)
8,608 | from itertools import zip_longest
import numpy as np
from scipy import ndimage
import torch
import torch.nn as nn
import torch.nn.functional as F
import time
from torchlibrosa.augmentation import SpecAugmentation
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
import math
from sklearn.cluster import KMeans
import os
import time
from functools import partial
import warnings
from functools import partial
import copy
from collections import OrderedDict
import io
import re
class MaxPool(nn.Module):
    """Temporal max pooling over dimension `pooldim` of the decisions."""
    def __init__(self, pooldim=1):
        super().__init__()
        self.pooldim = pooldim

    def forward(self, logits, decision):
        # logits are ignored; keep the per-class maximum over time.
        pooled, _ = torch.max(decision, dim=self.pooldim)
        return pooled
class LinearSoftPool(nn.Module):
    """LinearSoftPool
    Linear softmax, takes logits and returns a probability, near to the actual maximum value.
    Taken from the paper:
    A Comparison of Five Multiple Instance Learning Pooling Functions for Sound Event Detection with Weak Labeling
    https://arxiv.org/abs/1810.09050
    """
    def __init__(self, pooldim=1):
        super().__init__()
        self.pooldim = pooldim

    def forward(self, logits, time_decision):
        # sum(p^2) / sum(p): probabilities weight themselves.
        numerator = (time_decision ** 2).sum(self.pooldim)
        denominator = time_decision.sum(self.pooldim) + 1e-7
        return numerator / denominator
class MeanPool(nn.Module):
    """Temporal mean pooling over dimension `pooldim` of the decisions."""
    def __init__(self, pooldim=1):
        super().__init__()
        self.pooldim = pooldim

    def forward(self, logits, decision):
        # logits are ignored; average the frame-level decisions.
        return decision.mean(dim=self.pooldim)
class AutoExpPool(nn.Module):
    """Exponential pooling of logits weighted by exp(alpha * decision),
    with a learnable per-class scale `alpha` (initialised to 1).
    """
    def __init__(self, outputdim=10, pooldim=1):
        super().__init__()
        self.outputdim = outputdim
        # Fix: torch.full with an int fill value produces an int64 tensor,
        # and nn.Parameter of an integer tensor cannot require grad
        # (raises on modern torch). Use a float fill value instead.
        self.alpha = nn.Parameter(torch.full((outputdim, ), 1.0))
        self.pooldim = pooldim

    def forward(self, logits, decision):
        scaled = self.alpha * decision  # \alpha * P(Y|x) in the paper
        return (logits * torch.exp(scaled)).sum(
            self.pooldim) / torch.exp(scaled).sum(self.pooldim)
class SoftPool(nn.Module):
    """Softmax-weighted pooling of decisions; `T` is the softmax temperature."""
    def __init__(self, T=1, pooldim=1):
        super().__init__()
        self.pooldim = pooldim
        self.T = T

    def forward(self, logits, decision):
        # Each frame is weighted by its own softmax score over time.
        weights = torch.softmax(decision / self.T, dim=self.pooldim)
        return (decision * weights).sum(dim=self.pooldim)
class AutoPool(nn.Module):
    """Auto-pooling with a learnable per-class temperature `alpha`
    (softmax-weighted average of the decisions)."""
    def __init__(self, outputdim=10, pooldim=1):
        super().__init__()
        self.outputdim = outputdim
        self.alpha = nn.Parameter(torch.ones(outputdim))
        self.dim = pooldim

    def forward(self, logits, decision):
        scaled = self.alpha * decision  # \alpha * P(Y|x) in the paper
        weight = torch.softmax(scaled, dim=self.dim)
        pooled = (decision * weight).sum(dim=self.dim)  # B x C
        return pooled
class AttentionPool(nn.Module):
    """Attention pooling: a linear layer on the logits produces softmax
    weights (clamped to [-15, 15] for numerical stability) that average
    the decisions over time."""
    def __init__(self, inputdim, outputdim=10, pooldim=1, **kwargs):
        super().__init__()
        self.inputdim = inputdim
        self.outputdim = outputdim
        self.pooldim = pooldim
        self.transform = nn.Linear(inputdim, outputdim)
        self.activ = nn.Softmax(dim=self.pooldim)
        self.eps = 1e-7

    def forward(self, logits, decision):
        # Input is (B, T, D); attention weights over the time axis.
        attn = self.activ(torch.clamp(self.transform(logits), -15, 15))
        weighted = (decision * attn).sum(self.pooldim)
        norm = attn.sum(self.pooldim) + self.eps
        return weighted / norm
The provided code snippet includes necessary dependencies for implementing the `parse_poolingfunction` function. Write a Python function `def parse_poolingfunction(poolingfunction_name='mean', **kwargs)` to solve the following problem:
parse_poolingfunction A heler function to parse any temporal pooling Pooling is done on dimension 1 :param poolingfunction_name: :param **kwargs:
Here is the function:
def parse_poolingfunction(poolingfunction_name='mean', **kwargs):
    """parse_poolingfunction
    A helper function to parse any temporal pooling.
    Pooling is done on dimension 1.

    :param poolingfunction_name: one of 'mean', 'max', 'linear', 'expalpha',
        'soft', 'auto', 'attention' (case-insensitive)
    :param **kwargs: constructor extras; 'outputdim' is required for
        'expalpha'/'auto'/'attention', 'inputdim' for 'attention'
    :raises ValueError: for an unknown pooling name (previously the function
        silently returned None, deferring the failure to the first call)
    """
    poolingfunction_name = poolingfunction_name.lower()
    if poolingfunction_name == 'mean':
        return MeanPool(pooldim=1)
    elif poolingfunction_name == 'max':
        return MaxPool(pooldim=1)
    elif poolingfunction_name == 'linear':
        return LinearSoftPool(pooldim=1)
    elif poolingfunction_name == 'expalpha':
        return AutoExpPool(outputdim=kwargs['outputdim'], pooldim=1)
    elif poolingfunction_name == 'soft':
        return SoftPool(pooldim=1)
    elif poolingfunction_name == 'auto':
        return AutoPool(outputdim=kwargs['outputdim'])
    elif poolingfunction_name == 'attention':
        return AttentionPool(inputdim=kwargs['inputdim'],
                             outputdim=kwargs['outputdim'])
    raise ValueError(
        "Unknown pooling function: {!r}".format(poolingfunction_name))
8,609 | import numpy as np
import time
import torch
import torch.nn as nn
def move_data_to_device(x, device):
    """Convert float/int numpy data to a torch tensor on `device`;
    any other dtype is returned unchanged."""
    dtype_name = str(x.dtype)
    if 'float' in dtype_name:
        tensor = torch.Tensor(x)
    elif 'int' in dtype_name:
        tensor = torch.LongTensor(x)
    else:
        # Unsupported dtype (e.g. bool, strings): pass through untouched.
        return x
    return tensor.to(device)
def append_to_dict(dict, key, value):
    """Append *value* to the list stored under *key*, creating it if absent.

    NOTE: the first parameter shadows the builtin ``dict``; the name is kept
    for backward compatibility with existing callers.
    """
    # setdefault does a single lookup instead of the previous
    # ``key in dict.keys()`` membership test followed by an index.
    dict.setdefault(key, []).append(value)
The provided code snippet includes necessary dependencies for implementing the `forward` function. Write a Python function `def forward(model, generator, return_input=False, return_target=False)` to solve the following problem:
Forward data to a model. Args: model: object generator: object return_input: bool return_target: bool Returns: audio_name: (audios_num,) clipwise_output: (audios_num, classes_num) (ifexist) segmentwise_output: (audios_num, segments_num, classes_num) (ifexist) framewise_output: (audios_num, frames_num, classes_num) (optional) return_input: (audios_num, segment_samples) (optional) return_target: (audios_num, classes_num)
Here is the function:
def forward(model, generator, return_input=False,
    return_target=False):
    """Forward data to a model.
    Args:
      model: object (torch module returning a dict with 'clipwise_output')
      generator: object yielding batch dicts with at least 'waveform'
      return_input: bool, also collect the raw waveforms
      return_target: bool, also collect the targets (if present)
    Returns:
      output_dict with keys:
        audio_name: (audios_num,)
        clipwise_output: (audios_num, classes_num)
        (if produced) segmentwise_output: (audios_num, segments_num, classes_num)
        (if produced) framewise_output: (audios_num, frames_num, classes_num)
        (optional) waveform: (audios_num, segment_samples)
        (optional) target: (audios_num, classes_num)
    """
    output_dict = {}
    device = next(model.parameters()).device
    time1 = time.time()

    # Forward data to the model in mini-batches.
    # (A per-batch debug ``print(n)`` was removed here.)
    for n, batch_data_dict in enumerate(generator):
        batch_waveform = move_data_to_device(batch_data_dict['waveform'], device)

        with torch.no_grad():
            model.eval()
            batch_output = model(batch_waveform)

        append_to_dict(output_dict, 'audio_name', batch_data_dict['audio_name'])

        append_to_dict(output_dict, 'clipwise_output',
            batch_output['clipwise_output'].data.cpu().numpy())

        if 'segmentwise_output' in batch_output.keys():
            append_to_dict(output_dict, 'segmentwise_output',
                batch_output['segmentwise_output'].data.cpu().numpy())

        if 'framewise_output' in batch_output.keys():
            append_to_dict(output_dict, 'framewise_output',
                batch_output['framewise_output'].data.cpu().numpy())

        if return_input:
            append_to_dict(output_dict, 'waveform', batch_data_dict['waveform'])

        if return_target:
            if 'target' in batch_data_dict.keys():
                append_to_dict(output_dict, 'target', batch_data_dict['target'])

        if n % 10 == 0:
            print(' --- Inference time: {:.3f} s / 10 iterations ---'.format(
                time.time() - time1))
            time1 = time.time()

    # Concatenate the per-batch arrays into full-dataset arrays.
    for key in output_dict.keys():
        output_dict[key] = np.concatenate(output_dict[key], axis=0)

    return output_dict
8,610 | import numpy as np
import time
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `interpolate` function. Write a Python function `def interpolate(x, ratio)` to solve the following problem:
Interpolate data in time domain. This is used to compensate the resolution reduction in downsampling of a CNN. Args: x: (batch_size, time_steps, classes_num) ratio: int, ratio to interpolate Returns: upsampled: (batch_size, time_steps * ratio, classes_num)
Here is the function:
def interpolate(x, ratio):
    """Upsample along the time axis by repeating each step *ratio* times.

    This compensates the temporal resolution lost through CNN downsampling.

    Args:
        x: (batch_size, time_steps, classes_num)
        ratio: int, upsampling factor

    Returns:
        upsampled: (batch_size, time_steps * ratio, classes_num)
    """
    batch, steps, classes = x.shape
    # Insert a new axis, tile it, then fold it back into the time axis.
    tiled = x.unsqueeze(2).expand(batch, steps, ratio, classes)
    return tiled.reshape(batch, steps * ratio, classes)
8,611 | import numpy as np
import time
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `pad_framewise_output` function. Write a Python function `def pad_framewise_output(framewise_output, frames_num)` to solve the following problem:
Pad framewise_output to the same length as input frames. The pad value is the same as the value of the last frame. Args: framewise_output: (batch_size, frames_num, classes_num) frames_num: int, number of frames to pad Outputs: output: (batch_size, frames_num, classes_num)
Here is the function:
def pad_framewise_output(framewise_output, frames_num):
    """Pad framewise_output along time up to *frames_num* frames.

    The last frame is repeated as the pad value.

    Args:
        framewise_output: (batch_size, frames_num, classes_num)
        frames_num: int, target number of frames

    Outputs:
        output: (batch_size, frames_num, classes_num)
    """
    missing = frames_num - framewise_output.shape[1]
    last_frame = framewise_output[:, -1:, :]
    # Tile the final frame to fill the gap, then stitch it on.
    pad = last_frame.repeat(1, missing, 1)
    return torch.cat((framewise_output, pad), dim=1)
8,612 | import numpy as np
import time
import torch
import torch.nn as nn
def count_parameters(model):
    """Return the number of trainable parameters in *model*."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
8,613 | import numpy as np
import time
import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `count_flops` function. Write a Python function `def count_flops(model, audio_length)` to solve the following problem:
Count flops. Code modified from others' implementation.
Here is the function:
def count_flops(model, audio_length):
    """Count flops. Code modified from others' implementation.

    Registers forward hooks on the leaf modules of *model*, runs one dummy
    clip of ``audio_length`` samples through it, and sums the per-layer
    multiply-add estimates.

    Args:
        model: nn.Module that accepts a (batch, audio_length) waveform
        audio_length: int, number of input samples

    Returns:
        Estimated total flops of one forward pass.
    """
    multiply_adds = True
    list_conv2d=[]
    def conv2d_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        kernel_ops = self.kernel_size[0] * self.kernel_size[1] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0

        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_conv2d.append(flops)

    list_conv1d=[]
    def conv1d_hook(self, input, output):
        batch_size, input_channels, input_length = input[0].size()
        output_channels, output_length = output[0].size()

        kernel_ops = self.kernel_size[0] * (self.in_channels / self.groups) * (2 if multiply_adds else 1)
        bias_ops = 1 if self.bias is not None else 0

        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_length

        list_conv1d.append(flops)

    list_linear=[]
    def linear_hook(self, input, output):
        batch_size = input[0].size(0) if input[0].dim() == 2 else 1

        weight_ops = self.weight.nelement() * (2 if multiply_adds else 1)
        bias_ops = self.bias.nelement()

        flops = batch_size * (weight_ops + bias_ops)
        list_linear.append(flops)

    list_bn=[]
    def bn_hook(self, input, output):
        list_bn.append(input[0].nelement() * 2)

    list_relu=[]
    def relu_hook(self, input, output):
        list_relu.append(input[0].nelement() * 2)

    list_pooling2d=[]
    def pooling2d_hook(self, input, output):
        batch_size, input_channels, input_height, input_width = input[0].size()
        output_channels, output_height, output_width = output[0].size()

        kernel_ops = self.kernel_size * self.kernel_size
        bias_ops = 0
        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_height * output_width

        list_pooling2d.append(flops)

    list_pooling1d=[]
    def pooling1d_hook(self, input, output):
        batch_size, input_channels, input_length = input[0].size()
        output_channels, output_length = output[0].size()

        kernel_ops = self.kernel_size[0]
        bias_ops = 0

        params = output_channels * (kernel_ops + bias_ops)
        flops = batch_size * params * output_length

        # BUGFIX: this previously appended to list_pooling2d, leaving
        # list_pooling1d permanently empty. The grand total was unaffected
        # (both lists are summed), but per-kind accounting was wrong.
        list_pooling1d.append(flops)

    def foo(net):
        # Recursively attach the matching hook to every leaf module.
        childrens = list(net.children())
        if not childrens:
            if isinstance(net, nn.Conv2d):
                net.register_forward_hook(conv2d_hook)
            elif isinstance(net, nn.Conv1d):
                net.register_forward_hook(conv1d_hook)
            elif isinstance(net, nn.Linear):
                net.register_forward_hook(linear_hook)
            elif isinstance(net, nn.BatchNorm2d) or isinstance(net, nn.BatchNorm1d):
                net.register_forward_hook(bn_hook)
            elif isinstance(net, nn.ReLU):
                net.register_forward_hook(relu_hook)
            elif isinstance(net, nn.AvgPool2d) or isinstance(net, nn.MaxPool2d):
                net.register_forward_hook(pooling2d_hook)
            elif isinstance(net, nn.AvgPool1d) or isinstance(net, nn.MaxPool1d):
                net.register_forward_hook(pooling1d_hook)
            else:
                print('Warning: flop of module {} is not counted!'.format(net))
            return
        for c in childrens:
            foo(c)

    # Register hook
    foo(model)

    # (Was ``device = device = next(...)`` — duplicated assignment removed.)
    device = next(model.parameters()).device
    input = torch.rand(1, audio_length).to(device)
    out = model(input)

    total_flops = sum(list_conv2d) + sum(list_conv1d) + sum(list_linear) + \
        sum(list_bn) + sum(list_relu) + sum(list_pooling2d) + sum(list_pooling1d)

    return total_flops
8,614 | import os
import sys
import numpy as np
import argparse
import time
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import (create_folder, get_filename, create_logging, Mixup,
StatisticsContainer)
from models import (PVT, PVT2, PVT_lr, PVT_nopretrain, PVT_2layer, Cnn14, Cnn14_no_specaug, Cnn14_no_dropout,
Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128,
Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19,
Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14,
Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128,
Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn6_Transformer, GLAM, GLAM2, GLAM3, Cnn4, EAT)
from pytorch_utils import (move_data_to_device, count_parameters, count_flops,
do_mixup)
from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler,
AlternateTrainSampler, EvaluateSampler, collate_fn)
from evaluate import Evaluator
import config
from losses import get_loss_func
def create_folder(fd):
    """Create directory *fd* (including parents) unless the path exists."""
    if os.path.exists(fd):
        return
    os.makedirs(fd)
def create_logging(log_dir, filemode):
    """Set up root logging to a fresh numbered file in *log_dir*.

    Picks the first unused ``NNNN.log`` name, routes DEBUG-and-up records to
    that file and mirrors INFO-and-up records to the console.

    Returns:
        The configured ``logging`` module.
    """
    create_folder(log_dir)

    # Find the first index whose log file does not exist yet.
    idx = 0
    while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(idx))):
        idx += 1
    log_path = os.path.join(log_dir, '{:04d}.log'.format(idx))

    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=log_path,
        filemode=filemode)

    # Mirror INFO-and-up messages to the console as well.
    # NOTE(review): each call adds another StreamHandler — duplicate console
    # output if called twice; same as the original behaviour.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(
        logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s'))
    logging.getLogger('').addHandler(console)

    return logging
class Mixup(object):
    """Mixup coefficient generator."""

    def __init__(self, mixup_alpha, random_seed=1234):
        """Mixup coefficient generator.

        Args:
            mixup_alpha: float, Beta-distribution parameter.
            random_seed: int, seed of the internal RNG.
        """
        self.mixup_alpha = mixup_alpha
        self.random_state = np.random.RandomState(random_seed)

    def get_lambda(self, batch_size):
        """Get mixup random coefficients.

        Coefficients come in complementary pairs (lam, 1 - lam), one pair
        for every two batch entries.

        Args:
            batch_size: int

        Returns:
            mixup_lambdas: (batch_size,)
        """
        lambdas = []
        for _ in range(0, batch_size, 2):
            lam = self.random_state.beta(
                self.mixup_alpha, self.mixup_alpha, 1)[0]
            lambdas.extend((lam, 1. - lam))
        return np.array(lambdas)
class StatisticsContainer(object):
    """Contain statistics of different training iterations."""

    def __init__(self, statistics_path):
        """Contain statistics of different training iterations.

        Args:
            statistics_path: str, pickle file the statistics are dumped to.
        """
        self.statistics_path = statistics_path

        # Timestamped backup path next to the main pickle file.
        stem = os.path.splitext(self.statistics_path)[0]
        timestamp = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        self.backup_statistics_path = '{}_{}.pkl'.format(stem, timestamp)

        self.statistics_dict = {'bal': [], 'test': []}

    def append(self, iteration, statistics, data_type):
        # Tag the record with its iteration before storing it.
        statistics['iteration'] = iteration
        self.statistics_dict[data_type].append(statistics)

    def dump(self):
        # Write the main file and the timestamped backup.
        for path in (self.statistics_path, self.backup_statistics_path):
            with open(path, 'wb') as f:
                pickle.dump(self.statistics_dict, f)
        for path in (self.statistics_path, self.backup_statistics_path):
            logging.info('    Dump statistics to {}'.format(path))

    def load_state_dict(self, resume_iteration):
        self.statistics_dict = pickle.load(open(self.statistics_path, 'rb'))

        # Keep only the records written up to the resume point.
        resumed = {'bal': [], 'test': []}
        for key in self.statistics_dict.keys():
            resumed[key].extend(
                stat for stat in self.statistics_dict[key]
                if stat['iteration'] <= resume_iteration)
        self.statistics_dict = resumed
def move_data_to_device(x, device):
    """Wrap numeric numpy data in a torch tensor on *device*.

    float* dtypes -> torch.Tensor, int* dtypes -> torch.LongTensor;
    any other dtype is returned unchanged.
    """
    kind = str(x.dtype)
    if 'float' in kind:
        return torch.Tensor(x).to(device)
    if 'int' in kind:
        return torch.LongTensor(x).to(device)
    return x
def do_mixup(x, mixup_lambda):
    """Mixup x of even indexes (0, 2, 4, ...) with x of odd indexes
    (1, 3, 5, ...).

    Args:
        x: (batch_size * 2, ...)
        mixup_lambda: (batch_size * 2,)

    Returns:
        out: (batch_size, ...)
    """
    # Transposing moves the batch axis last so the 1-D lambda broadcasts.
    even = x[0::2].transpose(0, -1) * mixup_lambda[0::2]
    odd = x[1::2].transpose(0, -1) * mixup_lambda[1::2]
    return (even + odd).transpose(0, -1)
class AudioSetDataset(object):
    def __init__(self, sample_rate=32000):
        """This class takes the meta of an audio clip as input, and returns
        the waveform and target of the audio clip. Used by DataLoader.

        Args:
            sample_rate: int, one of 32000 / 16000 / 8000.
        """
        self.sample_rate = sample_rate

    def __getitem__(self, meta):
        """Load waveform and target of an audio clip.

        Args:
            meta: {'hdf5_path': str, 'index_in_hdf5': int}

        Returns:
            data_dict: {
                'audio_name': str,
                'waveform': (clip_samples,),
                'target': (classes_num,)}
        """
        hdf5_path = meta['hdf5_path']
        idx = meta['index_in_hdf5']
        with h5py.File(hdf5_path, 'r') as hf:
            audio_name = hf['audio_name'][idx].decode()
            waveform = self.resample(int16_to_float32(hf['waveform'][idx]))
            target = hf['target'][idx].astype(np.float32)

        return {'audio_name': audio_name, 'waveform': waveform,
                'target': target}

    def resample(self, waveform):
        """Decimate *waveform* down to ``self.sample_rate``.

        Args:
            waveform: (clip_samples,)

        Returns:
            (resampled_clip_samples,)
        """
        rate = self.sample_rate
        if rate == 32000:
            return waveform
        if rate == 16000:
            # Simple decimation by 2 (no anti-alias filtering).
            return waveform[0 :: 2]
        if rate == 8000:
            return waveform[0 :: 4]
        raise Exception('Incorrect sample rate!')
class TrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
        random_seed=1234):
        """Sampler. Generate batch meta for training by uniform sampling.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        super(TrainSampler, self).__init__(indexes_hdf5_path, batch_size,
            black_list_csv, random_seed)

        self.indexes = np.arange(self.audios_num)

        # Shuffle once up front; reshuffled at every epoch boundary.
        self.random_state.shuffle(self.indexes)
        self.pointer = 0

    def __iter__(self):
        """Generate batch meta for training (an infinite stream of batches).

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        while True:
            batch_meta = []
            while len(batch_meta) < self.batch_size:
                index = self.indexes[self.pointer]
                self.pointer += 1

                # Epoch boundary: reshuffle and restart from the beginning.
                if self.pointer >= self.audios_num:
                    self.pointer = 0
                    self.random_state.shuffle(self.indexes)

                # Black-listed clips are skipped entirely.
                if self.audio_names[index] in self.black_list_names:
                    continue
                batch_meta.append({
                    'hdf5_path': self.hdf5_paths[index],
                    'index_in_hdf5': self.indexes_in_hdf5[index]})
            yield batch_meta

    def state_dict(self):
        return {'indexes': self.indexes, 'pointer': self.pointer}

    def load_state_dict(self, state):
        self.indexes = state['indexes']
        self.pointer = state['pointer']
class BalancedTrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
        random_seed=1234):
        """Balanced sampler. Generate batch meta for training. Data are equally
        sampled from different sound classes.

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        super(BalancedTrainSampler, self).__init__(indexes_hdf5_path,
            batch_size, black_list_csv, random_seed)
        # Per-class sample counts, derived from the multi-hot target matrix.
        self.samples_num_per_class = np.sum(self.targets, axis=0)
        logging.info('samples_num_per_class: {}'.format(
            self.samples_num_per_class.astype(np.int32)))
        # Training indexes of all sound classes. E.g.:
        # [[0, 11, 12, ...], [3, 4, 15, 16, ...], [7, 8, ...], ...]
        self.indexes_per_class = []
        for k in range(self.classes_num):
            self.indexes_per_class.append(
                np.where(self.targets[:, k] == 1)[0])
        # Shuffle indexes
        for k in range(self.classes_num):
            self.random_state.shuffle(self.indexes_per_class[k])
        # ``queue`` holds class ids to draw from next; refilled with a fresh
        # random permutation of all classes whenever it runs empty, so every
        # class is drawn equally often over time.
        self.queue = []
        # Per-class read position into ``indexes_per_class``.
        self.pointers_of_classes = [0] * self.classes_num

    def expand_queue(self, queue):
        # Append one full random permutation of all class ids.
        classes_set = np.arange(self.classes_num).tolist()
        self.random_state.shuffle(classes_set)
        queue += classes_set
        return queue

    def __iter__(self):
        """Generate batch meta for training (an infinite stream of batches).

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        batch_size = self.batch_size
        while True:
            batch_meta = []
            i = 0
            while i < batch_size:
                # Refill the class queue when it runs empty.
                if len(self.queue) == 0:
                    self.queue = self.expand_queue(self.queue)
                class_id = self.queue.pop(0)
                pointer = self.pointers_of_classes[class_id]
                self.pointers_of_classes[class_id] += 1
                index = self.indexes_per_class[class_id][pointer]
                # When finish one epoch of a sound class, then shuffle its indexes and reset pointer
                if self.pointers_of_classes[class_id] >= self.samples_num_per_class[class_id]:
                    self.pointers_of_classes[class_id] = 0
                    self.random_state.shuffle(self.indexes_per_class[class_id])
                # If audio in black list then continue
                if self.audio_names[index] in self.black_list_names:
                    continue
                else:
                    batch_meta.append({
                        'hdf5_path': self.hdf5_paths[index],
                        'index_in_hdf5': self.indexes_in_hdf5[index]})
                    i += 1
            yield batch_meta

    def state_dict(self):
        # Everything needed to resume sampling deterministically
        # (except the RNG state, which is not captured here).
        state = {
            'indexes_per_class': self.indexes_per_class,
            'queue': self.queue,
            'pointers_of_classes': self.pointers_of_classes}
        return state

    def load_state_dict(self, state):
        self.indexes_per_class = state['indexes_per_class']
        self.queue = state['queue']
        self.pointers_of_classes = state['pointers_of_classes']
class AlternateTrainSampler(Base):
    def __init__(self, indexes_hdf5_path, batch_size, black_list_csv=None,
        random_seed=1234):
        """AlternateSampler is a combination of Sampler and Balanced Sampler.
        AlternateSampler alternately samples data from Sampler and Balanced
        Sampler (odd batches come from the balanced sampler, even batches
        from the uniform one).

        Args:
            indexes_hdf5_path: string
            batch_size: int
            black_list_csv: string
            random_seed: int
        """
        self.sampler1 = TrainSampler(indexes_hdf5_path, batch_size,
            black_list_csv, random_seed)
        self.sampler2 = BalancedTrainSampler(indexes_hdf5_path, batch_size,
            black_list_csv, random_seed)
        self.batch_size = batch_size
        # Batch counter used to alternate between the two samplers.
        self.count = 0

    def __iter__(self):
        """Generate batch meta for training (an infinite stream of batches).

        Returns:
            batch_meta: e.g.: [
                {'hdf5_path': string, 'index_in_hdf5': int},
                ...]
        """
        batch_size = self.batch_size
        while True:
            self.count += 1
            # Even count: uniform sampling (same logic as TrainSampler.__iter__,
            # inlined here so sampler1's internal state stays consistent).
            if self.count % 2 == 0:
                batch_meta = []
                i = 0
                while i < batch_size:
                    index = self.sampler1.indexes[self.sampler1.pointer]
                    self.sampler1.pointer += 1
                    # Shuffle indexes and reset pointer
                    if self.sampler1.pointer >= self.sampler1.audios_num:
                        self.sampler1.pointer = 0
                        self.sampler1.random_state.shuffle(self.sampler1.indexes)
                    # If audio in black list then continue
                    if self.sampler1.audio_names[index] in self.sampler1.black_list_names:
                        continue
                    else:
                        batch_meta.append({
                            'hdf5_path': self.sampler1.hdf5_paths[index],
                            'index_in_hdf5': self.sampler1.indexes_in_hdf5[index]})
                        i += 1
            # Odd count: class-balanced sampling (mirrors
            # BalancedTrainSampler.__iter__).
            elif self.count % 2 == 1:
                batch_meta = []
                i = 0
                while i < batch_size:
                    if len(self.sampler2.queue) == 0:
                        self.sampler2.queue = self.sampler2.expand_queue(self.sampler2.queue)
                    class_id = self.sampler2.queue.pop(0)
                    pointer = self.sampler2.pointers_of_classes[class_id]
                    self.sampler2.pointers_of_classes[class_id] += 1
                    index = self.sampler2.indexes_per_class[class_id][pointer]
                    # When finish one epoch of a sound class, then shuffle its indexes and reset pointer
                    if self.sampler2.pointers_of_classes[class_id] >= self.sampler2.samples_num_per_class[class_id]:
                        self.sampler2.pointers_of_classes[class_id] = 0
                        self.sampler2.random_state.shuffle(self.sampler2.indexes_per_class[class_id])
                    # If audio in black list then continue
                    if self.sampler2.audio_names[index] in self.sampler2.black_list_names:
                        continue
                    else:
                        batch_meta.append({
                            'hdf5_path': self.sampler2.hdf5_paths[index],
                            'index_in_hdf5': self.sampler2.indexes_in_hdf5[index]})
                        i += 1
            yield batch_meta

    def state_dict(self):
        # Delegates to both wrapped samplers; ``count`` itself is not saved,
        # so the even/odd phase restarts after a resume.
        state = {
            'sampler1': self.sampler1.state_dict(),
            'sampler2': self.sampler2.state_dict()}
        return state

    def load_state_dict(self, state):
        self.sampler1.load_state_dict(state['sampler1'])
        self.sampler2.load_state_dict(state['sampler2'])
class EvaluateSampler(object):
    def __init__(self, indexes_hdf5_path, batch_size):
        """Evaluate sampler. Generate batch meta for evaluation.

        Args:
            indexes_hdf5_path: string
            batch_size: int
        """
        self.batch_size = batch_size

        with h5py.File(indexes_hdf5_path, 'r') as hf:
            self.audio_names = [name.decode() for name in hf['audio_name'][:]]
            self.hdf5_paths = [path.decode() for path in hf['hdf5_path'][:]]
            self.indexes_in_hdf5 = hf['index_in_hdf5'][:]
            self.targets = hf['target'][:].astype(np.float32)

        self.audios_num = len(self.audio_names)

    def __iter__(self):
        """Yield evaluation batches in order; the last batch may be smaller.

        Returns:
            batch_meta: e.g.: [
                {'audio_name': str, 'hdf5_path': str,
                 'index_in_hdf5': int, 'target': (classes_num,)},
                ...]
        """
        for start in range(0, self.audios_num, self.batch_size):
            stop = min(start + self.batch_size, self.audios_num)
            yield [{
                'audio_name': self.audio_names[index],
                'hdf5_path': self.hdf5_paths[index],
                'index_in_hdf5': self.indexes_in_hdf5[index],
                'target': self.targets[index]}
                for index in range(start, stop)]
def collate_fn(list_data_dict):
    """Collate per-clip dicts into one dict of batched numpy arrays.

    Args:
        list_data_dict, e.g., [{'audio_name': str, 'waveform': (clip_samples,), ...},
                               {'audio_name': str, 'waveform': (clip_samples,), ...},
                               ...]

    Returns:
        np_data_dict, dict, e.g.,
            {'audio_name': (batch_size,), 'waveform': (batch_size, clip_samples), ...}
    """
    # All items are assumed to share the keys of the first one.
    keys = list_data_dict[0].keys()
    return {key: np.array([item[key] for item in list_data_dict])
            for key in keys}
class Evaluator(object):
    def __init__(self, model):
        """Evaluator.

        Args:
            model: object
        """
        self.model = model

    def evaluate(self, data_loader):
        """Forward evaluation data and calculate statistics.

        Args:
            data_loader: object

        Returns:
            statistics: dict,
                {'average_precision': (classes_num,), 'auc': (classes_num,)}
        """
        # Run the whole evaluation set through the model, keeping targets.
        output_dict = forward(
            model=self.model, generator=data_loader, return_target=True)

        clipwise_output = output_dict['clipwise_output']    # (audios_num, classes_num)
        target = output_dict['target']    # (audios_num, classes_num)

        # Per-class metrics (average=None keeps one score per class).
        average_precision = metrics.average_precision_score(
            target, clipwise_output, average=None)
        auc = metrics.roc_auc_score(target, clipwise_output, average=None)

        return {'average_precision': average_precision, 'auc': auc}
def get_loss_func(loss_type):
    """Map a loss-type name to its loss function.

    Args:
        loss_type: str, currently only 'clip_bce' is supported.

    Returns:
        The loss callable.

    Raises:
        ValueError: for an unknown loss type (previously returned None,
            which only failed when the loss was first called).
    """
    if loss_type == 'clip_bce':
        return clip_bce
    raise ValueError('Unknown loss type: {}'.format(loss_type))
The provided code snippet includes necessary dependencies for implementing the `train` function. Write a Python function `def train(args)` to solve the following problem:
Train AudioSet tagging model. Args: dataset_dir: str workspace: str data_type: 'balanced_train' | 'full_train' window_size: int hop_size: int mel_bins: int model_type: str loss_type: 'clip_bce' balanced: 'none' | 'balanced' | 'alternate' augmentation: 'none' | 'mixup' batch_size: int learning_rate: float resume_iteration: int early_stop: int accumulation_steps: int cuda: bool
Here is the function:
def train(args):
    """Train AudioSet tagging model.

    Args (fields of ``args``):
        workspace: str
        data_type: 'balanced_train' | 'full_train'
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax: int
        model_type: str, class name resolved via eval()
        loss_type: 'clip_bce'
        balanced: 'none' | 'balanced' | 'alternate'
        augmentation: 'none' | 'mixup'
        batch_size: int
        learning_rate: float
        resume_iteration: int, 0 means start from scratch
        early_stop: int, iteration at which training stops
        cuda: bool
        filename: str, used to build checkpoint/log paths
    """

    # Arguments & parameters
    workspace = args.workspace
    data_type = args.data_type
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    loss_type = args.loss_type
    balanced = args.balanced
    augmentation = args.augmentation
    batch_size = args.batch_size
    learning_rate = args.learning_rate
    resume_iteration = args.resume_iteration
    early_stop = args.early_stop
    device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')
    filename = args.filename
    num_workers = 8
    clip_samples = config.clip_samples
    classes_num = config.classes_num
    loss_func = get_loss_func(loss_type)

    # Paths
    black_list_csv = None
    train_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
        '{}.h5'.format(data_type))
    eval_bal_indexes_hdf5_path = os.path.join(workspace,
        'hdf5s', 'indexes', 'balanced_train.h5')
    eval_test_indexes_hdf5_path = os.path.join(workspace, 'hdf5s', 'indexes',
        'eval.h5')
    # Checkpoints / statistics / logs all share the same hyperparameter-keyed
    # directory layout so runs never overwrite each other.
    checkpoints_dir = os.path.join(workspace, 'checkpoints', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
    create_folder(checkpoints_dir)
    statistics_path = os.path.join(workspace, 'statistics', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
        'statistics.pkl')
    create_folder(os.path.dirname(statistics_path))
    logs_dir = os.path.join(workspace, 'logs', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size))
    create_logging(logs_dir, filemode='w')
    logging.info(args)
    if 'cuda' in str(device):
        logging.info('Using GPU.')
        device = 'cuda'
    else:
        logging.info('Using CPU. Set --cuda flag to use GPU.')
        device = 'cpu'

    # Model
    # NOTE(review): eval() resolves the model class from a CLI-supplied
    # string; safe only because model_type comes from a trusted operator.
    Model = eval(model_type)
    model = Model(sample_rate=sample_rate, window_size=window_size,
        hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
        classes_num=classes_num)
    total = sum(p.numel() for p in model.parameters())
    print("Total params: %.2fM" % (total/1e6))
    logging.info("Total params: %.2fM" % (total/1e6))
    #params_num = count_parameters(model)
    # flops_num = count_flops(model, clip_samples)
    #logging.info('Parameters num: {}'.format(params_num))
    # logging.info('Flops num: {:.3f} G'.format(flops_num / 1e9))

    # Dataset will be used by DataLoader later. Dataset takes a meta as input
    # and return a waveform and a target.
    dataset = AudioSetDataset(sample_rate=sample_rate)

    # Train sampler
    if balanced == 'none':
        Sampler = TrainSampler
    elif balanced == 'balanced':
        Sampler = BalancedTrainSampler
    elif balanced == 'alternate':
        Sampler = AlternateTrainSampler
    # Mixup consumes samples in pairs, hence the doubled batch size.
    train_sampler = Sampler(
        indexes_hdf5_path=train_indexes_hdf5_path,
        batch_size=batch_size * 2 if 'mixup' in augmentation else batch_size,
        black_list_csv=black_list_csv)

    # Evaluate sampler
    eval_bal_sampler = EvaluateSampler(
        indexes_hdf5_path=eval_bal_indexes_hdf5_path, batch_size=batch_size)
    eval_test_sampler = EvaluateSampler(
        indexes_hdf5_path=eval_test_indexes_hdf5_path, batch_size=batch_size)

    # Data loader
    train_loader = torch.utils.data.DataLoader(dataset=dataset,
        batch_sampler=train_sampler, collate_fn=collate_fn,
        num_workers=num_workers, pin_memory=True)
    eval_bal_loader = torch.utils.data.DataLoader(dataset=dataset,
        batch_sampler=eval_bal_sampler, collate_fn=collate_fn,
        num_workers=num_workers, pin_memory=True)
    eval_test_loader = torch.utils.data.DataLoader(dataset=dataset,
        batch_sampler=eval_test_sampler, collate_fn=collate_fn,
        num_workers=num_workers, pin_memory=True)

    # Mixup Beta(alpha, alpha) parameter.
    mix=0.5
    if 'mixup' in augmentation:
        mixup_augmenter = Mixup(mixup_alpha=mix)
        print(mix)
        logging.info(mix)

    # Evaluator
    evaluator = Evaluator(model=model)

    # Statistics
    statistics_container = StatisticsContainer(statistics_path)

    # Optimizer
    optimizer = optim.AdamW(model.parameters(), lr=learning_rate, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.05, amsgrad=True)
    # LR is halved when the monitored test mAP plateaus (mode='max').
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode='max', factor=0.5, patience=4, min_lr=1e-06, verbose=True)
    train_bgn_time = time.time()

    # Resume training
    if resume_iteration > 0:
        resume_checkpoint_path = os.path.join(workspace, 'checkpoints', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
            sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            '{}_iterations.pth'.format(resume_iteration))
        logging.info('Loading checkpoint {}'.format(resume_checkpoint_path))
        checkpoint = torch.load(resume_checkpoint_path)
        model.load_state_dict(checkpoint['model'])
        train_sampler.load_state_dict(checkpoint['sampler'])
        statistics_container.load_state_dict(resume_iteration)
        iteration = checkpoint['iteration']
    else:
        iteration = 0

    # Parallel
    print('GPU number: {}'.format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model)
    if 'cuda' in str(device):
        model.to(device)
    # Optimizer/scheduler state is restored after the model is wrapped/moved.
    if resume_iteration:
        optimizer.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
        print(optimizer.state_dict()['param_groups'][0]['lr'])
    time1 = time.time()

    # The sampler yields batches forever; the loop exits via early_stop below.
    for batch_data_dict in train_loader:
        """batch_data_dict: {
            'audio_name': (batch_size [*2 if mixup],),
            'waveform': (batch_size [*2 if mixup], clip_samples),
            'target': (batch_size [*2 if mixup], classes_num),
            (ifexist) 'mixup_lambda': (batch_size * 2,)}
        """
        # Evaluate
        if (iteration % 2000 == 0 and iteration >= resume_iteration) or (iteration == 0):
            train_fin_time = time.time()
            bal_statistics = evaluator.evaluate(eval_bal_loader)
            test_statistics = evaluator.evaluate(eval_test_loader)
            logging.info('Validate bal mAP: {:.3f}'.format(
                np.mean(bal_statistics['average_precision'])))
            logging.info('Validate test mAP: {:.3f}'.format(
                np.mean(test_statistics['average_precision'])))
            statistics_container.append(iteration, bal_statistics, data_type='bal')
            statistics_container.append(iteration, test_statistics, data_type='test')
            statistics_container.dump()
            train_time = train_fin_time - train_bgn_time
            validate_time = time.time() - train_fin_time
            logging.info(
                'iteration: {}, train time: {:.3f} s, validate time: {:.3f} s'
                ''.format(iteration, train_time, validate_time))
            logging.info('------------------------------------')
            train_bgn_time = time.time()

        # Save model
        if iteration % 2000 == 0:
            checkpoint = {
                'iteration': iteration,
                'model': model.module.state_dict(),
                'sampler': train_sampler.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()}
            checkpoint_path = os.path.join(
                checkpoints_dir, '{}_iterations.pth'.format(iteration))
            torch.save(checkpoint, checkpoint_path)
            logging.info('Model saved to {}'.format(checkpoint_path))

        # Mixup lambda
        if 'mixup' in augmentation:
            batch_data_dict['mixup_lambda'] = mixup_augmenter.get_lambda(
                batch_size=len(batch_data_dict['waveform']))

        # Move data to device
        for key in batch_data_dict.keys():
            batch_data_dict[key] = move_data_to_device(batch_data_dict[key], device)

        # Forward
        model.train()
        if 'mixup' in augmentation:
            batch_output_dict = model(batch_data_dict['waveform'],
                batch_data_dict['mixup_lambda'])
            """{'clipwise_output': (batch_size, classes_num), ...}"""
            # Targets are mixed with the same lambdas as the waveforms.
            batch_target_dict = {'target': do_mixup(batch_data_dict['target'],
                batch_data_dict['mixup_lambda'])}
            """{'target': (batch_size, classes_num)}"""
        else:
            batch_output_dict = model(batch_data_dict['waveform'], None)
            """{'clipwise_output': (batch_size, classes_num), ...}"""
            batch_target_dict = {'target': batch_data_dict['target']}
            """{'target': (batch_size, classes_num)}"""

        # Loss
        loss = loss_func(batch_output_dict, batch_target_dict)
        # Backward
        loss.backward()
        optimizer.step()
        optimizer.zero_grad()

        if iteration % 10 == 0:
            print(iteration, loss)
            #print('--- Iteration: {}, train time: {:.3f} s / 10 iterations ---'\
            #    .format(iteration, time.time() - time1))
            #time1 = time.time()

        # Step the plateau scheduler on the test mAP computed above
        # (defined because the evaluate block runs at the same cadence).
        if iteration % 2000 == 0:
            scheduler.step(np.mean(test_statistics['average_precision']))
            print(optimizer.state_dict()['param_groups'][0]['lr'])
            logging.info(optimizer.state_dict()['param_groups'][0]['lr'])

        # Stop learning
        if iteration == early_stop:
            break

        iteration += 1
8,615 | import os
import sys
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def move_data_to_device(x, device):
    """Convert numeric numpy data to a torch tensor and move it to *device*.

    Float data becomes a float32 ``torch.Tensor``, integer data a
    ``torch.LongTensor``; anything else is returned unchanged.
    """
    kind = str(x.dtype)
    if 'float' in kind:
        return torch.Tensor(x).to(device)
    if 'int' in kind:
        return torch.LongTensor(x).to(device)
    # Neither float nor int: hand the object back untouched.
    return x
The provided code snippet includes necessary dependencies for implementing the `audio_tagging` function. Write a Python function `def audio_tagging(args)` to solve the following problem:
Inference audio tagging result of an audio clip.
Here is the function:
def audio_tagging(args):
    """Inference audio tagging result of an audio clip.

    Loads a checkpoint, runs the model on one audio file, prints the
    top-10 class probabilities (and the embedding shape if the model
    exposes one).

    Args:
        args: argparse namespace with sample_rate, window_size, hop_size,
            mel_bins, fmin, fmax, model_type, checkpoint_path, audio_path
            and cuda attributes.

    Returns:
        (clipwise_output, labels): probabilities of shape (classes_num,)
        and the class label list.
    """
    # Arguments & parameters
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    checkpoint_path = args.checkpoint_path
    audio_path = args.audio_path
    device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')

    classes_num = config.classes_num
    labels = config.labels

    # Model. NOTE(review): eval() on model_type executes arbitrary code —
    # only pass trusted names (classes from the models module).
    Model = eval(model_type)
    model = Model(sample_rate=sample_rate, window_size=window_size,
        hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
        classes_num=classes_num)

    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # Parallel
    if 'cuda' in str(device):
        model.to(device)
        print('GPU number: {}'.format(torch.cuda.device_count()))
        model = torch.nn.DataParallel(model)
    else:
        print('Using CPU.')

    # Load audio, resampled to sample_rate and mixed down to mono
    (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)

    waveform = waveform[None, :]    # (1, audio_length)
    waveform = move_data_to_device(waveform, device)

    # Forward
    with torch.no_grad():
        model.eval()
        batch_output_dict = model(waveform, None)

    clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]
    """(classes_num,)"""

    # Indices of classes ordered by descending probability
    sorted_indexes = np.argsort(clipwise_output)[::-1]

    # Print audio tagging top probabilities
    for k in range(10):
        print('{}: {:.3f}'.format(np.array(labels)[sorted_indexes[k]],
            clipwise_output[sorted_indexes[k]]))

    # Print embedding shape if the model returns one
    if 'embedding' in batch_output_dict.keys():
        embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]
        print('embedding: {}'.format(embedding.shape))

    return clipwise_output, labels
8,616 | import os
import sys
import numpy as np
import argparse
import librosa
import matplotlib.pyplot as plt
import torch
from utilities import create_folder, get_filename
from models import *
from pytorch_utils import move_data_to_device
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race when several workers start
    # concurrently (the original exists()/makedirs() pair could raise).
    os.makedirs(fd, exist_ok=True)
def get_filename(path):
    """Return the file name of *path* without directories or extension."""
    path = os.path.realpath(path)
    # os.path.basename is portable; splitting on '/' breaks on Windows paths.
    na_ext = os.path.basename(path)
    return os.path.splitext(na_ext)[0]
def move_data_to_device(x, device):
    """Cast numpy data to a torch tensor on *device*; pass through otherwise."""
    dtype_name = str(x.dtype)
    if 'float' in dtype_name:
        tensor = torch.Tensor(x)
    elif 'int' in dtype_name:
        tensor = torch.LongTensor(x)
    else:
        # Unsupported dtype: leave the input as-is.
        return x
    return tensor.to(device)
The provided code snippet includes necessary dependencies for implementing the `sound_event_detection` function. Write a Python function `def sound_event_detection(args)` to solve the following problem:
Inference sound event detection result of an audio clip.
Here is the function:
def sound_event_detection(args):
    """Inference sound event detection result of an audio clip.

    Runs the model to get framewise class probabilities, then plots the log
    spectrogram together with the top-10 classes over time and saves the
    figure under 'results/'.

    Args:
        args: argparse namespace with sample_rate, window_size, hop_size,
            mel_bins, fmin, fmax, model_type, checkpoint_path, audio_path
            and cuda attributes.

    Returns:
        (framewise_output, labels): probabilities of shape
        (time_steps, classes_num) and the class label list.
    """
    # Arguments & parameters
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    checkpoint_path = args.checkpoint_path
    audio_path = args.audio_path
    device = torch.device('cuda') if args.cuda and torch.cuda.is_available() else torch.device('cpu')

    classes_num = config.classes_num
    labels = config.labels
    frames_per_second = sample_rate // hop_size

    # Paths
    fig_path = os.path.join('results', '{}.png'.format(get_filename(audio_path)))
    create_folder(os.path.dirname(fig_path))

    # Model. NOTE(review): eval() on model_type executes arbitrary code —
    # only pass trusted model names.
    Model = eval(model_type)
    model = Model(sample_rate=sample_rate, window_size=window_size,
        hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax,
        classes_num=classes_num)

    checkpoint = torch.load(checkpoint_path, map_location=device)
    model.load_state_dict(checkpoint['model'])

    # Parallel
    print('GPU number: {}'.format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model)

    if 'cuda' in str(device):
        model.to(device)

    # Load audio, resampled to sample_rate and mixed down to mono
    (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)

    waveform = waveform[None, :]    # (1, audio_length)
    waveform = move_data_to_device(waveform, device)

    # Forward
    with torch.no_grad():
        model.eval()
        batch_output_dict = model(waveform, None)

    framewise_output = batch_output_dict['framewise_output'].data.cpu().numpy()[0]
    """(time_steps, classes_num)"""

    print('Sound event detection result (time_steps x classes_num): {}'.format(
        framewise_output.shape))

    # Rank classes by their peak framewise probability
    sorted_indexes = np.argsort(np.max(framewise_output, axis=0))[::-1]

    top_k = 10  # Show top results
    top_result_mat = framewise_output[:, sorted_indexes[0 : top_k]]
    """(time_steps, top_k)"""

    # Plot result: spectrogram on top, framewise detections below
    stft = librosa.core.stft(y=waveform[0].data.cpu().numpy(), n_fft=window_size,
        hop_length=hop_size, window='hann', center=True)
    frames_num = stft.shape[-1]

    fig, axs = plt.subplots(2, 1, sharex=True, figsize=(10, 4))
    axs[0].matshow(np.log(np.abs(stft)), origin='lower', aspect='auto', cmap='jet')
    axs[0].set_ylabel('Frequency bins')
    axs[0].set_title('Log spectrogram')
    axs[1].matshow(top_result_mat.T, origin='upper', aspect='auto', cmap='jet', vmin=0, vmax=1)
    # One x tick per second of audio
    axs[1].xaxis.set_ticks(np.arange(0, frames_num, frames_per_second))
    axs[1].xaxis.set_ticklabels(np.arange(0, frames_num / frames_per_second))
    axs[1].yaxis.set_ticks(np.arange(0, top_k))
    axs[1].yaxis.set_ticklabels(np.array(labels)[sorted_indexes[0 : top_k]])
    axs[1].yaxis.grid(color='k', linestyle='solid', linewidth=0.3, alpha=0.3)
    axs[1].set_xlabel('Seconds')
    axs[1].xaxis.set_ticks_position('bottom')

    plt.tight_layout()
    plt.savefig(fig_path)
    print('Save sound event detection visualization to {}'.format(fig_path))

    return framewise_output, labels
8,617 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
import os
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
from functools import partial
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
from copy import deepcopy
from timm.models.helpers import load_pretrained
from torch.cuda.amp import autocast
from collections import OrderedDict
import io
import re
from mmcv.runner import _load_checkpoint, load_state_dict
import mmcv.runner
import copy
import random
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from torch import nn, einsum
The provided code snippet includes necessary dependencies for implementing the `load_checkpoint` function. Write a Python function `def load_checkpoint(model, filename, map_location=None, strict=False, logger=None, revise_keys=[(r'^module\.', '')])` to solve the following problem:
Load checkpoint from a file or URI. Args: model (Module): Module to load checkpoint. filename (str): Accept local filepath, URL, ``torchvision://xxx``, ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for details. map_location (str): Same as :func:`torch.load`. strict (bool): Whether to allow different params for the model and checkpoint. logger (:mod:`logging.Logger` or None): The logger for error message. revise_keys (list): A list of customized keywords to modify the state_dict in checkpoint. Each item is a (pattern, replacement) pair of the regular expression operations. Default: strip the prefix 'module.' by [(r'^module\\.', '')]. Returns: dict or OrderedDict: The loaded checkpoint.
Here is the function:
def load_checkpoint(model,
                    filename,
                    map_location=None,
                    strict=False,
                    logger=None,
                    revise_keys=[(r'^module\.', '')]):
    """Load checkpoint from a file or URI.

    Args:
        model (Module): Module to load checkpoint.
        filename (str): Accept local filepath, URL, ``torchvision://xxx``,
            ``open-mmlab://xxx``. Please refer to ``docs/model_zoo.md`` for
            details.
        map_location (str): Same as :func:`torch.load`.
        strict (bool): Whether to allow different params for the model and
            checkpoint.
        logger (:mod:`logging.Logger` or None): The logger for error message.
        revise_keys (list): A list of customized keywords to modify the
            state_dict in checkpoint. Each item is a (pattern, replacement)
            pair of the regular expression operations. Default: strip
            the prefix 'module.' by [(r'^module\\.', '')].
    Returns:
        dict or OrderedDict: The loaded checkpoint.
    """
    checkpoint = _load_checkpoint(filename, map_location, logger)
    # OrderedDict is a subclass of dict. Validate BEFORE touching the
    # contents (the original indexed the checkpoint first, so a non-dict
    # result crashed with a confusing TypeError instead of this error),
    # and name the offending file in the message.
    if not isinstance(checkpoint, dict):
        raise RuntimeError(
            f'No state_dict found in checkpoint file {filename}')
    # Collapse the pretrained 3-channel (RGB) patch-embedding kernel to one
    # input channel by summing over the channel dim so the weights fit a
    # single-channel spectrogram input.
    # NOTE(review): assumes 'patch_embed1.proj.weight' lives at the top level
    # of the checkpoint — confirm for checkpoints that nest under 'state_dict'.
    new_proj = torch.nn.Conv2d(1, 64, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
    new_proj.weight = torch.nn.Parameter(torch.sum(checkpoint['patch_embed1.proj.weight'], dim=1).unsqueeze(1))
    checkpoint['patch_embed1.proj.weight'] = new_proj.weight
    # get state_dict from checkpoint
    if 'state_dict' in checkpoint:
        state_dict = checkpoint['state_dict']
    else:
        state_dict = checkpoint
    # strip prefix of state_dict
    metadata = getattr(state_dict, '_metadata', OrderedDict())
    for p, r in revise_keys:
        state_dict = OrderedDict(
            {re.sub(p, r, k): v
             for k, v in state_dict.items()})
    state_dict = OrderedDict({k.replace('backbone.', ''): v for k, v in state_dict.items()})
    # Keep metadata in state_dict
    state_dict._metadata = metadata
    # load state_dict
    load_state_dict(model, state_dict, strict, logger)
    return checkpoint
8,618 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
import os
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
from functools import partial
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
from copy import deepcopy
from timm.models.helpers import load_pretrained
from torch.cuda.amp import autocast
from collections import OrderedDict
import io
import re
from mmcv.runner import _load_checkpoint, load_state_dict
import mmcv.runner
import copy
import random
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from torch import nn, einsum
The provided code snippet includes necessary dependencies for implementing the `init_layer` function. Write a Python function `def init_layer(layer)` to solve the following problem:
Initialize a Linear or Convolutional layer.
Here is the function:
def init_layer(layer):
    """Initialize a Linear or Convolutional layer: Xavier-uniform weights, zero bias."""
    nn.init.xavier_uniform_(layer.weight)
    bias = getattr(layer, 'bias', None)
    if bias is not None:
        bias.data.fill_(0.)
8,619 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
import os
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
from functools import partial
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
from copy import deepcopy
from timm.models.helpers import load_pretrained
from torch.cuda.amp import autocast
from collections import OrderedDict
import io
import re
from mmcv.runner import _load_checkpoint, load_state_dict
import mmcv.runner
import copy
import random
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from torch import nn, einsum
The provided code snippet includes necessary dependencies for implementing the `init_bn` function. Write a Python function `def init_bn(bn)` to solve the following problem:
Initialize a Batchnorm layer.
Here is the function:
def init_bn(bn):
    """Initialize a Batchnorm layer to the identity transform (scale 1, shift 0)."""
    bn.weight.data.fill_(1.)
    bn.bias.data.fill_(0.)
8,620 | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup, interpolate, pad_framewise_output
import os
import sys
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchlibrosa.stft import Spectrogram, LogmelFilterBank
from torchlibrosa.augmentation import SpecAugmentation
from audio_infer.pytorch.pytorch_utils import do_mixup
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
import warnings
from functools import partial
from mmdet.utils import get_root_logger
from mmcv.runner import load_checkpoint
from copy import deepcopy
from timm.models.helpers import load_pretrained
from torch.cuda.amp import autocast
from collections import OrderedDict
import io
import re
from mmcv.runner import _load_checkpoint, load_state_dict
import mmcv.runner
import copy
import random
from einops import rearrange
from einops.layers.torch import Rearrange, Reduce
from torch import nn, einsum
The provided code snippet includes necessary dependencies for implementing the `_conv_filter` function. Write a Python function `def _conv_filter(state_dict, patch_size=16)` to solve the following problem:
convert patch embedding weight from manual patchify + linear proj to conv
Here is the function:
def _conv_filter(state_dict, patch_size=16):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
out_dict = {}
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k:
v = v.reshape((v.shape[0], 3, patch_size, patch_size))
out_dict[k] = v
return out_dict | convert patch embedding weight from manual patchify + linear proj to conv |
8,621 | import os
import sys
import numpy as np
import argparse
import h5py
import math
import time
import logging
import matplotlib.pyplot as plt
import torch
torch.backends.cudnn.benchmark=True
torch.manual_seed(0)
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data
from utilities import get_filename
from models import *
import config
def train(args):
    """Build a fine-tuning model, optionally load pretrained weights, and move
    it to the target device.

    Args:
        args: argparse namespace with sample_rate, window_size, hop_size,
            mel_bins, fmin, fmax, model_type, pretrained_checkpoint_path,
            freeze_base and cuda attributes.
    """
    # Arguments & parameters
    sample_rate = args.sample_rate
    window_size = args.window_size
    hop_size = args.hop_size
    mel_bins = args.mel_bins
    fmin = args.fmin
    fmax = args.fmax
    model_type = args.model_type
    pretrained_checkpoint_path = args.pretrained_checkpoint_path
    freeze_base = args.freeze_base
    device = 'cuda' if (args.cuda and torch.cuda.is_available()) else 'cpu'
    classes_num = config.classes_num
    # Fine-tune only when a pretrained checkpoint path was provided.
    pretrain = True if pretrained_checkpoint_path else False

    # Model. NOTE(review): eval() on model_type executes arbitrary code —
    # only pass trusted model names.
    Model = eval(model_type)
    model = Model(sample_rate, window_size, hop_size, mel_bins, fmin, fmax,
        classes_num, freeze_base)

    # Load pretrained model
    if pretrain:
        logging.info('Load pretrained model from {}'.format(pretrained_checkpoint_path))
        model.load_from_pretrain(pretrained_checkpoint_path)

    # Parallel
    print('GPU number: {}'.format(torch.cuda.device_count()))
    model = torch.nn.DataParallel(model)

    if 'cuda' in device:
        model.to(device)

    print('Load pretrained model successfully!')
8,622 | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import create_folder, get_sub_filepaths
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race the exists()/makedirs() pair had.
    os.makedirs(fd, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `create_indexes` function. Write a Python function `def create_indexes(args)` to solve the following problem:
Create indexes for a dataloader to read for training. When users have a new task and their own data, they need to create similar indexes. The indexes contain meta information of "where to find the data for training".
Here is the function:
def create_indexes(args):
    """Create indexes for a dataloader to read for training. When users have
    a new task and their own data, they need to create similar indexes. The
    indexes contain meta information of "where to find the data for training".

    Args:
        args: argparse namespace with waveforms_hdf5_path and
            indexes_hdf5_path attributes.
    """
    # Arguments & parameters
    waveforms_hdf5_path = args.waveforms_hdf5_path
    indexes_hdf5_path = args.indexes_hdf5_path

    # Paths
    create_folder(os.path.dirname(indexes_hdf5_path))

    with h5py.File(waveforms_hdf5_path, 'r') as hr:
        with h5py.File(indexes_hdf5_path, 'w') as hw:
            audios_num = len(hr['audio_name'])
            hw.create_dataset('audio_name', data=hr['audio_name'][:], dtype='S20')
            # np.bool was removed in NumPy 1.24; the builtin bool is the
            # supported spelling (it was always an alias of it) and maps to
            # the same h5py storage type.
            hw.create_dataset('target', data=hr['target'][:], dtype=bool)
            hw.create_dataset('hdf5_path', data=[waveforms_hdf5_path.encode()] * audios_num, dtype='S200')
            hw.create_dataset('index_in_hdf5', data=np.arange(audios_num), dtype=np.int32)

    print('Write to {}'.format(indexes_hdf5_path))
8,623 | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import create_folder, get_sub_filepaths
import config
def get_sub_filepaths(folder):
    """Recursively collect the paths of all files under *folder*."""
    paths = []
    for root, _dirs, files in os.walk(folder):
        paths.extend(os.path.join(root, name) for name in files)
    return paths
The provided code snippet includes necessary dependencies for implementing the `combine_full_indexes` function. Write a Python function `def combine_full_indexes(args)` to solve the following problem:
Combine all balanced and unbalanced indexes hdf5s to a single hdf5. This combined indexes hdf5 is used for training with full data (~20k balanced audio clips + ~1.9m unbalanced audio clips).
Here is the function:
def combine_full_indexes(args):
    """Combine all balanced and unbalanced indexes hdf5s to a single hdf5. This
    combined indexes hdf5 is used for training with full data (~20k balanced
    audio clips + ~1.9m unbalanced audio clips).

    Args:
        args: argparse namespace with indexes_hdf5s_dir and
            full_indexes_hdf5_path attributes.
    """
    # Arguments & parameters
    indexes_hdf5s_dir = args.indexes_hdf5s_dir
    full_indexes_hdf5_path = args.full_indexes_hdf5_path

    classes_num = config.classes_num

    # Paths: keep only per-part train indexes; skip previously combined
    # ('full_train') and debug ('mini') files.
    paths = get_sub_filepaths(indexes_hdf5s_dir)
    paths = [path for path in paths if (
        'train' in path and 'full_train' not in path and 'mini' not in path)]

    print('Total {} hdf5 to combine.'.format(len(paths)))

    with h5py.File(full_indexes_hdf5_path, 'w') as full_hf:
        # Resizable datasets, grown as each part file is appended below.
        full_hf.create_dataset(
            name='audio_name',
            shape=(0,),
            maxshape=(None,),
            dtype='S20')

        # np.bool was removed in NumPy 1.24; the builtin bool is the
        # supported spelling and maps to the same h5py storage type.
        full_hf.create_dataset(
            name='target',
            shape=(0, classes_num),
            maxshape=(None, classes_num),
            dtype=bool)

        full_hf.create_dataset(
            name='hdf5_path',
            shape=(0,),
            maxshape=(None,),
            dtype='S200')

        full_hf.create_dataset(
            name='index_in_hdf5',
            shape=(0,),
            maxshape=(None,),
            dtype=np.int32)

        for path in paths:
            with h5py.File(path, 'r') as part_hf:
                print(path)
                n = len(full_hf['audio_name'][:])
                new_n = n + len(part_hf['audio_name'][:])

                full_hf['audio_name'].resize((new_n,))
                full_hf['audio_name'][n : new_n] = part_hf['audio_name'][:]

                full_hf['target'].resize((new_n, classes_num))
                full_hf['target'][n : new_n] = part_hf['target'][:]

                full_hf['hdf5_path'].resize((new_n,))
                full_hf['hdf5_path'][n : new_n] = part_hf['hdf5_path'][:]

                full_hf['index_in_hdf5'].resize((new_n,))
                full_hf['index_in_hdf5'][n : new_n] = part_hf['index_in_hdf5'][:]

    print('Write combined full hdf5 to {}'.format(full_indexes_hdf5_path))
8,624 | import os
import logging
import h5py
import soundfile
import librosa
import numpy as np
import pandas as pd
from scipy import stats
import datetime
import pickle
def int16_to_float32(x):
    """Map int16 PCM samples into float32 waveform values in roughly [-1, 1]."""
    scaled = x / 32767.
    return scaled.astype(np.float32)
8,625 | import argparse
import csv
import os
from utilities import create_folder
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race the exists()/makedirs() pair had.
    os.makedirs(fd, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `dcase2017task4` function. Write a Python function `def dcase2017task4(args)` to solve the following problem:
Create black list. Black list is a list of audio ids that will be skipped in training.
Here is the function:
def dcase2017task4(args):
    """Create black list. Black list is a list of audio ids that will be
    skipped in training.

    Args:
        args: argparse namespace with a workspace attribute.
    """
    # Arguments & parameters
    workspace = args.workspace

    # Black list from DCASE 2017 Task 4
    test_weak_csv = 'metadata/black_list/groundtruth_weak_label_testing_set.csv'
    evaluation_weak_csv = 'metadata/black_list/groundtruth_weak_label_evaluation_set.csv'

    black_list_csv = os.path.join(workspace, 'black_list', 'dcase2017task4.csv')
    create_folder(os.path.dirname(black_list_csv))

    def get_id_sets(csv_path):
        """Return the unique YouTube ids (first 11 chars of the clip name) in *csv_path*."""
        with open(csv_path, 'r') as fr:
            reader = csv.reader(fr, delimiter='\t')
            lines = list(reader)

        ids_set = []
        for line in lines:
            """line: ['-5QrBL6MzLg_60.000_70.000.wav', '60.000', '70.000', 'Train horn']"""
            ids_set.append(line[0][0 : 11])
        ids_set = list(set(ids_set))
        return ids_set

    test_ids_set = get_id_sets(test_weak_csv)
    evaluation_ids_set = get_id_sets(evaluation_weak_csv)
    full_ids_set = test_ids_set + evaluation_ids_set

    # Write black list. Use a context manager so the handle is always closed
    # (the original opened the file and never closed it).
    with open(black_list_csv, 'w') as fw:
        for id in full_ids_set:
            fw.write('{}\n'.format(id))
    print('Write black list to {}'.format(black_list_csv))
8,626 | import numpy as np
import h5py
import csv
import time
import logging
from utilities import int16_to_float32
The provided code snippet includes necessary dependencies for implementing the `read_black_list` function. Write a Python function `def read_black_list(black_list_csv)` to solve the following problem:
Read audio names from black list.
Here is the function:
def read_black_list(black_list_csv):
    """Read audio names from black list.

    Each row's first column is a YouTube id; names are returned as 'Y<id>.wav'.
    """
    with open(black_list_csv, 'r') as fr:
        rows = list(csv.reader(fr))
    return ['Y{}.wav'.format(row[0]) for row in rows]
8,627 | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import (create_folder, get_filename, create_logging,
float32_to_int16, pad_or_truncate, read_metadata)
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race the exists()/makedirs() pair had.
    os.makedirs(fd, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `split_unbalanced_csv_to_partial_csvs` function. Write a Python function `def split_unbalanced_csv_to_partial_csvs(args)` to solve the following problem:
Split unbalanced csv to part csvs. Each part csv contains up to 50000 ids.
Here is the function:
def split_unbalanced_csv_to_partial_csvs(args):
    """Split unbalanced csv to part csvs. Each part csv contains up to 50000 ids."""
    unbalanced_csv_path = args.unbalanced_csv
    unbalanced_partial_csvs_dir = args.unbalanced_partial_csvs_dir
    create_folder(unbalanced_partial_csvs_dir)

    with open(unbalanced_csv_path, 'r') as f:
        all_lines = f.readlines()
    all_lines = all_lines[3:]   # Remove head info

    audios_num_per_file = 50000
    files_num = int(np.ceil(len(all_lines) / float(audios_num_per_file)))

    for r in range(files_num):
        start = r * audios_num_per_file
        chunk = all_lines[start : start + audios_num_per_file]

        out_csv_path = os.path.join(unbalanced_partial_csvs_dir,
            'unbalanced_train_segments_part{:02d}.csv'.format(r))

        with open(out_csv_path, 'w') as f:
            # Keep three placeholder head lines so downstream readers skip
            # the same number of rows as in the original csv.
            f.write('empty\n')
            f.write('empty\n')
            f.write('empty\n')
            f.writelines(chunk)
        print('Write out csv to {}'.format(out_csv_path))
8,628 | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import (create_folder, get_filename, create_logging,
float32_to_int16, pad_or_truncate, read_metadata)
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race the exists()/makedirs() pair had.
    os.makedirs(fd, exist_ok=True)
def get_filename(path):
    """Return the file name of *path* without directories or extension."""
    path = os.path.realpath(path)
    # os.path.basename is portable; splitting on '/' breaks on Windows paths.
    na_ext = os.path.basename(path)
    return os.path.splitext(na_ext)[0]
def create_logging(log_dir, filemode):
    """Set up root logging to a fresh numbered file in *log_dir* and to the console.

    Args:
        log_dir: directory for the log files (created if missing).
        filemode: open mode for the log file, e.g. 'w' or 'a'.

    Returns:
        The configured :mod:`logging` module itself.
    """
    create_folder(log_dir)
    i1 = 0

    # Pick the first unused 0000.log, 0001.log, ... name so reruns never
    # clobber an earlier log file.
    while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):
        i1 += 1
    log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=log_path,
        filemode=filemode)

    # Print to console. NOTE(review): each call adds another console handler
    # to the root logger, duplicating messages — confirm callers invoke this
    # only once per process.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    return logging
The provided code snippet includes necessary dependencies for implementing the `download_wavs` function. Write a Python function `def download_wavs(args)` to solve the following problem:
Download videos and extract audio in wav format.
Here is the function:
def download_wavs(args):
    """Download videos and extract audio in wav format.

    Reads an AudioSet csv, downloads each clip with youtube-dl and extracts
    the labelled segment as mono 32 kHz wav via ffmpeg.

    Args:
        args: argparse namespace with csv_path, audios_dir and mini_data
            attributes.
    """
    # Paths
    csv_path = args.csv_path
    audios_dir = args.audios_dir
    mini_data = args.mini_data

    # Bug fix: these two branches were swapped — the '_minidata' suffix
    # belongs to the mini_data debug run, not the full run.
    if mini_data:
        logs_dir = '_logs/download_dataset_minidata/{}'.format(get_filename(csv_path))
    else:
        logs_dir = '_logs/download_dataset/{}'.format(get_filename(csv_path))

    create_folder(audios_dir)
    create_folder(logs_dir)
    create_logging(logs_dir, filemode='w')
    logging.info('Download log is saved to {}'.format(logs_dir))

    # Read csv
    with open(csv_path, 'r') as f:
        lines = f.readlines()
    lines = lines[3:]   # Remove csv head info

    if mini_data:
        lines = lines[0 : 10]   # Download partial data for debug

    download_time = time.time()

    # Download
    for (n, line) in enumerate(lines):
        items = line.split(', ')
        audio_id = items[0]
        start_time = float(items[1])
        end_time = float(items[2])
        duration = end_time - start_time

        logging.info('{} {} start_time: {:.1f}, end_time: {:.1f}'.format(
            n, audio_id, start_time, end_time))

        # Download full video of whatever format.
        # NOTE(review): audio_id is interpolated into a shell command; ids
        # come from the AudioSet csv (trusted), but subprocess.run with an
        # argument list would be safer against hostile csv content.
        video_name = os.path.join(audios_dir, '_Y{}.%(ext)s'.format(audio_id))
        os.system("youtube-dl --quiet -o '{}' -x https://www.youtube.com/watch?v={}"\
            .format(video_name, audio_id))

        video_paths = glob.glob(os.path.join(audios_dir, '_Y' + audio_id + '.*'))

        # If download successful
        if len(video_paths) > 0:
            video_path = video_paths[0]   # Choose one video

            # Add 'Y' to the head because some video ids are started with '-'
            # which will cause problem
            audio_path = os.path.join(audios_dir, 'Y' + audio_id + '.wav')

            # Extract audio in wav format
            os.system("ffmpeg -loglevel panic -i {} -ac 1 -ar 32000 -ss {} -t 00:00:{} {} "\
                .format(video_path,
                str(datetime.timedelta(seconds=start_time)), duration,
                audio_path))

            # Remove downloaded video
            os.system("rm {}".format(video_path))
            logging.info("Download and convert to {}".format(audio_path))

    logging.info('Download finished! Time spent: {:.3f} s'.format(
        time.time() - download_time))

    logging.info('Logs can be viewed in {}'.format(logs_dir))
8,629 | import numpy as np
import argparse
import csv
import os
import glob
import datetime
import time
import logging
import h5py
import librosa
from utilities import (create_folder, get_filename, create_logging,
float32_to_int16, pad_or_truncate, read_metadata)
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist."""
    # exist_ok avoids the check-then-create race the exists()/makedirs() pair had.
    os.makedirs(fd, exist_ok=True)
def get_filename(path):
    """Return the file name of *path* without directories or extension."""
    path = os.path.realpath(path)
    # os.path.basename is portable; splitting on '/' breaks on Windows paths.
    na_ext = os.path.basename(path)
    return os.path.splitext(na_ext)[0]
def create_logging(log_dir, filemode):
    """Set up root logging to a fresh numbered file in *log_dir* and to the console.

    Args:
        log_dir: directory for the log files (created if missing).
        filemode: open mode for the log file, e.g. 'w' or 'a'.

    Returns:
        The configured :mod:`logging` module itself.
    """
    create_folder(log_dir)
    i1 = 0

    # Pick the first unused 0000.log, 0001.log, ... name so reruns never
    # clobber an earlier log file.
    while os.path.isfile(os.path.join(log_dir, '{:04d}.log'.format(i1))):
        i1 += 1
    log_path = os.path.join(log_dir, '{:04d}.log'.format(i1))
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
        datefmt='%a, %d %b %Y %H:%M:%S',
        filename=log_path,
        filemode=filemode)

    # Print to console. NOTE(review): each call adds another console handler
    # to the root logger, duplicating messages — confirm callers invoke this
    # only once per process.
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    return logging
def read_metadata(csv_path, classes_num, id_to_ix):
    """Read metadata of AudioSet from a csv file.

    Args:
        csv_path: str, csv with 3 header rows then
            '<youtube_id>, <start>, <end>, "<label_id>,..."' rows.
        classes_num: int, total number of classes.
        id_to_ix: dict mapping label id (e.g. '/m/068hy') to class index.

    Returns:
        meta_dict: {'audio_name': (audios_num,), 'target': (audios_num, classes_num)}
    """
    with open(csv_path, 'r') as fr:
        lines = fr.readlines()
        lines = lines[3:]   # Remove heads

    audios_num = len(lines)
    # np.bool was removed in NumPy 1.24; the builtin bool is the supported
    # spelling (it was always an alias of it).
    targets = np.zeros((audios_num, classes_num), dtype=bool)
    audio_names = []

    for n, line in enumerate(lines):
        items = line.split(', ')
        """items: ['--4gqARaEJE', '0.000', '10.000', '"/m/068hy,/m/07q6cd_,/m/0bt9lr,/m/0jbk"\n']"""

        audio_name = 'Y{}.wav'.format(items[0])   # Audios are started with an extra 'Y' when downloading
        label_ids = items[3].split('"')[1].split(',')

        audio_names.append(audio_name)

        # Target: multi-hot over the clip's label ids
        for id in label_ids:
            ix = id_to_ix[id]
            targets[n, ix] = 1

    meta_dict = {'audio_name': np.array(audio_names), 'target': targets}
    return meta_dict
def float32_to_int16(x):
    """Quantize a float waveform in [-1, 1] to int16 PCM samples."""
    peak = np.max(np.abs(x))
    assert peak <= 1.2   # tolerate slight overshoot, then hard-clip below
    clipped = np.clip(x, -1, 1)
    return (clipped * 32767.).astype(np.int16)
def pad_or_truncate(x, audio_length):
    """Zero-pad or truncate 1-D audio *x* to exactly *audio_length* samples."""
    current_length = len(x)
    if current_length > audio_length:
        return x[0 : audio_length]
    padding = np.zeros(audio_length - current_length)
    return np.concatenate((x, padding), axis=0)
The provided code snippet includes necessary dependencies for implementing the `pack_waveforms_to_hdf5` function. Write a Python function `def pack_waveforms_to_hdf5(args)` to solve the following problem:
Pack waveform and target of several audio clips to a single hdf5 file. This can speed up loading and training.
Here is the function:
def pack_waveforms_to_hdf5(args):
    """Pack waveform and target of several audio clips to a single hdf5 file.
    This can speed up loading and training.

    Args:
      args: argparse namespace with attributes:
        audios_dir: str, directory containing the downloaded .wav clips
        csv_path: str, AudioSet segments csv
        waveforms_hdf5_path: str, output hdf5 path
        mini_data: bool, if True only the first 10 clips are packed (debugging)
    """
    # Arguments & parameters
    audios_dir = args.audios_dir
    csv_path = args.csv_path
    waveforms_hdf5_path = args.waveforms_hdf5_path
    mini_data = args.mini_data

    clip_samples = config.clip_samples
    classes_num = config.classes_num
    sample_rate = config.sample_rate
    id_to_ix = config.id_to_ix

    # Paths
    if mini_data:
        prefix = 'mini_'
        waveforms_hdf5_path += '.mini'
    else:
        prefix = ''

    create_folder(os.path.dirname(waveforms_hdf5_path))

    logs_dir = '_logs/pack_waveforms_to_hdf5/{}{}'.format(prefix, get_filename(csv_path))
    create_folder(logs_dir)
    create_logging(logs_dir, filemode='w')
    logging.info('Write logs to {}'.format(logs_dir))

    # Read csv file
    meta_dict = read_metadata(csv_path, classes_num, id_to_ix)

    if mini_data:
        mini_num = 10
        for key in meta_dict.keys():
            meta_dict[key] = meta_dict[key][0 : mini_num]

    audios_num = len(meta_dict['audio_name'])

    # Pack waveform to hdf5
    total_time = time.time()

    with h5py.File(waveforms_hdf5_path, 'w') as hf:
        hf.create_dataset('audio_name', shape=((audios_num,)), dtype='S20')
        hf.create_dataset('waveform', shape=((audios_num, clip_samples)), dtype=np.int16)
        # NOTE: `np.bool` was removed in NumPy 1.24; builtin `bool` is the
        # supported spelling of the same dtype.
        hf.create_dataset('target', shape=((audios_num, classes_num)), dtype=bool)
        hf.attrs.create('sample_rate', data=sample_rate, dtype=np.int32)

        # Pack waveform & target of several audio clips to a single hdf5 file
        for n in range(audios_num):
            audio_path = os.path.join(audios_dir, meta_dict['audio_name'][n])

            if os.path.isfile(audio_path):
                logging.info('{} {}'.format(n, audio_path))
                (audio, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)
                audio = pad_or_truncate(audio, clip_samples)

                hf['audio_name'][n] = meta_dict['audio_name'][n].encode()
                hf['waveform'][n] = float32_to_int16(audio)
                hf['target'][n] = meta_dict['target'][n]
            else:
                logging.info('{} File does not exist! {}'.format(n, audio_path))

    logging.info('Write to {}'.format(waveforms_hdf5_path))
    logging.info('Pack hdf5 time: {:.3f}'.format(time.time() - total_time))
8,630 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def crop_label(label):
    """Shorten *label* to at most 16 characters, keeping whole leading words.

    Labels already short enough are returned unchanged. A cropped result keeps
    the original implementation's quirk of prefixing each kept word with a
    space (so it starts with ' ').
    """
    max_len = 16
    if len(label) <= max_len:
        return label

    cropped = ''
    for word in label.split(' '):
        candidate = cropped + ' ' + word
        if len(candidate) > max_len:
            break
        cropped = candidate
    return cropped
def add_comma(integer):
    """Format an integer with thousands separators. E.g., 1234567 -> 1,234,567.

    The previous implementation only split off the last three digits once and
    did not zero-pad them, so it produced '1234,567' for 1234567 and '1000,5'
    for 1000005; the ',' format specifier handles all magnitudes correctly.
    """
    return '{:,}'.format(int(integer))
def create_folder(fd):
    """Create directory *fd* (and parents) if it does not already exist."""
    if os.path.exists(fd):
        return
    os.makedirs(fd)
def plot_classwise_iteration_map(args):
    """Plot AP-vs-iteration curves for head, middle and tail classes.

    Classes are ranked by their full-training-set sample count; three panels
    show ranks 0-9, 250-259 and 517-526. Saves
    results/classwise_iteration_map.pdf.
    """
    # Paths
    save_out_path = 'results/classwise_iteration_map.pdf'
    create_folder(os.path.dirname(save_out_path))

    # Load statistics
    with open('paper_statistics/statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl', 'rb') as f:
        statistics_dict = pickle.load(f)

    mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
    mAP_mat = mAP_mat[0 : 300, :]   # 300 checkpoints * 2000 = 600k iterations

    # Class indices sorted by descending number of training samples
    sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]

    fig, axs = plt.subplots(1, 3, figsize=(20, 5))
    class_ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
    axs[0].set_ylabel('AP')

    for col, picked in enumerate(class_ranges):
        axis = axs[col]
        axis.set_ylim(0, 1.)
        axis.set_xlim(0, 301)
        axis.set_xlabel('Iterations')
        axis.set_ylabel('AP')
        axis.xaxis.set_ticks(np.arange(0, 301, 100))
        axis.xaxis.set_ticklabels(['0', '200k', '400k', '600k'])

        handles = []
        for rank in picked:
            class_ix = sorted_indexes[rank]
            legend_text = crop_label(config.labels[class_ix]) + \
                ' ({})'.format(add_comma(config.full_samples_per_class[class_ix]))
            handle, = axis.plot(mAP_mat[:, class_ix], label=legend_text)
            handles.append(handle)

        box = axis.get_position()
        axis.set_position([box.x0, box.y0, box.width * 1., box.height])
        axis.legend(handles=handles, bbox_to_anchor=(1., 1.))
        axis.yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)

    plt.tight_layout(pad=4, w_pad=1, h_pad=1)
    plt.savefig(save_out_path)
    print(save_out_path)
8,631 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def load_statistics(statistics_path):
    """Load a statistics pickle and return per-checkpoint mean mAP curves.

    Args:
      statistics_path: str, pickle holding {'bal': [...], 'test': [...]} where
        each entry carries an 'average_precision' array of shape (classes_num,).

    Returns:
      (bal_map, test_map): two (N,) arrays, the mean AP over classes at each
      checkpoint for the balanced and test splits.
    """
    with open(statistics_path, 'rb') as f:
        statistics_dict = pickle.load(f)

    def _mean_curve(records):
        # (N, classes_num) -> (N,), averaging over classes
        ap_mat = np.array([stat['average_precision'] for stat in records])
        return np.mean(ap_mat, axis=-1)

    return _mean_curve(statistics_dict['bal']), _mean_curve(statistics_dict['test'])
def create_folder(fd):
    """Create directory *fd*, including missing parents; no-op if it exists."""
    if not os.path.exists(fd):
        os.makedirs(fd)
def plot_six_figures(args):
    """Plot the six mAP-vs-iteration comparison panels (architectures, data /
    augmentation, embedding size, data amount, sampling rate, mel bins).

    Each panel overlays the balanced-subset mAP (faint) and the eval-set mAP
    (solid, labelled) of several training configurations. Saves
    results/six_figures.pdf.

    Fixes vs. the previous version:
      * ``plt.tight_layout(0, 1, 0)`` used positional pad arguments, which
        Matplotlib removed in 3.3 (TypeError); now passed as keywords.
      * removed unused locals (classes_num, labels, class_labels_indices_path).
      * the two curves in panel (f) that accidentally omitted ``linewidth``
        are now drawn with the same linewidth as every other curve.
    """
    # Arguments & parameters
    max_plot_iteration = 540000
    iterations = np.arange(0, max_plot_iteration, 2000)

    # Paths
    save_out_path = 'results/six_figures.pdf'
    create_folder(os.path.dirname(save_out_path))

    # Plot
    fig, ax = plt.subplots(2, 3, figsize=(14, 7))
    bal_alpha = 0.3
    test_alpha = 1.0
    linewidth = 1.

    def _add_curves(axis, pkl_name, label, color):
        # One system: balanced-subset mAP (faint) + eval mAP (solid, labelled).
        # Returns the labelled line handle for the legend.
        (bal_map, test_map) = load_statistics('paper_statistics/' + pkl_name)
        axis.plot(bal_map, color=color, alpha=bal_alpha, linewidth=linewidth)
        line, = axis.plot(test_map, label=label, color=color, alpha=test_alpha,
            linewidth=linewidth)
        return line

    # (a) Comparison of architectures
    lines = [
        _add_curves(ax[0, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_WavegramLogmelCnn_balanced_mixup_bs32.pkl', 'Wavegram-Logmel-CNN', 'g'),
        _add_curves(ax[0, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14', 'r'),
        _add_curves(ax[0, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_MobileNetV1_balanced_mixup_bs32.pkl', 'MobileNetV1', 'b'),
    ]
    ax[0, 0].legend(handles=lines, loc=2)
    ax[0, 0].set_title('(a) Comparison of architectures')

    # (b) Comparison of training data and augmentation
    lines = [
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,bal,mixup (1.9m)', 'r'),
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_timedomain_bs32.pkl', 'CNN14,bal,mixup-wav (1.9m)', 'y'),
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_nomixup_bs32.pkl', 'CNN14,bal,no-mixup (1.9m)', 'g'),
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_nobalanced_nomixup_bs32.pkl', 'CNN14,no-bal,no-mixup (1.9m)', 'b'),
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,bal,mixup (20k)', 'm'),
        _add_curves(ax[0, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_balanced_train_Cnn14_balanced_nomixup_bs32.pkl', 'CNN14,bal,no-mixup (20k)', 'k'),
    ]
    ax[0, 1].legend(handles=lines, loc=2, fontsize=8)
    ax[0, 1].set_title('(b) Comparison of training data and augmentation')

    # (c) Comparison of embedding size
    lines = [
        _add_curves(ax[0, 2], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,emb=2048', 'r'),
        _add_curves(ax[0, 2], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb128_balanced_mixup_bs32.pkl', 'CNN14,emb=128', 'g'),
        _add_curves(ax[0, 2], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_emb32_balanced_mixup_bs32.pkl', 'CNN14,emb=32', 'b'),
    ]
    ax[0, 2].legend(handles=lines, loc=2)
    ax[0, 2].set_title('(c) Comparison of embedding size')

    # (d) Comparison of amount of training data
    lines = [
        _add_curves(ax[1, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14 (100% full)', 'r'),
        _add_curves(ax[1, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.8full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14 (80% full)', 'b'),
        _add_curves(ax[1, 0], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_0.5full_train_Cnn14_balanced_mixup_bs32.pkl', 'cnn14 (50% full)', 'g'),
    ]
    ax[1, 0].legend(handles=lines, loc=2)
    ax[1, 0].set_title('(d) Comparison of amount of training data')

    # (e) Comparison of sampling rate
    lines = [
        _add_curves(ax[1, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,32kHz', 'r'),
        _add_curves(ax[1, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_16k_balanced_mixup_bs32.pkl', 'CNN14,16kHz', 'b'),
        _add_curves(ax[1, 1], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_8k_balanced_mixup_bs32.pkl', 'CNN14,8kHz', 'g'),
    ]
    ax[1, 1].legend(handles=lines, loc=2)
    ax[1, 1].set_title('(e) Comparison of sampling rate')

    # (f) Comparison of mel bins number
    lines = [
        _add_curves(ax[1, 2], 'statistics_sr32000_window1024_hop320_mel128_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,128-melbins', 'g'),
        _add_curves(ax[1, 2], 'statistics_sr32000_window1024_hop320_mel64_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,64-melbins', 'r'),
        _add_curves(ax[1, 2], 'statistics_sr32000_window1024_hop320_mel32_fmin50_fmax14000_full_train_Cnn14_balanced_mixup_bs32.pkl', 'CNN14,32-melbins', 'b'),
    ]
    ax[1, 2].legend(handles=lines, loc=2)
    ax[1, 2].set_title('(f) Comparison of mel bins number')

    # Shared axis formatting for all six panels
    for i in range(2):
        for j in range(3):
            ax[i, j].set_ylim(0, 0.8)
            ax[i, j].set_xlim(0, len(iterations))
            ax[i, j].set_xlabel('Iterations')
            ax[i, j].set_ylabel('mAP')
            ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
            ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
            ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
            ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3',
                '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
            ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
            ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)

    # Positional pad arguments were removed in Matplotlib 3.3; the original
    # call was tight_layout(0, 1, 0) == (pad, h_pad, w_pad).
    plt.tight_layout(pad=0, h_pad=1, w_pad=0)
    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))
8,632 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not exist.

    The previous line declared the function with an empty body (a syntax
    error); the body is restored to match the create_folder helpers defined
    elsewhere in this file.
    """
    if not os.path.exists(fd):
        os.makedirs(fd)
def plot_complexity_map(args):
    """Scatter eval mAP against computational complexity for all PANNs models.

    Saves results/complexity_mAP.pdf.

    Fixes vs. the previous version:
      * ``plt.tight_layout(0, 0, 0)`` used positional pad arguments, removed
        in Matplotlib 3.3; now passed as keywords.
      * removed the unused (and incorrect) ``sorted_indexes = np.sort(flops)``.
      * the x-label had been corrupted by a global rename ('Multi-load_statisticss');
        restored to 'Multi-adds'.
    """
    # Paths
    save_out_path = 'results/complexity_mAP.pdf'
    create_folder(os.path.dirname(save_out_path))

    plt.figure(figsize=(5, 5))
    fig, ax = plt.subplots(1, 1)

    model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
        'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
        'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
    # Complexity and eval mAP of each model, aligned with model_types order.
    flops = np.array([21.986, 28.166, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
        30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
    mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
        0.266, 0.336, 0.365, 0.355, 0.389, 0.439])

    ax.scatter(flops, mAPs)

    # Manual (x, y) offsets so annotations do not overlap the markers.
    shift = [[-5.5, -0.004], [1, -0.004], [-1, -0.014], [-2, 0.006], [-7, 0.006],
        [1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
        [1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]

    for i, model_type in enumerate(model_types):
        ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))

    # Connect members of the same model family with a line.
    for family in ([0, 1, 2], [3, 4, 5], [6, 7], [9, 10], [11, 12], [13, 14]):
        ax.plot(flops[family], mAPs[family])

    ax.set_xlim(0, 70)
    ax.set_ylim(0.2, 0.5)
    ax.set_xlabel('Multi-adds (million)', fontsize=15)
    ax.set_ylabel('mAP', fontsize=15)
    ax.tick_params(axis='x', labelsize=12)
    ax.tick_params(axis='y', labelsize=12)

    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.savefig(save_out_path)
    print('Write out figure to {}'.format(save_out_path))
8,633 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def prepare_plot_long_4_rows(sorted_lbs):
    """Build a 4-row figure spanning all classes, 132 classes per row.

    Each row has a log-scale "number of audio clips" axis (left) and a twin
    [0, 1] "average precision" axis (right). Class labels are truncated to 25
    characters and a trailing word shorter than 3 characters is dropped.

    Returns:
      (ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b): the four count axes
      followed by their four twin AP axes.
    """
    N = len(sorted_lbs)
    fig, count_axes = plt.subplots(4, 1, sharey=False, facecolor='w', figsize=(10, 10.5))

    fontsize = 5
    K = 132   # classes per row
    segments = [(0, K), (K, 2 * K), (2 * K, 3 * K), (3 * K, N)]

    # Truncate labels for tick display.
    truncated_sorted_lbs = []
    for lb in sorted_lbs:
        lb = lb[0 : 25]
        words = lb.split(' ')
        if len(words[-1]) < 3:
            lb = ' '.join(words[0:-1])
        truncated_sorted_lbs.append(lb)

    ap_axes = []
    for row, axa in enumerate(count_axes):
        lo, hi = segments[row]

        axa.set_xlim(lo, hi)
        axa.grid(which='major', axis='x', linestyle='-', alpha=0.3)
        axa.set_yscale('log')
        axa.xaxis.set_ticks(np.arange(lo, hi))
        axa.xaxis.set_ticklabels(truncated_sorted_lbs[lo : hi], rotation=90, fontsize=fontsize)
        axa.xaxis.tick_bottom()
        axa.set_ylabel("Number of audio clips")

        axb = axa.twinx()
        axb.set_ylim(0., 1.)
        axb.set_ylabel('Average precision')
        axb.yaxis.grid(color='grey', linestyle='--', alpha=0.5)

        # Hide interior spines so the four rows read as one continuous strip:
        # only the first row keeps its left spine, only the last its right.
        if row > 0:
            axa.spines['left'].set_visible(False)
            axb.spines['left'].set_visible(False)
        if row < 3:
            axa.spines['right'].set_visible(False)
            axb.spines['right'].set_visible(False)

        ap_axes.append(axb)

    plt.subplots_adjust(hspace=0.8)

    return (count_axes[0], count_axes[1], count_axes[2], count_axes[3],
        ap_axes[0], ap_axes[1], ap_axes[2], ap_axes[3])
def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
    """Scatter the same series *x* (indexed 0..len(x)-1) on all four row axes."""
    indices = np.arange(len(x))
    for axis in (ax, ax2, ax3, ax4):
        axis.scatter(indices, x, s=s, c=c, marker=marker, alpha=alpha)
def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
    """Plot *x* on all four row axes; return the labelled line from the last axis."""
    for axis in (ax, ax2, ax3):
        axis.plot(x, c=c, linewidth=linewidth, alpha=alpha)
    line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
    return line
def create_folder(fd):
    """Create directory *fd* and any missing parents; no-op if present."""
    # exist_ok makes the call idempotent, matching the original exists-check.
    os.makedirs(fd, exist_ok=True)
The provided code snippet includes necessary dependencies for implementing the `plot_long_fig` function. Write a Python function `def plot_long_fig(args)` to solve the following problem:
Average instance system of [1] with an mAP of 0.317. [1] Kong, Qiuqiang, Changsong Yu, Yong Xu, Turab Iqbal, Wenwu Wang, and Mark D. Plumbley. "Weakly labelled audioset tagging with attention neural networks." IEEE/ACM Transactions on Audio, Speech, and Language Processing 27, no. 11 (2019): 1791-1802.
Here is the function:
def plot_long_fig(args):
    """Plot per-class AP of four systems over all classes, plus training-set
    size bars and label-quality markers. Saves results/long_fig.pdf.

    The baseline is the average instance system of [1] with an mAP of 0.317.
    [1] Kong, Qiuqiang, Changsong Yu, Yong Xu, Turab Iqbal, Wenwu Wang, and
    Mark D. Plumbley. "Weakly labelled audioset tagging with attention neural
    networks." IEEE/ACM Transactions on Audio, Speech, and Language Processing
    27, no. 11 (2019): 1791-1802.

    Fix vs. the previous version: ``plt.tight_layout(0, 0, 0)`` used positional
    pad arguments, removed in Matplotlib 3.3; now passed as keywords.
    """
    # Paths
    with open('paper_statistics/stats_for_long_fig.pkl', 'rb') as f:
        stats = pickle.load(f)

    save_out_path = 'results/long_fig.pdf'
    create_folder(os.path.dirname(save_out_path))

    # Load meta
    N = len(config.labels)
    sorted_indexes = stats['sorted_indexes_for_plot']
    sorted_labels = np.array(config.labels)[sorted_indexes]

    audio_clips_per_class = stats['official_balanced_training_samples'] + \
        stats['official_unbalanced_training_samples']
    audio_clips_per_class = audio_clips_per_class[sorted_indexes]

    # Prepare axes for plot
    (ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)

    # Plot the number of training samples as bars on every count axis
    for axa in (ax1a, ax2a, ax3a, ax4a):
        axa.bar(np.arange(N), audio_clips_per_class, alpha=0.3)

    # Per-class AP of each system, re-ordered for plotting
    maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
    maps_avg_instances = maps_avg_instances[sorted_indexes]

    maps_panns_cnn14 = stats['panns_cnn14']['eval']['average_precision']
    maps_panns_cnn14 = maps_panns_cnn14[sorted_indexes]

    maps_panns_mobilenetv1 = stats['panns_mobilenetv1']['eval']['average_precision']
    maps_panns_mobilenetv1 = maps_panns_mobilenetv1[sorted_indexes]

    maps_panns_wavegram_logmel_cnn14 = stats['panns_wavegram_logmel_cnn14']['eval']['average_precision']
    maps_panns_wavegram_logmel_cnn14 = maps_panns_wavegram_logmel_cnn14[sorted_indexes]

    # Plot mAPs
    _scatter_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
    _scatter_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
    _scatter_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
    _scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')

    linewidth = 0.7
    line0te = _plot_4_rows(maps_panns_wavegram_logmel_cnn14, ax1b, ax2b, ax3b, ax4b,
        c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
    line1te = _plot_4_rows(maps_panns_cnn14, ax1b, ax2b, ax3b, ax4b, c='r',
        linewidth=linewidth, label='AP with CNN14')
    line2te = _plot_4_rows(maps_panns_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b',
        linewidth=linewidth, label='AP with MobileNetV1')
    line3te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k',
        linewidth=linewidth, label='AP with averaging instances (baseline)')

    # Plot label quality; entries are floats or None (object array), so the
    # elementwise `!= None` comparison below is intentional.
    label_quality = stats['label_quality']
    sorted_label_quality = np.array(label_quality)[sorted_indexes]
    for k in range(len(sorted_label_quality)):
        # Nudge exact 1.0 down so the marker stays inside the [0, 1] axis.
        if sorted_label_quality[k] and sorted_label_quality[k] == 1:
            sorted_label_quality[k] = 0.99

    has_quality = sorted_label_quality != None
    for axb in (ax1b, ax2b, ax3b):
        axb.scatter(np.arange(N)[has_quality], sorted_label_quality[has_quality],
            s=12, c='r', linewidth=0.8, marker='+')
    line_label_quality = ax4b.scatter(np.arange(N)[has_quality],
        sorted_label_quality[has_quality], s=12, c='r', linewidth=0.8,
        marker='+', label='Label quality')

    # Classes without a quality estimate get a dash at 0.5.
    no_quality = np.arange(N)[sorted_label_quality == None]
    for axb in (ax1b, ax2b, ax3b, ax4b):
        axb.scatter(no_quality, 0.5 * np.ones(len(no_quality)), s=12, c='r',
            linewidth=0.8, marker='_')

    plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality],
        fontsize=6, loc=1)
    # Positional pad args were removed in Matplotlib 3.3; original was (0, 0, 0).
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.savefig(save_out_path)
    print('Save fig to {}'.format(save_out_path))
8,634 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
    fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
    """Load mean bal/test mAP curves from the fixed paper workspace.

    Builds the statistics.pkl path from the training configuration, then
    averages the per-class AP over classes at every checkpoint.

    Returns:
      (bal_map, test_map, legend): two (N,) arrays of per-checkpoint mean mAP
      and a legend string describing the configuration.
    """
    workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
    feature_part = 'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax)
    statistics_path = os.path.join(workspace0, 'statistics', filename,
        feature_part, 'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
        'statistics.pkl')

    with open(statistics_path, 'rb') as f:
        statistics_dict = cPickle.load(f)

    def _mean_map(split):
        # (N, classes_num) per checkpoint -> mean over classes -> (N,)
        ap_mat = np.array([stat['average_precision'] for stat in statistics_dict[split]])
        return np.mean(ap_mat, axis=-1)

    legend = '{}, {}, bal={}, aug={}, bs={}'.format(
        data_type, model_type, balanced, augmentation, batch_size)

    return _mean_map('bal'), _mean_map('test'), legend
def plot(args):
    """Plot balanced/test mAP training curves for a selected experiment group.

    For each system in the group chosen by ``args.select``, reads the
    ``statistics.pkl`` written by the training pipeline and draws two curves:
    a pale one (mAP on the balanced training subset) and a solid one (mAP on
    the evaluation set).

    Args:
      args: argparse namespace with fields
        dataset_dir: str, dataset root (used to locate the metadata csv).
        workspace: str, workspace containing the 'statistics' directory.
        select: str, experiment group, e.g. '1_cnn13', '1_aug', '2_all'.

    Side effects: writes 'results/{select}.pdf'.

    Fix vs. original: the '1_cnn13' branch had the labels of
    Cnn13_no_dropout and Cnn13_no_specaug swapped; each label now names the
    model that is actually loaded.
    """
    # Arguments & parameters
    dataset_dir = args.dataset_dir
    workspace = args.workspace
    select = args.select

    classes_num = config.classes_num
    max_plot_iteration = 1000000
    iterations = np.arange(0, max_plot_iteration, 2000)

    class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
        'class_labels_indices.csv')

    save_out_path = 'results/{}.pdf'.format(select)
    create_folder(os.path.dirname(save_out_path))

    # Read labels
    labels = config.labels

    # Plot
    fig, ax = plt.subplots(1, 1, figsize=(15, 8))
    lines = []

    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
        fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
        """Load mAP curves from one experiment's statistics.pkl in `workspace`."""
        statistics_path = os.path.join(workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
            sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            'statistics.pkl')

        # Close the pickle file promptly instead of leaking the handle.
        with open(statistics_path, 'rb') as fp:
            statistics_dict = cPickle.load(fp)

        bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']])    # (N, classes_num)
        bal_map = np.mean(bal_map, axis=-1)
        test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']])    # (N, classes_num)
        test_map = np.mean(test_map, axis=-1)
        legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
        return bal_map, test_map, legend

    bal_alpha = 0.3     # pale curve: mAP on the balanced training subset
    test_alpha = 1.0    # solid curve: mAP on the evaluation set

    def _add(loader, label, color, bal_color=None, window_size=1024,
            hop_size=320, mel_bins=64, data_type='full_train',
            model_type='Cnn13', balanced='balanced', augmentation='mixup'):
        """Load one system's curves and draw them.

        `loader` is _load_metrics (current workspace) or _load_metrics0
        (legacy workspace).  Keyword arguments override the shared defaults
        of the experiment grid; `bal_color` defaults to `color`.
        """
        (bal_map, test_map, _) = loader('main', 32000, window_size, hop_size,
            mel_bins, 50, 14000, data_type, model_type, 'clip_bce', balanced,
            augmentation, 32)
        ax.plot(bal_map, color=(bal_color or color), alpha=bal_alpha)
        line, = ax.plot(test_map, label=label, color=color, alpha=test_alpha)
        lines.append(line)

    if select == '1_cnn13':
        _add(_load_metrics, 'cnn13', 'r')
        # Labels were crossed in the original; each now matches its model.
        _add(_load_metrics, 'cnn13_no_dropout', 'b', model_type='Cnn13_no_dropout')
        _add(_load_metrics, 'cnn13_no_specaug', 'g', model_type='Cnn13_no_specaug')
        _add(_load_metrics, 'Cnn13_no_mixup', 'k', augmentation='none')
        _add(_load_metrics, 'Cnn13_mixup_in_wave', 'c', model_type='Cnn13_mixup_in_wave')

    elif select == '1_pooling':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'cnn13_gmpgapgwrp', 'b', model_type='Cnn13_gwrp')
        _add(_load_metrics, 'cnn13_gmpgapatt', 'g', model_type='Cnn13_att')

    elif select == '1_resnet':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'ResNet18', 'b', model_type='ResNet18')
        _add(_load_metrics, 'resnet34', 'k', model_type='ResNet34')
        _add(_load_metrics, 'resnet50', 'c', model_type='ResNet50')

    elif select == '1_densenet':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'densenet121', 'b', model_type='DenseNet121')
        _add(_load_metrics, 'densenet201', 'g', model_type='DenseNet201')

    elif select == '1_cnn9':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'cnn5', 'b', model_type='Cnn5')
        _add(_load_metrics, 'cnn9', 'g', model_type='Cnn9')

    elif select == '1_hop':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'cnn13_hop500', 'b', hop_size=500)
        _add(_load_metrics, 'cnn13_hop640', 'g', hop_size=640)
        _add(_load_metrics, 'cnn13_hop1000', 'k', hop_size=1000)

    elif select == '1_emb':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'cnn13_emb32', 'b', model_type='Cnn13_emb32')
        _add(_load_metrics, 'cnn13_emb128', 'g', model_type='Cnn13_emb128')
        _add(_load_metrics, 'cnn13_emb512', 'k', model_type='Cnn13_emb512')

    elif select == '1_mobilenet':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'mobilenetv1', 'b', model_type='MobileNetV1')
        _add(_load_metrics, 'mobilenetv2', 'g', model_type='MobileNetV2')

    elif select == '1_waveform':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'Cnn1d_LeeNet', 'g', model_type='Cnn1d_LeeNet')
        _add(_load_metrics, 'Cnn1d_LeeNet18', 'b', model_type='Cnn1d_LeeNet18')
        _add(_load_metrics, 'Cnn1d_DaiNet', 'k', model_type='Cnn1d_DaiNet')
        _add(_load_metrics, 'Cnn1d_ResNet34', 'c', model_type='Cnn1d_ResNet34')
        _add(_load_metrics, 'Cnn1d_ResNet50', 'm', model_type='Cnn1d_ResNet50')

    elif select == '1_waveform_cnn2d':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'Cnn13_SpAndWav', 'b', model_type='Cnn13_SpAndWav')
        _add(_load_metrics, 'Cnn13_WavCnn2d', 'g', model_type='Cnn13_WavCnn2d')

    elif select == '1_decision_level':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'Cnn13_DecisionLevelMax', 'b', model_type='Cnn13_DecisionLevelMax')
        _add(_load_metrics, 'Cnn13_DecisionLevelAvg', 'g', model_type='Cnn13_DecisionLevelAvg')
        _add(_load_metrics, 'Cnn13_DecisionLevelAtt', 'k', model_type='Cnn13_DecisionLevelAtt')

    elif select == '1_transformer':
        _add(_load_metrics, 'cnn13', 'r')
        _add(_load_metrics, 'Cnn13_Transformer1', 'g', model_type='Cnn13_Transformer1')
        _add(_load_metrics, 'Cnn13_Transformer3', 'b', model_type='Cnn13_Transformer3')
        _add(_load_metrics, 'Cnn13_Transformer6', 'k', model_type='Cnn13_Transformer6')

    elif select == '1_aug':
        _add(_load_metrics, 'cnn14,balanced,mixup', 'r', model_type='Cnn14')
        _add(_load_metrics, 'cnn14,none,none', 'g', model_type='Cnn14',
            balanced='none', augmentation='none')
        _add(_load_metrics, 'cnn14,balanced,none', 'b', model_type='Cnn14',
            augmentation='none')
        _add(_load_metrics, 'cnn14,balanced,mixup_from_0_epoch', 'm',
            model_type='Cnn14', augmentation='mixup_from_0_epoch')

    elif select == '1_bal_train_aug':
        # Same grid as '1_aug' but trained on the balanced subset only.
        _add(_load_metrics, 'cnn14,balanced,mixup', 'r', model_type='Cnn14',
            data_type='balanced_train')
        _add(_load_metrics, 'cnn14,none,none', 'g', model_type='Cnn14',
            data_type='balanced_train', balanced='none', augmentation='none')
        _add(_load_metrics, 'cnn14,balanced,none', 'b', model_type='Cnn14',
            data_type='balanced_train', augmentation='none')
        _add(_load_metrics, 'cnn14,balanced,mixup_from_0_epoch', 'm',
            model_type='Cnn14', data_type='balanced_train',
            augmentation='mixup_from_0_epoch')

    elif select == '1_sr':
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        _add(_load_metrics, 'cnn14_16k', 'g', model_type='Cnn14_16k')
        _add(_load_metrics, 'cnn14_8k', 'b', model_type='Cnn14_8k')

    elif select == '1_time_domain':
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        _add(_load_metrics, 'cnn14_time_domain', 'b',
            model_type='Cnn14_mixup_time_domain')

    elif select == '1_partial_full':
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        _add(_load_metrics, 'cnn14,partial_0.9', 'b', model_type='Cnn14',
            data_type='partial_0.9_full_train')
        _add(_load_metrics, 'cnn14,partial_0.8', 'g', model_type='Cnn14',
            data_type='partial_0.8_full_train')
        _add(_load_metrics, 'cnn14,partial_0.7', 'k', model_type='Cnn14',
            data_type='partial_0.7_full_train')
        _add(_load_metrics, 'cnn14,partial_0.5', 'm', model_type='Cnn14',
            data_type='partial_0.5_full_train')

    elif select == '1_window':
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        # bal curve stays red as in the original (possibly a copy-paste slip
        # there — kept to preserve output; TODO confirm intended color).
        _add(_load_metrics, 'cnn14_win2048', 'b', bal_color='r',
            model_type='Cnn14', window_size=2048)

    elif select == '1_melbins':
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        # bal curves stay red as in the original (see note in '1_window').
        _add(_load_metrics, 'cnn14_mel32', 'b', bal_color='r',
            model_type='Cnn14_mel32', mel_bins=32)
        _add(_load_metrics, 'cnn14_mel128', 'g', bal_color='r',
            model_type='Cnn14_mel128', mel_bins=128)

    elif select == '1_alternate':
        # This run trained twice as long; widen the x axis accordingly.
        max_plot_iteration = 2000000
        iterations = np.arange(0, max_plot_iteration, 2000)
        _add(_load_metrics, 'cnn14', 'r', model_type='Cnn14')
        _add(_load_metrics, 'cnn14_alternate', 'b', model_type='Cnn14',
            balanced='alternate')

    # The '2_*' groups read from the legacy workspace via _load_metrics0 and
    # draw all bal curves in blue, as in the original.
    elif select == '2_all':
        _add(_load_metrics0, 'cnn13', 'b')
        _add(_load_metrics0, 'cnn9', 'r', bal_color='b', model_type='Cnn9')
        _add(_load_metrics0, 'cnn5', 'g', bal_color='b', model_type='Cnn5')
        _add(_load_metrics0, 'MobileNetV1', 'k', bal_color='b', model_type='MobileNetV1')
        _add(_load_metrics0, 'Cnn1d_ResNet34', 'grey', bal_color='b', model_type='Cnn1d_ResNet34')
        _add(_load_metrics0, 'ResNet34', 'grey', bal_color='b', model_type='ResNet34')
        _add(_load_metrics0, 'Cnn13_WavCnn2d', 'm', bal_color='b', model_type='Cnn13_WavCnn2d')
        _add(_load_metrics0, 'Cnn13_SpAndWav', 'orange', bal_color='b', model_type='Cnn13_SpAndWav')

    elif select == '2_emb':
        _add(_load_metrics0, 'cnn13', 'b')
        _add(_load_metrics0, 'Cnn13_emb32', 'r', bal_color='b', model_type='Cnn13_emb32')
        _add(_load_metrics0, 'Cnn13_128', 'k', bal_color='b', model_type='Cnn13_emb128')
        _add(_load_metrics0, 'Cnn13_512', 'g', bal_color='b', model_type='Cnn13_emb512')

    elif select == '2_aug':
        _add(_load_metrics0, 'cnn13', 'b')
        # Label text kept byte-identical to the original legend entry.
        _add(_load_metrics, 'cnn14,none,none', 'c', model_type='Cnn13_no_specaug',
            balanced='none', augmentation='none')

    ax.set_ylim(0, 1.)
    ax.set_xlim(0, len(iterations))
    # X ticks are curve indices; tick labels translate them to iterations
    # (one statistics entry every 2000 iterations, one tick every 25 entries).
    ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
    ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
    ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
    ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
    ax.grid(color='b', linestyle='solid', linewidth=0.3)
    plt.legend(handles=lines, loc=2)

    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))
def create_folder(fd):
    """Create directory `fd` (including parents) if it does not exist.

    NOTE(review): this shadows the `create_folder` imported from `utilities`
    at the top of this section — presumably intentionally; semantics are kept
    identical.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs.
    os.makedirs(fd, exist_ok=True)
def plot_for_paper(args):
    """Plot mAP training curves for the paper figures and save them as a PDF.

    Each curve pair shows one training configuration: a faint line is the mAP
    on the balanced training subset, a solid labelled line (same color) is the
    mAP on the evaluation set.

    Args:
      args: argparse namespace with attributes:
        workspace: str, workspace containing ``statistics/.../statistics.pkl``
        select: str, which figure to draw; one of '2_all', '2_emb', '2_bal',
          '2_sr', '2_partial', '2_melbins'
        dataset_dir: accepted for interface compatibility but unused here.

    Side effects: writes ``results/paper_{select}.pdf`` and prints its path.
    """
    workspace = args.workspace
    select = args.select

    # Fallback x-axis length if `select` matches no known figure; every known
    # figure overrides this with the 540k-iteration axis below.
    max_plot_iteration = 1000000
    iterations = np.arange(0, max_plot_iteration, 2000)

    save_out_path = 'results/paper_{}.pdf'.format(select)
    create_folder(os.path.dirname(save_out_path))

    fig, ax = plt.subplots(1, 1, figsize=(6, 4))

    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins,
                      fmin, fmax, data_type, model_type, loss_type, balanced,
                      augmentation, batch_size):
        """Load (bal_map, test_map, legend) from this workspace's statistics
        pickle; per-checkpoint mAP is averaged over classes."""
        statistics_path = os.path.join(workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
                sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            'statistics.pkl')

        # Context manager closes the pickle file deterministically (the
        # original `cPickle.load(open(...))` leaked the handle).
        with open(statistics_path, 'rb') as f:
            statistics_dict = cPickle.load(f)

        bal_map = np.array([statistics['average_precision']
            for statistics in statistics_dict['bal']])    # (N, classes_num)
        bal_map = np.mean(bal_map, axis=-1)
        test_map = np.array([statistics['average_precision']
            for statistics in statistics_dict['test']])   # (N, classes_num)
        test_map = np.mean(test_map, axis=-1)
        legend = '{}, {}, bal={}, aug={}, bs={}'.format(
            data_type, model_type, balanced, augmentation, batch_size)
        return bal_map, test_map, legend

    bal_alpha = 0.3     # faint curve: balanced training subset
    test_alpha = 1.0    # solid curve: evaluation set
    linewidth = 1.
    lines = []          # legend handles (evaluation curves only)

    def _plot_pair(bal_map, test_map, label, color):
        """Draw one configuration's curve pair in a single color.

        Fixes the original copy-paste bug where the 32/128-melbin balanced
        curves were drawn in red regardless of their evaluation curve's color
        and without the shared linewidth.
        """
        ax.plot(bal_map, color=color, alpha=bal_alpha, linewidth=linewidth)
        line, = ax.plot(test_map, label=label, color=color,
                        alpha=test_alpha, linewidth=linewidth)
        lines.append(line)

    max_plot_iteration = 540000

    # Each spec: (loader, data_type, model_type, mel_bins, balanced,
    # augmentation, label, color). All runs share sample_rate=32000,
    # window_size=1024, hop_size=320, fmin=50, fmax=14000, loss=clip_bce,
    # batch_size=32. `_load_metrics0` reads from the legacy workspace,
    # `_load_metrics` from `args.workspace`.
    curve_specs = {
        '2_all': [
            (_load_metrics0, 'full_train', 'Cnn13', 64, 'balanced', 'mixup', 'CNN14', 'r'),
            (_load_metrics0, 'full_train', 'MobileNetV1', 64, 'balanced', 'mixup', 'MobileNetV1', 'b'),
            (_load_metrics0, 'full_train', 'Cnn13_SpAndWav', 64, 'balanced', 'mixup', 'Wavegram-Logmel-CNN', 'g'),
        ],
        '2_emb': [
            (_load_metrics0, 'full_train', 'Cnn13', 64, 'balanced', 'mixup', 'CNN14,emb=2048', 'r'),
            (_load_metrics0, 'full_train', 'Cnn13_emb32', 64, 'balanced', 'mixup', 'CNN14,emb=32', 'b'),
            (_load_metrics0, 'full_train', 'Cnn13_emb128', 64, 'balanced', 'mixup', 'CNN14,emb=128', 'g'),
        ],
        '2_bal': [
            (_load_metrics0, 'full_train', 'Cnn13', 64, 'balanced', 'mixup', 'CNN14,bal,mixup (1.9m)', 'r'),
            (_load_metrics, 'full_train', 'Cnn14_mixup_time_domain', 64, 'balanced', 'mixup', 'CNN14,bal,mixup-wav (1.9m)', 'y'),
            (_load_metrics, 'full_train', 'Cnn14', 64, 'none', 'none', 'CNN14,no-bal,no-mixup (1.9m)', 'b'),
            (_load_metrics, 'full_train', 'Cnn14', 64, 'balanced', 'none', 'CNN14,bal,no-mixup (1.9m)', 'g'),
            (_load_metrics, 'balanced_train', 'Cnn14', 64, 'balanced', 'none', 'CNN14,bal,no-mixup (20k)', 'k'),
            (_load_metrics, 'balanced_train', 'Cnn14', 64, 'balanced', 'mixup', 'CNN14,bal,mixup (20k)', 'm'),
        ],
        '2_sr': [
            (_load_metrics, 'full_train', 'Cnn14', 64, 'balanced', 'mixup', 'CNN14,32kHz', 'r'),
            (_load_metrics, 'full_train', 'Cnn14_16k', 64, 'balanced', 'mixup', 'CNN14,16kHz', 'b'),
            (_load_metrics, 'full_train', 'Cnn14_8k', 64, 'balanced', 'mixup', 'CNN14,8kHz', 'g'),
        ],
        '2_partial': [
            (_load_metrics, 'full_train', 'Cnn14', 64, 'balanced', 'mixup', 'CNN14 (100% full)', 'r'),
            (_load_metrics, 'partial_0.8_full_train', 'Cnn14', 64, 'balanced', 'mixup', 'CNN14 (80% full)', 'b'),
            (_load_metrics, 'partial_0.5_full_train', 'Cnn14', 64, 'balanced', 'mixup', 'cnn14 (50% full)', 'g'),
        ],
        '2_melbins': [
            (_load_metrics, 'full_train', 'Cnn14', 64, 'balanced', 'mixup', 'CNN14,64-melbins', 'r'),
            (_load_metrics, 'full_train', 'Cnn14_mel32', 32, 'balanced', 'mixup', 'CNN14,32-melbins', 'b'),
            (_load_metrics, 'full_train', 'Cnn14_mel128', 128, 'balanced', 'mixup', 'CNN14,128-melbins', 'g'),
        ],
    }

    if select in curve_specs:
        iterations = np.arange(0, max_plot_iteration, 2000)
        for (loader, data_type, model_type, mel_bins, balanced, augmentation,
                label, color) in curve_specs[select]:
            (bal_map, test_map, _) = loader('main', 32000, 1024, 320, mel_bins,
                50, 14000, data_type, model_type, 'clip_bce', balanced,
                augmentation, 32)
            _plot_pair(bal_map, test_map, label, color)

    ax.set_ylim(0, 0.8)
    ax.set_xlim(0, len(iterations))
    ax.set_xlabel('Iterations')
    ax.set_ylabel('mAP')
    # Checkpoints are 2k iterations apart, so 50 ticks span 100k iterations.
    ax.xaxis.set_ticks(np.arange(0, len(iterations), 50))
    ax.xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
    ax.yaxis.set_ticks(np.arange(0, 0.81, 0.05))
    ax.yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3', '', '0.4',
        '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
    ax.yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
    ax.xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
    plt.legend(handles=lines, loc=2)
    # tight_layout's positional (pad, h_pad, w_pad) signature was removed in
    # Matplotlib 3.6; the keyword form is equivalent.
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))
8,635 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
statistics_path = os.path.join(workspace0, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
def plot(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(15, 8))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
if select == '1_cnn13':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_dropout', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_no_specaug', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_dropout', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_mixup', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_mixup_in_wave', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_mixup_in_wave', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_pooling':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_gwrp', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapgwrp', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_att', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapatt', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_resnet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet34', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet50', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_densenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet121', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet121', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet201', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet201', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_cnn9':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_hop':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop500', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop640', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop1000', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_emb':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb128', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb512', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_mobilenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv1', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv2', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_DaiNet', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='c', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet50', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform_cnn2d':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_decision_level':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelMax', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAvg', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAtt', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_transformer':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer1', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer3', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer3', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer6', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer6', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_bal_train_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_sr':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_16k', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_8k', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_time_domain':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_time_domain', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_partial_full':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.8', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.5', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_window':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 2048,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_win2048', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_melbins':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel128', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_alternate':
max_plot_iteration = 2000000
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'alternate', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_alternate', color='b', alpha=test_alpha)
lines.append(line)
elif select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='MobileNetV1', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='m', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='orange', alpha=test_alpha)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb32', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_128', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_512', color='g', alpha=test_alpha)
lines.append(line)
elif select == '2_aug':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='c', alpha=test_alpha)
lines.append(line)
ax.set_ylim(0, 1.)
ax.set_xlim(0, len(iterations))
ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
ax.grid(color='b', linestyle='solid', linewidth=0.3)
plt.legend(handles=lines, loc=2)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def create_folder(fd):
    """Create directory `fd` (including parents) if it does not already exist.

    Args:
      fd: str, path of the directory to create.
    """
    # exist_ok avoids the check-then-create race of `if not exists: makedirs`
    # when several jobs start at the same time.
    os.makedirs(fd, exist_ok=True)
def plot_for_paper2(args):
    """Plot the six-panel mAP-vs-iteration comparison figure of the paper
    (architectures, training data & augmentation, embedding size, amount of
    training data, sampling rate, number of mel bins) and save it to
    results/paper2.pdf.

    Args:
      args: argparse Namespace; only args.workspace (the directory containing
        the 'statistics' folder written during training) is used.
    """
    workspace = args.workspace

    save_out_path = 'results/paper2.pdf'
    create_folder(os.path.dirname(save_out_path))

    # Shared plot styling.
    bal_alpha = 0.3    # balanced-subset (training) curves are drawn faded
    test_alpha = 1.0   # evaluation curves are drawn opaque
    linewidth = 1.
    max_plot_iteration = 540000
    # Statistics are dumped every 2000 iterations, so one point per 2k steps.
    iterations = np.arange(0, max_plot_iteration, 2000)

    def _load_maps(model_type, data_type='full_train', balanced='balanced',
                   augmentation='mixup', mel_bins=64, legacy=False):
        """Return (bal_map, test_map): classwise average precision averaged
        over classes, one value per evaluated checkpoint, on the balanced
        training subset and on the evaluation subset.

        legacy=True reads from the workspace of the earlier experiments
        (models such as 'Cnn13*' were only trained there).
        """
        legacy_workspace = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
        root = legacy_workspace if legacy else workspace
        # All curves in this figure share sample_rate=32000, window=1024,
        # hop=320, fmin=50, fmax=14000, loss=clip_bce, batch_size=32.
        statistics_path = os.path.join(root, 'statistics', 'main',
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
                32000, 1024, 320, mel_bins, 50, 14000),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format('clip_bce'), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(32),
            'statistics.pkl')
        # `with` closes the file handle (the original open() call leaked it).
        with open(statistics_path, 'rb') as f:
            statistics_dict = cPickle.load(f)
        bal_map = np.mean(np.array([statistics['average_precision']
            for statistics in statistics_dict['bal']]), axis=-1)    # (N,)
        test_map = np.mean(np.array([statistics['average_precision']
            for statistics in statistics_dict['test']]), axis=-1)   # (N,)
        return bal_map, test_map

    def _plot_pair(axis, maps, label, color):
        """Draw one faded balanced curve plus one opaque eval curve on `axis`
        and return the eval line for the legend."""
        bal_map, test_map = maps
        axis.plot(bal_map, color=color, alpha=bal_alpha, linewidth=linewidth)
        line, = axis.plot(test_map, label=label, color=color, alpha=test_alpha,
            linewidth=linewidth)
        return line

    fig, ax = plt.subplots(2, 3, figsize=(14, 7))

    # (a) Architectures. 'Cnn13*' are the legacy names of the CNN14 family.
    panel = ax[0, 0]
    lines = [
        _plot_pair(panel, _load_maps('Cnn13', legacy=True), 'CNN14', 'r'),
        _plot_pair(panel, _load_maps('MobileNetV1', legacy=True), 'MobileNetV1', 'b'),
        _plot_pair(panel, _load_maps('Cnn13_SpAndWav', legacy=True), 'Wavegram-Logmel-CNN', 'g'),
    ]
    panel.legend(handles=lines, loc=2)
    panel.set_title('(a) Comparison of architectures')

    # (b) Training data and augmentation.
    panel = ax[0, 1]
    lines = [
        _plot_pair(panel, _load_maps('Cnn13', legacy=True),
            'CNN14,bal,mixup (1.9m)', 'r'),
        _plot_pair(panel, _load_maps('Cnn14', balanced='none', augmentation='none'),
            'CNN14,no-bal,no-mixup (1.9m)', 'b'),
        _plot_pair(panel, _load_maps('Cnn14_mixup_time_domain'),
            'CNN14,bal,mixup-wav (1.9m)', 'y'),
        _plot_pair(panel, _load_maps('Cnn14', augmentation='none'),
            'CNN14,bal,no-mixup (1.9m)', 'g'),
        _plot_pair(panel, _load_maps('Cnn14', data_type='balanced_train', augmentation='none'),
            'CNN14,bal,no-mixup (20k)', 'k'),
        _plot_pair(panel, _load_maps('Cnn14', data_type='balanced_train'),
            'CNN14,bal,mixup (20k)', 'm'),
    ]
    panel.legend(handles=lines, loc=2, fontsize=8)
    panel.set_title('(b) Comparison of training data and augmentation')

    # (c) Embedding size.
    panel = ax[0, 2]
    lines = [
        _plot_pair(panel, _load_maps('Cnn13', legacy=True), 'CNN14,emb=2048', 'r'),
        _plot_pair(panel, _load_maps('Cnn13_emb32', legacy=True), 'CNN14,emb=32', 'b'),
        _plot_pair(panel, _load_maps('Cnn13_emb128', legacy=True), 'CNN14,emb=128', 'g'),
    ]
    panel.legend(handles=lines, loc=2)
    panel.set_title('(c) Comparison of embedding size')

    # (d) Amount of training data.
    panel = ax[1, 0]
    lines = [
        _plot_pair(panel, _load_maps('Cnn14'), 'CNN14 (100% full)', 'r'),
        _plot_pair(panel, _load_maps('Cnn14', data_type='partial_0.8_full_train'),
            'CNN14 (80% full)', 'b'),
        _plot_pair(panel, _load_maps('Cnn14', data_type='partial_0.5_full_train'),
            'cnn14 (50% full)', 'g'),
    ]
    panel.legend(handles=lines, loc=2)
    panel.set_title('(d) Comparison of amount of training data')

    # (e) Sampling rate.
    panel = ax[1, 1]
    lines = [
        _plot_pair(panel, _load_maps('Cnn14'), 'CNN14,32kHz', 'r'),
        _plot_pair(panel, _load_maps('Cnn14_16k'), 'CNN14,16kHz', 'b'),
        _plot_pair(panel, _load_maps('Cnn14_8k'), 'CNN14,8kHz', 'g'),
    ]
    panel.legend(handles=lines, loc=2)
    panel.set_title('(e) Comparison of sampling rate')

    # (f) Number of mel bins.
    panel = ax[1, 2]
    lines = [
        _plot_pair(panel, _load_maps('Cnn14'), 'CNN14,64-melbins', 'r'),
        _plot_pair(panel, _load_maps('Cnn14_mel32', mel_bins=32), 'CNN14,32-melbins', 'b'),
        _plot_pair(panel, _load_maps('Cnn14_mel128', mel_bins=128), 'CNN14,128-melbins', 'g'),
    ]
    panel.legend(handles=lines, loc=2)
    panel.set_title('(f) Comparison of mel bins number')

    # Common axis cosmetics for all six panels.
    for i in range(2):
        for j in range(3):
            ax[i, j].set_ylim(0, 0.8)
            ax[i, j].set_xlim(0, len(iterations))
            ax[i, j].set_xlabel('Iterations')
            ax[i, j].set_ylabel('mAP')
            # 50 points * 2000 iterations = one major tick per 100k iterations.
            ax[i, j].xaxis.set_ticks(np.arange(0, len(iterations), 50))
            ax[i, j].xaxis.set_ticklabels(['0', '100k', '200k', '300k', '400k', '500k'])
            ax[i, j].yaxis.set_ticks(np.arange(0, 0.81, 0.05))
            ax[i, j].yaxis.set_ticklabels(['0', '', '0.1', '', '0.2', '', '0.3',
                '', '0.4', '', '0.5', '', '0.6', '', '0.7', '', '0.8'])
            ax[i, j].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)
            ax[i, j].xaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)

    # Keyword form of the original tight_layout(0, 1, 0): positional
    # arguments to tight_layout were removed in matplotlib 3.3.
    plt.tight_layout(pad=0, h_pad=1, w_pad=0)
    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))
import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def d_prime(auc):
    """Convert an AUC value to d-prime: d' = sqrt(2) * ppf(auc), where ppf is
    the inverse CDF of the standard normal distribution.

    Args:
      auc: float in (0, 1), area under the ROC curve.

    Returns:
      float, the d-prime sensitivity index (0 when auc == 0.5).
    """
    # Local import: `stats` (scipy.stats) is not imported at module level,
    # so the original body raised NameError when called.
    from scipy import stats
    return stats.norm().ppf(auc) * np.sqrt(2.0)
def table_values(args):
    """Print the evaluation mAP, mAUC and d-prime of one trained model at its
    chosen checkpoint, for filling in the results tables.

    Args:
      args: argparse Namespace with attributes:
        workspace: str, root directory containing the 'statistics' folder.
        select: str, key of the experiment to report (e.g. 'cnn13',
          'mobilenetv1', 'cnn13_hop500', ...). Unknown keys print nothing,
          matching the original if/elif behavior.
    """
    workspace = args.workspace
    select = args.select

    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins,
            fmin, fmax, data_type, model_type, loss_type, balanced,
            augmentation, batch_size, iteration):
        """Load statistics.pkl for one training config and print the metrics
        at the checkpoint reached after `iteration` training iterations."""
        statistics_path = os.path.join(workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
            sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            'statistics.pkl')

        # `with` closes the file handle (the original open() call leaked it).
        with open(statistics_path, 'rb') as f:
            statistics_dict = cPickle.load(f)

        idx = iteration // 2000  # statistics are dumped every 2000 iterations
        mAP = np.mean(statistics_dict['test'][idx]['average_precision'])
        mAUC = np.mean(statistics_dict['test'][idx]['auc'])
        dprime = d_prime(mAUC)

        print('mAP: {:.3f}'.format(mAP))
        print('mAUC: {:.3f}'.format(mAUC))
        print('dprime: {:.3f}'.format(dprime))

    # select -> (model_type, hop_size, best iteration). All experiments share
    # sample_rate=32000, window=1024, mel_bins=64, fmin=50, fmax=14000,
    # full_train, clip_bce, balanced sampling, mixup, batch_size=32; this
    # table replaces the original 24-branch if/elif chain.
    experiments = {
        'cnn13': ('Cnn13', 320, 600000),
        'cnn5': ('Cnn5', 320, 440000),
        'cnn9': ('Cnn9', 320, 440000),
        'cnn13_decisionlevelmax': ('Cnn13_DecisionLevelMax', 320, 400000),
        'cnn13_decisionlevelavg': ('Cnn13_DecisionLevelAvg', 320, 600000),
        'cnn13_decisionlevelatt': ('Cnn13_DecisionLevelAtt', 320, 600000),
        'cnn13_emb32': ('Cnn13_emb32', 320, 560000),
        'cnn13_emb128': ('Cnn13_emb128', 320, 560000),
        'cnn13_emb512': ('Cnn13_emb512', 320, 440000),
        'cnn13_hop500': ('Cnn13', 500, 440000),
        'cnn13_hop640': ('Cnn13', 640, 440000),
        'cnn13_hop1000': ('Cnn13', 1000, 540000),
        'mobilenetv1': ('MobileNetV1', 320, 560000),
        'mobilenetv2': ('MobileNetV2', 320, 560000),
        'resnet18': ('ResNet18', 320, 600000),
        'resnet34': ('ResNet34', 320, 600000),
        'resnet50': ('ResNet50', 320, 600000),
        'dainet': ('Cnn1d_DaiNet', 320, 600000),
        'leenet': ('Cnn1d_LeeNet', 320, 540000),
        'leenet18': ('Cnn1d_LeeNet18', 320, 440000),
        'resnet34_1d': ('Cnn1d_ResNet34', 320, 500000),
        'resnet50_1d': ('Cnn1d_ResNet50', 320, 500000),
        'waveform_cnn2d': ('Cnn13_WavCnn2d', 320, 660000),
        'waveform_spandwav': ('Cnn13_SpAndWav', 320, 700000),
    }

    if select in experiments:
        model_type, hop_size, iteration = experiments[select]
        _load_metrics('main', 32000, 1024, hop_size, 64, 50, 14000,
            'full_train', model_type, 'clip_bce', 'balanced', 'mixup', 32,
            iteration)
import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def plot(args):
    """Plot mean-AP training curves for one group of ablation experiments.

    Loads 'statistics.pkl' dumps from the workspace for every model in the
    group chosen by ``args.select`` and writes the figure to
    results/<select>.pdf.

    Args:
      args: argparse namespace with at least:
        workspace: str, root directory containing 'statistics/...'
        select: str, which experiment group to plot (e.g. '1_cnn13').

    Fixes vs. previous revision:
      * The '2_all', '2_emb' and '2_aug' branches called an undefined
        function `_load_metrics0` (NameError); they now use `_load_metrics`.
      * In '1_cnn13' the legend labels for Cnn13_no_dropout and
        Cnn13_no_specaug were swapped; each curve is now labelled with its
        own model.
    """
    # Arguments & parameters
    workspace = args.workspace
    select = args.select

    max_plot_iteration = 1000000
    # Statistics are dumped every 2000 iterations, so one curve point == 2k iters.
    iterations = np.arange(0, max_plot_iteration, 2000)

    save_out_path = 'results/{}.pdf'.format(select)
    create_folder(os.path.dirname(save_out_path))

    # Plot
    fig, ax = plt.subplots(1, 1, figsize=(15, 8))

    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins,
            fmin, fmax, data_type, model_type, loss_type, balanced,
            augmentation, batch_size):
        """Load the mean AP curves (balanced-train and eval) for one run."""
        statistics_path = os.path.join(workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
                sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation),
            'batch_size={}'.format(batch_size), 'statistics.pkl')

        statistics_dict = cPickle.load(open(statistics_path, 'rb'))

        bal_map = np.array([statistics['average_precision']
            for statistics in statistics_dict['bal']])    # (N, classes_num)
        bal_map = np.mean(bal_map, axis=-1)
        test_map = np.array([statistics['average_precision']
            for statistics in statistics_dict['test']])   # (N, classes_num)
        test_map = np.mean(test_map, axis=-1)
        legend = '{}, {}, bal={}, aug={}, bs={}'.format(
            data_type, model_type, balanced, augmentation, batch_size)

        return bal_map, test_map, legend

    def _curves(model_type, data_type='full_train', balanced='balanced',
            augmentation='mixup', window_size=1024, hop_size=320, mel_bins=64):
        """Shortcut around _load_metrics using the experiment-wide defaults."""
        bal_map, test_map, _ = _load_metrics('main', 32000, window_size,
            hop_size, mel_bins, 50, 14000, data_type, model_type, 'clip_bce',
            balanced, augmentation, 32)
        return bal_map, test_map

    bal_alpha = 0.3     # faint curve: mAP on the balanced training subset
    test_alpha = 1.0    # solid curve: mAP on the evaluation set
    lines = []

    def _plot(bal_map, test_map, label, color, bal_color=None):
        """Draw one model's pair of curves and register its legend handle."""
        ax.plot(bal_map, color=bal_color if bal_color else color,
            alpha=bal_alpha)
        line, = ax.plot(test_map, label=label, color=color, alpha=test_alpha)
        lines.append(line)

    if select == '1_cnn13':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        # Labels were previously swapped between the next two models; fixed so
        # each curve is labelled with the model that produced it.
        _plot(*_curves('Cnn13_no_dropout'), 'cnn13_no_dropout', 'b')
        _plot(*_curves('Cnn13_no_specaug'), 'cnn13_no_specaug', 'g')
        _plot(*_curves('Cnn13', augmentation='none'), 'Cnn13_no_mixup', 'k')
        _plot(*_curves('Cnn13_mixup_in_wave'), 'Cnn13_mixup_in_wave', 'c')

    elif select == '1_pooling':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13_gwrp'), 'cnn13_gmpgapgwrp', 'b')
        _plot(*_curves('Cnn13_att'), 'cnn13_gmpgapatt', 'g')

    elif select == '1_resnet':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('ResNet18'), 'ResNet18', 'b')
        _plot(*_curves('ResNet34'), 'resnet34', 'k')
        _plot(*_curves('ResNet50'), 'resnet50', 'c')

    elif select == '1_densenet':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('DenseNet121'), 'densenet121', 'b')
        _plot(*_curves('DenseNet201'), 'densenet201', 'g')

    elif select == '1_cnn9':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn5'), 'cnn5', 'b')
        _plot(*_curves('Cnn9'), 'cnn9', 'g')

    elif select == '1_hop':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13', hop_size=500), 'cnn13_hop500', 'b')
        _plot(*_curves('Cnn13', hop_size=640), 'cnn13_hop640', 'g')
        _plot(*_curves('Cnn13', hop_size=1000), 'cnn13_hop1000', 'k')

    elif select == '1_emb':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13_emb32'), 'cnn13_emb32', 'b')
        _plot(*_curves('Cnn13_emb128'), 'cnn13_emb128', 'g')
        _plot(*_curves('Cnn13_emb512'), 'cnn13_emb512', 'k')

    elif select == '1_mobilenet':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('MobileNetV1'), 'mobilenetv1', 'b')
        _plot(*_curves('MobileNetV2'), 'mobilenetv2', 'g')

    elif select == '1_waveform':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn1d_LeeNet'), 'Cnn1d_LeeNet', 'g')
        _plot(*_curves('Cnn1d_LeeNet18'), 'Cnn1d_LeeNet18', 'b')
        _plot(*_curves('Cnn1d_DaiNet'), 'Cnn1d_DaiNet', 'k')
        _plot(*_curves('Cnn1d_ResNet34'), 'Cnn1d_ResNet34', 'c')
        _plot(*_curves('Cnn1d_ResNet50'), 'Cnn1d_ResNet50', 'm')

    elif select == '1_waveform_cnn2d':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13_SpAndWav'), 'Cnn13_SpAndWav', 'b')
        _plot(*_curves('Cnn13_WavCnn2d'), 'Cnn13_WavCnn2d', 'g')

    elif select == '1_decision_level':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13_DecisionLevelMax'), 'Cnn13_DecisionLevelMax', 'b')
        _plot(*_curves('Cnn13_DecisionLevelAvg'), 'Cnn13_DecisionLevelAvg', 'g')
        _plot(*_curves('Cnn13_DecisionLevelAtt'), 'Cnn13_DecisionLevelAtt', 'k')

    elif select == '1_transformer':
        _plot(*_curves('Cnn13'), 'cnn13', 'r')
        _plot(*_curves('Cnn13_Transformer1'), 'Cnn13_Transformer1', 'g')
        _plot(*_curves('Cnn13_Transformer3'), 'Cnn13_Transformer3', 'b')
        _plot(*_curves('Cnn13_Transformer6'), 'Cnn13_Transformer6', 'k')

    elif select == '1_aug':
        _plot(*_curves('Cnn14'), 'cnn14,balanced,mixup', 'r')
        _plot(*_curves('Cnn14', balanced='none', augmentation='none'),
            'cnn14,none,none', 'g')
        _plot(*_curves('Cnn14', augmentation='none'),
            'cnn14,balanced,none', 'b')
        _plot(*_curves('Cnn14', augmentation='mixup_from_0_epoch'),
            'cnn14,balanced,mixup_from_0_epoch', 'm')

    elif select == '1_bal_train_aug':
        _plot(*_curves('Cnn14', data_type='balanced_train'),
            'cnn14,balanced,mixup', 'r')
        _plot(*_curves('Cnn14', data_type='balanced_train', balanced='none',
            augmentation='none'), 'cnn14,none,none', 'g')
        _plot(*_curves('Cnn14', data_type='balanced_train',
            augmentation='none'), 'cnn14,balanced,none', 'b')
        _plot(*_curves('Cnn14', data_type='balanced_train',
            augmentation='mixup_from_0_epoch'),
            'cnn14,balanced,mixup_from_0_epoch', 'm')

    elif select == '1_sr':
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14_16k'), 'cnn14_16k', 'g')
        _plot(*_curves('Cnn14_8k'), 'cnn14_8k', 'b')

    elif select == '1_time_domain':
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14_mixup_time_domain'), 'cnn14_time_domain', 'b')

    elif select == '1_partial_full':
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14', data_type='partial_0.9_full_train'),
            'cnn14,partial_0.9', 'b')
        _plot(*_curves('Cnn14', data_type='partial_0.8_full_train'),
            'cnn14,partial_0.8', 'g')
        _plot(*_curves('Cnn14', data_type='partial_0.7_full_train'),
            'cnn14,partial_0.7', 'k')
        _plot(*_curves('Cnn14', data_type='partial_0.5_full_train'),
            'cnn14,partial_0.5', 'm')

    elif select == '1_window':
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14', window_size=2048), 'cnn14_win2048', 'b',
            bal_color='r')

    elif select == '1_melbins':
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14_mel32', mel_bins=32), 'cnn14_mel32', 'b',
            bal_color='r')
        _plot(*_curves('Cnn14_mel128', mel_bins=128), 'cnn14_mel128', 'g',
            bal_color='r')

    elif select == '1_alternate':
        # This experiment ran twice as long; rescale the x axis accordingly.
        max_plot_iteration = 2000000
        iterations = np.arange(0, max_plot_iteration, 2000)
        _plot(*_curves('Cnn14'), 'cnn14', 'r')
        _plot(*_curves('Cnn14', balanced='alternate'), 'cnn14_alternate', 'b')

    elif select == '2_all':
        # Fixed: these branches previously called the undefined
        # `_load_metrics0`, which raised NameError at runtime.
        _plot(*_curves('Cnn13'), 'cnn13', 'b')
        _plot(*_curves('Cnn9'), 'cnn9', 'r', bal_color='b')
        _plot(*_curves('Cnn5'), 'cnn5', 'g', bal_color='b')
        _plot(*_curves('MobileNetV1'), 'MobileNetV1', 'k', bal_color='b')
        _plot(*_curves('Cnn1d_ResNet34'), 'Cnn1d_ResNet34', 'grey',
            bal_color='b')
        _plot(*_curves('ResNet34'), 'ResNet34', 'grey', bal_color='b')
        _plot(*_curves('Cnn13_WavCnn2d'), 'Cnn13_WavCnn2d', 'm', bal_color='b')
        _plot(*_curves('Cnn13_SpAndWav'), 'Cnn13_SpAndWav', 'orange',
            bal_color='b')

    elif select == '2_emb':
        _plot(*_curves('Cnn13'), 'cnn13', 'b')
        _plot(*_curves('Cnn13_emb32'), 'Cnn13_emb32', 'r', bal_color='b')
        _plot(*_curves('Cnn13_emb128'), 'Cnn13_128', 'k', bal_color='b')
        _plot(*_curves('Cnn13_emb512'), 'Cnn13_512', 'g', bal_color='b')

    elif select == '2_aug':
        _plot(*_curves('Cnn13'), 'cnn13', 'b')
        _plot(*_curves('Cnn13_no_specaug', balanced='none',
            augmentation='none'), 'cnn14,none,none', 'c')

    # Axis cosmetics: one x tick every 25 curve points == every 50k iterations.
    ax.set_ylim(0, 1.)
    ax.set_xlim(0, len(iterations))
    ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
    ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
    ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
    ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
    ax.grid(color='b', linestyle='solid', linewidth=0.3)
    plt.legend(handles=lines, loc=2)

    plt.savefig(save_out_path)
    print('Save figure to {}'.format(save_out_path))
def crop_label(label):
    """Truncate a label to at most 16 characters, cutting at a word boundary.

    A label that already fits is returned unchanged. Otherwise whole words
    are kept (joined by single spaces) while the result stays within the
    budget.

    Fix: the previous version accumulated words as ``' ' + word`` starting
    from an empty string, so every cropped label carried a spurious leading
    space and wasted one character of the budget.

    Args:
      label: str

    Returns:
      str: the (possibly truncated) label, with no leading space.
    """
    max_len = 16

    if len(label) <= max_len:
        return label

    kept = []
    for word in label.split(' '):
        candidate = ' '.join(kept + [word])
        if len(candidate) > max_len:
            break
        kept.append(word)
    return ' '.join(kept)
def add_comma(integer):
    """Format an integer with thousands separators, e.g. 1234567 -> '1,234,567'.

    Fix: the previous hand-rolled version only handled one comma group and
    dropped leading zeros in the remainder (1005 -> '1,5', 1234567 ->
    '1234,567'). The format mini-language handles every magnitude correctly.

    Args:
      integer: value coercible to int (int, float, numeric string).

    Returns:
      str: comma-grouped decimal representation.
    """
    return '{:,}'.format(int(integer))
def create_folder(fd):
    """Create directory ``fd`` (including parents) if it does not exist.

    Uses ``exist_ok=True`` to avoid the check-then-create race of the
    previous ``os.path.exists`` + ``os.makedirs`` pair, which could raise
    FileExistsError if the directory appeared between the two calls.

    NOTE(review): this redefinition shadows the ``create_folder`` imported
    from ``utilities`` at the top of the file — presumably intentional, but
    worth confirming.

    Args:
      fd: str, directory path to create.
    """
    os.makedirs(fd, exist_ok=True)
def plot_class_iteration(args):
    """Plot per-class AP vs. training iteration for selected classes.

    Draws three panels (head / middle / tail of the class-frequency ranking)
    of per-class average precision over the first 300 statistics dumps and
    writes the figure to results_map/class_iteration_map.pdf.

    Args:
      args: argparse namespace with at least:
        workspace: str, root directory containing 'statistics/...'
        select: str  # NOTE(review): read but never used below — confirm intent
    """
    # Arguments & parameters
    workspace = args.workspace
    select = args.select

    save_out_path = 'results_map/class_iteration_map.pdf'
    create_folder(os.path.dirname(save_out_path))

    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
        fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
        # Load the whole statistics dict for one training run.
        # NOTE(review): the `iteration` parameter is accepted but never used
        # in the path below — the full statistics history is loaded and the
        # caller truncates it; confirm this is intended.
        statistics_path = os.path.join(workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
            sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            'statistics.pkl')

        statistics_dict = cPickle.load(open(statistics_path, 'rb'))
        return statistics_dict

    iteration = 600000
    statistics_dict = _load_metrics('main', 32000, 1024,
        320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)

    # (num_dumps, classes_num) matrix of per-class AP on the eval set;
    # keep the first 300 dumps (= 600k iterations at one dump per 2k iters).
    mAP_mat = np.array([e['average_precision'] for e in statistics_dict['test']])
    mAP_mat = mAP_mat[0 : 300, :]

    # Classes ordered by descending number of training samples.
    sorted_indexes = np.argsort(config.full_samples_per_class)[::-1]

    fig, axs = plt.subplots(1, 3, figsize=(20, 5))
    # Panels: most frequent 10 classes, a mid-frequency slice, a tail slice.
    ranges = [np.arange(0, 10), np.arange(250, 260), np.arange(517, 527)]
    axs[0].set_ylabel('AP')

    for col in range(0, 3):
        axs[col].set_ylim(0, 1.)
        axs[col].set_xlim(0, 301)
        axs[col].set_xlabel('Iterations')
        axs[col].set_ylabel('AP')
        # One x tick per 100 dumps == every 200k training iterations.
        axs[col].xaxis.set_ticks(np.arange(0, 301, 100))
        axs[col].xaxis.set_ticklabels(['0', '200k', '400k', '600k'])
        lines = []
        for _ix in ranges[col]:
            # Legend entry: truncated class name + its training sample count.
            _label = crop_label(config.labels[sorted_indexes[_ix]]) + \
                ' ({})'.format(add_comma(config.full_samples_per_class[sorted_indexes[_ix]]))
            line, = axs[col].plot(mAP_mat[:, sorted_indexes[_ix]], label=_label)
            lines.append(line)
        box = axs[col].get_position()
        axs[col].set_position([box.x0, box.y0, box.width * 1., box.height])
        axs[col].legend(handles=lines, bbox_to_anchor=(1., 1.))
        axs[col].yaxis.grid(color='k', linestyle='solid', alpha=0.3, linewidth=0.3)

    plt.tight_layout(pad=4, w_pad=1, h_pad=1)
    plt.savefig(save_out_path)
    print(save_out_path)
8,638 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_old_metrics(workspace, filename, iteration, data_type):
assert data_type in ['train', 'test']
stat_name = "stat_{}_iters.p".format(iteration)
# Load stats
stat_path = os.path.join(workspace, "stats", filename, data_type, stat_name)
try:
stats = cPickle.load(open(stat_path, 'rb'))
except:
stats = cPickle.load(open(stat_path, 'rb'), encoding='latin1')
precisions = [stat['precisions'] for stat in stats]
recalls = [stat['recalls'] for stat in stats]
maps = np.array([stat['AP'] for stat in stats])
aucs = np.array([stat['auc'] for stat in stats])
return {'average_precision': maps, 'AUC': aucs} | null |
8,639 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _sort(ys):
    """Sort scores in descending order.

    Args:
      ys: 1-D numpy array of per-class scores.

    Returns:
      tuple: (scores sorted descending,
              original indices in that order,
              class label names in that order).
    """
    order = np.argsort(ys)[::-1]
    sorted_scores = ys[order]
    sorted_labels = [config.labels[idx] for idx in order]
    return sorted_scores, order, sorted_labels
def get_avg_stats(workspace, bgn_iter, fin_iter, interval_iter, filename, data_type):
    """Ensemble checkpoint probabilities and compute per-class statistics.

    Averages the predicted probabilities dumped at several training
    checkpoints, then computes class-wise precision/recall curves, AP,
    ROC curves and AUC on the averaged predictions.

    Args:
      workspace: str, directory containing the dumped "probs" sub-folder.
      bgn_iter: int, first checkpoint iteration to include.
      fin_iter: int, exclusive upper bound on checkpoint iterations.
      interval_iter: int, step between checkpoint iterations.
      filename: str, sub-directory the probabilities were saved under.
      data_type: str, 'train' | 'test'.

    Returns:
      dict: {'average_precision': (classes_num,), 'auc': (classes_num,)}
    """
    assert data_type in ['train', 'test']
    bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
    eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
    unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"
    t1 = time.time()
    # NOTE(review): load_data is defined elsewhere in the project; presumably
    # it returns (features, targets, audio ids) — TODO confirm.
    if data_type == 'test':
        (te_x, te_y, te_id_list) = load_data(eval_hdf5)
    elif data_type == 'train':
        (te_x, te_y, te_id_list) = load_data(bal_train_hdf5)
    y = te_y
    prob_dir = os.path.join(workspace, "probs", filename, data_type)
    names = os.listdir(prob_dir)
    probs = []
    iters = range(bgn_iter, fin_iter, interval_iter)
    for iter in iters:
        pickle_path = os.path.join(prob_dir, "prob_%d_iters.p" % iter)
        try:
            prob = cPickle.load(open(pickle_path, 'rb'))
        except:
            # Fallback for probability pickles written under Python 2.
            prob = cPickle.load(open(pickle_path, 'rb'), encoding='latin1')
        probs.append(prob)
    # Ensemble: mean probability over all selected checkpoints.
    avg_prob = np.mean(np.array(probs), axis=0)
    n_out = y.shape[1]
    stats = []
    for k in range(n_out): # around 7 seconds
        (precisions, recalls, thresholds) = metrics.precision_recall_curve(y[:, k], avg_prob[:, k])
        avg_precision = metrics.average_precision_score(y[:, k], avg_prob[:, k], average=None)
        (fpr, tpr, thresholds) = metrics.roc_curve(y[:, k], avg_prob[:, k])
        auc = metrics.roc_auc_score(y[:, k], avg_prob[:, k], average=None)
        # eer = pp_data.eer(avg_prob[:, k], y[:, k])
        # Sub-sample the dense curves so the stored stats stay small.
        skip = 1000
        dict = {'precisions': precisions[0::skip], 'recalls': recalls[0::skip], 'AP': avg_precision,
            'fpr': fpr[0::skip], 'fnr': 1. - tpr[0::skip], 'auc': auc}
        stats.append(dict)
    mAPs = np.array([e['AP'] for e in stats])
    aucs = np.array([e['auc'] for e in stats])
    print("Get avg time: {}".format(time.time() - t1))
    return {'average_precision': mAPs, 'auc': aucs}
def _samples_num_per_class():
    """Count labelled audio clips per class in three AudioSet subsets.

    Returns:
      (bal_num, unbal_num, eval_num): per-class clip counts for the balanced
      training, unbalanced training and evaluation subsets.
    """
    bal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/bal_train.h5"
    eval_hdf5 = "/vol/vssp/msos/audioset/packed_features/eval.h5"
    unbal_train_hdf5 = "/vol/vssp/msos/audioset/packed_features/unbal_train.h5"

    # Load in the same order as before: eval, balanced train, unbalanced train.
    counts = []
    for hdf5_path in (eval_hdf5, bal_train_hdf5, unbal_train_hdf5):
        (_, y, _) = load_data(hdf5_path)
        counts.append(np.sum(y, axis=0))
    eval_num, bal_num, unbal_num = counts

    return bal_num, unbal_num, eval_num
def get_label_quality():
    """Return the per-class label quality ratio (verified / rated clips).

    Classes with no ratings (zero in the count column) map to None.
    """
    rate_csv = '/vol/vssp/msos/qk/workspaces/pub_audioset_tagging_cnn_transfer/metadata/qa_true_counts.csv'
    with open(rate_csv, 'r') as f:
        rows = list(csv.reader(f, delimiter=','))

    rates = []
    # rows[0] is the header; data rows start at index 1.
    for row in rows[1:]:
        total = float(row[1])
        if total == 0:
            rates.append(None)
        else:
            rates.append(float(row[2]) / total)
    return rates
def create_folder(fd):
    """Create directory *fd* (and missing parents), ignoring it if it exists.

    exist_ok avoids the race between an os.path.exists check and makedirs.
    """
    os.makedirs(fd, exist_ok=True)
def summary_stats(args):
    """Aggregate legacy- and current-system AudioSet statistics into one pickle.

    Collects the averaged-checkpoint baseline metrics from the old workspace
    plus CNN14/MobileNetV1 metrics from the new pipeline, and writes them to
    <workspace>/results/stats_for_paper.pkl for the paper's plotting scripts.

    Args:
      args: argparse.Namespace with attribute `workspace`.
    """
    # Arguments & parameters
    workspace = args.workspace
    out_stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
    create_folder(os.path.dirname(out_stat_path))
    # Old workspace
    old_workspace = '/vol/vssp/msos/qk/workspaces/audioset_classification'
    # bal_train_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'train')
    # eval_metrics = _load_old_metrics(old_workspace, 'tmp127', 20000, 'test')
    # Baseline system: average the predictions of 9 checkpoints (10k..50k iters).
    bal_train_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='train')
    eval_metrics = get_avg_stats(old_workspace, bgn_iter=10000, fin_iter=50001, interval_iter=5000, filename='tmp127_re', data_type='test')
    maps0te = eval_metrics['average_precision']
    # Classes are plotted in descending order of the baseline's eval AP.
    (maps0te, sorted_idxes, sorted_lbs) = _sort(maps0te)
    bal_num, unbal_num, eval_num = _samples_num_per_class()
    output_dict = {
        'labels': config.labels,
        'label_quality': get_label_quality(),
        'sorted_indexes_for_plot': sorted_idxes,
        'official_balanced_trainig_samples': bal_num,
        'official_unbalanced_training_samples': unbal_num,
        'official_eval_samples': eval_num,
        'downloaded_full_training_samples': config.full_samples_per_class,
        'averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations':
            {'bal_train': bal_train_metrics, 'eval': eval_metrics}
        }
    def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
        fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size, iteration):
        """Load bal/eval AP and AUC at `iteration` from the new pipeline's
        statistics.pkl (one statistics entry is appended every 2000 iterations)."""
        _workspace = '/vol/vssp/msos/qk/bytedance/workspaces_important/pub_audioset_tagging_cnn_transfer'
        statistics_path = os.path.join(_workspace, 'statistics', filename,
            'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
            sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
            'data_type={}'.format(data_type), model_type,
            'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
            'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
            'statistics.pkl')
        statistics_dict = cPickle.load(open(statistics_path, 'rb'))
        # Statistics are dumped every 2000 iterations, so index by iteration/2000.
        _idx = iteration // 2000
        _dict = {'bal_train': {'average_precision': statistics_dict['bal'][_idx]['average_precision'],
                'auc': statistics_dict['bal'][_idx]['auc']},
            'eval': {'average_precision': statistics_dict['test'][_idx]['average_precision'],
                'auc': statistics_dict['test'][_idx]['auc']}}
        return _dict
    iteration = 600000
    output_dict['cnn13_system_iteration60k'] = _load_metrics('main', 32000, 1024,
        320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32, iteration)
    iteration = 560000
    output_dict['mobilenetv1_system_iteration56k'] = _load_metrics('main', 32000, 1024,
        320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32, iteration)
    cPickle.dump(output_dict, open(out_stat_path, 'wb'))
    print('Write stats for paper to {}'.format(out_stat_path))
import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
    fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
    """Load class-wise eval AP at statistics index 300 from a legacy workspace.

    Index 300 corresponds to iteration 600k with the pipeline's 2000-iteration
    statistics interval — TODO confirm against the training script.

    Returns:
      (classes_num,) array-like of average precision per class.
    """
    workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
    statistics_path = os.path.join(workspace0, 'statistics', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
        'statistics.pkl')

    # Use a context manager so the pickle file handle is not leaked.
    with open(statistics_path, 'rb') as f:
        statistics_dict = cPickle.load(f)

    return statistics_dict['test'][300]['average_precision']
def prepare_plot_long_4_rows(sorted_lbs):
    """Build a 4-row figure that spreads all class labels across the rows.

    Each row shows a contiguous slice of classes: a log-scale left axis for
    audio clip counts and a twin right axis in [0, 1] for average precision.

    Args:
      sorted_lbs: list of str, class labels in plotting order.

    Returns:
      (ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b): the four count axes
      followed by their four twin average-precision axes.
    """
    N = len(sorted_lbs)
    f, axes_a = plt.subplots(4, 1, sharey=False, facecolor='w', figsize=(10, 12))
    fontsize = 5
    K = 132    # classes per row; the last row takes the remainder up to N
    bounds = [(0, K), (K, 2 * K), (2 * K, 3 * K), (3 * K, N)]

    # Shorten labels so the rotated tick text stays readable; drop a trailing
    # fragment shorter than 3 characters left over from the 25-char cut.
    truncated_sorted_lbs = []
    for lb in sorted_lbs:
        lb = lb[0 : 25]
        words = lb.split(' ')
        if len(words[-1]) < 3:
            lb = ' '.join(words[0:-1])
        truncated_sorted_lbs.append(lb)

    axes_b = []
    for ax_a, (lo, hi) in zip(axes_a, bounds):
        ax_a.set_xlim(lo, hi)
        ax_a.grid(which='major', axis='x', linestyle='-', alpha=0.3)
        ax_a.set_yscale('log')
        ax_a.xaxis.set_ticks(np.arange(lo, hi))
        ax_a.xaxis.set_ticklabels(truncated_sorted_lbs[lo:hi], rotation=90, fontsize=fontsize)
        ax_a.xaxis.tick_bottom()
        ax_a.set_ylabel("Number of audio clips")

        ax_b = ax_a.twinx()
        ax_b.set_ylim(0., 1.)
        ax_b.set_ylabel('Average precision')
        ax_b.yaxis.grid(color='grey', linestyle='--', alpha=0.5)
        axes_b.append(ax_b)

    (ax1a, ax2a, ax3a, ax4a) = axes_a
    (ax1b, ax2b, ax3b, ax4b) = axes_b

    # Hide the inner spines so the four rows read as one continuous strip.
    for ax in (ax1a, ax1b):
        ax.spines['right'].set_visible(False)
    for ax in (ax2a, ax2b, ax3a, ax3b):
        ax.spines['left'].set_visible(False)
        ax.spines['right'].set_visible(False)
    for ax in (ax4a, ax4b):
        ax.spines['left'].set_visible(False)

    plt.subplots_adjust(hspace = 0.8)
    return ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b
def _scatter_4_rows(x, ax, ax2, ax3, ax4, s, c, marker='.', alpha=1.):
    """Scatter the same per-class values onto all four row axes."""
    positions = np.arange(len(x))
    for axis in (ax, ax2, ax3, ax4):
        axis.scatter(positions, x, s=s, c=c, marker=marker, alpha=alpha)
def _plot_4_rows(x, ax, ax2, ax3, ax4, c, linewidth=1.0, alpha=1.0, label=""):
    """Plot x onto all four row axes; return the labelled line of the last row
    (used for the shared legend)."""
    for axis in (ax, ax2, ax3):
        axis.plot(x, c=c, linewidth=linewidth, alpha=alpha)
    line, = ax4.plot(x, c=c, linewidth=linewidth, alpha=alpha, label=label)
    return line
def create_folder(fd):
    """Create directory *fd* (and missing parents), ignoring it if it exists.

    exist_ok avoids the race between an os.path.exists check and makedirs.
    """
    os.makedirs(fd, exist_ok=True)
def plot_long_fig(args):
    """Plot per-class clip counts, system APs and label quality on a 4-row figure.

    Reads the aggregated stats pickle written by summary_stats and saves
    results/long_fig.pdf.

    Args:
      args: argparse.Namespace with attribute `workspace`.
    """
    # Arguments & parameters
    workspace = args.workspace
    # Paths
    stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')
    save_out_path = 'results/long_fig.pdf'
    create_folder(os.path.dirname(save_out_path))
    # Stats
    stats = cPickle.load(open(stat_path, 'rb'))
    N = len(config.labels)
    sorted_indexes = stats['sorted_indexes_for_plot']
    sorted_labels = np.array(config.labels)[sorted_indexes]
    # Total official training clips per class (balanced + unbalanced subsets).
    audio_clips_per_class = stats['official_balanced_trainig_samples'] + stats['official_unbalanced_training_samples']
    audio_clips_per_class = audio_clips_per_class[sorted_indexes]
    (ax1a, ax2a, ax3a, ax4a, ax1b, ax2b, ax3b, ax4b) = prepare_plot_long_4_rows(sorted_labels)
    # plot the same data on both axes
    ax1a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
    ax2a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
    ax3a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
    ax4a.bar(np.arange(N), audio_clips_per_class, alpha=0.3)
    # Per-class AP of each system, reordered to the plotting order.
    maps_avg_instances = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']
    maps_avg_instances = maps_avg_instances[sorted_indexes]
    maps_cnn13 = stats['cnn13_system_iteration60k']['eval']['average_precision']
    maps_cnn13 = maps_cnn13[sorted_indexes]
    maps_mobilenetv1 = stats['mobilenetv1_system_iteration56k']['eval']['average_precision']
    maps_mobilenetv1 = maps_mobilenetv1[sorted_indexes]
    maps_logmel_wavegram_cnn = _load_metrics0_classwise('main', 32000, 1024,
        320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
    maps_logmel_wavegram_cnn = maps_logmel_wavegram_cnn[sorted_indexes]
    _scatter_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, s=5, c='k')
    _scatter_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, s=5, c='r')
    _scatter_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, s=5, c='b')
    _scatter_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, s=5, c='g')
    linewidth = 0.7
    line0te = _plot_4_rows(maps_avg_instances, ax1b, ax2b, ax3b, ax4b, c='k', linewidth=linewidth, label='AP with averaging instances (baseline)')
    line1te = _plot_4_rows(maps_cnn13, ax1b, ax2b, ax3b, ax4b, c='r', linewidth=linewidth, label='AP with CNN14')
    line2te = _plot_4_rows(maps_mobilenetv1, ax1b, ax2b, ax3b, ax4b, c='b', linewidth=linewidth, label='AP with MobileNetV1')
    line3te = _plot_4_rows(maps_logmel_wavegram_cnn, ax1b, ax2b, ax3b, ax4b, c='g', linewidth=linewidth, label='AP with Wavegram-Logmel-CNN')
    # Label quality: None marks classes without ratings; nudge perfect scores
    # just below 1.0 so their markers stay inside the axis.
    label_quality = stats['label_quality']
    sorted_rate = np.array(label_quality)[sorted_indexes]
    for k in range(len(sorted_rate)):
        if sorted_rate[k] and sorted_rate[k] == 1:
            sorted_rate[k] = 0.99
    # sorted_rate is an object array, so `!= None` is an elementwise mask of
    # rated classes; `== None` selects the unrated ones plotted at 0.5 with '_'.
    ax1b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
    ax2b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
    ax3b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+')
    line_label_quality = ax4b.scatter(np.arange(N)[sorted_rate != None], sorted_rate[sorted_rate != None], s=12, c='r', linewidth=0.8, marker='+', label='Label quality')
    ax1b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
    ax2b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
    ax3b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
    ax4b.scatter(np.arange(N)[sorted_rate == None], 0.5 * np.ones(len(np.arange(N)[sorted_rate == None])), s=12, c='r', linewidth=0.8, marker='_')
    plt.legend(handles=[line0te, line1te, line2te, line3te, line_label_quality], fontsize=6, loc=1)
    plt.savefig(save_out_path)
    print('Save fig to {}'.format(save_out_path))
import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def plot(args):
# Arguments & parameters
dataset_dir = args.dataset_dir
workspace = args.workspace
select = args.select
classes_num = config.classes_num
max_plot_iteration = 1000000
iterations = np.arange(0, max_plot_iteration, 2000)
class_labels_indices_path = os.path.join(dataset_dir, 'metadata',
'class_labels_indices.csv')
save_out_path = 'results/{}.pdf'.format(select)
create_folder(os.path.dirname(save_out_path))
# Read labels
labels = config.labels
# Plot
fig, ax = plt.subplots(1, 1, figsize=(15, 8))
lines = []
def _load_metrics(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
statistics_path = os.path.join(workspace, 'statistics', filename,
'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
'data_type={}'.format(data_type), model_type,
'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
'statistics.pkl')
statistics_dict = cPickle.load(open(statistics_path, 'rb'))
bal_map = np.array([statistics['average_precision'] for statistics in statistics_dict['bal']]) # (N, classes_num)
bal_map = np.mean(bal_map, axis=-1)
test_map = np.array([statistics['average_precision'] for statistics in statistics_dict['test']]) # (N, classes_num)
test_map = np.mean(test_map, axis=-1)
legend = '{}, {}, bal={}, aug={}, bs={}'.format(data_type, model_type, balanced, augmentation, batch_size)
# return {'bal_map': bal_map, 'test_map': test_map, 'legend': legend}
return bal_map, test_map, legend
bal_alpha = 0.3
test_alpha = 1.0
lines = []
if select == '1_cnn13':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_dropout', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_no_specaug', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_dropout', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_no_mixup', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_mixup_in_wave', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_mixup_in_wave', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_pooling':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_gwrp', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapgwrp', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_att', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_gmpgapatt', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_resnet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet34', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='resnet50', color='c', alpha=test_alpha)
lines.append(line)
elif select == '1_densenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet121', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet121', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'DenseNet201', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='densenet201', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_cnn9':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_hop':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
500, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop500', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
640, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop640', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
1000, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_hop1000', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_emb':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb128', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13_emb512', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_mobilenet':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv1', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV2', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='mobilenetv2', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_LeeNet18', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_LeeNet18', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_DaiNet', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_DaiNet', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='c', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet50', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet50', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_waveform_cnn2d':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_decision_level':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelMax', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelMax', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAvg', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAvg', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_DecisionLevelAtt', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_DecisionLevelAtt', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_transformer':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer1', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer3', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer3', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_Transformer6', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_Transformer6', color='k', alpha=test_alpha)
lines.append(line)
elif select == '1_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_bal_train_aug':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,none', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup_from_0_epoch', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,balanced,mixup_from_0_epoch', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_sr':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_16k', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_8k', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_time_domain':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_time_domain', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_partial_full':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.9_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.9', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='g', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.8', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.7_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='k', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.7', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='m', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,partial_0.5', color='m', alpha=test_alpha)
lines.append(line)
elif select == '1_window':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 2048,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_win2048', color='b', alpha=test_alpha)
lines.append(line)
elif select == '1_melbins':
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel32', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_mel128', color='g', alpha=test_alpha)
lines.append(line)
elif select == '1_alternate':
max_plot_iteration = 2000000
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='r', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'alternate', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14_alternate', color='b', alpha=test_alpha)
lines.append(line)
elif select == '2_all':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn9', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn9', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn5', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn5', color='g', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'MobileNetV1', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='MobileNetV1', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn1d_ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn1d_ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'ResNet34', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='ResNet34', color='grey', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_WavCnn2d', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_WavCnn2d', color='m', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_SpAndWav', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_SpAndWav', color='orange', alpha=test_alpha)
lines.append(line)
elif select == '2_emb':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_emb32', color='r', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_128', color='k', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_emb512', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='Cnn13_512', color='g', alpha=test_alpha)
lines.append(line)
elif select == '2_aug':
iterations = np.arange(0, max_plot_iteration, 2000)
(bal_map, test_map, legend) = _load_metrics0('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13', 'clip_bce', 'balanced', 'mixup', 32)
line, = ax.plot(bal_map, color='b', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn13', color='b', alpha=test_alpha)
lines.append(line)
(bal_map, test_map, legend) = _load_metrics('main', 32000, 1024,
320, 64, 50, 14000, 'full_train', 'Cnn13_no_specaug', 'clip_bce', 'none', 'none', 32)
line, = ax.plot(bal_map, color='c', alpha=bal_alpha)
line, = ax.plot(test_map, label='cnn14,none,none', color='c', alpha=test_alpha)
lines.append(line)
ax.set_ylim(0, 1.)
ax.set_xlim(0, len(iterations))
ax.xaxis.set_ticks(np.arange(0, len(iterations), 25))
ax.xaxis.set_ticklabels(np.arange(0, max_plot_iteration, 50000))
ax.yaxis.set_ticks(np.arange(0, 1.01, 0.05))
ax.yaxis.set_ticklabels(np.around(np.arange(0, 1.01, 0.05), decimals=2))
ax.grid(color='b', linestyle='solid', linewidth=0.3)
plt.legend(handles=lines, loc=2)
# box = ax.get_position()
# ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
# ax.legend(handles=lines, bbox_to_anchor=(1.0, 1.0))
plt.savefig(save_out_path)
print('Save figure to {}'.format(save_out_path))
def create_folder(fd):
    """Create directory *fd* (including parents) if it does not already exist.

    Args:
        fd: Directory path to create.
    """
    # exist_ok avoids the check-then-create race of os.path.exists + makedirs
    os.makedirs(fd, exist_ok=True)
def plot_flops(args):
    """Scatter-plot evaluation mAP against multiply-add counts per architecture.

    Args:
        args: argparse.Namespace with a `workspace` attribute (unused here,
            kept for CLI-signature parity with the other plot commands).
    """
    # Arguments & parameters
    workspace = args.workspace  # NOTE(review): unused — kept for interface parity

    # Paths
    save_out_path = 'results_map/flops.pdf'
    create_folder(os.path.dirname(save_out_path))

    # Pass figsize to subplots directly; the previous plt.figure(figsize=...)
    # created a throwaway figure that subplots() immediately superseded,
    # so the requested size was never applied.
    fig, ax = plt.subplots(1, 1, figsize=(5, 5))

    model_types = np.array(['Cnn6', 'Cnn10', 'Cnn14', 'ResNet22', 'ResNet38', 'ResNet54',
        'MobileNetV1', 'MobileNetV2', 'DaiNet', 'LeeNet', 'LeeNet18',
        'Res1dNet30', 'Res1dNet44', 'Wavegram-CNN', 'Wavegram-\nLogmel-CNN'])
    flops = np.array([21.986, 21.986, 42.220, 30.081, 48.962, 54.563, 3.614, 2.810,
        30.395, 4.741, 26.369, 32.688, 61.833, 44.234, 53.510])
    mAPs = np.array([0.343, 0.380, 0.431, 0.430, 0.434, 0.429, 0.389, 0.383, 0.295,
        0.266, 0.336, 0.365, 0.355, 0.389, 0.439])

    ax.scatter(flops, mAPs)

    # Per-label (dx, dy) offsets so annotations don't overlap the markers
    shift = [[1, 0.002], [1, -0.006], [-1, -0.014], [-2, 0.006], [-7, 0.006],
        [1, -0.01], [0.5, 0.004], [-1, -0.014], [1, -0.007], [0.8, -0.008],
        [1, -0.007], [1, 0.002], [-6, -0.015], [1, -0.008], [0.8, 0]]

    for i, model_type in enumerate(model_types):
        ax.annotate(model_type, (flops[i] + shift[i][0], mAPs[i] + shift[i][1]))

    # Connect families of related architectures with line segments
    ax.plot(flops[[0, 1, 2]], mAPs[[0, 1, 2]])        # Cnn6/10/14
    ax.plot(flops[[3, 4, 5]], mAPs[[3, 4, 5]])        # ResNet22/38/54
    ax.plot(flops[[6, 7]], mAPs[[6, 7]])              # MobileNetV1/V2
    ax.plot(flops[[9, 10]], mAPs[[9, 10]])            # LeeNet/LeeNet18
    ax.plot(flops[[11, 12]], mAPs[[11, 12]])          # Res1dNet30/44
    ax.plot(flops[[13, 14]], mAPs[[13, 14]])          # Wavegram variants

    ax.set_xlim(0, 70)
    ax.set_ylim(0.2, 0.5)
    ax.set_xlabel('Multi-adds (million)')
    ax.set_ylabel('mAP')

    # Keyword arguments: positional pad/h_pad/w_pad were removed in
    # Matplotlib 3.3+ (previously plt.tight_layout(0, 0, 0)).
    plt.tight_layout(pad=0, h_pad=0, w_pad=0)
    plt.savefig(save_out_path)
    print('Write out figure to {}'.format(save_out_path))
8,642 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def spearman(args):
    """Print Spearman correlations between per-class mAP and (a) number of
    training samples, (b) label quality.

    Args:
        args: argparse.Namespace with a `workspace` attribute.
    """
    # `import scipy` alone does not guarantee the scipy.stats submodule is
    # loaded; import it explicitly.
    import scipy.stats

    # Arguments & parameters
    workspace = args.workspace

    # Paths
    stat_path = os.path.join(workspace, 'results', 'stats_for_paper.pkl')

    # Stats — use a context manager so the file handle is closed promptly
    with open(stat_path, 'rb') as f:
        stats = cPickle.load(f)

    # Missing label-quality entries default to 0.5
    label_quality = np.array([qu if qu else 0.5 for qu in stats['label_quality']])

    # NOTE(review): 'trainig' is the literal key stored in the pickle — do not
    # "fix" the spelling without regenerating the stats file.
    training_samples = np.array(stats['official_balanced_trainig_samples']) + \
        np.array(stats['official_unbalanced_training_samples'])

    mAP = stats['averaging_instance_system_avg_9_probs_from_10000_to_50000_iterations']['eval']['average_precision']

    samples_spearman = scipy.stats.spearmanr(training_samples, mAP)[0]
    quality_spearman = scipy.stats.spearmanr(label_quality, mAP)[0]

    print('Training samples spearman: {:.3f}'.format(samples_spearman))
    print('Quality spearman: {:.3f}'.format(quality_spearman))
8,643 | import os
import sys
import numpy as np
import argparse
import h5py
import time
import _pickle as cPickle
import _pickle
import matplotlib.pyplot as plt
import csv
from sklearn import metrics
from utilities import (create_folder, get_filename, d_prime)
import config
def _load_metrics0_classwise2(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
    fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
    """Load (mAP, mAUC, d-prime) at checkpoint index 270 from the transfer
    workspace's statistics pickle for the given training configuration.

    Returns:
        (mAP, mAUC, dprime) floats averaged over classes.
    """
    workspace0 = '/mnt/cephfs_new_wj/speechsv/qiuqiang.kong/workspaces/pub_audioset_tagging_cnn_transfer'
    statistics_path = os.path.join(workspace0, 'statistics', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
        'statistics.pkl')

    # Context manager closes the file promptly (previously leaked via
    # cPickle.load(open(...))).
    with open(statistics_path, 'rb') as f:
        statistics_dict = cPickle.load(f)

    k = 270  # fixed evaluation checkpoint index for this workspace
    mAP = np.mean(statistics_dict['test'][k]['average_precision'])
    mAUC = np.mean(statistics_dict['test'][k]['auc'])
    dprime = d_prime(mAUC)
    return mAP, mAUC, dprime
def _load_metrics_classwise(filename, sample_rate, window_size, hop_size, mel_bins, fmin,
    fmax, data_type, model_type, loss_type, balanced, augmentation, batch_size):
    """Load (mAP, mAUC, d-prime) at checkpoint index 300 from the main
    workspace's statistics pickle for the given training configuration.

    Returns:
        (mAP, mAUC, dprime) floats averaged over classes.
    """
    workspace = '/mnt/cephfs_new_wj/speechsv/kongqiuqiang/workspaces/cvssp/pub_audioset_tagging_cnn'
    statistics_path = os.path.join(workspace, 'statistics', filename,
        'sample_rate={},window_size={},hop_size={},mel_bins={},fmin={},fmax={}'.format(
        sample_rate, window_size, hop_size, mel_bins, fmin, fmax),
        'data_type={}'.format(data_type), model_type,
        'loss_type={}'.format(loss_type), 'balanced={}'.format(balanced),
        'augmentation={}'.format(augmentation), 'batch_size={}'.format(batch_size),
        'statistics.pkl')

    # Context manager closes the file promptly (previously leaked via
    # cPickle.load(open(...))).
    with open(statistics_path, 'rb') as f:
        statistics_dict = cPickle.load(f)

    k = 300  # fixed evaluation checkpoint index for this workspace
    mAP = np.mean(statistics_dict['test'][k]['average_precision'])
    mAUC = np.mean(statistics_dict['test'][k]['auc'])
    dprime = d_prime(mAUC)
    return mAP, mAUC, dprime
def print_results(args):
    """Print mAP, mAUC and d-prime for the ablation systems in the paper.

    The previous version computed each metric tuple, immediately overwrote it
    without printing anything, and then crashed on debugging leftovers
    (`import crash` / `asdf`). This version reports every system.
    """
    systems = [
        # (label, loader, loader arguments)
        ('Cnn14, balanced, mixup', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn14, time-domain mixup', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_mixup_time_domain', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn14, balanced, none', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)),
        ('Cnn14, none, none', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)),
        ('Cnn14, balanced_train, none, none', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'none', 'none', 32)),
        ('Cnn14, balanced_train, balanced, none', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'none', 32)),
        ('Cnn14, balanced_train, balanced, mixup', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'balanced_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)),
        # Embedding sizes (loaded from the transfer workspace)
        ('Cnn13_emb32', _load_metrics0_classwise2,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb32', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn13_emb128', _load_metrics0_classwise2,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn13_emb128', 'clip_bce', 'balanced', 'mixup', 32)),
        # Partial training data
        ('Cnn14, partial_0.8', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.8_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn14, partial_0.5', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'partial_0.5_full_train', 'Cnn14', 'clip_bce', 'balanced', 'mixup', 32)),
        # Sample rate
        ('Cnn14_16k', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_16k', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn14_8k', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 64, 50, 14000, 'full_train', 'Cnn14_8k', 'clip_bce', 'balanced', 'mixup', 32)),
        # Mel bins
        ('Cnn14_mel128', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 128, 50, 14000, 'full_train', 'Cnn14_mel128', 'clip_bce', 'balanced', 'mixup', 32)),
        ('Cnn14_mel32', _load_metrics_classwise,
            ('main', 32000, 1024, 320, 32, 50, 14000, 'full_train', 'Cnn14_mel32', 'clip_bce', 'balanced', 'mixup', 32)),
    ]

    for label, loader, loader_args in systems:
        mAP, mAUC, dprime = loader(*loader_args)
        print('{}: mAP={:.3f}, mAUC={:.3f}, d-prime={:.3f}'.format(
            label, mAP, mAUC, dprime))
8,644 | from setuptools import setup, find_packages
import os
def get_long_description():
    """Read and return README.md (UTF-8) from this file's directory."""
    readme_path = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "README.md"
    )
    with open(readme_path, encoding="utf8") as fp:
        return fp.read()
8,645 | import llm
import random
import time
from typing import Optional
from pydantic import field_validator, Field
class Markov(llm.Model):
    """Toy llm plugin model that generates text with a Markov chain built
    from the prompt itself."""

    model_id = "markov"
    can_stream = True

    class Options(llm.Options):
        length: Optional[int] = Field(
            description="Number of words to generate", default=None
        )
        delay: Optional[float] = Field(
            description="Seconds to delay between each token", default=None
        )

        # Bug fix: these validators were defined but never registered with
        # pydantic (field_validator was imported yet unused), so invalid
        # --length/--delay values were silently accepted.
        @field_validator("length")
        @classmethod
        def validate_length(cls, length):
            """A chain needs at least two words to have a transition."""
            if length is None:
                return None
            if length < 2:
                raise ValueError("length must be >= 2")
            return length

        @field_validator("delay")
        @classmethod
        def validate_delay(cls, delay):
            """Keep the per-token delay within a sane 0-10 second range."""
            if delay is None:
                return None
            if not 0 <= delay <= 10:
                raise ValueError("delay must be between 0 and 10")
            return delay

    def execute(self, prompt, stream, response, conversation):
        """Build a transition table from the prompt text and stream words.

        Yields each generated word followed by a space, optionally sleeping
        between tokens when options.delay is set.
        """
        text = prompt.prompt
        transitions = build_markov_table(text)
        length = prompt.options.length or 20  # default to 20 words
        for word in generate(transitions, length):
            yield word + " "
            if prompt.options.delay:
                time.sleep(prompt.options.delay)
def register_models(register):
    # llm plugin hook: expose the Markov model to llm's model registry.
    markov_model = Markov()
    register(markov_model)
8,646 | import llm
import random
import time
from typing import Optional
from pydantic import field_validator, Field
def build_markov_table(text):
    """Map each word in *text* to the list of words that follow it.

    Words are produced by str.split(); the final word gets no entry unless
    it also appears earlier followed by something.
    """
    transitions = {}
    tokens = text.split()
    # Pair each word with its successor
    for current, following in zip(tokens, tokens[1:]):
        transitions.setdefault(current, []).append(following)
    return transitions
8,647 | import llm
import random
import time
from typing import Optional
from pydantic import field_validator, Field
def generate(transitions, length, start_word=None):
    """Yield *length* words by walking the Markov transition table.

    Starts from *start_word* when given (and truthy), otherwise from a
    random known word. Words with no recorded successors fall back to a
    random pick from the whole vocabulary.
    """
    vocabulary = list(transitions.keys())
    current = start_word or random.choice(vocabulary)
    for _ in range(length):
        yield current
        candidates = transitions.get(current) or vocabulary
        current = random.choice(candidates)
8,648 | from sqlite_migrate import Migrations
import hashlib
import time
def m001_create_tables(db):
    """Create the collections and embeddings tables."""
    collections_schema = {"id": int, "name": str, "model": str}
    db["collections"].create(collections_schema, pk="id")
    # Collection names must be unique so they can be looked up by name
    db["collections"].create_index(["name"], unique=True)

    embeddings_schema = {
        "collection_id": int,
        "id": str,
        "embedding": bytes,
        "content": str,
        "metadata": str,
    }
    # Compound PK: embedding ids only need to be unique within a collection
    db["embeddings"].create(embeddings_schema, pk=("collection_id", "id"))
8,649 | from sqlite_migrate import Migrations
import hashlib
import time
def m002_foreign_key(db):
    """Enforce that every embedding row belongs to an existing collection."""
    embeddings = db["embeddings"]
    embeddings.add_foreign_key("collection_id", "collections", "id")
8,650 | from sqlite_migrate import Migrations
import hashlib
import time
def m003_add_updated(db):
    """Add an `updated` timestamp column, backfilling existing rows with now."""
    db["embeddings"].add_column("updated", int)
    # Pretty-print the schema
    db["embeddings"].transform()
    # Assume anything existing was last updated right now.
    # Bug fix: use execute(), not query() — sqlite_utils' Database.query()
    # is a lazy generator, so the UPDATE never ran when its result was
    # discarded.
    db.execute(
        "update embeddings set updated = ? where updated is null", [int(time.time())]
    )
8,651 | from sqlite_migrate import Migrations
import hashlib
import time
def m004_store_content_hash(db):
    """Add a `content_hash` column and backfill it for every existing row."""
    db["embeddings"].add_column("content_hash", bytes)
    # Re-order columns so content_hash sits next to content
    db["embeddings"].transform(
        column_order=(
            "collection_id",
            "id",
            "embedding",
            "content",
            "content_hash",
            "metadata",
            "updated",
        )
    )

    # Register functions manually so we can de-register later
    def md5(text):
        # Hash of the row's textual content, used for change detection
        return hashlib.md5(text.encode("utf8")).digest()

    def random_md5():
        # Placeholder hash for rows with NULL content; time-seeded so two
        # content-less rows are unlikely to collide
        return hashlib.md5(str(time.time()).encode("utf8")).digest()

    db.conn.create_function("temp_md5", 1, md5)
    db.conn.create_function("temp_random_md5", 0, random_md5)

    # Run both backfill UPDATEs inside a single transaction
    with db.conn:
        db.execute(
            """
            update embeddings
            set content_hash = temp_md5(content)
            where content is not null
        """
        )
        db.execute(
            """
            update embeddings
            set content_hash = temp_random_md5()
            where content is null
        """
        )
    db["embeddings"].create_index(["content_hash"])

    # De-register functions (passing None removes the SQL function)
    db.conn.create_function("temp_md5", 1, None)
    db.conn.create_function("temp_random_md5", 0, None)
8,652 | from sqlite_migrate import Migrations
import hashlib
import time
def m005_add_content_blob(db):
    """Add a `content_blob` column for binary content, placed after `content`."""
    embeddings = db["embeddings"]
    embeddings.add_column("content_blob", bytes)
    embeddings.transform(
        column_order=("collection_id", "id", "embedding", "content", "content_blob")
    )
8,653 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def _validate_metadata_json(ctx, param, value):
if value is None:
return value
try:
obj = json.loads(value)
if not isinstance(obj, dict):
raise click.BadParameter("Metadata must be a JSON object")
return obj
except json.JSONDecodeError:
raise click.BadParameter("Metadata must be valid JSON") | null |
8,654 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `cli` function. Write a Python function `def cli()` to solve the following problem:
Access large language models from the command-line Documentation: https://llm.datasette.io/ To get started, obtain an OpenAI key and set it like this: \b $ llm keys set openai Enter key: ... Then execute a prompt like this: llm 'Five outrageous names for a pet pelican'
Here is the function:
def cli():
    # NOTE: click renders this docstring as the `llm --help` text. The \b
    # line is click's "no rewrap" marker — keep the formatting intact.
    """
    Access large language models from the command-line

    Documentation: https://llm.datasette.io/

    To get started, obtain an OpenAI key and set it like this:

    \b
        $ llm keys set openai
        Enter key: ...

    Then execute a prompt like this:

        llm 'Five outrageous names for a pet pelican'
    """
8,655 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def prompt(
    prompt,
    system,
    model_id,
    options,
    template,
    param,
    no_stream,
    no_log,
    log,
    _continue,
    conversation_id,
    key,
    save,
):
    """
    Execute a prompt

    Documentation: https://llm.datasette.io/en/stable/usage.html
    """
    # --log forces logging on, --no-log forces it off; both at once is a
    # contradiction.
    if log and no_log:
        raise click.ClickException("--log and --no-log are mutually exclusive")

    model_aliases = get_model_aliases()

    def read_prompt():
        # Combine stdin (if piped) with the positional prompt argument;
        # mutates the enclosing `prompt` variable.
        nonlocal prompt

        # Is there extra prompt available on stdin?
        stdin_prompt = None
        if not sys.stdin.isatty():
            stdin_prompt = sys.stdin.read()

        if stdin_prompt:
            bits = [stdin_prompt]
            if prompt:
                bits.append(prompt)
            prompt = " ".join(bits)

        if prompt is None and not save and sys.stdin.isatty():
            # Hang waiting for input to stdin (unless --save)
            prompt = sys.stdin.read()

        return prompt

    if save:
        # We are saving their prompt/system/etc to a new template
        # Fields to save: prompt, system, model - and more in the future
        disallowed_options = []
        for option, var in (
            ("--template", template),
            ("--continue", _continue),
            ("--cid", conversation_id),
        ):
            if var:
                disallowed_options.append(option)
        if disallowed_options:
            raise click.ClickException(
                "--save cannot be used with {}".format(", ".join(disallowed_options))
            )
        path = template_dir() / f"{save}.yaml"
        to_save = {}
        if model_id:
            try:
                to_save["model"] = model_aliases[model_id].model_id
            except KeyError:
                raise click.ClickException("'{}' is not a known model".format(model_id))
        prompt = read_prompt()
        if prompt:
            to_save["prompt"] = prompt
        if system:
            to_save["system"] = system
        if param:
            to_save["defaults"] = dict(param)
        path.write_text(
            yaml.dump(
                to_save,
                indent=4,
                default_flow_style=False,
            ),
            "utf-8",
        )
        # Saving a template short-circuits the rest of the command
        return

    if template:
        params = dict(param)
        # Cannot be used with system
        if system:
            raise click.ClickException("Cannot use -t/--template and --system together")
        template_obj = load_template(template)
        prompt = read_prompt()
        try:
            prompt, system = template_obj.evaluate(prompt, params)
        except Template.MissingVariables as ex:
            raise click.ClickException(str(ex))
        # The template's model is a fallback only; explicit -m wins
        if model_id is None and template_obj.model:
            model_id = template_obj.model

    conversation = None
    if conversation_id or _continue:
        # Load the conversation - loads most recent if no ID provided
        try:
            conversation = load_conversation(conversation_id)
        except UnknownModelError as ex:
            raise click.ClickException(str(ex))

    # Figure out which model we are using
    if model_id is None:
        if conversation:
            model_id = conversation.model.model_id
        else:
            model_id = get_default_model()

    # Now resolve the model
    try:
        model = model_aliases[model_id]
    except KeyError:
        raise click.ClickException("'{}' is not a known model".format(model_id))

    # Provide the API key, if one is needed and has been provided
    if model.needs_key:
        model.key = get_key(key, model.needs_key, model.key_env_var)

    if conversation:
        # To ensure it can see the key
        conversation.model = model

    # Validate options
    validated_options = {}
    if options:
        # Validate with pydantic
        try:
            validated_options = dict(
                (key, value)
                for key, value in model.Options(**dict(options))
                if value is not None
            )
        except pydantic.ValidationError as ex:
            raise click.ClickException(render_errors(ex.errors()))

    should_stream = model.can_stream and not no_stream
    if not should_stream:
        validated_options["stream"] = False

    prompt = read_prompt()

    # A continued conversation prompts through the conversation object so
    # prior exchanges are included
    prompt_method = model.prompt
    if conversation:
        prompt_method = conversation.prompt

    try:
        response = prompt_method(prompt, system, **validated_options)
        if should_stream:
            for chunk in response:
                print(chunk, end="")
                sys.stdout.flush()
            print("")
        else:
            print(response.text())
    except Exception as ex:
        raise click.ClickException(str(ex))

    # Log to the database
    if (logs_on() or log) and not no_log:
        log_path = logs_db_path()
        (log_path.parent).mkdir(parents=True, exist_ok=True)
        db = sqlite_utils.Database(log_path)
        migrate(db)
        response.log_to_db(db)
"_continue",
"-c",
"--continue",
is_flag=True,
flag_value=-1,
help="Continue the most recent conversation.",
def load_conversation(conversation_id: Optional[str]) -> Optional[Conversation]:
    """Load a conversation and its responses from the logs database.

    When *conversation_id* is None the most recent conversation is used;
    returns None if the database holds no conversations at all. Raises
    click.ClickException for an unknown explicit ID.
    """
    db = sqlite_utils.Database(logs_db_path())
    migrate(db)
    if conversation_id is None:
        # Fall back to the most recently created conversation
        recent = list(db["conversations"].rows_where(order_by="id desc", limit=1))
        if not recent:
            return None
        conversation_id = recent[0]["id"]
    try:
        row = cast(sqlite_utils.db.Table, db["conversations"]).get(conversation_id)
    except sqlite_utils.db.NotFoundError:
        raise click.ClickException(
            "No conversation found with id={}".format(conversation_id)
        )
    # Inflate the conversation and attach all of its logged responses
    conversation = Conversation.from_row(row)
    for response_row in db["responses"].rows_where(
        "conversation_id = ?", [conversation_id]
    ):
        conversation.responses.append(Response.from_row(response_row))
    return conversation
cls=DefaultGroup,
default="list",
default_if_no_args=True,
def get_default_model(filename="default_model.txt", default=DEFAULT_MODEL):
    """Return the configured default model ID, falling back to *default*.

    Reads the model ID from *filename* inside the llm user directory when
    that file exists.
    """
    config_path = user_dir() / filename
    if not config_path.exists():
        return default
    return config_path.read_text().strip()
def logs_db_path():
    """Return the path to the prompt/response log database inside user_dir()."""
    return user_dir() / "logs.db"
def load_template(name):
    """Load and validate the named template from the template directory.

    A YAML file containing a bare string is shorthand for a prompt-only
    template. Raises click.ClickException for a missing file, invalid YAML,
    or a template that fails validation.
    """
    template_path = template_dir() / f"{name}.yaml"
    if not template_path.exists():
        raise click.ClickException(f"Invalid template: {name}")
    try:
        data = yaml.safe_load(template_path.read_text())
    except yaml.YAMLError as ex:
        raise click.ClickException("Invalid YAML: {}".format(str(ex)))
    if isinstance(data, str):
        # Bare-string shorthand: the whole file is the prompt
        return Template(name=name, prompt=data)
    data["name"] = name
    try:
        return Template(**data)
    except pydantic.ValidationError as ex:
        msg = "A validation error occurred:\n" + render_errors(ex.errors())
        raise click.ClickException(msg)
def render_errors(errors):
    """Render a list of pydantic error dicts as human-readable text.

    Each error contributes two lines: its location path and its message.
    Location entries are stringified because pydantic v2 `loc` tuples can
    contain ints (list indices), which would make ", ".join raise TypeError.
    """
    output = []
    for error in errors:
        output.append(", ".join(str(loc) for loc in error["loc"]))
        output.append(" " + error["msg"])
    return "\n".join(output)
class UnknownModelError(KeyError):
    """Raised when a model ID or alias cannot be resolved to a model."""
    pass
def get_model(name):
    """Resolve *name* (a model ID or alias) to a model instance.

    Raises UnknownModelError when the name is not registered.
    """
    aliases = get_model_aliases()
    if name not in aliases:
        raise UnknownModelError("Unknown model: " + name)
    return aliases[name]
def get_key(
    explicit_key: Optional[str], key_alias: str, env_var: Optional[str] = None
) -> Optional[str]:
    """
    Return an API key based on a hierarchy of potential sources.

    :param explicit_key: A key provided by the user. This may be the key
        itself, or an alias of a key stored in keys.json.
    :param key_alias: The alias used to retrieve the key from the keys.json file.
    :param env_var: Name of the environment variable to check for the key.
    """
    stored_keys = load_keys()
    # If the user passed an alias, return the key stored under that alias
    if explicit_key in stored_keys:
        return stored_keys[explicit_key]
    if explicit_key:
        # Not a known alias - treat the provided value as the key itself
        return explicit_key
    # A stored key for the default alias over-rides environment variables
    if key_alias in stored_keys:
        return stored_keys[key_alias]
    # Finally fall back to the environment variable, if set and non-empty
    if env_var and os.environ.get(env_var):
        return os.environ[env_var]
    # Couldn't find a key anywhere
    return None
def migrate(db):
    """Apply any MIGRATIONS not yet recorded in the _llm_migrations table."""
    ensure_migrations_table(db)
    already_applied = {row["name"] for row in db["_llm_migrations"].rows}
    for migration in MIGRATIONS:
        name = migration.__name__
        if name in already_applied:
            continue
        migration(db)
        # Record the migration so it is never re-applied
        db["_llm_migrations"].insert(
            {"name": name, "applied_at": str(datetime.datetime.utcnow())}
        )
        already_applied.add(name)
The provided code snippet includes necessary dependencies for implementing the `chat` function. Write a Python function `def chat( system, model_id, _continue, conversation_id, template, param, options, no_stream, key, )` to solve the following problem:
Hold an ongoing chat with a model.
Here is the function:
def chat(
    system,
    model_id,
    _continue,
    conversation_id,
    template,
    param,
    options,
    no_stream,
    key,
):
    """
    Hold an ongoing chat with a model.
    """
    # Left and right arrow keys to move cursor:
    readline.parse_and_bind("\\e[D: backward-char")
    readline.parse_and_bind("\\e[C: forward-char")
    # Ensure the log database directory exists and its schema is current
    log_path = logs_db_path()
    (log_path.parent).mkdir(parents=True, exist_ok=True)
    db = sqlite_utils.Database(log_path)
    migrate(db)
    conversation = None
    if conversation_id or _continue:
        # Load the conversation - loads most recent if no ID provided
        try:
            conversation = load_conversation(conversation_id)
        except UnknownModelError as ex:
            raise click.ClickException(str(ex))
    template_obj = None
    if template:
        params = dict(param)
        # Cannot be used with system
        if system:
            raise click.ClickException("Cannot use -t/--template and --system together")
        template_obj = load_template(template)
        # A template may pin a model; honour it only when none was given explicitly
        if model_id is None and template_obj.model:
            model_id = template_obj.model
    # Figure out which model we are using
    if model_id is None:
        if conversation:
            model_id = conversation.model.model_id
        else:
            model_id = get_default_model()
    # Now resolve the model
    try:
        model = get_model(model_id)
    except KeyError:
        raise click.ClickException("'{}' is not a known model".format(model_id))
    # Provide the API key, if one is needed and has been provided
    if model.needs_key:
        model.key = get_key(key, model.needs_key, model.key_env_var)
    if conversation is None:
        # Start a fresh conversation for this chat
        conversation = Conversation(model=model)
    else:
        # Ensure it can see the API key
        conversation.model = model
    # Validate options
    validated_options = {}
    if options:
        try:
            # model.Options(...) is iterated for (name, value) pairs;
            # unset (None) options are dropped
            validated_options = dict(
                (key, value)
                for key, value in model.Options(**dict(options))
                if value is not None
            )
        except pydantic.ValidationError as ex:
            raise click.ClickException(render_errors(ex.errors()))
    should_stream = model.can_stream and not no_stream
    if not should_stream:
        # Explicitly disable streaming on models that would otherwise stream
        validated_options["stream"] = False
    click.echo("Chatting with {}".format(model.model_id))
    click.echo("Type 'exit' or 'quit' to exit")
    click.echo("Type '!multi' to enter multiple lines, then '!end' to finish")
    # REPL loop: read one prompt (or one !multi block) per iteration
    in_multi = False
    accumulated = []
    end_token = "!end"
    while True:
        prompt = click.prompt("", prompt_suffix="> " if not in_multi else "")
        if prompt.strip().startswith("!multi"):
            # Enter multi-line mode; "!multi word" sets a custom "!end word" token
            in_multi = True
            bits = prompt.strip().split()
            if len(bits) > 1:
                end_token = "!end {}".format(" ".join(bits[1:]))
            continue
        if in_multi:
            if prompt.strip() == end_token:
                # End of the block: join buffered lines into a single prompt
                prompt = "\n".join(accumulated)
                in_multi = False
                accumulated = []
            else:
                accumulated.append(prompt)
                continue
        if template_obj:
            try:
                prompt, system = template_obj.evaluate(prompt, params)
            except Template.MissingVariables as ex:
                raise click.ClickException(str(ex))
        if prompt.strip() in ("exit", "quit"):
            break
        response = conversation.prompt(prompt, system, **validated_options)
        # System prompt only sent for the first message:
        system = None
        for chunk in response:
            print(chunk, end="")
            sys.stdout.flush()
        response.log_to_db(db)
        print("")
8,656 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def keys():
    "Manage stored API keys for different models"
    # NOTE(review): presumably a click command group stub whose decorator is
    # outside this extract; the docstring doubles as the CLI help text.
def user_dir():
    """Return (and create if needed) the llm user data directory.

    The LLM_USER_PATH environment variable, when set and non-empty,
    overrides the platform-specific click application directory.
    """
    base = os.environ.get("LLM_USER_PATH") or click.get_app_dir("io.datasette.llm")
    path = pathlib.Path(base)
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `keys_list` function. Write a Python function `def keys_list()` to solve the following problem:
List names of all stored keys
Here is the function:
def keys_list():
    "List names of all stored keys"
    path = user_dir() / "keys.json"
    if not path.exists():
        click.echo("No keys found")
        return
    stored = json.loads(path.read_text())
    # "// Note" is a human-readable warning entry, not a real key name
    for name in sorted(n for n in stored if n != "// Note"):
        click.echo(name)
8,657 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def user_dir():
    """Directory used for llm user data; created on first access.

    LLM_USER_PATH overrides the default click application directory.
    """
    llm_user_path = os.environ.get("LLM_USER_PATH")
    path = (
        pathlib.Path(llm_user_path)
        if llm_user_path
        else pathlib.Path(click.get_app_dir("io.datasette.llm"))
    )
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `keys_path_command` function. Write a Python function `def keys_path_command()` to solve the following problem:
Output the path to the keys.json file
Here is the function:
def keys_path_command():
    "Output the path to the keys.json file"
    keys_path = user_dir() / "keys.json"
    click.echo(keys_path)
8,658 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
default=None,
def user_dir():
    """Return the llm data directory, creating it if necessary.

    An LLM_USER_PATH environment variable takes precedence over the
    click application directory.
    """
    env_path = os.environ.get("LLM_USER_PATH")
    if env_path:
        path = pathlib.Path(env_path)
    else:
        path = pathlib.Path(click.get_app_dir("io.datasette.llm"))
    path.mkdir(exist_ok=True, parents=True)
    return path
The provided code snippet includes necessary dependencies for implementing the `keys_set` function. Write a Python function `def keys_set(name, value)` to solve the following problem:
Save a key in the keys.json file. Example usage: \b $ llm keys set openai Enter key: ...
Here is the function:
def keys_set(name, value):
    """
    Save a key in the keys.json file
    Example usage:
    \b
    $ llm keys set openai
    Enter key: ...
    """
    warning = {"// Note": "This file stores secret API credentials. Do not share!"}
    path = user_dir() / "keys.json"
    path.parent.mkdir(parents=True, exist_ok=True)
    if not path.exists():
        # First write: seed the file with the warning note, then lock perms
        path.write_text(json.dumps(warning))
        path.chmod(0o600)  # keys are secrets: owner read/write only
    try:
        current = json.loads(path.read_text())
    except json.decoder.JSONDecodeError:
        # Unreadable file: start over from the default skeleton
        current = warning
    current[name] = value
    path.write_text(json.dumps(current, indent=2) + "\n")
8,659 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
The provided code snippet includes necessary dependencies for implementing the `logs` function. Write a Python function `def logs()` to solve the following problem:
Tools for exploring logged prompts and responses
Here is the function:
def logs():
    "Tools for exploring logged prompts and responses"
    # NOTE(review): presumably a click command group stub whose decorator is
    # outside this extract; the docstring doubles as the CLI help text.
8,660 | import click
from click_default_group import DefaultGroup
from dataclasses import asdict
import io
import json
from llm import (
Collection,
Conversation,
Response,
Template,
UnknownModelError,
encode,
get_embedding_models_with_aliases,
get_embedding_model_aliases,
get_embedding_model,
get_key,
get_plugins,
get_model,
get_model_aliases,
get_models_with_aliases,
user_dir,
set_alias,
remove_alias,
)
from .migrations import migrate
from .plugins import pm
import base64
import pathlib
import pydantic
import readline
from runpy import run_module
import shutil
import sqlite_utils
from sqlite_utils.utils import rows_from_file, Format
import sys
import textwrap
from typing import cast, Optional, Iterable, Union, Tuple
import warnings
import yaml
def logs_db_path():
    """Location of the logs.db SQLite file inside the user directory."""
    db_path = user_dir() / "logs.db"
    return db_path
The provided code snippet includes necessary dependencies for implementing the `logs_path` function. Write a Python function `def logs_path()` to solve the following problem:
Output the path to the logs.db file
Here is the function:
def logs_path():
    "Output the path to the logs.db file"
    path = logs_db_path()
    click.echo(path)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.