code stringlengths 17 6.64M |
|---|
def get_least_power2_above(x):
    """Return the smallest power of two that is >= ``x``.

    Args:
        x: A positive number (int or float), e.g. an STFT window length.

    Returns:
        int: The least power of 2 not smaller than ``x``.
    """
    # math.log2 is exact for powers of two, so e.g. 1024 -> 1024, 1025 -> 2048.
    # Pure-stdlib scalar math: no need for numpy here, and the result is a
    # plain Python int (suitable for n_fft) rather than a numpy scalar.
    return int(2 ** math.ceil(math.log2(x)))
|
class MelspecInversion(nn.Module):
    """Base module holding mel-spectrogram analysis settings.

    Lazily constructs a torchaudio ``MelSpectrogram`` transform on first use,
    then converts power spectrograms into a clamped log-scale representation.
    """

    def __init__(self, n_mels: int = 128, sample_rate: int = 24000, win_length: int = 1024, hop_length: int = 256):
        super().__init__()
        # Analysis parameters; the transform itself is built lazily so that
        # loading a state dict does not depend on the transform's buffers.
        self.n_mels = n_mels
        self.sample_rate = sample_rate
        self.win_length = win_length
        self.hop_length = hop_length
        self.melspec_layer = None

    @classmethod
    def from_pretrained(cls, pretrained_model_path, **config):
        """Build a model from ``config`` and load weights from disk (CPU map)."""
        instance = cls(**config)
        state = torch.load(pretrained_model_path, map_location='cpu')
        instance.load_state_dict(state)
        return instance

    def prepare_melspectrogram(self, audio):
        """Convert a waveform batch into a clamped log-mel spectrogram."""
        if self.melspec_layer is None:
            self.melspec_layer = MelSpectrogram(
                n_mels=self.n_mels,
                sample_rate=self.sample_rate,
                n_fft=get_least_power2_above(self.win_length),
                win_length=self.win_length,
                hop_length=self.hop_length,
                f_min=0.0,
                f_max=(self.sample_rate / 2.0),
                center=True,
                power=2.0,
                mel_scale='slaney',
                norm='slaney',
                normalized=True,
                pad_mode='constant',
            )
        # Keep the transform on the same device as the incoming audio.
        self.melspec_layer = self.melspec_layer.to(audio.device)
        melspec = self.melspec_layer(audio)
        # Power -> dB, then shift/scale so typical values land around [0, 1].
        melspec = 10 * torch.log10(melspec + 1e-10)
        return torch.clamp((melspec + 100) / 100, min=0.0)
|
@torch.jit.script
def silu(x):
    """SiLU / swish activation, ``x * sigmoid(x)`` (TorchScript-compiled)."""
    return torch.sigmoid(x) * x
|
def Conv1d(*args, **kwargs):
    """Create an ``nn.Conv1d`` whose weights use Kaiming-normal init."""
    conv = nn.Conv1d(*args, **kwargs)
    nn.init.kaiming_normal_(conv.weight)
    return conv
|
class DiffusionEmbedding(nn.Module):
    """Sinusoidal diffusion-timestep embedding with two projection layers.

    A ``(max_steps, 128)`` sin/cos table is precomputed once; integer
    timesteps index it directly while float timesteps are linearly
    interpolated between the two nearest rows.
    """

    def __init__(self, max_steps):
        super().__init__()
        # Non-persistent: the table is deterministic, so it is rebuilt rather
        # than stored in checkpoints.
        self.register_buffer('embedding', self._build_embedding(max_steps), persistent=False)
        self.projection1 = nn.Linear(128, 512)
        self.projection2 = nn.Linear(512, 512)

    def _build_embedding(self, max_steps):
        # angles[t, d] = t * 10^(4d/63) for d in [0, 64); concatenated sin/cos.
        positions = torch.arange(max_steps).unsqueeze(1)
        frequencies = torch.arange(64).unsqueeze(0) * 4.0 / 63.0
        angles = positions * (10.0 ** frequencies)
        return torch.cat([torch.sin(angles), torch.cos(angles)], dim=1)

    def _lerp_embedding(self, t):
        # Linear interpolation between the floor and ceil integer rows.
        lo = torch.floor(t).long()
        hi = torch.ceil(t).long()
        frac = t - lo
        return self.embedding[lo] + (self.embedding[hi] - self.embedding[lo]) * frac

    def forward(self, timestep):
        if timestep.dtype in [torch.int32, torch.int64]:
            x = self.embedding[timestep]
        else:
            x = self._lerp_embedding(timestep)
        x = silu(self.projection1(x))
        return silu(self.projection2(x))
|
class SpectrogramUpsampler(nn.Module):
    """Upsample spectrogram frames along time with two 16x transposed convs.

    Each ConvTranspose2d stretches the last (time) axis by 16, for a combined
    256x, matching a hop length of 256 samples per frame.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])
        self.conv2 = nn.ConvTranspose2d(1, 1, [3, 32], stride=[1, 16], padding=[1, 8])

    def forward(self, x):
        for conv in (self.conv1, self.conv2):
            x = F.leaky_relu(conv(x), 0.4)
        # Drop the singleton channel axis.
        return torch.squeeze(x, 1)
|
class ResidualBlock(nn.Module):
    """Gated dilated-conv block conditioned on timestep and mel frames.

    Returns a residual-connected output and a skip connection, as in DiffWave.
    """

    def __init__(self, n_mels, residual_channels, dilation):
        super().__init__()
        # 'same'-length dilated conv: padding equals dilation for kernel 3.
        self.dilated_conv = Conv1d(residual_channels, 2 * residual_channels, 3, padding=dilation, dilation=dilation)
        self.diffusion_projection = nn.Linear(512, residual_channels)
        self.conditioner_projection = Conv1d(n_mels, 2 * residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 2 * residual_channels, 1)

    def forward(self, x, timestep, conditioner):
        # Broadcast the timestep embedding across the time axis.
        t = self.diffusion_projection(timestep).unsqueeze(-1)
        h = self.dilated_conv(x + t) + self.conditioner_projection(conditioner)
        gate, filt = torch.chunk(h, 2, dim=1)
        h = torch.sigmoid(gate) * torch.tanh(filt)
        residual, skip = torch.chunk(self.output_projection(h), 2, dim=1)
        # 1/sqrt(2) keeps activation variance stable across stacked blocks.
        return (x + residual) / sqrt(2.0), skip
|
class GomiDiff(nn.Module):
    """GomiDiff: Gaudio open mel-spectrogram inversion with diffusion models.

    Based on Diffwave (Kong et al. 2020).

    Args:
        in_channels (int): number of mel frequency bins
        residual_layers (int): number of residual layers
        residual_channels (int): channel width of each residual layer
        dilation_cycle_length (int): number of layers per dilation cycle
        num_diffusion_steps (int): number of diffusion steps
    """

    def __init__(self, in_channels: int, residual_layers: int, residual_channels: int, dilation_cycle_length: int, num_diffusion_steps: int):
        super().__init__()
        self.dilation_cycle_length = dilation_cycle_length
        self.num_diffusion_steps = num_diffusion_steps
        # One FiLM layer per dilation cycle modulates x from the spectrogram.
        n_cycles = residual_layers // dilation_cycle_length
        self.film_layers = nn.ModuleList(
            nn.Conv1d(residual_channels, 2 * residual_channels, 1) for _ in range(n_cycles)
        )
        self.input_projection = Conv1d(1, residual_channels, 1)
        self.diffusion_embedding = DiffusionEmbedding(num_diffusion_steps)
        self.spectrogram_upsampler = SpectrogramUpsampler()
        # Dilations cycle as 1, 2, 4, ... within each cycle of layers.
        self.residual_layers = nn.ModuleList(
            ResidualBlock(in_channels, residual_channels, 2 ** (i % dilation_cycle_length))
            for i in range(residual_layers)
        )
        self.skip_projection = Conv1d(residual_channels, residual_channels, 1)
        self.output_projection = Conv1d(residual_channels, 1, 1)
        # Zero-init the final conv so the untrained model predicts zero noise.
        nn.init.zeros_(self.output_projection.weight)

    def forward(self, signal, timestep, spectrogram):
        x = F.relu(self.input_projection(signal))
        t_emb = self.diffusion_embedding(timestep)
        cond = self.spectrogram_upsampler(spectrogram)
        skip_sum = None
        for i, block in enumerate(self.residual_layers):
            if i % self.dilation_cycle_length == 0:
                # Start of a new dilation cycle: re-modulate x via FiLM.
                film = self.film_layers[i // self.dilation_cycle_length](cond)
                scale, shift = torch.chunk(film, 2, dim=1)
                x = (1 + 0.01 * scale) * x + shift
            x, skip = block(x, t_emb, cond)
            skip_sum = skip if skip_sum is None else skip_sum + skip
        # Average skip connections (variance-preserving scaling).
        x = skip_sum / sqrt(len(self.residual_layers))
        x = F.relu(self.skip_projection(x))
        return self.output_projection(x)
|
class DiffusionWrapper(MelspecInversion):
    """DDPM sampling wrapper around :class:`GomiDiff`."""

    def __init__(self, in_channels: int, residual_layers: int, residual_channels: int, dilation_cycle_length: int, num_diffusion_steps: int, **mel_config):
        super().__init__(n_mels=in_channels, **mel_config)
        self.model = GomiDiff(
            in_channels=in_channels,
            residual_layers=residual_layers,
            residual_channels=residual_channels,
            dilation_cycle_length=dilation_cycle_length,
            num_diffusion_steps=num_diffusion_steps,
        )
        self.scheduler = diffusers.DDPMScheduler(beta_start=0.0001, beta_end=0.05, num_train_timesteps=self.model.num_diffusion_steps)
        # Full-length sampling: inference steps == training steps.
        self.scheduler.set_timesteps(num_inference_steps=self.model.num_diffusion_steps)

    @torch.no_grad()
    def forward(self, spectrogram, return_whole_sequence=False):
        """Sample a waveform from noise, conditioned on ``spectrogram``.

        Returns the final sample, or (if ``return_whole_sequence``) the whole
        denoising trajectory with the cleanest sample first.
        """
        # One audio sample per hop for every spectrogram frame.
        batch = spectrogram.size(0)
        n_samples = self.hop_length * spectrogram.size(-1)
        x = torch.randn(batch, 1, n_samples, device=spectrogram.device)
        trajectory = [x.clone()] if return_whole_sequence else None
        for t in tqdm(self.scheduler.timesteps, total=len(self.scheduler.timesteps)):
            timestep = torch.tensor([t], device=spectrogram.device).long()
            noise_estimate = self.model(x, timestep, spectrogram)
            x = self.scheduler.step(noise_estimate, timestep=t, sample=x)['prev_sample']
            if trajectory is not None:
                trajectory.insert(0, x.clone())
        return trajectory if trajectory is not None else x
|
def load_audio(audio_path: Union[(str, bytes, os.PathLike)], sample_rate: int, mono=True, fast_resample=False):
    """Load an audio file and resample it to ``sample_rate``.

    Returns:
        torch.Tensor: float waveform of shape (channels, samples).
    """
    if fast_resample:
        # Load at the native rate, then resample with the cheaper polyphase filter.
        audio, orig_sr = librosa.load(audio_path, sr=None, mono=mono)
        audio = librosa.resample(audio, orig_sr=orig_sr, target_sr=sample_rate, res_type='polyphase')
    else:
        audio, _ = librosa.load(audio_path, sr=sample_rate, mono=mono)
    # Guarantee a 2-D (channels, samples) layout even for mono input.
    return torch.from_numpy(np.atleast_2d(audio)).float()
|
@torch.no_grad()
def _analysis(model: torch.nn.Module, audio: Union[(np.ndarray, torch.Tensor)], device: torch.device):
'Convert waveform into melspectrogram.'
if isinstance(audio, np.ndarray):
audio = torch.from_numpy(audio).float()
if (audio.ndim < 2):
audio = torch.atleast_2d(audio)
if (audio.device != device):
audio = audio.to(device)
melspec = model.prepare_melspectrogram(audio)
return melspec
|
@torch.no_grad()
def _synthesis(model: torch.nn.Module, melspec: torch.Tensor, device: torch.device):
'Convert melspectrogram into waveform.'
if (melspec.ndim < 3):
melspec = torch.atleast_3d(melspec)
if (melspec.device != device):
melspec = melspec.to(device)
recon_audio = model(melspec)
recon_audio = recon_audio.squeeze().cpu()
return recon_audio
|
def analysis_synthesis(model: torch.nn.Module, in_file: Union[(str, bytes, os.PathLike)], out_file: Union[(str, bytes, os.PathLike)], device: torch.device, fast_resample=False):
    """Process and save files.

    Reads an audio or melspectrogram file, converts it, and writes either a
    '.pt' melspectrogram dictionary or a '.wav' reconstruction.
    """
    (_, input_ext) = os.path.splitext(in_file)
    (_, output_ext) = os.path.splitext(out_file)
    if (input_ext in AUDIO_EXTS):
        audio = load_audio(in_file, model.sample_rate, fast_resample=fast_resample)
        melspec = _analysis(model, audio, device=device)
    elif (input_ext in MEL_EXTS):
        if (input_ext == '.npy'):
            melspec = np.load(in_file)
            melspec = torch.from_numpy(melspec).float()
        else:
            melspec_dict = torch.load(in_file)
            melspec = melspec_dict['melspec']
            # Saved analysis parameters must match this model's configuration.
            assert (melspec_dict['n_mels'] == model.n_mels), f"Wrong `n_mels`. expected [{model.n_mels}], got [{melspec_dict['n_mels']}]."
            assert (melspec_dict['sample_rate'] == model.sample_rate), f"Wrong `sample_rate`. expected [{model.sample_rate}], got [{melspec_dict['sample_rate']}]."
            assert (melspec_dict['win_length'] == model.win_length), f"Wrong `win_length`. expected [{model.win_length}], got [{melspec_dict['win_length']}]."
            assert (melspec_dict['hop_length'] == model.hop_length), f"Wrong `hop_length`. expected [{model.hop_length}], got [{melspec_dict['hop_length']}]."
    else:
        print(f'Unsupported input file extension: {input_ext} for {in_file}.')
        # Bail out: no melspectrogram was produced. (Previously this fell
        # through and raised NameError on `melspec` below.)
        return
    if (output_ext in MEL_EXTS):
        assert (output_ext == '.pt'), f"Only '.pt' file is supported for melspectrogram extraction. got '{output_ext}'."
        torch.save({'melspec': melspec.cpu(), 'n_mels': model.n_mels, 'sample_rate': model.sample_rate, 'win_length': model.win_length, 'hop_length': model.hop_length}, out_file)
    elif (output_ext in AUDIO_EXTS):
        # The message was previously split across two statements, so the
        # actual extension never appeared in the assert output.
        assert (output_ext == '.wav'), f"Only '.wav' file is supported for melspectrogram inversion. got '{output_ext}'."
        recon_audio = _synthesis(model, melspec, device=device)
        sf.write(out_file, recon_audio, model.sample_rate, subtype='PCM_16')
    else:
        print(f'Unsupported output file extension: {output_ext} for {out_file}.')
|
def parse_args():
    """Build and parse the command-line arguments for the CLI.

    Returns:
        argparse.Namespace: parsed arguments.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', type=str, default=None, choices=['diffusion', 'gan'], help='Model type to run.')
    parser.add_argument('-p', '--model_path', type=str, default='checkpoints/', help='Directory path to model checkpoint.')
    parser.add_argument('-i', '--input_files', nargs='+', type=str, help=f'Path to input files. Audio files with {AUDIO_EXTS} extension and melspectrogram files with {MEL_EXTS} extension are supported.')
    parser.add_argument('-o', '--output_files', nargs='+', type=str, default=['outputs/'], help="(Optional) Path to output files. Audio files with '.wav' extension and melspectrogram files with '.pt' extension are supported. If both input and output files are melspectrogram, error will be raised. (default: `outputs/`)")
    parser.add_argument('-d', '--device', nargs='+', type=device_type, default=['cpu'], help="Device to use. Currently multi-GPU inference is not supported. (default: 'cpu')")
    parser.add_argument('--n_mels', type=int, default=None, help='Number of bins in melspectrogram. If `args.model` is provided, this value will be ignored.')
    # Help-string typos fixed: 'melspetrogram' -> 'melspectrogram', 'Windoew' -> 'Window'.
    parser.add_argument('--sample_rate', type=int, default=None, help='Sample rate for audio sample and melspectrogram. If `args.model` is provided, this value will be ignored.')
    parser.add_argument('--win_length', type=int, default=None, help='Window length for melspectrogram. If `args.model` is provided, this value will be ignored.')
    parser.add_argument('--hop_length', type=int, default=None, help='Hop length for melspectrogram. If `args.model` is provided, this value will be ignored.')
    args = parser.parse_args()
    return args
|
def process(model, device, input_files, output_files):
    """Run analysis/synthesis on ``device`` for each input/output file pair."""
    model.to(device)
    # Polyphase (fast) resampling only matters when processing batches.
    use_fast_resample = len(input_files) > 1
    for in_file, out_file in zip(input_files, output_files):
        analysis_synthesis(model=model, in_file=in_file, out_file=out_file, device=device, fast_resample=use_fast_resample)
|
def main():
    """CLI entry point: build the selected model and process all files."""
    args = parse_args()
    # Expand directories / wildcards / list files into concrete path pairs.
    (args.input_files, args.output_files) = preprocess_inout_files(args.input_files, args.output_files)
    # NOTE(review): `--model_path` is parsed but unused here; checkpoint paths
    # are hard-coded below.
    if (args.model == 'gan'):
        model = models.GomiGAN.from_pretrained(pretrained_model_path='checkpoints/gan_state_dict.pt', **config.GANConfig().__dict__)
    elif (args.model == 'diffusion'):
        model = models.DiffusionWrapper.from_pretrained(pretrained_model_path='checkpoints/diffusion_state_dict.pt', **config.DiffusionConfig().__dict__)
    elif (args.model is None):
        # No pretrained model: analysis-only (melspectrogram extraction).
        model = models.MelspecInversion(n_mels=args.n_mels, sample_rate=args.sample_rate, win_length=args.win_length, hop_length=args.hop_length)
    # Multi-GPU is unsupported; only the first device is used.
    process(model, args.device[0], args.input_files, args.output_files)
|
def device_type(device: Union[(str, int)]):
    """argparse ``type=`` converter: 'cpu' stays 'cpu', a GPU index becomes 'cuda:N'.

    Raises:
        ValueError: if ``device`` is neither 'cpu' nor a non-negative integer.
            argparse converts ValueError into a clean usage error, whereas the
            previous AssertionError escaped argparse's handler (and vanished
            under ``python -O``).
    """
    if device == 'cpu':
        return device
    if isinstance(device, int):
        # Accept a bare int as well, matching the Union[str, int] annotation.
        return f'cuda:{device}'
    if not device.isnumeric():
        raise ValueError(f"Invalid device: {device!r}. Expected 'cpu' or a GPU index.")
    return f'cuda:{device}'
|
def check_paths(input_filelist: List[str], output_filelist: List[str]):
    """Check errors and return the filelists if no error is detected.

    Three kinds of errors are checked:
      (1) Extension: every path must use an allowed extension (module
          constants INPUT_EXTS / OUTPUT_EXTS).
      (2) Lengths: input and output filelists must have the same length.
      (3) Undefined behaviour: a mel -> mel input/output pair is rejected.
    """
    def _validate_extensions(filelist: List[str], key: str):
        # Inputs and outputs allow different extension sets.
        allowed_exts = INPUT_EXTS if key == 'input' else OUTPUT_EXTS
        for path in filelist:
            if os.path.splitext(path)[1] not in allowed_exts:
                raise ValueError(f'Unsupported format for `{key}_files`. Only {allowed_exts} are supported.')
    _validate_extensions(input_filelist, key='input')
    _validate_extensions(output_filelist, key='output')
    if len(input_filelist) != len(output_filelist):
        raise ValueError(f'Mistmatched input / output length. Input length: {len(input_filelist)}, Output length: {len(output_filelist)}')
    for in_file, out_file in zip(input_filelist, output_filelist):
        in_ext = os.path.splitext(in_file)[1]
        out_ext = os.path.splitext(out_file)[1]
        # mel -> mel conversion has no defined behaviour.
        if in_ext == out_ext and out_ext in MEL_EXTS:
            raise ValueError(f"Unsupported behaviour. Behaviour for melspectrogram to melspectrogram is not defined. got '-i {in_file} -o {out_file}'.")
    return (input_filelist, output_filelist)
|
def preprocess_inout_files(input_files: List[str], output_files: List[str]):
    """Process input and output filelists.

    For various types of input / output path arguments, it preprocess paths. For
    input paths, it reads text files containing audio file paths or search
    directories using `glob.glob`. For output paths, it automatically generates
    output file paths corresponding to input file paths.

    For more informations or examples, please refer `tests/testcases_path.yaml`.

    Params:
        input_files: file paths for inputs.
        output_files: file paths for outputs.

    Notes:
        `input_files` and `output_files` support various formats. However only
        naive list of paths is supported for multiple item, i.e, multiple items
        for directory path, text file or paths including wild card will result
        in unexpected outcome.

        Supported formats for arguments include:
            single or multiple audio / mel file paths:
                - ["foo/bar/input2.wav"]
                - ["foo/bar/input1.wav", "bar/input2.pt", "foo/input3.mp3"]
            (single) text file path
                - ["foo/bar/input.txt"]
            (single) directory
                - ["foo/"]
                - ["bar/"]
            (single) file or directory path with wild card
                - ["foo/*.wav"]
                - ["foo/**/*]
                - ["bar/**/input*.wav"]
    """
    def _common_process(filelist, key):
        # Normalize the raw argument: expand a .txt list file into paths, pass
        # a wildcard pattern through as a bare *string*, or turn a directory
        # (empty extension) into a recursive 'dir/**/*' pattern.
        assert (key in ['input', 'output']), f'Unknown key: {key}'
        (file_base, file_ext) = os.path.splitext(filelist[0])
        if (file_ext == '.txt'):
            return [line.strip() for line in open(filelist[0])]
        elif ('*' in file_base):
            # NOTE: returns a str, not a list; detected via `'*' in ...` below.
            return filelist[0]
        elif (file_ext == ''):
            return os.path.join(file_base, '**/*')
        return filelist
    def _add_file(input_filelist: List[str], output_filelist: List[str], new_item):
        """Check file name confliction and add file path to the list.

        If `new_item` exists in `input_filelist` or `output_filelist`, rename it
        by add extension (e.g., ".wav") of input file name right before its
        real extension.
        """
        while (new_item in (input_filelist + output_filelist)):
            (new_item_base, new_item_ext) = os.path.splitext(new_item)
            # Disambiguate using the extension of the matching input file.
            (_, input_ext) = os.path.splitext(input_filelist[len(output_filelist)])
            new_item = ((new_item_base + input_ext) + new_item_ext)
        output_filelist.append(new_item)
    input_files = _common_process(input_files, 'input')
    output_files = _common_process(output_files, 'output')
    if (len(input_files) == 0):
        warnings.warn('No inputs.', UserWarning)
        return ([], [])
    elif (('*' not in input_files) and ('*' not in output_files)):
        # Plain path lists on both sides: just validate and return.
        return check_paths(input_files, output_files)
    # From here on at least the output side is a wildcard pattern (a str).
    assert ('*' in output_files)
    if ('*' in input_files):
        # Expand the input pattern; '' or '.*' means "any supported extension".
        (in_base, in_ext) = os.path.splitext(input_files)
        input_files = []
        for ext in (INPUT_EXTS if (in_ext in ['', '.*']) else [in_ext]):
            input_files.extend(glob.glob(f'{in_base}{ext}', recursive=True))
    # Shared directory root and filename prefix of all inputs, used to derive
    # per-input output paths below.
    input_root = os.path.commonpath([os.path.split(f)[0] for f in input_files])
    input_prefix = os.path.commonprefix([os.path.split(f)[1] for f in input_files])
    # '**' in the output pattern means: mirror the input subdirectory layout.
    keep_subdirs = ('**' in output_files)
    (output_root, output_tail) = (x.split('*', 1)[0] for x in os.path.split(output_files))
    (_, output_ext) = os.path.splitext(output_files)
    if (not output_ext):
        output_ext = '.wav'  # default output format
    output_files = []
    for in_file in input_files:
        (subdir, filename) = os.path.split(in_file)
        subdir = subdir.removeprefix(input_root).strip('/')
        if (not keep_subdirs):
            subdir = ''
        if output_tail:
            # Replace the shared input prefix with the requested output prefix.
            filename = (output_tail + filename.removeprefix(input_prefix))
        (file_base, _) = os.path.splitext(filename)
        filepath = os.path.join(output_root, subdir, f'{file_base}{output_ext}')
        _add_file(input_files, output_files, filepath)
    return check_paths(input_files, output_files)
|
class MinMaxHeap(object):
    """
    Implementation of a Min-max heap following Atkinson, Sack, Santoro, and
    Strothotte (1986): https://doi.org/10.1145/6617.6621

    Wraps the module-level heap functions (insert, peekmin/peekmax,
    removemin/removemax, replacemin/replacemax) around a backing list
    ``self.a`` whose live prefix has length ``self.size``.
    """

    def __init__(self, reserve=0):
        # Pre-allocate `reserve` slots; only a[:size] holds live elements.
        self.a = ([None] * reserve)
        self.size = 0

    def __len__(self):
        return self.size

    def __iter__(self):
        # The heap is its own iterator: iterating *drains* it via popmin.
        return self

    def __list__(self):
        # NOTE(review): not a real Python protocol method; list(heap) goes
        # through __iter__/__next__ and therefore empties the heap.
        return self.a

    def __next__(self):
        try:
            return self.popmin()
        except AssertionError:
            # removemin asserts size > 0, so an empty heap ends iteration.
            raise StopIteration

    def insert(self, key):
        '''
        Insert key into heap. Complexity: O(log(n))
        '''
        # Grow the backing list by one slot if the reserve is exhausted.
        if (len(self.a) < (self.size + 1)):
            self.a.append(key)
        insert(self.a, key, self.size)  # module-level insert()
        self.size += 1

    def peekmin(self):
        '''
        Get minimum element. Complexity: O(1)
        '''
        return peekmin(self.a, self.size)

    def peekmax(self):
        '''
        Get maximum element. Complexity: O(1)
        '''
        return peekmax(self.a, self.size)

    def popmin(self):
        '''
        Remove and return minimum element. Complexity: O(log(n))
        '''
        (m, self.size) = removemin(self.a, self.size)
        # Trim the now-unused trailing slot to keep len(a) in sync with size.
        self.a.pop((- 1))
        return m

    def popmax(self):
        '''
        Remove and return maximum element. Complexity: O(log(n))
        '''
        (m, self.size) = removemax(self.a, self.size)
        self.a.pop((- 1))
        return m

    def replacemax(self, val):
        '''
        Replace the maximum element with val, in place. Complexity: O(log(n))
        '''
        replacemax(self.a, self.size, val)

    def replacemin(self, val):
        '''
        Replace the minimum element with val, in place. Complexity: O(log(n))
        '''
        replacemin(self.a, self.size, val)
|
def level(i):
    """Return the depth of 0-based heap index ``i`` (the root is level 0)."""
    # 1-based position n is at level floor(log2(n)).
    node = i + 1
    return node.bit_length() - 1
|
def trickledown(array, i, size):
    """Dispatch to the min- or max-level trickle-down for index ``i``."""
    # Even levels hold minima, odd levels hold maxima.
    handler = trickledownmin if level(i) % 2 == 0 else trickledownmax
    handler(array, i, size)
|
def trickledownmin(array, i, size):
    """Restore the min-max heap property below min-level index ``i``.

    Sifts ``array[i]`` down among its children/grandchildren until it is no
    larger than anything beneath it (Atkinson et al., 1986, TrickleDownMin).
    """
    if size > i * 2 + 1:
        # m: index of the smallest child or grandchild of i.
        m = i * 2 + 1
        if i * 2 + 2 < size and array[i * 2 + 2] < array[m]:
            m = i * 2 + 2
        child = True
        for j in range(i * 4 + 3, min(i * 4 + 7, size)):
            if array[j] < array[m]:
                m = j
                child = False
        if child:
            # Smallest is a direct child: one swap finishes this subtree.
            if array[m] < array[i]:
                array[i], array[m] = array[m], array[i]
        elif array[m] < array[i]:
            # Smallest is a grandchild. (The previous version re-tested
            # `array[m] < array[i]` in a redundant nested if; dropped.)
            array[m], array[i] = array[i], array[m]
            # Fix the intermediate max level if the moved value exceeds it.
            if array[m] > array[(m - 1) // 2]:
                array[m], array[(m - 1) // 2] = array[(m - 1) // 2], array[m]
            trickledownmin(array, m, size)
|
def trickledownmax(array, i, size):
    """Restore the min-max heap property below max-level index ``i``.

    Mirror image of :func:`trickledownmin`: sifts ``array[i]`` down until it
    is no smaller than anything beneath it.
    """
    if size > i * 2 + 1:
        # m: index of the largest child or grandchild of i.
        m = i * 2 + 1
        if i * 2 + 2 < size and array[i * 2 + 2] > array[m]:
            m = i * 2 + 2
        child = True
        for j in range(i * 4 + 3, min(i * 4 + 7, size)):
            if array[j] > array[m]:
                m = j
                child = False
        if child:
            # Largest is a direct child: one swap finishes this subtree.
            if array[m] > array[i]:
                array[i], array[m] = array[m], array[i]
        elif array[m] > array[i]:
            # Largest is a grandchild. (The previous version re-tested
            # `array[m] > array[i]` in a redundant nested if; dropped.)
            array[m], array[i] = array[i], array[m]
            # Fix the intermediate min level if the moved value undercuts it.
            if array[m] < array[(m - 1) // 2]:
                array[m], array[(m - 1) // 2] = array[(m - 1) // 2], array[m]
            trickledownmax(array, m, size)
|
def bubbleup(array, i):
    """Sift ``array[i]`` up, switching between min and max levels as needed."""
    parent = (i - 1) // 2
    if level(i) % 2 == 0:
        # Min level: a value larger than its (max-level) parent belongs there.
        if i > 0 and array[i] > array[parent]:
            array[i], array[parent] = array[parent], array[i]
            bubbleupmax(array, parent)
        else:
            bubbleupmin(array, i)
    elif i > 0 and array[i] < array[parent]:
        # Max level: a value smaller than its (min-level) parent belongs there.
        array[i], array[parent] = array[parent], array[i]
        bubbleupmin(array, parent)
    else:
        bubbleupmax(array, i)
|
def bubbleupmin(array, i):
    """Sift ``array[i]`` up the grandparent chain of min levels."""
    # Index 0 is the only min-level node without a min-level grandparent.
    while i > 2:
        grandparent = (i - 3) // 4
        if array[i] >= array[grandparent]:
            break
        array[i], array[grandparent] = array[grandparent], array[i]
        i = grandparent
|
def bubbleupmax(array, i):
    """Sift ``array[i]`` up the grandparent chain of max levels."""
    while i > 2:
        grandparent = (i - 3) // 4
        if array[i] <= array[grandparent]:
            break
        array[i], array[grandparent] = array[grandparent], array[i]
        i = grandparent
|
def peekmin(array, size):
    """Return (without removing) the minimum: the root of a min-max heap.

    The ``assert`` doubles as the empty-heap signal: MinMaxHeap.__next__
    relies on AssertionError here, so it must not become another exception.
    """
    assert size > 0
    return array[0]
|
def peekmax(array, size):
    """Return (without removing) the maximum of a min-max heap.

    The maximum is the root (size 1) or the larger of indices 1-2, the first
    max level. AssertionError on empty is relied upon by MinMaxHeap.__next__.
    """
    assert size > 0
    max_level = array[1:min(size, 3)]
    return max(max_level) if max_level else array[0]
|
def removemin(array, size):
    """Pop the minimum. Returns ``(min_element, new_size)``; caller trims ``array``."""
    assert size > 0
    smallest = array[0]
    # Move the last live element to the root and restore the heap property.
    array[0] = array[size - 1]
    trickledown(array, 0, size - 1)
    return (smallest, size - 1)
|
def removemax(array, size):
    """Pop the maximum. Returns ``(max_element, new_size)``; caller trims ``array``."""
    assert size > 0
    if size < 3:
        # Root (size 1) or the sole max-level node (size 2) is the maximum.
        return (array[size - 1], size - 1)
    # The maximum is the larger of indices 1 and 2 on the first max level.
    i = 1 if array[1] > array[2] else 2
    largest = array[i]
    array[i] = array[size - 1]
    trickledown(array, i, size - 1)
    return (largest, size - 1)
|
def replacemax(array, size, val):
    """Replace the maximum element with ``val`` in place."""
    assert size > 0
    if size == 1:
        array[0] = val
        return
    if size == 2:
        array[1] = val
        bubbleup(array, 1)
        return
    # Overwrite the max with the last slot's value, sift it down over the
    # full size, then place val in the freed last slot and bubble it up.
    i = 1 if array[1] > array[2] else 2
    array[i] = array[size - 1]
    trickledown(array, i, size)
    array[size - 1] = val
    bubbleup(array, size - 1)
|
def replacemin(array, size, val):
    """Replace the minimum element (the root) with ``val`` in place."""
    assert size > 0
    array[0] = val
    trickledown(array, 0, size)
    # The previous version ended with
    #     assert minmaxheapproperty(array, len(array))
    # -- a leftover debug check. It scanned len(array) instead of `size`, so
    # stale slots past the live region could trip it spuriously, and it made
    # every replacemin O(n). Dropped for parity with replacemax.
|
def insert(array, k, size):
    """Place ``k`` at index ``size`` and bubble it up to its proper level.

    ``array`` must already have a free slot at index ``size``; the caller
    (e.g. MinMaxHeap.insert) is responsible for growing the list first.
    """
    array[size] = k
    bubbleup(array, size)
|
def minmaxheapproperty(array, size):
    """Check the min-max heap invariant over ``array[:size]`` (debug helper).

    Min-level nodes must be <= all children/grandchildren; max-level nodes
    must be >= them. Prints the offending pair and returns False on the
    first violation, True otherwise.
    """
    for i, k in enumerate(array[:size]):
        on_min_level = level(i) % 2 == 0
        # Children occupy 2i+1..2i+2, grandchildren 4i+3..4i+6.
        descendants = list(range(2 * i + 1, min(2 * i + 3, size)))
        descendants += list(range(4 * i + 3, min(4 * i + 7, size)))
        for j in descendants:
            violated = array[j] < k if on_min_level else array[j] > k
            if violated:
                print(array, j, i, array[j], array[i], level(i))
                return False
    return True
|
def test(n):
    """Randomized stress test of the module-level heap functions.

    Inserts n random keys while mirroring them in a plain list ``l``, then
    drains the heap from alternating ends, re-checking the invariant and the
    min/max agreement with the mirror after every operation.
    """
    from random import randint
    a = ([(- 1)] * n)  # pre-sized backing array; -1 marks unused slots
    l = []
    size = 0
    for _ in range(n):
        x = randint(0, (5 * n))
        insert(a, x, size)
        size += 1
        l.append(x)
        assert minmaxheapproperty(a, size)
    assert (size == len(l))
    print(a)
    while (size > 0):
        assert (min(l) == peekmin(a, size))
        assert (max(l) == peekmax(a, size))
        if randint(0, 1):
            (e, size) = removemin(a, size)
            assert (e == min(l))
        else:
            (e, size) = removemax(a, size)
            assert (e == max(l))
        # Remove e from the mirror list: swap-with-last, then pop.
        l[l.index(e)] = l[(- 1)]
        l.pop((- 1))
        assert (len(a[:size]) == len(l))
        assert minmaxheapproperty(a, size)
    print('OK')
|
def test_heap(n):
    """Randomized stress test of the MinMaxHeap class (mirrors ``test``)."""
    from random import randint
    heap = MinMaxHeap(n)  # reserve n slots up front
    l = []
    for _ in range(n):
        x = randint(0, (5 * n))
        heap.insert(x)
        l.append(x)
        assert minmaxheapproperty(heap.a, len(heap))
    assert (len(heap) == len(l))
    print(heap.a)
    while (len(heap) > 0):
        assert (min(l) == heap.peekmin())
        assert (max(l) == heap.peekmax())
        if randint(0, 1):
            e = heap.popmin()
            assert (e == min(l))
        else:
            e = heap.popmax()
            assert (e == max(l))
        # Remove e from the mirror list: swap-with-last, then pop.
        l[l.index(e)] = l[(- 1)]
        l.pop((- 1))
        assert (len(heap) == len(l))
        assert minmaxheapproperty(heap.a, len(heap))
    print('OK')
|
class PointerQueue(object):
    """Queue mapping keys to positions (SortedDict) plus a pointer list.

    ``pointer[value] = key`` lets entries be popped by positional index as
    well as by key. Iterating the queue yields keys; calling ``next()`` on it
    pops (key, value) items in sorted-key order.
    """

    def __init__(self, initial, reserve=0):
        # Each key maps to its original position in `initial`.
        self.queue = SortedDict(zip(initial, range(len(initial))))
        self.pointer = initial
        # Pre-allocate slots so __setitem__ may write beyond len(initial).
        self.pointer.extend([None] * reserve)

    def __len__(self):
        return len(self.queue)

    def __iter__(self):
        return iter(self.queue)

    def __list__(self):
        # NOTE(review): not a real Python protocol method.
        return self.pointer

    def __setitem__(self, key, value):
        self.queue[key] = value
        self.pointer[value] = key

    def __next__(self):
        try:
            return self.queue.popitem()
        except (AssertionError, KeyError):
            # SortedDict.popitem raises KeyError when empty; the previous
            # version caught only AssertionError, so draining the queue
            # leaked a KeyError instead of ending iteration.
            raise StopIteration

    def __bool__(self):
        # Truthiness = non-empty. __nonzero__ was the Python 2 spelling and
        # is never invoked by Python 3; aliased for backward compatibility.
        return bool(self.queue)

    __nonzero__ = __bool__

    def pop(self, key, **kwargs):
        return self.queue.pop(key, **kwargs)

    def popindex(self, index, **kwargs):
        # Resolve a positional index to its key, then pop by key.
        return self.queue.pop(self.pointer[index], **kwargs)
|
class SGNMTPrompt(Cmd):
    """Interactive SGNMT shell: translate sentences and tweak configuration."""

    def default(self, cmd_args):
        """Translate a single sentence."""
        decode_utils.do_decode(decoder, outputs, [cmd_args.strip()])

    def emptyline(self):
        # Do nothing on empty input (Cmd's default repeats the last command).
        pass

    def do_translate(self, cmd_args):
        """Translate a single sentence."""
        decode_utils.do_decode(decoder, outputs, [cmd_args.strip()])

    def do_diagnostics(self, cmd_args):
        """Run diagnostics to check which external libraries are
        available to SGNMT."""
        run_diagnostics()

    def do_config(self, cmd_args):
        """Change SGNMT configuration. Syntax: 'config <key> <value>.
        For most configuration changes the decoder needs to be
        rebuilt.
        """
        global outputs, decoder, args
        split_args = cmd_args.split()
        if len(split_args) < 2:
            print("Syntax: 'config <key> <new-value>'")
            return
        key, val = split_args[0], ' '.join(split_args[1:])
        # Coerce the value: int, then float, then booleans, else raw string.
        # The previous bare `except:` clauses also swallowed SystemExit and
        # KeyboardInterrupt; int()/float() only raise ValueError here.
        try:
            val = int(val)
        except ValueError:
            try:
                val = float(val)
            except ValueError:
                if val == 'true':
                    val = True
                elif val == 'false':
                    val = False
        setattr(args, key, val)
        print('Setting %s=%s...' % (key, val))
        outputs = decode_utils.create_output_handlers()
        # Vocabulary / text-processing keys only require re-initializing IO;
        # most other keys require rebuilding the decoder.
        if key in ['wmap', 'src_wmap', 'trg_wmap', 'preprocessing', 'postprocessing', 'bpe_codes']:
            io_utils.initialize(args)
        elif not (key in ['outputs', 'output_path']):
            decoder = decode_utils.create_decoder()

    def do_quit(self, cmd_args):
        """Quits SGNMT."""
        raise SystemExit

    def do_EOF(self, line):
        """Quits SGNMT"""
        print('quit')
        return True
|
def base_init(new_args):
    """This function should be called before accessing any other
    function in this module. It initializes the `args` variable on
    which all the create_* factory functions rely on as configuration
    object, and it sets up global function pointers and variables for
    basic things like the indexing scheme, logging verbosity, etc.

    Args:
        new_args: Configuration object from the argument parser.
    """
    global args
    args = new_args
    if sys.version_info < (3, 0):
        # Python 2: force UTF-8 text streams.
        sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
        sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
        sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning('Library is tested with Python 3, but you are using Python 2. Expect the unexpected or switch to >3.5.')
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
    # Map the configured verbosity onto the root logger level (default INFO).
    logging.getLogger().setLevel(logging.INFO)
    level_by_name = {'debug': logging.DEBUG, 'info': logging.INFO, 'warn': logging.WARN, 'error': logging.ERROR}
    if args.verbosity in level_by_name:
        logging.getLogger().setLevel(level_by_name[args.verbosity])
    utils.switch_to_fairseq_indexing()
    ui.validate_args(args)
    if args.run_diagnostics:
        ui.run_diagnostics()
        sys.exit()
|
def add_predictor(decoder):
    """Adds all enabled predictors to the ``decoder``. This function
    makes heavy use of the global ``args`` which contains the
    SGNMT configuration. Particularly, it reads out ``args.predictors``
    and adds appropriate instances to ``decoder``.

    Args:
        decoder (Decoder): Decoding strategy, see ``create_decoder()``.
            This method will add predictors to this instance with
            ``add_predictor()``
    """
    preds = utils.split_comma(args.predictor)
    if not preds:
        logging.fatal('Require at least one predictor! See the --predictors argument for more information.')
    if len(preds) > 1:
        logging.fatal('Only 1 predictor supported at the moment')
    pred = preds[0]
    try:
        predictor = predictors.PREDICTOR_REGISTRY[pred](args)
        decoder.add_predictor(pred, predictor)
        logging.info('Initialized predictor {}'.format(pred))
    # On any setup failure, log the cause and clear the partially-configured
    # predictors. All branches now call remove_predictors() -- previously
    # four of the five called a non-matching singular remove_predictor().
    except IOError as e:
        logging.fatal(('One of the files required for setting up the predictors could not be read: %s' % e))
        decoder.remove_predictors()
    except AttributeError as e:
        # Space added after '%s.' -- the message previously ran the sentences
        # together ("...: %s.Stack trace: ...").
        logging.fatal(('Invalid argument for one of the predictors: %s. Stack trace: %s' % (e, traceback.format_exc())))
        decoder.remove_predictors()
    except NameError as e:
        logging.fatal(('Could not find external library: %s. Please make sure that your PYTHONPATH and LD_LIBRARY_PATH contains all paths required for the predictors. Stack trace: %s' % (e, traceback.format_exc())))
        decoder.remove_predictors()
    except ValueError as e:
        logging.fatal(('A number format error occurred while configuring the predictors: %s. Please double-check all integer- or float-valued parameters such as --predictor_weights and try again. Stack trace: %s' % (e, traceback.format_exc())))
        decoder.remove_predictors()
    except Exception as e:
        logging.fatal(('An unexpected %s has occurred while setting up the predictors: %s Stack trace: %s' % (sys.exc_info()[0], e, traceback.format_exc())))
        decoder.remove_predictors()
|
def create_decoder():
    """Instantiate the search strategy named by the global ``args.decoder``.

    Returns:
        Decoder: the configured search strategy with predictors attached.
    """
    try:
        new_decoder = decoding.DECODER_REGISTRY[args.decoder](args)
    except Exception as e:
        # Any construction failure is fatal: log the cause and exit.
        logging.fatal(('An %s has occurred while initializing the decoder: %s Stack trace: %s' % (sys.exc_info()[0], e, traceback.format_exc())))
        sys.exit('Could not initialize decoder.')
    add_predictor(new_decoder)
    return new_decoder
|
def create_output_handlers():
    """Create the output handlers defined in the ``io`` module.

    These handlers create output files in different formats from the decoding
    results, one per format named in the global ``args.outputs``.

    Returns:
        list: output handler instances (empty when --outputs is unset).
    """
    if not args.outputs:
        return []
    handlers = []
    for name in utils.split_comma(args.outputs):
        # A '%s' placeholder in the output path is substituted with the format name.
        path = args.output_path % name if '%s' in args.output_path else args.output_path
        try:
            handlers.append(output.OUTPUT_REGISTRY[name](path, args))
        except KeyError:
            logging.fatal(('Output format %s not available. Please double-check the --outputs parameter.' % name))
    return handlers
|
def get_sentence_indices(range_param, src_sentences):
    """Helper generator for ``do_decode`` yielding the 0-based indices
    of the sentences to decode.

    BUGFIX: the original body ignored the ``range_param`` argument and
    read the global ``args.range`` instead. The only caller passes
    ``args.range``, so using the parameter is behavior-preserving and
    makes the function self-contained.

    Args:
        range_param (string): ``--range`` parameter from config. Either
            a single 1-based index ('3'), a 1-based inclusive range
            ('2:5'), or the path of a file used as a work queue shared
            between processes.
        src_sentences (list): A list of strings. The strings are the
            source sentences with word indices to translate
            (e.g. '1 123 432 2'). ``False`` for dummy input.

    Yields:
        int. 0-based sentence indices.
    """
    ids = []
    if range_param:
        try:
            if ':' in range_param:
                from_idx, to_idx = range_param.split(':')
            else:
                from_idx = int(range_param)
                to_idx = from_idx
            # --range is 1-based inclusive; convert to 0-based range.
            ids = range(int(from_idx) - 1, int(to_idx))
        except Exception as e:
            # Not numeric: treat --range as a shared work-queue file.
            logging.info('The --range does not seem to specify a numerical range (%s). Interpreting as file name..' % e)
            tmp_path = '%s/sgnmt-tmp.%s' % (os.path.dirname(range_param), uuid.uuid4())
            logging.debug('Temporary range file: %s' % tmp_path)
            while True:
                try:
                    # Atomically claim the queue file by renaming it, pop the
                    # first ID, write the rest back, then restore the name.
                    os.rename(range_param, tmp_path)
                    with open(tmp_path) as tmp_f:
                        all_ids = [i.strip() for i in tmp_f]
                    next_id = None
                    if all_ids:
                        next_id = all_ids[0]
                        all_ids = all_ids[1:]
                    with open(tmp_path, 'w') as tmp_f:
                        tmp_f.write('\n'.join(all_ids))
                    os.rename(tmp_path, range_param)
                    if next_id is None:
                        return
                    logging.debug('Fetched ID %s and updated %s' % (next_id, range_param))
                    yield int(next_id) - 1
                except Exception as e:
                    # Another process holds the file; retry after a pause.
                    logging.debug('Could not fetch sentence ID from %s (%s). Trying again in 2 seconds...' % (range_param, e))
                    time.sleep(2)
    elif src_sentences is False:
        logging.fatal('Input method dummy requires --range')
    else:
        ids = range(len(src_sentences))
    for i in ids:
        yield i
|
def _get_text_output_handler(output_handlers):
    """Return the first text-style output handler, or None if absent."""
    text_like = (output.TextOutputHandler, output.NBestSeparateOutputHandler)
    for handler in output_handlers:
        if isinstance(handler, text_like):
            return handler
    return None
|
def _get_score_output_handler(output_handlers):
    """Return the score output handler if in output_handlers, or None."""
    return next(
        (h for h in output_handlers if isinstance(h, output.ScoreOutputHandler)),
        None)
|
def _postprocess_complete_hypos(hypos):
    """Post-process the complete hypotheses returned by the Decoder.

    - Strips a trailing </s> from each sentence when --remove_eos is set.
    - Truncates the list to --nbest entries when --nbest is positive.

    Args:
        hypos (list): List of complete hypotheses.

    Returns:
        list. Postprocessed hypotheses.
    """
    if args.remove_eos:
        for hypo in hypos:
            sent = hypo.trgt_sentence
            if sent and sent[-1] == utils.EOS_ID:
                hypo.trgt_sentence = sent[:-1]
    return hypos[:args.nbest] if args.nbest > 0 else hypos
|
def _generate_dummy_hypo():
    """Fallback hypothesis: a single UNK token with zero score."""
    dummy = decoding.core.Hypothesis([utils.UNK_ID], 0.0, [0.0])
    return dummy
|
def do_decode(decoder, output_handlers, src_sentences, trgt_sentences=None, num_log=1):
    """Main decoding loop.

    Iterates through ``src_sentences``, applies ``decoder.decode()`` to
    each of them, and at the end calls the output handlers to create
    output files.

    BUGFIX: the I/O-error log messages used '%d' with the exception
    type from ``sys.exc_info()[0]`` (a class, not an int), which raised
    TypeError during message formatting inside the except handlers.
    They now use '%s', consistent with the final handler.

    Args:
        decoder (Decoder): Current decoder instance
        output_handlers (list): List of output handlers, see
                                ``create_output_handlers()``
        src_sentences (list): A list of strings. The strings are the
                              source sentences with word indices to
                              translate (e.g. '1 123 432 2')
        trgt_sentences (list): Optional target sentences for
                               constrained decoding, one per source.
        num_log (int): Number of best hypotheses to log per sentence.
    """
    if not decoder.has_predictor():
        logging.fatal('Terminated due to an error in the predictor configuration.')
        return
    all_hypos = []
    text_output_handler = _get_text_output_handler(output_handlers)
    if text_output_handler:
        text_output_handler.open_file()
    score_output_handler = _get_score_output_handler(output_handlers)
    start_time = time.time()
    logging.info('Start time: %s' % start_time)
    sen_indices = []
    diversity_metrics = []
    not_full = 0
    for sen_idx in get_sentence_indices(args.range, src_sentences):
        decoder.set_current_sen_id(sen_idx)
        try:
            src = '0' if src_sentences is False else src_sentences[sen_idx]
            # Guard against pathologically long inputs.
            if len(src.split()) > 1000:
                print('Skipping ID', str(sen_idx), '. Too long...')
                continue
            src_print = io_utils.src_sentence(src)
            logging.info('Next sentence (ID: %d): %s' % ((sen_idx + 1), src_print))
            src = io_utils.encode(src)
            start_hypo_time = time.time()
            decoder.apply_predictor_count = 0
            if trgt_sentences:
                hypos = decoder.decode(src, io_utils.encode_trg(trgt_sentences[sen_idx]))
            else:
                hypos = decoder.decode(src)
            if not hypos:
                logging.error('No translation found for ID %d!' % (sen_idx + 1))
                logging.info('Stats (ID: %d): score=<not-found> num_expansions=%d time=%.2f' % ((sen_idx + 1), decoder.apply_predictor_count, (time.time() - start_hypo_time)))
                hypos = [_generate_dummy_hypo()]
            hypos = _postprocess_complete_hypos(hypos)
            for logged_hypo in hypos[:num_log]:
                logging.info('Decoded (ID: %d): %s' % ((sen_idx + 1), io_utils.decode(logged_hypo.trgt_sentence)))
                logging.info('Stats (ID: %d): score=%f num_expansions=%d time=%.2f perplexity=%.2f' % ((sen_idx + 1), logged_hypo.total_score, decoder.apply_predictor_count, (time.time() - start_hypo_time), utils.perplexity(logged_hypo.score_breakdown)))
                if logged_hypo.statistics:
                    logging.info('UID Stats (ID: %d): variance=%.3f local variance=%.3f greedy=%.3f squared=%.3f max=%.3f' % ((sen_idx + 1), logged_hypo.statistics.variance(ddof=0), logged_hypo.statistics.local_variance(ddof=0), logged_hypo.statistics.max_offset(), logged_hypo.statistics.squares(), (- logged_hypo.statistics.minimum())))
                if score_output_handler:
                    try:
                        score_output_handler.write_score(logged_hypo.score_breakdown)
                    except IOError as e:
                        # was '%d' for the exception type -> TypeError
                        logging.error('I/O error %s occurred when creating output files: %s' % (sys.exc_info()[0], e))
            if decoder.nbest > 1:
                diversity_score = utils.ngram_diversity([io_utils.decode(h.trgt_sentence) for h in hypos])
                logging.info('Diversity: score=%f ' % diversity_score)
                diversity_metrics.append(diversity_score)
                if len(hypos) < decoder.nbest:
                    not_full += 1
            all_hypos.append(hypos)
            sen_indices.append(sen_idx)
            try:
                if text_output_handler:
                    text_output_handler.write_hypos([hypos])
            except IOError as e:
                # was '%d' for the exception type -> TypeError
                logging.error('I/O error %s occurred when creating output files: %s' % (sys.exc_info()[0], e))
        except ValueError as e:
            logging.error('Number format error at sentence id %d: %s, Stack trace: %s' % ((sen_idx + 1), e, traceback.format_exc()))
        except AttributeError as e:
            logging.fatal('Attribute error at sentence id %d: %s. This often indicates an error in the predictor configuration which could not be detected in initialisation. Stack trace: %s' % ((sen_idx + 1), e, traceback.format_exc()))
        except Exception as e:
            logging.error('An unexpected %s error has occurred at sentence id %d: %s, Stack trace: %s' % (sys.exc_info()[0], (sen_idx + 1), e, traceback.format_exc()))
            try:
                # Emit a dummy hypothesis so the output file stays aligned
                # with the input sentence numbering.
                if text_output_handler:
                    hypos = [_generate_dummy_hypo()]
                    text_output_handler.write_hypos([hypos])
            except IOError as e:
                # was '%d' for the exception type -> TypeError
                logging.error('I/O error %s occurred when creating output files: %s' % (sys.exc_info()[0], e))
    logging.info('Decoding finished. Time: %.2f' % (time.time() - start_time))
    if decoder.nbest > 1:
        print(diversity_metrics)
        print('Total not full:', str(not_full))
    try:
        for output_handler in output_handlers:
            if output_handler == text_output_handler:
                output_handler.close_file()
            else:
                output_handler.write_hypos(all_hypos, sen_indices)
    except IOError as e:
        logging.error('I/O error %s occurred when creating output files: %s' % (sys.exc_info()[0], e))
|
class BeamDecoder(Decoder):
    """Standard beam search and several variants of it such as
    diversity-promoting beam search and beam search with heuristic
    future cost estimates. This implementation supports risk-free
    pruning.
    """
    name = 'beam'

    def __init__(self, decoder_args):
        """Creates a new beam decoder instance.

        Args:
            decoder_args: Decoder configuration object.
        """
        super(BeamDecoder, self).__init__(decoder_args)
        self.nbest = max(1, decoder_args.nbest)
        # Under Gumbel sampling the beam width equals nbest.
        self.beam_size = self.nbest if self.gumbel else decoder_args.beam
        self.stop_criterion = (self._best_eos if decoder_args.early_stopping
                               else self._all_eos)

    def _best_eos(self, hypos):
        """True iff no live hypothesis can still beat the current best
        complete hypothesis (early stopping)."""
        if not self.full_hypos:
            return False
        best = self.cur_best.total_score
        return all(self.get_max_pos_score(h) < best for h in hypos)

    def _all_eos(self, hypos):
        """True iff every hypothesis in the beam ends with </S>."""
        return all(h.get_last_word() == utils.EOS_ID for h in hypos)

    def _get_next_hypos(self, all_hypos, all_scores):
        """Keep the beam_size highest-scoring candidates."""
        return [all_hypos[i] for i in utils.argmax_n(all_scores, self.beam_size)]

    def _get_initial_hypos(self):
        """A single empty hypothesis holding the initial predictor states."""
        return [PartialHypothesis(self.get_predictor_states(), self.calculate_stats)]

    def decode(self, src_sentence):
        """Decodes a single source sentence using beam search."""
        self.count = 0
        self.time = 0
        self.initialize_predictor(src_sentence)
        hypos = self._get_initial_hypos()
        it = 0
        while not self.stop_criterion(hypos) and it < self.max_len:
            it += 1
            candidates = []
            scores = []
            for hypo in hypos:
                if hypo.get_last_word() == utils.EOS_ID:
                    # Finished hypotheses compete for beam slots unchanged.
                    candidates.append(hypo)
                    scores.append(self.get_adjusted_score(hypo))
                else:
                    for child in self._expand_hypo(hypo, self.beam_size):
                        candidates.append(child)
                        scores.append(self.get_adjusted_score(child))
            hypos = self._get_next_hypos(candidates, scores)
        return self.get_full_hypos_sorted(hypos)
|
class DiverseBeamDecoder(BeamDecoder):
    """Diversity promoting beam search, Vijayakumar et. al. (2016).

    The beam is partitioned into groups. Within a group hypotheses
    compete as in standard beam search, but each group's candidate
    scores are penalized by their Hamming similarity to the hypotheses
    already selected for the preceding groups.
    """
    name = 'diverse_beam'
    def __init__(self, decoder_args):
        super(DiverseBeamDecoder, self).__init__(decoder_args)
        # Gumbel sampling is not supported by this decoder.
        assert (not self.gumbel)
        self.beam_size = decoder_args.beam
        self.num_groups = decoder_args.diversity_groups
        self.lmbda = decoder_args.diversity_reward
        # Split the beam as evenly as possible over the groups; the first
        # (beam_size mod num_groups) groups get one extra slot.
        self.group_sizes = ([(self.beam_size // self.num_groups)] * self.num_groups)
        for i in range((self.beam_size - (self.group_sizes[0] * self.num_groups))):
            self.group_sizes[i] += 1
        assert (sum(self.group_sizes) == self.beam_size)
    def _get_initial_hypos(self):
        """Get the list of initial ``PartialHypothesis``: one singleton
        list per group, each holding its own deep copy of the initial
        predictor states."""
        return [[PartialHypothesis(copy.deepcopy(self.get_predictor_states()), self.calculate_stats)] for i in range(self.num_groups)]
    def _get_next_hypos(self, all_hypos, size, other_groups=None):
        """Select the ``size`` best hypotheses for one group, applying the
        diversity penalty against ``other_groups`` when given."""
        all_scores = np.array([self.get_adjusted_score(hypo) for hypo in all_hypos])
        if other_groups:
            # lmbda-weighted Hamming-distance term rewards differing from
            # the hypotheses already chosen for earlier groups.
            all_scores = (all_scores + (self.lmbda * self.hamming_distance_penalty(all_hypos, utils.flattened(other_groups))))
        inds = utils.argmax_n(all_scores, size)
        return [all_hypos[ind] for ind in inds]
    def decode(self, src_sentence):
        """Decodes a single source sentence using group-wise beam search."""
        self.count = 0
        self.time = 0
        self.initialize_predictor(src_sentence)
        hypos = self._get_initial_hypos()
        it = 1
        while ((not self.stop_criterion(utils.flattened(hypos))) and (it < self.max_len)):
            it = (it + 1)
            next_hypos = []
            for (i, group) in enumerate(hypos):
                next_group = []
                for hypo in group:
                    # Finished hypotheses are carried over unchanged.
                    if (hypo.get_last_word() == utils.EOS_ID):
                        next_group.append(hypo)
                        continue
                    for next_hypo in self._expand_hypo(hypo):
                        next_group.append(next_hypo)
                # Groups processed earlier in this iteration (already in
                # next_hypos) drive the diversity penalty for this group.
                next_hypos.append(self._get_next_hypos(next_group, self.group_sizes[i], next_hypos))
            hypos = next_hypos
        return self.get_full_hypos_sorted(utils.flattened(hypos))
    @staticmethod
    def hamming_distance_penalty(set1, set2):
        """Per-hypothesis Hamming distance of set1 members to set2, after
        padding all hypotheses to the length of the longest one."""
        longest_hypo = len(max((set1 + set2), key=len))
        hypos = utils.as_ndarray(set1, min_length=longest_hypo)
        other_hypos = utils.as_ndarray(set2, min_length=longest_hypo)
        return np.apply_along_axis((lambda x: utils.hamming_distance(x, other_hypos)), 1, hypos)
    @staticmethod
    def add_args(parser):
        """Register this decoder's command line arguments."""
        parser.add_argument('--diversity_groups', default=1, type=int, help="If this is greater than one, promote diversity between groups of hypotheses as in Vijayakumar et. al. (2016). Only compatible with 'diverse_beam' decoder. They found diversity_groups = beam size to be most effective.")
        parser.add_argument('--diversity_reward', default=0.5, type=float, help="If this is greater than zero, add reward for diversity between groups as in Vijayakumar et. al. (2016). Only compatible with 'diverse_beam' decoder. Setting value equal to 0 recovers standard beam search.")
|
class Hypothesis():
    """Complete translation hypothesis.

    Stores the produced sentence, the combined score, a score breakdown
    into the separate predictor scores, the base (unadjusted) score,
    and optional running score statistics.
    """
    def __init__(self, trgt_sentence, total_score, score_breakdown=None, base_score=0.0, statistics=None):
        """Creates a new complete hypothesis.

        Args:
            trgt_sentence (list): Target word ids.
            total_score (float): Combined hypothesis score.
            score_breakdown (list): Per-step predictor scores. Defaults
                to a fresh empty list.
            base_score (float): Unadjusted base score.
            statistics: Optional score statistics object.
        """
        self.trgt_sentence = trgt_sentence
        self.total_score = total_score
        # BUGFIX: the default used to be a shared mutable list ([]), so
        # appending to one hypothesis's breakdown mutated the breakdown of
        # every other hypothesis constructed without an explicit one.
        self.score_breakdown = [] if score_breakdown is None else score_breakdown
        self.base_score = base_score
        self.statistics = statistics
    def __repr__(self):
        """Returns a string representation of this hypothesis."""
        return '%s (%f)' % (' '.join(str(w) for w in self.trgt_sentence), self.total_score)
    def __len__(self):
        return len(self.trgt_sentence)
    def __lt__(self, other):
        return self.total_score < other.total_score
|
class PartialHypothesis(object):
    """Represents a partial hypothesis in various decoders.

    Carries the translation prefix, its cumulative score, the per-step
    score breakdown, the predictor states, and (optionally) running
    score statistics used by the score regularizers.
    """
    def __init__(self, initial_states=None, use_stats=True):
        """Creates a new empty partial hypothesis.

        Args:
            initial_states: Initial predictor states (opaque object).
            use_stats (bool): Whether to track per-step score statistics.
        """
        self.predictor_states = initial_states
        self.trgt_sentence = []
        self.score, self.base_score = 0.0, 0.0
        self.score_breakdown = []
        # If set, this token was appended to trgt_sentence but has not yet
        # been fed to the predictor (lazy consumption; see cheap_expand()).
        self.word_to_consume = None
        if use_stats:
            # BUGFIX: the original expression
            #   (Statistics() and self.statistics.push(0, 0))
            # read self.statistics before it was ever assigned, raising
            # AttributeError whenever the fresh Statistics object is truthy.
            # Create the object first; the truthiness guard preserves the
            # original short-circuit semantics for a falsy Statistics.
            stats = Statistics()
            if stats:
                stats.push(0, 0)
            self.statistics = stats
        else:
            self.statistics = None
    def __repr__(self):
        """Returns a string representation of this hypothesis."""
        return '%s (%f)' % (' '.join(str(w) for w in self.trgt_sentence), self.score)
    def __len__(self):
        return len(self.trgt_sentence)
    def __lt__(self, other):
        return self.score < other.score
    def __add__(self, other):
        return self.trgt_sentence + other
    def get_last_word(self):
        """Get the last word in the translation prefix, or None if empty."""
        if not self.trgt_sentence:
            return None
        return self.trgt_sentence[-1]
    def generate_full_hypothesis(self):
        """Create a ``Hypothesis`` instance from this hypothesis."""
        return Hypothesis(self.trgt_sentence, self.score, self.score_breakdown, self.base_score, self.statistics)
    def _new_partial_hypo(self, states, word, score, base_score=None, breakdown=None, cur_max=None):
        """Build the child hypothesis shared by expand()/cheap_expand()."""
        new_hypo = PartialHypothesis(states, use_stats=False)
        new_hypo.score = score
        new_hypo.base_score = base_score
        new_hypo.score_breakdown = copy.copy(self.score_breakdown)
        new_hypo.score_breakdown.append(breakdown if breakdown is not None else score)
        new_hypo.trgt_sentence = self.trgt_sentence + [word]
        if self.statistics is not None:
            new_hypo.statistics = self.statistics.copy()
            new_hypo.statistics.push(new_hypo.score_breakdown[-1], cur_max)
        return new_hypo
    def expand(self, word, new_states, score, score_breakdown):
        """Create a new partial hypothesis with updated predictor states.

        NOTE(review): ``score_breakdown`` is passed positionally into the
        ``base_score`` slot of ``_new_partial_hypo``, not into
        ``breakdown``. This looks accidental but is preserved to avoid
        changing caller-visible behavior — confirm intent with callers.
        """
        return self._new_partial_hypo(new_states, word, score, score_breakdown)
    def cheap_expand(self, word, score, base_score=None, breakdown=None, states=None, cur_max=None):
        """Creates a new partial hypothesis adding a new word to the
        translation prefix with given probability. Does NOT update the
        predictor states but adds a flag which signals that the last
        word in this hypothesis has not been consumed yet by the
        predictors. This can save memory because we can reuse the
        current state for many hypotheses. It also saves computation
        as we do not consume words which are then discarded anyway by
        the search procedure.
        """
        hypo = self._new_partial_hypo(states, int(word), score, base_score=base_score, breakdown=breakdown, cur_max=cur_max)
        hypo.word_to_consume = int(word)
        return hypo
    def get_score_variance(self, val=None):
        # With val: variance if the prefix were extended by score val.
        if val is not None:
            return self.statistics.pos_variance(val, ddof=0)
        return self.statistics.variance(ddof=0)
    def get_score_max(self, val=None):
        # Negated minimum, i.e. the magnitude of the worst step score.
        if val is not None:
            return -self.statistics.pos_minimum(val)
        return -self.statistics.minimum()
    def get_local_variance(self, val=None):
        if val is not None:
            return self.statistics.pos_local_variance(val, ddof=0)
        return self.statistics.local_variance(ddof=0)
    def get_score_greedy(self, val=None):
        # val is a (step_score, current_max) pair for hypothetical extension.
        if val is not None:
            val, cur_max = val
            return self.statistics.pos_max_offset(val, cur_max)
        return self.statistics.max_offset()
    def get_squares(self, val=None):
        if val is not None:
            return self.statistics.pos_squares(val)
        return self.statistics.squares()
|
class Decoder(object):
    """A ``Decoder`` instance represents a particular search strategy
    such as A*, beam search, greedy search etc. Decisions are made
    based on the outputs of one or many predictors, which are
    maintained by the ``Decoder`` instance.

    Decoders are observable. They fire notifications after
    apply_predictors has been called.
    """
    def __init__(self, decoder_args):
        """Initializes the decoder instance with no predictors.

        Args:
            decoder_args: Configuration object (parsed command line
                arguments) providing the attributes read below.
        """
        super(Decoder, self).__init__()
        self.max_len_factor = decoder_args.max_len_factor
        self.predictor = None
        self.predictor_names = []
        self.gumbel = decoder_args.gumbel
        self.seed = decoder_args.seed
        self.allow_unk_in_output = decoder_args.allow_unk_in_output
        self.nbest = 1
        self.combine_posteriors = self._combine_posteriors_simple
        self.current_sen_id = -1
        self.apply_predictor_count = 0
        self.temperature = decoder_args.temperature
        self.add_incomplete = decoder_args.add_incomplete
        # Score statistics are unavailable under Gumbel stochastic decoding.
        self.calculate_stats = (not decoder_args.no_statistics) and (not self.gumbel)
        self.length_norm = decoder_args.length_normalization
        self.variance_reg = decoder_args.variance_regularizer
        self.local_variance_reg = decoder_args.local_variance_regularizer
        self.max_reg = decoder_args.max_regularizer
        self.greedy_reg = decoder_args.greedy_regularizer
        self.square_reg = decoder_args.square_regularizer
        # Any active regularizer or length normalization makes the
        # hypothesis score non-monotonic in its length.
        self.not_monotonic = any([self.variance_reg, self.local_variance_reg,
                                  self.max_reg, self.greedy_reg,
                                  self.square_reg, self.length_norm])
        if (any([self.variance_reg, self.local_variance_reg, self.max_reg,
                 self.greedy_reg, self.square_reg])
                and not self.calculate_stats):
            logging.fatal('Must use statistics with regularizers. Cannot use with Gumbel stochastic decoding')
            sys.exit(1)
    @staticmethod
    def add_args(parser):
        """Add task-specific arguments to the parser."""
        pass
    def is_deterministic(self):
        """Only Gumbel decoding is stochastic; everything else is not."""
        return not self.gumbel
    def get_inclusion_prob_estimate(self, src, hypo, kau=None, **kwargs):
        """Estimate of the (log) inclusion probability of ``hypo``.

        For Gumbel decoding this uses a truncated series expansion in
        ``zet = exp(base_score - kau)``; otherwise the hypothesis
        total score is returned unchanged.
        """
        if self.gumbel:
            assert kau is not None
            zet = np.exp(hypo.base_score - kau)
            return (((hypo.base_score - kau) - (zet / 2))
                    + ((zet ** 2) / 24) - ((zet ** 4) / 2880))
        return hypo.total_score
    def add_predictor(self, name, predictor):
        """Sets the predictor used to predict the next target word
        (see ``predict_next``).

        Args:
            name (string): Predictor name like 'nmt' or 'fst'
            predictor (Predictor): Predictor instance
        """
        self.predictor = predictor
    def remove_predictor(self):
        """Removes all predictors of this decoder."""
        self.predictor = None
    def has_predictor(self):
        """Returns true if a predictor has been added to the decoder."""
        return self.predictor is not None
    def consume(self, word, i=None):
        """Calls ``consume()`` on the predictor."""
        self.predictor.consume(word)
    def get_initial_dist(self):
        """Temperature-scaled log-softmax of the predictor's initial
        distribution."""
        return utils.log_softmax(self.predictor.get_initial_dist(), self.temperature)
    def _get_non_zero_words(self, predictor, posterior):
        """Get the indices of words in ``posterior`` with non-zero
        probability (finite log score).

        Args:
            predictor: predictor (unused; kept for interface parity)
            posterior: Posterior distribution (log scores).

        Returns:
            list. Word indices with finite log probability.
        """
        fin_probs = np.isfinite(posterior)
        return [i for i, b in enumerate(fin_probs) if b]
    def apply_predictor(self, hypo=None, top_n=0):
        """Get the distribution over the next word from the predictor.

        Args:
            hypo: Current partial hypothesis (required for Gumbel).
            top_n (int): If positive, return only the best n words.

        Returns:
            (ids, posterior, original_posterior): word ids, their
            (possibly Gumbel-perturbed) scores, and the unperturbed
            scores (None unless Gumbel decoding is active).
        """
        assert hypo is not None or not self.gumbel
        self.apply_predictor_count += 1
        posterior = self.predictor.predict_next()
        posterior = utils.log_softmax(posterior, temperature=self.temperature)
        # A proper log distribution has at most one zero entry (prob == 1).
        assert (len(posterior) - np.count_nonzero(posterior)) <= 1
        non_zero_words = self._get_non_zero_words(self.predictor, posterior)
        if len(non_zero_words) == 0:
            # Fall back to EOS if nothing has finite probability.
            non_zero_words = set([utils.EOS_ID])
        if self.gumbel:
            gumbel_full_posterior = self.gumbelify(hypo, posterior)
            ids, posterior, original_posterior = self.combine_posteriors(
                non_zero_words, gumbel_full_posterior,
                self.predictor.get_unk_probability(posterior),
                top_n=top_n, original_posterior=posterior)
        else:
            ids, posterior, original_posterior = self.combine_posteriors(
                non_zero_words, posterior,
                self.predictor.get_unk_probability(posterior), top_n=top_n)
        assert self.allow_unk_in_output or (utils.UNK_ID not in ids)
        return ids, posterior, original_posterior
    def gumbelify(self, hypo, posterior):
        """Perturb ``posterior`` with truncated Gumbel noise so that the
        maximum over the children equals the parent's Gumbel score."""
        vf = np.vectorize(lambda x: (self.get_pos_score(hypo, x) - self.get_adjusted_score(hypo)))
        shifted_posterior = vf(posterior)
        shifted_posterior = utils.log_softmax(shifted_posterior)
        gumbels = np.random.gumbel(loc=0, scale=1, size=shifted_posterior.shape)
        gumbel_posterior = (shifted_posterior + gumbels) + hypo.base_score
        Z = np.max(gumbel_posterior)
        # Numerically stable truncation of the Gumbels at the parent score.
        v = (hypo.score - gumbel_posterior) + utils.log1mexp_basic(gumbel_posterior - Z)
        gumbel_full_posterior = (hypo.score - np.maximum(0, v)) - utils.log1pexp_basic(-np.abs(v))
        # BUGFIX: this line used '==' (a no-op comparison) instead of '=',
        # so zero-probability entries were never masked out.
        gumbel_full_posterior[(posterior == utils.NEG_INF).nonzero()] = utils.NEG_INF
        return gumbel_full_posterior
    def _expand_hypo(self, hypo, limit=0, return_dist=False):
        """Get the best ``limit`` expansions of ``hypo``.

        Args:
            hypo (PartialHypothesis): Hypothesis to expand
            limit (int): If positive, number of expansions to return.
            return_dist (bool): Also return the posterior.

        Returns:
            list. List of child hypotheses (and posterior if requested).
        """
        # Restore predictor state and lazily consume the pending word.
        self.set_predictor_states(copy.deepcopy(hypo.predictor_states))
        if hypo.word_to_consume is not None:
            self.consume(hypo.word_to_consume)
            hypo.word_to_consume = None
        ids, posterior, original_posterior = self.apply_predictor(hypo, limit)
        max_score = np.max(posterior)
        new_states = self.get_predictor_states()
        new_hypos = [hypo.cheap_expand(
                         trgt_word,
                         posterior[idx] if self.gumbel else posterior[idx] + hypo.score,
                         base_score=(original_posterior[idx] + hypo.base_score) if self.gumbel else hypo.base_score,
                         breakdown=original_posterior[idx] if self.gumbel else posterior[idx],
                         states=new_states,
                         cur_max=max_score)
                     for idx, trgt_word in enumerate(ids)]
        if return_dist:
            return new_hypos, posterior
        return new_hypos
    def get_adjusted_score(self, hypo):
        """Combines hypo score with penalties/rewards from the active
        regularizers and length normalization."""
        current_score = hypo.score
        if self.gumbel:
            # Gumbel scores are final samples; never adjust them.
            return current_score
        if self.variance_reg:
            current_score -= self.variance_reg * hypo.get_score_variance()
        if self.max_reg:
            current_score -= self.max_reg * hypo.get_score_max()
        if self.local_variance_reg:
            current_score -= self.local_variance_reg * hypo.get_local_variance()
        if self.greedy_reg:
            current_score -= self.greedy_reg * hypo.get_score_greedy()
        if self.square_reg:
            current_score -= self.square_reg * hypo.get_squares()
        if self.length_norm:
            current_score /= len(hypo)
        return current_score
    def get_pos_score(self, hypo, val, max_=None):
        """Adjusted score of ``hypo`` if extended by a token with score
        ``val``; ``max_`` is the current best token score (used by the
        greedy regularizer)."""
        pos_score = hypo.score + val
        if self.variance_reg:
            pos_score -= self.variance_reg * hypo.get_score_variance(val)
        if self.max_reg:
            pos_score -= self.max_reg * hypo.get_score_max(val)
        if self.local_variance_reg:
            pos_score -= self.local_variance_reg * hypo.get_local_variance(val)
        if self.greedy_reg:
            pos_score -= self.greedy_reg * hypo.get_score_greedy((val, max_))
        if self.square_reg:
            pos_score -= self.square_reg * hypo.get_squares(val)
        if self.length_norm:
            pos_score /= len(hypo)
        return pos_score
    def get_max_pos_score(self, hypo):
        """For non monotonic regularizers.
        Returns maximum possible score given current values."""
        current_score = hypo.score
        if self.variance_reg:
            current_score -= ((self.variance_reg * hypo.get_score_variance()) * len(hypo)) / self.max_len
        if self.local_variance_reg:
            current_score -= ((self.local_variance_reg * hypo.get_local_variance()) * len(hypo)) / self.max_len
        if self.length_norm:
            current_score /= self.max_len
        return current_score
    def _combine_posteriors_simple(self, non_zero_words, posterior, unk_prob, top_n=0, original_posterior=None):
        """Look up the scores of ``non_zero_words`` in the posterior(s).

        Args:
            non_zero_words (set): All words with positive probability
            posterior: Posterior distribution from ``predict_next()``
            unk_prob: UNK probability, from ``get_unk_probability``
            top_n (int): If positive, restrict to the best n words.
            original_posterior: Optional unperturbed posterior (Gumbel).

        Returns:
            (ids, scores, orig_scores): like in ``apply_predictor()``.
        """
        if top_n > 0:
            non_zero_words = utils.argmax_n(posterior, top_n)
        scores_func = np.vectorize(lambda x: utils.common_get(posterior, x, unk_prob))
        scores = scores_func(non_zero_words)
        orig_scores = None
        if original_posterior is not None:
            scores_func = np.vectorize(lambda x: utils.common_get(original_posterior, x, unk_prob))
            orig_scores = scores_func(non_zero_words)
        return non_zero_words, scores, orig_scores
    def set_current_sen_id(self, sen_id):
        # initialize_predictor() will increment this back to sen_id.
        self.current_sen_id = sen_id - 1
    def initialize_predictor(self, src_sentence):
        """Increases the sentence id counter and calls ``initialize()``
        on the predictor.

        Args:
            src_sentence (list): List of source word ids without <S> or
                </S> which make up the source sentence
        """
        if not self.is_deterministic():
            # Reseed per sentence for reproducible stochastic decoding.
            np.random.seed(seed=self.seed)
        self.max_len = int(np.ceil(self.max_len_factor * len(src_sentence)))
        self.full_hypos = []
        self.current_sen_id += 1
        self.predictor.set_current_sen_id(self.current_sen_id)
        self.predictor.initialize(src_sentence)
    def add_full_hypo(self, hypo):
        """Adds a new full hypothesis to ``full_hypos``, tracking the
        best one seen so far in ``cur_best``.

        Args:
            hypo (Hypothesis): New complete hypothesis
        """
        if len(self.full_hypos) == 0 or hypo.total_score > self.cur_best.total_score:
            self.cur_best = hypo
        self.full_hypos.append(hypo)
    def get_full_hypos_sorted(self, additional_hypos=None):
        """Returns ``full_hypos`` sorted by total score, optionally
        finalizing ``additional_hypos`` first (complete ones are added;
        incomplete ones are used as fallback if --add_incomplete).

        Returns:
            list. ``full_hypos`` sorted by ``total_score``.
        """
        if additional_hypos is not None:
            incompletes = []
            for hypo in additional_hypos:
                if hypo.get_last_word() == utils.EOS_ID:
                    hypo.score = self.get_adjusted_score(hypo)
                    self.add_full_hypo(hypo.generate_full_hypothesis())
                else:
                    incompletes.append(hypo)
            if not self.full_hypos:
                logging.warning('No complete hypotheses found')
            if len(self.full_hypos) < self.nbest and self.add_incomplete:
                logging.warning('Adding incomplete hypotheses as candidates')
                incompletes.sort(key=lambda hypo: hypo.score, reverse=True)
                for hypo in incompletes[:(self.nbest - len(self.full_hypos))]:
                    hypo.score = self.get_adjusted_score(hypo)
                    self.add_full_hypo(hypo.generate_full_hypothesis())
        return sorted(self.full_hypos, key=lambda hypo: hypo.total_score, reverse=True)
    def get_empty_hypo(self):
        """Hypothesis consisting only of </S>, scored with the
        predictor's empty-string probability."""
        hypo = PartialHypothesis(use_stats=True)
        score = self.predictor.get_empty_str_prob()
        hypo.score += score
        hypo.score_breakdown.append(score)
        hypo.trgt_sentence += [utils.EOS_ID]
        hypo.score = self.get_adjusted_score(hypo)
        return hypo
    def set_predictor_states(self, states):
        """Calls ``set_state()`` on the predictor."""
        self.predictor.set_state(states)
    def get_predictor_states(self):
        """Calls ``get_state()`` on the predictor."""
        return self.predictor.get_state()
    @abstractmethod
    def decode(self, src_sentence):
        """Decodes a single source sentence. This method has to be
        implemented by subclasses; it contains the core of the search
        strategy. ``src_sentence`` is a list of source word ids without
        <S> or </S>. Implementations should delegate scoring to the
        predictor via ``apply_predictor()`` and manage predictor states
        with ``consume()``, ``get_predictor_states()`` and
        ``set_predictor_states()``.

        Args:
            src_sentence (list): List of source word ids without <S> or
                </S> which make up the source sentence

        Returns:
            list. A list of ``Hypothesis`` instances ordered by their
            score.

        Raises:
            NotImplementedError: if the method is not implemented
        """
        raise NotImplementedError
|
class DijkstraDecoder(Decoder):
    """Best-first (Dijkstra-style) search over partial hypotheses.

    Repeatedly pops the highest-scoring partial hypothesis from a
    priority queue, expanding it until --nbest complete hypotheses have
    been popped. The queue may be capacity-bounded (MinMaxHeap) and
    candidates are pruned against the empty-hypothesis lower bound.
    """
    name = 'dijkstra'
    def __init__(self, decoder_args):
        super(DijkstraDecoder, self).__init__(decoder_args)
        self.nbest = max(1, decoder_args.nbest)
        # The empty-hypothesis lower bound is only valid for
        # deterministic (non-Gumbel) scoring.
        self.use_lower_bound = (not self.gumbel)
        self.capacity = (decoder_args.beam if (not self.gumbel) else self.nbest)
        if self.not_monotonic:
            logging.warn("Using Dijkstra's with non-monotonic scoring function. Behavior may not be defined!")
    def decode(self, src_sentence):
        """Decodes a single source sentence using best-first search."""
        self.initialize_predictor(src_sentence)
        self.lower_bound = (self.get_empty_hypo() if self.use_lower_bound else None)
        self.cur_capacity = self.capacity
        # Bounded search uses a min-max heap; unbounded a plain list heap.
        open_set = (MinMaxHeap(reserve=self.capacity) if (self.capacity > 0) else [])
        self.push(open_set, 0.0, PartialHypothesis(self.get_predictor_states(), self.calculate_stats))
        while open_set:
            (c, hypo) = self.pop(open_set)
            if (hypo.get_last_word() == utils.EOS_ID):
                hypo.score = self.get_adjusted_score(hypo)
                self.add_full_hypo(hypo.generate_full_hypothesis())
                if (len(self.full_hypos) == self.nbest):
                    return self.get_full_hypos_sorted()
                # Each finished hypothesis releases one queue slot.
                self.cur_capacity -= 1
                continue
            if (len(hypo) == self.max_len):
                # Length limit reached; drop without expanding.
                continue
            for next_hypo in self._expand_hypo(hypo, self.capacity):
                score = self.get_adjusted_score(next_hypo)
                self.push(open_set, score, next_hypo)
        if (not self.full_hypos):
            # Fall back to the empty hypothesis if search found nothing.
            self.add_full_hypo(self.lower_bound.generate_full_hypothesis())
        return self.get_full_hypos_sorted()
    def push(self, set_, score, hypo):
        # Prune anything that cannot beat the empty-hypothesis bound.
        if (self.lower_bound and (score < self.lower_bound.score)):
            return
        if isinstance(set_, MinMaxHeap):
            if (set_.size < self.cur_capacity):
                set_.insert((score, hypo))
            else:
                # At capacity: only replace the current minimum.
                min_score = set_.peekmin()[0]
                if (score > min_score):
                    set_.replacemin((score, hypo))
        else:
            # Unbounded case: heapq min-heap on negated scores.
            heappush(set_, ((- score), hypo))
    def pop(self, set_):
        # Returns a (score, hypo) pair; for the heapq branch the first
        # element is the negated score (unused by the caller).
        if isinstance(set_, MinMaxHeap):
            return set_.popmax()
        else:
            return heappop(set_)
|
class DijkstraTSDecoder(Decoder):
    """Time-synchronous Dijkstra search decoder.

    Keeps one bounded MinMaxHeap of partial hypotheses per target
    length and repeatedly expands the head of the globally best-scoring
    queue.
    """
    name = 'dijkstra_ts'

    def __init__(self, decoder_args):
        """Creates a new time-synchronous Dijkstra decoder.

        Fetched from ``decoder_args``: nbest, beam, early_stopping and
        memory_threshold_coef (global cap on queued hypotheses).
        """
        super(DijkstraTSDecoder, self).__init__(decoder_args)
        self.nbest = max(1, decoder_args.nbest)
        self.beam = (decoder_args.beam if (not self.gumbel) else self.nbest)
        self.stop_criterion = (self._best_eos if decoder_args.early_stopping else self._all_eos)
        # Global cap on queued hypotheses; INF disables the threshold.
        self.size_threshold = ((self.beam * decoder_args.memory_threshold_coef) if (decoder_args.memory_threshold_coef > 0) else utils.INF)

    def decode(self, src_sentence):
        """Decodes ``src_sentence``; returns full hypotheses sorted by score."""
        self.initialize_predictor(src_sentence)
        self.initialize_order_ds()
        self.total_queue_size = 0
        while self.queue_order:
            # Pick the time step whose queue currently has the best head.
            (_, t) = next(self.queue_order)
            cur_queue = self.queues[t]
            (score, hypo) = cur_queue.popmax()
            self.total_queue_size -= 1
            self.time_sync[t] -= 1
            if (hypo.get_last_word() == utils.EOS_ID):
                hypo.score = self.get_adjusted_score(hypo)
                self.add_full_hypo(hypo.generate_full_hypothesis())
                if self.stop_criterion():
                    break
                # A finished hypothesis reduces the budget of later steps.
                self.update(cur_queue, t, forward_prune=True)
                continue
            if (t == self.max_len):
                self.update(cur_queue, t)
                continue
            next_queue = self.queues[(t + 1)]
            for next_hypo in self._expand_hypo(hypo, self.beam):
                self.add_hypo(next_hypo, next_queue, (t + 1))
            self.update(cur_queue, t)
            self.update(next_queue, (t + 1))
        return self.get_full_hypos_sorted()

    def initialize_order_ds(self):
        """Sets up per-length queues, their ordering, and pop budgets."""
        self.queues = [MinMaxHeap() for k in range((self.max_len + 1))]
        self.queue_order = PointerQueue([0.0], reserve=self.max_len)
        # Remaining pops allowed per time step (beam budget).
        self.time_sync = defaultdict((lambda : (self.beam if (self.beam > 0) else utils.INF)))
        self.queues[0].insert((0.0, PartialHypothesis(self.get_predictor_states(), self.calculate_stats)))
        self.time_sync[0] = 1

    def update(self, queue, t, forward_prune=False):
        """Refreshes ``queue``'s entry in the ordering structure.

        With ``forward_prune`` set, reduces the budget of all later
        time steps because a hypothesis finished early.
        """
        self.queue_order.popindex(t, default=None)
        if (self.time_sync[t] <= 0):
            self.prune(t)
        if (len(queue) > 0):
            self.queue_order[queue.peekmax()[0]] = t
        if forward_prune:
            i = self.max_len
            while (i > t):
                self.time_sync[i] -= 1
                if (self.time_sync[i] <= 0):
                    self.prune(i)
                    return
                # Evict worst entries beyond the reduced budget.
                while (len(self.queues[i]) > self.time_sync[i]):
                    self.queues[i].popmin()
                i -= 1

    def prune(self, t):
        """Discards all queues up to and including time step ``t``."""
        for i in range((t + 1)):
            self.queue_order.popindex(i, default=None)
            self.queues[i] = []

    def add_hypo(self, hypo, queue, t):
        """Inserts ``hypo`` into ``queue``, honoring budget and memory cap."""
        score = self.get_adjusted_score(hypo)
        if (len(queue) < self.time_sync[t]):
            queue.insert((score, hypo))
            if (self.total_queue_size >= self.size_threshold):
                self.remove_one()
            else:
                self.total_queue_size += 1
        else:
            min_score = queue.peekmin()[0]
            if (score > min_score):
                queue.replacemin((score, hypo))

    def remove_one(self):
        """Helper for the memory threshold: discards the worst scoring
        hypothesis from the earliest non-empty time step (see the
        --memory_threshold_coef help text)."""
        for (t, q) in enumerate(self.queues):
            if (len(q) > 0):
                # Bug fix: evict the minimum (worst), not popmax (best);
                # the old code also referenced the undefined attribute
                # self.score_by_t when the queue drained.
                q.popmin()
                if (len(q) == 0):
                    self.queue_order.popindex(t, default=None)
                return

    def _best_eos(self):
        """Returns true if the best hypothesis ends with </S>"""
        if (len(self.full_hypos) < min(self.beam, self.nbest)):
            return False
        if (not self.not_monotonic):
            return True
        threshold = self.cur_best.total_score
        cur_scores = [self.get_max_pos_score(h[1]) for q in self.queues if q for h in q.a]
        return all([(c < threshold) for c in cur_scores])

    def _all_eos(self):
        """Returns true if all hypotheses end with </S>"""
        if (len(self.full_hypos) < self.beam):
            return False
        if (not self.not_monotonic):
            return True
        threshold = sorted(self.full_hypos, reverse=True)[(self.beam - 1)].total_score
        cur_scores = [self.get_max_pos_score(h[1]) for q in self.queues if q for h in q.a]
        return all([(c < threshold) for c in cur_scores])

    @staticmethod
    def add_args(parser):
        parser.add_argument('--memory_threshold_coef', default=0, type=int, help='total queue size will be set to `memory_threshold_coef`* beam size. When capacity is exceeded, the worst scoring hypothesis from the earliest time step will be discarded')
|
class GreedyDecoder(Decoder):
    """Decoder that always follows the single most likely next token."""
    name = 'greedy'

    def __init__(self, decoder_args):
        super(GreedyDecoder, self).__init__(decoder_args)

    def decode(self, src_sentence):
        """Greedily decodes ``src_sentence``; returns the one full hypothesis."""
        self.initialize_predictor(src_sentence)
        hypo = PartialHypothesis(self.get_predictor_states(), self.calculate_stats)
        while hypo.get_last_word() != utils.EOS_ID and len(hypo) < self.max_len:
            ids, posterior, base_posterior = self.apply_predictor(
                hypo if self.gumbel else None, 1)
            token = ids[0]
            if self.gumbel:
                # Gumbel mode tracks the unperturbed model score separately.
                hypo.base_score += base_posterior[0]
                hypo.score_breakdown.append(base_posterior[0])
            else:
                hypo.score += posterior[0]
                hypo.score_breakdown.append(posterior[0])
            hypo.trgt_sentence.append(token)
            self.consume(token)
        self.add_full_hypo(hypo.generate_full_hypothesis())
        return self.full_hypos
|
class ReferenceDecoder(Decoder):
    """Force-decodes a given reference target sentence, accumulating
    the model scores of the reference tokens."""
    name = 'reference'

    def __init__(self, decoder_args):
        super(ReferenceDecoder, self).__init__(decoder_args)

    def decode(self, src_sentence, trgt_sentence):
        """Scores ``trgt_sentence`` (plus </S>) under the model.

        Args:
            src_sentence (list): Source token IDs.
            trgt_sentence (list): Target token IDs without </S>.

        Returns:
            list. Full hypotheses sorted by score.
        """
        self.trgt_sentence = (trgt_sentence + [utils.EOS_ID])
        self.initialize_predictor(src_sentence)
        hypo = PartialHypothesis(self.get_predictor_states(), self.calculate_stats)
        while (hypo.get_last_word() != utils.EOS_ID):
            self._expand_hypo(hypo)
        hypo.score = self.get_adjusted_score(hypo)
        self.add_full_hypo(hypo.generate_full_hypothesis())
        return self.get_full_hypos_sorted()

    def _expand_hypo(self, hypo):
        """Forces the next reference token onto ``hypo`` and adds its score."""
        self.set_predictor_states(hypo.predictor_states)
        next_word = self.trgt_sentence[len(hypo.trgt_sentence)]
        (ids, posterior, _) = self.apply_predictor()
        # Bug fix: the original searched for the undefined name `k`;
        # we need the position of the forced reference token in `ids`.
        ind = utils.binary_search(ids, next_word)
        hypo.predictor_states = self.get_predictor_states()
        hypo.score += posterior[ind]
        hypo.score_breakdown.append(posterior[ind])
        hypo.trgt_sentence += [next_word]
        self.consume(next_word)
|
class SamplingDecoder(Decoder):
    """Draws ``nbest`` independent ancestral samples from the model."""
    name = 'sampling'

    def __init__(self, decoder_args):
        """Creates a new sampling decoder instance. The following value
        is fetched from `decoder_args`:

            nbest (int): Number of independent samples to draw per
                         source sentence.

        (The original docstring here wrongly described an A* decoder.)

        Args:
            decoder_args (object): Decoder configuration passed through
                                   from the configuration API.
        """
        super(SamplingDecoder, self).__init__(decoder_args)
        self.nbest = decoder_args.nbest
        # Gumbel-based sampling is handled by dedicated decoders.
        assert (not self.gumbel)

    def decode(self, src_sentence):
        """Samples ``nbest`` hypotheses in lockstep, one token per step."""
        self.initialize_predictor(src_sentence)
        hypos = [PartialHypothesis(copy.deepcopy(self.get_predictor_states()), self.calculate_stats) for i in range(self.nbest)]
        t = 0
        # NOTE(review): presumably self.seed is set by the base Decoder
        # from the config; reseeding per (sample, step) keeps runs
        # reproducible — confirm against the base class.
        base_seed = (self.nbest * self.seed)
        while (hypos and (t < self.max_len)):
            next_hypos = []
            for (sen_seed, hypo) in enumerate(hypos):
                np.random.seed(seed=(base_seed + sen_seed))
                if (hypo.get_last_word() == utils.EOS_ID):
                    # Finished hypothesis: finalize and drop from the pool.
                    hypo.score = self.get_adjusted_score(hypo)
                    self.add_full_hypo(hypo.generate_full_hypothesis())
                else:
                    self._expand_hypo(hypo)
                    next_hypos.append(hypo)
            hypos = next_hypos
            t += 1
        # Hypotheses that hit the length limit are finalized as-is.
        for hypo in hypos:
            hypo.score = self.get_adjusted_score(hypo)
            self.add_full_hypo(hypo.generate_full_hypothesis())
        return self.get_full_hypos_sorted()

    def _expand_hypo(self, hypo):
        """Samples one token and appends it to ``hypo``."""
        self.set_predictor_states(hypo.predictor_states)
        (ids, posterior, _) = self.apply_predictor()
        ind = self._sample(posterior)
        next_word = ids[ind]
        hypo.predictor_states = self.get_predictor_states()
        hypo.score += posterior[ind]
        hypo.score_breakdown.append(posterior[ind])
        hypo.trgt_sentence += [next_word]
        if self.calculate_stats:
            hypo.statistics.push(posterior[ind], np.max(posterior))
        self.consume(next_word)

    def _sample(self, posterior):
        # Plain multinomial sampling in log space; overridden by subclasses.
        return sampling_utils.log_multinomial_sample(posterior)

    def is_deterministic(self):
        return False
|
class NucleusSamplingDecoder(SamplingDecoder):
    """Sampling decoder restricted to the top-p probability nucleus."""
    name = 'nucleus_sampling'

    def __init__(self, decoder_args):
        """Creates a new nucleus sampling decoder instance
        (Holtzman et al. 2019). The following values are fetched from
        `decoder_args`:

            nbest (int): Number of independent samples to draw.
            nucleus_threshold (float): Probability mass of the nucleus
                from which tokens are sampled.

        (The original docstring here wrongly described an A* decoder.)

        Args:
            decoder_args (object): Decoder configuration passed through
                                   from the configuration API.
        """
        super(NucleusSamplingDecoder, self).__init__(decoder_args)
        # Stored in log space to match the log-domain sampler.
        self.log_nucleus_threshold = np.log(decoder_args.nucleus_threshold)

    def _sample(self, posterior):
        # The sampler returns a list of indices; we draw a single one.
        return sampling_utils.log_nucleus_multinomial_sample(posterior, nucleus_p=self.log_nucleus_threshold)[0]

    @staticmethod
    def add_args(parser):
        parser.add_argument('--nucleus_threshold', default=0.95, type=float, metavar='N', help="implementation of Holtzman et. al 2019 p-nucleus sampling. Value specifies probability core from which to consider top items for sampling. Only compatible with 'sampling' decoder.")
|
def encode(sentence, target=False):
    """Converts a sentence string to a sequence of token IDs.

    Depending on the configuration of this module, it applies word maps
    and/or subword/character segmentation. Delegates to
    ``encoder.encode()``.

    Args:
        sentence (string): A single input sentence.
        target (bool): Unused here; presumably kept for API symmetry
            with target-side encoding — TODO confirm with callers.

    Returns:
        List of integers.
    """
    return encoder.encode(sentence)
|
def encode_trg(trg_sentence):
    """Converts a target sentence string to a sequence of token IDs.

    Depending on the configuration of this module, it applies word maps
    and/or subword/character segmentation. Delegates to
    ``encoder.encode_trg()``.

    Args:
        trg_sentence (string): A single input sentence.

    Returns:
        List of integers.
    """
    return encoder.encode_trg(trg_sentence)
|
def decode(trg_sentence):
    """Converts a sequence of target token IDs to a string.

    Delegates to the global ``decoder.decode()``.

    Args:
        trg_sentence (list): A sequence of integers (token IDs).

    Returns:
        string.
    """
    return decoder.decode(trg_sentence)
|
def initialize(args):
    """Initializes the ``io`` module: loads word maps and builds the
    global ``encoder``/``decoder`` objects according to ``args``.

    Args:
        args (object): SGNMT config.

    Raises:
        NotImplementedError: on unknown pre-/postprocessing names.
    """
    global encoder, decoder
    # A combined wmap may be overridden by side-specific maps below.
    if args.wmap:
        load_src_wmap(args.wmap)
        load_trg_wmap(args.wmap)
    if args.src_wmap:
        load_src_wmap(args.src_wmap)
    if args.trg_wmap:
        load_trg_wmap(args.trg_wmap)
    pre = args.preprocessing
    if pre == 'id':
        encoder = IDEncoder()
    elif pre == 'word':
        encoder = WordEncoder()
    elif pre == 'char':
        encoder = CharEncoder()
    elif pre == 'bpe':
        encoder = BPEEncoder(args.bpe_codes)
    elif pre == 'bpe@@':
        encoder = BPEEncoder(args.bpe_codes, '@@', True)
    elif pre == 'bpe_':
        encoder = BPEEncoder(args.bpe_codes, '▁', True)
    else:
        raise NotImplementedError('Unknown preprocessing')
    post = args.postprocessing
    if post == 'bart':
        # BART needs the fairseq BPE codec loaded first.
        load_bart_decoder(args.fairseq_path)
        decoder = BartDecoder()
    else:
        decoder_classes = {'id': IDDecoder,
                           'word': WordDecoder,
                           'char': CharDecoder,
                           'bpe': BPEDecoder,
                           'bpe@@': BPEAtAtDecoder,
                           'bpe_': BPEUndDecoder}
        if post not in decoder_classes:
            raise NotImplementedError('Unknown postprocessing')
        decoder = decoder_classes[post]()
|
class Encoder(object):
    """Base class for IO encoders mapping sentence strings to token IDs."""

    def encode(self, src_sentence):
        """Convert a source sentence string into a list of token IDs.

        Depending on the configuration of this module, subclasses apply
        word maps and/or subword/character segmentation.

        Args:
            src_sentence (string): A single input sentence.

        Returns:
            List of integers.
        """
        raise NotImplementedError

    def encode_trg(self, trg_sentence):
        """Convert a target sentence string into a list of token IDs."""
        raise NotImplementedError
|
class Decoder(object):
    """Base class for IO decoders mapping token IDs back to strings."""

    def decode(self, trg_sentence):
        """Convert a sequence of target token IDs to a string.

        Args:
            trg_sentence (list): A sequence of integers (token IDs).

        Returns:
            string.
        """
        raise NotImplementedError
|
class IDEncoder(Encoder):
    """Encoder that interprets the input as whitespace-separated IDs."""

    def encode(self, src_sentence):
        return [int(tok) for tok in src_sentence.split()]
|
class IDDecoder(Decoder):
    """Decoder that renders token IDs as whitespace-separated numbers."""

    def decode(self, trg_sentence):
        return ' '.join(str(tok) for tok in trg_sentence)
|
class WordEncoder(Encoder):
    """Encoder applying word maps to whitespace-tokenized text."""

    def encode(self, src_sentence):
        # Words missing from the source map fall back to UNK.
        return [src_wmap.get(tok, utils.UNK_ID) for tok in src_sentence.split()]

    def encode_trg(self, trg_sentence):
        # Reverse target map: word -> id (see load_trg_wmap).
        return [trg_wmap_rev.get(tok, utils.UNK_ID) for tok in trg_sentence.split()]
|
class WordDecoder(Decoder):
    """Decoder applying the target word map (id -> word)."""

    def decode(self, trg_sentence):
        words = (trg_wmap.get(tok, '<UNK>') for tok in trg_sentence)
        return ' '.join(words)
|
class BartDecoder(Decoder):
    """Decoder that applies the target word map and then BART (GPT-2
    BPE) detokenization via the global ``bart`` codec loaded by
    ``load_bart_decoder``."""

    def decode(self, trg_sentence):
        return bart.decode(' '.join((trg_wmap.get(w, '<UNK>') for w in trg_sentence)))
|
class CharEncoder(Encoder):
    """Encoder mapping individual characters; spaces become underscores."""

    def encode(self, src_sentence):
        chars = src_sentence.replace(' ', '_')
        return [src_wmap.get(ch, utils.UNK_ID) for ch in chars]
|
class CharDecoder(Decoder):
    """Decoder mapping character IDs back; underscores become spaces."""

    def decode(self, trg_sentence):
        chars = (trg_wmap.get(tok, '<UNK>') for tok in trg_sentence)
        return ''.join(chars).replace('_', ' ')
|
class BPE(object):
    """Byte pair encoding segmenter (Sennrich et al. style).

    Loads merge operations from a codes file and applies them greedily
    (lowest-rank pair first) to whitespace-tokenized text.
    """

    def __init__(self, codes_path, separator='@@', remove_eow=False):
        """Load BPE merge operations from ``codes_path``.

        Args:
            codes_path (string): Path to the BPE codes file; may start
                with a '#version: x.y' header line.
            separator (string): Marker appended to non-final subwords.
            remove_eow (bool): Strip the '</w>' end-of-word marker from
                the produced subwords.
        """
        with codecs.open(codes_path, encoding='utf-8') as codes:
            codes.seek(0)
            offset = 1
            # Detect an optional '#version: x.y' header on the first line.
            firstline = codes.readline()
            if firstline.startswith('#version:'):
                self.version = tuple([int(x) for x in re.sub('(\\.0+)*$', '', firstline.split()[(- 1)]).split('.')])
                offset += 1
            else:
                self.version = (0, 1)
                codes.seek(0)
            self.bpe_codes = [tuple(item.strip('\r\n ').split(' ')) for item in codes]
            for (i, item) in enumerate(self.bpe_codes):
                if (len(item) != 2):
                    sys.stderr.write('Error: invalid line {0} in BPE codes file: {1}\n'.format((i + offset), ' '.join(item)))
                    sys.stderr.write('The line should exist of exactly two subword units, separated by whitespace\n')
                    sys.exit(1)
            # Earlier merges get lower ranks; reversed() keeps the first
            # occurrence when a pair is listed twice.
            self.bpe_codes = dict([(code, i) for (i, code) in reversed(list(enumerate(self.bpe_codes)))])
            self.separator = separator
            self.cache = {}
            self.remove_eow = remove_eow

    def process_line(self, line):
        """Segment ``line``, preserving leading and trailing whitespace."""
        out = ''
        leading_whitespace = (len(line) - len(line.lstrip('\r\n ')))
        if leading_whitespace:
            out += line[:leading_whitespace]
        out += self.segment(line)
        trailing_whitespace = (len(line) - len(line.rstrip('\r\n ')))
        if (trailing_whitespace and (trailing_whitespace != len(line))):
            out += line[(- trailing_whitespace):]
        return out

    def segment(self, sentence):
        """Segment a single whitespace-tokenized sentence with BPE."""
        segments = self.segment_tokens(sentence.strip('\r\n ').split(' '))
        return ' '.join(segments)

    def segment_tokens(self, tokens):
        """Segment a sequence of tokens with BPE encoding."""
        output = []
        for word in tokens:
            # Empty strings can arise from repeated whitespace.
            if (not word):
                continue
            new_word = [out for out in self.encode(word)]
            # All but the last subword carry the continuation separator.
            for item in new_word[:(- 1)]:
                output.append((item + self.separator))
            output.append(new_word[(- 1)])
        return output

    def get_pairs(self, word):
        """Return the set of adjacent symbol pairs in ``word``.

        ``word`` is a tuple of symbols (variable-length strings).
        """
        pairs = set()
        prev_char = word[0]
        for char in word[1:]:
            pairs.add((prev_char, char))
            prev_char = char
        return pairs

    def encode(self, orig):
        """Encode a word by applying BPE merges consecutively (cached)."""
        if (orig in self.cache):
            return self.cache[orig]
        # Codes-file version determines how '</w>' is attached.
        if (self.version == (0, 1)):
            word = (tuple(orig) + ('</w>',))
        elif (self.version == (0, 2)):
            word = (tuple(orig[:(- 1)]) + ((orig[(- 1)] + '</w>'),))
        else:
            raise NotImplementedError
        pairs = self.get_pairs(word)
        if (not pairs):
            return orig
        while True:
            # Lowest rank = earliest learned merge = applied first.
            bigram = min(pairs, key=(lambda pair: self.bpe_codes.get(pair, float('inf'))))
            if (bigram not in self.bpe_codes):
                break
            (first, second) = bigram
            new_word = []
            i = 0
            while (i < len(word)):
                try:
                    j = word.index(first, i)
                    new_word.extend(word[i:j])
                    i = j
                # Bug fix: the original bare `except:` swallowed every
                # exception; only ValueError ("not found") is expected.
                except ValueError:
                    new_word.extend(word[i:])
                    break
                if ((word[i] == first) and (i < (len(word) - 1)) and (word[(i + 1)] == second)):
                    new_word.append((first + second))
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if (len(word) == 1):
                break
            else:
                pairs = self.get_pairs(word)
        if self.remove_eow:
            if (word[(- 1)] == '</w>'):
                word = word[:(- 1)]
            elif word[(- 1)].endswith('</w>'):
                word = (word[:(- 1)] + (word[(- 1)].replace('</w>', ''),))
        self.cache[orig] = word
        return word
|
class BPEEncoder(Encoder):
    """Encoder that BPE-segments the input and maps subwords via ``src_wmap``."""

    def __init__(self, codes_path, separator='', remove_eow=False):
        self.bpe = BPE(codes_path, separator, remove_eow)

    def encode(self, src_sentence):
        segmented = self.bpe.segment(src_sentence)
        ids = []
        for token in segmented.split():
            if token in src_wmap:
                ids.append(src_wmap[token])
            else:
                # wmap and codes file may be out of sync; drop the token.
                logging.warning("src_wmap does not fully cover bpe_codes ('%s' not found in wmap, skipping)" % token)
        logging.debug("BPE segmentation: '%s' => '%s' (%s)" % (src_sentence, segmented, ' '.join(map(str, ids))))
        return ids
|
class BPEDecoder(Decoder):
    """Decoder for BPE output in SGNMT style ('</w>' marks word ends)."""

    def decode(self, trg_sentence):
        subwords = (trg_wmap.get(tok, '<UNK>') for tok in trg_sentence)
        return ''.join(subwords).replace('</w>', ' ')
|
class BPEAtAtDecoder(Decoder):
    """Decoder for BPE output using the '@@ ' continuation separator."""

    def decode(self, trg_sentence):
        words = (trg_wmap.get(tok, '<UNK>') for tok in trg_sentence)
        return ' '.join(words).replace('@@ ', '')
|
class BPEUndDecoder(Decoder):
    """Decoder for BPE/SentencePiece output using the '▁' word-boundary
    marker (the previous docstring wrongly said '@@ separator')."""

    def decode(self, trg_sentence):
        # Drop intra-word spaces, then turn '▁' markers into word breaks.
        return ' '.join((trg_wmap.get(w, '<UNK>') for w in trg_sentence)).replace(' ', '').replace('▁', ' ')
|
def src_sentence(src):
    """Return a readable form of the source sentence.

    When a BART BPE codec has been loaded (see ``load_bart_decoder``),
    the source is detokenized through it; otherwise it is returned
    unchanged.
    """
    if 'bart' not in globals():
        return src
    return bart.decode(src)
|
def load_src_wmap(path):
    """Load the source word map into the global ``src_wmap``.

    Args:
        path (string): Path to the word map file ('word id' per line);
            a falsy path yields an empty map.

    Returns:
        dict. Source word map (key: word, value: id)
    """
    global src_wmap
    if not path:
        src_wmap = {}
        return src_wmap
    with open(path) as f:
        entries = [line.strip().split() for line in f]
    # First field is the word, last field the numeric id.
    src_wmap = {fields[0]: int(fields[-1]) for fields in entries}
    return src_wmap
|
def load_bart_decoder(path):
    """Build the global ``bart`` GPT-2 BPE codec via fairseq.

    (The previous docstring was copy-pasted from ``load_src_wmap`` and
    described the wrong behavior.)

    Args:
        path (string): Path to the fairseq model checkpoint; its
            directory is used as the fairseq data directory.
    """
    from fairseq import options
    from fairseq.data import encoders
    input_args = ['--path', path, os.path.dirname(path), '--bpe', 'gpt2']
    parser = options.get_generation_parser()
    args = options.parse_args_and_arch(parser, input_args)
    global bart
    bart = encoders.build_bpe(args)
|
def load_trg_wmap(path):
    """Load the target word map into the global ``trg_wmap``.

    Also builds the reverse map ``trg_wmap_rev`` (word -> id) used by
    ``WordEncoder.encode_trg``.

    Args:
        path (string): Path to the word map file ('word id' per line);
            a falsy path yields empty maps.

    Returns:
        dict. Target word map (key: id, value: word)
    """
    global trg_wmap, trg_wmap_rev
    if not path:
        # Bug fix: also reset the reverse map so encode_trg() never
        # sees a stale or undefined trg_wmap_rev.
        trg_wmap = {}
        trg_wmap_rev = {}
        return trg_wmap
    with open(path) as f:
        entries = [line.strip().split() for line in f]
    trg_wmap = {int(fields[-1]): fields[0] for fields in entries}
    trg_wmap_rev = {word: idx for idx, word in trg_wmap.items()}
    return trg_wmap
|
def _mkdir(path, name):
try:
os.makedirs(path)
except OSError as exception:
if (exception.errno != errno.EEXIST):
raise
else:
logging.warn(("Output %s directory '%s' already exists." % (name, path)))
|
class OutputHandler(object):
    """Interface for decoder output handlers."""

    def __init__(self):
        # Nothing to set up at the base level.
        pass

    @abstractmethod
    def write_hypos(self, all_hypos, sen_indices=None):
        """Persist decoding results to the file system.

        Output paths etc. are expected to have been provided via
        constructor arguments of the concrete handler.

        Args:
            all_hypos (list): list of nbest lists of hypotheses
            sen_indices (list): List of sentence indices (0-indexed)

        Raises:
            IOError. If something goes wrong while writing to the disk
        """
        raise NotImplementedError
|
class TextOutputHandler(OutputHandler):
    """Writes the first-best hypotheses to a plain text file."""
    name = 'text'

    def __init__(self, path, args):
        """Creates a plain text output handler writing to ``path``."""
        super(TextOutputHandler, self).__init__()
        self.path = path
        # Bug fix: self.f was never initialized, so write_hypos()
        # raised AttributeError unless open_file() was called first.
        # None means "rewrite the whole file on each write_hypos call".
        self.f = None

    def write_hypos(self, all_hypos, sen_indices=None):
        """Writes the first-best hypothesis of each nbest list to ``path``."""
        if (self.f is not None):
            for hypos in all_hypos:
                self.f.write(io_utils.decode(hypos[0].trgt_sentence))
                self.f.write('\n')
                self.f.flush()
        else:
            with codecs.open(self.path, 'w', encoding='utf-8') as f:
                for hypos in all_hypos:
                    f.write(io_utils.decode(hypos[0].trgt_sentence))
                    f.write('\n')
                # Bug fix: the original called self.f.flush() here, but
                # self.f is None in this branch; the context manager
                # already flushes and closes the file.

    def open_file(self):
        self.f = codecs.open(self.path, 'w', encoding='utf-8')

    def close_file(self):
        self.f.close()
|
class ScoreOutputHandler(OutputHandler):
    """Writes per-position score lists to a plain text file."""
    name = 'score'

    def __init__(self, path, args):
        """Creates a score output handler writing to ``path``."""
        super(ScoreOutputHandler, self).__init__()
        self.path = path
        # The output file is opened immediately and kept open.
        self.open_file()

    def write_score(self, score):
        """Appends one line with the first score component per position."""
        def _emit(handle, score):
            handle.write(str([s[0][0] for s in score]))
            handle.write('\n')
            handle.flush()
        if self.f is not None:
            _emit(self.f, score)
        else:
            with codecs.open(self.path, 'w', encoding='utf-8') as handle:
                _emit(handle, score)

    def write_hypos(self, all_hypos, sen_indices=None):
        # Scores are written via write_score(); nothing to do here.
        pass

    def open_file(self):
        self.f = codecs.open(self.path, 'w', encoding='utf-8')

    def close_file(self):
        self.f.close()
|
class NBestSeparateOutputHandler(OutputHandler):
    """Produces n-best files with hypotheses at respective positions."""
    name = 'nbest_sep'

    def __init__(self, path, args):
        """
        Args:
            path (string): Prefix of the n-best files to write
            args: Configuration; ``args.nbest`` sets the number of files
        """
        super(NBestSeparateOutputHandler, self).__init__()
        self.paths = [(((path + '_') + str(i)) + '.txt') for i in range(max(args.nbest, 1))]
        # Bug fix: self.f was never initialized, so the `not self.f`
        # check in write_hypos() raised AttributeError on first use.
        # Files are opened lazily on the first write.
        self.f = []

    def write_hypos(self, all_hypos, sen_indices=None):
        """Writes hypothesis i of every nbest list to the i-th file."""
        if (not self.f):
            self.open_file()
        for hypos in all_hypos:
            # Pad short nbest lists by repeating the last hypothesis.
            while (len(hypos) < len(self.f)):
                hypos.append(hypos[(- 1)])
            for i in range(len(self.f)):
                self.f[i].write(io_utils.decode(hypos[i].trgt_sentence))
                self.f[i].write('\n')
                self.f[i].flush()

    def open_file(self):
        self.f = []
        for p in self.paths:
            self.f.append(codecs.open(p, 'w', encoding='utf-8'))

    def close_file(self):
        for f in self.f:
            f.close()
|
class NBestOutputHandler(OutputHandler):
    """Produces an n-best file in Moses format.

    The third field of each entry stores the separated unnormalized
    predictor scores. Note that sentence IDs are shifted: Moses n-best
    files start with index 0, but SGNMT/HiFST usually refer to the
    first sentence with 1 (e.g. in lattice directories or --range).
    """
    name = 'nbest'

    def __init__(self, path, args):
        """Creates a Moses n-best list output handler.

        Args:
            path (string): Path to the n-best file to write
            args: Configuration; ``args.predictors`` names the
                predictors whose scores appear in the breakdown.
        """
        super(NBestOutputHandler, self).__init__()
        self.path = path
        self.predictor_names = []
        seen = {}
        for name in utils.split_comma(args.predictors):
            # Disambiguate repeated predictor names by appending a count.
            count = seen.get(name, 0) + 1
            seen[name] = count
            final_name = name if count == 1 else '%s%d' % (name, count)
            self.predictor_names.append(final_name.replace('_', '0'))

    def write_hypos(self, all_hypos, sen_indices):
        """Writes the hypotheses in ``all_hypos`` to ``path``."""
        with codecs.open(self.path, 'w', encoding='utf-8') as f:
            n_predictors = len(self.predictor_names)
            for idx, hypos in zip(sen_indices, all_hypos):
                for hypo in hypos:
                    breakdown = ' '.join(
                        ('%s= %f' % (self.predictor_names[i],
                                     sum([s[i][0] for s in hypo.score_breakdown])))
                        for i in range(n_predictors))
                    f.write(('%d ||| %s ||| %s ||| %f'
                             % (idx, io_utils.decode(hypo.trgt_sentence),
                                breakdown, hypo.total_score)))
                    f.write('\n')
                idx += 1
|
class NgramOutputHandler(OutputHandler):
    """Extracts MBR-style ngram posteriors from decoder hypotheses.

    Hypothesis scores are assumed to be log-likelihoods and are
    renormalized over the nbest list to form a valid distribution, so
    each written value is the probability of the ngram appearing in
    the translation.
    """
    name = 'ngram'

    def __init__(self, path, args):
        """Creates an ngram output handler.

        Args:
            path (string): Path to the ngram directory to create
            args: Configuration with ``min_order``/``max_order`` bounds
                  on extracted ngram lengths.
        """
        super(NgramOutputHandler, self).__init__()
        self.path = path
        self.min_order = args.min_order
        self.max_order = args.max_order
        # One output file per sentence, named by 1-based sentence index.
        self.file_pattern = (path + '/%d.txt')

    def write_hypos(self, all_hypos, sen_indices):
        """Writes ngram posterior files for each sentence.

        Args:
            all_hypos (list): list of nbest lists of hypotheses
            sen_indices (list): List of sentence indices (0-indexed)

        Raises:
            OSError. If the directory could not be created
            IOError. If something goes wrong while writing to the disk
        """
        _mkdir(self.path, 'ngram')
        for (sen_idx, hypos) in zip(sen_indices, all_hypos):
            # Output file names are 1-based.
            sen_idx += 1
            # Renormalize the hypothesis log-likelihoods over the nbest list.
            total = utils.log_sum([hypo.total_score for hypo in hypos])
            normed_scores = [(hypo.total_score - total) for hypo in hypos]
            # ngram -> {hypo_idx: True} set of hypotheses containing it.
            ngrams = defaultdict(dict)
            for (hypo_idx, hypo) in enumerate(hypos):
                sen_eos = (([utils.GO_ID] + hypo.trgt_sentence) + [utils.EOS_ID])
                for pos in range(1, (len(sen_eos) + 1)):
                    hist = sen_eos[:pos]
                    for order in range(self.min_order, (self.max_order + 1)):
                        # hist[-order:] degenerates to the full prefix when
                        # order exceeds pos; duplicates collapse in the dict.
                        ngram = ' '.join(map(str, hist[(- order):]))
                        ngrams[ngram][hypo_idx] = True
            with open((self.file_pattern % sen_idx), 'w') as f:
                for (ngram, hypo_indices) in ngrams.items():
                    # Posterior = summed probability of all hypotheses
                    # containing the ngram (clipped for float rounding).
                    ngram_score = np.exp(utils.log_sum([normed_scores[hypo_idx] for hypo_idx in hypo_indices]))
                    f.write(('%s : %f\n' % (ngram, min(1.0, ngram_score))))
|
class Predictor(object):
    """Interface for predictors.

    A predictor scores the next target token given its internal state,
    which evolves through alternating ``predict_next()`` and
    ``consume()`` calls. ``get_state()``/``set_state()`` snapshot and
    restore that state for non-greedy search; the state covers the
    target-side history only, not the source sentence set up by
    ``initialize()``. Loading/saving states is transparent to the
    predictor instance.
    """

    def __init__(self):
        """Sets the sentence counter ``current_sen_id`` to 0."""
        super(Predictor, self).__init__()
        self.current_sen_id = 0

    @staticmethod
    def add_args(parser):
        # Subclasses may register command line arguments here.
        pass

    def set_current_sen_id(self, cur_sen_id):
        """Records the sentence id for the next ``initialize()`` call.

        Called between ``initialize()`` calls to advance the sentence
        counter; also used to skip sentences for --range.
        """
        self.current_sen_id = cur_sen_id

    @abstractmethod
    def predict_next(self):
        """Returns the distribution over the next target token.

        The prediction itself may advance internal state; two calls
        must be separated by a ``consume()`` call. Ids missing from the
        returned dict/array/list are scored by
        ``get_unk_probability()``.
        """
        raise NotImplementedError

    @abstractmethod
    def consume(self, word):
        """Extends the history by ``word`` and updates internal state.

        Two calls must be separated by a ``predict_next()`` call.
        """
        raise NotImplementedError

    @abstractmethod
    def get_state(self):
        """Returns an opaque snapshot of the current predictor state."""
        raise NotImplementedError

    @abstractmethod
    def set_state(self, state):
        """Restores a state from ``get_state()``.

        The argument is referenced, not copied — deep-copy it yourself
        if you need to return to this state again later.
        """
        raise NotImplementedError

    @abstractmethod
    def coalesce_and_set_states(self, states):
        """Restores a batch of states produced by ``get_state()``."""
        raise NotImplementedError

    def get_unk_probability(self, posterior):
        """Score for ids absent from ``posterior`` (from predict_next)."""
        return utils.NEG_INF

    def get_empty_str_prob(self):
        # Score of producing the empty string.
        return utils.NEG_INF

    def initialize(self, src_sentence):
        """Resets state for a new source sentence.

        Args:
            src_sentence (list): Source word IDs without <S> or </S>.
        """
        pass

    def is_equal(self, state1, state2):
        """True iff two states always yield the same scores
        (used for hypothesis recombination)."""
        return False
|
def gumbel_max_sample(x):
    """Draw one index from a distribution via the Gumbel-max trick.

    Args:
        x: log-probabilities (unnormalized is ok) of a discrete variable.

    Returns:
        int. Sampled index (NaN entries are ignored by nanargmax).
    """
    noise = np.random.gumbel(loc=0, scale=1, size=x.shape)
    return np.nanargmax(x + noise)
|
def exponential_sample(x):
    """Draw one index proportionally to ``x`` via exponential racing.

    Args:
        x: probability distribution (unnormalized is ok) over a
           discrete random variable.

    Returns:
        int. Sampled index.
    """
    races = -np.log(np.random.uniform(size=len(x)))
    races /= x
    return np.nanargmin(races)
|
def log_multinomial_sample(x):
    """Draw one index from log-probabilities via inverse-CDF in log space.

    Args:
        x: log-probabilities (unnormalized is ok); NaN entries are
           treated as impossible. Note: NaNs are overwritten in place.

    Returns:
        int. Sampled index.
    """
    x[np.isnan(x)] = utils.NEG_INF
    cum = np.logaddexp.accumulate(x)
    # Uniform key in the log-domain CDF; bisect finds the bucket.
    threshold = np.log(np.random.uniform()) + cum[-1]
    return bisect(cum, threshold)
|
def log_nucleus_multinomial_sample(x, size=1, nucleus_p=np.log(0.95)):
    """Nucleus (top-p) sampling in log space (Holtzman et al., 2019).

    Args:
        x: log-probabilities (unnormalized is ok) of a discrete variable.
        size (int): Number of indices to draw.
        nucleus_p (float): Log of the probability mass of the nucleus.

    Returns:
        list. ``size`` sampled indices, restricted to the nucleus.
    """
    assert nucleus_p <= 0
    if len(x) == 1:
        return [0] * size
    order = np.argsort(-x)
    cum = np.logaddexp.accumulate(x[order])
    # Smallest prefix of the sorted items covering the nucleus mass.
    last_ind = bisect(cum, nucleus_p + cum[-1])
    samples = []
    for _ in range(size):
        threshold = np.log(np.random.uniform()) + cum[last_ind]
        samples.append(order[bisect(cum, threshold)])
    return samples
|
class DummyPredictor(Predictor):
    """Deterministic fake predictor for testing.

    Posteriors come from a fixed pool of random vectors, selected by
    hashing the source sentence plus the consumed prefix, so outputs
    are reproducible for a given seed.
    """

    def __init__(self, vocab_size=10, n_cpu_threads=(- 1), seed=0):
        """Initializes a fake predictor with deterministic outputs."""
        super(DummyPredictor, self).__init__()
        self.vocab_size = vocab_size
        self.rg = np.random.default_rng(seed=seed)
        self.num_dists = 1000
        self.model_temperature = 0.5
        # Fixed pool of unnormalized score vectors.
        self.prob_dists = [self.rg.standard_normal(self.vocab_size)
                           for _ in range(self.num_dists)]

    def get_unk_probability(self, posterior):
        """Fetch posterior[utils.UNK_ID]."""
        return utils.common_get(posterior, utils.UNK_ID, utils.NEG_INF)

    def predict_next(self, prefix=None):
        """Returns a deterministic posterior for the current context."""
        context = str(self.src) + str(self.consumed if prefix is None else prefix)
        digest = int(hashlib.sha256(context.encode('utf-8')).hexdigest(), 16)
        unnorm_posterior = copy.copy(self.prob_dists[digest % self.num_dists])
        # EOS grows more attractive once the output exceeds the source length.
        unnorm_posterior[utils.EOS_ID] += ((len(self.consumed) - len(self.src))
                                           * unnorm_posterior.max()) / 2
        return utils.log_softmax(unnorm_posterior,
                                 temperature=self.model_temperature)

    def initialize(self, src_sentence):
        """Stores the source sentence and resets the history."""
        self.src = src_sentence
        self.consumed = []

    def consume(self, word):
        """Appends ``word`` to the current history."""
        self.consumed.append(word)

    def get_empty_str_prob(self):
        return self.get_initial_dist()[utils.EOS_ID].item()

    def get_initial_dist(self):
        # Distribution before any token has been consumed.
        return self.predict_next(prefix=[])

    def get_state(self):
        """The predictor state is the complete history."""
        return (self.consumed, [[]])

    def set_state(self, state):
        """The predictor state is the complete history."""
        consumed, _ = state
        self.consumed = consumed

    def is_equal(self, state1, state2):
        """Returns true if the history is the same."""
        return state1[0] == state2[0]
|
def base_init(new_args):
    """Stores the configuration globally and sets up logging/encodings.

    Args:
        new_args (object): Parsed SGNMT configuration; ``verbosity``
            selects the log level.
    """
    global args
    args = new_args
    if sys.version_info < (3, 0):
        sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
        sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
        sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
        # logging.warn is a deprecated alias of logging.warning.
        logging.warning('Library is tested with Python 3, but you are using Python 2. Expect the unexpected or switch to >3.5.')
    logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s')
    # Map the configured verbosity to a log level (default: INFO).
    levels = {'debug': logging.DEBUG,
              'info': logging.INFO,
              'warn': logging.WARN,
              'error': logging.ERROR}
    logging.getLogger().setLevel(levels.get(args.verbosity, logging.INFO))
    utils.switch_to_fairseq_indexing()
|
def add_predictor(decoder):
    """Attaches a small deterministic DummyPredictor to ``decoder``."""
    decoder.add_predictor('dummy', DummyPredictor(vocab_size=20))
|
def create_decoder():
    """Instantiates the configured decoder and attaches the dummy predictor.

    Returns:
        Decoder. Ready-to-use decoder instance; exits the process if
        construction fails.
    """
    try:
        decoder = decoding.DECODER_REGISTRY[args.decoder](args)
    except Exception as e:
        logging.fatal(('An %s has occurred while initializing the decoder: %s Stack trace: %s'
                       % (sys.exc_info()[0], e, traceback.format_exc())))
        sys.exit('Could not initialize decoder.')
    add_predictor(decoder)
    return decoder
|
def _generate_dummy_hypo():
    """Builds a placeholder hypothesis holding a single UNK token."""
    return decoding.core.Hypothesis([utils.UNK_ID], 0.0, [0.0])
|
def create_src_sentences(num_sentences=10, str_length=5):
    """Generates ``num_sentences`` random strings of length ``str_length``."""
    return [randomString(str_length) for _ in range(num_sentences)]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.