repo
stringlengths
7
90
file_url
stringlengths
81
315
file_path
stringlengths
4
228
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 14:38:15
2026-01-05 02:33:18
truncated
bool
2 classes
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/remove_trailing_cedilla.py
thefuck/rules/remove_trailing_cedilla.py
# -*- encoding: utf-8 -*-

# Fixes commands that end with a stray cedilla, which some keyboard
# layouts emit when the run key is mistyped.

CEDILLA = u"ç"


def match(command):
    """Match when the typed command ends with a trailing cedilla."""
    return command.script[-1:] == CEDILLA


def get_new_command(command):
    """Drop the final character (the cedilla) from the script."""
    return command.script[:-1]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/ln_s_order.py
thefuck/rules/ln_s_order.py
import os

from thefuck.specific.sudo import sudo_support


def _get_destination(script_parts):
    """When arguments order is wrong first argument will be destination."""
    candidates = (part for part in script_parts
                  if part not in {'ln', '-s', '--symbolic'})
    for candidate in candidates:
        if os.path.exists(candidate):
            return candidate


@sudo_support
def match(command):
    """Match `ln -s` failing with 'File exists' because args were swapped."""
    parts = command.script_parts
    if parts[0] != 'ln':
        return False
    return ({'-s', '--symbolic'}.intersection(parts)
            and 'File exists' in command.output
            and _get_destination(parts))


@sudo_support
def get_new_command(command):
    """Move the mistakenly-first destination to the end of the command."""
    destination = _get_destination(command.script_parts)
    index = command.script_parts.index(destination)
    reordered = (command.script_parts[:index]
                 + command.script_parts[index + 1:]
                 + [destination])
    return ' '.join(reordered)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/brew_link.py
thefuck/rules/brew_link.py
from thefuck.utils import for_app


@for_app('brew', at_least=2)
def match(command):
    """Match failed `brew ln`/`brew link` calls that suggest --overwrite."""
    subcommand = command.script_parts[1]
    return (subcommand in ['ln', 'link']
            and "brew link --overwrite --dry-run" in command.output)


def get_new_command(command):
    """Normalise to `brew link` and insert --overwrite --dry-run."""
    parts = list(command.script_parts)
    parts[1] = 'link'
    parts[2:2] = ['--overwrite', '--dry-run']
    return ' '.join(parts)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/cat_dir.py
thefuck/rules/cat_dir.py
import os

from thefuck.utils import for_app


@for_app('cat', at_least=1)
def match(command):
    """Match `cat` invoked on a directory."""
    if not command.output.startswith('cat: '):
        return False
    return os.path.isdir(command.script_parts[1])


def get_new_command(command):
    """List the directory with ls instead of cat-ing it."""
    return command.script.replace('cat', 'ls', 1)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/terraform_no_command.py
thefuck/rules/terraform_no_command.py
import re

from thefuck.utils import for_app

MISTAKE = r'(?<=Terraform has no command named ")([^"]+)(?="\.)'
FIX = r'(?<=Did you mean ")([^"]+)(?="\?)'


@for_app('terraform')
def match(command):
    """Match terraform's "no command named" error with a suggestion."""
    has_mistake = re.search(MISTAKE, command.output)
    has_fix = re.search(FIX, command.output)
    return has_mistake and has_fix


def get_new_command(command):
    """Swap the unknown subcommand for terraform's suggested one."""
    wrong = re.search(MISTAKE, command.output).group(0)
    suggestion = re.search(FIX, command.output).group(0)
    return command.script.replace(wrong, suggestion)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/az_cli.py
thefuck/rules/az_cli.py
import re

from thefuck.utils import for_app, replace_argument

INVALID_CHOICE = "(?=az)(?:.*): '(.*)' is not in the '.*' command group."
OPTIONS = "^The most similar choice to '.*' is:\n\\s*(.*)$"


@for_app('az')
def match(command):
    """Match azure-cli's "not in the ... command group" error."""
    output = command.output
    return "is not in the" in output and "command group" in output


def get_new_command(command):
    """Offer each similar choice azure-cli printed as a replacement."""
    mistake = re.search(INVALID_CHOICE, command.output).group(1)
    suggestions = re.findall(OPTIONS, command.output, flags=re.MULTILINE)
    return [replace_argument(command.script, mistake, suggestion)
            for suggestion in suggestions]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/vagrant_up.py
thefuck/rules/vagrant_up.py
from thefuck.shells import shell
from thefuck.utils import for_app


@for_app('vagrant')
def match(command):
    """Match vagrant telling us the machine must be brought up first."""
    return 'run `vagrant up`' in command.output.lower()


def get_new_command(command):
    """Prefix the failed command with `vagrant up` (targeted, if a machine was named)."""
    parts = command.script_parts
    machine = parts[2] if len(parts) >= 3 else None

    start_all_instances = shell.and_(u"vagrant up", command.script)
    if machine is None:
        return start_all_instances
    start_one = shell.and_(u"vagrant up {}".format(machine), command.script)
    return [start_one, start_all_instances]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/__init__.py
thefuck/rules/__init__.py
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/react_native_command_unrecognized.py
thefuck/rules/react_native_command_unrecognized.py
import re
from subprocess import Popen, PIPE

from thefuck.utils import for_app, replace_command, cache, eager


@for_app('react-native')
def match(command):
    """Match react-native's "Unrecognized command" error."""
    return re.findall(r"Unrecognized command '.*'", command.output)


@cache('package.json')
@eager
def _get_commands():
    """Yield the command names listed by `react-native --help`."""
    proc = Popen(['react-native', '--help'], stdout=PIPE)
    in_commands_section = False
    for raw_line in proc.stdout.readlines():
        line = raw_line.decode().strip()
        if not line:
            continue
        if 'Commands:' in line:
            in_commands_section = True
        elif in_commands_section:
            yield line.split(' ')[0]


def get_new_command(command):
    """Replace the unrecognized command with the closest known ones."""
    wrong = re.findall(r"Unrecognized command '(.*)'", command.output)[0]
    return replace_command(command, wrong, _get_commands())
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/grunt_task_not_found.py
thefuck/rules/grunt_task_not_found.py
import re
from subprocess import Popen, PIPE

from thefuck.utils import for_app, eager, get_closest, cache

regex = re.compile(r'Warning: Task "(.*)" not found.')


@for_app('grunt')
def match(command):
    """Match grunt's 'Task not found' warning."""
    return regex.findall(command.output)


@cache('Gruntfile.js')
@eager
def _get_all_tasks():
    """Yield task names parsed from `grunt --help` output."""
    proc = Popen(['grunt', '--help'], stdout=PIPE)
    in_tasks_section = False
    for raw_line in proc.stdout.readlines():
        line = raw_line.decode().strip()
        if 'Available tasks' in line:
            in_tasks_section = True
            continue
        if in_tasks_section and not line:
            # A blank line ends the tasks section.
            return
        # NOTE(review): like the original, this also yields pre-section
        # lines that contain a space.
        if ' ' in line:
            yield line.split(' ')[0]


def get_new_command(command):
    """Replace the misspelled task with the closest available one."""
    misspelled = regex.findall(command.output)[0].split(':')[0]
    fixed = get_closest(misspelled, _get_all_tasks())
    return command.script.replace(' {}'.format(misspelled),
                                  ' {}'.format(fixed))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_merge.py
thefuck/rules/git_merge.py
import re

from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match `git merge` failing on an unknown ref with a suggestion."""
    return ('merge' in command.script
            and ' - not something we can merge' in command.output
            and 'Did you mean this?' in command.output)


@git_support
def get_new_command(command):
    """Swap the unknown ref for the remote branch git suggested."""
    unknown = re.findall(
        r'merge: (.+) - not something we can merge', command.output)[0]
    suggested = re.findall(
        r'Did you mean this\?\n\t([^\n]+)', command.output)[0]
    return replace_argument(command.script, unknown, suggested)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_clone_git_clone.py
thefuck/rules/git_clone_git_clone.py
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match a doubled `git clone git clone ...` invocation."""
    return (' git clone ' in command.script
            and 'fatal: Too many arguments.' in command.output)


@git_support
def get_new_command(command):
    """Drop the duplicated `git clone` from the script."""
    return command.script.replace(' git clone ', ' ', 1)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/sed_unterminated_s.py
thefuck/rules/sed_unterminated_s.py
import shlex

from thefuck.shells import shell
from thefuck.utils import for_app


@for_app('sed')
def match(command):
    """Match sed complaining about an unterminated `s` command."""
    return "unterminated `s' command" in command.output


def get_new_command(command):
    """Append the missing trailing slash to each unterminated s/ expression."""
    tokens = shlex.split(command.script)
    fixed = [token + '/'
             if token.startswith(('s/', '-es/')) and token[-1] != '/'
             else token
             for token in tokens]
    return ' '.join(map(shell.quote, fixed))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/apt_get_search.py
thefuck/rules/apt_get_search.py
import re

from thefuck.specific.apt import apt_available
from thefuck.utils import for_app

enabled_by_default = apt_available


@for_app('apt-get')
def match(command):
    """Match `apt-get search`, which is not a valid apt-get subcommand."""
    return command.script.startswith('apt-get search')


def get_new_command(command):
    """Search with apt-cache instead of apt-get."""
    return re.sub(r'^apt-get', 'apt-cache', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/apt_list_upgradable.py
thefuck/rules/apt_list_upgradable.py
from thefuck.specific.apt import apt_available
from thefuck.specific.sudo import sudo_support
from thefuck.utils import for_app

enabled_by_default = apt_available


@sudo_support
@for_app('apt')
def match(command):
    """Match apt output that recommends `apt list --upgradable`."""
    return 'apt list --upgradable' in command.output


@sudo_support
def get_new_command(command):
    """Run the recommended listing command."""
    return 'apt list --upgradable'
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/long_form_help.py
thefuck/rules/long_form_help.py
from thefuck.utils import replace_argument
import re

# Regex matching a suggested help invocation in a tool's output, e.g.
# "Try 'foo --help' for more information."
help_regex = r"(?:Run|Try) '([^']+)'(?: or '[^']+')? for (?:details|more information)."


def match(command):
    """Match output that suggests a help command or mentions --help."""
    if re.search(help_regex, command.output, re.I) is not None:
        return True
    if '--help' in command.output:
        return True
    return False


def get_new_command(command):
    """Return the suggested help command, or swap -h for --help.

    Bug fix: the suggestion was previously probed case-sensitively but
    extracted case-insensitively, so output matched only by `match`'s
    case-insensitive search incorrectly fell through to the -h/--help
    replacement.  Search once, with re.I, and reuse the match object.
    """
    match_obj = re.search(help_regex, command.output, re.I)
    if match_obj is not None:
        return match_obj.group(1)
    return replace_argument(command.script, '-h', '--help')


enabled_by_default = True
priority = 5000
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/systemctl.py
thefuck/rules/systemctl.py
""" The confusion in systemctl's param order is massive. """ from thefuck.specific.sudo import sudo_support from thefuck.utils import for_app @sudo_support @for_app('systemctl') def match(command): # Catches "Unknown operation 'service'." when executing systemctl with # misordered arguments cmd = command.script_parts return (cmd and 'Unknown operation \'' in command.output and len(cmd) - cmd.index('systemctl') == 3) @sudo_support def get_new_command(command): cmd = command.script_parts[:] cmd[-1], cmd[-2] = cmd[-2], cmd[-1] return ' '.join(cmd)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/pip_install.py
thefuck/rules/pip_install.py
from thefuck.utils import for_app
from thefuck.specific.sudo import sudo_support


@sudo_support
@for_app('pip')
def match(command):
    """Match `pip install` failing with a permission error."""
    return ('pip install' in command.script
            and 'Permission denied' in command.output)


def get_new_command(command):
    """First try a --user install; if that already failed, fall back to sudo."""
    if '--user' not in command.script:
        # Attempt 1: install into the user site-packages.
        return command.script.replace(' install ', ' install --user ')
    # Attempt 2: --user did not help, so escalate with sudo (dropping --user).
    return 'sudo {}'.format(command.script.replace(' --user', ''))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/rm_dir.py
thefuck/rules/rm_dir.py
import re

from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    """Match `rm` refusing to delete a directory."""
    return ('rm' in command.script
            and 'is a directory' in command.output.lower())


@sudo_support
def get_new_command(command):
    """Re-run rm recursively (-r for hdfs, -rf otherwise)."""
    flag = '-r' if 'hdfs' in command.script else '-rf'
    return re.sub('\\brm (.*)', 'rm ' + flag + ' \\1', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/mkdir_p.py
thefuck/rules/mkdir_p.py
import re

from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    """Match mkdir failing because a parent directory is missing."""
    return ('mkdir' in command.script
            and 'No such file or directory' in command.output)


@sudo_support
def get_new_command(command):
    """Add -p so mkdir creates the missing parents too."""
    return re.sub('\\bmkdir (.*)', 'mkdir -p \\1', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/no_such_file.py
thefuck/rules/no_such_file.py
import re

from thefuck.shells import shell

patterns = (
    r"mv: cannot move '[^']*' to '([^']*)': No such file or directory",
    r"mv: cannot move '[^']*' to '([^']*)': Not a directory",
    r"cp: cannot create regular file '([^']*)': No such file or directory",
    r"cp: cannot create regular file '([^']*)': Not a directory",
)


def match(command):
    """Match mv/cp failing because the target directory does not exist."""
    return any(re.search(pattern, command.output) for pattern in patterns)


def get_new_command(command):
    """Create the missing target directory, then rerun the command."""
    for pattern in patterns:
        found = re.findall(pattern, command.output)
        if found:
            target = found[0]
            missing_dir = target[:target.rfind('/')]
            template = shell.and_('mkdir -p {}', '{}')
            return template.format(missing_dir, command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/unsudo.py
thefuck/rules/unsudo.py
# Removes a leading `sudo` when the command refuses to run as root.

patterns = ['you cannot perform this operation as root']


def match(command):
    """Match sudo'ed commands whose output complains about running as root."""
    parts = command.script_parts
    if parts and parts[0] != 'sudo':
        return False
    output = command.output.lower()
    return any(pattern in output for pattern in patterns)


def get_new_command(command):
    """Drop the leading `sudo` from the command."""
    return ' '.join(command.script_parts[1:])
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/no_command.py
thefuck/rules/no_command.py
from thefuck.utils import get_all_executables, get_close_matches, \
    get_valid_history_without_current, get_closest, which
from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    """Match an unknown executable that has close matches on the PATH."""
    binary = command.script_parts[0]
    if which(binary):
        return False
    if ('not found' not in command.output
            and 'is not recognized as' not in command.output):
        return False
    return bool(get_close_matches(binary, get_all_executables()))


def _get_used_executables(command):
    """Yield the first word of each previously-run history entry."""
    for script in get_valid_history_without_current(command):
        yield script.split(' ')[0]


@sudo_support
def get_new_command(command):
    """Suggest close matches, preferring an executable used before."""
    old_command = command.script_parts[0]

    # Prefer an already-used executable from the shell history.
    already_used = get_closest(
        old_command, _get_used_executables(command),
        fallback_to_first=False)
    new_cmds = [already_used] if already_used else []

    # Then add the remaining close matches among all executables.
    new_cmds += [cmd for cmd
                 in get_close_matches(old_command, get_all_executables())
                 if cmd not in new_cmds]

    return [command.script.replace(old_command, cmd, 1) for cmd in new_cmds]


priority = 3000
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/pacman_not_found.py
thefuck/rules/pacman_not_found.py
""" Fixes wrong package names with pacman or yaourt. For example the `llc` program is in package `llvm` so this: yay -S llc should be: yay -S llvm """ from thefuck.utils import replace_command from thefuck.specific.archlinux import get_pkgfile, archlinux_env def match(command): return (command.script_parts and (command.script_parts[0] in ('pacman', 'yay', 'pikaur', 'yaourt') or command.script_parts[0:2] == ['sudo', 'pacman']) and 'error: target not found:' in command.output) def get_new_command(command): pgr = command.script_parts[-1] return replace_command(command, pgr, get_pkgfile(pgr)) enabled_by_default, _ = archlinux_env()
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/cd_parent.py
thefuck/rules/cd_parent.py
# Fixes the classic `cd..` typo by inserting the missing space.
#
# Does not really save chars, but is fun :D
#
# Example:
# > cd..
# cd..: command not found


def match(command):
    """Match the exact `cd..` typo."""
    return command.script == 'cd..'


def get_new_command(command):
    """Insert the missing space: `cd ..`."""
    return 'cd ..'
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_two_dashes.py
thefuck/rules/git_two_dashes.py
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match git suggesting an option needs two dashes."""
    return ('error: did you mean `' in command.output
            and '` (with two dashes ?)' in command.output)


@git_support
def get_new_command(command):
    """Replace the single-dash option with the suggested two-dash form."""
    suggested = command.output.split('`')[1]
    return replace_argument(command.script, suggested[1:], suggested)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_main_master.py
thefuck/rules/git_main_master.py
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match errors mentioning a missing 'master' or 'main' branch."""
    return "'master'" in command.output or "'main'" in command.output


@git_support
def get_new_command(command):
    """Swap master <-> main in the script, whichever the error names."""
    if "'master'" in command.output:
        return command.script.replace("master", "main")
    return command.script.replace("main", "master")


priority = 1200
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/docker_login.py
thefuck/rules/docker_login.py
from thefuck.utils import for_app
from thefuck.shells import shell


@for_app('docker')
def match(command):
    """Match docker push/pull denied for lack of authentication."""
    return ('docker' in command.script
            and "access denied" in command.output
            and "may require 'docker login'" in command.output)


def get_new_command(command):
    """Log in first, then retry the original docker command."""
    return shell.and_('docker login', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/dnf_no_such_command.py
thefuck/rules/dnf_no_such_command.py
import subprocess
import re

from thefuck.specific.sudo import sudo_support
from thefuck.utils import for_app, replace_command
from thefuck.specific.dnf import dnf_available

regex = re.compile(r'No such command: (.*)\.')


@sudo_support
@for_app('dnf')
def match(command):
    """Match dnf's 'No such command' error."""
    return 'no such command' in command.output.lower()


def _parse_operations(help_text_lines):
    """Extract operation names from `dnf --help` output."""
    operation_regex = re.compile(r'^([a-z-]+) +', re.MULTILINE)
    return operation_regex.findall(help_text_lines)


def _get_operations():
    """Run `dnf --help` and parse the available operations."""
    proc = subprocess.Popen(["dnf", '--help'],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    help_text = proc.stdout.read().decode("utf-8")
    return _parse_operations(help_text)


@sudo_support
def get_new_command(command):
    """Replace the misspelled operation with the known ones."""
    misspelled = regex.findall(command.output)[0]
    return replace_command(command, misspelled, _get_operations())


enabled_by_default = dnf_available
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_rebase_merge_dir.py
thefuck/rules/git_rebase_merge_dir.py
from thefuck.utils import get_close_matches
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match a rebase blocked by an existing rebase-merge directory."""
    return (' rebase' in command.script
            and 'It seems that there is already a rebase-merge directory' in command.output
            and 'I wonder if you are in the middle of another rebase' in command.output)


@git_support
def get_new_command(command):
    """Offer continue/abort/skip plus the rm command git printed."""
    candidates = ['git rebase --continue', 'git rebase --abort',
                  'git rebase --skip']
    # git prints the `rm -fr ...` suggestion four lines from the end.
    rm_cmd = command.output.split('\n')[-4]
    candidates.append(rm_cmd.strip())
    return get_close_matches(command.script, candidates, 4, 0)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/hostscli.py
thefuck/rules/hostscli.py
import re

from thefuck.specific.sudo import sudo_support
from thefuck.utils import replace_command, for_app

no_command = "Error: No such command"
no_website = "hostscli.errors.WebsiteImportError"


@sudo_support
@for_app('hostscli')
def match(command):
    """Match hostscli failing on an unknown command or website."""
    errors = [no_command, no_website]
    for error in errors:
        if error in command.output:
            return True
    return False


@sudo_support
def get_new_command(command):
    """Suggest `hostscli websites` or the closest valid subcommand.

    Bug fix: the regex previously had no capture group, so findall
    returned the whole error line ('Error: No such command "x"')
    instead of the misspelled subcommand; replace_command could then
    never find that string in the script and returned it unchanged.
    """
    if no_website in command.output:
        return ['hostscli websites']

    misspelled_command = re.findall(
        r'Error: No such command "(.*)"', command.output)[0]
    commands = ['block', 'unblock', 'websites', 'block_all', 'unblock_all']
    return replace_command(command, misspelled_command, commands)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/gradle_wrapper.py
thefuck/rules/gradle_wrapper.py
import os

from thefuck.utils import for_app, which


@for_app('gradle')
def match(command):
    """Match `gradle` missing from PATH while a ./gradlew wrapper exists."""
    return (not which(command.script_parts[0])
            and 'not found' in command.output
            and os.path.isfile('gradlew'))


def get_new_command(command):
    """Run the wrapper script with the same arguments."""
    args = ' '.join(command.script_parts[1:])
    return u'./gradlew {}'.format(args)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_lfs_mistype.py
thefuck/rules/git_lfs_mistype.py
import re

from thefuck.utils import get_all_matched_commands, replace_command
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match a mistyped git-lfs subcommand with suggestions."""
    return 'lfs' in command.script and 'Did you mean this?' in command.output


@git_support
def get_new_command(command):
    """Replace the unknown git-lfs subcommand with the suggested ones."""
    broken = re.findall(
        r'Error: unknown command "([^"]*)" for "git-lfs"',
        command.output)[0]
    suggestions = get_all_matched_commands(
        command.output, ['Did you mean', ' for usage.'])
    return replace_command(command, broken, suggestions)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_flag_after_filename.py
thefuck/rules/git_flag_after_filename.py
import re

from thefuck.specific.git import git_support

error_pattern = "fatal: bad flag '(.*?)' used after filename"
error_pattern2 = "fatal: option '(.*?)' must come before non-option arguments"


@git_support
def match(command):
    """Match git rejecting a flag placed after a filename.

    Returns the regex match object (truthy), so get_new_command can
    reuse its captured flag.
    """
    return (re.search(error_pattern, command.output)
            or re.search(error_pattern2, command.output))


@git_support
def get_new_command(command):
    """Swap the offending flag with the filename that precedes it."""
    parts = command.script_parts[:]

    # The flag git complained about.
    bad_flag = match(command).group(1)
    flag_pos = parts.index(bad_flag)

    # The nearest non-flag argument before it is taken as the filename.
    for pos in reversed(range(flag_pos)):
        if parts[pos][0] != '-':
            file_pos = pos
            break

    parts[flag_pos], parts[file_pos] = parts[file_pos], parts[flag_pos]
    return u' '.join(parts)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_pull.py
thefuck/rules/git_pull.py
from thefuck.shells import shell
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match `git pull` asking for an upstream to be configured."""
    return 'pull' in command.script and 'set-upstream' in command.output


@git_support
def get_new_command(command):
    """Run the suggested --set-upstream command, then pull again."""
    # git prints the suggested command three lines from the end.
    suggestion = command.output.split('\n')[-3].strip()
    branch = suggestion.split(' ')[-1]
    set_upstream = (suggestion
                    .replace('<remote>', 'origin')
                    .replace('<branch>', branch))
    return shell.and_(set_upstream, command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/man_no_space.py
thefuck/rules/man_no_space.py
def match(command):
    """Match a man page request typed without a space, e.g. `mandiff`."""
    return (command.script.startswith(u'man')
            and u'command not found' in command.output.lower())


def get_new_command(command):
    """Insert the missing space after `man`."""
    return u'man {}'.format(command.script[3:])


priority = 2000
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/ag_literal.py
thefuck/rules/ag_literal.py
from thefuck.utils import for_app


@for_app('ag')
def match(command):
    """Match ag advising to re-run with -Q (literal search)."""
    return command.output.endswith('run ag with -Q\n')


def get_new_command(command):
    """Add -Q right after the ag executable."""
    return command.script.replace('ag', 'ag -Q', 1)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/python_command.py
thefuck/rules/python_command.py
from thefuck.specific.sudo import sudo_support


# Prepend 'python' when a .py script either lacks the execute
# permission or is interpreted as a shell script.
@sudo_support
def match(command):
    """Match a .py file run directly that failed to execute."""
    parts = command.script_parts
    if not parts or not parts[0].endswith('.py'):
        return False
    return ('Permission denied' in command.output
            or 'command not found' in command.output)


@sudo_support
def get_new_command(command):
    """Run the script through the python interpreter."""
    return 'python ' + command.script
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/npm_run_script.py
thefuck/rules/npm_run_script.py
from thefuck.specific.npm import npm_available, get_scripts
from thefuck.utils import for_app

enabled_by_default = npm_available


@for_app('npm')
def match(command):
    """Match npm usage errors where the argument is a package.json script."""
    if 'Usage: npm <command>' not in command.output:
        return False
    if any(part.startswith('ru') for part in command.script_parts):
        return False
    return command.script_parts[1] in get_scripts()


def get_new_command(command):
    """Insert `run-script` before the script name."""
    parts = command.script_parts[:]
    parts.insert(1, 'run-script')
    return ' '.join(parts)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_branch_exists.py
thefuck/rules/git_branch_exists.py
import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import eager


@git_support
def match(command):
    """Match `git branch`/`checkout -b` failing on an existing branch."""
    return ("fatal: A branch named '" in command.output
            and "' already exists." in command.output)


@git_support
@eager
def get_new_command(command):
    """Yield delete-and-recreate variants, then a plain checkout."""
    branch_name = re.findall(
        r"fatal: A branch named '(.+)' already exists.", command.output)[0]
    branch_name = branch_name.replace("'", r"\'")

    templates = [['git branch -d {0}', 'git branch {0}'],
                 ['git branch -d {0}', 'git checkout -b {0}'],
                 ['git branch -D {0}', 'git branch {0}'],
                 ['git branch -D {0}', 'git checkout -b {0}'],
                 ['git checkout {0}']]
    for template in templates:
        yield shell.and_(*template).format(branch_name)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/rails_migrations_pending.py
thefuck/rules/rails_migrations_pending.py
import re

from thefuck.shells import shell

SUGGESTION_REGEX = r"To resolve this issue, run:\s+(.*?)\n"


def match(command):
    """Match Rails aborting because migrations are pending."""
    return "Migrations are pending. To resolve this issue, run:" in command.output


def get_new_command(command):
    """Run the suggested migration command, then retry the original one."""
    suggested = re.search(SUGGESTION_REGEX, command.output)
    return shell.and_(suggested.group(1), command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_stash_pop.py
thefuck/rules/git_stash_pop.py
from thefuck.shells import shell
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match `git stash pop` blocked by uncommitted local changes."""
    return ('stash' in command.script
            and 'pop' in command.script
            and 'Your local changes to the following files would be overwritten by merge' in command.output)


@git_support
def get_new_command(command):
    """Stage the changes, pop the stash, then unstage everything."""
    return shell.and_('git add --update', 'git stash pop', 'git reset .')


# Make it come before the other applicable rules.
priority = 900
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/cpp11.py
thefuck/rules/cpp11.py
from thefuck.utils import for_app


@for_app('g++', 'clang++')
def match(command):
    """Match compiler errors that require the C++11 standard."""
    return ('This file requires compiler and library support for the '
            'ISO C++ 2011 standard.' in command.output
            or '-Wc++11-extensions' in command.output)


def get_new_command(command):
    """Recompile with -std=c++11."""
    return command.script + ' -std=c++11'
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/tmux.py
thefuck/rules/tmux.py
import re

from thefuck.utils import replace_command, for_app


@for_app('tmux')
def match(command):
    """Match tmux's ambiguous-command error."""
    return ('ambiguous command:' in command.output
            and 'could be:' in command.output)


def get_new_command(command):
    """Offer every candidate tmux listed for the ambiguous command."""
    parsed = re.match(r"ambiguous command: (.*), could be: (.*)",
                      command.output)
    ambiguous = parsed.group(1)
    candidates = [candidate.strip()
                  for candidate in parsed.group(2).split(',')]
    return replace_command(command, ambiguous, candidates)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/brew_reinstall.py
thefuck/rules/brew_reinstall.py
import re

from thefuck.utils import for_app

# brew prints both of these when the formula is already installed.
warning_regex = re.compile(r'Warning: (?:.(?!is ))+ is already installed and '
                           r'up-to-date')
message_regex = re.compile(r'To reinstall (?:(?!, ).)+, run `brew reinstall '
                           r'[^`]+`')


@for_app('brew', at_least=2)
def match(command):
    """Match `brew install` of a formula that is already up-to-date."""
    return ('install' in command.script
            and warning_regex.search(command.output)
            and message_regex.search(command.output))


def get_new_command(command):
    """Switch `install` to `reinstall`, as brew's own hint suggests."""
    return command.script.replace('install', 'reinstall')
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/fix_alt_space.py
thefuck/rules/fix_alt_space.py
# -*- encoding: utf-8 -*-
import re

from thefuck.specific.sudo import sudo_support


@sudo_support
def match(command):
    """Match 'command not found' caused by a non-breaking space (Alt+Space)."""
    # u'\u00a0' is the non-breaking space character.
    return ('command not found' in command.output.lower()
            and u'\u00a0' in command.script)


@sudo_support
def get_new_command(command):
    """Replace every non-breaking space with a regular space."""
    return re.sub(u'\u00a0', ' ', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_remote_seturl_add.py
thefuck/rules/git_remote_seturl_add.py
from thefuck.utils import replace_argument
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match `git remote set-url` on a remote that does not exist yet."""
    return ('set-url' in command.script
            and 'fatal: No such remote' in command.output)


def get_new_command(command):
    """Use `git remote add` instead of `set-url` for the missing remote."""
    return replace_argument(command.script, 'set-url', 'add')
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/gulp_not_task.py
thefuck/rules/gulp_not_task.py
import re
import subprocess

from thefuck.utils import replace_command, for_app, cache


@for_app('gulp')
def match(command):
    """Match gulp complaining about an unknown task."""
    return 'is not in your gulpfile' in command.output


@cache('gulpfile.js')
def get_gulp_tasks():
    """Return task names from `gulp --tasks-simple` (cached on gulpfile.js)."""
    proc = subprocess.Popen(['gulp', '--tasks-simple'],
                            stdout=subprocess.PIPE)
    # [:-1] drops each line's trailing newline.
    return [line.decode('utf-8')[:-1] for line in proc.stdout.readlines()]


def get_new_command(command):
    """Replace the misspelled task with the closest known gulp task."""
    wrong_task = re.findall(r"Task '(\w+)' is not in your gulpfile",
                            command.output)[0]
    return replace_command(command, wrong_task, get_gulp_tasks())
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_push_different_branch_names.py
thefuck/rules/git_push_different_branch_names.py
import re

from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match a push rejected because local and upstream branch names differ."""
    return ("push" in command.script
            and "The upstream branch of your current branch does not match"
            in command.output)


@git_support
def get_new_command(command):
    """Pick the exact `git push` invocation that git itself suggests."""
    return re.findall(r'^ +(git push [^\s]+ [^\s]+)',
                      command.output, re.MULTILINE)[0]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_rm_staged.py
thefuck/rules/git_rm_staged.py
from thefuck.specific.git import git_support


@git_support
def match(command):
    """Match `git rm` refused because the file has changes staged."""
    return (' rm ' in command.script
            and 'error: the following file has changes staged in the index'
            in command.output
            and 'use --cached to keep the file, or -f to force removal'
            in command.output)


@git_support
def get_new_command(command):
    """Offer both `--cached` (keep the file) and `-f` (force removal)."""
    parts = command.script_parts[:]
    insert_at = parts.index('rm') + 1

    parts.insert(insert_at, '--cached')
    suggestions = [u' '.join(parts)]

    parts[insert_at] = '-f'
    suggestions.append(u' '.join(parts))

    return suggestions
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/man.py
thefuck/rules/man.py
from thefuck.utils import for_app


@for_app('man', at_least=1)
def match(command):
    """Any failed `man` invocation is a candidate for this rule."""
    return True


def get_new_command(command):
    """Suggest another man section, or `--help` for the last argument."""
    # NOTE(review): these substring replacements touch *any* '3'/'2' in the
    # script, not only a section number -- preserved as-is for compatibility.
    if '3' in command.script:
        return command.script.replace("3", "2")
    if '2' in command.script:
        return command.script.replace("2", "3")

    last_arg = command.script_parts[-1]
    help_command = last_arg + ' --help'

    # If there are no man pages for last_arg, suggest `last_arg --help` instead.
    # Otherwise, suggest `--help` after suggesting other man page sections.
    if command.output.strip() == 'No manual entry for ' + last_arg:
        return [help_command]

    section2 = command.script_parts
    section3 = section2[:]
    # The padded ' 2 ' / ' 3 ' entries make the empty-string join come out
    # as a normally spaced command line.
    section2.insert(1, ' 2 ')
    section3.insert(1, ' 3 ')
    return [
        "".join(section3),
        "".join(section2),
        help_command,
    ]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_add.py
thefuck/rules/git_add.py
import re

from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.system import Path
from thefuck.utils import memoize


@memoize
def _get_missing_file(command):
    """Extract the pathspec git complained about, if it exists on disk."""
    pathspec = re.findall(
        r"error: pathspec '([^']*)' "
        r'did not match any file\(s\) known to git.', command.output)[0]
    if Path(pathspec).exists():
        return pathspec


@git_support
def match(command):
    """Match git failing on a pathspec for an existing but untracked file."""
    return ('did not match any file(s) known to git.' in command.output
            and _get_missing_file(command))


@git_support
def get_new_command(command):
    """Add the missing file first, then retry the original command."""
    missing_file = _get_missing_file(command)
    formatme = shell.and_('git add -- {}', '{}')
    return formatme.format(missing_file, command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/pacman.py
thefuck/rules/pacman.py
from thefuck.specific.archlinux import get_pkgfile, archlinux_env
from thefuck.shells import shell


def match(command):
    """Match 'not found' errors for commands that some package provides."""
    return 'not found' in command.output and get_pkgfile(command.script)


def get_new_command(command):
    """Install each providing package, then re-run the original command."""
    packages = get_pkgfile(command.script)
    formatme = shell.and_('{} -S {}', '{}')
    return [formatme.format(pacman, package, command.script)
            for package in packages]


# Rule is enabled only when `pkgfile` is installed; `pacman` is the
# preferred package-manager invocation (yay/pikaur/yaourt/sudo pacman).
enabled_by_default, pacman = archlinux_env()
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/git_branch_0flag.py
thefuck/rules/git_branch_0flag.py
from thefuck.shells import shell
from thefuck.specific.git import git_support
from thefuck.utils import memoize


@memoize
def first_0flag(script_parts):
    """Return the first two-char token starting with '0' (a mistyped flag)."""
    return next((p for p in script_parts if len(p) == 2 and p.startswith("0")),
                None)


@git_support
def match(command):
    """Match `git branch` invoked with a '0x' token instead of a '-x' flag."""
    return command.script_parts[1] == "branch" and first_0flag(command.script_parts)


@git_support
def get_new_command(command):
    """Fix the '0' into '-'; also delete the bogus branch if it was created."""
    branch_name = first_0flag(command.script_parts)
    fixed_flag = branch_name.replace("0", "-")
    fixed_script = command.script.replace(branch_name, fixed_flag)

    if "A branch named '" in command.output and "' already exists." in command.output:
        delete_branch = u"git branch -D {}".format(branch_name)
        return shell.and_(delete_branch, fixed_script)

    return fixed_script
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/yarn_command_not_found.py
thefuck/rules/yarn_command_not_found.py
import re
from subprocess import Popen, PIPE

from thefuck.utils import (for_app, eager, replace_command, replace_argument,
                           cache, which)

regex = re.compile(r'error Command "(.*)" not found.')


@for_app('yarn')
def match(command):
    """Match yarn's 'Command not found' error."""
    return regex.findall(command.output)


# npm subcommands and their yarn equivalents.
npm_commands = {'require': 'add'}


@eager
def _get_all_tasks():
    """Yield every command name listed in the `yarn --help` output."""
    proc = Popen(['yarn', '--help'], stdout=PIPE)
    in_commands_section = False
    for line in proc.stdout.readlines():
        line = line.decode().strip()

        if 'Commands:' in line:
            in_commands_section = True
            continue

        if in_commands_section and '- ' in line:
            yield line.split(' ')[-1]


if which('yarn'):
    _get_all_tasks = cache(which('yarn'))(_get_all_tasks)


def get_new_command(command):
    """Map known npm aliases directly; otherwise fuzzy-match yarn tasks."""
    misspelled_task = regex.findall(command.output)[0]
    if misspelled_task in npm_commands:
        yarn_command = npm_commands[misspelled_task]
        return replace_argument(command.script, misspelled_task, yarn_command)
    else:
        tasks = _get_all_tasks()
        return replace_command(command, misspelled_task, tasks)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/ls_lah.py
thefuck/rules/ls_lah.py
from thefuck.utils import for_app


@for_app('ls')
def match(command):
    """Match bare `ls` invocations that carry no flags."""
    return command.script_parts and 'ls -' not in command.script


def get_new_command(command):
    """Rewrite `ls ...` as `ls -lah ...`."""
    parts = command.script_parts[:]
    parts[0] = 'ls -lah'
    return ' '.join(parts)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/tsuru_login.py
thefuck/rules/tsuru_login.py
from thefuck.shells import shell
from thefuck.utils import for_app


@for_app('tsuru')
def match(command):
    """Match tsuru failing because the session has expired."""
    return ('not authenticated' in command.output
            and 'session has expired' in command.output)


def get_new_command(command):
    """Log in again, then retry the original tsuru command."""
    return shell.and_('tsuru login', command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/docker_not_command.py
thefuck/rules/docker_not_command.py
import re
import subprocess
from itertools import dropwhile, takewhile, islice

from thefuck.utils import replace_command, for_app, which, cache
from thefuck.specific.sudo import sudo_support


@sudo_support
@for_app('docker')
def match(command):
    """Match an unknown docker command or subcommand."""
    return 'is not a docker command' in command.output or 'Usage: docker' in command.output


def _parse_commands(lines, starts_with):
    """Return command names from the help section beginning with `starts_with`."""
    lines = dropwhile(lambda line: not line.startswith(starts_with), lines)
    lines = islice(lines, 1, None)
    lines = list(takewhile(lambda line: line.strip(), lines))
    return [line.strip().split(' ')[0] for line in lines]


def get_docker_commands():
    """Collect docker's management and regular commands from its help text."""
    proc = subprocess.Popen('docker', stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)

    # Old version docker returns its output to stdout, while newer version
    # returns to stderr.
    lines = proc.stdout.readlines() or proc.stderr.readlines()
    lines = [line.decode('utf-8') for line in lines]

    # Only newer versions of docker have management commands in the help text.
    if 'Management Commands:\n' in lines:
        management_commands = _parse_commands(lines, 'Management Commands:')
    else:
        management_commands = []
    regular_commands = _parse_commands(lines, 'Commands:')
    return management_commands + regular_commands


if which('docker'):
    get_docker_commands = cache(which('docker'))(get_docker_commands)


@sudo_support
def get_new_command(command):
    """Replace the wrong (sub)command with the closest known docker command."""
    if 'Usage:' in command.output and len(command.script_parts) > 1:
        management_subcommands = _parse_commands(command.output.split('\n'),
                                                 'Commands:')
        return replace_command(command, command.script_parts[2],
                               management_subcommands)

    wrong_command = re.findall(
        r"docker: '(\w+)' is not a docker command.", command.output)[0]
    return replace_command(command, wrong_command, get_docker_commands())
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/rules/python_module_error.py
thefuck/rules/python_module_error.py
import re

from thefuck.shells import shell

MISSING_MODULE = r"ModuleNotFoundError: No module named '([^']+)'"


def match(command):
    """Match a Python ModuleNotFoundError in the command output."""
    return "ModuleNotFoundError: No module named '" in command.output


def get_new_command(command):
    """pip-install the missing module, then retry the original command."""
    missing_module = re.findall(MISSING_MODULE, command.output)[0]
    return shell.and_("pip install {}".format(missing_module), command.script)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/nix.py
thefuck/specific/nix.py
from thefuck.utils import which

# `True` when the `nix` binary is on PATH.
nix_available = bool(which('nix'))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/archlinux.py
thefuck/specific/archlinux.py
""" This file provide some utility functions for Arch Linux specific rules.""" import subprocess from .. import utils @utils.memoize def get_pkgfile(command): """ Gets the packages that provide the given command using `pkgfile`. If the command is of the form `sudo foo`, searches for the `foo` command instead. """ try: command = command.strip() if command.startswith('sudo '): command = command[5:] command = command.split(" ")[0] packages = subprocess.check_output( ['pkgfile', '-b', '-v', command], universal_newlines=True, stderr=utils.DEVNULL ).splitlines() return [package.split()[0] for package in packages] except subprocess.CalledProcessError as err: if err.returncode == 1 and err.output == "": return [] else: raise err def archlinux_env(): if utils.which('yay'): pacman = 'yay' elif utils.which('pikaur'): pacman = 'pikaur' elif utils.which('yaourt'): pacman = 'yaourt' elif utils.which('pacman'): pacman = 'sudo pacman' else: return False, None enabled_by_default = utils.which('pkgfile') return enabled_by_default, pacman
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/git.py
thefuck/specific/git.py
import re

from decorator import decorator

from ..utils import is_app
from ..shells import shell


@decorator
def git_support(fn, command):
    """Resolves git aliases and supports testing for both git and hub."""
    # supports GitHub's `hub` command
    # which is recommended to be used with `alias git=hub`
    # but at this point, shell aliases have already been resolved
    if not is_app(command, 'git', 'hub'):
        return False

    # perform git aliases expansion
    if command.output and 'trace: alias expansion:' in command.output:
        search = re.search("trace: alias expansion: ([^ ]*) => ([^\n]*)",
                           command.output)
        alias = search.group(1)

        # by default git quotes everything, for example:
        #     'commit' '--amend'
        # which is surprising and does not allow to easily test for
        # eg. 'git commit'
        expansion = ' '.join(shell.quote(part)
                             for part in shell.split_command(search.group(2)))
        new_script = re.sub(r"\b{}\b".format(alias), expansion, command.script)

        command = command.update(script=new_script)

    return fn(command)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/apt.py
thefuck/specific/apt.py
from thefuck.utils import which

# `True` when `apt-get` is installed on this system.
apt_available = bool(which('apt-get'))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/sudo.py
thefuck/specific/sudo.py
import six
from decorator import decorator


@decorator
def sudo_support(fn, command):
    """Removes sudo before calling fn and adds it after."""
    if not command.script.startswith('sudo '):
        return fn(command)

    result = fn(command.update(script=command.script[5:]))

    # Re-prepend `sudo` to whatever the wrapped rule produced.
    if result and isinstance(result, six.string_types):
        return u'sudo {}'.format(result)
    elif isinstance(result, list):
        return [u'sudo {}'.format(x) for x in result]
    else:
        return result
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/npm.py
thefuck/specific/npm.py
import re
from subprocess import Popen, PIPE

from thefuck.utils import memoize, eager, which

# `True` when the `npm` binary is on PATH.
npm_available = bool(which('npm'))


@memoize
@eager
def get_scripts():
    """Get custom npm scripts.

    Parses `npm run-script` output: everything after the
    'available via `npm run-script`:' header that looks like an indented
    script name is yielded.
    """
    proc = Popen(['npm', 'run-script'], stdout=PIPE)
    # Fix: local was misspelled `should_yeild` in the original.
    should_yield = False
    for line in proc.stdout.readlines():
        line = line.decode()
        if 'available via `npm run-script`:' in line:
            should_yield = True
            continue

        if should_yield and re.match(r'^ [^ ]+', line):
            yield line.strip().split(' ')[0]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/dnf.py
thefuck/specific/dnf.py
from thefuck.utils import which

# `True` when the `dnf` package manager is available.
dnf_available = bool(which('dnf'))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/brew.py
thefuck/specific/brew.py
import subprocess

from ..utils import memoize, which

# `True` when Homebrew is installed.
brew_available = bool(which('brew'))


@memoize
def get_brew_path_prefix():
    """Return the Homebrew installation prefix, or None if it can't be read."""
    try:
        return subprocess.check_output(['brew', '--prefix'],
                                       universal_newlines=True).strip()
    except Exception:
        return None
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/__init__.py
thefuck/specific/__init__.py
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/specific/yum.py
thefuck/specific/yum.py
from thefuck.utils import which

# `True` when the `yum` package manager is available.
yum_available = bool(which('yum'))
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/output_readers/shell_logger.py
thefuck/output_readers/shell_logger.py
import json
import os
import socket

try:
    from shutil import get_terminal_size
except ImportError:
    from backports.shutil_get_terminal_size import get_terminal_size

import pyte

from .. import const, logs


def _get_socket_path():
    """Path of the shell-logger unix socket, taken from the environment."""
    return os.environ.get(const.SHELL_LOGGER_SOCKET_ENV)


def is_available():
    """Returns `True` if shell logger socket available.

    :rtype: bool

    """
    path = _get_socket_path()
    if not path:
        return False

    return os.path.exists(path)


def _get_last_n(n):
    """Ask the shell logger daemon for its `n` most recent commands."""
    with socket.socket(socket.AF_UNIX) as client:
        client.connect(_get_socket_path())
        request = json.dumps({
            "type": "list",
            "count": n,
        }) + '\n'
        client.sendall(request.encode('utf-8'))
        response = client.makefile().readline()
        return json.loads(response)['commands']


def _get_output_lines(output):
    """Render raw terminal output through pyte and return the screen lines."""
    lines = output.split('\n')
    screen = pyte.Screen(get_terminal_size().columns, len(lines))
    stream = pyte.Stream(screen)
    stream.feed('\n'.join(lines))
    return screen.display


def get_output(script):
    """Gets command output from shell logger."""
    with logs.debug_time(u'Read output from external shell logger'):
        for entry in _get_last_n(const.SHELL_LOGGER_LIMIT):
            if entry['command'] == script:
                rendered = _get_output_lines(entry['output'])
                return '\n'.join(rendered).strip()

        logs.warn("Output isn't available in shell logger")
        return None
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/output_readers/read_log.py
thefuck/output_readers/read_log.py
import os
import shlex
import mmap
import re

try:
    from shutil import get_terminal_size
except ImportError:
    from backports.shutil_get_terminal_size import get_terminal_size

import six
import pyte

from ..exceptions import ScriptNotInLog
from .. import const, logs


def _group_by_calls(log):
    """Yield (prompt_line, lines) groups, one group per shell command.

    Multi-line PS1 prompts are handled by counting down `ps1_counter`
    so continuation lines of the prompt stay in the same group.
    """
    ps1 = os.environ['PS1']
    ps1_newlines = ps1.count('\\n') + ps1.count('\n')
    ps1_counter = 0

    script_line = None
    lines = []
    for line in log:
        if const.USER_COMMAND_MARK in line or ps1_counter > 0:
            if script_line and ps1_counter == 0:
                yield script_line, lines

            if ps1_newlines > 0:
                if ps1_counter <= 0:
                    ps1_counter = ps1_newlines
                else:
                    ps1_counter -= 1

            script_line = line
            lines = [line]
        elif script_line is not None:
            lines.append(line)

    if script_line:
        yield script_line, lines


def _get_script_group_lines(grouped, script):
    """Return lines of the most recent group whose prompt contains `script`."""
    if six.PY2:
        script = script.encode('utf-8')

    parts = shlex.split(script)

    for script_line, lines in reversed(grouped):
        if all(part in script_line for part in parts):
            return lines

    raise ScriptNotInLog


def _get_output_lines(script, log_file):
    """Render the script's captured output through a pyte screen."""
    data = log_file.read().decode()
    data = re.sub(r'\x00+$', '', data)
    lines = data.split('\n')
    grouped = list(_group_by_calls(lines))
    script_lines = _get_script_group_lines(grouped, script)

    screen = pyte.Screen(get_terminal_size().columns, len(script_lines))
    stream = pyte.Stream(screen)
    stream.feed('\n'.join(script_lines))
    return screen.display


def _skip_old_lines(log_file):
    """Seek past everything except the last LOG_SIZE_IN_BYTES of the log."""
    size = os.path.getsize(os.environ['THEFUCK_OUTPUT_LOG'])
    if size > const.LOG_SIZE_IN_BYTES:
        log_file.seek(size - const.LOG_SIZE_IN_BYTES)


def get_output(script):
    """Reads script output from log.

    :type script: str
    :rtype: str | None

    """
    if six.PY2:
        logs.warn('Experimental instant mode is Python 3+ only')
        return None

    if 'THEFUCK_OUTPUT_LOG' not in os.environ:
        logs.warn("Output log isn't specified")
        return None

    if const.USER_COMMAND_MARK not in os.environ.get('PS1', ''):
        logs.warn(
            "PS1 doesn't contain user command mark, please ensure "
            "that PS1 is not changed after The Fuck alias initialization")
        return None

    try:
        with logs.debug_time(u'Read output from log'):
            fd = os.open(os.environ['THEFUCK_OUTPUT_LOG'], os.O_RDONLY)
            buffer = mmap.mmap(fd, const.LOG_SIZE_IN_BYTES, mmap.MAP_SHARED,
                               mmap.PROT_READ)
            _skip_old_lines(buffer)
            lines = _get_output_lines(script, buffer)
            output = '\n'.join(lines).strip()
            logs.debug(u'Received output: {}'.format(output))
            return output
    except OSError:
        logs.warn("Can't read output log")
        return None
    except ScriptNotInLog:
        logs.warn("Script not found in output log")
        return None
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/output_readers/__init__.py
thefuck/output_readers/__init__.py
from ..conf import settings
from . import read_log, rerun, shell_logger


def get_output(script, expanded):
    """Get output of the script.

    :param script: Console script.
    :type script: str
    :param expanded: Console script with expanded aliases.
    :type expanded: str
    :rtype: str

    """
    # Prefer the external shell logger, then the instant-mode log,
    # and only rerun the command as a last resort.
    if shell_logger.is_available():
        return shell_logger.get_output(script)

    if settings.instant_mode:
        return read_log.get_output(script)
    else:
        return rerun.get_output(script, expanded)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/output_readers/rerun.py
thefuck/output_readers/rerun.py
import os
import shlex

import six
from subprocess import Popen, PIPE, STDOUT
from psutil import AccessDenied, Process, TimeoutExpired

from .. import logs
from ..conf import settings


def _kill_process(proc):
    """Tries to kill the process otherwise just logs a debug message, the
    process will be killed when thefuck terminates.

    :type proc: Process

    """
    try:
        proc.kill()
    except AccessDenied:
        logs.debug(u'Rerun: process PID {} ({}) could not be terminated'.format(
            proc.pid, proc.exe()))


def _wait_output(popen, is_slow):
    """Returns `True` if we can get output of the command in the
    `settings.wait_command` time.

    Command will be killed if it wasn't finished in the time.

    :type popen: Popen
    :rtype: bool

    """
    proc = Process(popen.pid)
    try:
        proc.wait(settings.wait_slow_command if is_slow
                  else settings.wait_command)
        return True
    except TimeoutExpired:
        # Kill the whole tree, children first.
        for child in proc.children(recursive=True):
            _kill_process(child)
        _kill_process(proc)
        return False


def get_output(script, expanded):
    """Runs the script and obtains stdin/stderr.

    :type script: str
    :type expanded: str
    :rtype: str | None

    """
    env = dict(os.environ)
    env.update(settings.env)

    if six.PY2:
        expanded = expanded.encode('utf-8')

    split_expand = shlex.split(expanded)
    is_slow = split_expand[0] in settings.slow_commands if split_expand else False
    with logs.debug_time(u'Call: {}; with env: {}; is slow: {}'.format(
            script, env, is_slow)):
        result = Popen(expanded, shell=True, stdin=PIPE,
                       stdout=PIPE, stderr=STDOUT, env=env)
        if _wait_output(result, is_slow):
            output = result.stdout.read().decode('utf-8', errors='replace')
            logs.debug(u'Received output: {}'.format(output))
            return output
        else:
            logs.debug(u'Execution timed out!')
            return None
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/zsh.py
thefuck/shells/zsh.py
from time import time
import os
from subprocess import Popen, PIPE
from tempfile import gettempdir
from uuid import uuid4

from ..conf import settings
from ..const import ARGUMENT_PLACEHOLDER, USER_COMMAND_MARK
from ..utils import DEVNULL, memoize
from .generic import Generic


class Zsh(Generic):
    friendly_name = 'ZSH'

    def app_alias(self, alias_name):
        # It is VERY important to have the variables declared WITHIN the function
        return '''
            {name} () {{
                TF_PYTHONIOENCODING=$PYTHONIOENCODING;
                export TF_SHELL=zsh;
                export TF_ALIAS={name};
                TF_SHELL_ALIASES=$(alias);
                export TF_SHELL_ALIASES;
                TF_HISTORY="$(fc -ln -10)";
                export TF_HISTORY;
                export PYTHONIOENCODING=utf-8;
                TF_CMD=$(
                    thefuck {argument_placeholder} $@
                ) && eval $TF_CMD;
                unset TF_HISTORY;
                export PYTHONIOENCODING=$TF_PYTHONIOENCODING;
                {alter_history}
            }}
        '''.format(
            name=alias_name,
            argument_placeholder=ARGUMENT_PLACEHOLDER,
            alter_history=('test -n "$TF_CMD" && print -s $TF_CMD'
                           if settings.alter_history else ''))

    def instant_mode_alias(self, alias_name):
        if os.environ.get('THEFUCK_INSTANT_MODE', '').lower() == 'true':
            # Invisible marker embedded in PS1 so the log reader can find
            # the start of each user command.
            mark = ('%{' + USER_COMMAND_MARK + '\b' * len(USER_COMMAND_MARK)
                    + '%}')
            return '''
                export PS1="{user_command_mark}$PS1";
                {app_alias}
            '''.format(user_command_mark=mark,
                       app_alias=self.app_alias(alias_name))
        else:
            log_path = os.path.join(
                gettempdir(), 'thefuck-script-log-{}'.format(uuid4().hex))
            return '''
                export THEFUCK_INSTANT_MODE=True;
                export THEFUCK_OUTPUT_LOG={log};
                thefuck --shell-logger {log};
                rm -f {log};
                exit
            '''.format(log=log_path)

    def _parse_alias(self, alias):
        """Split one `alias` output line into (name, unquoted value)."""
        name, value = alias.split('=', 1)
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    @memoize
    def get_aliases(self):
        """Aliases exported by the zsh alias function via TF_SHELL_ALIASES."""
        raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n')
        return dict(self._parse_alias(alias)
                    for alias in raw_aliases if alias and '=' in alias)

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.zsh_history'))

    def _get_history_line(self, command_script):
        # zsh extended-history format: ': <timestamp>:0;<command>'
        return u': {}:0;{}\n'.format(int(time()), command_script)

    def _script_from_history(self, line):
        if ';' in line:
            return line.split(';', 1)[1]
        else:
            return ''

    def how_to_configure(self):
        return self._create_shell_configuration(
            content=u'eval $(thefuck --alias)',
            path='~/.zshrc',
            reload='source ~/.zshrc')

    def _get_version(self):
        """Returns the version of the current shell"""
        proc = Popen(['zsh', '-c', 'echo $ZSH_VERSION'],
                     stdout=PIPE, stderr=DEVNULL)
        return proc.stdout.read().decode('utf-8').strip()
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/generic.py
thefuck/shells/generic.py
import io
import os
import shlex

import six
from collections import namedtuple

from ..logs import warn
from ..utils import memoize
from ..conf import settings
from ..system import Path

ShellConfiguration = namedtuple('ShellConfiguration', (
    'content', 'path', 'reload', 'can_configure_automatically'))


class Generic(object):
    friendly_name = 'Generic Shell'

    def get_aliases(self):
        return {}

    def _expand_aliases(self, command_script):
        """Expand a leading alias (first word only) in the script."""
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary in aliases:
            return command_script.replace(binary, aliases[binary], 1)
        else:
            return command_script

    def from_shell(self, command_script):
        """Prepares command before running in app."""
        return self._expand_aliases(command_script)

    def to_shell(self, command_script):
        """Prepares command for running in shell."""
        return command_script

    def app_alias(self, alias_name):
        return """alias {0}='eval "$(TF_ALIAS={0} PYTHONIOENCODING=utf-8 """ \
               """thefuck "$(fc -ln -1)")"'""".format(alias_name)

    def instant_mode_alias(self, alias_name):
        warn("Instant mode not supported by your shell")
        return self.app_alias(alias_name)

    def _get_history_file_name(self):
        return ''

    def _get_history_line(self, command_script):
        return ''

    @memoize
    def get_history(self):
        return list(self._get_history_lines())

    def _get_history_lines(self):
        """Returns list of history entries."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with io.open(history_file_name, 'r',
                         encoding='utf-8', errors='ignore') as history_file:

                lines = history_file.readlines()
                if settings.history_limit:
                    lines = lines[-settings.history_limit:]

                for line in lines:
                    prepared = self._script_from_history(line) \
                                   .strip()
                    if prepared:
                        yield prepared

    def and_(self, *commands):
        return u' && '.join(commands)

    def or_(self, *commands):
        return u' || '.join(commands)

    def how_to_configure(self):
        return

    def split_command(self, command):
        """Split the command using shell-like syntax."""
        encoded = self.encode_utf8(command)

        try:
            # `??` temporarily protects escaped spaces from shlex.
            splitted = [s.replace("??", "\\ ")
                        for s in shlex.split(encoded.replace('\\ ', '??'))]
        except ValueError:
            splitted = encoded.split(' ')

        return self.decode_utf8(splitted)

    def encode_utf8(self, command):
        if six.PY2:
            return command.encode('utf8')
        return command

    def decode_utf8(self, command_parts):
        if six.PY2:
            return [s.decode('utf8') for s in command_parts]
        return command_parts

    def quote(self, s):
        """Return a shell-escaped version of the string s."""
        if six.PY2:
            from pipes import quote
        else:
            from shlex import quote
        return quote(s)

    def _script_from_history(self, line):
        return line

    def put_to_history(self, command):
        """Adds fixed command to shell history.

        In most of shells we change history on shell-level, but not all
        shells support it (Fish).

        """

    def get_builtin_commands(self):
        """Returns shells builtin commands."""
        return ['alias', 'bg', 'bind', 'break', 'builtin', 'case', 'cd',
                'command', 'compgen', 'complete', 'continue', 'declare',
                'dirs', 'disown', 'echo', 'enable', 'eval', 'exec', 'exit',
                'export', 'fc', 'fg', 'getopts', 'hash', 'help', 'history',
                'if', 'jobs', 'kill', 'let', 'local', 'logout', 'popd',
                'printf', 'pushd', 'pwd', 'read', 'readonly', 'return', 'set',
                'shift', 'shopt', 'source', 'suspend', 'test', 'times', 'trap',
                'type', 'typeset', 'ulimit', 'umask', 'unalias', 'unset',
                'until', 'wait', 'while']

    def _get_version(self):
        """Returns the version of the current shell"""
        return ''

    def info(self):
        """Returns the name and version of the current shell"""
        try:
            version = self._get_version()
        except Exception as e:
            warn(u'Could not determine shell version: {}'.format(e))
            version = ''
        return u'{} {}'.format(self.friendly_name, version).rstrip()

    def _create_shell_configuration(self, content, path, reload):
        return ShellConfiguration(
            content=content,
            path=path,
            reload=reload,
            can_configure_automatically=Path(path).expanduser().exists())
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/powershell.py
thefuck/shells/powershell.py
from subprocess import Popen, PIPE
from ..utils import DEVNULL
from .generic import Generic, ShellConfiguration


class Powershell(Generic):
    """Shell integration for Windows PowerShell and PowerShell Core (pwsh)."""

    friendly_name = 'PowerShell'

    def app_alias(self, alias_name):
        # Emits a PowerShell function named `alias_name` that takes the most
        # recent history entry, feeds it to `thefuck`, and either strips a
        # leading "echo " (dry-run style output) or executes the fix via iex.
        # NOTE(review): inner indentation of this script may have been wider
        # in the original source — confirm against upstream before reformatting.
        return 'function ' + alias_name + ' {\n' \
               ' $history = (Get-History -Count 1).CommandLine;\n' \
               ' if (-not [string]::IsNullOrWhiteSpace($history)) {\n' \
               ' $fuck = $(thefuck $args $history);\n' \
               ' if (-not [string]::IsNullOrWhiteSpace($fuck)) {\n' \
               ' if ($fuck.StartsWith("echo")) { $fuck = $fuck.Substring(5); }\n' \
               ' else { iex "$fuck"; }\n' \
               ' }\n' \
               ' }\n' \
               ' [Console]::ResetColor() \n' \
               '}\n'

    def and_(self, *commands):
        # PowerShell chains boolean expressions with `-and`; each command is
        # parenthesised so operator precedence cannot change the meaning.
        return u' -and '.join('({0})'.format(c) for c in commands)

    def how_to_configure(self):
        # `$profile` resolution differs per host/edition, so automatic
        # configuration is explicitly disabled.
        return ShellConfiguration(
            content=u'iex "$(thefuck --alias)"',
            path='$profile',
            reload='. $profile',
            can_configure_automatically=False)

    def _get_version(self):
        """Returns the version of the current shell"""
        try:
            # Windows PowerShell first: print the PSVersion table entry.
            proc = Popen(
                ['powershell.exe', '$PSVersionTable.PSVersion'],
                stdout=PIPE,
                stderr=DEVNULL)
            version = proc.stdout.read().decode('utf-8').rstrip().split('\n')
            # The last line holds the version components as whitespace-
            # separated fields (e.g. "5 1 19041 1682" -> "5.1.19041.1682").
            return '.'.join(version[-1].split())
        except IOError:
            # powershell.exe is unavailable (e.g. Linux/macOS) -- fall back
            # to PowerShell Core, which reports its version directly.
            proc = Popen(['pwsh', '--version'], stdout=PIPE, stderr=DEVNULL)
            return proc.stdout.read().decode('utf-8').split()[-1]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/__init__.py
thefuck/shells/__init__.py
"""Package with shell specific actions, each shell class should implement `from_shell`, `to_shell`, `app_alias`, `put_to_history` and `get_aliases` methods. """ import os from psutil import Process from .bash import Bash from .fish import Fish from .generic import Generic from .tcsh import Tcsh from .zsh import Zsh from .powershell import Powershell shells = {'bash': Bash, 'fish': Fish, 'zsh': Zsh, 'csh': Tcsh, 'tcsh': Tcsh, 'powershell': Powershell, 'pwsh': Powershell} def _get_shell_from_env(): name = os.environ.get('TF_SHELL') if name in shells: return shells[name]() def _get_shell_from_proc(): proc = Process(os.getpid()) while proc is not None and proc.pid > 0: try: name = proc.name() except TypeError: name = proc.name name = os.path.splitext(name)[0] if name in shells: return shells[name]() try: proc = proc.parent() except TypeError: proc = proc.parent return Generic() shell = _get_shell_from_env() or _get_shell_from_proc()
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/tcsh.py
thefuck/shells/tcsh.py
from subprocess import Popen, PIPE
from time import time
import os
from ..utils import DEVNULL, memoize
from .generic import Generic


class Tcsh(Generic):
    """Shell integration for tcsh (and csh)."""

    friendly_name = 'Tcsh'

    def app_alias(self, alias_name):
        # tcsh alias that exports the shell/alias names, grabs the previous
        # command from history, and evaluates thefuck's suggestion.
        template = ("alias {0} 'setenv TF_SHELL tcsh && setenv TF_ALIAS {0} && "
                    "set fucked_cmd=`history -h 2 | head -n 1` && "
                    "eval `thefuck ${{fucked_cmd}}`'")
        return template.format(alias_name)

    def _parse_alias(self, alias):
        # `tcsh -ic alias` prints one "name<TAB>value" pair per line.
        name, value = alias.split("\t", 1)
        return name, value

    @memoize
    def get_aliases(self):
        """Query tcsh for its aliases and return them as a dict."""
        proc = Popen(['tcsh', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
        output = proc.stdout.read().decode('utf-8')
        pairs = (self._parse_alias(line)
                 for line in output.split('\n')
                 if line and '\t' in line)
        return dict(pairs)

    def _get_history_file_name(self):
        # Honour $HISTFILE, defaulting to the classic ~/.history location.
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.history'))

    def _get_history_line(self, command_script):
        # tcsh's savehist format: a "#+<epoch>" timestamp line, then the command.
        return u'#+{}\n{}\n'.format(int(time()), command_script)

    def how_to_configure(self):
        return self._create_shell_configuration(
            content=u'eval `thefuck --alias`',
            path='~/.tcshrc',
            reload='tcsh')

    def _get_version(self):
        """Returns the version of the current shell"""
        proc = Popen(['tcsh', '--version'], stdout=PIPE, stderr=DEVNULL)
        return proc.stdout.read().decode('utf-8').split()[1]
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/fish.py
thefuck/shells/fish.py
from subprocess import Popen, PIPE
from time import time
import os
import sys
import six
from .. import logs
from ..conf import settings
from ..const import ARGUMENT_PLACEHOLDER
from ..utils import DEVNULL, cache
from .generic import Generic


@cache('~/.config/fish/config.fish', '~/.config/fish/functions')
def _get_functions(overridden):
    # Ask fish for its defined functions; anything in `overridden` is
    # excluded so those names fall through to the real binaries.
    proc = Popen(['fish', '-ic', 'functions'], stdout=PIPE, stderr=DEVNULL)
    functions = proc.stdout.read().decode('utf-8').strip().split('\n')
    return {func: func for func in functions if func not in overridden}


@cache('~/.config/fish/config.fish')
def _get_aliases(overridden):
    # Parse `alias` output; entries may use either "name value" or
    # "name=value" form, hence trying both separators.
    aliases = {}
    proc = Popen(['fish', '-ic', 'alias'], stdout=PIPE, stderr=DEVNULL)
    alias_out = proc.stdout.read().decode('utf-8').strip()
    if not alias_out:
        return aliases
    for alias in alias_out.split('\n'):
        for separator in (' ', '='):
            split_alias = alias.replace('alias ', '', 1).split(separator, 1)
            if len(split_alias) == 2:
                name, value = split_alias
                break
        else:
            # Neither separator produced a name/value pair -- skip the line.
            continue
        if name not in overridden:
            aliases[name] = value
    return aliases


class Fish(Generic):
    """Shell integration for the Fish shell."""

    friendly_name = 'Fish Shell'

    def _get_overridden_aliases(self):
        # Aliases that must NOT be expanded, merged from a built-in default
        # set and the THEFUCK_OVERRIDDEN_ALIASES / TF_OVERRIDDEN_ALIASES
        # environment variables (comma-separated).
        overridden = os.environ.get('THEFUCK_OVERRIDDEN_ALIASES',
                                    os.environ.get('TF_OVERRIDDEN_ALIASES', ''))
        default = {'cd', 'grep', 'ls', 'man', 'open'}
        for alias in overridden.split(','):
            default.add(alias.strip())
        return sorted(default)

    def app_alias(self, alias_name):
        if settings.alter_history:
            # Replace the broken entry in fish history with the fixed one.
            # NOTE(review): leading whitespace inside these script strings may
            # have been wider in the original source.
            alter_history = (' builtin history delete --exact'
                             ' --case-sensitive -- $fucked_up_command\n'
                             ' builtin history merge\n')
        else:
            alter_history = ''
        # It is VERY important to have the variables declared WITHIN the alias
        return ('function {0} -d "Correct your previous console command"\n'
                ' set -l fucked_up_command $history[1]\n'
                ' env TF_SHELL=fish TF_ALIAS={0} PYTHONIOENCODING=utf-8'
                ' thefuck $fucked_up_command {2} $argv | read -l unfucked_command\n'
                ' if [ "$unfucked_command" != "" ]\n'
                ' eval $unfucked_command\n{1}'
                ' end\n'
                'end').format(alias_name, alter_history, ARGUMENT_PLACEHOLDER)

    def get_aliases(self):
        # Functions and aliases are merged; aliases win on name clashes.
        overridden = self._get_overridden_aliases()
        functions = _get_functions(overridden)
        raw_aliases = _get_aliases(overridden)
        functions.update(raw_aliases)
        return functions

    def _expand_aliases(self, command_script):
        aliases = self.get_aliases()
        binary = command_script.split(' ')[0]
        if binary in aliases and aliases[binary] != binary:
            # Alias with a distinct expansion: substitute it in place.
            return command_script.replace(binary, aliases[binary], 1)
        elif binary in aliases:
            # Self-referential entry (a fish function): run the whole command
            # through an interactive fish so the function is available.
            return u'fish -ic "{}"'.format(command_script.replace('"', r'\"'))
        else:
            return command_script

    def _get_history_file_name(self):
        return os.path.expanduser('~/.config/fish/fish_history')

    def _get_history_line(self, command_script):
        # fish_history is YAML-like: a "- cmd:" entry plus epoch timestamp.
        return u'- cmd: {}\n when: {}\n'.format(command_script, int(time()))

    def _script_from_history(self, line):
        # Extract the command text from a "- cmd: ..." history line; other
        # lines (timestamps, paths) yield an empty string.
        if '- cmd: ' in line:
            return line.split('- cmd: ', 1)[1]
        else:
            return ''

    def and_(self, *commands):
        # fish (pre-3.0 syntax) chains with "; and".
        return u'; and '.join(commands)

    def or_(self, *commands):
        return u'; or '.join(commands)

    def how_to_configure(self):
        return self._create_shell_configuration(
            content=u"thefuck --alias | source",
            path='~/.config/fish/config.fish',
            reload='fish')

    def _get_version(self):
        """Returns the version of the current shell"""
        proc = Popen(['fish', '--version'], stdout=PIPE, stderr=DEVNULL)
        return proc.stdout.read().decode('utf-8').split()[-1]

    def put_to_history(self, command):
        # Best effort: failing to write history must not crash the app.
        try:
            return self._put_to_history(command)
        except IOError:
            logs.exception("Can't update history", sys.exc_info())

    def _put_to_history(self, command_script):
        """Puts command script to shell history."""
        history_file_name = self._get_history_file_name()
        if os.path.isfile(history_file_name):
            with open(history_file_name, 'a') as history:
                entry = self._get_history_line(command_script)
                if six.PY2:
                    history.write(entry.encode('utf-8'))
                else:
                    history.write(entry)
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
nvbn/thefuck
https://github.com/nvbn/thefuck/blob/c7e7e1d884d3bb241ea6448f72a989434c2a35ec/thefuck/shells/bash.py
thefuck/shells/bash.py
import os
from subprocess import Popen, PIPE
from tempfile import gettempdir
from uuid import uuid4
from ..conf import settings
from ..const import ARGUMENT_PLACEHOLDER, USER_COMMAND_MARK
from ..utils import DEVNULL, memoize
from .generic import Generic


class Bash(Generic):
    """Shell integration for Bash."""

    friendly_name = 'Bash'

    def app_alias(self, alias_name):
        # It is VERY important to have the variables declared WITHIN the function
        return '''
            function {name} () {{
                TF_PYTHONIOENCODING=$PYTHONIOENCODING;
                export TF_SHELL=bash;
                export TF_ALIAS={name};
                export TF_SHELL_ALIASES=$(alias);
                export TF_HISTORY=$(fc -ln -10);
                export PYTHONIOENCODING=utf-8;
                TF_CMD=$(
                    thefuck {argument_placeholder} "$@"
                ) && eval "$TF_CMD";
                unset TF_HISTORY;
                export PYTHONIOENCODING=$TF_PYTHONIOENCODING;
                {alter_history}
            }}
        '''.format(
            name=alias_name,
            argument_placeholder=ARGUMENT_PLACEHOLDER,
            # Record the fixed command in shell history when enabled.
            alter_history=('history -s $TF_CMD;'
                           if settings.alter_history else ''))

    def instant_mode_alias(self, alias_name):
        if os.environ.get('THEFUCK_INSTANT_MODE', '').lower() == 'true':
            # Second invocation (inside the logged shell): prefix the prompt
            # with an invisible marker so the logger can locate user commands,
            # then install the regular alias.
            mark = USER_COMMAND_MARK + '\b' * len(USER_COMMAND_MARK)
            return '''
                export PS1="{user_command_mark}$PS1";
                {app_alias}
            '''.format(user_command_mark=mark,
                       app_alias=self.app_alias(alias_name))
        else:
            # First invocation: re-exec the session under the shell logger,
            # capturing output into a temp log that is removed on exit.
            log_path = os.path.join(
                gettempdir(), 'thefuck-script-log-{}'.format(uuid4().hex))
            return '''
                export THEFUCK_INSTANT_MODE=True;
                export THEFUCK_OUTPUT_LOG={log};
                thefuck --shell-logger {log};
                rm {log};
                exit
            '''.format(log=log_path)

    def _parse_alias(self, alias):
        # `alias` output lines look like: alias name='value'
        name, value = alias.replace('alias ', '', 1).split('=', 1)
        if value[0] == value[-1] == '"' or value[0] == value[-1] == "'":
            value = value[1:-1]
        return name, value

    @memoize
    def get_aliases(self):
        """Return aliases exported by the app_alias wrapper via TF_SHELL_ALIASES."""
        raw_aliases = os.environ.get('TF_SHELL_ALIASES', '').split('\n')
        return dict(self._parse_alias(alias)
                    for alias in raw_aliases if alias and '=' in alias)

    def _get_history_file_name(self):
        return os.environ.get("HISTFILE",
                              os.path.expanduser('~/.bash_history'))

    def _get_history_line(self, command_script):
        return u'{}\n'.format(command_script)

    def how_to_configure(self):
        # BUG FIX: the previous code tested `if os.path.join(...)`, which is
        # always truthy (join returns a non-empty string), so '~/.bashrc' was
        # chosen unconditionally and the other branches were unreachable.
        # We must check whether the file actually exists.
        if os.path.exists(os.path.join(os.path.expanduser('~'), '.bashrc')):
            config = '~/.bashrc'
        elif os.path.exists(os.path.join(os.path.expanduser('~'),
                                         '.bash_profile')):
            config = '~/.bash_profile'
        else:
            config = 'bash config'
        return self._create_shell_configuration(
            content=u'eval "$(thefuck --alias)"',
            path=config,
            reload=u'source {}'.format(config))

    def _get_version(self):
        """Returns the version of the current shell"""
        proc = Popen(['bash', '-c', 'echo $BASH_VERSION'],
                     stdout=PIPE, stderr=DEVNULL)
        return proc.stdout.read().decode('utf-8').strip()
python
MIT
c7e7e1d884d3bb241ea6448f72a989434c2a35ec
2026-01-04T14:38:15.457096Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/test_normalizer.py
tests/test_normalizer.py
import pytest

from whisper.normalizers import EnglishTextNormalizer
from whisper.normalizers.english import (
    EnglishNumberNormalizer,
    EnglishSpellingNormalizer,
)


@pytest.mark.parametrize("std", [EnglishNumberNormalizer(), EnglishTextNormalizer()])
def test_number_normalizer(std):
    """Spelled-out numbers, currency, ordinals, etc. normalize to digits."""
    # cardinal numbers
    assert std("two") == "2"
    assert std("thirty one") == "31"
    assert std("five twenty four") == "524"
    # years read as pairs of two-digit numbers
    assert std("nineteen ninety nine") == "1999"
    assert std("twenty nineteen") == "2019"
    # magnitudes (million/billion/thousand); plural suffix survives as "s"
    assert std("two point five million") == "2500000"
    assert std("four point two billions") == "4200000000s"
    assert std("200 thousand") == "200000"
    assert std("200 thousand dollars") == "$200000"
    assert std("$20 million") == "$20000000"
    assert std("€52.4 million") == "€52400000"
    assert std("£77 thousands") == "£77000s"
    # spoken digit sequences ("double o" etc.)
    assert std("two double o eight") == "2008"
    assert std("three thousand twenty nine") == "3029"
    assert std("forty three thousand two hundred sixty") == "43260"
    assert std("forty three thousand two hundred and sixty") == "43260"
    # decades and ordinals
    assert std("nineteen fifties") == "1950s"
    assert std("thirty first") == "31st"
    assert std("thirty three thousand and three hundred and thirty third") == "33333rd"
    assert std("three billion") == "3000000000"
    assert std("millions") == "1000000s"
    # dates
    assert std("july third twenty twenty") == "july 3rd 2020"
    assert std("august twenty sixth twenty twenty one") == "august 26th 2021"
    # numeric input is preserved; "point" becomes a decimal point
    assert std("3 14") == "3 14"
    assert std("3.14") == "3.14"
    assert std("3 point 2") == "3.2"
    assert std("3 point 14") == "3.14"
    assert std("fourteen point 4") == "14.4"
    # currency amounts
    assert std("two point two five dollars") == "$2.25"
    assert std("two hundred million dollars") == "$200000000"
    assert std("$20.1 million") == "$20100000"
    # percentages
    assert std("ninety percent") == "90%"
    assert std("seventy six per cent") == "76%"
    # spoken digits with double/triple and oh/zero
    assert std("double oh seven") == "007"
    assert std("double zero seven") == "007"
    assert std("nine one one") == "911"
    assert std("nine double one") == "911"
    assert std("one triple oh one") == "10001"
    # ordinal magnitudes
    assert std("two thousandth") == "2000th"
    assert std("thirty two thousandth") == "32000th"
    # signed numbers
    assert std("minus 500") == "-500"
    assert std("positive twenty thousand") == "+20000"
    # dollars-and-cents combinations; sub-dollar amounts use the cent sign
    assert std("two dollars and seventy cents") == "$2.70"
    assert std("3 cents") == "¢3"
    assert std("$0.36") == "¢36"
    assert std("three euros and sixty five cents") == "€3.65"
    # "and a half"
    assert std("three and a half million") == "3500000"
    assert std("forty eight and a half dollars") == "$48.5"
    # letter/number splitting and ordinal suffix attachment
    assert std("b747") == "b 747"
    assert std("10 th") == "10th"
    assert std("10th") == "10th"


def test_spelling_normalizer():
    """British spellings map to their American counterparts."""
    std = EnglishSpellingNormalizer()
    assert std("mobilisation") == "mobilization"
    assert std("cancelation") == "cancellation"


def test_text_normalizer():
    """Full text normalizer: contractions, units, casing, abbreviations."""
    std = EnglishTextNormalizer()
    assert std("Let's") == "let us"
    assert std("he's like") == "he is like"
    assert std("she's been like") == "she has been like"
    # unit suffixes are split off numbers
    assert std("10km") == "10 km"
    assert std("10mm") == "10 mm"
    # letters and digits are separated; output is lowercased
    assert std("RC232") == "rc 232"
    assert (
        std("Mr. Park visited Assoc. Prof. Kim Jr.")
        == "mister park visited associate professor kim junior"
    )
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/test_timing.py
tests/test_timing.py
import numpy as np
import pytest
import scipy.ndimage
import torch

from whisper.timing import dtw_cpu, dtw_cuda, median_filter

# (rows, cols) cost-matrix sizes for the DTW tests
sizes = [
    (10, 20),
    (32, 16),
    (123, 1500),
    (234, 189),
]
# tensor shapes for the median-filter tests (filtering along the last axis)
shapes = [
    (10,),
    (1, 15),
    (4, 5, 345),
    (6, 12, 240, 512),
]


@pytest.mark.parametrize("N, M", sizes)
def test_dtw(N: int, M: int):
    """Carve a random monotonic path of low cost into a random matrix and
    check that dtw_cpu recovers exactly that path."""
    # A shuffled sequence of N-1 down-steps (0) and M-1 right-steps (1)
    # describes a monotonic path from (0, 0) to (N-1, M-1).
    steps = np.concatenate([np.zeros(N - 1), np.ones(M - 1)])
    np.random.shuffle(steps)
    x = np.random.random((N, M)).astype(np.float32)

    i, j, k = 0, 0, 0
    trace = []
    while True:
        # Lower the cost along the path so it becomes the optimal alignment.
        x[i, j] -= 1
        trace.append((i, j))

        if k == len(steps):
            break

        if k + 1 < len(steps) and steps[k] != steps[k + 1]:
            # A 0-step followed by a 1-step (or vice versa) is a diagonal move.
            i += 1
            j += 1
            k += 2
            continue

        if steps[k] == 0:
            i += 1
        if steps[k] == 1:
            j += 1
        k += 1

    trace = np.array(trace).T
    dtw_trace = dtw_cpu(x)
    assert np.allclose(trace, dtw_trace)


@pytest.mark.requires_cuda
@pytest.mark.parametrize("N, M", sizes)
def test_dtw_cuda_equivalence(N: int, M: int):
    """The CUDA DTW kernel must produce the same trace as the CPU version."""
    x_numpy = np.random.randn(N, M).astype(np.float32)
    x_cuda = torch.from_numpy(x_numpy).cuda()

    trace_cpu = dtw_cpu(x_numpy)
    trace_cuda = dtw_cuda(x_cuda)

    assert np.allclose(trace_cpu, trace_cuda)


@pytest.mark.parametrize("shape", shapes)
def test_median_filter(shape):
    """median_filter matches scipy's median filter on reflect-padded input."""
    x = torch.randn(*shape)

    for filter_width in [3, 5, 7, 13]:
        filtered = median_filter(x, filter_width)

        # using np.pad to reflect-pad, because Scipy's behavior is different near the edges.
        pad_width = filter_width // 2
        padded_x = np.pad(
            x, [(0, 0)] * (x.ndim - 1) + [(pad_width, pad_width)], mode="reflect"
        )
        scipy_filtered = scipy.ndimage.median_filter(
            padded_x, [1] * (x.ndim - 1) + [filter_width]
        )
        # strip the padding back off so shapes line up
        scipy_filtered = scipy_filtered[..., pad_width:-pad_width]

        assert np.allclose(filtered, scipy_filtered)


@pytest.mark.requires_cuda
@pytest.mark.parametrize("shape", shapes)
def test_median_filter_equivalence(shape):
    """CPU and GPU median_filter implementations must agree."""
    x = torch.randn(*shape)

    for filter_width in [3, 5, 7, 13]:
        filtered_cpu = median_filter(x, filter_width)
        filtered_gpu = median_filter(x.cuda(), filter_width).cpu()

        assert np.allclose(filtered_cpu, filtered_gpu)
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/test_audio.py
tests/test_audio.py
import os.path

import numpy as np

from whisper.audio import SAMPLE_RATE, load_audio, log_mel_spectrogram


def test_audio():
    """Smoke-test load_audio and log_mel_spectrogram on the bundled JFK clip."""
    sample_path = os.path.join(os.path.dirname(__file__), "jfk.flac")
    waveform = load_audio(sample_path)

    # mono waveform, roughly eleven seconds long, sensibly scaled
    assert waveform.ndim == 1
    assert SAMPLE_RATE * 10 < waveform.shape[0] < SAMPLE_RATE * 12
    assert 0 < waveform.std() < 1

    # the spectrogram computed from the in-memory array must match the one
    # computed directly from the file path
    mel_via_array = log_mel_spectrogram(waveform)
    mel_via_path = log_mel_spectrogram(sample_path)
    assert np.allclose(mel_via_array, mel_via_path)

    # log-mel values are clamped to a 2.0 dynamic range
    assert mel_via_array.max() - mel_via_array.min() <= 2.0
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/conftest.py
tests/conftest.py
import random as rand

import numpy
import pytest

# custom marker for tests that need a CUDA-capable GPU
_CUDA_MARKER = "requires_cuda"


def pytest_configure(config):
    """Register the custom marker so pytest does not warn about it."""
    config.addinivalue_line("markers", _CUDA_MARKER)


@pytest.fixture
def random():
    """Seed both the stdlib and NumPy RNGs for deterministic tests."""
    for seeder in (rand.seed, numpy.random.seed):
        seeder(42)
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/test_transcribe.py
tests/test_transcribe.py
import os

import pytest
import torch

import whisper
from whisper.tokenizer import get_tokenizer


@pytest.mark.parametrize("model_name", whisper.available_models())
def test_transcribe(model_name: str):
    """End-to-end transcription of the bundled JFK clip for every model size."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    model = whisper.load_model(model_name).to(device)
    audio_path = os.path.join(os.path.dirname(__file__), "jfk.flac")

    # ".en" models are English-only; let multilingual models detect language
    language = "en" if model_name.endswith(".en") else None
    result = model.transcribe(
        audio_path, language=language, temperature=0.0, word_timestamps=True
    )
    assert result["language"] == "en"
    # full text must be exactly the concatenation of the segment texts
    assert result["text"] == "".join([s["text"] for s in result["segments"]])

    transcription = result["text"].lower()
    assert "my fellow americans" in transcription
    assert "your country" in transcription
    assert "do for you" in transcription

    # decoding the segment tokens must round-trip to the reported text
    tokenizer = get_tokenizer(model.is_multilingual, num_languages=model.num_languages)
    all_tokens = [t for s in result["segments"] for t in s["tokens"]]
    assert tokenizer.decode(all_tokens) == result["text"]
    assert tokenizer.decode_with_timestamps(all_tokens).startswith("<|0.00|>")

    # sanity-check the word-level timestamps around the known word "Americans"
    # (spoken roughly 1.8 seconds into the clip)
    timing_checked = False
    for segment in result["segments"]:
        for timing in segment["words"]:
            assert timing["start"] < timing["end"]
            if timing["word"].strip(" ,") == "Americans":
                assert timing["start"] <= 1.8
                assert timing["end"] >= 1.8
                timing_checked = True

    assert timing_checked
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/tests/test_tokenizer.py
tests/test_tokenizer.py
import pytest

from whisper.tokenizer import get_tokenizer


@pytest.mark.parametrize("multilingual", [True, False])
def test_tokenizer(multilingual):
    """Basic invariants that must hold for both tokenizer variants."""
    # BUG FIX: the parametrized `multilingual` value was previously ignored
    # (`multilingual=False` was hard-coded), so this test never exercised
    # the multilingual tokenizer despite the parametrization.
    tokenizer = get_tokenizer(multilingual=multilingual)
    # start-of-transcript token must appear in the SOT sequence
    assert tokenizer.sot in tokenizer.sot_sequence
    # one language token per language code
    assert len(tokenizer.all_language_codes) == len(tokenizer.all_language_tokens)
    # language tokens precede the timestamp-token range
    assert all(c < tokenizer.timestamp_begin for c in tokenizer.all_language_tokens)


def test_multilingual_tokenizer():
    """The multilingual tokenizer encodes non-Latin text more compactly."""
    gpt2_tokenizer = get_tokenizer(multilingual=False)
    multilingual_tokenizer = get_tokenizer(multilingual=True)

    text = "다람쥐 헌 쳇바퀴에 타고파"
    gpt2_tokens = gpt2_tokenizer.encode(text)
    multilingual_tokens = multilingual_tokenizer.encode(text)

    # both round-trip losslessly
    assert gpt2_tokenizer.decode(gpt2_tokens) == text
    assert multilingual_tokenizer.decode(multilingual_tokens) == text
    # the multilingual vocabulary needs fewer tokens for Korean text
    assert len(gpt2_tokens) > len(multilingual_tokens)


def test_split_on_unicode():
    """Token groups split on unicode boundaries; an incomplete UTF-8 sequence
    surfaces as the replacement character U+FFFD."""
    multilingual_tokenizer = get_tokenizer(multilingual=True)

    tokens = [8404, 871, 287, 6, 246, 526, 3210, 20378]
    words, word_tokens = multilingual_tokenizer.split_tokens_on_unicode(tokens)

    assert words == [" elle", " est", " l", "'", "\ufffd", "é", "rit", "oire"]
    assert word_tokens == [[8404], [871], [287], [6], [246], [526], [3210], [20378]]
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/transcribe.py
whisper/transcribe.py
import argparse import os import traceback import warnings from typing import TYPE_CHECKING, List, Optional, Tuple, Union import numpy as np import torch import tqdm from .audio import ( FRAMES_PER_SECOND, HOP_LENGTH, N_FRAMES, N_SAMPLES, SAMPLE_RATE, log_mel_spectrogram, pad_or_trim, ) from .decoding import DecodingOptions, DecodingResult from .timing import add_word_timestamps from .tokenizer import LANGUAGES, TO_LANGUAGE_CODE, get_tokenizer from .utils import ( exact_div, format_timestamp, get_end, get_writer, make_safe, optional_float, optional_int, str2bool, ) if TYPE_CHECKING: from .model import Whisper def transcribe( model: "Whisper", audio: Union[str, np.ndarray, torch.Tensor], *, verbose: Optional[bool] = None, temperature: Union[float, Tuple[float, ...]] = (0.0, 0.2, 0.4, 0.6, 0.8, 1.0), compression_ratio_threshold: Optional[float] = 2.4, logprob_threshold: Optional[float] = -1.0, no_speech_threshold: Optional[float] = 0.6, condition_on_previous_text: bool = True, initial_prompt: Optional[str] = None, carry_initial_prompt: bool = False, word_timestamps: bool = False, prepend_punctuations: str = "\"'“¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", clip_timestamps: Union[str, List[float]] = "0", hallucination_silence_threshold: Optional[float] = None, **decode_options, ): """ Transcribe an audio file using Whisper Parameters ---------- model: Whisper The Whisper model instance audio: Union[str, np.ndarray, torch.Tensor] The path to the audio file to open, or the audio waveform verbose: bool Whether to display the text being decoded to the console. If True, displays all the details, If False, displays minimal details. If None, does not display anything temperature: Union[float, Tuple[float, ...]] Temperature for sampling. It can be a tuple of temperatures, which will be successively used upon failures according to either `compression_ratio_threshold` or `logprob_threshold`. 
compression_ratio_threshold: float If the gzip compression ratio is above this value, treat as failed logprob_threshold: float If the average log probability over sampled tokens is below this value, treat as failed no_speech_threshold: float If the no_speech probability is higher than this value AND the average log probability over sampled tokens is below `logprob_threshold`, consider the segment as silent condition_on_previous_text: bool if True, the previous output of the model is provided as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop, such as repetition looping or timestamps going out of sync. word_timestamps: bool Extract word-level timestamps using the cross-attention pattern and dynamic time warping, and include the timestamps for each word in each segment. prepend_punctuations: str If word_timestamps is True, merge these punctuation symbols with the next word append_punctuations: str If word_timestamps is True, merge these punctuation symbols with the previous word initial_prompt: Optional[str] Optional text to provide as a prompt for the first window. This can be used to provide, or "prompt-engineer" a context for transcription, e.g. custom vocabularies or proper nouns to make it more likely to predict those word correctly. carry_initial_prompt: bool If carry_initial_prompt is True, `initial_prompt` is prepended to the prompt of each internal `decode()` call. If there is not enough context space at the start of the prompt, it is left-sliced to make space. decode_options: dict Keyword arguments to construct `DecodingOptions` instances clip_timestamps: Union[str, List[float]] Comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process. The last end timestamp defaults to the end of the file. 
hallucination_silence_threshold: Optional[float] When word_timestamps is True, skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected Returns ------- A dictionary containing the resulting text ("text") and segment-level details ("segments"), and the spoken language ("language"), which is detected when `decode_options["language"]` is None. """ dtype = torch.float16 if decode_options.get("fp16", True) else torch.float32 if model.device == torch.device("cpu"): if torch.cuda.is_available(): warnings.warn("Performing inference on CPU when CUDA is available") if dtype == torch.float16: warnings.warn("FP16 is not supported on CPU; using FP32 instead") dtype = torch.float32 if dtype == torch.float32: decode_options["fp16"] = False # Pad 30-seconds of silence to the input audio, for slicing mel = log_mel_spectrogram(audio, model.dims.n_mels, padding=N_SAMPLES) content_frames = mel.shape[-1] - N_FRAMES content_duration = float(content_frames * HOP_LENGTH / SAMPLE_RATE) if decode_options.get("language", None) is None: if not model.is_multilingual: decode_options["language"] = "en" else: if verbose: print( "Detecting language using up to the first 30 seconds. 
Use `--language` to specify the language" ) mel_segment = pad_or_trim(mel, N_FRAMES).to(model.device).to(dtype) _, probs = model.detect_language(mel_segment) decode_options["language"] = max(probs, key=probs.get) if verbose is not None: print( f"Detected language: {LANGUAGES[decode_options['language']].title()}" ) language: str = decode_options["language"] task: str = decode_options.get("task", "transcribe") tokenizer = get_tokenizer( model.is_multilingual, num_languages=model.num_languages, language=language, task=task, ) if isinstance(clip_timestamps, str): clip_timestamps = [ float(ts) for ts in (clip_timestamps.split(",") if clip_timestamps else []) ] seek_points: List[int] = [round(ts * FRAMES_PER_SECOND) for ts in clip_timestamps] if len(seek_points) == 0: seek_points.append(0) if len(seek_points) % 2 == 1: seek_points.append(content_frames) seek_clips: List[Tuple[int, int]] = list(zip(seek_points[::2], seek_points[1::2])) punctuation = "\"'“¿([{-\"'.。,,!!??::”)]}、" if word_timestamps and task == "translate": warnings.warn("Word-level timestamps on translations may not be reliable.") def decode_with_fallback(segment: torch.Tensor) -> DecodingResult: temperatures = ( [temperature] if isinstance(temperature, (int, float)) else temperature ) decode_result = None for t in temperatures: kwargs = {**decode_options} if t > 0: # disable beam_size and patience when t > 0 kwargs.pop("beam_size", None) kwargs.pop("patience", None) else: # disable best_of when t == 0 kwargs.pop("best_of", None) options = DecodingOptions(**kwargs, temperature=t) decode_result = model.decode(segment, options) needs_fallback = False if ( compression_ratio_threshold is not None and decode_result.compression_ratio > compression_ratio_threshold ): needs_fallback = True # too repetitive if ( logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold ): needs_fallback = True # average log probability is too low if ( no_speech_threshold is not None and 
decode_result.no_speech_prob > no_speech_threshold and logprob_threshold is not None and decode_result.avg_logprob < logprob_threshold ): needs_fallback = False # silence if not needs_fallback: break return decode_result clip_idx = 0 seek = seek_clips[clip_idx][0] input_stride = exact_div( N_FRAMES, model.dims.n_audio_ctx ) # mel frames per output token: 2 time_precision = ( input_stride * HOP_LENGTH / SAMPLE_RATE ) # time per output token: 0.02 (seconds) all_tokens = [] all_segments = [] prompt_reset_since = 0 remaining_prompt_length = model.dims.n_text_ctx // 2 - 1 if initial_prompt is not None: initial_prompt_tokens = tokenizer.encode(" " + initial_prompt.strip()) all_tokens.extend(initial_prompt_tokens) remaining_prompt_length -= len(initial_prompt_tokens) else: initial_prompt_tokens = [] def new_segment( *, start: float, end: float, tokens: torch.Tensor, result: DecodingResult ): tokens = tokens.tolist() text_tokens = [token for token in tokens if token < tokenizer.eot] return { "seek": seek, "start": start, "end": end, "text": tokenizer.decode(text_tokens), "tokens": tokens, "temperature": result.temperature, "avg_logprob": result.avg_logprob, "compression_ratio": result.compression_ratio, "no_speech_prob": result.no_speech_prob, } # show the progress bar when verbose is False (if True, transcribed text will be printed) with tqdm.tqdm( total=content_frames, unit="frames", disable=verbose is not False ) as pbar: last_speech_timestamp = 0.0 # NOTE: This loop is obscurely flattened to make the diff readable. # A later commit should turn this into a simpler nested loop. 
# for seek_clip_start, seek_clip_end in seek_clips: # while seek < seek_clip_end while clip_idx < len(seek_clips): seek_clip_start, seek_clip_end = seek_clips[clip_idx] if seek < seek_clip_start: seek = seek_clip_start if seek >= seek_clip_end: clip_idx += 1 if clip_idx < len(seek_clips): seek = seek_clips[clip_idx][0] continue time_offset = float(seek * HOP_LENGTH / SAMPLE_RATE) window_end_time = float((seek + N_FRAMES) * HOP_LENGTH / SAMPLE_RATE) segment_size = min(N_FRAMES, content_frames - seek, seek_clip_end - seek) mel_segment = mel[:, seek : seek + segment_size] segment_duration = segment_size * HOP_LENGTH / SAMPLE_RATE mel_segment = pad_or_trim(mel_segment, N_FRAMES).to(model.device).to(dtype) if carry_initial_prompt: nignored = max(len(initial_prompt_tokens), prompt_reset_since) remaining_prompt = all_tokens[nignored:][-remaining_prompt_length:] decode_options["prompt"] = initial_prompt_tokens + remaining_prompt else: decode_options["prompt"] = all_tokens[prompt_reset_since:] result: DecodingResult = decode_with_fallback(mel_segment) tokens = torch.tensor(result.tokens) if no_speech_threshold is not None: # no voice activity check should_skip = result.no_speech_prob > no_speech_threshold if ( logprob_threshold is not None and result.avg_logprob > logprob_threshold ): # don't skip if the logprob is high enough, despite the no_speech_prob should_skip = False if should_skip: seek += segment_size # fast-forward to the next segment boundary continue previous_seek = seek current_segments = [] # anomalous words are very long/short/improbable def word_anomaly_score(word: dict) -> float: probability = word.get("probability", 0.0) duration = word["end"] - word["start"] score = 0.0 if probability < 0.15: score += 1.0 if duration < 0.133: score += (0.133 - duration) * 15 if duration > 2.0: score += duration - 2.0 return score def is_segment_anomaly(segment: Optional[dict]) -> bool: if segment is None or not segment["words"]: return False words = [w for w in 
segment["words"] if w["word"] not in punctuation] words = words[:8] score = sum(word_anomaly_score(w) for w in words) return score >= 3 or score + 0.01 >= len(words) def next_words_segment(segments: List[dict]) -> Optional[dict]: return next((s for s in segments if s["words"]), None) timestamp_tokens: torch.Tensor = tokens.ge(tokenizer.timestamp_begin) single_timestamp_ending = timestamp_tokens[-2:].tolist() == [False, True] consecutive = torch.where(timestamp_tokens[:-1] & timestamp_tokens[1:])[0] consecutive.add_(1) if len(consecutive) > 0: # if the output contains two consecutive timestamp tokens slices = consecutive.tolist() if single_timestamp_ending: slices.append(len(tokens)) last_slice = 0 for current_slice in slices: sliced_tokens = tokens[last_slice:current_slice] start_timestamp_pos = ( sliced_tokens[0].item() - tokenizer.timestamp_begin ) end_timestamp_pos = ( sliced_tokens[-1].item() - tokenizer.timestamp_begin ) current_segments.append( new_segment( start=time_offset + start_timestamp_pos * time_precision, end=time_offset + end_timestamp_pos * time_precision, tokens=sliced_tokens, result=result, ) ) last_slice = current_slice if single_timestamp_ending: # single timestamp at the end means no speech after the last timestamp. seek += segment_size else: # otherwise, ignore the unfinished segment and seek to the last timestamp last_timestamp_pos = ( tokens[last_slice - 1].item() - tokenizer.timestamp_begin ) seek += last_timestamp_pos * input_stride else: duration = segment_duration timestamps = tokens[timestamp_tokens.nonzero().flatten()] if ( len(timestamps) > 0 and timestamps[-1].item() != tokenizer.timestamp_begin ): # no consecutive timestamps but it has a timestamp; use the last one. 
last_timestamp_pos = ( timestamps[-1].item() - tokenizer.timestamp_begin ) duration = last_timestamp_pos * time_precision current_segments.append( new_segment( start=time_offset, end=time_offset + duration, tokens=tokens, result=result, ) ) seek += segment_size if word_timestamps: add_word_timestamps( segments=current_segments, model=model, tokenizer=tokenizer, mel=mel_segment, num_frames=segment_size, prepend_punctuations=prepend_punctuations, append_punctuations=append_punctuations, last_speech_timestamp=last_speech_timestamp, ) if not single_timestamp_ending: last_word_end = get_end(current_segments) if last_word_end is not None and last_word_end > time_offset: seek = round(last_word_end * FRAMES_PER_SECOND) # skip silence before possible hallucinations if hallucination_silence_threshold is not None: threshold = hallucination_silence_threshold if not single_timestamp_ending: last_word_end = get_end(current_segments) if last_word_end is not None and last_word_end > time_offset: remaining_duration = window_end_time - last_word_end if remaining_duration > threshold: seek = round(last_word_end * FRAMES_PER_SECOND) else: seek = previous_seek + segment_size # if first segment might be a hallucination, skip leading silence first_segment = next_words_segment(current_segments) if first_segment is not None and is_segment_anomaly(first_segment): gap = first_segment["start"] - time_offset if gap > threshold: seek = previous_seek + round(gap * FRAMES_PER_SECOND) continue # skip silence before any possible hallucination that is surrounded # by silence or more hallucinations hal_last_end = last_speech_timestamp for si in range(len(current_segments)): segment = current_segments[si] if not segment["words"]: continue if is_segment_anomaly(segment): next_segment = next_words_segment( current_segments[si + 1 :] ) if next_segment is not None: hal_next_start = next_segment["words"][0]["start"] else: hal_next_start = time_offset + segment_duration silence_before = ( segment["start"] - 
hal_last_end > threshold or segment["start"] < threshold or segment["start"] - time_offset < 2.0 ) silence_after = ( hal_next_start - segment["end"] > threshold or is_segment_anomaly(next_segment) or window_end_time - segment["end"] < 2.0 ) if silence_before and silence_after: seek = round( max(time_offset + 1, segment["start"]) * FRAMES_PER_SECOND ) if content_duration - segment["end"] < threshold: seek = content_frames current_segments[si:] = [] break hal_last_end = segment["end"] last_word_end = get_end(current_segments) if last_word_end is not None: last_speech_timestamp = last_word_end if verbose: for segment in current_segments: start, end, text = segment["start"], segment["end"], segment["text"] line = f"[{format_timestamp(start)} --> {format_timestamp(end)}] {text}" print(make_safe(line)) # if a segment is instantaneous or does not contain text, clear it for i, segment in enumerate(current_segments): if segment["start"] == segment["end"] or segment["text"].strip() == "": segment["text"] = "" segment["tokens"] = [] segment["words"] = [] all_segments.extend( [ {"id": i, **segment} for i, segment in enumerate( current_segments, start=len(all_segments) ) ] ) all_tokens.extend( [token for segment in current_segments for token in segment["tokens"]] ) if not condition_on_previous_text or result.temperature > 0.5: # do not feed the prompt tokens if a high temperature was used prompt_reset_since = len(all_tokens) # update progress bar pbar.update(min(content_frames, seek) - previous_seek) return dict( text=tokenizer.decode(all_tokens[len(initial_prompt_tokens) :]), segments=all_segments, language=language, ) def cli(): from . 
def cli():
    """Command-line entry point: parse arguments, load a Whisper model, and
    transcribe one or more audio files, writing results via the selected writer.

    Side effects: creates ``output_dir``, may set torch thread count, loads the
    model onto the chosen device, and writes output files per audio input.
    """
    from . import available_models

    def valid_model_name(name):
        # Accept either a known model name or a path to a checkpoint on disk.
        if name in available_models() or os.path.exists(name):
            return name
        raise ValueError(
            f"model should be one of {available_models()} or path to a model checkpoint"
        )

    # fmt: off
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("audio", nargs="+", type=str, help="audio file(s) to transcribe")
    parser.add_argument("--model", default="turbo", type=valid_model_name, help="name of the Whisper model to use")
    parser.add_argument("--model_dir", type=str, default=None, help="the path to save model files; uses ~/.cache/whisper by default")
    parser.add_argument("--device", default="cuda" if torch.cuda.is_available() else "cpu", help="device to use for PyTorch inference")
    parser.add_argument("--output_dir", "-o", type=str, default=".", help="directory to save the outputs")
    parser.add_argument("--output_format", "-f", type=str, default="all", choices=["txt", "vtt", "srt", "tsv", "json", "all"], help="format of the output file; if not specified, all available formats will be produced")
    parser.add_argument("--verbose", type=str2bool, default=True, help="whether to print out the progress and debug messages")
    parser.add_argument("--task", type=str, default="transcribe", choices=["transcribe", "translate"], help="whether to perform X->X speech recognition ('transcribe') or X->English translation ('translate')")
    parser.add_argument("--language", type=str, default=None, choices=sorted(LANGUAGES.keys()) + sorted([k.title() for k in TO_LANGUAGE_CODE.keys()]), help="language spoken in the audio, specify None to perform language detection")
    parser.add_argument("--temperature", type=float, default=0, help="temperature to use for sampling")
    parser.add_argument("--best_of", type=optional_int, default=5, help="number of candidates when sampling with non-zero temperature")
    parser.add_argument("--beam_size", type=optional_int, default=5, help="number of beams in beam search, only applicable when temperature is zero")
    parser.add_argument("--patience", type=float, default=None, help="optional patience value to use in beam decoding, as in https://arxiv.org/abs/2204.05424, the default (1.0) is equivalent to conventional beam search")
    parser.add_argument("--length_penalty", type=float, default=None, help="optional token length penalty coefficient (alpha) as in https://arxiv.org/abs/1609.08144, uses simple length normalization by default")
    parser.add_argument("--suppress_tokens", type=str, default="-1", help="comma-separated list of token ids to suppress during sampling; '-1' will suppress most special characters except common punctuations")
    parser.add_argument("--initial_prompt", type=str, default=None, help="optional text to provide as a prompt for the first window.")
    parser.add_argument("--carry_initial_prompt", type=str2bool, default=False, help="if True, prepend initial_prompt to every internal decode() call. May reduce the effectiveness of condition_on_previous_text")
    parser.add_argument("--condition_on_previous_text", type=str2bool, default=True, help="if True, provide the previous output of the model as a prompt for the next window; disabling may make the text inconsistent across windows, but the model becomes less prone to getting stuck in a failure loop")
    parser.add_argument("--fp16", type=str2bool, default=True, help="whether to perform inference in fp16; True by default")
    parser.add_argument("--temperature_increment_on_fallback", type=optional_float, default=0.2, help="temperature to increase when falling back when the decoding fails to meet either of the thresholds below")
    parser.add_argument("--compression_ratio_threshold", type=optional_float, default=2.4, help="if the gzip compression ratio is higher than this value, treat the decoding as failed")
    parser.add_argument("--logprob_threshold", type=optional_float, default=-1.0, help="if the average log probability is lower than this value, treat the decoding as failed")
    parser.add_argument("--no_speech_threshold", type=optional_float, default=0.6, help="if the probability of the <|nospeech|> token is higher than this value AND the decoding has failed due to `logprob_threshold`, consider the segment as silence")
    parser.add_argument("--word_timestamps", type=str2bool, default=False, help="(experimental) extract word-level timestamps and refine the results based on them")
    parser.add_argument("--prepend_punctuations", type=str, default="\"\'“¿([{-", help="if word_timestamps is True, merge these punctuation symbols with the next word")
    parser.add_argument("--append_punctuations", type=str, default="\"\'.。,,!!??::”)]}、", help="if word_timestamps is True, merge these punctuation symbols with the previous word")
    parser.add_argument("--highlight_words", type=str2bool, default=False, help="(requires --word_timestamps True) underline each word as it is spoken in srt and vtt")
    parser.add_argument("--max_line_width", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of characters in a line before breaking the line")
    parser.add_argument("--max_line_count", type=optional_int, default=None, help="(requires --word_timestamps True) the maximum number of lines in a segment")
    parser.add_argument("--max_words_per_line", type=optional_int, default=None, help="(requires --word_timestamps True, no effect with --max_line_width) the maximum number of words in a segment")
    parser.add_argument("--threads", type=optional_int, default=0, help="number of threads used by torch for CPU inference; supersedes MKL_NUM_THREADS/OMP_NUM_THREADS")
    parser.add_argument("--clip_timestamps", type=str, default="0", help="comma-separated list start,end,start,end,... timestamps (in seconds) of clips to process, where the last end timestamp defaults to the end of the file")
    parser.add_argument("--hallucination_silence_threshold", type=optional_float, help="(requires --word_timestamps True) skip silent periods longer than this threshold (in seconds) when a possible hallucination is detected")
    # fmt: on

    args = parser.parse_args().__dict__
    model_name: str = args.pop("model")
    model_dir: str = args.pop("model_dir")
    output_dir: str = args.pop("output_dir")
    output_format: str = args.pop("output_format")
    device: str = args.pop("device")
    os.makedirs(output_dir, exist_ok=True)

    # `.en` checkpoints only support English; override any other language request.
    if model_name.endswith(".en") and args["language"] not in {"en", "English"}:
        if args["language"] is not None:
            warnings.warn(
                f"{model_name} is an English-only model but received '{args['language']}'; using English instead."
            )
        args["language"] = "en"

    temperature = args.pop("temperature")
    if (increment := args.pop("temperature_increment_on_fallback")) is not None:
        # Build the fallback schedule temperature, temperature+increment, ..., 1.0;
        # the 1e-6 epsilon keeps the 1.0 endpoint inside the half-open arange range.
        temperature = tuple(np.arange(temperature, 1.0 + 1e-6, increment))
    else:
        temperature = [temperature]

    if (threads := args.pop("threads")) > 0:
        torch.set_num_threads(threads)

    # NOTE(review): local import, mirroring `available_models` above — presumably
    # to avoid a circular import at module load time; confirm.
    from . import load_model

    model = load_model(model_name, device=device, download_root=model_dir)

    writer = get_writer(output_format, output_dir)
    word_options = [
        "highlight_words",
        "max_line_count",
        "max_line_width",
        "max_words_per_line",
    ]
    # These writer options are only meaningful with word-level timestamps.
    if not args["word_timestamps"]:
        for option in word_options:
            if args[option]:
                parser.error(f"--{option} requires --word_timestamps True")

    if args["max_line_count"] and not args["max_line_width"]:
        warnings.warn("--max_line_count has no effect without --max_line_width")
    if args["max_words_per_line"] and args["max_line_width"]:
        warnings.warn("--max_words_per_line has no effect with --max_line_width")

    writer_args = {arg: args.pop(arg) for arg in word_options}

    # Transcribe each input independently; a failure on one file is reported
    # and skipped rather than aborting the whole batch.
    for audio_path in args.pop("audio"):
        try:
            result = transcribe(model, audio_path, temperature=temperature, **args)
            writer(result, audio_path, **writer_args)
        except Exception as e:
            traceback.print_exc()
            print(f"Skipping {audio_path} due to {type(e).__name__}: {str(e)}")


if __name__ == "__main__":
    cli()
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/decoding.py
whisper/decoding.py
from dataclasses import dataclass, field, replace from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Sequence, Tuple, Union import numpy as np import torch import torch.nn.functional as F from torch import Tensor from torch.distributions import Categorical from .audio import CHUNK_LENGTH from .tokenizer import Tokenizer, get_tokenizer from .utils import compression_ratio if TYPE_CHECKING: from .model import Whisper @torch.no_grad() def detect_language( model: "Whisper", mel: Tensor, tokenizer: Tokenizer = None ) -> Tuple[Tensor, List[dict]]: """ Detect the spoken language in the audio, and return them as list of strings, along with the ids of the most probable language tokens and the probability distribution over all language tokens. This is performed outside the main decode loop in order to not interfere with kv-caching. Returns ------- language_tokens : Tensor, shape = (n_audio,) ids of the most probable language tokens, which appears after the startoftranscript token. language_probs : List[Dict[str, float]], length = n_audio list of dictionaries containing the probability distribution over all languages. 
""" if tokenizer is None: tokenizer = get_tokenizer( model.is_multilingual, num_languages=model.num_languages ) if ( tokenizer.language is None or tokenizer.language_token not in tokenizer.sot_sequence ): raise ValueError( "This model doesn't have language tokens so it can't perform lang id" ) single = mel.ndim == 2 if single: mel = mel.unsqueeze(0) # skip encoder forward pass if already-encoded audio features were given if mel.shape[-2:] != (model.dims.n_audio_ctx, model.dims.n_audio_state): mel = model.encoder(mel) # forward pass using a single token, startoftranscript n_audio = mel.shape[0] x = torch.tensor([[tokenizer.sot]] * n_audio).to(mel.device) # [n_audio, 1] logits = model.logits(x, mel)[:, 0] # collect detected languages; suppress all non-language tokens mask = torch.ones(logits.shape[-1], dtype=torch.bool) mask[list(tokenizer.all_language_tokens)] = False logits[:, mask] = -np.inf language_tokens = logits.argmax(dim=-1) language_token_probs = logits.softmax(dim=-1).cpu() language_probs = [ { c: language_token_probs[i, j].item() for j, c in zip(tokenizer.all_language_tokens, tokenizer.all_language_codes) } for i in range(n_audio) ] if single: language_tokens = language_tokens[0] language_probs = language_probs[0] return language_tokens, language_probs @dataclass(frozen=True) class DecodingOptions: # whether to perform X->X "transcribe" or X->English "translate" task: str = "transcribe" # language that the audio is in; uses detected language if None language: Optional[str] = None # sampling-related options temperature: float = 0.0 sample_len: Optional[int] = None # maximum number of tokens to sample best_of: Optional[int] = None # number of independent sample trajectories, if t > 0 beam_size: Optional[int] = None # number of beams in beam search, if t == 0 patience: Optional[float] = None # patience in beam search (arxiv:2204.05424) # "alpha" in Google NMT, or None for length norm, when ranking generations # to select which to return among the beams or 
best-of-N samples length_penalty: Optional[float] = None # text or tokens to feed as the prompt or the prefix; for more info: # https://github.com/openai/whisper/discussions/117#discussioncomment-3727051 prompt: Optional[Union[str, List[int]]] = None # for the previous context prefix: Optional[Union[str, List[int]]] = None # to prefix the current context # list of tokens ids (or comma-separated token ids) to suppress # "-1" will suppress a set of symbols as defined in `tokenizer.non_speech_tokens()` suppress_tokens: Optional[Union[str, Iterable[int]]] = "-1" suppress_blank: bool = True # this will suppress blank outputs # timestamp sampling options without_timestamps: bool = False # use <|notimestamps|> to sample text tokens only max_initial_timestamp: Optional[float] = 1.0 # implementation details fp16: bool = True # use fp16 for most of the calculation @dataclass(frozen=True) class DecodingResult: audio_features: Tensor language: str language_probs: Optional[Dict[str, float]] = None tokens: List[int] = field(default_factory=list) text: str = "" avg_logprob: float = np.nan no_speech_prob: float = np.nan temperature: float = np.nan compression_ratio: float = np.nan class Inference: def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor: """Perform a forward pass on the decoder and return per-token logits""" raise NotImplementedError def rearrange_kv_cache(self, source_indices) -> None: """Update the key-value cache according to the updated beams""" raise NotImplementedError def cleanup_caching(self) -> None: """Clean up any resources or hooks after decoding is finished""" pass class PyTorchInference(Inference): def __init__(self, model: "Whisper", initial_token_length: int): self.model: "Whisper" = model self.initial_token_length = initial_token_length self.kv_cache = {} self.hooks = [] key_modules = [block.attn.key for block in self.model.decoder.blocks] value_modules = [block.attn.value for block in self.model.decoder.blocks] self.kv_modules = 
key_modules + value_modules def logits(self, tokens: Tensor, audio_features: Tensor) -> Tensor: if not self.kv_cache: self.kv_cache, self.hooks = self.model.install_kv_cache_hooks() if tokens.shape[-1] > self.initial_token_length: # only need to use the last token except in the first forward pass tokens = tokens[:, -1:] return self.model.decoder(tokens, audio_features, kv_cache=self.kv_cache) def cleanup_caching(self): for hook in self.hooks: hook.remove() self.kv_cache = {} self.hooks = [] def rearrange_kv_cache(self, source_indices): if source_indices != list(range(len(source_indices))): for module in self.kv_modules: # update the key/value cache to contain the selected sequences self.kv_cache[module] = self.kv_cache[module][source_indices].detach() class SequenceRanker: def rank( self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]] ) -> List[int]: """ Given a list of groups of samples and their cumulative log probabilities, return the indices of the samples in each group to select as the final result """ raise NotImplementedError class MaximumLikelihoodRanker(SequenceRanker): """ Select the sample with the highest log probabilities, penalized using either a simple length normalization or Google NMT paper's length penalty """ def __init__(self, length_penalty: Optional[float]): self.length_penalty = length_penalty def rank(self, tokens: List[List[Tensor]], sum_logprobs: List[List[float]]): def scores(logprobs, lengths): result = [] for logprob, length in zip(logprobs, lengths): if self.length_penalty is None: penalty = length else: # from the Google NMT paper penalty = ((5 + length) / 6) ** self.length_penalty result.append(logprob / penalty) return result # get the sequence with the highest score lengths = [[len(t) for t in s] for s in tokens] return [np.argmax(scores(p, l)) for p, l in zip(sum_logprobs, lengths)] class TokenDecoder: def reset(self): """Initialize any stateful variables for decoding a new sequence""" def update( self, tokens: 
Tensor, logits: Tensor, sum_logprobs: Tensor ) -> Tuple[Tensor, bool]: """Specify how to select the next token, based on the current trace and logits Parameters ---------- tokens : Tensor, shape = (n_batch, current_sequence_length) all tokens in the context so far, including the prefix and sot_sequence tokens logits : Tensor, shape = (n_batch, vocab_size) per-token logits of the probability distribution at the current step sum_logprobs : Tensor, shape = (n_batch) cumulative log probabilities for each sequence Returns ------- tokens : Tensor, shape = (n_batch, current_sequence_length + 1) the tokens, appended with the selected next token completed : bool True if all sequences has reached the end of text """ raise NotImplementedError def finalize( self, tokens: Tensor, sum_logprobs: Tensor ) -> Tuple[Sequence[Sequence[Tensor]], List[List[float]]]: """Finalize search and return the final candidate sequences Parameters ---------- tokens : Tensor, shape = (n_audio, n_group, current_sequence_length) all tokens in the context so far, including the prefix and sot_sequence sum_logprobs : Tensor, shape = (n_audio, n_group) cumulative log probabilities for each sequence Returns ------- tokens : Sequence[Sequence[Tensor]], length = n_audio sequence of Tensors containing candidate token sequences, for each audio input sum_logprobs : List[List[float]], length = n_audio sequence of cumulative log probabilities corresponding to the above """ raise NotImplementedError class GreedyDecoder(TokenDecoder): def __init__(self, temperature: float, eot: int): self.temperature = temperature self.eot = eot def update( self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor ) -> Tuple[Tensor, bool]: if self.temperature == 0: next_tokens = logits.argmax(dim=-1) else: next_tokens = Categorical(logits=logits / self.temperature).sample() logprobs = F.log_softmax(logits.float(), dim=-1) current_logprobs = logprobs[torch.arange(logprobs.shape[0]), next_tokens] sum_logprobs += current_logprobs * 
(tokens[:, -1] != self.eot) next_tokens[tokens[:, -1] == self.eot] = self.eot tokens = torch.cat([tokens, next_tokens[:, None]], dim=-1) completed = (tokens[:, -1] == self.eot).all() return tokens, completed def finalize(self, tokens: Tensor, sum_logprobs: Tensor): # make sure each sequence has at least one EOT token at the end tokens = F.pad(tokens, (0, 1), value=self.eot) return tokens, sum_logprobs.tolist() class BeamSearchDecoder(TokenDecoder): def __init__( self, beam_size: int, eot: int, inference: Inference, patience: Optional[float] = None, ): self.beam_size = beam_size self.eot = eot self.inference = inference self.patience = patience or 1.0 self.max_candidates: int = round(beam_size * self.patience) self.finished_sequences = None assert ( self.max_candidates > 0 ), f"Invalid beam size ({beam_size}) or patience ({patience})" def reset(self): self.finished_sequences = None def update( self, tokens: Tensor, logits: Tensor, sum_logprobs: Tensor ) -> Tuple[Tensor, bool]: if tokens.shape[0] % self.beam_size != 0: raise ValueError(f"{tokens.shape}[0] % {self.beam_size} != 0") n_audio = tokens.shape[0] // self.beam_size if self.finished_sequences is None: # for the first update self.finished_sequences = [{} for _ in range(n_audio)] logprobs = F.log_softmax(logits.float(), dim=-1) next_tokens, source_indices, finished_sequences = [], [], [] for i in range(n_audio): scores, sources, finished = {}, {}, {} # STEP 1: calculate the cumulative log probabilities for possible candidates for j in range(self.beam_size): idx = i * self.beam_size + j prefix = tokens[idx].tolist() for logprob, token in zip(*logprobs[idx].topk(self.beam_size + 1)): new_logprob = (sum_logprobs[idx] + logprob).item() sequence = tuple(prefix + [token.item()]) scores[sequence] = new_logprob sources[sequence] = idx # STEP 2: rank the candidates and keep the top beam_size sequences for each audio saved = 0 for sequence in sorted(scores, key=scores.get, reverse=True): if sequence[-1] == self.eot: 
finished[sequence] = scores[sequence] else: sum_logprobs[len(next_tokens)] = scores[sequence] next_tokens.append(sequence) source_indices.append(sources[sequence]) saved += 1 if saved == self.beam_size: break finished_sequences.append(finished) tokens = torch.tensor(next_tokens, device=tokens.device) self.inference.rearrange_kv_cache(source_indices) # add newly finished sequences to self.finished_sequences assert len(self.finished_sequences) == len(finished_sequences) for previously_finished, newly_finished in zip( self.finished_sequences, finished_sequences ): for seq in sorted(newly_finished, key=newly_finished.get, reverse=True): if len(previously_finished) >= self.max_candidates: break # the candidate list is full previously_finished[seq] = newly_finished[seq] # mark as completed if all audio has enough number of samples completed = all( len(sequences) >= self.max_candidates for sequences in self.finished_sequences ) return tokens, completed def finalize(self, preceding_tokens: Tensor, sum_logprobs: Tensor): # collect all finished sequences, including patience, and add unfinished ones if not enough sum_logprobs = sum_logprobs.cpu() for i, sequences in enumerate(self.finished_sequences): if ( len(sequences) < self.beam_size ): # when not enough sequences are finished for j in list(np.argsort(sum_logprobs[i]))[::-1]: sequence = preceding_tokens[i, j].tolist() + [self.eot] sequences[tuple(sequence)] = sum_logprobs[i][j].item() if len(sequences) >= self.beam_size: break tokens: List[List[Tensor]] = [ [torch.tensor(seq) for seq in sequences.keys()] for sequences in self.finished_sequences ] sum_logprobs: List[List[float]] = [ list(sequences.values()) for sequences in self.finished_sequences ] return tokens, sum_logprobs class LogitFilter: def apply(self, logits: Tensor, tokens: Tensor) -> None: """Apply any filtering or masking to logits in-place Parameters ---------- logits : Tensor, shape = (n_batch, vocab_size) per-token logits of the probability distribution at 
the current step tokens : Tensor, shape = (n_batch, current_sequence_length) all tokens in the context so far, including the prefix and sot_sequence tokens """ raise NotImplementedError class SuppressBlank(LogitFilter): def __init__(self, tokenizer: Tokenizer, sample_begin: int): self.tokenizer = tokenizer self.sample_begin = sample_begin def apply(self, logits: Tensor, tokens: Tensor): if tokens.shape[1] == self.sample_begin: logits[:, self.tokenizer.encode(" ") + [self.tokenizer.eot]] = -np.inf class SuppressTokens(LogitFilter): def __init__(self, suppress_tokens: Sequence[int]): self.suppress_tokens = list(suppress_tokens) def apply(self, logits: Tensor, tokens: Tensor): logits[:, self.suppress_tokens] = -np.inf class ApplyTimestampRules(LogitFilter): def __init__( self, tokenizer: Tokenizer, sample_begin: int, max_initial_timestamp_index: Optional[int], ): self.tokenizer = tokenizer self.sample_begin = sample_begin self.max_initial_timestamp_index = max_initial_timestamp_index def apply(self, logits: Tensor, tokens: Tensor): # suppress <|notimestamps|> which is handled by without_timestamps if self.tokenizer.no_timestamps is not None: logits[:, self.tokenizer.no_timestamps] = -np.inf # timestamps have to appear in pairs, except directly before EOT; mask logits accordingly for k in range(tokens.shape[0]): sampled_tokens = tokens[k, self.sample_begin :] seq = [t for t in sampled_tokens.tolist()] last_was_timestamp = ( len(seq) >= 1 and seq[-1] >= self.tokenizer.timestamp_begin ) penultimate_was_timestamp = ( len(seq) < 2 or seq[-2] >= self.tokenizer.timestamp_begin ) if last_was_timestamp: if penultimate_was_timestamp: # has to be non-timestamp logits[k, self.tokenizer.timestamp_begin :] = -np.inf else: # cannot be normal text tokens logits[k, : self.tokenizer.eot] = -np.inf timestamps = sampled_tokens[ sampled_tokens.ge(self.tokenizer.timestamp_begin) ] if timestamps.numel() > 0: # timestamps shouldn't decrease; forbid timestamp tokens smaller than the last # 
also force each segment to have a nonzero length, to prevent infinite looping if last_was_timestamp and not penultimate_was_timestamp: timestamp_last = timestamps[-1] else: timestamp_last = timestamps[-1] + 1 logits[k, self.tokenizer.timestamp_begin : timestamp_last] = -np.inf if tokens.shape[1] == self.sample_begin: # suppress generating non-timestamp tokens at the beginning logits[:, : self.tokenizer.timestamp_begin] = -np.inf # apply the `max_initial_timestamp` option if self.max_initial_timestamp_index is not None: last_allowed = ( self.tokenizer.timestamp_begin + self.max_initial_timestamp_index ) logits[:, last_allowed + 1 :] = -np.inf # if sum of probability over timestamps is above any other token, sample timestamp logprobs = F.log_softmax(logits.float(), dim=-1) for k in range(tokens.shape[0]): timestamp_logprob = logprobs[k, self.tokenizer.timestamp_begin :].logsumexp( dim=-1 ) max_text_token_logprob = logprobs[k, : self.tokenizer.timestamp_begin].max() if timestamp_logprob > max_text_token_logprob: logits[k, : self.tokenizer.timestamp_begin] = -np.inf class DecodingTask: inference: Inference sequence_ranker: SequenceRanker decoder: TokenDecoder logit_filters: List[LogitFilter] def __init__(self, model: "Whisper", options: DecodingOptions): self.model = model language = options.language or "en" tokenizer = get_tokenizer( model.is_multilingual, num_languages=model.num_languages, language=language, task=options.task, ) self.tokenizer: Tokenizer = tokenizer self.options: DecodingOptions = self._verify_options(options) self.n_group: int = options.beam_size or options.best_of or 1 self.n_ctx: int = model.dims.n_text_ctx self.sample_len: int = options.sample_len or model.dims.n_text_ctx // 2 self.sot_sequence: Tuple[int] = tokenizer.sot_sequence if self.options.without_timestamps: self.sot_sequence = tokenizer.sot_sequence_including_notimestamps self.initial_tokens: Tuple[int] = self._get_initial_tokens() self.sample_begin: int = len(self.initial_tokens) 
self.sot_index: int = self.initial_tokens.index(tokenizer.sot) # inference: implements the forward pass through the decoder, including kv caching self.inference = PyTorchInference(model, len(self.initial_tokens)) # sequence ranker: implements how to rank a group of sampled sequences self.sequence_ranker = MaximumLikelihoodRanker(options.length_penalty) # decoder: implements how to select the next tokens, given the autoregressive distribution if options.beam_size is not None: self.decoder = BeamSearchDecoder( options.beam_size, tokenizer.eot, self.inference, options.patience ) else: self.decoder = GreedyDecoder(options.temperature, tokenizer.eot) # logit filters: applies various rules to suppress or penalize certain tokens self.logit_filters = [] if self.options.suppress_blank: self.logit_filters.append(SuppressBlank(self.tokenizer, self.sample_begin)) if self.options.suppress_tokens: self.logit_filters.append(SuppressTokens(self._get_suppress_tokens())) if not options.without_timestamps: precision = CHUNK_LENGTH / model.dims.n_audio_ctx # usually 0.02 seconds max_initial_timestamp_index = None if options.max_initial_timestamp: max_initial_timestamp_index = round( self.options.max_initial_timestamp / precision ) self.logit_filters.append( ApplyTimestampRules( tokenizer, self.sample_begin, max_initial_timestamp_index ) ) def _verify_options(self, options: DecodingOptions) -> DecodingOptions: if options.beam_size is not None and options.best_of is not None: raise ValueError("beam_size and best_of can't be given together") if options.temperature == 0: if options.best_of is not None: raise ValueError("best_of with greedy sampling (T=0) is not compatible") if options.patience is not None and options.beam_size is None: raise ValueError("patience requires beam_size to be given") if options.length_penalty is not None and not ( 0 <= options.length_penalty <= 1 ): raise ValueError("length_penalty (alpha) should be a value between 0 and 1") return options def 
_get_initial_tokens(self) -> Tuple[int]: tokens = list(self.sot_sequence) if prefix := self.options.prefix: prefix_tokens = ( self.tokenizer.encode(" " + prefix.strip()) if isinstance(prefix, str) else prefix ) if self.sample_len is not None: max_prefix_len = self.n_ctx // 2 - self.sample_len prefix_tokens = prefix_tokens[-max_prefix_len:] tokens = tokens + prefix_tokens if prompt := self.options.prompt: prompt_tokens = ( self.tokenizer.encode(" " + prompt.strip()) if isinstance(prompt, str) else prompt ) tokens = ( [self.tokenizer.sot_prev] + prompt_tokens[-(self.n_ctx // 2 - 1) :] + tokens ) return tuple(tokens) def _get_suppress_tokens(self) -> Tuple[int]: suppress_tokens = self.options.suppress_tokens if isinstance(suppress_tokens, str): suppress_tokens = [int(t) for t in suppress_tokens.split(",")] if -1 in suppress_tokens: suppress_tokens = [t for t in suppress_tokens if t >= 0] suppress_tokens.extend(self.tokenizer.non_speech_tokens) elif suppress_tokens is None or len(suppress_tokens) == 0: suppress_tokens = [] # interpret empty string as an empty list else: assert isinstance(suppress_tokens, list), "suppress_tokens must be a list" suppress_tokens.extend( [ self.tokenizer.transcribe, self.tokenizer.translate, self.tokenizer.sot, self.tokenizer.sot_prev, self.tokenizer.sot_lm, ] ) if self.tokenizer.no_speech is not None: # no-speech probability is collected separately suppress_tokens.append(self.tokenizer.no_speech) return tuple(sorted(set(suppress_tokens))) def _get_audio_features(self, mel: Tensor): if self.options.fp16: mel = mel.half() if mel.shape[-2:] == ( self.model.dims.n_audio_ctx, self.model.dims.n_audio_state, ): # encoded audio features are given; skip audio encoding audio_features = mel else: audio_features = self.model.encoder(mel) if audio_features.dtype != ( torch.float16 if self.options.fp16 else torch.float32 ): return TypeError( f"audio_features has an incorrect dtype: {audio_features.dtype}" ) return audio_features def 
_detect_language(self, audio_features: Tensor, tokens: Tensor): languages = [self.options.language] * audio_features.shape[0] lang_probs = None if self.options.language is None or self.options.task == "lang_id": lang_tokens, lang_probs = self.model.detect_language( audio_features, self.tokenizer ) languages = [max(probs, key=probs.get) for probs in lang_probs] if self.options.language is None: tokens[:, self.sot_index + 1] = lang_tokens # write language tokens return languages, lang_probs def _main_loop(self, audio_features: Tensor, tokens: Tensor): n_batch = tokens.shape[0] sum_logprobs: Tensor = torch.zeros(n_batch, device=audio_features.device) no_speech_probs = [np.nan] * n_batch try: for i in range(self.sample_len): logits = self.inference.logits(tokens, audio_features) if ( i == 0 and self.tokenizer.no_speech is not None ): # save no_speech_probs probs_at_sot = logits[:, self.sot_index].float().softmax(dim=-1) no_speech_probs = probs_at_sot[:, self.tokenizer.no_speech].tolist() # now we need to consider the logits at the last token only logits = logits[:, -1] # apply the logit filters, e.g. 
for suppressing or applying penalty to for logit_filter in self.logit_filters: logit_filter.apply(logits, tokens) # expand the tokens tensor with the selected next tokens tokens, completed = self.decoder.update(tokens, logits, sum_logprobs) if completed or tokens.shape[-1] > self.n_ctx: break finally: self.inference.cleanup_caching() return tokens, sum_logprobs, no_speech_probs @torch.no_grad() def run(self, mel: Tensor) -> List[DecodingResult]: self.decoder.reset() tokenizer: Tokenizer = self.tokenizer n_audio: int = mel.shape[0] audio_features: Tensor = self._get_audio_features(mel) # encoder forward pass tokens: Tensor = torch.tensor([self.initial_tokens]).repeat(n_audio, 1) # detect language if requested, overwriting the language token languages, language_probs = self._detect_language(audio_features, tokens) if self.options.task == "lang_id": return [ DecodingResult( audio_features=features, language=language, language_probs=probs ) for features, language, probs in zip( audio_features, languages, language_probs ) ] # repeat text tensors by the group size, for beam search or best-of-n sampling tokens = tokens.repeat_interleave(self.n_group, dim=0).to(audio_features.device) # call the main sampling loop tokens, sum_logprobs, no_speech_probs = self._main_loop(audio_features, tokens) # reshape the tensors to have (n_audio, n_group) as the first two dimensions audio_features = audio_features[:: self.n_group] no_speech_probs = no_speech_probs[:: self.n_group] assert audio_features.shape[0] == len(no_speech_probs) == n_audio tokens = tokens.reshape(n_audio, self.n_group, -1) sum_logprobs = sum_logprobs.reshape(n_audio, self.n_group) # get the final candidates for each group, and slice between the first sampled token and EOT tokens, sum_logprobs = self.decoder.finalize(tokens, sum_logprobs) tokens: List[List[Tensor]] = [ [t[self.sample_begin : (t == tokenizer.eot).nonzero()[0, 0]] for t in s] for s in tokens ] # select the top-ranked sample in each group selected = 
self.sequence_ranker.rank(tokens, sum_logprobs) tokens: List[List[int]] = [t[i].tolist() for i, t in zip(selected, tokens)] texts: List[str] = [tokenizer.decode(t).strip() for t in tokens] sum_logprobs: List[float] = [lp[i] for i, lp in zip(selected, sum_logprobs)] avg_logprobs: List[float] = [ lp / (len(t) + 1) for t, lp in zip(tokens, sum_logprobs) ] fields = ( texts, languages, tokens, audio_features, avg_logprobs, no_speech_probs, ) if len(set(map(len, fields))) != 1: raise RuntimeError(f"inconsistent result lengths: {list(map(len, fields))}") return [ DecodingResult( audio_features=features, language=language, tokens=tokens, text=text, avg_logprob=avg_logprob, no_speech_prob=no_speech_prob, temperature=self.options.temperature, compression_ratio=compression_ratio(text), ) for text, language, tokens, features, avg_logprob, no_speech_prob in zip( *fields ) ] @torch.no_grad() def decode( model: "Whisper", mel: Tensor, options: DecodingOptions = DecodingOptions(), **kwargs, ) -> Union[DecodingResult, List[DecodingResult]]: """ Performs decoding of 30-second audio segment(s), provided as Mel spectrogram(s). Parameters ---------- model: Whisper the Whisper model instance mel: torch.Tensor, shape = (80, 3000) or (*, 80, 3000) A tensor containing the Mel spectrogram(s) options: DecodingOptions A dataclass that contains all necessary options for decoding 30-second segments Returns ------- result: Union[DecodingResult, List[DecodingResult]] The result(s) of decoding contained in `DecodingResult` dataclass instance(s) """ if single := mel.ndim == 2: mel = mel.unsqueeze(0) if kwargs: options = replace(options, **kwargs) result = DecodingTask(model, options).run(mel) return result[0] if single else result
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/audio.py
whisper/audio.py
import os from functools import lru_cache from subprocess import CalledProcessError, run from typing import Optional, Union import numpy as np import torch import torch.nn.functional as F from .utils import exact_div # hard-coded audio hyperparameters SAMPLE_RATE = 16000 N_FFT = 400 HOP_LENGTH = 160 CHUNK_LENGTH = 30 N_SAMPLES = CHUNK_LENGTH * SAMPLE_RATE # 480000 samples in a 30-second chunk N_FRAMES = exact_div(N_SAMPLES, HOP_LENGTH) # 3000 frames in a mel spectrogram input N_SAMPLES_PER_TOKEN = HOP_LENGTH * 2 # the initial convolutions has stride 2 FRAMES_PER_SECOND = exact_div(SAMPLE_RATE, HOP_LENGTH) # 10ms per audio frame TOKENS_PER_SECOND = exact_div(SAMPLE_RATE, N_SAMPLES_PER_TOKEN) # 20ms per audio token def load_audio(file: str, sr: int = SAMPLE_RATE): """ Open an audio file and read as mono waveform, resampling as necessary Parameters ---------- file: str The audio file to open sr: int The sample rate to resample the audio if necessary Returns ------- A NumPy array containing the audio waveform, in float32 dtype. """ # This launches a subprocess to decode audio while down-mixing # and resampling as necessary. Requires the ffmpeg CLI in PATH. # fmt: off cmd = [ "ffmpeg", "-nostdin", "-threads", "0", "-i", file, "-f", "s16le", "-ac", "1", "-acodec", "pcm_s16le", "-ar", str(sr), "-" ] # fmt: on try: out = run(cmd, capture_output=True, check=True).stdout except CalledProcessError as e: raise RuntimeError(f"Failed to load audio: {e.stderr.decode()}") from e return np.frombuffer(out, np.int16).flatten().astype(np.float32) / 32768.0 def pad_or_trim(array, length: int = N_SAMPLES, *, axis: int = -1): """ Pad or trim the audio array to N_SAMPLES, as expected by the encoder. 
""" if torch.is_tensor(array): if array.shape[axis] > length: array = array.index_select( dim=axis, index=torch.arange(length, device=array.device) ) if array.shape[axis] < length: pad_widths = [(0, 0)] * array.ndim pad_widths[axis] = (0, length - array.shape[axis]) array = F.pad(array, [pad for sizes in pad_widths[::-1] for pad in sizes]) else: if array.shape[axis] > length: array = array.take(indices=range(length), axis=axis) if array.shape[axis] < length: pad_widths = [(0, 0)] * array.ndim pad_widths[axis] = (0, length - array.shape[axis]) array = np.pad(array, pad_widths) return array @lru_cache(maxsize=None) def mel_filters(device, n_mels: int) -> torch.Tensor: """ load the mel filterbank matrix for projecting STFT into a Mel spectrogram. Allows decoupling librosa dependency; saved using: np.savez_compressed( "mel_filters.npz", mel_80=librosa.filters.mel(sr=16000, n_fft=400, n_mels=80), mel_128=librosa.filters.mel(sr=16000, n_fft=400, n_mels=128), ) """ assert n_mels in {80, 128}, f"Unsupported n_mels: {n_mels}" filters_path = os.path.join(os.path.dirname(__file__), "assets", "mel_filters.npz") with np.load(filters_path, allow_pickle=False) as f: return torch.from_numpy(f[f"mel_{n_mels}"]).to(device) def log_mel_spectrogram( audio: Union[str, np.ndarray, torch.Tensor], n_mels: int = 80, padding: int = 0, device: Optional[Union[str, torch.device]] = None, ): """ Compute the log-Mel spectrogram of Parameters ---------- audio: Union[str, np.ndarray, torch.Tensor], shape = (*) The path to audio or either a NumPy array or Tensor containing the audio waveform in 16 kHz n_mels: int The number of Mel-frequency filters, only 80 and 128 are supported padding: int Number of zero samples to pad to the right device: Optional[Union[str, torch.device]] If given, the audio tensor is moved to this device before STFT Returns ------- torch.Tensor, shape = (n_mels, n_frames) A Tensor that contains the Mel spectrogram """ if not torch.is_tensor(audio): if isinstance(audio, str): 
audio = load_audio(audio) audio = torch.from_numpy(audio) if device is not None: audio = audio.to(device) if padding > 0: audio = F.pad(audio, (0, padding)) window = torch.hann_window(N_FFT).to(audio.device) stft = torch.stft(audio, N_FFT, HOP_LENGTH, window=window, return_complex=True) magnitudes = stft[..., :-1].abs() ** 2 filters = mel_filters(audio.device, n_mels) mel_spec = filters @ magnitudes log_spec = torch.clamp(mel_spec, min=1e-10).log10() log_spec = torch.maximum(log_spec, log_spec.max() - 8.0) log_spec = (log_spec + 4.0) / 4.0 return log_spec
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/triton_ops.py
whisper/triton_ops.py
from functools import lru_cache import numpy as np import torch try: import triton import triton.language as tl except ImportError: raise RuntimeError("triton import failed; try `pip install --pre triton`") @triton.jit def dtw_kernel( cost, trace, x, x_stride, cost_stride, trace_stride, N, M, BLOCK_SIZE: tl.constexpr ): offsets = tl.arange(0, BLOCK_SIZE) mask = offsets < M for k in range(1, N + M + 1): # k = i + j tl.debug_barrier() p0 = cost + (k - 1) * cost_stride p1 = cost + k * cost_stride p2 = cost + k * cost_stride + 1 c0 = tl.load(p0 + offsets, mask=mask) c1 = tl.load(p1 + offsets, mask=mask) c2 = tl.load(p2 + offsets, mask=mask) x_row = tl.load(x + (k - 1) * x_stride + offsets, mask=mask, other=0) cost_row = x_row + tl.minimum(tl.minimum(c0, c1), c2) cost_ptr = cost + (k + 1) * cost_stride + 1 tl.store(cost_ptr + offsets, cost_row, mask=mask) trace_ptr = trace + (k + 1) * trace_stride + 1 tl.store(trace_ptr + offsets, 2, mask=mask & (c2 <= c0) & (c2 <= c1)) tl.store(trace_ptr + offsets, 1, mask=mask & (c1 <= c0) & (c1 <= c2)) tl.store(trace_ptr + offsets, 0, mask=mask & (c0 <= c1) & (c0 <= c2)) @lru_cache(maxsize=None) def median_kernel(filter_width: int): @triton.jit def kernel( y, x, x_stride, y_stride, BLOCK_SIZE: tl.constexpr ): # x.shape[-1] == filter_width row_idx = tl.program_id(0) offsets = tl.arange(0, BLOCK_SIZE) mask = offsets < y_stride x_ptr = x + row_idx * x_stride # noqa: F841 y_ptr = y + row_idx * y_stride LOAD_ALL_ROWS_HERE # noqa: F821 BUBBLESORT_HERE # noqa: F821 tl.store(y_ptr + offsets, MIDDLE_ROW_HERE, mask=mask) # noqa: F821 kernel = triton.JITFunction(kernel.fn) new_kernel = kernel.src.replace( " LOAD_ALL_ROWS_HERE", "\n".join( [ f" row{i} = tl.load(x_ptr + offsets + {i}, mask=mask)" for i in range(filter_width) ] ), ) new_kernel = new_kernel.replace( " BUBBLESORT_HERE", "\n\n".join( [ "\n\n".join( [ "\n".join( [ f" smaller = tl.where(row{j} < row{j + 1}, row{j}, row{j + 1})", f" larger = tl.where(row{j} > row{j + 1}, row{j}, row{j + 
1})", f" row{j} = smaller", f" row{j + 1} = larger", ] ) for j in range(filter_width - i - 1) ] ) for i in range(filter_width // 2 + 1) ] ), ) new_kernel = new_kernel.replace("MIDDLE_ROW_HERE", f"row{filter_width // 2}") if hasattr(kernel, "_unsafe_update_src") is True: kernel._unsafe_update_src(new_kernel) kernel.hash = None else: kernel.src = new_kernel return kernel def median_filter_cuda(x: torch.Tensor, filter_width: int): """Apply a median filter of given width along the last dimension of x""" slices = x.contiguous().unfold(-1, filter_width, 1) grid = np.prod(slices.shape[:-2]) kernel = median_kernel(filter_width) y = torch.empty_like(slices[..., 0]) BLOCK_SIZE = 1 << (y.stride(-2) - 1).bit_length() kernel[(grid,)](y, x, x.stride(-2), y.stride(-2), BLOCK_SIZE=BLOCK_SIZE) return y
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/model.py
whisper/model.py
import base64 import gzip from contextlib import contextmanager from dataclasses import dataclass from typing import Dict, Iterable, Optional, Tuple import numpy as np import torch import torch.nn.functional as F from torch import Tensor, nn from .decoding import decode as decode_function from .decoding import detect_language as detect_language_function from .transcribe import transcribe as transcribe_function try: from torch.nn.functional import scaled_dot_product_attention SDPA_AVAILABLE = True except (ImportError, RuntimeError, OSError): scaled_dot_product_attention = None SDPA_AVAILABLE = False @dataclass class ModelDimensions: n_mels: int n_audio_ctx: int n_audio_state: int n_audio_head: int n_audio_layer: int n_vocab: int n_text_ctx: int n_text_state: int n_text_head: int n_text_layer: int class LayerNorm(nn.LayerNorm): def forward(self, x: Tensor) -> Tensor: return super().forward(x.float()).type(x.dtype) class Linear(nn.Linear): def forward(self, x: Tensor) -> Tensor: return F.linear( x, self.weight.to(x.dtype), None if self.bias is None else self.bias.to(x.dtype), ) class Conv1d(nn.Conv1d): def _conv_forward( self, x: Tensor, weight: Tensor, bias: Optional[Tensor] ) -> Tensor: return super()._conv_forward( x, weight.to(x.dtype), None if bias is None else bias.to(x.dtype) ) def sinusoids(length, channels, max_timescale=10000): """Returns sinusoids for positional embedding""" assert channels % 2 == 0 log_timescale_increment = np.log(max_timescale) / (channels // 2 - 1) inv_timescales = torch.exp(-log_timescale_increment * torch.arange(channels // 2)) scaled_time = torch.arange(length)[:, np.newaxis] * inv_timescales[np.newaxis, :] return torch.cat([torch.sin(scaled_time), torch.cos(scaled_time)], dim=1) @contextmanager def disable_sdpa(): prev_state = MultiHeadAttention.use_sdpa try: MultiHeadAttention.use_sdpa = False yield finally: MultiHeadAttention.use_sdpa = prev_state class MultiHeadAttention(nn.Module): use_sdpa = True def __init__(self, n_state: int, 
n_head: int): super().__init__() self.n_head = n_head self.query = Linear(n_state, n_state) self.key = Linear(n_state, n_state, bias=False) self.value = Linear(n_state, n_state) self.out = Linear(n_state, n_state) def forward( self, x: Tensor, xa: Optional[Tensor] = None, mask: Optional[Tensor] = None, kv_cache: Optional[dict] = None, ): q = self.query(x) if kv_cache is None or xa is None or self.key not in kv_cache: # hooks, if installed (i.e. kv_cache is not None), will prepend the cached kv tensors; # otherwise, perform key/value projections for self- or cross-attention as usual. k = self.key(x if xa is None else xa) v = self.value(x if xa is None else xa) else: # for cross-attention, calculate keys and values once and reuse in subsequent calls. k = kv_cache[self.key] v = kv_cache[self.value] wv, qk = self.qkv_attention(q, k, v, mask) return self.out(wv), qk def qkv_attention( self, q: Tensor, k: Tensor, v: Tensor, mask: Optional[Tensor] = None ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]: n_batch, n_ctx, n_state = q.shape scale = (n_state // self.n_head) ** -0.25 q = q.view(*q.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) k = k.view(*k.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) v = v.view(*v.shape[:2], self.n_head, -1).permute(0, 2, 1, 3) if SDPA_AVAILABLE and MultiHeadAttention.use_sdpa: a = scaled_dot_product_attention( q, k, v, is_causal=mask is not None and n_ctx > 1 ) out = a.permute(0, 2, 1, 3).flatten(start_dim=2) qk = None else: qk = (q * scale) @ (k * scale).transpose(-1, -2) if mask is not None: qk = qk + mask[:n_ctx, :n_ctx] qk = qk.float() w = F.softmax(qk, dim=-1).to(q.dtype) out = (w @ v).permute(0, 2, 1, 3).flatten(start_dim=2) qk = qk.detach() return out, qk class ResidualAttentionBlock(nn.Module): def __init__(self, n_state: int, n_head: int, cross_attention: bool = False): super().__init__() self.attn = MultiHeadAttention(n_state, n_head) self.attn_ln = LayerNorm(n_state) self.cross_attn = ( MultiHeadAttention(n_state, n_head) if 
cross_attention else None ) self.cross_attn_ln = LayerNorm(n_state) if cross_attention else None n_mlp = n_state * 4 self.mlp = nn.Sequential( Linear(n_state, n_mlp), nn.GELU(), Linear(n_mlp, n_state) ) self.mlp_ln = LayerNorm(n_state) def forward( self, x: Tensor, xa: Optional[Tensor] = None, mask: Optional[Tensor] = None, kv_cache: Optional[dict] = None, ): x = x + self.attn(self.attn_ln(x), mask=mask, kv_cache=kv_cache)[0] if self.cross_attn: x = x + self.cross_attn(self.cross_attn_ln(x), xa, kv_cache=kv_cache)[0] x = x + self.mlp(self.mlp_ln(x)) return x class AudioEncoder(nn.Module): def __init__( self, n_mels: int, n_ctx: int, n_state: int, n_head: int, n_layer: int ): super().__init__() self.conv1 = Conv1d(n_mels, n_state, kernel_size=3, padding=1) self.conv2 = Conv1d(n_state, n_state, kernel_size=3, stride=2, padding=1) self.register_buffer("positional_embedding", sinusoids(n_ctx, n_state)) self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList( [ResidualAttentionBlock(n_state, n_head) for _ in range(n_layer)] ) self.ln_post = LayerNorm(n_state) def forward(self, x: Tensor): """ x : torch.Tensor, shape = (batch_size, n_mels, n_ctx) the mel spectrogram of the audio """ x = F.gelu(self.conv1(x)) x = F.gelu(self.conv2(x)) x = x.permute(0, 2, 1) assert x.shape[1:] == self.positional_embedding.shape, "incorrect audio shape" x = (x + self.positional_embedding).to(x.dtype) for block in self.blocks: x = block(x) x = self.ln_post(x) return x class TextDecoder(nn.Module): def __init__( self, n_vocab: int, n_ctx: int, n_state: int, n_head: int, n_layer: int ): super().__init__() self.token_embedding = nn.Embedding(n_vocab, n_state) self.positional_embedding = nn.Parameter(torch.empty(n_ctx, n_state)) self.blocks: Iterable[ResidualAttentionBlock] = nn.ModuleList( [ ResidualAttentionBlock(n_state, n_head, cross_attention=True) for _ in range(n_layer) ] ) self.ln = LayerNorm(n_state) mask = torch.empty(n_ctx, n_ctx).fill_(-np.inf).triu_(1) 
self.register_buffer("mask", mask, persistent=False) def forward(self, x: Tensor, xa: Tensor, kv_cache: Optional[dict] = None): """ x : torch.LongTensor, shape = (batch_size, <= n_ctx) the text tokens xa : torch.Tensor, shape = (batch_size, n_audio_ctx, n_audio_state) the encoded audio features to be attended on """ offset = next(iter(kv_cache.values())).shape[1] if kv_cache else 0 x = ( self.token_embedding(x) + self.positional_embedding[offset : offset + x.shape[-1]] ) x = x.to(xa.dtype) for block in self.blocks: x = block(x, xa, mask=self.mask, kv_cache=kv_cache) x = self.ln(x) logits = ( x @ torch.transpose(self.token_embedding.weight.to(x.dtype), 0, 1) ).float() return logits class Whisper(nn.Module): def __init__(self, dims: ModelDimensions): super().__init__() self.dims = dims self.encoder = AudioEncoder( self.dims.n_mels, self.dims.n_audio_ctx, self.dims.n_audio_state, self.dims.n_audio_head, self.dims.n_audio_layer, ) self.decoder = TextDecoder( self.dims.n_vocab, self.dims.n_text_ctx, self.dims.n_text_state, self.dims.n_text_head, self.dims.n_text_layer, ) # use the last half among the decoder layers for time alignment by default; # to use a specific set of heads, see `set_alignment_heads()` below. 
all_heads = torch.zeros( self.dims.n_text_layer, self.dims.n_text_head, dtype=torch.bool ) all_heads[self.dims.n_text_layer // 2 :] = True self.register_buffer("alignment_heads", all_heads.to_sparse(), persistent=False) def set_alignment_heads(self, dump: bytes): array = np.frombuffer( gzip.decompress(base64.b85decode(dump)), dtype=bool ).copy() mask = torch.from_numpy(array).reshape( self.dims.n_text_layer, self.dims.n_text_head ) self.register_buffer("alignment_heads", mask.to_sparse(), persistent=False) def embed_audio(self, mel: torch.Tensor): return self.encoder(mel) def logits(self, tokens: torch.Tensor, audio_features: torch.Tensor): return self.decoder(tokens, audio_features) def forward( self, mel: torch.Tensor, tokens: torch.Tensor ) -> Dict[str, torch.Tensor]: return self.decoder(tokens, self.encoder(mel)) @property def device(self): return next(self.parameters()).device @property def is_multilingual(self): return self.dims.n_vocab >= 51865 @property def num_languages(self): return self.dims.n_vocab - 51765 - int(self.is_multilingual) def install_kv_cache_hooks(self, cache: Optional[dict] = None): """ The `MultiHeadAttention` module optionally accepts `kv_cache` which stores the key and value tensors calculated for the previous positions. This method returns a dictionary that stores all caches, and the necessary hooks for the key and value projection modules that save the intermediate tensors to be reused during later calculations. 
Returns ------- cache : Dict[nn.Module, torch.Tensor] A dictionary object mapping the key/value projection modules to its cache hooks : List[RemovableHandle] List of PyTorch RemovableHandle objects to stop the hooks to be called """ cache = {**cache} if cache is not None else {} hooks = [] def save_to_cache(module, _, output): if module not in cache or output.shape[1] > self.dims.n_text_ctx: # save as-is, for the first token or cross attention cache[module] = output else: cache[module] = torch.cat([cache[module], output], dim=1).detach() return cache[module] def install_hooks(layer: nn.Module): if isinstance(layer, MultiHeadAttention): hooks.append(layer.key.register_forward_hook(save_to_cache)) hooks.append(layer.value.register_forward_hook(save_to_cache)) self.decoder.apply(install_hooks) return cache, hooks detect_language = detect_language_function transcribe = transcribe_function decode = decode_function
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/version.py
whisper/version.py
__version__ = "20250625"
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/__main__.py
whisper/__main__.py
from .transcribe import cli cli()
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/utils.py
whisper/utils.py
import json import os import re import sys import zlib from typing import Callable, List, Optional, TextIO system_encoding = sys.getdefaultencoding() if system_encoding != "utf-8": def make_safe(string): # replaces any character not representable using the system default encoding with an '?', # avoiding UnicodeEncodeError (https://github.com/openai/whisper/discussions/729). return string.encode(system_encoding, errors="replace").decode(system_encoding) else: def make_safe(string): # utf-8 can encode any Unicode code point, so no need to do the round-trip encoding return string def exact_div(x, y): assert x % y == 0 return x // y def str2bool(string): str2val = {"True": True, "False": False} if string in str2val: return str2val[string] else: raise ValueError(f"Expected one of {set(str2val.keys())}, got {string}") def optional_int(string): return None if string == "None" else int(string) def optional_float(string): return None if string == "None" else float(string) def compression_ratio(text) -> float: text_bytes = text.encode("utf-8") return len(text_bytes) / len(zlib.compress(text_bytes)) def format_timestamp( seconds: float, always_include_hours: bool = False, decimal_marker: str = "." 
): assert seconds >= 0, "non-negative timestamp expected" milliseconds = round(seconds * 1000.0) hours = milliseconds // 3_600_000 milliseconds -= hours * 3_600_000 minutes = milliseconds // 60_000 milliseconds -= minutes * 60_000 seconds = milliseconds // 1_000 milliseconds -= seconds * 1_000 hours_marker = f"{hours:02d}:" if always_include_hours or hours > 0 else "" return ( f"{hours_marker}{minutes:02d}:{seconds:02d}{decimal_marker}{milliseconds:03d}" ) def get_start(segments: List[dict]) -> Optional[float]: return next( (w["start"] for s in segments for w in s["words"]), segments[0]["start"] if segments else None, ) def get_end(segments: List[dict]) -> Optional[float]: return next( (w["end"] for s in reversed(segments) for w in reversed(s["words"])), segments[-1]["end"] if segments else None, ) class ResultWriter: extension: str def __init__(self, output_dir: str): self.output_dir = output_dir def __call__( self, result: dict, audio_path: str, options: Optional[dict] = None, **kwargs ): audio_basename = os.path.basename(audio_path) audio_basename = os.path.splitext(audio_basename)[0] output_path = os.path.join( self.output_dir, audio_basename + "." 
+ self.extension ) with open(output_path, "w", encoding="utf-8") as f: self.write_result(result, file=f, options=options, **kwargs) def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): raise NotImplementedError class WriteTXT(ResultWriter): extension: str = "txt" def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): for segment in result["segments"]: print(segment["text"].strip(), file=file, flush=True) class SubtitlesWriter(ResultWriter): always_include_hours: bool decimal_marker: str def iterate_result( self, result: dict, options: Optional[dict] = None, *, max_line_width: Optional[int] = None, max_line_count: Optional[int] = None, highlight_words: bool = False, max_words_per_line: Optional[int] = None, ): options = options or {} max_line_width = max_line_width or options.get("max_line_width") max_line_count = max_line_count or options.get("max_line_count") highlight_words = highlight_words or options.get("highlight_words", False) max_words_per_line = max_words_per_line or options.get("max_words_per_line") preserve_segments = max_line_count is None or max_line_width is None max_line_width = max_line_width or 1000 max_words_per_line = max_words_per_line or 1000 def iterate_subtitles(): line_len = 0 line_count = 1 # the next subtitle to yield (a list of word timings with whitespace) subtitle: List[dict] = [] last: float = get_start(result["segments"]) or 0.0 for segment in result["segments"]: chunk_index = 0 words_count = max_words_per_line while chunk_index < len(segment["words"]): remaining_words = len(segment["words"]) - chunk_index if max_words_per_line > len(segment["words"]) - chunk_index: words_count = remaining_words for i, original_timing in enumerate( segment["words"][chunk_index : chunk_index + words_count] ): timing = original_timing.copy() long_pause = ( not preserve_segments and timing["start"] - last > 3.0 ) has_room = line_len + len(timing["word"]) <= max_line_width 
seg_break = i == 0 and len(subtitle) > 0 and preserve_segments if ( line_len > 0 and has_room and not long_pause and not seg_break ): # line continuation line_len += len(timing["word"]) else: # new line timing["word"] = timing["word"].strip() if ( len(subtitle) > 0 and max_line_count is not None and (long_pause or line_count >= max_line_count) or seg_break ): # subtitle break yield subtitle subtitle = [] line_count = 1 elif line_len > 0: # line break line_count += 1 timing["word"] = "\n" + timing["word"] line_len = len(timing["word"].strip()) subtitle.append(timing) last = timing["start"] chunk_index += max_words_per_line if len(subtitle) > 0: yield subtitle if len(result["segments"]) > 0 and "words" in result["segments"][0]: for subtitle in iterate_subtitles(): subtitle_start = self.format_timestamp(subtitle[0]["start"]) subtitle_end = self.format_timestamp(subtitle[-1]["end"]) subtitle_text = "".join([word["word"] for word in subtitle]) if highlight_words: last = subtitle_start all_words = [timing["word"] for timing in subtitle] for i, this_word in enumerate(subtitle): start = self.format_timestamp(this_word["start"]) end = self.format_timestamp(this_word["end"]) if last != start: yield last, start, subtitle_text yield start, end, "".join( [ ( re.sub(r"^(\s*)(.*)$", r"\1<u>\2</u>", word) if j == i else word ) for j, word in enumerate(all_words) ] ) last = end else: yield subtitle_start, subtitle_end, subtitle_text else: for segment in result["segments"]: segment_start = self.format_timestamp(segment["start"]) segment_end = self.format_timestamp(segment["end"]) segment_text = segment["text"].strip().replace("-->", "->") yield segment_start, segment_end, segment_text def format_timestamp(self, seconds: float): return format_timestamp( seconds=seconds, always_include_hours=self.always_include_hours, decimal_marker=self.decimal_marker, ) class WriteVTT(SubtitlesWriter): extension: str = "vtt" always_include_hours: bool = False decimal_marker: str = "." 
def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): print("WEBVTT\n", file=file) for start, end, text in self.iterate_result(result, options, **kwargs): print(f"{start} --> {end}\n{text}\n", file=file, flush=True) class WriteSRT(SubtitlesWriter): extension: str = "srt" always_include_hours: bool = True decimal_marker: str = "," def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): for i, (start, end, text) in enumerate( self.iterate_result(result, options, **kwargs), start=1 ): print(f"{i}\n{start} --> {end}\n{text}\n", file=file, flush=True) class WriteTSV(ResultWriter): """ Write a transcript to a file in TSV (tab-separated values) format containing lines like: <start time in integer milliseconds>\t<end time in integer milliseconds>\t<transcript text> Using integer milliseconds as start and end times means there's no chance of interference from an environment setting a language encoding that causes the decimal in a floating point number to appear as a comma; also is faster and more efficient to parse & store, e.g., in C++. 
""" extension: str = "tsv" def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): print("start", "end", "text", sep="\t", file=file) for segment in result["segments"]: print(round(1000 * segment["start"]), file=file, end="\t") print(round(1000 * segment["end"]), file=file, end="\t") print(segment["text"].strip().replace("\t", " "), file=file, flush=True) class WriteJSON(ResultWriter): extension: str = "json" def write_result( self, result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): json.dump(result, file) def get_writer( output_format: str, output_dir: str ) -> Callable[[dict, TextIO, dict], None]: writers = { "txt": WriteTXT, "vtt": WriteVTT, "srt": WriteSRT, "tsv": WriteTSV, "json": WriteJSON, } if output_format == "all": all_writers = [writer(output_dir) for writer in writers.values()] def write_all( result: dict, file: TextIO, options: Optional[dict] = None, **kwargs ): for writer in all_writers: writer(result, file, options, **kwargs) return write_all return writers[output_format](output_dir)
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/__init__.py
whisper/__init__.py
import hashlib import io import os import urllib import warnings from typing import List, Optional, Union import torch from tqdm import tqdm from .audio import load_audio, log_mel_spectrogram, pad_or_trim from .decoding import DecodingOptions, DecodingResult, decode, detect_language from .model import ModelDimensions, Whisper from .transcribe import transcribe from .version import __version__ _MODELS = { "tiny.en": "https://openaipublic.azureedge.net/main/whisper/models/d3dd57d32accea0b295c96e26691aa14d8822fac7d9d27d5dc00b4ca2826dd03/tiny.en.pt", "tiny": "https://openaipublic.azureedge.net/main/whisper/models/65147644a518d12f04e32d6f3b26facc3f8dd46e5390956a9424a650c0ce22b9/tiny.pt", "base.en": "https://openaipublic.azureedge.net/main/whisper/models/25a8566e1d0c1e2231d1c762132cd20e0f96a85d16145c3a00adf5d1ac670ead/base.en.pt", "base": "https://openaipublic.azureedge.net/main/whisper/models/ed3a0b6b1c0edf879ad9b11b1af5a0e6ab5db9205f891f668f8b0e6c6326e34e/base.pt", "small.en": "https://openaipublic.azureedge.net/main/whisper/models/f953ad0fd29cacd07d5a9eda5624af0f6bcf2258be67c92b79389873d91e0872/small.en.pt", "small": "https://openaipublic.azureedge.net/main/whisper/models/9ecf779972d90ba49c06d968637d720dd632c55bbf19d441fb42bf17a411e794/small.pt", "medium.en": "https://openaipublic.azureedge.net/main/whisper/models/d7440d1dc186f76616474e0ff0b3b6b879abc9d1a4926b7adfa41db2d497ab4f/medium.en.pt", "medium": "https://openaipublic.azureedge.net/main/whisper/models/345ae4da62f9b3d59415adc60127b97c714f32e89e936602e85993674d08dcb1/medium.pt", "large-v1": "https://openaipublic.azureedge.net/main/whisper/models/e4b87e7e0bf463eb8e6956e646f1e277e901512310def2c24bf0e11bd3c28e9a/large-v1.pt", "large-v2": "https://openaipublic.azureedge.net/main/whisper/models/81f7c96c852ee8fc832187b0132e569d6c3065a3252ed18e56effd0b6a73e524/large-v2.pt", "large-v3": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", 
"large": "https://openaipublic.azureedge.net/main/whisper/models/e5b1a55b89c1367dacf97e3e19bfd829a01529dbfdeefa8caeb59b3f1b81dadb/large-v3.pt", "large-v3-turbo": "https://openaipublic.azureedge.net/main/whisper/models/aff26ae408abcba5fbf8813c21e62b0941638c5f6eebfb145be0c9839262a19a/large-v3-turbo.pt", "turbo": "https://openaipublic.azureedge.net/main/whisper/models/aff26ae408abcba5fbf8813c21e62b0941638c5f6eebfb145be0c9839262a19a/large-v3-turbo.pt", } # base85-encoded (n_layers, n_heads) boolean arrays indicating the cross-attention heads that are # highly correlated to the word-level timing, i.e. the alignment between audio and text tokens. _ALIGNMENT_HEADS = { "tiny.en": b"ABzY8J1N>@0{>%R00Bk>$p{7v037`oCl~+#00", "tiny": b"ABzY8bu8Lr0{>%RKn9Fp%m@SkK7Kt=7ytkO", "base.en": b"ABzY8;40c<0{>%RzzG;p*o+Vo09|#PsxSZm00", "base": b"ABzY8KQ!870{>%RzyTQH3`Q^yNP!>##QT-<FaQ7m", "small.en": b"ABzY8>?_)10{>%RpeA61k&I|OI3I$65C{;;pbCHh0B{qLQ;+}v00", "small": b"ABzY8DmU6=0{>%Rpa?J`kvJ6qF(V^F86#Xh7JUGMK}P<N0000", "medium.en": b"ABzY8usPae0{>%R7<zz_OvQ{)4kMa0BMw6u5rT}kRKX;$NfYBv00*Hl@qhsU00", "medium": b"ABzY8B0Jh+0{>%R7}kK1fFL7w6%<-Pf*t^=N)Qr&0RR9", "large-v1": b"ABzY8r9j$a0{>%R7#4sLmoOs{s)o3~84-RPdcFk!JR<kSfC2yj", "large-v2": b"ABzY8zd+h!0{>%R7=D0pU<_bnWW*tkYAhobTNnu$jnkEkXqp)j;w1Tzk)UH3X%SZd&fFZ2fC2yj", "large-v3": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00", "large": b"ABzY8gWO1E0{>%R7(9S+Kn!D~%ngiGaR?*L!iJG9p-nab0JQ=-{D1-g00", "large-v3-turbo": b"ABzY8j^C+e0{>%RARaKHP%t(lGR*)0g!tONPyhe`", "turbo": b"ABzY8j^C+e0{>%RARaKHP%t(lGR*)0g!tONPyhe`", } def _download(url: str, root: str, in_memory: bool) -> Union[bytes, str]: os.makedirs(root, exist_ok=True) expected_sha256 = url.split("/")[-2] download_target = os.path.join(root, os.path.basename(url)) if os.path.exists(download_target) and not os.path.isfile(download_target): raise RuntimeError(f"{download_target} exists and is not a regular file") if os.path.isfile(download_target): with open(download_target, "rb") as f: 
model_bytes = f.read() if hashlib.sha256(model_bytes).hexdigest() == expected_sha256: return model_bytes if in_memory else download_target else: warnings.warn( f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file" ) with urllib.request.urlopen(url) as source, open(download_target, "wb") as output: with tqdm( total=int(source.info().get("Content-Length")), ncols=80, unit="iB", unit_scale=True, unit_divisor=1024, ) as loop: while True: buffer = source.read(8192) if not buffer: break output.write(buffer) loop.update(len(buffer)) model_bytes = open(download_target, "rb").read() if hashlib.sha256(model_bytes).hexdigest() != expected_sha256: raise RuntimeError( "Model has been downloaded but the SHA256 checksum does not not match. Please retry loading the model." ) return model_bytes if in_memory else download_target def available_models() -> List[str]: """Returns the names of available models""" return list(_MODELS.keys()) def load_model( name: str, device: Optional[Union[str, torch.device]] = None, download_root: str = None, in_memory: bool = False, ) -> Whisper: """ Load a Whisper ASR model Parameters ---------- name : str one of the official model names listed by `whisper.available_models()`, or path to a model checkpoint containing the model dimensions and the model state_dict. 
device : Union[str, torch.device] the PyTorch device to put the model into download_root: str path to download the model files; by default, it uses "~/.cache/whisper" in_memory: bool whether to preload the model weights into host memory Returns ------- model : Whisper The Whisper ASR model instance """ if device is None: device = "cuda" if torch.cuda.is_available() else "cpu" if download_root is None: default = os.path.join(os.path.expanduser("~"), ".cache") download_root = os.path.join(os.getenv("XDG_CACHE_HOME", default), "whisper") if name in _MODELS: checkpoint_file = _download(_MODELS[name], download_root, in_memory) alignment_heads = _ALIGNMENT_HEADS[name] elif os.path.isfile(name): checkpoint_file = open(name, "rb").read() if in_memory else name alignment_heads = None else: raise RuntimeError( f"Model {name} not found; available models = {available_models()}" ) with ( io.BytesIO(checkpoint_file) if in_memory else open(checkpoint_file, "rb") ) as fp: kwargs = {"weights_only": True} if torch.__version__ >= "1.13" else {} checkpoint = torch.load(fp, map_location=device, **kwargs) del checkpoint_file dims = ModelDimensions(**checkpoint["dims"]) model = Whisper(dims) model.load_state_dict(checkpoint["model_state_dict"]) if alignment_heads is not None: model.set_alignment_heads(alignment_heads) return model.to(device)
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/tokenizer.py
whisper/tokenizer.py
import base64 import os import string from dataclasses import dataclass, field from functools import cached_property, lru_cache from typing import Dict, List, Optional, Tuple import tiktoken LANGUAGES = { "en": "english", "zh": "chinese", "de": "german", "es": "spanish", "ru": "russian", "ko": "korean", "fr": "french", "ja": "japanese", "pt": "portuguese", "tr": "turkish", "pl": "polish", "ca": "catalan", "nl": "dutch", "ar": "arabic", "sv": "swedish", "it": "italian", "id": "indonesian", "hi": "hindi", "fi": "finnish", "vi": "vietnamese", "he": "hebrew", "uk": "ukrainian", "el": "greek", "ms": "malay", "cs": "czech", "ro": "romanian", "da": "danish", "hu": "hungarian", "ta": "tamil", "no": "norwegian", "th": "thai", "ur": "urdu", "hr": "croatian", "bg": "bulgarian", "lt": "lithuanian", "la": "latin", "mi": "maori", "ml": "malayalam", "cy": "welsh", "sk": "slovak", "te": "telugu", "fa": "persian", "lv": "latvian", "bn": "bengali", "sr": "serbian", "az": "azerbaijani", "sl": "slovenian", "kn": "kannada", "et": "estonian", "mk": "macedonian", "br": "breton", "eu": "basque", "is": "icelandic", "hy": "armenian", "ne": "nepali", "mn": "mongolian", "bs": "bosnian", "kk": "kazakh", "sq": "albanian", "sw": "swahili", "gl": "galician", "mr": "marathi", "pa": "punjabi", "si": "sinhala", "km": "khmer", "sn": "shona", "yo": "yoruba", "so": "somali", "af": "afrikaans", "oc": "occitan", "ka": "georgian", "be": "belarusian", "tg": "tajik", "sd": "sindhi", "gu": "gujarati", "am": "amharic", "yi": "yiddish", "lo": "lao", "uz": "uzbek", "fo": "faroese", "ht": "haitian creole", "ps": "pashto", "tk": "turkmen", "nn": "nynorsk", "mt": "maltese", "sa": "sanskrit", "lb": "luxembourgish", "my": "myanmar", "bo": "tibetan", "tl": "tagalog", "mg": "malagasy", "as": "assamese", "tt": "tatar", "haw": "hawaiian", "ln": "lingala", "ha": "hausa", "ba": "bashkir", "jw": "javanese", "su": "sundanese", "yue": "cantonese", } # language code lookup by name, with a few language aliases TO_LANGUAGE_CODE 
= { **{language: code for code, language in LANGUAGES.items()}, "burmese": "my", "valencian": "ca", "flemish": "nl", "haitian": "ht", "letzeburgesch": "lb", "pushto": "ps", "panjabi": "pa", "moldavian": "ro", "moldovan": "ro", "sinhalese": "si", "castilian": "es", "mandarin": "zh", } @dataclass class Tokenizer: """A thin wrapper around `tiktoken` providing quick access to special tokens""" encoding: tiktoken.Encoding num_languages: int language: Optional[str] = None task: Optional[str] = None sot_sequence: Tuple[int] = () special_tokens: Dict[str, int] = field(default_factory=dict) def __post_init__(self): for special in self.encoding.special_tokens_set: special_token = self.encoding.encode_single_token(special) self.special_tokens[special] = special_token sot: int = self.special_tokens["<|startoftranscript|>"] translate: int = self.special_tokens["<|translate|>"] transcribe: int = self.special_tokens["<|transcribe|>"] langs = tuple(LANGUAGES.keys())[: self.num_languages] sot_sequence = [sot] if self.language is not None: sot_sequence.append(sot + 1 + langs.index(self.language)) if self.task is not None: task_token: int = transcribe if self.task == "transcribe" else translate sot_sequence.append(task_token) self.sot_sequence = tuple(sot_sequence) def encode(self, text, **kwargs): return self.encoding.encode(text, **kwargs) def decode(self, token_ids: List[int], **kwargs) -> str: token_ids = [t for t in token_ids if t < self.timestamp_begin] return self.encoding.decode(token_ids, **kwargs) def decode_with_timestamps(self, token_ids: List[int], **kwargs) -> str: """ Timestamp tokens are above other special tokens' id range and are ignored by `decode()`. This method decodes given tokens with timestamps tokens annotated, e.g. "<|1.08|>". 
""" return self.encoding.decode(token_ids, **kwargs) @cached_property def eot(self) -> int: return self.encoding.eot_token @cached_property def transcribe(self) -> int: return self.special_tokens["<|transcribe|>"] @cached_property def translate(self) -> int: return self.special_tokens["<|translate|>"] @cached_property def sot(self) -> int: return self.special_tokens["<|startoftranscript|>"] @cached_property def sot_lm(self) -> int: return self.special_tokens["<|startoflm|>"] @cached_property def sot_prev(self) -> int: return self.special_tokens["<|startofprev|>"] @cached_property def no_speech(self) -> int: return self.special_tokens["<|nospeech|>"] @cached_property def no_timestamps(self) -> int: return self.special_tokens["<|notimestamps|>"] @cached_property def timestamp_begin(self) -> int: return self.special_tokens["<|0.00|>"] @cached_property def language_token(self) -> int: """Returns the token id corresponding to the value of the `language` field""" if self.language is None: raise ValueError("This tokenizer does not have language token configured") return self.to_language_token(self.language) def to_language_token(self, language): if token := self.special_tokens.get(f"<|{language}|>", None): return token raise KeyError(f"Language {language} not found in tokenizer.") @cached_property def all_language_tokens(self) -> Tuple[int]: result = [] for token, token_id in self.special_tokens.items(): if token.strip("<|>") in LANGUAGES: result.append(token_id) return tuple(result)[: self.num_languages] @cached_property def all_language_codes(self) -> Tuple[str]: return tuple(self.decode([_l]).strip("<|>") for _l in self.all_language_tokens) @cached_property def sot_sequence_including_notimestamps(self) -> Tuple[int]: return tuple(list(self.sot_sequence) + [self.no_timestamps]) @cached_property def non_speech_tokens(self) -> Tuple[int]: """ Returns the list of tokens to suppress in order to avoid any speaker tags or non-speech annotations, to prevent sampling texts that 
are not actually spoken in the audio, e.g. - ♪♪♪ - ( SPEAKING FOREIGN LANGUAGE ) - [DAVID] Hey there, keeping basic punctuations like commas, periods, question marks, exclamation points, etc. """ symbols = list('"#()*+/:;<=>@[\\]^_`{|}~「」『』') symbols += ( "<< >> <<< >>> -- --- -( -[ (' (\" (( )) ((( ))) [[ ]] {{ }} ♪♪ ♪♪♪".split() ) # symbols that may be a single token or multiple tokens depending on the tokenizer. # In case they're multiple tokens, suppress the first token, which is safe because: # These are between U+2640 and U+267F miscellaneous symbols that are okay to suppress # in generations, and in the 3-byte UTF-8 representation they share the first two bytes. miscellaneous = set("♩♪♫♬♭♮♯") assert all(0x2640 <= ord(c) <= 0x267F for c in miscellaneous) # allow hyphens "-" and single quotes "'" between words, but not at the beginning of a word result = {self.encoding.encode(" -")[0], self.encoding.encode(" '")[0]} for symbol in symbols + list(miscellaneous): for tokens in [ self.encoding.encode(symbol), self.encoding.encode(" " + symbol), ]: if len(tokens) == 1 or symbol in miscellaneous: result.add(tokens[0]) return tuple(sorted(result)) def split_to_word_tokens(self, tokens: List[int]): if self.language in {"zh", "ja", "th", "lo", "my", "yue"}: # These languages don't typically use spaces, so it is difficult to split words # without morpheme analysis. 
Here, we instead split words at any # position where the tokens are decoded as valid unicode points return self.split_tokens_on_unicode(tokens) return self.split_tokens_on_spaces(tokens) def split_tokens_on_unicode(self, tokens: List[int]): decoded_full = self.decode_with_timestamps(tokens) replacement_char = "\ufffd" words = [] word_tokens = [] current_tokens = [] unicode_offset = 0 for token in tokens: current_tokens.append(token) decoded = self.decode_with_timestamps(current_tokens) if ( replacement_char not in decoded or decoded_full[unicode_offset + decoded.index(replacement_char)] == replacement_char ): words.append(decoded) word_tokens.append(current_tokens) current_tokens = [] unicode_offset += len(decoded) return words, word_tokens def split_tokens_on_spaces(self, tokens: List[int]): subwords, subword_tokens_list = self.split_tokens_on_unicode(tokens) words = [] word_tokens = [] for subword, subword_tokens in zip(subwords, subword_tokens_list): special = subword_tokens[0] >= self.eot with_space = subword.startswith(" ") punctuation = subword.strip() in string.punctuation if special or with_space or punctuation or len(words) == 0: words.append(subword) word_tokens.append(subword_tokens) else: words[-1] = words[-1] + subword word_tokens[-1].extend(subword_tokens) return words, word_tokens @lru_cache(maxsize=None) def get_encoding(name: str = "gpt2", num_languages: int = 99): vocab_path = os.path.join(os.path.dirname(__file__), "assets", f"{name}.tiktoken") ranks = { base64.b64decode(token): int(rank) for token, rank in (line.split() for line in open(vocab_path) if line) } n_vocab = len(ranks) special_tokens = {} specials = [ "<|endoftext|>", "<|startoftranscript|>", *[f"<|{lang}|>" for lang in list(LANGUAGES.keys())[:num_languages]], "<|translate|>", "<|transcribe|>", "<|startoflm|>", "<|startofprev|>", "<|nospeech|>", "<|notimestamps|>", *[f"<|{i * 0.02:.2f}|>" for i in range(1501)], ] for token in specials: special_tokens[token] = n_vocab n_vocab += 1 
return tiktoken.Encoding( name=os.path.basename(vocab_path), explicit_n_vocab=n_vocab, pat_str=r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""", mergeable_ranks=ranks, special_tokens=special_tokens, ) @lru_cache(maxsize=None) def get_tokenizer( multilingual: bool, *, num_languages: int = 99, language: Optional[str] = None, task: Optional[str] = None, # Literal["transcribe", "translate", None] ) -> Tokenizer: if language is not None: language = language.lower() if language not in LANGUAGES: if language in TO_LANGUAGE_CODE: language = TO_LANGUAGE_CODE[language] else: raise ValueError(f"Unsupported language: {language}") if multilingual: encoding_name = "multilingual" language = language or "en" task = task or "transcribe" else: encoding_name = "gpt2" language = None task = None encoding = get_encoding(name=encoding_name, num_languages=num_languages) return Tokenizer( encoding=encoding, num_languages=num_languages, language=language, task=task )
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/timing.py
whisper/timing.py
import itertools import subprocess import warnings from dataclasses import dataclass from typing import TYPE_CHECKING, List import numba import numpy as np import torch import torch.nn.functional as F from .audio import HOP_LENGTH, SAMPLE_RATE, TOKENS_PER_SECOND from .tokenizer import Tokenizer if TYPE_CHECKING: from .model import Whisper def median_filter(x: torch.Tensor, filter_width: int): """Apply a median filter of width `filter_width` along the last dimension of `x`""" pad_width = filter_width // 2 if x.shape[-1] <= pad_width: # F.pad requires the padding width to be smaller than the input dimension return x if (ndim := x.ndim) <= 2: # `F.pad` does not support 1D or 2D inputs for reflect padding but supports 3D and 4D x = x[None, None, :] assert ( filter_width > 0 and filter_width % 2 == 1 ), "`filter_width` should be an odd number" result = None x = F.pad(x, (filter_width // 2, filter_width // 2, 0, 0), mode="reflect") if x.is_cuda: try: from .triton_ops import median_filter_cuda result = median_filter_cuda(x, filter_width) except (RuntimeError, subprocess.CalledProcessError): warnings.warn( "Failed to launch Triton kernels, likely due to missing CUDA toolkit; " "falling back to a slower median kernel implementation..." 
) if result is None: # sort() is faster than torch.median (https://github.com/pytorch/pytorch/issues/51450) result = x.unfold(-1, filter_width, 1).sort()[0][..., filter_width // 2] if ndim <= 2: result = result[0, 0] return result @numba.jit(nopython=True) def backtrace(trace: np.ndarray): i = trace.shape[0] - 1 j = trace.shape[1] - 1 trace[0, :] = 2 trace[:, 0] = 1 result = [] while i > 0 or j > 0: result.append((i - 1, j - 1)) if trace[i, j] == 0: i -= 1 j -= 1 elif trace[i, j] == 1: i -= 1 elif trace[i, j] == 2: j -= 1 else: raise ValueError("Unexpected trace[i, j]") result = np.array(result) return result[::-1, :].T @numba.jit(nopython=True, parallel=True) def dtw_cpu(x: np.ndarray): N, M = x.shape cost = np.ones((N + 1, M + 1), dtype=np.float32) * np.inf trace = -np.ones((N + 1, M + 1), dtype=np.float32) cost[0, 0] = 0 for j in range(1, M + 1): for i in range(1, N + 1): c0 = cost[i - 1, j - 1] c1 = cost[i - 1, j] c2 = cost[i, j - 1] if c0 < c1 and c0 < c2: c, t = c0, 0 elif c1 < c0 and c1 < c2: c, t = c1, 1 else: c, t = c2, 2 cost[i, j] = x[i - 1, j - 1] + c trace[i, j] = t return backtrace(trace) def dtw_cuda(x, BLOCK_SIZE=1024): from .triton_ops import dtw_kernel M, N = x.shape assert M < BLOCK_SIZE, f"M should be smaller than {BLOCK_SIZE=}" x_skew = ( F.pad(x, (0, M + 1), value=np.inf).flatten()[: M * (N + M)].reshape(M, N + M) ) x_skew = x_skew.T.contiguous() cost = torch.ones(N + M + 2, M + 2) * np.inf cost[0, 0] = 0 cost = cost.to(x.device) trace = torch.zeros_like(cost, dtype=torch.int32) dtw_kernel[(1,)]( cost, trace, x_skew, x_skew.stride(0), cost.stride(0), trace.stride(0), N, M, BLOCK_SIZE=BLOCK_SIZE, ) trace = trace.T.flatten()[: (M + 1) * (M + N + 3)].reshape(M + 1, M + N + 3)[ :, : N + 1 ] return backtrace(trace.cpu().numpy()) def dtw(x: torch.Tensor) -> np.ndarray: if x.is_cuda: try: return dtw_cuda(x) except (RuntimeError, subprocess.CalledProcessError): warnings.warn( "Failed to launch Triton kernels, likely due to missing CUDA toolkit; " 
"falling back to a slower DTW implementation..." ) return dtw_cpu(x.double().cpu().numpy()) @dataclass class WordTiming: word: str tokens: List[int] start: float end: float probability: float def find_alignment( model: "Whisper", tokenizer: Tokenizer, text_tokens: List[int], mel: torch.Tensor, num_frames: int, *, medfilt_width: int = 7, qk_scale: float = 1.0, ) -> List[WordTiming]: if len(text_tokens) == 0: return [] tokens = torch.tensor( [ *tokenizer.sot_sequence, tokenizer.no_timestamps, *text_tokens, tokenizer.eot, ] ).to(model.device) # install hooks on the cross attention layers to retrieve the attention weights QKs = [None] * model.dims.n_text_layer hooks = [ block.cross_attn.register_forward_hook( lambda _, ins, outs, index=i: QKs.__setitem__(index, outs[-1][0]) ) for i, block in enumerate(model.decoder.blocks) ] from .model import disable_sdpa with torch.no_grad(), disable_sdpa(): logits = model(mel.unsqueeze(0), tokens.unsqueeze(0))[0] sampled_logits = logits[len(tokenizer.sot_sequence) :, : tokenizer.eot] token_probs = sampled_logits.softmax(dim=-1) text_token_probs = token_probs[np.arange(len(text_tokens)), text_tokens] text_token_probs = text_token_probs.tolist() for hook in hooks: hook.remove() # heads * tokens * frames weights = torch.stack([QKs[_l][_h] for _l, _h in model.alignment_heads.indices().T]) weights = weights[:, :, : num_frames // 2] weights = (weights * qk_scale).softmax(dim=-1) std, mean = torch.std_mean(weights, dim=-2, keepdim=True, unbiased=False) weights = (weights - mean) / std weights = median_filter(weights, medfilt_width) matrix = weights.mean(axis=0) matrix = matrix[len(tokenizer.sot_sequence) : -1] text_indices, time_indices = dtw(-matrix) words, word_tokens = tokenizer.split_to_word_tokens(text_tokens + [tokenizer.eot]) if len(word_tokens) <= 1: # return on eot only # >>> np.pad([], (1, 0)) # array([0.]) # This results in crashes when we lookup jump_times with float, like # IndexError: arrays used as indices must be of integer 
(or boolean) type return [] word_boundaries = np.pad(np.cumsum([len(t) for t in word_tokens[:-1]]), (1, 0)) jumps = np.pad(np.diff(text_indices), (1, 0), constant_values=1).astype(bool) jump_times = time_indices[jumps] / TOKENS_PER_SECOND start_times = jump_times[word_boundaries[:-1]] end_times = jump_times[word_boundaries[1:]] word_probabilities = [ np.mean(text_token_probs[i:j]) for i, j in zip(word_boundaries[:-1], word_boundaries[1:]) ] return [ WordTiming(word, tokens, start, end, probability) for word, tokens, start, end, probability in zip( words, word_tokens, start_times, end_times, word_probabilities ) ] def merge_punctuations(alignment: List[WordTiming], prepended: str, appended: str): # merge prepended punctuations i = len(alignment) - 2 j = len(alignment) - 1 while i >= 0: previous = alignment[i] following = alignment[j] if previous.word.startswith(" ") and previous.word.strip() in prepended: # prepend it to the following word following.word = previous.word + following.word following.tokens = previous.tokens + following.tokens previous.word = "" previous.tokens = [] else: j = i i -= 1 # merge appended punctuations i = 0 j = 1 while j < len(alignment): previous = alignment[i] following = alignment[j] if not previous.word.endswith(" ") and following.word in appended: # append it to the previous word previous.word = previous.word + following.word previous.tokens = previous.tokens + following.tokens following.word = "" following.tokens = [] else: i = j j += 1 def add_word_timestamps( *, segments: List[dict], model: "Whisper", tokenizer: Tokenizer, mel: torch.Tensor, num_frames: int, prepend_punctuations: str = "\"'“¿([{-", append_punctuations: str = "\"'.。,,!!??::”)]}、", last_speech_timestamp: float, **kwargs, ): if len(segments) == 0: return text_tokens_per_segment = [ [token for token in segment["tokens"] if token < tokenizer.eot] for segment in segments ] text_tokens = list(itertools.chain.from_iterable(text_tokens_per_segment)) alignment = 
find_alignment(model, tokenizer, text_tokens, mel, num_frames, **kwargs) word_durations = np.array([t.end - t.start for t in alignment]) word_durations = word_durations[word_durations.nonzero()] median_duration = np.median(word_durations) if len(word_durations) > 0 else 0.0 median_duration = min(0.7, float(median_duration)) max_duration = median_duration * 2 # hack: truncate long words at sentence boundaries. # a better segmentation algorithm based on VAD should be able to replace this. if len(word_durations) > 0: sentence_end_marks = ".。!!??" # ensure words at sentence boundaries are not longer than twice the median word duration. for i in range(1, len(alignment)): if alignment[i].end - alignment[i].start > max_duration: if alignment[i].word in sentence_end_marks: alignment[i].end = alignment[i].start + max_duration elif alignment[i - 1].word in sentence_end_marks: alignment[i].start = alignment[i].end - max_duration merge_punctuations(alignment, prepend_punctuations, append_punctuations) time_offset = segments[0]["seek"] * HOP_LENGTH / SAMPLE_RATE word_index = 0 for segment, text_tokens in zip(segments, text_tokens_per_segment): saved_tokens = 0 words = [] while word_index < len(alignment) and saved_tokens < len(text_tokens): timing = alignment[word_index] if timing.word: words.append( dict( word=timing.word, start=round(time_offset + timing.start, 2), end=round(time_offset + timing.end, 2), probability=timing.probability, ) ) saved_tokens += len(timing.tokens) word_index += 1 # hack: truncate long words at segment boundaries. # a better segmentation algorithm based on VAD should be able to replace this. if len(words) > 0: # ensure the first and second word after a pause is not longer than # twice the median word duration. 
if words[0]["end"] - last_speech_timestamp > median_duration * 4 and ( words[0]["end"] - words[0]["start"] > max_duration or ( len(words) > 1 and words[1]["end"] - words[0]["start"] > max_duration * 2 ) ): if ( len(words) > 1 and words[1]["end"] - words[1]["start"] > max_duration ): boundary = max(words[1]["end"] / 2, words[1]["end"] - max_duration) words[0]["end"] = words[1]["start"] = boundary words[0]["start"] = max(0, words[0]["end"] - max_duration) # prefer the segment-level start timestamp if the first word is too long. if ( segment["start"] < words[0]["end"] and segment["start"] - 0.5 > words[0]["start"] ): words[0]["start"] = max( 0, min(words[0]["end"] - median_duration, segment["start"]) ) else: segment["start"] = words[0]["start"] # prefer the segment-level end timestamp if the last word is too long. if ( segment["end"] > words[-1]["start"] and segment["end"] + 0.5 < words[-1]["end"] ): words[-1]["end"] = max( words[-1]["start"] + median_duration, segment["end"] ) else: segment["end"] = words[-1]["end"] last_speech_timestamp = segment["end"] segment["words"] = words
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/normalizers/english.py
whisper/normalizers/english.py
import json
import os
import re
from fractions import Fraction
from typing import Iterator, List, Match, Optional, Union

from more_itertools import windowed

from .basic import remove_symbols_and_diacritics


class EnglishNumberNormalizer:
    """
    Convert any spelled-out numbers into arabic numbers, while handling:

    - remove any commas
    - keep the suffixes such as: `1960s`, `274th`, `32nd`, etc.
    - spell out currency symbols after the number. e.g. `$20 million` -> `20000000 dollars`
    - spell out `one` and `ones`
    - interpret successive single-digit numbers as nominal: `one oh one` -> `101`
    """

    def __init__(self):
        super().__init__()

        self.zeros = {"o", "oh", "zero"}
        self.ones = {
            name: i
            for i, name in enumerate(
                [
                    "one",
                    "two",
                    "three",
                    "four",
                    "five",
                    "six",
                    "seven",
                    "eight",
                    "nine",
                    "ten",
                    "eleven",
                    "twelve",
                    "thirteen",
                    "fourteen",
                    "fifteen",
                    "sixteen",
                    "seventeen",
                    "eighteen",
                    "nineteen",
                ],
                start=1,
            )
        }
        self.ones_plural = {
            "sixes" if name == "six" else name + "s": (value, "s")
            for name, value in self.ones.items()
        }
        self.ones_ordinal = {
            "zeroth": (0, "th"),
            "first": (1, "st"),
            "second": (2, "nd"),
            "third": (3, "rd"),
            "fifth": (5, "th"),
            "twelfth": (12, "th"),
            **{
                name + ("h" if name.endswith("t") else "th"): (value, "th")
                for name, value in self.ones.items()
                if value > 3 and value != 5 and value != 12
            },
        }
        self.ones_suffixed = {**self.ones_plural, **self.ones_ordinal}

        self.tens = {
            "twenty": 20,
            "thirty": 30,
            "forty": 40,
            "fifty": 50,
            "sixty": 60,
            "seventy": 70,
            "eighty": 80,
            "ninety": 90,
        }
        self.tens_plural = {
            name.replace("y", "ies"): (value, "s") for name, value in self.tens.items()
        }
        self.tens_ordinal = {
            name.replace("y", "ieth"): (value, "th")
            for name, value in self.tens.items()
        }
        self.tens_suffixed = {**self.tens_plural, **self.tens_ordinal}

        self.multipliers = {
            "hundred": 100,
            "thousand": 1_000,
            "million": 1_000_000,
            "billion": 1_000_000_000,
            "trillion": 1_000_000_000_000,
            "quadrillion": 1_000_000_000_000_000,
            "quintillion": 1_000_000_000_000_000_000,
            "sextillion": 1_000_000_000_000_000_000_000,
            "septillion": 1_000_000_000_000_000_000_000_000,
            "octillion": 1_000_000_000_000_000_000_000_000_000,
            "nonillion": 1_000_000_000_000_000_000_000_000_000_000,
            "decillion": 1_000_000_000_000_000_000_000_000_000_000_000,
        }
        self.multipliers_plural = {
            name + "s": (value, "s") for name, value in self.multipliers.items()
        }
        self.multipliers_ordinal = {
            name + "th": (value, "th") for name, value in self.multipliers.items()
        }
        self.multipliers_suffixed = {
            **self.multipliers_plural,
            **self.multipliers_ordinal,
        }
        self.decimals = {*self.ones, *self.tens, *self.zeros}

        self.preceding_prefixers = {
            "minus": "-",
            "negative": "-",
            "plus": "+",
            "positive": "+",
        }
        self.following_prefixers = {
            "pound": "£",
            "pounds": "£",
            "euro": "€",
            "euros": "€",
            "dollar": "$",
            "dollars": "$",
            "cent": "¢",
            "cents": "¢",
        }
        self.prefixes = set(
            list(self.preceding_prefixers.values())
            + list(self.following_prefixers.values())
        )
        self.suffixers = {
            "per": {"cent": "%"},
            "percent": "%",
        }
        self.specials = {"and", "double", "triple", "point"}

        # every word this normalizer knows how to interpret
        self.words = {
            key
            for mapping in [
                self.zeros,
                self.ones,
                self.ones_suffixed,
                self.tens,
                self.tens_suffixed,
                self.multipliers,
                self.multipliers_suffixed,
                self.preceding_prefixers,
                self.following_prefixers,
                self.suffixers,
                self.specials,
            ]
            for key in mapping
        }
        self.literal_words = {"one", "ones"}

    def process_words(self, words: List[str]) -> Iterator[str]:
        """Yield normalized tokens for `words`, merging spelled-out numbers."""
        prefix: Optional[str] = None
        value: Optional[Union[str, int]] = None
        skip = False

        def to_fraction(s: str):
            # returns None for anything Fraction cannot parse
            try:
                return Fraction(s)
            except ValueError:
                return None

        def output(result: Union[str, int]):
            # flush the accumulated value, applying any pending sign/currency prefix
            nonlocal prefix, value
            result = str(result)
            if prefix is not None:
                result = prefix + result
            value = None
            prefix = None
            return result

        if len(words) == 0:
            return

        # look at each word with its neighbors (None-padded at both ends)
        for prev, current, next in windowed([None] + words + [None], 3):
            if skip:
                skip = False
                continue

            next_is_numeric = next is not None and re.match(r"^\d+(\.\d+)?$", next)
            has_prefix = current[0] in self.prefixes
            current_without_prefix = current[1:] if has_prefix else current
            if re.match(r"^\d+(\.\d+)?$", current_without_prefix):
                # arabic numbers (potentially with signs and fractions)
                f = to_fraction(current_without_prefix)
                assert f is not None
                if value is not None:
                    if isinstance(value, str) and value.endswith("."):
                        # concatenate decimals / ip address components
                        value = str(value) + str(current)
                        continue
                    else:
                        yield output(value)

                prefix = current[0] if has_prefix else prefix
                if f.denominator == 1:
                    value = f.numerator  # store integers as int
                else:
                    value = current_without_prefix
            elif current not in self.words:
                # non-numeric words
                if value is not None:
                    yield output(value)
                yield output(current)
            elif current in self.zeros:
                value = str(value or "") + "0"
            elif current in self.ones:
                ones = self.ones[current]

                if value is None:
                    value = ones
                elif isinstance(value, str) or prev in self.ones:
                    if (
                        prev in self.tens and ones < 10
                    ):  # replace the last zero with the digit
                        assert value[-1] == "0"
                        value = value[:-1] + str(ones)
                    else:
                        value = str(value) + str(ones)
                elif ones < 10:
                    if value % 10 == 0:
                        value += ones
                    else:
                        value = str(value) + str(ones)
                else:  # eleven to nineteen
                    if value % 100 == 0:
                        value += ones
                    else:
                        value = str(value) + str(ones)
            elif current in self.ones_suffixed:
                # ordinal or cardinal; yield the number right away
                ones, suffix = self.ones_suffixed[current]
                if value is None:
                    yield output(str(ones) + suffix)
                elif isinstance(value, str) or prev in self.ones:
                    if prev in self.tens and ones < 10:
                        assert value[-1] == "0"
                        yield output(value[:-1] + str(ones) + suffix)
                    else:
                        yield output(str(value) + str(ones) + suffix)
                elif ones < 10:
                    if value % 10 == 0:
                        yield output(str(value + ones) + suffix)
                    else:
                        yield output(str(value) + str(ones) + suffix)
                else:  # eleven to nineteen
                    if value % 100 == 0:
                        yield output(str(value + ones) + suffix)
                    else:
                        yield output(str(value) + str(ones) + suffix)
                value = None
            elif current in self.tens:
                tens = self.tens[current]
                if value is None:
                    value = tens
                elif isinstance(value, str):
                    value = str(value) + str(tens)
                else:
                    if value % 100 == 0:
                        value += tens
                    else:
                        value = str(value) + str(tens)
            elif current in self.tens_suffixed:
                # ordinal or cardinal; yield the number right away
                tens, suffix = self.tens_suffixed[current]
                if value is None:
                    yield output(str(tens) + suffix)
                elif isinstance(value, str):
                    yield output(str(value) + str(tens) + suffix)
                else:
                    if value % 100 == 0:
                        yield output(str(value + tens) + suffix)
                    else:
                        yield output(str(value) + str(tens) + suffix)
            elif current in self.multipliers:
                multiplier = self.multipliers[current]
                if value is None:
                    value = multiplier
                elif isinstance(value, str) or value == 0:
                    f = to_fraction(value)
                    p = f * multiplier if f is not None else None
                    if f is not None and p.denominator == 1:
                        value = p.numerator
                    else:
                        yield output(value)
                        value = multiplier
                else:
                    before = value // 1000 * 1000
                    residual = value % 1000
                    value = before + residual * multiplier
            elif current in self.multipliers_suffixed:
                multiplier, suffix = self.multipliers_suffixed[current]
                if value is None:
                    yield output(str(multiplier) + suffix)
                elif isinstance(value, str):
                    f = to_fraction(value)
                    p = f * multiplier if f is not None else None
                    if f is not None and p.denominator == 1:
                        yield output(str(p.numerator) + suffix)
                    else:
                        yield output(value)
                        yield output(str(multiplier) + suffix)
                else:  # int
                    before = value // 1000 * 1000
                    residual = value % 1000
                    value = before + residual * multiplier
                    yield output(str(value) + suffix)
                value = None
            elif current in self.preceding_prefixers:
                # apply prefix (positive, minus, etc.) if it precedes a number
                if value is not None:
                    yield output(value)

                if next in self.words or next_is_numeric:
                    prefix = self.preceding_prefixers[current]
                else:
                    yield output(current)
            elif current in self.following_prefixers:
                # apply prefix (dollars, cents, etc.) only after a number
                if value is not None:
                    prefix = self.following_prefixers[current]
                    yield output(value)
                else:
                    yield output(current)
            elif current in self.suffixers:
                # apply suffix symbols (percent -> '%')
                if value is not None:
                    suffix = self.suffixers[current]
                    if isinstance(suffix, dict):
                        if next in suffix:
                            yield output(str(value) + suffix[next])
                            skip = True
                        else:
                            yield output(value)
                            yield output(current)
                    else:
                        yield output(str(value) + suffix)
                else:
                    yield output(current)
            elif current in self.specials:
                if next not in self.words and not next_is_numeric:
                    # apply special handling only if the next word can be numeric
                    if value is not None:
                        yield output(value)
                    yield output(current)
                elif current == "and":
                    # ignore "and" after hundreds, thousands, etc.
                    if prev not in self.multipliers:
                        if value is not None:
                            yield output(value)
                        yield output(current)
                elif current == "double" or current == "triple":
                    if next in self.ones or next in self.zeros:
                        repeats = 2 if current == "double" else 3
                        ones = self.ones.get(next, 0)
                        value = str(value or "") + str(ones) * repeats
                        skip = True
                    else:
                        if value is not None:
                            yield output(value)
                        yield output(current)
                elif current == "point":
                    if next in self.decimals or next_is_numeric:
                        value = str(value or "") + "."
                else:
                    # should all have been covered at this point
                    raise ValueError(f"Unexpected token: {current}")
            else:
                # all should have been covered at this point
                raise ValueError(f"Unexpected token: {current}")

        if value is not None:
            yield output(value)

    def preprocess(self, s: str):
        # replace "<number> and a half" with "<number> point five"
        results = []

        segments = re.split(r"\band\s+a\s+half\b", s)
        for i, segment in enumerate(segments):
            if len(segment.strip()) == 0:
                continue
            if i == len(segments) - 1:
                results.append(segment)
            else:
                results.append(segment)
                last_word = segment.rsplit(maxsplit=2)[-1]
                if last_word in self.decimals or last_word in self.multipliers:
                    results.append("point five")
                else:
                    results.append("and a half")

        s = " ".join(results)

        # put a space at number/letter boundary
        s = re.sub(r"([a-z])([0-9])", r"\1 \2", s)
        s = re.sub(r"([0-9])([a-z])", r"\1 \2", s)

        # but remove spaces which could be a suffix
        s = re.sub(r"([0-9])\s+(st|nd|rd|th|s)\b", r"\1\2", s)

        return s

    def postprocess(self, s: str):
        def combine_cents(m: Match):
            try:
                currency = m.group(1)
                integer = m.group(2)
                cents = int(m.group(3))
                return f"{currency}{integer}.{cents:02d}"
            except ValueError:
                return m.string

        def extract_cents(m: Match):
            try:
                return f"¢{int(m.group(1))}"
            except ValueError:
                return m.string

        # apply currency postprocessing; "$2 and ¢7" -> "$2.07"
        s = re.sub(r"([€£$])([0-9]+) (?:and )?¢([0-9]{1,2})\b", combine_cents, s)
        # fix: the "." was previously unescaped and matched any character;
        # only a literal decimal point should turn "$0.07" into "¢7"
        s = re.sub(r"[€£$]0\.([0-9]{1,2})\b", extract_cents, s)

        # write "one(s)" instead of "1(s)", just for the readability
        s = re.sub(r"\b1(s?)\b", r"one\1", s)

        return s

    def __call__(self, s: str):
        s = self.preprocess(s)
        s = " ".join(word for word in self.process_words(s.split()) if word is not None)
        s = self.postprocess(s)

        return s


class EnglishSpellingNormalizer:
    """
    Applies British-American spelling mappings as listed in [1].

    [1] https://www.tysto.com/uk-us-spelling-list.html
    """

    def __init__(self):
        mapping_path = os.path.join(os.path.dirname(__file__), "english.json")
        # fix: use a context manager so the mapping file is closed promptly
        # (the original `json.load(open(...))` leaked the file handle until GC)
        with open(mapping_path) as f:
            self.mapping = json.load(f)

    def __call__(self, s: str):
        return " ".join(self.mapping.get(word, word) for word in s.split())


class EnglishTextNormalizer:
    def __init__(self):
        self.ignore_patterns = r"\b(hmm|mm|mhm|mmm|uh|um)\b"
        self.replacers = {
            # common contractions
            r"\bwon't\b": "will not",
            r"\bcan't\b": "can not",
            r"\blet's\b": "let us",
            r"\bain't\b": "aint",
            r"\by'all\b": "you all",
            r"\bwanna\b": "want to",
            r"\bgotta\b": "got to",
            r"\bgonna\b": "going to",
            r"\bi'ma\b": "i am going to",
            r"\bimma\b": "i am going to",
            r"\bwoulda\b": "would have",
            r"\bcoulda\b": "could have",
            r"\bshoulda\b": "should have",
            r"\bma'am\b": "madam",
            # contractions in titles/prefixes
            r"\bmr\b": "mister ",
            r"\bmrs\b": "missus ",
            r"\bst\b": "saint ",
            r"\bdr\b": "doctor ",
            r"\bprof\b": "professor ",
            r"\bcapt\b": "captain ",
            r"\bgov\b": "governor ",
            r"\bald\b": "alderman ",
            r"\bgen\b": "general ",
            r"\bsen\b": "senator ",
            r"\brep\b": "representative ",
            r"\bpres\b": "president ",
            r"\brev\b": "reverend ",
            r"\bhon\b": "honorable ",
            r"\basst\b": "assistant ",
            r"\bassoc\b": "associate ",
            r"\blt\b": "lieutenant ",
            r"\bcol\b": "colonel ",
            r"\bjr\b": "junior ",
            r"\bsr\b": "senior ",
            r"\besq\b": "esquire ",
            # prefect tenses, ideally it should be any past participles, but it's harder..
            r"'d been\b": " had been",
            r"'s been\b": " has been",
            r"'d gone\b": " had gone",
            r"'s gone\b": " has gone",
            r"'d done\b": " had done",  # "'s done" is ambiguous
            r"'s got\b": " has got",
            # general contractions
            r"n't\b": " not",
            r"'re\b": " are",
            r"'s\b": " is",
            r"'d\b": " would",
            r"'ll\b": " will",
            r"'t\b": " not",
            r"'ve\b": " have",
            r"'m\b": " am",
        }
        self.standardize_numbers = EnglishNumberNormalizer()
        self.standardize_spellings = EnglishSpellingNormalizer()

    def __call__(self, s: str):
        s = s.lower()

        s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
        s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parenthesis
        s = re.sub(self.ignore_patterns, "", s)
        s = re.sub(r"\s+'", "'", s)  # when there's a space before an apostrophe

        for pattern, replacement in self.replacers.items():
            s = re.sub(pattern, replacement, s)

        s = re.sub(r"(\d),(\d)", r"\1\2", s)  # remove commas between digits
        s = re.sub(r"\.([^0-9]|$)", r" \1", s)  # remove periods not followed by numbers
        s = remove_symbols_and_diacritics(s, keep=".%$¢€£")  # keep numeric symbols

        s = self.standardize_numbers(s)
        s = self.standardize_spellings(s)

        # now remove prefix/suffix symbols that are not preceded/followed by numbers
        s = re.sub(r"[.$¢€£]([^0-9])", r" \1", s)
        s = re.sub(r"([^0-9])%", r"\1 ", s)

        s = re.sub(r"\s+", " ", s)  # replace any successive whitespaces with a space

        return s
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/normalizers/basic.py
whisper/normalizers/basic.py
import re
import unicodedata

import regex

# non-ASCII letters that are not separated by "NFKD" normalization
ADDITIONAL_DIACRITICS = {
    "œ": "oe",
    "Œ": "OE",
    "ø": "o",
    "Ø": "O",
    "æ": "ae",
    "Æ": "AE",
    "ß": "ss",
    "ẞ": "SS",
    "đ": "d",
    "Đ": "D",
    "ð": "d",
    "Ð": "D",
    "þ": "th",
    "Þ": "th",
    "ł": "l",
    "Ł": "L",
}


def remove_symbols_and_diacritics(s: str, keep=""):
    """
    Replace any other markers, symbols, and punctuations with a space,
    and drop any diacritics (category 'Mn' and some manual mappings)
    """

    def translate(char):
        # characters the caller explicitly wants to preserve pass through
        if char in keep:
            return char
        # manual mappings for letters NFKD cannot decompose
        if char in ADDITIONAL_DIACRITICS:
            return ADDITIONAL_DIACRITICS[char]
        category = unicodedata.category(char)
        if category == "Mn":  # combining marks are dropped entirely
            return ""
        if category[0] in "MSP":  # other marks, symbols, punctuation -> space
            return " "
        return char

    return "".join(translate(char) for char in unicodedata.normalize("NFKD", s))


def remove_symbols(s: str):
    """
    Replace any other markers, symbols, punctuations with a space, keeping diacritics
    """
    pieces = []
    for char in unicodedata.normalize("NFKC", s):
        pieces.append(" " if unicodedata.category(char)[0] in "MSP" else char)
    return "".join(pieces)


class BasicTextNormalizer:
    def __init__(self, remove_diacritics: bool = False, split_letters: bool = False):
        # choose the cleaning strategy once, at construction time
        if remove_diacritics:
            self.clean = remove_symbols_and_diacritics
        else:
            self.clean = remove_symbols
        self.split_letters = split_letters

    def __call__(self, s: str):
        s = s.lower()
        s = re.sub(r"[<\[][^>\]]*[>\]]", "", s)  # remove words between brackets
        s = re.sub(r"\(([^)]+?)\)", "", s)  # remove words between parenthesis
        s = self.clean(s).lower()

        if self.split_letters:
            # \X matches extended grapheme clusters; separate them with spaces
            s = " ".join(regex.findall(r"\X", s, regex.U))

        # replace any successive whitespace characters with a space
        s = re.sub(r"\s+", " ", s)

        return s
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false
openai/whisper
https://github.com/openai/whisper/blob/c0d2f624c09dc18e709e37c2ad90c039a4eb72a2/whisper/normalizers/__init__.py
whisper/normalizers/__init__.py
from .basic import BasicTextNormalizer as BasicTextNormalizer from .english import EnglishTextNormalizer as EnglishTextNormalizer
python
MIT
c0d2f624c09dc18e709e37c2ad90c039a4eb72a2
2026-01-04T14:38:15.473103Z
false