id int64 0 190k | prompt stringlengths 21 13.4M | docstring stringlengths 1 12k ⌀ |
|---|---|---|
17,405 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """

    def __init__(self, dir_archive):
        """Derive all input/output paths from the archive root `dir_archive`.

        Exits the whole process if `data/account.js` is missing, i.e. the given
        directory is not the root of an unpacked Twitter archive.
        """
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')
        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            exit()
        # find_dir_input_media / find_files_input_tweets are module-level helpers;
        # both also exit() when the expected archive content is missing.
        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)
        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties."""
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int] = None, format: str = "html", kind: str = "DMs") -> str:
        """Builds the path for a dm-archive file based on some properties."""
        index_suffix = ""
        # NOTE(review): a falsy index (None or 0) produces no "-partNNN" suffix;
        # callers presumably start part numbering at 1 -- confirm.
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Archive .js files look like `window.YTD.<name>.part0 = [ {...} ]`; stripping
    the assignment prefix on the first line turns the content into valid JSON.
    Returns the parsed list of dicts, or an empty dict when the file has no real
    content (both are fine for callers, which only iterate over the result).
    """
    # was an f-string with no placeholders (the filename placeholder appears to
    # have been lost); plain string preserves the exact output
    print('Parsing (unknown)...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `extract_username` function. Write a Python function `def extract_username(paths: PathConfig)` to solve the following problem:
Returns the user's Twitter username from account.js.
Here is the function:
def extract_username(paths: PathConfig):
    """Returns the user's Twitter username from account.js.

    account.js parses to a single-element list whose dict nests the profile
    data under the 'account' key. Removes dataset-table residue that was fused
    onto the return line and made it a syntax error.
    """
    account = read_json_from_js_file(paths.file_account_js)
    return account[0]['account']['username']
17,406 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
The provided code snippet includes necessary dependencies for implementing the `find_files_input_tweets` function. Write a Python function `def find_files_input_tweets(dir_path_input_data)` to solve the following problem:
Identify the tweet archive's file and folder names - they change slightly depending on the archive size it seems.
Here is the function:
def find_files_input_tweets(dir_path_input_data):
    """Identify the tweet archive's file and folder names -
    they change slightly depending on the archive size it seems.

    Returns the list of matching tweet .js file paths; exits the process when
    none are found. (Removes table residue fused onto the return line.)
    """
    input_tweets_file_templates = ['tweet.js', 'tweets.js', 'tweets-part*.js']
    files_paths_input_tweets = []
    for input_tweets_file_template in input_tweets_file_templates:
        files_paths_input_tweets += glob.glob(os.path.join(dir_path_input_data, input_tweets_file_template))
    if len(files_paths_input_tweets) == 0:
        print(f'Error: no files matching {input_tweets_file_templates} in {dir_path_input_data}')
        exit()
    return files_paths_input_tweets
17,407 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def find_dir_input_media(dir_path_input_data):
    """Locate the archive's media folder (named `tweet_media` or `tweets_media`).

    Exits the process when zero or more than one candidate folder is found.
    (Removes table residue fused onto the return line.)
    """
    input_media_dir_templates = ['tweet_media', 'tweets_media']
    input_media_dirs = []
    for input_media_dir_template in input_media_dir_templates:
        input_media_dirs += glob.glob(os.path.join(dir_path_input_data, input_media_dir_template))
    if len(input_media_dirs) == 0:
        print(f'Error: no folders matching {input_media_dir_templates} in {dir_path_input_data}')
        exit()
    if len(input_media_dirs) > 1:
        print(f'Error: multiple folders matching {input_media_dir_templates} in {dir_path_input_data}')
        exit()
    return input_media_dirs[0]
17,408 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """

    def __init__(self, dir_archive):
        """Derive all input/output paths from the archive root `dir_archive`.

        Exits the whole process if `data/account.js` is missing, i.e. the given
        directory is not the root of an unpacked Twitter archive.
        """
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')
        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            exit()
        # find_dir_input_media / find_files_input_tweets are module-level helpers;
        # both also exit() when the expected archive content is missing.
        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)
        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties."""
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int] = None, format: str = "html", kind: str = "DMs") -> str:
        """Builds the path for a dm-archive file based on some properties."""
        index_suffix = ""
        # NOTE(review): a falsy index (None or 0) produces no "-partNNN" suffix;
        # callers presumably start part numbering at 1 -- confirm.
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def mkdirs_for_file(path_file):
    """Ensure the directory that will contain `path_file` exists (no-op if it does)."""
    parent_dir = os.path.dirname(path_file)
    os.makedirs(parent_dir, exist_ok=True)
def download_file_if_larger(url, filename, index, count, sleep_time):
    """Attempts to download from the specified URL. Overwrites file if larger.

    Returns whether the file is now known to be the largest available, and the
    number of bytes downloaded.

    Decision logic: if the remote Content-Length equals the local size, nothing
    is downloaded. Otherwise the file is fetched to a `.tmp` sibling and kept
    only when its pixel area is >= the local version's (or when neither side is
    a readable image, e.g. videos). Any exception is logged and reported as a
    retryable failure.
    """
    # NOTE(review): import_module is presumably a lazy-import helper defined
    # elsewhere in this file (requests/imagesize are third-party) -- confirm.
    requests = import_module('requests')
    imagesize = import_module('imagesize')
    # "(unknown)" looks like a redacted filename placeholder in this progress prefix
    pref = f'{index:3d}/{count:3d} (unknown): '
    # Sleep briefly, in an attempt to minimize the possibility of trigging some auto-cutoff mechanism
    if index > 1:
        print(f'{pref}Sleeping...', end='\r')
        time.sleep(sleep_time)
    # Request the URL (in stream mode so that we can conditionally abort depending on the headers)
    print(f'{pref}Requesting headers for {url}...', end='\r')
    byte_size_before = os.path.getsize(filename)
    try:
        with requests.get(url, stream=True, timeout=2) as res:
            if not res.status_code == 200:
                # Try to get content of response as `res.text`.
                # For twitter.com, this will be empty in most (all?) cases.
                # It is successfully tested with error responses from other domains.
                raise Exception(f'Download failed with status "{res.status_code} {res.reason}". '
                                f'Response content: "{res.text}"')
            byte_size_after = int(res.headers['content-length'])
            if byte_size_after != byte_size_before:
                # Proceed with the full download
                tmp_filename = filename+'.tmp'
                print(f'{pref}Downloading {url}... ', end='\r')
                with open(tmp_filename,'wb') as f:
                    shutil.copyfileobj(res.raw, f)
                post = f'{byte_size_after/2**20:.1f}MB downloaded'
                # imagesize reports (-1, -1) for files it cannot parse as images
                width_before, height_before = imagesize.get(filename)
                width_after, height_after = imagesize.get(tmp_filename)
                pixels_before, pixels_after = width_before * height_before, width_after * height_after
                pixels_percentage_increase = 100.0 * (pixels_after - pixels_before) / pixels_before
                if width_before == -1 and height_before == -1 and width_after == -1 and height_after == -1:
                    # could not check size of both versions, probably a video or unsupported image format
                    os.replace(tmp_filename, filename)
                    bytes_percentage_increase = 100.0 * (byte_size_after - byte_size_before) / byte_size_before
                    logging.info(f'{pref}SUCCESS. New version is {bytes_percentage_increase:3.0f}% '
                                 f'larger in bytes (pixel comparison not possible). {post}')
                    return True, byte_size_after
                elif width_before == -1 or height_before == -1 or width_after == -1 or height_after == -1:
                    # could not check size of one version, this should not happen (corrupted download?)
                    logging.info(f'{pref}SKIPPED. Pixel size comparison inconclusive: '
                                 f'{width_before}*{height_before}px vs. {width_after}*{height_after}px. {post}')
                    return False, byte_size_after
                elif pixels_after >= pixels_before:
                    # new version has at least as many pixels: keep it
                    os.replace(tmp_filename, filename)
                    bytes_percentage_increase = 100.0 * (byte_size_after - byte_size_before) / byte_size_before
                    if bytes_percentage_increase >= 0:
                        logging.info(f'{pref}SUCCESS. New version is {bytes_percentage_increase:3.0f}% larger in bytes '
                                     f'and {pixels_percentage_increase:3.0f}% larger in pixels. {post}')
                    else:
                        logging.info(f'{pref}SUCCESS. New version is actually {-bytes_percentage_increase:3.0f}% '
                                     f'smaller in bytes but {pixels_percentage_increase:3.0f}% '
                                     f'larger in pixels. {post}')
                    return True, byte_size_after
                else:
                    # local version has more pixels: keep local, treat as "best known"
                    logging.info(f'{pref}SKIPPED. Online version has {-pixels_percentage_increase:3.0f}% '
                                 f'smaller pixel size. {post}')
                    return True, byte_size_after
            else:
                logging.info(f'{pref}SKIPPED. Online version is same byte size, assuming same content. Not downloaded.')
                return True, 0
    except Exception as err:
        logging.error(f"{pref}FAIL. Media couldn't be retrieved from {url} because of exception: {err}")
        return False, 0
The provided code snippet includes necessary dependencies for implementing the `download_larger_media` function. Write a Python function `def download_larger_media(media_sources, paths: PathConfig)` to solve the following problem:
Uses (filename, URL) tuples in media_sources to download files from remote storage. Aborts downloads if the remote file is the same size or smaller than the existing local version. Retries the failed downloads several times, with increasing pauses between each to avoid being blocked.
Here is the function:
def download_larger_media(media_sources, paths: PathConfig):
    """Uses (filename, URL) tuples in media_sources to download files from remote storage.

    Aborts downloads if the remote file is the same size or smaller than the existing
    local version. Retries the failed downloads several times, with increasing pauses
    between each to avoid being blocked. Progress and results are logged both to the
    console and to paths.file_download_log.

    (Fix: removes dataset-table residue fused onto the final print line, which made
    the original a syntax error.)
    """
    # Log to file as well as the console
    logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(message)s')
    mkdirs_for_file(paths.file_download_log)
    logfile_handler = logging.FileHandler(filename=paths.file_download_log, mode='w')
    logfile_handler.setLevel(logging.INFO)
    logging.getLogger().addHandler(logfile_handler)
    # Download new versions
    start_time = time.time()
    total_bytes_downloaded = 0
    sleep_time = 0.25
    remaining_tries = 5
    while remaining_tries > 0:
        number_of_files = len(media_sources)
        success_count = 0
        retries = []
        for index, (local_media_path, media_url) in enumerate(media_sources):
            success, bytes_downloaded = download_file_if_larger(
                media_url, local_media_path, index + 1, number_of_files, sleep_time
            )
            if success:
                success_count += 1
            else:
                retries.append((local_media_path, media_url))
            total_bytes_downloaded += bytes_downloaded
            # show % done and estimated remaining time:
            time_elapsed: float = time.time() - start_time
            estimated_time_per_file: float = time_elapsed / (index + 1)
            # interpret the remaining seconds as a UTC timestamp just to reuse
            # its hour/minute/second fields for formatting
            estimated_time_remaining: datetime.datetime = \
                datetime.datetime.fromtimestamp(
                    (number_of_files - (index + 1)) * estimated_time_per_file,
                    tz=datetime.timezone.utc
                )
            if estimated_time_remaining.hour >= 1:
                time_remaining_string: str = \
                    f"{estimated_time_remaining.hour} hour{'' if estimated_time_remaining.hour == 1 else 's'} " \
                    f"{estimated_time_remaining.minute} minute{'' if estimated_time_remaining.minute == 1 else 's'}"
            elif estimated_time_remaining.minute >= 1:
                time_remaining_string: str = \
                    f"{estimated_time_remaining.minute} minute{'' if estimated_time_remaining.minute == 1 else 's'} " \
                    f"{estimated_time_remaining.second} second{'' if estimated_time_remaining.second == 1 else 's'}"
            else:
                time_remaining_string: str = \
                    f"{estimated_time_remaining.second} second{'' if estimated_time_remaining.second == 1 else 's'}"
            if index + 1 == number_of_files:
                print(' 100 % done.')
            else:
                print(f' {(100*(index+1)/number_of_files):.1f} % done, about {time_remaining_string} remaining...')
        media_sources = retries
        remaining_tries -= 1
        sleep_time += 2
        logging.info(f'\n{success_count} of {number_of_files} tested media files '
                     f'are known to be the best-quality available.\n')
        if len(retries) == 0:
            break
        if remaining_tries > 0:
            print(f'----------------------\n\nRetrying the ones that failed, with a longer sleep. '
                  f'{remaining_tries} tries remaining.\n')
    end_time = time.time()
    logging.info(f'Total downloaded: {total_bytes_downloaded/2**20:.1f}MB = {total_bytes_downloaded/2**30:.2f}GB')
    logging.info(f'Time taken: {end_time-start_time:.0f}s')
    print(f'Wrote log to {paths.file_download_log}')
17,409 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """

    def __init__(self, dir_archive):
        """Derive all input/output paths from the archive root `dir_archive`.

        Exits the whole process if `data/account.js` is missing, i.e. the given
        directory is not the root of an unpacked Twitter archive.
        """
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')
        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            exit()
        # find_dir_input_media / find_files_input_tweets are module-level helpers;
        # both also exit() when the expected archive content is missing.
        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)
        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties."""
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int] = None, format: str = "html", kind: str = "DMs") -> str:
        """Builds the path for a dm-archive file based on some properties."""
        index_suffix = ""
        # NOTE(review): a falsy index (None or 0) produces no "-partNNN" suffix;
        # callers presumably start part numbering at 1 -- confirm.
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def open_and_mkdirs(path_file):
    """Open `path_file` for UTF-8 text writing, creating parent directories as needed."""
    # make sure the destination directory exists before opening for write
    mkdirs_for_file(path_file)
    handle = open(path_file, 'w', encoding='utf-8')
    return handle
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Archive .js files look like `window.YTD.<name>.part0 = [ {...} ]`; stripping
    the assignment prefix on the first line turns the content into valid JSON.
    Returns the parsed list of dicts, or an empty dict when the file has no real
    content (both are fine for callers, which only iterate over the result).
    """
    # was an f-string with no placeholders (the filename placeholder appears to
    # have been lost); plain string preserves the exact output
    print('Parsing (unknown)...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
def convert_tweet(tweet, username, media_sources, users: dict, paths: PathConfig):
    """Converts a JSON-format tweet. Returns tuple of timestamp, markdown and HTML.

    Side effects: copies referenced media into paths.dir_output_media, appends
    (local_path, best_quality_url) tuples to `media_sources` for later upgrading,
    and records user_id -> UserData mappings in `users`.
    NOTE(review): depends on module-level helpers escape_markdown, rel_url and
    UserData that are defined elsewhere in this file.
    """
    # archive tweets are wrapped in an extra {'tweet': ...} layer
    if 'tweet' in tweet.keys():
        tweet = tweet['tweet']
    timestamp_str = tweet['created_at']
    timestamp = int(round(datetime.datetime.strptime(timestamp_str, '%a %b %d %X %z %Y').timestamp()))
    # Example: Tue Mar 19 14:05:17 +0000 2019
    body_markdown = tweet['full_text']
    body_html = tweet['full_text']
    tweet_id_str = tweet['id_str']
    # for old tweets before embedded t.co redirects were added, ensure the links are
    # added to the urls entities list so that we can build correct links later on.
    if 'entities' in tweet and 'media' not in tweet['entities'] and len(tweet['entities'].get("urls", [])) == 0:
        for word in tweet['full_text'].split():
            try:
                url = urlparse(word)
            except ValueError:
                pass  # don't crash when trying to parse something that looks like a URL but actually isn't
            else:
                if url.scheme != '' and url.netloc != '' and not word.endswith('\u2026'):
                    # Shorten links similar to twitter
                    netloc_short = url.netloc[4:] if url.netloc.startswith("www.") else url.netloc
                    path_short = url.path if len(url.path + '?' + url.query) < 15 \
                        else (url.path + '?' + url.query)[:15] + '\u2026'
                    tweet['entities']['urls'].append({
                        'url': word,
                        'expanded_url': word,
                        'display_url': netloc_short + path_short,
                        'indices': [tweet['full_text'].index(word), tweet['full_text'].index(word) + len(word)],
                    })
    # replace t.co URLs with their original versions
    if 'entities' in tweet and 'urls' in tweet['entities']:
        for url in tweet['entities']['urls']:
            if 'url' in url and 'expanded_url' in url:
                expanded_url = url['expanded_url']
                body_markdown = body_markdown.replace(url['url'], expanded_url)
                expanded_url_html = f'<a href="{expanded_url}">{expanded_url}</a>'
                body_html = body_html.replace(url['url'], expanded_url_html)
    # if the tweet is a reply, construct a header that links the names
    # of the accounts being replied to the tweet being replied to
    header_markdown = ''
    header_html = ''
    if 'in_reply_to_status_id' in tweet:
        # match and remove all occurrences of '@username ' at the start of the body
        replying_to = re.match(r'^(@[0-9A-Za-z_]* )*', body_markdown)[0]
        if replying_to:
            body_markdown = body_markdown[len(replying_to):]
            body_html = body_html[len(replying_to):]
        else:
            # no '@username ' in the body: we're replying to self
            replying_to = f'@{username}'
        names = replying_to.split()
        # some old tweets lack 'in_reply_to_screen_name': use it if present, otherwise fall back to names[0]
        in_reply_to_screen_name = tweet['in_reply_to_screen_name'] if 'in_reply_to_screen_name' in tweet else names[0]
        # create a list of names of the form '@name1, @name2 and @name3' - or just '@name1' if there is only one name
        name_list = ', '.join(names[:-1]) + (f' and {names[-1]}' if len(names) > 1 else names[0])
        in_reply_to_status_id = tweet['in_reply_to_status_id']
        replying_to_url = f'https://twitter.com/{in_reply_to_screen_name}/status/{in_reply_to_status_id}'
        header_markdown += f'Replying to [{escape_markdown(name_list)}]({replying_to_url})\n\n'
        header_html += f'Replying to <a href="{replying_to_url}">{name_list}</a><br>'
    # escape tweet body for markdown rendering:
    body_markdown = escape_markdown(body_markdown)
    # replace image URLs with image links to local files
    if 'entities' in tweet and 'media' in tweet['entities'] and 'extended_entities' in tweet \
            and 'media' in tweet['extended_entities']:
        original_url = tweet['entities']['media'][0]['url']
        markdown = ''
        html = ''
        for media in tweet['extended_entities']['media']:
            if 'url' in media and 'media_url' in media:
                original_expanded_url = media['media_url']
                original_filename = os.path.split(original_expanded_url)[1]
                archive_media_filename = tweet_id_str + '-' + original_filename
                archive_media_path = os.path.join(paths.dir_input_media, archive_media_filename)
                file_output_media = os.path.join(paths.dir_output_media, archive_media_filename)
                media_url = rel_url(file_output_media, paths.example_file_output_tweets)
                # separator only between multiple media items, not before the first
                markdown += '' if not markdown and body_markdown == escape_markdown(original_url) else '\n\n'
                html += '' if not html and body_html == original_url else '<br>'
                if os.path.isfile(archive_media_path):
                    # Found a matching image, use this one
                    if not os.path.isfile(file_output_media):
                        shutil.copy(archive_media_path, file_output_media)
                    # NOTE(review): this f-string is empty -- it looks like a markdown
                    # image link (e.g. ![](media_url)) was lost in extraction; confirm
                    # against the upstream project before relying on the .md output.
                    markdown += f''
                    html += f'<img src="{media_url}"/>'
                    # Save the online location of the best-quality version of this file, for later upgrading if wanted
                    best_quality_url = f'https://pbs.twimg.com/media/{original_filename}:orig'
                    media_sources.append(
                        (os.path.join(paths.dir_output_media, archive_media_filename), best_quality_url)
                    )
                else:
                    # Is there any other file that includes the tweet_id in its filename?
                    archive_media_paths = glob.glob(os.path.join(paths.dir_input_media, tweet_id_str + '*'))
                    if len(archive_media_paths) > 0:
                        for archive_media_path in archive_media_paths:
                            archive_media_filename = os.path.split(archive_media_path)[-1]
                            file_output_media = os.path.join(paths.dir_output_media, archive_media_filename)
                            media_url = rel_url(file_output_media, paths.example_file_output_tweets)
                            if not os.path.isfile(file_output_media):
                                shutil.copy(archive_media_path, file_output_media)
                            markdown += f'<video controls><source src="{media_url}">Your browser ' \
                                        f'does not support the video tag.</video>\n'
                            html += f'<video controls><source src="{media_url}">Your browser ' \
                                    f'does not support the video tag.</video>\n'
                            # Save the online location of the best-quality version of this file,
                            # for later upgrading if wanted
                            if 'video_info' in media and 'variants' in media['video_info']:
                                best_quality_url = ''
                                best_bitrate = -1  # some valid videos are marked with bitrate=0 in the JSON
                                for variant in media['video_info']['variants']:
                                    if 'bitrate' in variant:
                                        bitrate = int(variant['bitrate'])
                                        if bitrate > best_bitrate:
                                            best_quality_url = variant['url']
                                            best_bitrate = bitrate
                                if best_bitrate == -1:
                                    print(f"Warning No URL found for {original_url} {original_expanded_url} "
                                          f"{archive_media_path} {media_url}")
                                    print(f"JSON: {tweet}")
                                else:
                                    media_sources.append(
                                        (os.path.join(paths.dir_output_media, archive_media_filename),
                                         best_quality_url)
                                    )
                    else:
                        print(f'Warning: missing local file: {archive_media_path}. Using original link instead: '
                              f'{original_url} (expands to {original_expanded_url})')
                        # NOTE(review): empty f-string, likely a lost markdown link -- confirm upstream.
                        markdown += f''
                        html += f'<a href="{original_url}">{original_url}</a>'
        body_markdown = body_markdown.replace(escape_markdown(original_url), markdown)
        body_html = body_html.replace(original_url, html)
    # make the body a quote
    body_markdown = '> ' + '\n> '.join(body_markdown.splitlines())
    body_html = '<p><blockquote>' + '<br>\n'.join(body_html.splitlines()) + '</blockquote>'
    # append the original Twitter URL as a link
    original_tweet_url = f'https://twitter.com/{username}/status/{tweet_id_str}'
    icon_url = rel_url(paths.file_tweet_icon, paths.example_file_output_tweets)
    body_markdown = header_markdown + body_markdown + f'\n\n<img src="{icon_url}" width="12" /> ' \
                                                      f'[{timestamp_str}]({original_tweet_url})'
    body_html = header_html + body_html + f'<a href="{original_tweet_url}"><img src="{icon_url}" ' \
                                          f'width="12" /> {timestamp_str}</a></p>'
    # extract user_id:handle connections
    if 'in_reply_to_user_id' in tweet and 'in_reply_to_screen_name' in tweet and \
            tweet['in_reply_to_screen_name'] is not None:
        reply_to_id = tweet['in_reply_to_user_id']
        if int(reply_to_id) >= 0:  # some ids are -1, not sure why
            handle = tweet['in_reply_to_screen_name']
            users[reply_to_id] = UserData(user_id=reply_to_id, handle=handle)
    if 'entities' in tweet and 'user_mentions' in tweet['entities'] and tweet['entities']['user_mentions'] is not None:
        for mention in tweet['entities']['user_mentions']:
            if mention is not None and 'id' in mention and 'screen_name' in mention:
                mentioned_id = mention['id']
                if int(mentioned_id) >= 0:  # some ids are -1, not sure why
                    handle = mention['screen_name']
                    if handle is not None:
                        users[mentioned_id] = UserData(user_id=mentioned_id, handle=handle)
    return timestamp, body_markdown, body_html
The provided code snippet includes necessary dependencies for implementing the `parse_tweets` function. Write a Python function `def parse_tweets(username, users, html_template, paths: PathConfig)` to solve the following problem:
Read tweets from paths.files_input_tweets, write to *.md and *.html. Copy the media used to paths.dir_output_media. Collect user_id:user_handle mappings for later use, in 'users'. Returns the mapping from media filename to best-quality URL.
Here is the function:
def parse_tweets(username, users, html_template, paths: PathConfig):
    """Read tweets from paths.files_input_tweets, write to *.md and *.html.

    Copy the media used to paths.dir_output_media.
    Collect user_id:user_handle mappings for later use, in 'users'.
    Returns the mapping from media filename to best-quality URL.

    (Fixes: removes dataset-table residue fused onto the final print line, and
    renames a local variable that shadowed the module-level `json` import.)
    """
    tweets = []
    media_sources = []
    for tweets_js_filename in paths.files_input_tweets:
        # don't shadow the module-level `json` import with the parsed data
        tweet_dicts = read_json_from_js_file(tweets_js_filename)
        for tweet in tweet_dicts:
            tweets.append(convert_tweet(tweet, username, media_sources, users, paths))
    tweets.sort(key=lambda tup: tup[0])  # oldest first
    # Group tweets by month
    grouped_tweets = defaultdict(list)
    for timestamp, md, html in tweets:
        # Use a (markdown) filename that can be imported into Jekyll: YYYY-MM-DD-your-title-here.md
        dt = datetime.datetime.fromtimestamp(timestamp)
        grouped_tweets[(dt.year, dt.month)].append((md, html))
    for (year, month), content in grouped_tweets.items():
        # Write into *.md files
        md_string = '\n\n----\n\n'.join(md for md, _ in content)
        md_path = paths.create_path_for_file_output_tweets(year, month, format="md")
        with open_and_mkdirs(md_path) as f:
            f.write(md_string)
        # Write into *.html files
        html_string = '<hr>\n'.join(html for _, html in content)
        html_path = paths.create_path_for_file_output_tweets(year, month, format="html")
        with open_and_mkdirs(html_path) as f:
            f.write(html_template.format(html_string))
    print(f'Wrote {len(tweets)} tweets to *.md and *.html, '
          f'with images and video embedded from {paths.dir_output_media}')
    return media_sources
17,410 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Archive .js files look like `window.YTD.<name>.part0 = [ {...} ]`; stripping
    the assignment prefix on the first line turns the content into valid JSON.
    Returns the parsed list of dicts, or an empty dict when the file has no real
    content (both are fine for callers, which only iterate over the result).
    """
    # was an f-string with no placeholders (the filename placeholder appears to
    # have been lost); plain string preserves the exact output
    print('Parsing (unknown)...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `collect_user_ids_from_followings` function. Write a Python function `def collect_user_ids_from_followings(paths) -> list` to solve the following problem:
Collect all user ids that appear in the followings archive data. (For use in bulk online lookup from Twitter.)
Here is the function:
def collect_user_ids_from_followings(paths) -> list:
    """
    Collect all user ids that appear in the followings archive data.
    (For use in bulk online lookup from Twitter.)
    """
    # read JSON file from archive, then pull out every accountId that is present
    following_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'following.js'))
    return [
        entry['following']['accountId']
        for entry in following_json
        if 'following' in entry and 'accountId' in entry['following']
    ]
17,411 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """
    def __init__(self, dir_archive):
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')

        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            # exit with a non-zero status: the bare exit() used before reported
            # success (exit code 0) even though this is an error path
            sys.exit(1)

        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)

        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties.

        Example result: <dir_output>/tweets-html/2020/2020-12-01-tweets.html
        """
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int]=None, format: str="html", kind: str="DMs") -> str:
        """Builds the path for a dm-archive file based on some properties.

        If `index` is given (and non-zero), a part suffix is appended so that
        long conversations can be split over multiple files.
        """
        index_suffix = ""
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def open_and_mkdirs(path_file):
    """Opens a file for writing. If the parent directory does not exist yet, it is created first.

    Returns a text-mode file handle (UTF-8). The caller is responsible for
    closing it, e.g. by using the returned handle as a context manager.
    """
    mkdirs_for_file(path_file)
    return open(path_file, 'w', encoding='utf-8')
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `parse_followings` function. Write a Python function `def parse_followings(users, user_id_url_template, paths: PathConfig)` to solve the following problem:
Parse paths.dir_input_data/following.js, write to paths.file_output_following.
Here is the function:
def parse_followings(users, user_id_url_template, paths: PathConfig):
    """Parse paths.dir_input_data/following.js, write to paths.file_output_following.
    """
    following_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'following.js'))
    # collect the account ids of everyone the archive owner follows
    following_ids = [
        entry['following']['accountId']
        for entry in following_json
        if 'following' in entry and 'accountId' in entry['following']
    ]
    # build one "handle url" line per followed account
    following = []
    for following_id in following_ids:
        if following_id in users:
            handle = users[following_id].handle
        else:
            handle = '~unknown~handle~'
        following.append(handle + ' ' + user_id_url_template.format(following_id))
    following.sort()
    following_output_path = paths.create_path_for_file_output_single(format="txt", kind="following")
    with open_and_mkdirs(following_output_path) as f:
        f.write('\n'.join(following))
    print(f"Wrote {len(following)} accounts to {following_output_path}")
17,412 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `collect_user_ids_from_followers` function. Write a Python function `def collect_user_ids_from_followers(paths) -> list` to solve the following problem:
Collect all user ids that appear in the followers archive data. (For use in bulk online lookup from Twitter.)
Here is the function:
def collect_user_ids_from_followers(paths) -> list:
    """
    Collect all user ids that appear in the followers archive data.
    (For use in bulk online lookup from Twitter.)
    """
    # read JSON file from archive, then pull out every accountId that is present
    follower_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'follower.js'))
    return [
        entry['follower']['accountId']
        for entry in follower_json
        if 'follower' in entry and 'accountId' in entry['follower']
    ]
17,413 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """
    def __init__(self, dir_archive):
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')

        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            # exit with a non-zero status: the bare exit() used before reported
            # success (exit code 0) even though this is an error path
            sys.exit(1)

        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)

        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties.

        Example result: <dir_output>/tweets-html/2020/2020-12-01-tweets.html
        """
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int]=None, format: str="html", kind: str="DMs") -> str:
        """Builds the path for a dm-archive file based on some properties.

        If `index` is given (and non-zero), a part suffix is appended so that
        long conversations can be split over multiple files.
        """
        index_suffix = ""
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def open_and_mkdirs(path_file):
    """Opens a file for writing. If the parent directory does not exist yet, it is created first.

    Returns a text-mode file handle (UTF-8). The caller is responsible for
    closing it, e.g. by using the returned handle as a context manager.
    """
    mkdirs_for_file(path_file)
    return open(path_file, 'w', encoding='utf-8')
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `parse_followers` function. Write a Python function `def parse_followers(users, user_id_url_template, paths: PathConfig)` to solve the following problem:
Parse paths.dir_input_data/followers.js, write to paths.file_output_followers.
Here is the function:
def parse_followers(users, user_id_url_template, paths: PathConfig):
    """Parse paths.dir_input_data/followers.js, write to paths.file_output_followers.
    """
    follower_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'follower.js'))
    # collect the account ids of everyone who follows the archive owner
    follower_ids = [
        entry['follower']['accountId']
        for entry in follower_json
        if 'follower' in entry and 'accountId' in entry['follower']
    ]
    # build one "handle url" line per follower account
    followers = []
    for follower_id in follower_ids:
        if follower_id in users:
            handle = users[follower_id].handle
        else:
            handle = '~unknown~handle~'
        followers.append(handle + ' ' + user_id_url_template.format(follower_id))
    followers.sort()
    followers_output_path = paths.create_path_for_file_output_single(format="txt", kind="followers")
    with open_and_mkdirs(followers_output_path) as f:
        f.write('\n'.join(followers))
    print(f"Wrote {len(followers)} accounts to {followers_output_path}")
17,414 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
The provided code snippet includes necessary dependencies for implementing the `collect_user_ids_from_direct_messages` function. Write a Python function `def collect_user_ids_from_direct_messages(paths) -> list` to solve the following problem:
Collect all user ids that appear in the direct messages archive data. (For use in bulk online lookup from Twitter.)
Here is the function:
def collect_user_ids_from_direct_messages(paths) -> list:
    """
    Collect all user ids that appear in the direct messages archive data.
    (For use in bulk online lookup from Twitter.)
    """
    # read JSON file from archive
    dms_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'direct-messages.js'))
    # each conversation id is "<user1>-<user2>"; gather both sides into a set
    dms_user_ids = set()
    for conversation in dms_json:
        dm_conversation = conversation.get('dmConversation', {})
        if 'conversationId' in dm_conversation:
            user1_id, user2_id = dm_conversation['conversationId'].split('-')
            dms_user_ids.update((user1_id, user2_id))
    return list(dms_user_ids)
17,415 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """
    def __init__(self, dir_archive):
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')

        # check if user is in correct folder
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            # exit with a non-zero status: the bare exit() used before reported
            # success (exit code 0) even though this is an error path
            sys.exit(1)

        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)

        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties.

        Example result: <dir_output>/tweets-html/2020/2020-12-01-tweets.html
        """
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int]=None, format: str="html", kind: str="DMs") -> str:
        """Builds the path for a dm-archive file based on some properties.

        If `index` is given (and non-zero), a part suffix is appended so that
        long conversations can be split over multiple files.
        """
        index_suffix = ""
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str) -> str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
def open_and_mkdirs(path_file):
    """Opens a file for writing. If the parent directory does not exist yet, it is created first.

    Returns a text-mode file handle (UTF-8). The caller is responsible for
    closing it, e.g. by using the returned handle as a context manager.
    """
    mkdirs_for_file(path_file)
    return open(path_file, 'w', encoding='utf-8')
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
def escape_markdown(input_text: str) -> str:
    """
    Escape markdown control characters from input text so that the text will not break in rendered markdown.
    (Only use on unformatted text parts that do not yet have any markdown control characters added on purpose!)
    """
    characters_to_escape: str = r"\_*[]()~`>#+-=|{}.!"
    # build the result in a list and join once at the end, instead of
    # quadratic repeated string concatenation
    escaped_chars = []
    for char in input_text:
        if char in characters_to_escape:
            # add backslash before control char
            escaped_chars.append('\\' + char)
        elif char == '\n':
            # add double space before line break (markdown hard line break)
            escaped_chars.append('  \n')
        else:
            escaped_chars.append(char)
    return ''.join(escaped_chars)
def chunks(lst: list, n: int):
    """Yield consecutive slices of lst, each holding at most n items."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
The provided code snippet includes necessary dependencies for implementing the `parse_direct_messages` function. Write a Python function `def parse_direct_messages(username, users, user_id_url_template, paths: PathConfig)` to solve the following problem:
Parse paths.dir_input_data/direct-messages.js, write to one markdown file per conversation.
Here is the function:
def parse_direct_messages(username, users, user_id_url_template, paths: PathConfig):
    """Parse paths.dir_input_data/direct-messages.js, write to one markdown file per conversation.

    `users` is expected to map user id -> object with a `.handle` attribute
    (only `.handle` is read here); `user_id_url_template` is a format string
    taking a single user id.
    """
    # read JSON file
    dms_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'direct-messages.js'))

    # Parse the DMs and store the messages in a dict
    conversations_messages = defaultdict(list)
    for conversation in dms_json:
        if 'dmConversation' in conversation and 'conversationId' in conversation['dmConversation']:
            dm_conversation = conversation['dmConversation']
            conversation_id = dm_conversation['conversationId']
            # conversation id has the form "<user1>-<user2>"
            user1_id, user2_id = conversation_id.split('-')
            messages = []
            if 'messages' in dm_conversation:
                for message in dm_conversation['messages']:
                    if 'messageCreate' in message:
                        message_create = message['messageCreate']
                        # only messages with all of these fields can be rendered
                        if all(tag in message_create for tag in ['senderId', 'recipientId', 'text', 'createdAt']):
                            from_id = message_create['senderId']
                            to_id = message_create['recipientId']
                            body = message_create['text']
                            # replace t.co URLs with their original versions
                            if 'urls' in message_create and len(message_create['urls']) > 0:
                                for url in message_create['urls']:
                                    if 'url' in url and 'expanded' in url:
                                        expanded_url = url['expanded']
                                        body = body.replace(url['url'], expanded_url)
                            # escape message body for markdown rendering:
                            body_markdown = escape_markdown(body)
                            # replace image URLs with image links to local files
                            if 'mediaUrls' in message_create \
                                    and len(message_create['mediaUrls']) == 1 \
                                    and 'urls' in message_create:
                                original_expanded_url = message_create['urls'][0]['expanded']
                                message_id = message_create['id']
                                media_hash_and_type = message_create['mediaUrls'][0].split('/')[-1]
                                media_id = message_create['mediaUrls'][0].split('/')[-2]
                                archive_media_filename = f'{message_id}-{media_hash_and_type}'
                                new_url = os.path.join(paths.dir_output_media, archive_media_filename)
                                archive_media_path = \
                                    os.path.join(paths.dir_input_data, 'direct_messages_media', archive_media_filename)
                                if os.path.isfile(archive_media_path):
                                    # found a matching image, use this one
                                    if not os.path.isfile(new_url):
                                        shutil.copy(archive_media_path, new_url)
                                    # NOTE(review): this f-string contains no markdown image tag;
                                    # the image link markup appears to have been lost — confirm
                                    # against the upstream version of this script
                                    image_markdown = f'\n\n'
                                    body_markdown = body_markdown.replace(
                                        escape_markdown(original_expanded_url), image_markdown
                                    )
                                    # Save the online location of the best-quality version of this file,
                                    # for later upgrading if wanted
                                    best_quality_url = \
                                        f'https://ton.twitter.com/i//ton/data/dm/' \
                                        f'{message_id}/{media_id}/{media_hash_and_type}'
                                    # there is no ':orig' here, the url without any suffix has the original size
                                    # TODO: a cookie (and a 'Referer: https://twitter.com' header)
                                    # is needed to retrieve it, so the url might be useless anyway...
                                    # WARNING: Do not uncomment the statement below until the cookie problem is solved!
                                    # media_sources.append(
                                    #     (
                                    #         os.path.join(output_media_folder_name, archive_media_filename),
                                    #         best_quality_url
                                    #     )
                                    # )
                                else:
                                    # no exact filename match — fall back to any media file whose
                                    # name starts with the message id (typically videos)
                                    archive_media_paths = glob.glob(
                                        os.path.join(paths.dir_input_data, 'direct_messages_media', message_id + '*'))
                                    if len(archive_media_paths) > 0:
                                        for archive_media_path in archive_media_paths:
                                            archive_media_filename = os.path.split(archive_media_path)[-1]
                                            media_url = os.path.join(paths.dir_output_media, archive_media_filename)
                                            if not os.path.isfile(media_url):
                                                shutil.copy(archive_media_path, media_url)
                                            video_markdown = f'\n<video controls><source src="{media_url}">' \
                                                             f'Your browser does not support the video tag.</video>\n'
                                            body_markdown = body_markdown.replace(
                                                escape_markdown(original_expanded_url), video_markdown
                                            )
                                        # TODO: maybe also save the online location of the best-quality version for videos?
                                        # (see above)
                                    else:
                                        print(f'Warning: missing local file: {archive_media_path}. '
                                              f'Using original link instead: {original_expanded_url})')
                            created_at = message_create['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            timestamp = \
                                int(round(datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()))
                            from_handle = escape_markdown(users[from_id].handle) if from_id in users \
                                else user_id_url_template.format(from_id)
                            to_handle = escape_markdown(users[to_id].handle) if to_id in users \
                                else user_id_url_template.format(to_id)
                            # make the body a quote
                            body_markdown = '> ' + '\n> '.join(body_markdown.splitlines())
                            message_markdown = f'{from_handle} -> {to_handle}: ({created_at}) \n\n' \
                                               f'{body_markdown}'
                            messages.append((timestamp, message_markdown))
            # find identifier for the conversation
            other_user_id = user2_id if (user1_id in users and users[user1_id].handle == username) else user1_id
            # collect messages per identifying user in conversations_messages dict
            conversations_messages[other_user_id].extend(messages)

    # output as one file per conversation (or part of long conversation)
    num_written_messages = 0
    num_written_files = 0
    for other_user_id, messages in conversations_messages.items():
        # sort messages by timestamp
        messages.sort(key=lambda tup: tup[0])
        other_user_name = escape_markdown(users[other_user_id].handle) if other_user_id in users \
            else user_id_url_template.format(other_user_id)
        other_user_short_name: str = users[other_user_id].handle if other_user_id in users else other_user_id
        escaped_username = escape_markdown(username)
        # if there are more than 1000 messages, the conversation was split up in the twitter archive.
        # following this standard, also split up longer conversations in the output files:
        if len(messages) > 1000:
            for chunk_index, chunk in enumerate(chunks(messages, 1000)):
                markdown = ''
                markdown += f'### Conversation between {escaped_username} and {other_user_name}, ' \
                            f'part {chunk_index+1}: ###\n\n----\n\n'
                markdown += '\n\n----\n\n'.join(md for _, md in chunk)
                conversation_output_path = paths.create_path_for_file_output_dms(name=other_user_short_name, index=(chunk_index + 1), format="md")
                # write part to a markdown file
                with open_and_mkdirs(conversation_output_path) as f:
                    f.write(markdown)
                print(f'Wrote {len(chunk)} messages to {conversation_output_path}')
                num_written_files += 1
        else:
            markdown = ''
            markdown += f'### Conversation between {escaped_username} and {other_user_name}: ###\n\n----\n\n'
            markdown += '\n\n----\n\n'.join(md for _, md in messages)
            conversation_output_path = paths.create_path_for_file_output_dms(name=other_user_short_name, format="md")
            with open_and_mkdirs(conversation_output_path) as f:
                f.write(markdown)
            print(f'Wrote {len(messages)} messages to {conversation_output_path}')
            num_written_files += 1
        num_written_messages += len(messages)
    print(f"\nWrote {len(conversations_messages)} direct message conversations "
          f"({num_written_messages} total messages) to {num_written_files} markdown files\n")
17,416 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
def find_group_dm_conversation_participant_ids(conversation: dict) -> set:
    """
    Find IDs of all participating Users in a group direct message conversation
    """
    participant_ids = set()
    dm_conversation = conversation.get('dmConversation', {})
    # only conversations with an id are considered valid
    if 'conversationId' not in dm_conversation:
        return participant_ids
    for message in dm_conversation.get('messages', []):
        if 'messageCreate' in message:
            participant_ids.add(message['messageCreate']['senderId'])
        elif 'joinConversation' in message:
            join_event = message['joinConversation']
            participant_ids.add(join_event['initiatingUserId'])
            participant_ids.update(join_event['participantsSnapshot'])
        elif 'participantsJoin' in message:
            join_event = message['participantsJoin']
            participant_ids.add(join_event['initiatingUserId'])
            participant_ids.update(join_event['userIds'])
    return participant_ids
The provided code snippet includes necessary dependencies for implementing the `collect_user_ids_from_group_direct_messages` function. Write a Python function `def collect_user_ids_from_group_direct_messages(paths) -> list` to solve the following problem:
Collect all user ids that appear in the group direct messages archive data. (For use in bulk online lookup from Twitter.)
Here is the function:
def collect_user_ids_from_group_direct_messages(paths) -> list:
    """
    Collect all user ids that appear in the group direct messages archive data.
    (For use in bulk online lookup from Twitter.)
    """
    # read JSON file from archive
    group_dms_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'direct-messages-group.js'))
    # merge the participant sets of all conversations into one id set
    group_dms_user_ids = set()
    for conversation in group_dms_json:
        group_dms_user_ids.update(find_group_dm_conversation_participant_ids(conversation))
    return list(group_dms_user_ids)
17,417 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def open_and_mkdirs(path_file):
    """Opens a file for writing. If the parent directory does not exist yet, it is created first.

    Returns a text-mode file handle (UTF-8). The caller is responsible for
    closing it, e.g. by using the returned handle as a context manager.
    """
    mkdirs_for_file(path_file)
    return open(path_file, 'w', encoding='utf-8')
def read_json_from_js_file(filename):
    """Reads the contents of a Twitter-produced .js file into a dictionary.

    Twitter archive .js files are JSON with a JavaScript assignment prefix on
    the first line (e.g. `window.YTD.xxx.part0 = [ ...`), which is stripped
    before parsing.
    """
    # Report which file is being parsed (the previous f-string had no
    # placeholder and always printed a literal "(unknown)").
    print(f'Parsing {os.path.basename(filename)}...')
    with open(filename, 'r', encoding='utf8') as f:
        data = f.readlines()
    # if the JSON has no real content, it can happen that the file is only one line long.
    # in this case, return an empty dict to avoid errors while trying to read non-existing lines.
    if len(data) <= 1:
        return {}
    # convert js file to JSON: replace first line with just '[', squash lines into a single string
    prefix = '['
    if '{' in data[0]:
        prefix += ' {'
    data = prefix + ''.join(data[1:])
    # parse the resulting JSON and return as a dict
    return json.loads(data)
def escape_markdown(input_text: str) -> str:
    """
    Escape markdown control characters from input text so that the text will not break in rendered markdown.
    (Only use on unformatted text parts that do not yet have any markdown control characters added on purpose!)
    """
    characters_to_escape: str = r"\_*[]()~`>#+-=|{}.!"
    # build the result in a list and join once at the end, instead of
    # quadratic repeated string concatenation
    escaped_chars = []
    for char in input_text:
        if char in characters_to_escape:
            # add backslash before control char
            escaped_chars.append('\\' + char)
        elif char == '\n':
            # add double space before line break (markdown hard line break)
            escaped_chars.append('  \n')
        else:
            escaped_chars.append(char)
    return ''.join(escaped_chars)
def chunks(lst: list, n: int):
    """Yield consecutive slices of lst, each holding at most n items."""
    for start in range(0, len(lst), n):
        yield lst[start:start + n]
def make_conversation_name_safe_for_filename(conversation_name: str) -> str:
    """
    Remove/replace characters that could be unsafe in filenames.

    Forbidden punctuation and whitespace are replaced with underscores;
    control characters (0x00-0x1F) and DEL (0x7F) are discarded.
    """
    forbidden_chars = \
        ['"', "'", '*', '/', '\\', ':', '<', '>', '?', '|', '!', '@', ';', ',', '=', '.', '\n', '\r', '\t']
    safe_chars = []
    for char in conversation_name:
        if char in forbidden_chars or char.isspace():
            # replace forbidden characters and spaces with underscores
            safe_chars.append('_')
        elif ord(char) == 0x7F or 0x00 <= ord(char) <= 0x1F:
            # 0x00 - 0x1F and 0x7F are also forbidden, just discard them
            # (the previous `char == 0x7F` compared a str to an int and was
            # always False, so DEL slipped through)
            continue
        else:
            safe_chars.append(char)
    return ''.join(safe_chars)
def find_group_dm_conversation_participant_ids(conversation: dict) -> set:
    """
    Find IDs of all participating Users in a group direct message conversation
    """
    participant_ids = set()
    dm_conversation = conversation.get('dmConversation', {})
    # only consider conversations that actually carry an id
    if 'conversationId' not in dm_conversation:
        return participant_ids
    for message in dm_conversation.get('messages', []):
        # a message records senders/joiners under exactly one of these keys
        if 'messageCreate' in message:
            participant_ids.add(message['messageCreate']['senderId'])
        elif 'joinConversation' in message:
            join_event = message['joinConversation']
            participant_ids.add(join_event['initiatingUserId'])
            participant_ids.update(join_event['participantsSnapshot'])
        elif "participantsJoin" in message:
            join_event = message['participantsJoin']
            participant_ids.add(join_event['initiatingUserId'])
            participant_ids.update(join_event['userIds'])
    return participant_ids
The provided code snippet includes necessary dependencies for implementing the `parse_group_direct_messages` function. Write a Python function `def parse_group_direct_messages(username, users, user_id_url_template, paths)` to solve the following problem:
Parse data_folder/direct-messages-group.js, write to one markdown file per conversation.
Here is the function:
def parse_group_direct_messages(username, users, user_id_url_template, paths):
    """Parse data_folder/direct-messages-group.js, write to one markdown file per conversation.

    Args:
        username: handle of the archive's owner (used in join notices and fallback group names).
        users: mapping of user id -> user object exposing a `.handle` attribute.
        user_id_url_template: format string applied to ids with no known handle.
        paths: PathConfig describing the input/output directory layout.
    """
    # read JSON file from archive
    group_dms_json = read_json_from_js_file(os.path.join(paths.dir_input_data, 'direct-messages-group.js'))

    # Parse the group DMs, store messages and metadata in a dict
    group_conversations_messages = defaultdict(list)
    group_conversations_metadata = defaultdict(dict)
    for conversation in group_dms_json:
        if 'dmConversation' in conversation and 'conversationId' in conversation['dmConversation']:
            dm_conversation = conversation['dmConversation']
            conversation_id = dm_conversation['conversationId']
            participants = find_group_dm_conversation_participant_ids(conversation)
            participant_names = []
            for participant_id in participants:
                if participant_id in users:
                    participant_names.append(users[participant_id].handle)
                else:
                    participant_names.append(user_id_url_template.format(participant_id))
            # save names in metadata
            group_conversations_metadata[conversation_id]['participants'] = participants
            group_conversations_metadata[conversation_id]['participant_names'] = participant_names
            # timestamp 0 makes the conversation id the last-resort name when sorting below
            group_conversations_metadata[conversation_id]['conversation_names'] = [(0, conversation_id)]
            group_conversations_metadata[conversation_id]['participant_message_count'] = defaultdict(int)
            for participant_id in participants:
                # init every participant's message count with 0, so that users with no activity are not ignored
                group_conversations_metadata[conversation_id]['participant_message_count'][participant_id] = 0
            messages = []
            if 'messages' in dm_conversation:
                for message in dm_conversation['messages']:
                    if 'messageCreate' in message:
                        message_create = message['messageCreate']
                        if all(tag in message_create for tag in ['senderId', 'text', 'createdAt']):
                            from_id = message_create['senderId']
                            # count how many messages this user has sent to the group
                            group_conversations_metadata[conversation_id]['participant_message_count'][from_id] += 1
                            body = message_create['text']
                            # replace t.co URLs with their original versions
                            if 'urls' in message_create:
                                for url in message_create['urls']:
                                    if 'url' in url and 'expanded' in url:
                                        expanded_url = url['expanded']
                                        body = body.replace(url['url'], expanded_url)
                            # escape message body for markdown rendering:
                            body_markdown = escape_markdown(body)
                            # replace image URLs with image links to local files
                            if 'mediaUrls' in message_create \
                                    and len(message_create['mediaUrls']) == 1 \
                                    and 'urls' in message_create:
                                original_expanded_url = message_create['urls'][0]['expanded']
                                message_id = message_create['id']
                                media_hash_and_type = message_create['mediaUrls'][0].split('/')[-1]
                                media_id = message_create['mediaUrls'][0].split('/')[-2]
                                archive_media_filename = f'{message_id}-{media_hash_and_type}'
                                new_url = os.path.join(paths.dir_output_media, archive_media_filename)
                                archive_media_path = \
                                    os.path.join(paths.dir_input_data, 'direct_messages_group_media',
                                                 archive_media_filename)
                                if os.path.isfile(archive_media_path):
                                    # found a matching image, use this one
                                    if not os.path.isfile(new_url):
                                        shutil.copy(archive_media_path, new_url)
                                    # NOTE(review): this literal looks truncated — presumably it should
                                    # contain a markdown image link to `new_url`; confirm against upstream.
                                    image_markdown = f'\n\n'
                                    body_markdown = body_markdown.replace(
                                        escape_markdown(original_expanded_url), image_markdown
                                    )
                                    # Save the online location of the best-quality version of this file,
                                    # for later upgrading if wanted
                                    best_quality_url = \
                                        f'https://ton.twitter.com/i//ton/data/dm/' \
                                        f'{message_id}/{media_id}/{media_hash_and_type}'
                                    # there is no ':orig' here, the url without any suffix has the original size
                                    # TODO: a cookie (and a 'Referer: https://twitter.com' header)
                                    #  is needed to retrieve it, so the url might be useless anyway...
                                    # WARNING: Do not uncomment the statement below until the cookie problem is solved!
                                    # media_sources.append(
                                    #     (
                                    #         os.path.join(output_media_folder_name, archive_media_filename),
                                    #         best_quality_url
                                    #     )
                                    # )
                                else:
                                    # no exact image match — look for any file starting with the message id
                                    # (videos are stored with a different hash suffix)
                                    archive_media_paths = glob.glob(
                                        os.path.join(paths.dir_input_data, 'direct_messages_group_media',
                                                     message_id + '*'))
                                    if len(archive_media_paths) > 0:
                                        for archive_media_path in archive_media_paths:
                                            archive_media_filename = os.path.split(archive_media_path)[-1]
                                            media_url = os.path.join(paths.dir_output_media,
                                                                     archive_media_filename)
                                            if not os.path.isfile(media_url):
                                                shutil.copy(archive_media_path, media_url)
                                            video_markdown = f'\n<video controls><source src="{media_url}">' \
                                                             f'Your browser does not support the video tag.</video>\n'
                                            body_markdown = body_markdown.replace(
                                                escape_markdown(original_expanded_url), video_markdown
                                            )
                                        # TODO: maybe also save the online location of the best-quality version for videos?
                                        #  (see above)
                                    else:
                                        print(f'Warning: missing local file: {archive_media_path}. '
                                              f'Using original link instead: {original_expanded_url})')
                            created_at = message_create['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            # NOTE(review): '%X' is locale-dependent; this presumably relies on it
                            # resolving to '%H:%M:%S' — confirm on non-English locales.
                            timestamp = int(round(
                                datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()
                            ))
                            from_handle = escape_markdown(users[from_id].handle) if from_id in users \
                                else user_id_url_template.format(from_id)
                            # make the body a quote
                            body_markdown = '> ' + '\n> '.join(body_markdown.splitlines())
                            message_markdown = f'{from_handle}: ({created_at})\n\n' \
                                               f'{body_markdown}'
                            messages.append((timestamp, message_markdown))
                    elif "conversationNameUpdate" in message:
                        conversation_name_update = message['conversationNameUpdate']
                        if all(tag in conversation_name_update for tag in ['initiatingUserId', 'name', 'createdAt']):
                            from_id = conversation_name_update['initiatingUserId']
                            body_markdown = f"_changed group name to: {escape_markdown(conversation_name_update['name'])}_"
                            created_at = conversation_name_update['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            timestamp = int(round(
                                datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()
                            ))
                            from_handle = escape_markdown(users[from_id].handle) if from_id in users \
                                else user_id_url_template.format(from_id)
                            message_markdown = f'{from_handle}: ({created_at})\n\n{body_markdown}'
                            messages.append((timestamp, message_markdown))
                            # save metadata about name change:
                            group_conversations_metadata[conversation_id]['conversation_names'].append(
                                (timestamp, conversation_name_update['name'])
                            )
                    elif "joinConversation" in message:
                        # archive owner was added to the group
                        join_conversation = message['joinConversation']
                        if all(tag in join_conversation for tag in ['initiatingUserId', 'createdAt']):
                            from_id = join_conversation['initiatingUserId']
                            created_at = join_conversation['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            timestamp = int(round(
                                datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()
                            ))
                            from_handle = escape_markdown(users[from_id].handle) if from_id in users \
                                else user_id_url_template.format(from_id)
                            escaped_username = escape_markdown(username)
                            body_markdown = f'_{from_handle} added {escaped_username} to the group_'
                            message_markdown = f'{from_handle}: ({created_at})\n\n{body_markdown}'
                            messages.append((timestamp, message_markdown))
                    elif "participantsJoin" in message:
                        # other users were added to the group
                        participants_join = message['participantsJoin']
                        if all(tag in participants_join for tag in ['initiatingUserId', 'userIds', 'createdAt']):
                            from_id = participants_join['initiatingUserId']
                            created_at = participants_join['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            timestamp = int(round(
                                datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()
                            ))
                            from_handle = escape_markdown(users[from_id].handle) if from_id in users \
                                else user_id_url_template.format(from_id)
                            joined_ids = participants_join['userIds']
                            joined_handles = [escape_markdown(users[joined_id].handle) if joined_id in users
                                              else user_id_url_template.format(joined_id) for joined_id in joined_ids]
                            name_list = ', '.join(joined_handles[:-1]) + \
                                        (f' and {joined_handles[-1]}' if len(joined_handles) > 1 else
                                         joined_handles[0])
                            body_markdown = f'_{from_handle} added {name_list} to the group_'
                            message_markdown = f'{from_handle}: ({created_at})\n\n{body_markdown}'
                            messages.append((timestamp, message_markdown))
                    elif "participantsLeave" in message:
                        participants_leave = message['participantsLeave']
                        if all(tag in participants_leave for tag in ['userIds', 'createdAt']):
                            created_at = participants_leave['createdAt']  # example: 2022-01-27T15:58:52.744Z
                            timestamp = int(round(
                                datetime.datetime.strptime(created_at, '%Y-%m-%dT%X.%fZ').timestamp()
                            ))
                            left_ids = participants_leave['userIds']
                            left_handles = [escape_markdown(users[left_id].handle) if left_id in users
                                            else user_id_url_template.format(left_id) for left_id in left_ids]
                            name_list = ', '.join(left_handles[:-1]) + \
                                        (f' and {left_handles[-1]}' if len(left_handles) > 1 else
                                         left_handles[0])
                            body_markdown = f'_{name_list} left the group_'
                            message_markdown = f'{name_list}: ({created_at})\n\n{body_markdown}'
                            messages.append((timestamp, message_markdown))
            # collect messages per conversation in group_conversations_messages dict
            group_conversations_messages[conversation_id].extend(messages)

    # output as one file per conversation (or part of long conversation)
    num_written_messages = 0
    num_written_files = 0
    for conversation_id, messages in group_conversations_messages.items():
        # sort messages by timestamp
        messages.sort(key=lambda tup: tup[0])
        # create conversation name for use in filename:
        # first, try to find an official name in the parsed conversation data
        # Not-so-fun fact:
        # If the name was set before the archive's owner joined the group, the name is not included
        # in the archive data and can't be found anywhere (except by looking it up from twitter,
        # and that would probably need a cookie). So there are many groups that do actually have a name,
        # but it can't be used here because we don't know it.
        group_conversations_metadata[conversation_id]['conversation_names'].sort(key=lambda tup: tup[0], reverse=True)
        official_name = group_conversations_metadata[conversation_id]['conversation_names'][0][1]
        safe_group_name = make_conversation_name_safe_for_filename(official_name)
        if len(safe_group_name) < 2:
            # discard name if it's too short (because of collision risk)
            group_name = conversation_id
        else:
            group_name = safe_group_name
        if group_name == conversation_id:
            # try to make a nice list of participant handles for the conversation name
            handles = []
            for participant_id, message_count in \
                    group_conversations_metadata[conversation_id]['participant_message_count'].items():
                if participant_id in users:
                    participant_handle = users[participant_id].handle
                    if participant_handle != username:
                        handles.append((participant_handle, message_count))
            # sort alphabetically by handle first, for a more deterministic order
            handles.sort(key=lambda tup: tup[0])
            # sort so that the most active users are at the start of the list
            handles.sort(key=lambda tup: tup[1], reverse=True)
            if len(handles) == 1:
                group_name = \
                    f'{handles[0][0]}_and_{len(group_conversations_metadata[conversation_id]["participants"]) - 1}_more'
            elif len(handles) == 2 and len(group_conversations_metadata[conversation_id]["participants"]) == 3:
                group_name = f'{handles[0][0]}_and_{handles[1][0]}_and_{username}'
            elif len(handles) >= 2:
                group_name = \
                    f'{handles[0][0]}_and_{handles[1][0]}_and' \
                    f'_{len(group_conversations_metadata[conversation_id]["participants"]) - 2}_more'
            else:
                # just use the conversation id
                group_name = conversation_id
        # create a list of names of the form '@name1, @name2 and @name3'
        # to use as a headline in the output file
        escaped_participant_names = [
            escape_markdown(participant_name)
            for participant_name in group_conversations_metadata[conversation_id]['participant_names']
        ]
        name_list = ', '.join(escaped_participant_names[:-1]) + \
                    (f' and {escaped_participant_names[-1]}'
                     if len(escaped_participant_names) > 1
                     else escaped_participant_names[0])
        if len(messages) > 1000:
            # split very long conversations into parts of at most 1000 messages each
            for chunk_index, chunk in enumerate(chunks(messages, 1000)):
                markdown = ''
                markdown += f'## {official_name} ##\n\n'
                markdown += f'### Group conversation between {name_list}, part {chunk_index + 1}: ###\n\n----\n\n'
                markdown += '\n\n----\n\n'.join(md for _, md in chunk)
                conversation_output_filename = paths.create_path_for_file_output_dms(
                    name=group_name, format="md", kind="DMs-Group", index=chunk_index + 1
                )
                # write part to a markdown file
                with open_and_mkdirs(conversation_output_filename) as f:
                    f.write(markdown)
                print(f'Wrote {len(chunk)} messages to {conversation_output_filename}')
                num_written_files += 1
        else:
            markdown = ''
            markdown += f'## {official_name} ##\n\n'
            markdown += f'### Group conversation between {name_list}: ###\n\n----\n\n'
            markdown += '\n\n----\n\n'.join(md for _, md in messages)
            conversation_output_filename = \
                paths.create_path_for_file_output_dms(name=group_name, format="md", kind="DMs-Group")
            with open_and_mkdirs(conversation_output_filename) as f:
                f.write(markdown)
            print(f'Wrote {len(messages)} messages to {conversation_output_filename}')
            num_written_files += 1
        num_written_messages += len(messages)
    print(f"\nWrote {len(group_conversations_messages)} direct message group conversations "
          f"({num_written_messages} total messages) to {num_written_files} markdown files")
17,418 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
class PathConfig:
    """
    Helper class containing constants for various directories and files.

    The script will only add / change / delete content in its own directories, which start with `parser-`.
    Files within `parser-output` are the end result that the user is probably interested in.
    Files within `parser-cache` are temporary working files, which improve the efficiency if you run
    this script multiple times. They can safely be removed without harming the consistency of the
    files within `parser-output`.
    """

    def __init__(self, dir_archive):
        # root of the extracted Twitter archive
        self.dir_archive = dir_archive
        self.dir_input_data = os.path.join(dir_archive, 'data')
        self.file_account_js = os.path.join(self.dir_input_data, 'account.js')

        # check if user is in correct folder
        # NOTE: exits the whole process if the archive marker file is missing
        if not os.path.isfile(self.file_account_js):
            print(f'Error: Failed to load {self.file_account_js}. ')
            exit()

        self.dir_input_media = find_dir_input_media(self.dir_input_data)
        self.dir_output = os.path.join(self.dir_archive, 'parser-output')
        self.dir_output_media = os.path.join(self.dir_output, 'media')
        self.dir_output_cache = os.path.join(self.dir_archive, 'parser-cache')
        self.file_output_following = os.path.join(self.dir_output, 'following.txt')
        self.file_output_followers = os.path.join(self.dir_output, 'followers.txt')
        self.file_download_log = os.path.join(self.dir_output_media, 'download_log.txt')
        self.file_tweet_icon = os.path.join(self.dir_output_media, 'tweet.ico')
        self.files_input_tweets = find_files_input_tweets(self.dir_input_data)

        # structured like an actual tweet output file, can be used to compute relative urls to a media file
        self.example_file_output_tweets = self.create_path_for_file_output_tweets(year=2020, month=12)

    def create_path_for_file_output_tweets(self, year, month, format="html", kind="tweets") -> str:
        """Builds the path for a tweet-archive file based on some properties."""
        # Previously the filename was f'{dt.year}-{dt.month:02}-01-Tweet-Archive-{dt.year}-{dt.month:02}'
        return os.path.join(self.dir_output, f"{kind}-{format}", f"{year:04}", f"{year:04}-{month:02}-01-{kind}.{format}")

    def create_path_for_file_output_dms(self, name: str, index: Optional[int]=None, format: str="html", kind: str="DMs") -> str:
        """Builds the path for a dm-archive file based on some properties."""
        # index is only present for conversations that were split into multiple parts
        index_suffix = ""
        if (index):
            index_suffix = f"-part{index:03}"
        return os.path.join(self.dir_output, kind, f"{kind}-{name}{index_suffix}.{format}")

    def create_path_for_file_output_single(self, format: str, kind: str)->str:
        """Builds the path for a single output file, i.e. one that is not part of a larger group or sequence."""
        return os.path.join(self.dir_output, f"{kind}.{format}")
The provided code snippet includes necessary dependencies for implementing the `migrate_old_output` function. Write a Python function `def migrate_old_output(paths: PathConfig)` to solve the following problem:
If present, moves media and cache files from the archive root to the new locations in `paths.dir_output_media` and `paths.dir_output_cache`. Then deletes old output files (md, html, txt) from the archive root, if the user consents.
Here is the function:
def migrate_old_output(paths: PathConfig):
    """If present, moves media and cache files from the archive root to the new locations in
    `paths.dir_output_media` and `paths.dir_output_cache`. Then deletes old output files
    (md, html, txt) from the archive root, if the user consents.
    """
    # Create new folders, so we can potentially use them to move files there
    os.makedirs(paths.dir_output_media, exist_ok=True)
    os.makedirs(paths.dir_output_cache, exist_ok=True)

    # Move files that we can re-use:
    dir_old_media = os.path.join(paths.dir_archive, "media")
    if os.path.exists(dir_old_media):
        files_to_move = glob.glob(os.path.join(dir_old_media, "*"))
        if len(files_to_move) > 0:
            print(f"Moving {len(files_to_move)} files from 'media' to '{paths.dir_output_media}'")
            for file_path_to_move in files_to_move:
                file_name_to_move = os.path.split(file_path_to_move)[1]
                os.rename(file_path_to_move, os.path.join(paths.dir_output_media, file_name_to_move))
        # the old media dir is empty now and can be removed
        os.rmdir(dir_old_media)

    known_tweets_old_path = os.path.join(paths.dir_archive, "known_tweets.json")
    known_tweets_new_path = os.path.join(paths.dir_output_cache, "known_tweets.json")
    if os.path.exists(known_tweets_old_path):
        os.rename(known_tweets_old_path, known_tweets_new_path)

    # Delete files that would be overwritten anyway (if user consents):
    output_globs = [
        "TweetArchive.html",
        "*Tweet-Archive*.html",
        "*Tweet-Archive*.md",
        "DMs-Archive-*.html",
        "DMs-Archive-*.md",
        "DMs-Group-Archive-*.html",
        "DMs-Group-Archive-*.md",
        "followers.txt",
        "following.txt",
    ]
    files_to_delete = []
    for output_glob in output_globs:
        files_to_delete += glob.glob(os.path.join(paths.dir_archive, output_glob))

    # TODO maybe remove those files only after the new ones have been generated? This way, the user would never
    #  end up with less output than before. On the other hand, they might end up with old *and* new versions
    #  of the output, if the script crashes before it reaches the code to delete the old version.
    if len(files_to_delete) > 0:
        print(f"\nThere are {len(files_to_delete)} files in the root of the archive,")
        print("which were probably generated from an older version of this script.")
        print("Since then, the directory layout of twitter-archive-parser has changed")
        print("and these files are generated into the sub-directory 'parser-output' or")
        print("various sub-sub-directories therein. These are the affected files:")
        for file_to_delete in files_to_delete:
            print(file_to_delete)
        # typo fix: prompt previously read "If the the directory layout"
        user_input = input('\nOK delete these files? (If the directory layout would not have changed, they would be overwritten anyway) [y/N]')
        if user_input.lower() in ('y', 'yes'):
            for file_to_delete in files_to_delete:
                os.remove(file_to_delete)
            print("Files have been deleted. New versions of these files will be generated into 'parser-output' soon.")
17,419 | from collections import defaultdict
from typing import Optional
from urllib.parse import urlparse
import datetime
import glob
import importlib
import json
import logging
import os
import re
import shutil
import subprocess
import sys
import time
def is_archive(path):
    """Return true if there is a Twitter archive at the given path"""
    # the account.js marker file identifies a valid archive root
    account_js = os.path.join(path, 'data', 'account.js')
    return os.path.isfile(account_js)
The provided code snippet includes necessary dependencies for implementing the `find_archive` function. Write a Python function `def find_archive()` to solve the following problem:
Search for the archive 1. First try the working directory. 2. Then try the script directory. 3. Finally prompt the user.
Here is the function:
def find_archive():
    """
    Search for the archive
    1. First try the working directory.
    2. Then try the script directory.
    3. Finally prompt the user.
    """
    # automatic candidates, in priority order
    candidates = ['.']
    script_dir = os.path.dirname(__file__)
    if script_dir != os.getcwd():
        candidates.append(script_dir)
    for candidate in candidates:
        if is_archive(candidate):
            return candidate
    print('Archive not found in working directory or script directory.\n'
          'Please enter the path of your Twitter archive, or just press Enter to exit.\n'
          'On most operating systems, you can also try to drag and drop your archive folder '
          'into the terminal window, and it will paste its path automatically.\n')
    # Give the user as many attempts as they need.
    while True:
        input_path = input('Archive path: ')
        if not input_path:
            exit()
        if is_archive(input_path):
            return input_path
        print(f'Archive not found at {input_path}')
17,420 | import torch
import torch.nn as nn
import einops
from torch.nn.utils import spectral_norm, weight_norm
# Normalization strategies accepted by the conv helpers below.
CONV_NORMALIZATIONS = frozenset(
    [
        "none",
        "weight_norm",
        "spectral_norm",
        "time_layer_norm",
        "layer_norm",
        "time_group_norm",
    ]
)
def apply_parametrization_norm(module: nn.Module, norm: str = "none") -> nn.Module:
    """Wrap `module` with the requested weight reparametrization, if any."""
    assert norm in CONV_NORMALIZATIONS
    if norm == "weight_norm":
        return weight_norm(module)
    if norm == "spectral_norm":
        return spectral_norm(module)
    # Membership in CONV_NORMALIZATIONS was asserted above; every remaining
    # choice does not need reparametrization.
    return module
17,421 | import torch
import torch.nn as nn
import einops
from torch.nn.utils import spectral_norm, weight_norm
# Normalization strategies accepted by the conv helpers below.
CONV_NORMALIZATIONS = frozenset(
    [
        "none",
        "weight_norm",
        "spectral_norm",
        "time_layer_norm",
        "layer_norm",
        "time_group_norm",
    ]
)
class ConvLayerNorm(nn.LayerNorm):
    """
    Convolution-friendly LayerNorm that moves channels to last dimensions
    before running the normalization and moves them back to original position right after.
    """

    def __init__(self, normalized_shape, **kwargs):
        super().__init__(normalized_shape, **kwargs)

    def forward(self, x):
        # move the time axis next to batch so LayerNorm normalizes over channels
        x = einops.rearrange(x, "b ... t -> b t ...")
        x = super().forward(x)
        x = einops.rearrange(x, "b t ... -> b ... t")
        # BUG FIX: the original ended with a bare `return`, so forward() always
        # returned None; return the normalized tensor instead.
        return x
The provided code snippet includes necessary dependencies for implementing the `get_norm_module` function. Write a Python function `def get_norm_module( module: nn.Module, causal: bool = False, norm: str = "none", **norm_kwargs ) -> nn.Module` to solve the following problem:
Return the proper normalization module. If causal is True, this will ensure the returned module is causal, or return an error if the normalization doesn't support causal evaluation.
Here is the function:
def get_norm_module(
    module: nn.Module, causal: bool = False, norm: str = "none", **norm_kwargs
) -> nn.Module:
    """Return the proper normalization module. If causal is True, this will ensure the returned
    module is causal, or return an error if the normalization doesn't support causal evaluation.
    """
    assert norm in CONV_NORMALIZATIONS
    if norm == "layer_norm":
        # only conv modules expose out_channels
        assert isinstance(module, nn.modules.conv._ConvNd)
        return ConvLayerNorm(module.out_channels, **norm_kwargs)
    if norm == "time_group_norm":
        if causal:
            raise ValueError("GroupNorm doesn't support causal evaluation.")
        assert isinstance(module, nn.modules.conv._ConvNd)
        return nn.GroupNorm(1, module.out_channels, **norm_kwargs)
    # all other validated choices need no extra normalization module
    return nn.Identity()
17,422 | import typing as tp
def get_padding(kernel_size, dilation=1):
    """Padding that keeps the output length equal to the input for a dilated conv."""
    effective_span = kernel_size * dilation - dilation
    return int(effective_span / 2)
17,423 | import typing as tp
def get_2d_padding(
    kernel_size: tp.Tuple[int, int], dilation: tp.Tuple[int, int] = (1, 1)
):
    """Per-dimension padding that keeps spatial size for a dilated 2d conv kernel."""
    return tuple(
        ((size - 1) * dil) // 2 for size, dil in zip(kernel_size, dilation)
    )
17,424 | import typing as tp
def init_weights(m, mean=0.0, std=0.01):
    """Initialize conv-layer weights in place from a normal distribution."""
    # `str.find(...) != -1` in the original is equivalent to substring membership
    if "Conv" in m.__class__.__name__:
        m.weight.data.normal_(mean, std)
17,425 | from typing import Optional
import six
import torch
import numpy as np
def sequence_mask(
    lengths,
    maxlen: Optional[int] = None,
    dtype: torch.dtype = torch.float32,
    device: Optional[torch.device] = None,
) -> torch.Tensor:
    """Build a (batch, maxlen) mask that is 1 where position < length, else 0."""
    if maxlen is None:
        maxlen = lengths.max()
    # one row of positions, broadcast against a column of lengths
    positions = torch.arange(0, maxlen, 1).to(lengths.device)
    mask = (positions < lengths.unsqueeze(-1)).detach().type(dtype)
    if device is not None:
        mask = mask.to(device)
    return mask
17,426 | from typing import Optional
import six
import torch
import numpy as np
The provided code snippet includes necessary dependencies for implementing the `end_detect` function. Write a Python function `def end_detect(ended_hyps, i, M=3, d_end=np.log(1 * np.exp(-10)))` to solve the following problem:
End detection. described in Eq. (50) of S. Watanabe et al "Hybrid CTC/Attention Architecture for End-to-End Speech Recognition" :param ended_hyps: :param i: :param M: :param d_end: :return:
Here is the function:
def end_detect(ended_hyps, i, M=3, d_end=np.log(1 * np.exp(-10))):
    """End detection.

    described in Eq. (50) of S. Watanabe et al
    "Hybrid CTC/Attention Architecture for End-to-End Speech Recognition"

    :param ended_hyps: finished hypotheses, dicts with 'yseq' and 'score'
    :param i: current decoding step (the hypothesis length being produced)
    :param M: number of most recent lengths to inspect
    :param d_end: score-gap threshold below which a length counts as hopeless
    :return: True if decoding can be stopped
    """
    if len(ended_hyps) == 0:
        return False
    count = 0
    best_hyp = sorted(ended_hyps, key=lambda x: x["score"], reverse=True)[0]
    # check the best finished hypothesis at each of the M most recent lengths
    for m in range(M):  # plain range — six.moves is unnecessary on Python 3
        # get ended_hyps with their length is i - m
        hyp_length = i - m
        hyps_same_length = [x for x in ended_hyps if len(x["yseq"]) == hyp_length]
        if len(hyps_same_length) > 0:
            best_hyp_same_length = sorted(
                hyps_same_length, key=lambda x: x["score"], reverse=True
            )[0]
            if best_hyp_same_length["score"] - best_hyp["score"] < d_end:
                count += 1
    # stop only when every one of the M recent lengths falls below the threshold
    return count == M
17,427 | from itertools import chain
from typing import Any
from typing import Dict
from typing import List
from typing import Tuple
from typing import Union
from typing import NamedTuple
import torch
from modules.wenet_extractor.paraformer.utils import end_detect
from modules.wenet_extractor.paraformer.search.ctc import CTCPrefixScorer
from modules.wenet_extractor.paraformer.search.scorer_interface import (
ScorerInterface,
PartialScorerInterface,
)
class BeamSearchCIF(torch.nn.Module):
def __init__(
self,
scorers: Dict[str, ScorerInterface],
weights: Dict[str, float],
beam_size: int,
vocab_size: int,
sos: int,
eos: int,
pre_beam_ratio: float = 1.5,
pre_beam_score_key: str = None,
):
def init_hyp(self, x: torch.Tensor) -> List[Hypothesis]:
def append_token(xs: torch.Tensor, x: int) -> torch.Tensor:
def score_full(
self, hyp: Hypothesis, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
def score_partial(
self, hyp: Hypothesis, ids: torch.Tensor, x: torch.Tensor
) -> Tuple[Dict[str, torch.Tensor], Dict[str, Any]]:
def beam(
self, weighted_scores: torch.Tensor, ids: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
def merge_scores(
prev_scores: Dict[str, float],
next_full_scores: Dict[str, torch.Tensor],
full_idx: int,
next_part_scores: Dict[str, torch.Tensor],
part_idx: int,
) -> Dict[str, torch.Tensor]:
def merge_states(self, states: Any, part_states: Any, part_idx: int) -> Any:
def search(
self, running_hyps: List[Hypothesis], x: torch.Tensor, am_score: torch.Tensor
) -> List[Hypothesis]:
def forward(
self,
x: torch.Tensor,
am_scores: torch.Tensor,
maxlenratio: float = 0.0,
minlenratio: float = 0.0,
) -> List[Hypothesis]:
def post_process(
self,
i: int,
maxlen: int,
maxlenratio: float,
running_hyps: List[Hypothesis],
ended_hyps: List[Hypothesis],
) -> List[Hypothesis]:
class CTCPrefixScorer(BatchPartialScorerInterface):
def __init__(self, ctc: torch.nn.Module, eos: int):
def init_state(self, x: torch.Tensor):
def select_state(self, state, i, new_id=None):
def score_partial(self, y, ids, state, x):
def batch_init_state(self, x: torch.Tensor):
def batch_score_partial(self, y, ids, state, x):
def extend_prob(self, x: torch.Tensor):
def extend_state(self, state):
def build_beam_search(model, args, device):
    """Construct a BeamSearchCIF decoder for `model` and move it to `device`.

    Args:
        model: model exposing `ctc`, `sos`, `eos` and `vocab_size`.
        args: decoding options with `ctc_weight`, `penalty` and `beam_size`.
        device: torch device the beam search should run on.

    Returns:
        A BeamSearchCIF instance in eval mode on `device`.
    """
    scorers = {}
    if model.ctc is not None:
        scorers["ctc"] = CTCPrefixScorer(ctc=model.ctc, eos=model.eos)
    weights = {
        "decoder": 1.0 - args.ctc_weight,
        "ctc": args.ctc_weight,
        "length_bonus": args.penalty,
    }
    beam_search = BeamSearchCIF(
        beam_size=args.beam_size,
        weights=weights,
        scorers=scorers,
        sos=model.sos,
        eos=model.eos,
        vocab_size=model.vocab_size,
        pre_beam_score_key=None if args.ctc_weight == 1.0 else "full",
    )
    beam_search.to(device=device, dtype=torch.float32).eval()
    return beam_search
17,428 | from typing import Optional
import torch
from torch import nn
from modules.wenet_extractor.utils.mask import make_pad_mask
def cif(hidden: torch.Tensor, alphas: torch.Tensor, threshold: float):
    """Continuous Integrate-and-Fire (CIF).

    Integrates the per-frame weights `alphas` over time; whenever the
    accumulated weight reaches `threshold`, one label frame "fires" as a
    weighted sum of the encoder frames that contributed to it, and the
    remainder is carried over into the next frame.

    Args:
        hidden: encoder output of shape (batch, time, hidden).
        alphas: per-frame weights of shape (batch, time).
        threshold: firing threshold for the accumulated weight.

    Returns:
        A tuple (frames, fires):
        frames: fired label frames, zero-padded to the max label length,
            shape (batch, max_label_len, hidden).
        fires: accumulated weight after each time step, shape (batch, time);
            entries >= threshold mark the firing positions.
    """
    batch_size, len_time, hidden_size = hidden.size()

    # loop vars
    integrate = torch.zeros([batch_size], device=hidden.device)
    frame = torch.zeros([batch_size, hidden_size], device=hidden.device)
    # intermediate vars along time
    list_fires = []
    list_frames = []

    for t in range(len_time):
        alpha = alphas[:, t]
        # weight still missing until the next fire (computed before integrating alpha)
        distribution_completion = (
            torch.ones([batch_size], device=hidden.device) - integrate
        )

        integrate += alpha
        list_fires.append(integrate)

        fire_place = integrate >= threshold
        # torch.where returns a new tensor, so the tensor appended above is not
        # mutated by the rebinding below
        integrate = torch.where(
            fire_place,
            integrate - torch.ones([batch_size], device=hidden.device),
            integrate,
        )
        # at a firing step, only the part completing the distribution goes into
        # the current frame; the remainder starts the next frame
        cur = torch.where(fire_place, distribution_completion, alpha)
        remainds = alpha - cur

        frame += cur[:, None] * hidden[:, t, :]
        list_frames.append(frame)
        frame = torch.where(
            fire_place[:, None].repeat(1, hidden_size),
            remainds[:, None] * hidden[:, t, :],
            frame,
        )

    fires = torch.stack(list_fires, 1)
    frames = torch.stack(list_frames, 1)
    list_ls = []
    # expected number of labels per batch element (rounded integral of alphas)
    len_labels = torch.round(alphas.sum(-1)).int()
    max_label_len = len_labels.max()
    for b in range(batch_size):
        fire = fires[b, :]
        # NOTE(review): .squeeze() yields a 0-d tensor when exactly one
        # position fires, which index_select may not accept — confirm this
        # edge case is unreachable or handle it explicitly.
        l = torch.index_select(
            frames[b, :, :], 0, torch.nonzero(fire >= threshold).squeeze()
        )
        pad_l = torch.zeros(
            [int(max_label_len - l.size(0)), hidden_size], device=hidden.device
        )
        list_ls.append(torch.cat([l, pad_l], 0))
    return torch.stack(list_ls, 0), fires
17,429 | from typing import List
import torch
def basic_greedy_search(
    model: torch.nn.Module,
    encoder_out: torch.Tensor,
    encoder_out_lens: torch.Tensor,
    n_steps: int = 64,
) -> List[List[int]]:
    """Frame-synchronous greedy decoding for a transducer-style model.

    Walks the encoder output one frame at a time, scoring the current
    predictor output against the frame with ``model.joint``; a non-blank
    argmax is emitted and fed back into ``model.predictor``, a blank
    advances to the next frame.

    Args:
        model: module exposing ``blank``, ``predictor`` and ``joint``
            (batch size 1 is assumed throughout this function).
        encoder_out: encoder output of shape (1, T, E).
        encoder_out_lens: number of valid encoder frames; compared as a
            scalar against the running frame index.
        n_steps: cap on non-blank emissions per frame, guarding against an
            endless non-blank loop on a single frame.

    Returns:
        A one-element list holding the decoded token-id sequence.
    """
    # fake padding
    padding = torch.zeros(1, 1).to(encoder_out.device)
    # sos: decoding starts from the blank symbol
    pred_input_step = torch.tensor([model.blank]).reshape(1, 1)
    cache = model.predictor.init_state(1, method="zero", device=encoder_out.device)
    new_cache: List[torch.Tensor] = []
    t = 0
    hyps = []
    prev_out_nblk = True
    pred_out_step = None
    per_frame_max_noblk = n_steps
    per_frame_noblk = 0
    while t < encoder_out_lens:
        encoder_out_step = encoder_out[:, t : t + 1, :]  # [1, 1, E]
        if prev_out_nblk:
            # Re-run the predictor only after a non-blank emission; on a
            # blank, the previous predictor output is reused.
            step_outs = model.predictor.forward_step(
                pred_input_step, padding, cache
            )  # [1, 1, P]
            pred_out_step, new_cache = step_outs[0], step_outs[1]
        joint_out_step = model.joint(encoder_out_step, pred_out_step)  # [1,1,v]
        joint_out_probs = joint_out_step.log_softmax(dim=-1)
        joint_out_max = joint_out_probs.argmax(dim=-1).squeeze()  # []
        if joint_out_max != model.blank:
            hyps.append(joint_out_max.item())
            prev_out_nblk = True
            per_frame_noblk = per_frame_noblk + 1
            # Emitted token becomes the next predictor input.
            pred_input_step = joint_out_max.reshape(1, 1)
            # state_m, state_c = clstate_out_m, state_out_c
            cache = new_cache
        if joint_out_max == model.blank or per_frame_noblk >= per_frame_max_noblk:
            if joint_out_max == model.blank:
                prev_out_nblk = False
            # TODO(Mddct): make t chunk-wise for streaming,
            # or t shouldn't run too long predicting non-blanks
            t = t + 1
            per_frame_noblk = 0
    return [hyps]
17,430 | from typing import List, Optional, Tuple
import torch
from torch import nn
from modules.wenet_extractor.utils.common import get_activation, get_rnn
The provided code snippet includes necessary dependencies for implementing the `ApplyPadding` function. Write a Python function `def ApplyPadding(input, padding, pad_value) -> torch.Tensor` to solve the following problem:
Args: input: [bs, max_time_step, dim] padding: [bs, max_time_step]
Here is the function:
def ApplyPadding(input, padding, pad_value) -> torch.Tensor:
    """Overwrite padded positions of ``input`` with ``pad_value``.

    Args:
        input: [bs, max_time_step, dim]
        padding: [bs, max_time_step]; 1 marks a padded position, 0 a real
            one (must be broadcast-compatible with ``input``).
        pad_value: value written at padded positions.

    Returns:
        Tensor equal to ``input`` where ``padding`` is 0 and to
        ``pad_value`` where ``padding`` is 1.
    """
    keep = 1 - padding  # 1 where the position holds real data
    return padding * pad_value + input * keep
17,431 | import numpy as np
import torch
def insert_blank(label, blank_id=0):
    """Interleave blank tokens with the label sequence (CTC topology).

    Produces ``[blank, l1, blank, l2, ..., blank, ln, blank]`` — a blank
    before every label token plus one trailing blank.

    Fixes the original implementation, which crashed on an empty label
    (it indexed ``label[0]`` to append the trailing blank); an empty input
    now yields ``[blank_id]``.

    Args:
        label: 1-D array-like of label ids (length n).
        blank_id: id used for the blank symbol.

    Returns:
        np.ndarray of shape (2 * n + 1,), dtype int64.
    """
    label = np.asarray(label, dtype=np.int64)
    out = np.full(2 * label.shape[0] + 1, blank_id, dtype=np.int64)
    out[1::2] = label  # odd positions hold the original labels
    return out
The provided code snippet includes necessary dependencies for implementing the `forced_align` function. Write a Python function `def forced_align(ctc_probs: torch.Tensor, y: torch.Tensor, blank_id=0) -> list` to solve the following problem:
ctc forced alignment. Args: torch.Tensor ctc_probs: hidden state sequence, 2d tensor (T, D) torch.Tensor y: id sequence tensor 1d tensor (L) int blank_id: blank symbol index Returns: torch.Tensor: alignment result
Here is the function:
def forced_align(ctc_probs: torch.Tensor, y: torch.Tensor, blank_id=0) -> list:
    """ctc forced alignment.

    Viterbi alignment of the label sequence ``y`` against per-frame CTC
    scores over the standard CTC topology (blank inserted around every
    label, see ``insert_blank``). Scores are combined by addition, so
    ``ctc_probs`` is treated as log-domain.

    Args:
        torch.Tensor ctc_probs: hidden state sequence, 2d tensor (T, D)
        torch.Tensor y: id sequence tensor 1d tensor (L)
        int blank_id: blank symbol index
    Returns:
        torch.Tensor: alignment result
    """
    y_insert_blank = insert_blank(y, blank_id)

    # log_alpha[t, s]: best log-score over paths that sit in CTC state s
    # after consuming frames 0..t.
    log_alpha = torch.zeros((ctc_probs.size(0), len(y_insert_blank)))
    log_alpha = log_alpha - float("inf")  # log of zero
    # state_path[t, s]: predecessor state chosen for (t, s); -1 = unset.
    state_path = (
        torch.zeros((ctc_probs.size(0), len(y_insert_blank)), dtype=torch.int16) - 1
    )  # state path

    # init start state: paths may begin at the leading blank or first label
    log_alpha[0, 0] = ctc_probs[0][y_insert_blank[0]]
    log_alpha[0, 1] = ctc_probs[0][y_insert_blank[1]]

    for t in range(1, ctc_probs.size(0)):
        for s in range(len(y_insert_blank)):
            # Blanks, the first two states, and repeated labels cannot use
            # the skip transition from s - 2; all other states can.
            if (
                y_insert_blank[s] == blank_id
                or s < 2
                or y_insert_blank[s] == y_insert_blank[s - 2]
            ):
                candidates = torch.tensor(
                    [log_alpha[t - 1, s], log_alpha[t - 1, s - 1]]
                )
                prev_state = [s, s - 1]
            else:
                candidates = torch.tensor(
                    [
                        log_alpha[t - 1, s],
                        log_alpha[t - 1, s - 1],
                        log_alpha[t - 1, s - 2],
                    ]
                )
                prev_state = [s, s - 1, s - 2]
            log_alpha[t, s] = torch.max(candidates) + ctc_probs[t][y_insert_blank[s]]
            state_path[t, s] = prev_state[torch.argmax(candidates)]

    # Backtrack from the better of the two valid final states
    # (trailing blank or last label).
    state_seq = -1 * torch.ones((ctc_probs.size(0), 1), dtype=torch.int16)
    candidates = torch.tensor(
        [log_alpha[-1, len(y_insert_blank) - 1], log_alpha[-1, len(y_insert_blank) - 2]]
    )
    final_state = [len(y_insert_blank) - 1, len(y_insert_blank) - 2]
    state_seq[-1] = final_state[torch.argmax(candidates)]
    for t in range(ctc_probs.size(0) - 2, -1, -1):
        state_seq[t] = state_path[t + 1, state_seq[t + 1, 0]]

    # Map the chosen state at each frame back to its token id.
    output_alignment = []
    for t in range(0, ctc_probs.size(0)):
        output_alignment.append(y_insert_blank[state_seq[t, 0]])
    return output_alignment
17,432 | import torch
The provided code snippet includes necessary dependencies for implementing the `subsequent_mask` function. Write a Python function `def subsequent_mask( size: int, device: torch.device = torch.device("cpu"), ) -> torch.Tensor` to solve the following problem:
Create mask for subsequent steps (size, size). This mask is used only in the decoder, which works in an auto-regressive mode. This means the current step could only do attention with its left steps. In the encoder, full attention is used when streaming is not necessary and the sequence is not long. In this case, no attention mask is needed. When streaming is needed, chunk-based attention is used in the encoder. See subsequent_chunk_mask for the chunk-based attention mask. Args: size (int): size of mask str device (str): "cpu" or "cuda" or torch.Tensor.device dtype (torch.device): result dtype Returns: torch.Tensor: mask Examples: >>> subsequent_mask(3) [[1, 0, 0], [1, 1, 0], [1, 1, 1]]
Here is the function:
def subsequent_mask(
    size: int,
    device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Build a lower-triangular (causal) boolean mask of shape (size, size).

    Step ``i`` may attend to steps ``0..i``, so row ``i`` has its first
    ``i + 1`` entries set to True. Used by the auto-regressive decoder;
    the streaming encoder uses chunk-based masks instead (see
    ``subsequent_chunk_mask``).

    Args:
        size: number of decoding steps.
        device: device on which to allocate the mask.

    Returns:
        Boolean tensor of shape (size, size), True where attention is
        allowed.

    Examples:
        >>> subsequent_mask(3)
        [[1, 0, 0],
         [1, 1, 0],
         [1, 1, 1]]
    """
    full = torch.ones(size, size, dtype=torch.bool, device=device)
    return torch.tril(full)
17,433 | import torch
def subsequent_chunk_mask(
    size: int,
    chunk_size: int,
    num_left_chunks: int = -1,
    device: torch.device = torch.device("cpu"),
) -> torch.Tensor:
    """Create mask for subsequent steps (size, size) with chunk size,
    this is for streaming encoder.

    Each position may attend to every position inside its own chunk and,
    when ``num_left_chunks >= 0``, to at most that many chunks on its left.

    Args:
        size (int): size of mask
        chunk_size (int): size of chunk
        num_left_chunks (int): number of left chunks
            <0: use full left context
            >=0: use num_left_chunks
        device (torch.device): "cpu" or "cuda" or torch.Tensor.device

    Returns:
        torch.Tensor: bool mask of shape (size, size)

    Examples:
        >>> subsequent_chunk_mask(4, 2)
        [[1, 1, 0, 0],
         [1, 1, 0, 0],
         [1, 1, 1, 1],
         [1, 1, 1, 1]]
    """
    # Vectorized replacement for the original O(size^2) Python loop:
    # row i attends to columns [start_i, end_i), both bounds depending
    # only on i's chunk index.
    pos = torch.arange(size, device=device)
    chunk_idx = pos // chunk_size
    end = torch.clamp((chunk_idx + 1) * chunk_size, max=size)
    if num_left_chunks < 0:
        start = torch.zeros_like(pos)
    else:
        start = torch.clamp((chunk_idx - num_left_chunks) * chunk_size, min=0)
    cols = pos.unsqueeze(0)  # (1, size)
    return (cols >= start.unsqueeze(1)) & (cols < end.unsqueeze(1))
The provided code snippet includes necessary dependencies for implementing the `add_optional_chunk_mask` function. Write a Python function `def add_optional_chunk_mask( xs: torch.Tensor, masks: torch.Tensor, use_dynamic_chunk: bool, use_dynamic_left_chunk: bool, decoding_chunk_size: int, static_chunk_size: int, num_decoding_left_chunks: int, )` to solve the following problem:
Apply optional mask for encoder. Args: xs (torch.Tensor): padded input, (B, L, D), L for max length mask (torch.Tensor): mask for xs, (B, 1, L) use_dynamic_chunk (bool): whether to use dynamic chunk or not use_dynamic_left_chunk (bool): whether to use dynamic left chunk for training. decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's 0: default for training, use random dynamic chunk. <0: for decoding, use full chunk. >0: for decoding, use fixed chunk size as set. static_chunk_size (int): chunk size for static chunk training/decoding if it's greater than 0, if use_dynamic_chunk is true, this parameter will be ignored num_decoding_left_chunks: number of left chunks, this is for decoding, the chunk size is decoding_chunk_size. >=0: use num_decoding_left_chunks <0: use all left chunks Returns: torch.Tensor: chunk mask of the input xs.
Here is the function:
def add_optional_chunk_mask(
    xs: torch.Tensor,
    masks: torch.Tensor,
    use_dynamic_chunk: bool,
    use_dynamic_left_chunk: bool,
    decoding_chunk_size: int,
    static_chunk_size: int,
    num_decoding_left_chunks: int,
):
    """Apply optional mask for encoder.

    Args:
        xs (torch.Tensor): padded input, (B, L, D), L for max length
        masks (torch.Tensor): mask for xs, (B, 1, L)
        use_dynamic_chunk (bool): whether to use dynamic chunk or not
        use_dynamic_left_chunk (bool): whether to use dynamic left chunk for
            training.
        decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's
            0: default for training, use random dynamic chunk.
            <0: for decoding, use full chunk.
            >0: for decoding, use fixed chunk size as set.
        static_chunk_size (int): chunk size for static chunk training/decoding
            if it's greater than 0, if use_dynamic_chunk is true,
            this parameter will be ignored
        num_decoding_left_chunks: number of left chunks, this is for decoding,
            the chunk size is decoding_chunk_size.
            >=0: use num_decoding_left_chunks
            <0: use all left chunks

    Returns:
        torch.Tensor: chunk mask of the input xs.
    """
    # Whether to use chunk mask or not
    if use_dynamic_chunk:
        max_len = xs.size(1)
        if decoding_chunk_size < 0:
            chunk_size = max_len
            num_left_chunks = -1
        elif decoding_chunk_size > 0:
            chunk_size = decoding_chunk_size
            num_left_chunks = num_decoding_left_chunks
        else:
            # Training path: draw a random chunk size per batch.
            # chunk size is either [1, 25] or full context(max_len).
            # Since we use 4 times subsampling and allow up to 1s(100 frames)
            # delay, the maximum frame is 100 / 4 = 25.
            # NOTE(review): torch.randint(1, max_len, ...) requires
            # max_len > 1 — confirm batches always have more than one frame.
            chunk_size = torch.randint(1, max_len, (1,)).item()
            num_left_chunks = -1
            if chunk_size > max_len // 2:
                # Draws above max_len // 2 fall back to full context; the
                # remaining draws are folded into the range [1, 25].
                chunk_size = max_len
            else:
                chunk_size = chunk_size % 25 + 1
                if use_dynamic_left_chunk:
                    max_left_chunks = (max_len - 1) // chunk_size
                    num_left_chunks = torch.randint(0, max_left_chunks, (1,)).item()
        chunk_masks = subsequent_chunk_mask(
            xs.size(1), chunk_size, num_left_chunks, xs.device
        )  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        # (B, 1, L) & (1, L, L) broadcasts to a per-utterance (B, L, L)
        # mask that combines padding and chunk visibility.
        chunk_masks = masks & chunk_masks  # (B, L, L)
    elif static_chunk_size > 0:
        num_left_chunks = num_decoding_left_chunks
        chunk_masks = subsequent_chunk_mask(
            xs.size(1), static_chunk_size, num_left_chunks, xs.device
        )  # (L, L)
        chunk_masks = chunk_masks.unsqueeze(0)  # (1, L, L)
        chunk_masks = masks & chunk_masks  # (B, L, L)
    else:
        # No chunking: the padding mask alone is returned.
        chunk_masks = masks
    return chunk_masks
17,434 | import torch
def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:
    """Boolean mask that is True at padded positions.

    See description of make_non_pad_mask.

    Args:
        lengths: batch of valid lengths, shape (B,).
        max_len: target width of the mask; when <= 0, the batch maximum
            length is used.

    Returns:
        Bool tensor of shape (B, max_len); entry (b, t) is True iff
        t >= lengths[b].

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_pad_mask(lengths)
        masks = [[0, 0, 0, 0 ,0],
                 [0, 0, 0, 1, 1],
                 [0, 0, 1, 1, 1]]
    """
    if max_len <= 0:
        max_len = lengths.max().item()
    batch_size = lengths.size(0)
    positions = torch.arange(max_len, dtype=torch.int64, device=lengths.device)
    positions = positions.unsqueeze(0).expand(batch_size, max_len)
    return positions >= lengths.unsqueeze(-1)
The provided code snippet includes necessary dependencies for implementing the `make_non_pad_mask` function. Write a Python function `def make_non_pad_mask(lengths: torch.Tensor) -> torch.Tensor` to solve the following problem:
Make mask tensor containing indices of non-padded part. The sequences in a batch may have different lengths. To enable batch computing, padding is needed to make all sequences the same size. To prevent the padding positions from passing values into context-dependent blocks such as attention or convolution, the padded part is masked. This pad_mask is used in both encoder and decoder. 1 for non-padded part and 0 for padded part. Args: lengths (torch.Tensor): Batch of lengths (B,). Returns: torch.Tensor: mask tensor containing indices of padded part. Examples: >>> lengths = [5, 3, 2] >>> make_non_pad_mask(lengths) masks = [[1, 1, 1, 1 ,1], [1, 1, 1, 0, 0], [1, 1, 0, 0, 0]]
Here is the function:
def make_non_pad_mask(lengths: torch.Tensor) -> torch.Tensor:
    """Boolean mask that is True at valid (non-padded) positions.

    Sequences in a batch have different lengths, so they are padded to a
    common size for batch computing; this mask lets context-dependent
    blocks (attention, convolution) ignore the padded tail. Used in both
    encoder and decoder: 1 for real positions, 0 for padding.

    Args:
        lengths: batch of valid lengths, shape (B,).

    Returns:
        Bool tensor of shape (B, max(lengths)); entry (b, t) is True iff
        t < lengths[b].

    Examples:
        >>> lengths = [5, 3, 2]
        >>> make_non_pad_mask(lengths)
        masks = [[1, 1, 1, 1 ,1],
                 [1, 1, 1, 0, 0],
                 [1, 1, 0, 0, 0]]
    """
    # Inlined complement of make_pad_mask: position < length.
    batch_size = lengths.size(0)
    max_len = lengths.max().item()
    steps = torch.arange(max_len, dtype=torch.int64, device=lengths.device)
    steps = steps.unsqueeze(0).expand(batch_size, max_len)
    return steps < lengths.unsqueeze(-1)
17,435 | import torch
The provided code snippet includes necessary dependencies for implementing the `mask_finished_scores` function. Write a Python function `def mask_finished_scores(score: torch.Tensor, flag: torch.Tensor) -> torch.Tensor` to solve the following problem:
If a sequence is finished, we only allow one alive branch. This function aims to give one branch a zero score and the rest -inf score. Args: score (torch.Tensor): A real value array with shape (batch_size * beam_size, beam_size). flag (torch.Tensor): A bool array with shape (batch_size * beam_size, 1). Returns: torch.Tensor: (batch_size * beam_size, beam_size).
Here is the function:
def mask_finished_scores(score: torch.Tensor, flag: torch.Tensor) -> torch.Tensor:
    """Keep a single alive branch for every finished hypothesis.

    For rows whose ``flag`` is set, the first beam entry is forced to 0
    and all remaining entries to -inf, so a finished hypothesis
    contributes exactly one zero-scored continuation. ``score`` is
    modified in place and also returned.

    Args:
        score (torch.Tensor): A real value array with shape
            (batch_size * beam_size, beam_size).
        flag (torch.Tensor): A bool array with shape
            (batch_size * beam_size, 1).

    Returns:
        torch.Tensor: (batch_size * beam_size, beam_size).
    """
    beam_size = score.size(-1)
    # Broadcast the (N, 1) flag against a (beam,) column selector.
    is_first_col = torch.arange(beam_size, device=score.device) == 0
    finished_mask = flag & is_first_col    # flagged rows, column 0 only
    unfinished_mask = flag & ~is_first_col  # flagged rows, columns 1..beam-1
    score.masked_fill_(unfinished_mask, -float("inf"))
    score.masked_fill_(finished_mask, 0)
    return score
17,436 | import torch
The provided code snippet includes necessary dependencies for implementing the `mask_finished_preds` function. Write a Python function `def mask_finished_preds( pred: torch.Tensor, flag: torch.Tensor, eos: int ) -> torch.Tensor` to solve the following problem:
If a sequence is finished, all of its branch should be <eos> Args: pred (torch.Tensor): A int array with shape (batch_size * beam_size, beam_size). flag (torch.Tensor): A bool array with shape (batch_size * beam_size, 1). Returns: torch.Tensor: (batch_size * beam_size).
Here is the function:
def mask_finished_preds(
    pred: torch.Tensor, flag: torch.Tensor, eos: int
) -> torch.Tensor:
    """Force every branch of a finished hypothesis to emit <eos>.

    Args:
        pred (torch.Tensor): A int array with shape
            (batch_size * beam_size, beam_size).
        flag (torch.Tensor): A bool array with shape
            (batch_size * beam_size, 1).
        eos: id of the end-of-sentence symbol.

    Returns:
        torch.Tensor: (batch_size * beam_size), masked in place.
    """
    beam_size = pred.size(-1)
    finished_rows = flag.expand(-1, beam_size)  # broadcast flag over the beam
    return pred.masked_fill_(finished_rows, eos)
17,437 | import re
def read_lists(list_file):
    """Read ``list_file`` and return its lines with whitespace stripped."""
    with open(list_file, "r", encoding="utf8") as fin:
        return [line.strip() for line in fin]
The provided code snippet includes necessary dependencies for implementing the `read_non_lang_symbols` function. Write a Python function `def read_non_lang_symbols(non_lang_sym_path)` to solve the following problem:
read non-linguistic symbols from a file. The file format is like below: {NOISE}\n {BRK}\n ... Args: non_lang_sym_path: non-linguistic symbol file path; None means there are no such symbols.
Here is the function:
def read_non_lang_symbols(non_lang_sym_path):
    """Read non-linguistic symbols from a file.

    The file holds one symbol per line, e.g.:
        {NOISE}\n
        {BRK}\n
        ...
    Every symbol must be wrapped in {}, <> or [].

    Args:
        non_lang_sym_path: non-linguistic symbol file path; ``None`` means
            there are no such symbols.

    Returns:
        The list of symbols, or ``None`` when no path is given.

    Raises:
        BadSymbolFormat: if any line is not wrapped in {}/<>/[].
    """
    if non_lang_sym_path is None:
        return None
    syms = read_lists(non_lang_sym_path)
    non_lang_syms_pattern = re.compile(r"(\[[^\[\]]+\]|<[^<>]+>|{[^{}]+})")

    # Declared once here; the original re-declared the class inside the
    # loop body on every failing iteration.
    class BadSymbolFormat(Exception):
        pass

    for sym in syms:
        if non_lang_syms_pattern.fullmatch(sym) is None:
            # Message grammar/typo fixed ("consider modify", "requirment").
            raise BadSymbolFormat(
                "Non-linguistic symbols should be "
                "formatted in {xxx}/<xxx>/[xxx], consider"
                " modifying '%s' to meet the requirement. "
                "More details can be found in discussions here : "
                "https://github.com/wenet-e2e/wenet/pull/819" % (sym)
            )
    return syms
17,438 | import re
def read_symbol_table(symbol_table_file):
    """Load a "symbol id" file into a dict mapping symbol -> int id.

    Each line must contain exactly two whitespace-separated fields:
    the symbol and its integer id.
    """
    with open(symbol_table_file, "r", encoding="utf8") as fin:
        entries = [line.strip().split() for line in fin]
    for entry in entries:
        assert len(entry) == 2
    return {symbol: int(index) for symbol, index in entries}
17,439 | import torch
from modules.wenet_extractor.transducer.joint import TransducerJoint
from modules.wenet_extractor.transducer.predictor import (
ConvPredictor,
EmbeddingPredictor,
RNNPredictor,
)
from modules.wenet_extractor.transducer.transducer import Transducer
from modules.wenet_extractor.transformer.asr_model import ASRModel
from modules.wenet_extractor.transformer.cmvn import GlobalCMVN
from modules.wenet_extractor.transformer.ctc import CTC
from modules.wenet_extractor.transformer.decoder import (
BiTransformerDecoder,
TransformerDecoder,
)
from modules.wenet_extractor.transformer.encoder import (
ConformerEncoder,
TransformerEncoder,
)
from modules.wenet_extractor.squeezeformer.encoder import SqueezeformerEncoder
from modules.wenet_extractor.efficient_conformer.encoder import (
EfficientConformerEncoder,
)
from modules.wenet_extractor.paraformer.paraformer import Paraformer
from modules.wenet_extractor.cif.predictor import Predictor
from modules.wenet_extractor.utils.cmvn import load_cmvn
class TransducerJoint(torch.nn.Module):
def __init__(
self,
voca_size: int,
enc_output_size: int,
pred_output_size: int,
join_dim: int,
prejoin_linear: bool = True,
postjoin_linear: bool = False,
joint_mode: str = "add",
activation: str = "tanh",
):
def forward(self, enc_out: torch.Tensor, pred_out: torch.Tensor):
class RNNPredictor(PredictorBase):
def __init__(
self,
voca_size: int,
embed_size: int,
output_size: int,
embed_dropout: float,
hidden_size: int,
num_layers: int,
bias: bool = True,
rnn_type: str = "lstm",
dropout: float = 0.1,
) -> None:
def forward(
self,
input: torch.Tensor,
cache: Optional[List[torch.Tensor]] = None,
) -> torch.Tensor:
def batch_to_cache(self, cache: List[torch.Tensor]) -> List[List[torch.Tensor]]:
def cache_to_batch(self, cache: List[List[torch.Tensor]]) -> List[torch.Tensor]:
def init_state(
self,
batch_size: int,
device: torch.device,
method: str = "zero",
) -> List[torch.Tensor]:
def forward_step(
self, input: torch.Tensor, padding: torch.Tensor, cache: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
class EmbeddingPredictor(PredictorBase):
def __init__(
self,
voca_size: int,
embed_size: int,
embed_dropout: float,
n_head: int,
history_size: int = 2,
activation: str = "swish",
bias: bool = False,
layer_norm_epsilon: float = 1e-5,
) -> None:
def init_state(
self, batch_size: int, device: torch.device, method: str = "zero"
) -> List[torch.Tensor]:
def batch_to_cache(self, cache: List[torch.Tensor]) -> List[List[torch.Tensor]]:
def cache_to_batch(self, cache: List[List[torch.Tensor]]) -> List[torch.Tensor]:
def forward(self, input: torch.Tensor, cache: Optional[List[torch.Tensor]] = None):
def forward_step(
self,
input: torch.Tensor,
padding: torch.Tensor,
cache: List[torch.Tensor],
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
class ConvPredictor(PredictorBase):
def __init__(
self,
voca_size: int,
embed_size: int,
embed_dropout: float,
history_size: int = 2,
activation: str = "relu",
bias: bool = False,
layer_norm_epsilon: float = 1e-5,
) -> None:
def init_state(
self, batch_size: int, device: torch.device, method: str = "zero"
) -> List[torch.Tensor]:
def cache_to_batch(self, cache: List[List[torch.Tensor]]) -> List[torch.Tensor]:
def batch_to_cache(self, cache: List[torch.Tensor]) -> List[List[torch.Tensor]]:
def forward(self, input: torch.Tensor, cache: Optional[List[torch.Tensor]] = None):
def forward_step(
self, input: torch.Tensor, padding: torch.Tensor, cache: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
class Transducer(ASRModel):
def __init__(
self,
vocab_size: int,
blank: int,
encoder: nn.Module,
predictor: PredictorBase,
joint: nn.Module,
attention_decoder: Optional[
Union[TransformerDecoder, BiTransformerDecoder]
] = None,
ctc: Optional[CTC] = None,
ctc_weight: float = 0,
ignore_id: int = IGNORE_ID,
reverse_weight: float = 0.0,
lsm_weight: float = 0.0,
length_normalized_loss: bool = False,
transducer_weight: float = 1.0,
attention_weight: float = 0.0,
) -> None:
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
) -> Dict[str, Optional[torch.Tensor]]:
def init_bs(self):
def _cal_transducer_score(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
hyps_lens: torch.Tensor,
hyps_pad: torch.Tensor,
):
def _cal_attn_score(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
hyps_pad: torch.Tensor,
hyps_lens: torch.Tensor,
):
def beam_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
beam_size: int = 5,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
ctc_weight: float = 0.3,
transducer_weight: float = 0.7,
):
def transducer_attention_rescoring(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
reverse_weight: float = 0.0,
ctc_weight: float = 0.0,
attn_weight: float = 0.0,
transducer_weight: float = 0.0,
search_ctc_weight: float = 1.0,
search_transducer_weight: float = 0.0,
beam_search_type: str = "transducer",
) -> List[List[int]]:
def greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
n_steps: int = 64,
) -> List[List[int]]:
def forward_encoder_chunk(
self,
xs: torch.Tensor,
offset: int,
required_cache_size: int,
att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward_predictor_step(
self, xs: torch.Tensor, cache: List[torch.Tensor]
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
def forward_joint_step(
self, enc_out: torch.Tensor, pred_out: torch.Tensor
) -> torch.Tensor:
def forward_predictor_init_state(self) -> List[torch.Tensor]:
class ASRModel(torch.nn.Module):
def __init__(
self,
vocab_size: int,
encoder: TransformerEncoder,
decoder: TransformerDecoder,
ctc: CTC,
ctc_weight: float = 0.5,
ignore_id: int = IGNORE_ID,
reverse_weight: float = 0.0,
lsm_weight: float = 0.0,
length_normalized_loss: bool = False,
lfmmi_dir: str = "",
):
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
) -> Dict[str, Optional[torch.Tensor]]:
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
) -> Tuple[torch.Tensor, float]:
def _forward_encoder(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
def encoder_extractor(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
def recognize(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int = 10,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> torch.Tensor:
def ctc_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[List[int]]:
def _ctc_prefix_beam_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[List[List[int]], torch.Tensor]:
def ctc_prefix_beam_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> List[int]:
def attention_rescoring(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_size: int,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
ctc_weight: float = 0.0,
simulate_streaming: bool = False,
reverse_weight: float = 0.0,
) -> List[int]:
def load_lfmmi_resource(self):
def _calc_lfmmi_loss(self, encoder_out, encoder_mask, text):
def load_hlg_resource_if_necessary(self, hlg, word):
def hlg_onebest(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
hlg: str = "",
word: str = "",
symbol_table: Dict[str, int] = None,
) -> List[int]:
def hlg_rescore(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
lm_scale: float = 0,
decoder_scale: float = 0,
r_decoder_scale: float = 0,
hlg: str = "",
word: str = "",
symbol_table: Dict[str, int] = None,
) -> List[int]:
def subsampling_rate(self) -> int:
def right_context(self) -> int:
def sos_symbol(self) -> int:
def eos_symbol(self) -> int:
def forward_encoder_chunk(
self,
xs: torch.Tensor,
offset: int,
required_cache_size: int,
att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def ctc_activation(self, xs: torch.Tensor) -> torch.Tensor:
def is_bidirectional_decoder(self) -> bool:
def forward_attention_decoder(
self,
hyps: torch.Tensor,
hyps_lens: torch.Tensor,
encoder_out: torch.Tensor,
reverse_weight: float = 0,
) -> Tuple[torch.Tensor, torch.Tensor]:
class GlobalCMVN(torch.nn.Module):
def __init__(self, mean: torch.Tensor, istd: torch.Tensor, norm_var: bool = True):
def forward(self, x: torch.Tensor):
class CTC(torch.nn.Module):
def __init__(
self,
odim: int,
encoder_output_size: int,
dropout_rate: float = 0.0,
reduce: bool = True,
):
def forward(
self,
hs_pad: torch.Tensor,
hlens: torch.Tensor,
ys_pad: torch.Tensor,
ys_lens: torch.Tensor,
) -> torch.Tensor:
def log_softmax(self, hs_pad: torch.Tensor) -> torch.Tensor:
def argmax(self, hs_pad: torch.Tensor) -> torch.Tensor:
class TransformerDecoder(torch.nn.Module):
def __init__(
self,
vocab_size: int,
encoder_output_size: int,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
self_attention_dropout_rate: float = 0.0,
src_attention_dropout_rate: float = 0.0,
input_layer: str = "embed",
use_output_layer: bool = True,
normalize_before: bool = True,
src_attention: bool = True,
):
def forward(
self,
memory: torch.Tensor,
memory_mask: torch.Tensor,
ys_in_pad: torch.Tensor,
ys_in_lens: torch.Tensor,
r_ys_in_pad: torch.Tensor = torch.empty(0),
reverse_weight: float = 0.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward_one_step(
self,
memory: torch.Tensor,
memory_mask: torch.Tensor,
tgt: torch.Tensor,
tgt_mask: torch.Tensor,
cache: Optional[List[torch.Tensor]] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
class BiTransformerDecoder(torch.nn.Module):
def __init__(
self,
vocab_size: int,
encoder_output_size: int,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
r_num_blocks: int = 0,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
self_attention_dropout_rate: float = 0.0,
src_attention_dropout_rate: float = 0.0,
input_layer: str = "embed",
use_output_layer: bool = True,
normalize_before: bool = True,
):
def forward(
self,
memory: torch.Tensor,
memory_mask: torch.Tensor,
ys_in_pad: torch.Tensor,
ys_in_lens: torch.Tensor,
r_ys_in_pad: torch.Tensor,
reverse_weight: float = 0.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward_one_step(
self,
memory: torch.Tensor,
memory_mask: torch.Tensor,
tgt: torch.Tensor,
tgt_mask: torch.Tensor,
cache: Optional[List[torch.Tensor]] = None,
) -> Tuple[torch.Tensor, List[torch.Tensor]]:
class TransformerEncoder(BaseEncoder):
def __init__(
self,
input_size: int,
output_size: int = 256,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
input_layer: str = "conv2d",
pos_enc_layer_type: str = "abs_pos",
normalize_before: bool = True,
static_chunk_size: int = 0,
use_dynamic_chunk: bool = False,
global_cmvn: torch.nn.Module = None,
use_dynamic_left_chunk: bool = False,
):
class ConformerEncoder(BaseEncoder):
def __init__(
self,
input_size: int,
output_size: int = 256,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
input_layer: str = "conv2d",
pos_enc_layer_type: str = "rel_pos",
normalize_before: bool = True,
static_chunk_size: int = 0,
use_dynamic_chunk: bool = False,
global_cmvn: torch.nn.Module = None,
use_dynamic_left_chunk: bool = False,
positionwise_conv_kernel_size: int = 1,
macaron_style: bool = True,
selfattention_layer_type: str = "rel_selfattn",
activation_type: str = "swish",
use_cnn_module: bool = True,
cnn_module_kernel: int = 15,
causal: bool = False,
cnn_module_norm: str = "batch_norm",
):
class SqueezeformerEncoder(nn.Module):
def __init__(
self,
input_size: int = 80,
encoder_dim: int = 256,
output_size: int = 256,
attention_heads: int = 4,
num_blocks: int = 12,
reduce_idx: Optional[Union[int, List[int]]] = 5,
recover_idx: Optional[Union[int, List[int]]] = 11,
feed_forward_expansion_factor: int = 4,
dw_stride: bool = False,
input_dropout_rate: float = 0.1,
pos_enc_layer_type: str = "rel_pos",
time_reduction_layer_type: str = "conv1d",
do_rel_shift: bool = True,
feed_forward_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.1,
cnn_module_kernel: int = 31,
cnn_norm_type: str = "batch_norm",
dropout: float = 0.1,
causal: bool = False,
adaptive_scale: bool = True,
activation_type: str = "swish",
init_weights: bool = True,
global_cmvn: torch.nn.Module = None,
normalize_before: bool = False,
use_dynamic_chunk: bool = False,
concat_after: bool = False,
static_chunk_size: int = 0,
use_dynamic_left_chunk: bool = False,
):
def output_size(self) -> int:
def forward(
self,
xs: torch.Tensor,
xs_lens: torch.Tensor,
decoding_chunk_size: int = 0,
num_decoding_left_chunks: int = -1,
) -> Tuple[torch.Tensor, torch.Tensor]:
def check_ascending_list(self):
def calculate_downsampling_factor(self, i: int) -> int:
def forward_chunk(
self,
xs: torch.Tensor,
offset: int,
required_cache_size: int,
att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward_chunk_by_chunk(
self,
xs: torch.Tensor,
decoding_chunk_size: int,
num_decoding_left_chunks: int = -1,
) -> Tuple[torch.Tensor, torch.Tensor]:
class EfficientConformerEncoder(torch.nn.Module):
def __init__(
self,
input_size: int,
output_size: int = 256,
attention_heads: int = 4,
linear_units: int = 2048,
num_blocks: int = 6,
dropout_rate: float = 0.1,
positional_dropout_rate: float = 0.1,
attention_dropout_rate: float = 0.0,
input_layer: str = "conv2d",
pos_enc_layer_type: str = "rel_pos",
normalize_before: bool = True,
static_chunk_size: int = 0,
use_dynamic_chunk: bool = False,
global_cmvn: torch.nn.Module = None,
use_dynamic_left_chunk: bool = False,
macaron_style: bool = True,
activation_type: str = "swish",
use_cnn_module: bool = True,
cnn_module_kernel: int = 15,
causal: bool = False,
cnn_module_norm: str = "batch_norm",
stride_layer_idx: Optional[Union[int, List[int]]] = 3,
stride: Optional[Union[int, List[int]]] = 2,
group_layer_idx: Optional[Union[int, List[int], tuple]] = (0, 1, 2, 3),
group_size: int = 3,
stride_kernel: bool = True,
**kwargs,
):
def set_global_chunk_size(self, chunk_size):
def output_size(self) -> int:
def calculate_downsampling_factor(self, i: int) -> int:
def forward(
self,
xs: torch.Tensor,
xs_lens: torch.Tensor,
decoding_chunk_size: int = 0,
num_decoding_left_chunks: int = -1,
) -> Tuple[torch.Tensor, torch.Tensor]:
def forward_chunk(
self,
xs: torch.Tensor,
offset: int,
required_cache_size: int,
att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0),
att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
def forward_chunk_by_chunk(
self,
xs: torch.Tensor,
decoding_chunk_size: int,
num_decoding_left_chunks: int = -1,
use_onnx=False,
) -> Tuple[torch.Tensor, torch.Tensor]:
class Paraformer(ASRModel):
def __init__(
self,
vocab_size: int,
encoder: TransformerEncoder,
decoder: TransformerDecoder,
ctc: CTC,
predictor,
ctc_weight: float = 0.5,
predictor_weight: float = 1.0,
predictor_bias: int = 0,
ignore_id: int = IGNORE_ID,
reverse_weight: float = 0.0,
lsm_weight: float = 0.0,
length_normalized_loss: bool = False,
):
def forward(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
text: torch.Tensor,
text_lengths: torch.Tensor,
) -> Dict[str, Optional[torch.Tensor]]:
def _calc_att_loss(
self,
encoder_out: torch.Tensor,
encoder_mask: torch.Tensor,
ys_pad: torch.Tensor,
ys_pad_lens: torch.Tensor,
) -> Tuple[torch.Tensor, float, torch.Tensor]:
def calc_predictor(self, encoder_out, encoder_mask):
def cal_decoder_with_predictor(
self, encoder_out, encoder_out_lens, sematic_embeds, ys_pad_lens
):
def recognize(self):
def paraformer_greedy_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
def paraformer_beam_search(
self,
speech: torch.Tensor,
speech_lengths: torch.Tensor,
beam_search: torch.nn.Module = None,
decoding_chunk_size: int = -1,
num_decoding_left_chunks: int = -1,
simulate_streaming: bool = False,
) -> Tuple[torch.Tensor, torch.Tensor]:
class Predictor(nn.Module):
def __init__(
self,
idim,
l_order,
r_order,
threshold=1.0,
dropout=0.1,
smooth_factor=1.0,
noise_threshold=0,
tail_threshold=0.45,
):
def forward(
self,
hidden,
target_label: Optional[torch.Tensor] = None,
mask: torch.Tensor = torch.tensor(0),
ignore_id: int = -1,
mask_chunk_predictor: Optional[torch.Tensor] = None,
target_label_length: Optional[torch.Tensor] = None,
):
def tail_process_fn(
self,
hidden,
alphas,
token_num: Optional[torch.Tensor] = None,
mask: Optional[torch.Tensor] = None,
):
def gen_frame_alignments(
self, alphas: torch.Tensor = None, encoder_sequence_length: torch.Tensor = None
):
def load_cmvn(cmvn_file, is_json):
def init_model(configs):
    """Assemble an ASR model from a parsed configuration dict.

    Builds the (optional) global CMVN front-end, an encoder, a decoder and a
    CTC head, then wraps them into one of three model types depending on which
    keys exist in ``configs``:

    * ``"predictor"`` present  -> Transducer (joint network + predictor)
    * ``"paraformer"`` present -> Paraformer (CIF predictor based)
    * otherwise                -> plain ASRModel

    Args:
        configs (dict): must provide "cmvn_file", "is_json_cmvn", "input_dim",
            "output_dim", "encoder_conf", "decoder_conf" and "model_conf".
            NOTE(review): this function mutates ``configs`` in place for
            transducer models (it fills in "predictor_conf"/"joint_conf"
            output sizes) — callers should not assume configs is untouched.

    Returns:
        torch.nn.Module: the constructed model.
    """
    # Global CMVN normalizes input features with a precomputed mean / inverse-std.
    if configs["cmvn_file"] is not None:
        mean, istd = load_cmvn(configs["cmvn_file"], configs["is_json_cmvn"])
        global_cmvn = GlobalCMVN(
            torch.from_numpy(mean).float(), torch.from_numpy(istd).float()
        )
    else:
        global_cmvn = None
    input_dim = configs["input_dim"]
    vocab_size = configs["output_dim"]
    # Encoder/decoder types default to conformer / bitransformer when unset.
    encoder_type = configs.get("encoder", "conformer")
    decoder_type = configs.get("decoder", "bitransformer")
    if encoder_type == "conformer":
        encoder = ConformerEncoder(
            input_dim, global_cmvn=global_cmvn, **configs["encoder_conf"]
        )
    elif encoder_type == "squeezeformer":
        encoder = SqueezeformerEncoder(
            input_dim, global_cmvn=global_cmvn, **configs["encoder_conf"]
        )
    elif encoder_type == "efficientConformer":
        # EfficientConformer additionally unpacks the nested "efficient_conf"
        # sub-dict (stride/group options) when present.
        encoder = EfficientConformerEncoder(
            input_dim,
            global_cmvn=global_cmvn,
            **configs["encoder_conf"],
            **(
                configs["encoder_conf"]["efficient_conf"]
                if "efficient_conf" in configs["encoder_conf"]
                else {}
            ),
        )
    else:
        encoder = TransformerEncoder(
            input_dim, global_cmvn=global_cmvn, **configs["encoder_conf"]
        )
    if decoder_type == "transformer":
        decoder = TransformerDecoder(
            vocab_size, encoder.output_size(), **configs["decoder_conf"]
        )
    else:
        # Bidirectional decoder requires a right-to-left branch and a reverse
        # loss weight strictly between 0 and 1.
        assert 0.0 < configs["model_conf"]["reverse_weight"] < 1.0
        assert configs["decoder_conf"]["r_num_blocks"] > 0
        decoder = BiTransformerDecoder(
            vocab_size, encoder.output_size(), **configs["decoder_conf"]
        )
    ctc = CTC(vocab_size, encoder.output_size())
    # Init joint CTC/Attention or Transducer model
    if "predictor" in configs:
        predictor_type = configs.get("predictor", "rnn")
        if predictor_type == "rnn":
            predictor = RNNPredictor(vocab_size, **configs["predictor_conf"])
        elif predictor_type == "embedding":
            predictor = EmbeddingPredictor(vocab_size, **configs["predictor_conf"])
            # Propagate the embedding size so the joint network can size itself.
            configs["predictor_conf"]["output_size"] = configs["predictor_conf"][
                "embed_size"
            ]
        elif predictor_type == "conv":
            predictor = ConvPredictor(vocab_size, **configs["predictor_conf"])
            configs["predictor_conf"]["output_size"] = configs["predictor_conf"][
                "embed_size"
            ]
        else:
            raise NotImplementedError("only rnn, embedding and conv type support now")
        configs["joint_conf"]["enc_output_size"] = configs["encoder_conf"][
            "output_size"
        ]
        configs["joint_conf"]["pred_output_size"] = configs["predictor_conf"][
            "output_size"
        ]
        joint = TransducerJoint(vocab_size, **configs["joint_conf"])
        model = Transducer(
            vocab_size=vocab_size,
            blank=0,
            predictor=predictor,
            encoder=encoder,
            attention_decoder=decoder,
            joint=joint,
            ctc=ctc,
            **configs["model_conf"],
        )
    elif "paraformer" in configs:
        # Paraformer uses a CIF predictor to estimate token boundaries.
        predictor = Predictor(**configs["cif_predictor_conf"])
        model = Paraformer(
            vocab_size=vocab_size,
            encoder=encoder,
            decoder=decoder,
            ctc=ctc,
            predictor=predictor,
            **configs["model_conf"],
        )
    else:
        model = ASRModel(
            vocab_size=vocab_size,
            encoder=encoder,
            decoder=decoder,
            ctc=ctc,
            lfmmi_dir=configs.get("lfmmi_dir", ""),
            **configs["model_conf"],
        )
    return model
17,440 | import logging
import os
import re
import yaml
import torch
from collections import OrderedDict
import datetime
def load_checkpoint(model: torch.nn.Module, path: str) -> dict:
    """Load model weights from ``path`` and return training infos from the sibling YAML.

    Args:
        model: module whose state dict is (partially) restored; missing/extra
            keys are tolerated because ``strict=False`` is used.
        path: checkpoint path, conventionally ending in ``.pt``.

    Returns:
        dict: contents of the ``.yaml`` file next to the checkpoint if it
            exists, otherwise an empty dict.
    """
    if torch.cuda.is_available():
        logging.info("Checkpoint: loading from checkpoint %s for GPU" % path)
        checkpoint = torch.load(path)
    else:
        logging.info("Checkpoint: loading from checkpoint %s for CPU" % path)
        checkpoint = torch.load(path, map_location="cpu")
    model.load_state_dict(checkpoint, strict=False)
    # Bug fix: escape the dot — the previous ".pt$" pattern matched any
    # character before "pt" (e.g. "apt"), not just the ".pt" extension.
    info_path = re.sub(r"\.pt$", ".yaml", path)
    configs = {}
    if os.path.exists(info_path):
        with open(info_path, "r") as fin:
            configs = yaml.load(fin, Loader=yaml.FullLoader)
    return configs
17,441 | import logging
import os
import re
import yaml
import torch
from collections import OrderedDict
import datetime
The provided code snippet includes necessary dependencies for implementing the `save_checkpoint` function. Write a Python function `def save_checkpoint(model: torch.nn.Module, path: str, infos=None)` to solve the following problem:
Args: infos (dict or None): any info you want to save.
Here is the function:
def save_checkpoint(model: torch.nn.Module, path: str, infos=None):
    """Save ``model``'s state dict to ``path`` and training infos to a sibling YAML.

    Args:
        model: model to save; (Distributed)DataParallel wrappers are unwrapped
            so the checkpoint stores plain parameter names.
        path: destination checkpoint path, conventionally ending in ``.pt``.
        infos (dict or None): any info you want to save.
    """
    logging.info("Checkpoint: save to checkpoint %s" % path)
    # Unwrap parallel wrappers; both expose the underlying model as .module.
    if isinstance(
        model, (torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel)
    ):
        state_dict = model.module.state_dict()
    else:
        state_dict = model.state_dict()
    torch.save(state_dict, path)
    # Bug fix: escape the dot — the previous ".pt$" pattern matched any
    # character before "pt" (e.g. "apt"), not just the ".pt" extension.
    info_path = re.sub(r"\.pt$", ".yaml", path)
    if infos is None:
        infos = {}
    infos["save_time"] = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    with open(info_path, "w") as fout:
        data = yaml.dump(infos)
        fout.write(data)
17,442 | import logging
import os
import re
import yaml
import torch
from collections import OrderedDict
import datetime
def filter_modules(model_state_dict, modules):
    """Keep the module prefixes that match at least one state-dict key.

    Args:
        model_state_dict: state dict of the checkpoint being loaded.
        modules: list of module-name prefixes requested by the user.

    Returns:
        list: the subset of ``modules`` that prefix-match some key;
            non-matching prefixes are reported via warnings and dropped.
    """
    model_keys = model_state_dict.keys()
    matched = []
    unmatched = []
    for prefix in modules:
        if any(key.startswith(prefix) for key in model_keys):
            matched.append(prefix)
        else:
            unmatched.append(prefix)
    if unmatched:
        logging.warning(
            "module(s) %s don't match or (partially match) "
            "available modules in model.",
            unmatched,
        )
        logging.warning("for information, the existing modules in model are:")
        logging.warning("%s", model_keys)
    return matched
def load_trained_modules(model: torch.nn.Module, args: None):
    """Pre-initialize selected encoder modules from a trained checkpoint.

    Copies parameters whose names start with one of ``args.enc_init_mods``
    from the checkpoint at ``args.enc_init`` into ``model``; every other
    parameter keeps its current value.
    """
    # Load encoder modules with pre-trained model(s).
    enc_model_path = args.enc_init
    enc_modules = args.enc_init_mods
    main_state_dict = model.state_dict()
    logging.warning("model(s) found for pre-initialization")
    if os.path.isfile(enc_model_path):
        logging.info("Checkpoint: loading from checkpoint %s for CPU" % enc_model_path)
        model_state_dict = torch.load(enc_model_path, map_location="cpu")
        modules = filter_modules(model_state_dict, enc_modules)
        # Keep only the checkpoint entries that belong to a requested module.
        partial_state_dict = OrderedDict(
            (name, tensor)
            for name, tensor in model_state_dict.items()
            if any(name.startswith(prefix) for prefix in modules)
        )
        main_state_dict.update(partial_state_dict)
    else:
        logging.warning("model was not found : %s", enc_model_path)
    model.load_state_dict(main_state_dict)
    configs = {}
    return configs
17,443 | import copy
def override_config(configs, override_list):
    """Return a deep copy of ``configs`` with dotted-key overrides applied.

    Args:
        configs (dict): base (possibly nested) configuration.
        override_list (list[str]): items of the form "<dotted.key> <value>";
            the value is coerced to the type of the existing entry, with
            booleans accepting "true"/"True" as truthy.

    Returns:
        dict: new config; malformed items and unknown keys are printed and
            skipped without modifying anything.
    """
    new_configs = copy.deepcopy(configs)
    for item in override_list:
        arr = item.split()
        if len(arr) != 2:
            print(f"the overrive {item} format not correct, skip it")
            continue
        keys = arr[0].split(".")
        s_configs = new_configs
        for i, key in enumerate(keys):
            if key not in s_configs:
                print(f"the overrive {item} format not correct, skip it")
                # Bug fix: actually skip unknown keys. Previously execution
                # fell through and raised KeyError on s_configs[key] below.
                break
            if i == len(keys) - 1:
                param_type = type(s_configs[key])
                if param_type != bool:
                    s_configs[key] = param_type(arr[1])
                else:
                    s_configs[key] = arr[1] in ["true", "True"]
                print(f"override {arr[0]} with {arr[1]}")
            else:
                s_configs = s_configs[key]
    return new_configs
17,444 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _squareroot_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 0.5
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr | null |
17,445 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _square_annealing(initial_lr, step, max_steps, min_lr):
mult = ((max_steps - step) / max_steps) ** 2
out_lr = initial_lr * mult
out_lr = max(out_lr, min_lr)
return out_lr | null |
17,446 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _cosine_annealing(initial_lr, step, max_steps, min_lr):
mult = 0.5 * (1 + math.cos(math.pi * step / max_steps))
out_lr = (initial_lr - min_lr) * mult + min_lr
return out_lr | null |
17,447 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _linear_warmup_with_cosine_annealing(
max_lr, warmup_steps, step, decay_steps, min_lr
):
assert max_lr > min_lr
# Use linear warmup for the initial part.
if warmup_steps > 0 and step <= warmup_steps:
return max_lr * float(step) / float(warmup_steps)
# For any steps larger than `decay_steps`, use `min_lr`.
if step > warmup_steps + decay_steps:
return min_lr
# If we are done with the warmup period, use the decay style.
num_steps_ = step - warmup_steps
decay_steps_ = decay_steps
decay_ratio = float(num_steps_) / float(decay_steps_)
assert decay_ratio >= 0.0
assert decay_ratio <= 1.0
delta_lr = max_lr - min_lr
coeff = 0.5 * (math.cos(math.pi * decay_ratio) + 1.0)
return min_lr + coeff * delta_lr | null |
17,448 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _poly_decay(initial_lr, step, decay_steps, power, min_lr, cycle):
if cycle:
multiplier = 1.0 if step == 0 else math.ceil(step / decay_steps)
decay_steps *= multiplier
else:
step = min(step, decay_steps)
p = step / decay_steps
lr = (initial_lr - min_lr) * math.pow(1.0 - p, power)
lr += min_lr
return lr | null |
17,449 | from typing import Union
import math
import warnings
import torch
from torch.optim.lr_scheduler import _LRScheduler
def _noam_hold_annealing(
initial_lr, step, warmup_steps, hold_steps, decay_rate, min_lr
):
# hold_steps = total number of steps
# to hold the LR, not the warmup + hold steps.
T_warmup_decay = max(1, warmup_steps**decay_rate)
T_hold_decay = max(1, (step - hold_steps) ** decay_rate)
lr = (initial_lr * T_warmup_decay) / T_hold_decay
lr = max(lr, min_lr)
return lr | null |
17,450 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
The provided code snippet includes necessary dependencies for implementing the `add_blank` function. Write a Python function `def add_blank(ys_pad: torch.Tensor, blank: int, ignore_id: int) -> torch.Tensor` to solve the following problem:
Prepad blank for transducer predictor Args: ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax) blank (int): index of <blank> Returns: ys_in (torch.Tensor) : (B, Lmax + 1) Examples: >>> blank = 0 >>> ignore_id = -1 >>> ys_pad tensor([[ 1, 2, 3, 4, 5], [ 4, 5, 6, -1, -1], [ 7, 8, 9, -1, -1]], dtype=torch.int32) >>> ys_in = add_blank(ys_pad, 0, -1) >>> ys_in tensor([[0, 1, 2, 3, 4, 5], [0, 4, 5, 6, 0, 0], [0, 7, 8, 9, 0, 0]])
Here is the function:
def add_blank(ys_pad: torch.Tensor, blank: int, ignore_id: int) -> torch.Tensor:
    """Prepend a blank token for the transducer predictor.

    Args:
        ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax)
        blank (int): index of <blank>
        ignore_id (int): padding index to be replaced by blank
    Returns:
        ys_in (torch.Tensor): (B, Lmax + 1), with padding replaced by blank.
    Examples:
        >>> ys_pad = torch.tensor([[1, 2, 3], [4, -1, -1]])
        >>> add_blank(ys_pad, 0, -1)
        tensor([[0, 1, 2, 3],
                [0, 4, 0, 0]])
    """
    batch = ys_pad.size(0)
    # One blank per row, prepended as the first column.
    blank_col = torch.full(
        (batch, 1), blank, dtype=torch.long, device=ys_pad.device
    )
    padded = torch.cat([blank_col, ys_pad], dim=1)  # [bs, Lmax+1]
    # Padding positions also become blank.
    return torch.where(padded == ignore_id, blank, padded)
17,451 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
def pad_list(xs: List[torch.Tensor], pad_value: int):
    """Right-pad a list of 1-D tensors into one (B, Tmax) tensor.

    Args:
        xs (List): List of Tensors [(T_1,), (T_2,), ..., (T_B,)].
        pad_value (float): Value used for the padded positions.
    Returns:
        Tensor: Padded tensor (B, Tmax).
    Examples:
        >>> pad_list([torch.ones(4), torch.ones(2), torch.ones(1)], 0)
        tensor([[1., 1., 1., 1.],
                [1., 1., 0., 0.],
                [1., 0., 0., 0.]])
    """
    longest = max(x.size(0) for x in xs)
    padded = torch.full(
        (len(xs), longest), pad_value, dtype=xs[0].dtype, device=xs[0].device
    )
    for row, x in enumerate(xs):
        padded[row, : x.size(0)] = x
    return padded
The provided code snippet includes necessary dependencies for implementing the `add_sos_eos` function. Write a Python function `def add_sos_eos( ys_pad: torch.Tensor, sos: int, eos: int, ignore_id: int ) -> Tuple[torch.Tensor, torch.Tensor]` to solve the following problem:
Add <sos> and <eos> labels. Args: ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax) sos (int): index of <sos> eos (int): index of <eos> ignore_id (int): index of padding Returns: ys_in (torch.Tensor) : (B, Lmax + 1) ys_out (torch.Tensor) : (B, Lmax + 1) Examples: >>> sos_id = 10 >>> eos_id = 11 >>> ignore_id = -1 >>> ys_pad tensor([[ 1, 2, 3, 4, 5], [ 4, 5, 6, -1, -1], [ 7, 8, 9, -1, -1]], dtype=torch.int32) >>> ys_in,ys_out=add_sos_eos(ys_pad, sos_id , eos_id, ignore_id) >>> ys_in tensor([[10, 1, 2, 3, 4, 5], [10, 4, 5, 6, 11, 11], [10, 7, 8, 9, 11, 11]]) >>> ys_out tensor([[ 1, 2, 3, 4, 5, 11], [ 4, 5, 6, 11, -1, -1], [ 7, 8, 9, 11, -1, -1]])
Here is the function:
def add_sos_eos(
    ys_pad: torch.Tensor, sos: int, eos: int, ignore_id: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Add <sos> and <eos> labels.

    Args:
        ys_pad (torch.Tensor): batch of padded target sequences (B, Lmax)
        sos (int): index of <sos>
        eos (int): index of <eos>
        ignore_id (int): index of padding

    Returns:
        ys_in (torch.Tensor): targets with <sos> prepended, padded with
            ``eos`` (B, Lmax + 1)
        ys_out (torch.Tensor): targets with <eos> appended, padded with
            ``ignore_id`` (B, Lmax + 1)

    Examples:
        >>> ys_pad = torch.tensor([[1, 2, 3], [4, -1, -1]])
        >>> ys_in, ys_out = add_sos_eos(ys_pad, 10, 11, -1)
        >>> ys_in
        tensor([[10,  1,  2,  3],
                [10,  4, 11, 11]])
        >>> ys_out
        tensor([[ 1,  2,  3, 11],
                [ 4, 11, -1, -1]])
    """
    device = ys_pad.device
    sos_tok = torch.tensor([sos], dtype=torch.long, device=device)
    eos_tok = torch.tensor([eos], dtype=torch.long, device=device)
    # Strip padding before attaching sentinel tokens.
    trimmed = [seq[seq != ignore_id] for seq in ys_pad]
    ys_in = [torch.cat([sos_tok, seq], dim=0) for seq in trimmed]
    ys_out = [torch.cat([seq, eos_tok], dim=0) for seq in trimmed]
    return pad_list(ys_in, eos), pad_list(ys_out, ignore_id)
17,452 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
The provided code snippet includes necessary dependencies for implementing the `reverse_pad_list` function. Write a Python function `def reverse_pad_list( ys_pad: torch.Tensor, ys_lens: torch.Tensor, pad_value: float = -1.0 ) -> torch.Tensor` to solve the following problem:
Reverse padding for the list of tensors. Args: ys_pad (tensor): The padded tensor (B, Tokenmax). ys_lens (tensor): The lens of token seqs (B) pad_value (int): Value for padding. Returns: Tensor: Padded tensor (B, Tokenmax). Examples: >>> x tensor([[1, 2, 3, 4], [5, 6, 7, 0], [8, 9, 0, 0]]) >>> pad_list(x, 0) tensor([[4, 3, 2, 1], [7, 6, 5, 0], [9, 8, 0, 0]])
Here is the function:
def reverse_pad_list(
    ys_pad: torch.Tensor, ys_lens: torch.Tensor, pad_value: float = -1.0
) -> torch.Tensor:
    """Reverse each padded sequence (up to its true length) and re-pad.

    Args:
        ys_pad (tensor): The padded tensor (B, Tokenmax).
        ys_lens (tensor): The lens of token seqs (B).
        pad_value (int): Value for padding.
    Returns:
        Tensor: Padded tensor (B, Tokenmax).
    Examples:
        >>> reverse_pad_list(torch.tensor([[1, 2, 3, 4], [5, 6, 7, 0]]),
        ...                  torch.tensor([4, 3]), 0.0)
        tensor([[4, 3, 2, 1],
                [7, 6, 5, 0]], dtype=torch.int32)
    """
    # Flip only the valid prefix of each row, then pad back to a rectangle.
    flipped = [
        seq.int()[:length].flip(0) for seq, length in zip(ys_pad, ys_lens)
    ]
    return pad_sequence(flipped, batch_first=True, padding_value=pad_value)
17,453 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
The provided code snippet includes necessary dependencies for implementing the `th_accuracy` function. Write a Python function `def th_accuracy( pad_outputs: torch.Tensor, pad_targets: torch.Tensor, ignore_label: int ) -> float` to solve the following problem:
Calculate accuracy. Args: pad_outputs (Tensor): Prediction tensors (B * Lmax, D). pad_targets (LongTensor): Target label tensors (B, Lmax). ignore_label (int): Ignore label id. Returns: float: Accuracy value (0.0 - 1.0).
Here is the function:
def th_accuracy(
    pad_outputs: torch.Tensor, pad_targets: torch.Tensor, ignore_label: int
) -> float:
    """Calculate accuracy over non-ignored positions.

    Args:
        pad_outputs (Tensor): Prediction tensors (B * Lmax, D).
        pad_targets (LongTensor): Target label tensors (B, Lmax).
        ignore_label (int): Ignore label id.
    Returns:
        float: Accuracy value (0.0 - 1.0).
    """
    batch, length = pad_targets.size(0), pad_targets.size(1)
    # Reshape the flat logits back to (B, Lmax, D) and take the argmax class.
    preds = pad_outputs.view(batch, length, pad_outputs.size(1)).argmax(2)
    valid = pad_targets != ignore_label
    correct = (preds.masked_select(valid) == pad_targets.masked_select(valid)).sum()
    total = valid.sum()
    return float(correct) / float(total)
17,454 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
def get_rnn(rnn_type: str) -> torch.nn.Module:
    """Map an RNN type name ("rnn" | "lstm" | "gru") to its torch.nn class."""
    rnn_classes = {
        "rnn": torch.nn.RNN,
        "lstm": torch.nn.LSTM,
        "gru": torch.nn.GRU,
    }
    assert rnn_type in rnn_classes
    return rnn_classes[rnn_type]
17,455 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
class Swish(torch.nn.Module):
    """Swish activation module: ``x * sigmoid(x)``."""

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply the Swish function element-wise."""
        return torch.sigmoid(x) * x
The provided code snippet includes necessary dependencies for implementing the `get_activation` function. Write a Python function `def get_activation(act)` to solve the following problem:
Return activation function.
Here is the function:
def get_activation(act):
    """Return an instance of the activation function named ``act``."""
    # Lazy load to avoid unused import
    from modules.wenet_extractor.transformer.swish import Swish

    registry = {
        "hardtanh": torch.nn.Hardtanh,
        "tanh": torch.nn.Tanh,
        "relu": torch.nn.ReLU,
        "selu": torch.nn.SELU,
        # Prefer the native SiLU when this torch version provides it.
        "swish": getattr(torch.nn, "SiLU", Swish),
        "gelu": torch.nn.GELU,
    }
    return registry[act]()
17,456 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
def get_subsample(config):
    """Return the time-subsampling factor implied by the encoder input layer."""
    subsample_rates = {"conv2d": 4, "conv2d6": 6, "conv2d8": 8}
    input_layer = config["encoder_conf"]["input_layer"]
    assert input_layer in subsample_rates
    return subsample_rates[input_layer]
17,457 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
def remove_duplicates_and_blank(hyp: List[int]) -> List[int]:
    """Collapse a CTC alignment: drop blanks (id 0) and merge repeated tokens."""
    collapsed: List[int] = []
    idx = 0
    n = len(hyp)
    while idx < n:
        token = hyp[idx]
        if token != 0:
            collapsed.append(token)
        # Skip the entire run of identical tokens.
        idx += 1
        while idx < n and hyp[idx] == token:
            idx += 1
    return collapsed
17,458 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
def replace_duplicates_with_blank(hyp: List[int]) -> List[int]:
    """Keep the first token of each repeated non-blank run; replace repeats with blank (0)."""
    out: List[int] = []
    pos = 0
    n = len(hyp)
    while pos < n:
        token = hyp[pos]
        out.append(token)
        pos += 1
        # Blank out the remainder of a repeated non-blank run.
        while pos < n and hyp[pos] == token and token != 0:
            out.append(0)
            pos += 1
    return out
17,459 | import math
from typing import List, Tuple
import torch
from torch.nn.utils.rnn import pad_sequence
The provided code snippet includes necessary dependencies for implementing the `log_add` function. Write a Python function `def log_add(args: List[int]) -> float` to solve the following problem:
Stable log add
Here is the function:
def log_add(args: List[int]) -> float:
    """Numerically stable log(sum(exp(args))); returns -inf for all -inf input."""
    if all(a == -float("inf") for a in args):
        return -float("inf")
    # Shift by the maximum so the exponentials never overflow.
    a_max = max(args)
    return a_max + math.log(sum(math.exp(a - a_max) for a in args))
17,460 | import torch
import torch.nn as nn
import torch.nn.functional as F
import math
if "sinc" in dir(torch):
sinc = torch.sinc
else:
# This code is adopted from adefossez's julius.core.sinc under the MIT License
# https://adefossez.github.io/julius/julius/core.html
def sinc(x: torch.Tensor):
"""
Implementation of sinc, i.e. sin(pi * x) / (pi * x)
__Warning__: Different to julius.sinc, the input is multiplied by `pi`!
"""
return torch.where(
x == 0,
torch.tensor(1.0, device=x.device, dtype=x.dtype),
torch.sin(math.pi * x) / math.pi / x,
)
def kaiser_sinc_filter1d(
cutoff, half_width, kernel_size
): # return filter [1,1,kernel_size]
even = kernel_size % 2 == 0
half_size = kernel_size // 2
# For kaiser window
delta_f = 4 * half_width
A = 2.285 * (half_size - 1) * math.pi * delta_f + 7.95
if A > 50.0:
beta = 0.1102 * (A - 8.7)
elif A >= 21.0:
beta = 0.5842 * (A - 21) ** 0.4 + 0.07886 * (A - 21.0)
else:
beta = 0.0
window = torch.kaiser_window(kernel_size, beta=beta, periodic=False)
# ratio = 0.5/cutoff -> 2 * cutoff = 1 / ratio
if even:
time = torch.arange(-half_size, half_size) + 0.5
else:
time = torch.arange(kernel_size) - half_size
if cutoff == 0:
filter_ = torch.zeros_like(time)
else:
filter_ = 2 * cutoff * window * sinc(2 * cutoff * time)
# Normalize filter to have sum = 1, otherwise we will have a small leakage
# of the constant component in the input signal.
filter_ /= filter_.sum()
filter = filter_.view(1, 1, kernel_size)
return filter | null |
17,461 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `normalization` function. Write a Python function `def normalization(channels: int, groups: int = 32)` to solve the following problem:
r"""Make a standard normalization layer, i.e. GroupNorm. Args: channels: number of input channels. groups: number of groups for group normalization. Returns: a ``nn.Module`` for normalization.
Here is the function:
def normalization(channels: int, groups: int = 32):
    r"""Build the project's standard normalization layer (``nn.GroupNorm``).

    Args:
        channels: number of input channels.
        groups: number of groups for group normalization (must be positive).

    Returns:
        a ``nn.Module`` performing group normalization.
    """
    assert groups > 0, f"invalid number of groups: {groups}"
    return nn.GroupNorm(groups, channels)
17,462 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `Linear` function. Write a Python function `def Linear(*args, **kwargs)` to solve the following problem:
r"""Wrapper of ``nn.Linear`` with kaiming_normal_ initialization.
Here is the function:
def Linear(*args, **kwargs):
    r"""Wrapper of ``nn.Linear`` with kaiming_normal_ initialization."""
    linear = nn.Linear(*args, **kwargs)
    nn.init.kaiming_normal_(linear.weight)
    return linear
17,463 | import torch
import torch.nn as nn
def Conv1d(*args, **kwargs):
r"""Wrapper of ``nn.Conv1d`` with kaiming_normal_ initialization."""
layer = nn.Conv1d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
def Conv2d(*args, **kwargs):
r"""Wrapper of ``nn.Conv2d`` with kaiming_normal_ initialization."""
layer = nn.Conv2d(*args, **kwargs)
nn.init.kaiming_normal_(layer.weight)
return layer
The provided code snippet includes necessary dependencies for implementing the `ConvNd` function. Write a Python function `def ConvNd(dims: int = 1, *args, **kwargs)` to solve the following problem:
r"""Wrapper of N-dimension convolution with kaiming_normal_ initialization. Args: dims: number of dimensions of the convolution.
Here is the function:
def ConvNd(dims: int = 1, *args, **kwargs):
r"""Wrapper of N-dimension convolution with kaiming_normal_ initialization.
Args:
dims: number of dimensions of the convolution.
"""
if dims == 1:
return Conv1d(*args, **kwargs)
elif dims == 2:
return Conv2d(*args, **kwargs)
else:
raise ValueError(f"invalid number of dimensions: {dims}") | r"""Wrapper of N-dimension convolution with kaiming_normal_ initialization. Args: dims: number of dimensions of the convolution. |
17,464 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `zero_module` function. Write a Python function `def zero_module(module: nn.Module)` to solve the following problem:
r"""Zero out the parameters of a module and return it.
Here is the function:
def zero_module(module: nn.Module):
r"""Zero out the parameters of a module and return it."""
nn.init.zeros_(module.weight)
nn.init.zeros_(module.bias)
return module | r"""Zero out the parameters of a module and return it. |
17,465 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `scale_module` function. Write a Python function `def scale_module(module: nn.Module, scale)` to solve the following problem:
r"""Scale the parameters of a module and return it.
Here is the function:
def scale_module(module: nn.Module, scale):
r"""Scale the parameters of a module and return it."""
for p in module.parameters():
p.detach().mul_(scale)
return module | r"""Scale the parameters of a module and return it. |
17,466 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `mean_flat` function. Write a Python function `def mean_flat(tensor: torch.Tensor)` to solve the following problem:
r"""Take the mean over all non-batch dimensions.
Here is the function:
def mean_flat(tensor: torch.Tensor):
r"""Take the mean over all non-batch dimensions."""
return tensor.mean(dim=tuple(range(1, tensor.dim()))) | r"""Take the mean over all non-batch dimensions. |
17,467 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `append_dims` function. Write a Python function `def append_dims(x, target_dims)` to solve the following problem:
r"""Appends dimensions to the end of a tensor until it has target_dims dimensions.
Here is the function:
def append_dims(x, target_dims):
r"""Appends dimensions to the end of a tensor until
it has target_dims dimensions.
"""
dims_to_append = target_dims - x.dim()
if dims_to_append < 0:
raise ValueError(
f"input has {x.dim()} dims but target_dims is {target_dims}, which is less"
)
return x[(...,) + (None,) * dims_to_append] | r"""Appends dimensions to the end of a tensor until it has target_dims dimensions. |
17,468 | import torch
import torch.nn as nn
The provided code snippet includes necessary dependencies for implementing the `append_zero` function. Write a Python function `def append_zero(x, count=1)` to solve the following problem:
r"""Appends ``count`` zeros to the end of a tensor along the last dimension.
Here is the function:
def append_zero(x, count=1):
r"""Appends ``count`` zeros to the end of a tensor along the last dimension."""
assert count > 0, f"invalid count: {count}"
return torch.cat([x, x.new_zeros((*x.size()[:-1], count))], dim=-1) | r"""Appends ``count`` zeros to the end of a tensor along the last dimension. |
17,469 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def _compute_scale_factor(
x: Tensor,
channel_dim: int,
min_abs: float,
max_abs: float,
gain_factor: float,
max_factor: float,
) -> Tensor:
if channel_dim < 0:
channel_dim += x.ndim
sum_dims = [d for d in range(x.ndim) if d != channel_dim]
x_abs_mean = torch.mean(x.abs(), dim=sum_dims).to(torch.float32)
if min_abs == 0.0:
below_threshold = 0.0
else:
# below_threshold is 0 if x_abs_mean > min_abs, can be at most max_factor if
# x_abs)_mean , min_abs.
below_threshold = ((min_abs - x_abs_mean) * (gain_factor / min_abs)).clamp(
min=0, max=max_factor
)
above_threshold = ((x_abs_mean - max_abs) * (gain_factor / max_abs)).clamp(
min=0, max=max_factor
)
return below_threshold - above_threshold | null |
17,470 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def _compute_sign_factor(
x: Tensor,
channel_dim: int,
min_positive: float,
max_positive: float,
gain_factor: float,
max_factor: float,
) -> Tensor:
if channel_dim < 0:
channel_dim += x.ndim
sum_dims = [d for d in range(x.ndim) if d != channel_dim]
proportion_positive = torch.mean((x > 0).to(torch.float32), dim=sum_dims)
if min_positive == 0.0:
factor1 = 0.0
else:
# 0 if proportion_positive >= min_positive, else can be
# as large as max_factor.
factor1 = (
(min_positive - proportion_positive) * (gain_factor / min_positive)
).clamp_(min=0, max=max_factor)
if max_positive == 1.0:
factor2 = 0.0
else:
# 0 if self.proportion_positive <= max_positive, else can be
# as large as -max_factor.
factor2 = (
(proportion_positive - max_positive) * (gain_factor / (1.0 - max_positive))
).clamp_(min=0, max=max_factor)
sign_factor = factor1 - factor2
# require min_positive != 0 or max_positive != 1:
assert not isinstance(sign_factor, float)
return sign_factor | null |
17,471 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class RandomClampFunction(torch.autograd.Function):
def forward(
ctx,
x: Tensor,
min: Optional[float],
max: Optional[float],
prob: float,
reflect: float,
) -> Tensor:
x_clamped = torch.clamp(x, min=min, max=max)
mask = torch.rand_like(x) < prob
ans = torch.where(mask, x_clamped, x)
if x.requires_grad:
ctx.save_for_backward(ans == x)
ctx.reflect = reflect
if reflect != 0.0:
ans = ans * (1.0 + reflect) - (x * reflect)
return ans
def backward(ctx, ans_grad: Tensor) -> Tuple[Tensor, None, None, None, None]:
(is_same,) = ctx.saved_tensors
x_grad = ans_grad * is_same.to(ans_grad.dtype)
reflect = ctx.reflect
if reflect != 0.0:
x_grad = x_grad * (1.0 + reflect) - (ans_grad * reflect)
return x_grad, None, None, None, None
def random_clamp(
x: Tensor,
min: Optional[float] = None,
max: Optional[float] = None,
prob: float = 0.5,
reflect: float = 0.0,
):
return RandomClampFunction.apply(x, min, max, prob, reflect) | null |
17,472 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `random_cast_to_half` function. Write a Python function `def random_cast_to_half(x: Tensor, min_abs: float = 5.0e-06) -> Tensor` to solve the following problem:
A randomized way of casting a floating point value to half precision.
Here is the function:
def random_cast_to_half(x: Tensor, min_abs: float = 5.0e-06) -> Tensor:
"""
A randomized way of casting a floating point value to half precision.
"""
if x.dtype == torch.float16:
return x
x_abs = x.abs()
is_too_small = x_abs < min_abs
# for elements where is_too_small is true, random_val will contain +-min_abs with
# probability (x.abs() / min_abs), and 0.0 otherwise. [so this preserves expectations,
# for those elements].
random_val = min_abs * x.sign() * (torch.rand_like(x) * min_abs < x_abs)
return torch.where(is_too_small, random_val, x).to(torch.float16) | A randomized way of casting a floating point value to half precision. |
17,473 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
The provided code snippet includes necessary dependencies for implementing the `ScaledLinear` function. Write a Python function `def ScaledLinear(*args, initial_scale: float = 1.0, **kwargs) -> nn.Linear` to solve the following problem:
Behaves like a constructor of a modified version of nn.Linear that gives an easy way to set the default initial parameter scale. Args: Accepts the standard args and kwargs that nn.Linear accepts e.g. in_features, out_features, bias=False. initial_scale: you can override this if you want to increase or decrease the initial magnitude of the module's output (affects the initialization of weight_scale and bias_scale). Another option, if you want to do something like this, is to re-initialize the parameters.
Here is the function:
def ScaledLinear(*args, initial_scale: float = 1.0, **kwargs) -> nn.Linear:
"""
Behaves like a constructor of a modified version of nn.Linear
that gives an easy way to set the default initial parameter scale.
Args:
Accepts the standard args and kwargs that nn.Linear accepts
e.g. in_features, out_features, bias=False.
initial_scale: you can override this if you want to increase
or decrease the initial magnitude of the module's output
(affects the initialization of weight_scale and bias_scale).
Another option, if you want to do something like this, is
to re-initialize the parameters.
"""
ans = nn.Linear(*args, **kwargs)
with torch.no_grad():
ans.weight[:] *= initial_scale
if ans.bias is not None:
torch.nn.init.uniform_(ans.bias, -0.1 * initial_scale, 0.1 * initial_scale)
return ans | Behaves like a constructor of a modified version of nn.Linear that gives an easy way to set the default initial parameter scale. Args: Accepts the standard args and kwargs that nn.Linear accepts e.g. in_features, out_features, bias=False. initial_scale: you can override this if you want to increase or decrease the initial magnitude of the module's output (affects the initialization of weight_scale and bias_scale). Another option, if you want to do something like this, is to re-initialize the parameters. |
17,474 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
def ScaledConv1d(
*args,
initial_scale: float = 1.0,
kernel_size: int = 3,
padding: str = "same",
**kwargs,
) -> nn.Conv1d:
"""
Behaves like a constructor of a modified version of nn.Conv1d
that gives an easy way to set the default initial parameter scale.
Args:
Accepts the standard args and kwargs that nn.Linear accepts
e.g. in_features, out_features, bias=False.
initial_scale: you can override this if you want to increase
or decrease the initial magnitude of the module's output
(affects the initialization of weight_scale and bias_scale).
Another option, if you want to do something like this, is
to re-initialize the parameters.
"""
ans = nn.Conv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs)
with torch.no_grad():
ans.weight[:] *= initial_scale
if ans.bias is not None:
torch.nn.init.uniform_(ans.bias, -0.1 * initial_scale, 0.1 * initial_scale)
return ans
The provided code snippet includes necessary dependencies for implementing the `TransposeScaledConv1d` function. Write a Python function `def TransposeScaledConv1d( *args, initial_scale: float = 1.0, kernel_size: int = 3, padding: str = "same", **kwargs, ) -> nn.Sequential` to solve the following problem:
Transpose -> ScaledConv1d
Here is the function:
def TransposeScaledConv1d(
*args,
initial_scale: float = 1.0,
kernel_size: int = 3,
padding: str = "same",
**kwargs,
) -> nn.Sequential:
"""
Transpose -> ScaledConv1d
"""
return nn.Sequential(
Transpose(),
ScaledConv1d(
*args,
initial_scale=initial_scale,
kernel_size=kernel_size,
padding=padding,
**kwargs,
),
) | Transpose -> ScaledConv1d |
17,475 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
def ScaledConv1d(
*args,
initial_scale: float = 1.0,
kernel_size: int = 3,
padding: str = "same",
**kwargs,
) -> nn.Conv1d:
"""
Behaves like a constructor of a modified version of nn.Conv1d
that gives an easy way to set the default initial parameter scale.
Args:
Accepts the standard args and kwargs that nn.Linear accepts
e.g. in_features, out_features, bias=False.
initial_scale: you can override this if you want to increase
or decrease the initial magnitude of the module's output
(affects the initialization of weight_scale and bias_scale).
Another option, if you want to do something like this, is
to re-initialize the parameters.
"""
ans = nn.Conv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs)
with torch.no_grad():
ans.weight[:] *= initial_scale
if ans.bias is not None:
torch.nn.init.uniform_(ans.bias, -0.1 * initial_scale, 0.1 * initial_scale)
return ans
The provided code snippet includes necessary dependencies for implementing the `ScaledConv1dTranspose` function. Write a Python function `def ScaledConv1dTranspose( *args, initial_scale: float = 1.0, kernel_size: int = 3, padding: str = "same", **kwargs, ) -> nn.Sequential` to solve the following problem:
Transpose -> ScaledConv1d
Here is the function:
def ScaledConv1dTranspose(
*args,
initial_scale: float = 1.0,
kernel_size: int = 3,
padding: str = "same",
**kwargs,
) -> nn.Sequential:
"""
Transpose -> ScaledConv1d
"""
return nn.Sequential(
ScaledConv1d(
*args,
initial_scale=initial_scale,
kernel_size=kernel_size,
padding=padding,
**kwargs,
),
Transpose(),
) | Transpose -> ScaledConv1d |
17,476 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
The provided code snippet includes necessary dependencies for implementing the `TransposeConv1d` function. Write a Python function `def TransposeConv1d( *args, kernel_size: int = 3, padding: str = "same", **kwargs ) -> nn.Sequential` to solve the following problem:
Transpose -> Conv1d
Here is the function:
def TransposeConv1d(
*args, kernel_size: int = 3, padding: str = "same", **kwargs
) -> nn.Sequential:
"""
Transpose -> Conv1d
"""
return nn.Sequential(
Transpose(),
nn.Conv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs),
) | Transpose -> Conv1d |
17,477 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
The provided code snippet includes necessary dependencies for implementing the `Conv1dTranspose` function. Write a Python function `def Conv1dTranspose( *args, kernel_size: int = 3, padding: str = "same", **kwargs ) -> nn.Sequential` to solve the following problem:
ScaledConv1d -> Transpose
Here is the function:
def Conv1dTranspose(
*args, kernel_size: int = 3, padding: str = "same", **kwargs
) -> nn.Sequential:
"""
ScaledConv1d -> Transpose
"""
return nn.Sequential(
nn.Conv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs),
Transpose(),
) | ScaledConv1d -> Transpose |
17,478 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
class SRConv1d(SRLinear):
def __init__(
self,
in_features,
out_features,
kernel_size,
stride: int = 1,
padding: str = "same",
bias: bool = True,
**kwargs,
):
in_features = in_features * kernel_size
super().__init__(in_features, out_features, bias=bias, **kwargs)
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def forward(self, x):
in_features = self.in_features // self.kernel_size
weight = self.get_weight().view(
self.out_features, in_features, self.kernel_size
)
return nn.functional.conv1d(
x, weight, bias=self.bias, stride=self.stride, padding=self.padding
)
The provided code snippet includes necessary dependencies for implementing the `TransposeSRConv1d` function. Write a Python function `def TransposeSRConv1d( *args, kernel_size: int = 3, padding: str = "same", **kwargs ) -> nn.Sequential` to solve the following problem:
Transpose -> SRConv1d
Here is the function:
def TransposeSRConv1d(
*args, kernel_size: int = 3, padding: str = "same", **kwargs
) -> nn.Sequential:
"""
Transpose -> SRConv1d
"""
return nn.Sequential(
Transpose(),
SRConv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs),
) | Transpose -> SRConv1d |
17,479 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Transpose(nn.Identity):
"""(N, T, D) -> (N, D, T)"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return input.transpose(1, 2)
class SRConv1d(SRLinear):
def __init__(
self,
in_features,
out_features,
kernel_size,
stride: int = 1,
padding: str = "same",
bias: bool = True,
**kwargs,
):
in_features = in_features * kernel_size
super().__init__(in_features, out_features, bias=bias, **kwargs)
nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
self.kernel_size = kernel_size
self.stride = stride
self.padding = padding
def forward(self, x):
in_features = self.in_features // self.kernel_size
weight = self.get_weight().view(
self.out_features, in_features, self.kernel_size
)
return nn.functional.conv1d(
x, weight, bias=self.bias, stride=self.stride, padding=self.padding
)
The provided code snippet includes necessary dependencies for implementing the `SRConv1dTranspose` function. Write a Python function `def SRConv1dTranspose( *args, kernel_size: int = 3, padding: str = "same", **kwargs ) -> nn.Sequential` to solve the following problem:
SRConv1d -> Transpose
Here is the function:
def SRConv1dTranspose(
*args, kernel_size: int = 3, padding: str = "same", **kwargs
) -> nn.Sequential:
"""
SRConv1d -> Transpose
"""
return nn.Sequential(
SRConv1d(*args, kernel_size=kernel_size, padding=padding, **kwargs),
Transpose(),
) | SRConv1d -> Transpose |
17,480 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def with_loss(x, y):
if torch.jit.is_scripting() or torch.jit.is_tracing():
return x
# returns x but adds y.sum() to the loss function.
return WithLoss.apply(x, y)
The provided code snippet includes necessary dependencies for implementing the `penalize_abs_values_gt` function. Write a Python function `def penalize_abs_values_gt(x: Tensor, limit: float, penalty: float) -> Tensor` to solve the following problem:
Returns x unmodified, but in backprop will put a penalty for the excess of the absolute values of elements of x over the limit "limit". E.g. if limit == 10.0, then if x has any values over 10 it will get a penalty. Caution: the value of this penalty will be affected by grad scaling used in automatic mixed precision training. For this reasons we use this, it shouldn't really matter, or may even be helpful; we just use this to disallow really implausible values of scores to be given to softmax.
Here is the function:
def penalize_abs_values_gt(x: Tensor, limit: float, penalty: float) -> Tensor:
"""
Returns x unmodified, but in backprop will put a penalty for the excess of
the absolute values of elements of x over the limit "limit". E.g. if
limit == 10.0, then if x has any values over 10 it will get a penalty.
Caution: the value of this penalty will be affected by grad scaling used
in automatic mixed precision training. For this reasons we use this,
it shouldn't really matter, or may even be helpful; we just use this
to disallow really implausible values of scores to be given to softmax.
"""
x_sign = x.sign()
over_limit = (x.abs() - limit) > 0
# The following is a memory efficient way to penalize the absolute values of
# x that's over the limit. (The memory efficiency comes when you think
# about which items torch needs to cache for the autograd, and which ones it
# can throw away). The numerical value of aux_loss as computed here will
# actually be larger than it should be, by limit * over_limit.sum(), but it
# has the same derivative as the real aux_loss which is penalty * (x.abs() -
# limit).relu().
aux_loss = penalty * ((x_sign * over_limit).to(torch.int8) * x)
# note: we don't do sum() here on aux)_loss, but it's as if we had done
# sum() due to how with_loss() works.
x = with_loss(x, aux_loss)
# you must use x for something, or this will be ineffective.
return x | Returns x unmodified, but in backprop will put a penalty for the excess of the absolute values of elements of x over the limit "limit". E.g. if limit == 10.0, then if x has any values over 10 it will get a penalty. Caution: the value of this penalty will be affected by grad scaling used in automatic mixed precision training. For this reasons we use this, it shouldn't really matter, or may even be helpful; we just use this to disallow really implausible values of scores to be given to softmax. |
17,481 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def _diag(x: Tensor): # like .diag(), but works for tensors with 3 dims.
if x.ndim == 2:
return x.diag()
else:
(batch, dim, dim) = x.shape
x = x.reshape(batch, dim * dim)
x = x[:, :: dim + 1]
assert x.shape == (batch, dim)
return x
The provided code snippet includes necessary dependencies for implementing the `_whitening_metric` function. Write a Python function `def _whitening_metric(x: Tensor, num_groups: int)` to solve the following problem:
Computes the "whitening metric", a value which will be 1.0 if all the eigenvalues of of the centered feature covariance are the same within each group's covariance matrix and also between groups. Args: x: a Tensor of shape (*, num_channels) num_groups: the number of groups of channels, a number >=1 that divides num_channels Returns: Returns a scalar Tensor that will be 1.0 if the data is "perfectly white" and greater than 1.0 otherwise.
Here is the function:
def _whitening_metric(x: Tensor, num_groups: int):
"""
Computes the "whitening metric", a value which will be 1.0 if all the eigenvalues of
of the centered feature covariance are the same within each group's covariance matrix
and also between groups.
Args:
x: a Tensor of shape (*, num_channels)
num_groups: the number of groups of channels, a number >=1 that divides num_channels
Returns:
Returns a scalar Tensor that will be 1.0 if the data is "perfectly white" and
greater than 1.0 otherwise.
"""
assert x.dtype != torch.float16
x = x.reshape(-1, x.shape[-1])
(num_frames, num_channels) = x.shape
assert num_channels % num_groups == 0
channels_per_group = num_channels // num_groups
x = x.reshape(num_frames, num_groups, channels_per_group).transpose(0, 1)
# x now has shape (num_groups, num_frames, channels_per_group)
# subtract the mean so we use the centered, not uncentered, covariance.
# My experience has been that when we "mess with the gradients" like this,
# it's better not do anything that tries to move the mean around, because
# that can easily cause instability.
x = x - x.mean(dim=1, keepdim=True)
# x_covar: (num_groups, channels_per_group, channels_per_group)
x_covar = torch.matmul(x.transpose(1, 2), x)
x_covar_mean_diag = _diag(x_covar).mean()
# the following expression is what we'd get if we took the matrix product
# of each covariance and measured the mean of its trace, i.e.
# the same as _diag(torch.matmul(x_covar, x_covar)).mean().
x_covarsq_mean_diag = (x_covar**2).sum() / (num_groups * channels_per_group)
# this metric will be >= 1.0; the larger it is, the less 'white' the data was.
metric = x_covarsq_mean_diag / (x_covar_mean_diag**2 + 1.0e-20)
return metric | Computes the "whitening metric", a value which will be 1.0 if all the eigenvalues of of the centered feature covariance are the same within each group's covariance matrix and also between groups. Args: x: a Tensor of shape (*, num_channels) num_groups: the number of groups of channels, a number >=1 that divides num_channels Returns: Returns a scalar Tensor that will be 1.0 if the data is "perfectly white" and greater than 1.0 otherwise. |
17,482 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def _no_op(x: Tensor) -> Tensor:
if torch.jit.is_scripting() or torch.jit.is_tracing():
return x
else:
# a no-op function that will have a node in the autograd graph,
# to avoid certain bugs relating to backward hooks
return x.chunk(1, dim=-1)[0] | null |
17,483 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class ActivationBalancer(torch.nn.Module):
"""
Modifies the backpropped derivatives of a function to try to encourage, for
each channel, that it is positive at least a proportion `threshold` of the
time. It does this by multiplying negative derivative values by up to
(1+max_factor), and positive derivative values by up to (1-max_factor),
interpolated from 1 at the threshold to those extremal values when none
of the inputs are positive.
Args:
num_channels: the number of channels
channel_dim: the dimension/axis corresponding to the channel, e.g.
-1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
min_positive: the minimum, per channel, of the proportion of the time
that (x > 0), below which we start to modify the derivatives.
max_positive: the maximum, per channel, of the proportion of the time
that (x > 0), above which we start to modify the derivatives.
max_factor: the maximum factor by which we modify the derivatives for
either the sign constraint or the magnitude constraint;
e.g. with max_factor=0.02, the the derivatives would be multiplied by
values in the range [0.98..1.02].
sign_gain_factor: determines the 'gain' with which we increase the
change in gradient once the constraints on min_positive and max_positive
are violated.
scale_gain_factor: determines the 'gain' with which we increase the
change in gradient once the constraints on min_abs and max_abs
are violated.
min_abs: the minimum average-absolute-value difference from the mean
value per channel, which we allow, before we start to modify
the derivatives to prevent this.
max_abs: the maximum average-absolute-value difference from the mean
value per channel, which we allow, before we start to modify
the derivatives to prevent this.
min_prob: determines the minimum probability with which we modify the
gradients for the {min,max}_positive and {min,max}_abs constraints,
on each forward(). This is done randomly to prevent all layers
from doing it at the same time. Early in training we may use
higher probabilities than this; it will decay to this value.
"""
def __init__(
self,
num_channels: int,
channel_dim: int,
min_positive: float = 0.05,
max_positive: float = 0.95,
max_factor: float = 0.04,
sign_gain_factor: float = 0.01,
scale_gain_factor: float = 0.02,
min_abs: float = 0.2,
max_abs: float = 100.0,
min_prob: float = 0.1,
):
super(ActivationBalancer, self).__init__()
self.num_channels = num_channels
self.channel_dim = channel_dim
self.min_positive = min_positive
self.max_positive = max_positive
self.max_factor = max_factor
self.min_abs = min_abs
self.max_abs = max_abs
self.min_prob = min_prob
self.sign_gain_factor = sign_gain_factor
self.scale_gain_factor = scale_gain_factor
# count measures how many times the forward() function has been called.
# We occasionally sync this to a tensor called `count`, that exists to
# make sure it is synced to disk when we load and save the model.
self.cpu_count = 0
self.register_buffer("count", torch.tensor(0, dtype=torch.int64))
def forward(self, x: Tensor) -> Tensor:
if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
return _no_op(x)
count = self.cpu_count
self.cpu_count += 1
if random.random() < 0.01:
# Occasionally sync self.cpu_count with self.count.
# count affects the decay of 'prob'. don't do this on every iter,
# because syncing with the GPU is slow.
self.cpu_count = max(self.cpu_count, self.count.item())
self.count.fill_(self.cpu_count)
# the prob of doing some work exponentially decreases from 0.5 till it hits
# a floor at min_prob (==0.1, by default)
prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))
if random.random() < prob:
sign_gain_factor = 0.5
if self.min_positive != 0.0 or self.max_positive != 1.0:
sign_factor = _compute_sign_factor(
x,
self.channel_dim,
self.min_positive,
self.max_positive,
gain_factor=self.sign_gain_factor / prob,
max_factor=self.max_factor,
)
else:
sign_factor = None
scale_factor = _compute_scale_factor(
x.detach(),
self.channel_dim,
min_abs=self.min_abs,
max_abs=self.max_abs,
gain_factor=self.scale_gain_factor / prob,
max_factor=self.max_factor,
)
return ActivationBalancerFunction.apply(
x,
scale_factor,
sign_factor,
self.channel_dim,
)
else:
return _no_op(x)
class DoubleSwish(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
"""Return double-swish activation function which is an approximation to Swish(Swish(x)),
that we approximate closely with x * sigmoid(x-1).
"""
if torch.jit.is_scripting() or torch.jit.is_tracing():
return x * torch.sigmoid(x - 1.0)
return DoubleSwishFunction.apply(x)
The provided code snippet includes necessary dependencies for implementing the `BalancedDoubleSwish` function. Write a Python function `def BalancedDoubleSwish( d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25 ) -> nn.Sequential` to solve the following problem:
ActivationBalancer -> DoubleSwish
Here is the function:
def BalancedDoubleSwish(
d_model, channel_dim=-1, max_abs=10.0, min_prob=0.25
) -> nn.Sequential:
"""
ActivationBalancer -> DoubleSwish
"""
balancer = ActivationBalancer(
d_model, channel_dim=channel_dim, max_abs=max_abs, min_prob=min_prob
)
return nn.Sequential(
balancer,
DoubleSwish(),
) | ActivationBalancer -> DoubleSwish |
17,484 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class MaxEig(torch.nn.Module):
    """
    Modifies the backpropped derivatives of a function to try to discourage
    that any given direction in activation space accounts for more than
    a specified proportion of the covariance (e.g. 0.2).

    Args:
       num_channels: the number of channels
       channel_dim: the dimension/axis corresponding to the channel, e.g.
           -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
       max_var_per_eig: the maximum proportion of the variance of the
           features/channels, after mean subtraction, that can come from
           any given eigenvalue.
       min_prob: the minimum probability with which we apply this during any invocation
           of forward(), assuming last time we applied the constraint it was
           not active; supplied for speed.
       scale: determines the scale with which we modify the gradients, relative
           to the existing / unmodified gradients
    """

    def __init__(
        self,
        num_channels: int,
        channel_dim: int,
        max_var_per_eig: float = 0.2,
        min_prob: float = 0.01,
        scale: float = 0.01,
    ):
        super(MaxEig, self).__init__()
        self.num_channels = num_channels
        self.channel_dim = channel_dim
        self.scale = scale
        # 0.0 disables the constraint entirely; otherwise the limit must exceed
        # the 1/num_channels proportion that a perfectly "white" covariance
        # would give, or it could never be satisfied.
        assert max_var_per_eig == 0.0 or max_var_per_eig > 1.0 / num_channels
        self.max_var_per_eig = max_var_per_eig

        # we figure out the dominant direction using the power method: starting with
        # a random vector, keep multiplying by the covariance and renormalizing.
        with torch.no_grad():
            # arbitrary.. would use randn() but want to leave the rest of the model's
            # random parameters unchanged for comparison
            direction = torch.arange(num_channels).to(torch.float)
            direction = direction / direction.norm()
            self.register_buffer("max_eig_direction", direction)
        self.min_prob = min_prob
        # cur_prob is the current probability we'll use to apply the ActivationBalancer.
        # We'll regress this towards prob, each time we try to apply it and it is not
        # active.
        self.cur_prob = 1.0

    def forward(self, x: Tensor) -> Tensor:
        # Skip entirely under JIT, when disabled, or (randomly) with
        # probability 1 - cur_prob.
        if (
            torch.jit.is_scripting()
            or self.max_var_per_eig <= 0
            or random.random() > self.cur_prob
            or torch.jit.is_tracing()
        ):
            return _no_op(x)

        with torch.cuda.amp.autocast(enabled=False):
            orig_x = x
            x = x.to(torch.float32)
            with torch.no_grad():
                # Flatten to (num_frames, num_channels) and mean-subtract.
                x = x.transpose(self.channel_dim, -1).reshape(-1, self.num_channels)
                x = x - x.mean(dim=0)
                new_direction, coeffs = self._find_direction_coeffs(
                    x, self.max_eig_direction
                )
                x_var = (x**2).mean()
                x_residual = x - coeffs * new_direction
                x_residual_var = (x_residual**2).mean()

                # `variance_proportion` is the proportion of the variance accounted for
                # by the top eigen-direction.
                variance_proportion = (x_var - x_residual_var) / (x_var + 1.0e-20)

                # ensure new direction is nonzero even if x == 0, by including `direction`.
                self._set_direction(0.1 * self.max_eig_direction + new_direction)

            if random.random() < 0.01 or __name__ == "__main__":
                logging.info(
                    f"variance_proportion = {variance_proportion.item()}, shape={tuple(orig_x.shape)}, cur_prob={self.cur_prob}"
                )

            if variance_proportion >= self.max_var_per_eig:
                # The constraint is active.  Note, we should quite rarely
                # reach here, only near the beginning of training if we are
                # starting to diverge, should this constraint be active.
                self.cur_prob = 1.0  # next time, do the update with probability 1.0.
                return MaxEigLimiterFunction.apply(
                    orig_x, coeffs, new_direction, self.channel_dim, self.scale
                )
            else:
                # let self.cur_prob exponentially approach self.min_prob, as
                # long as the constraint is inactive.
                self.cur_prob = 0.75 * self.cur_prob + 0.25 * self.min_prob
                return orig_x

    def _set_direction(self, direction: Tensor):
        """
        Sets self.max_eig_direction to a normalized version of `direction`
        (skipped, with a warning, if the direction contains inf/nan).
        """
        direction = direction.detach()
        direction = direction / direction.norm()
        direction_sum = direction.sum().item()
        if direction_sum - direction_sum == 0:  # no inf/nan
            self.max_eig_direction[:] = direction
        else:
            # Fixed: the continuation strings were missing the f-prefix, so the
            # num_channels/channel_dim placeholders were logged literally.
            logging.info(
                f"Warning: sum of direction in MaxEig is {direction_sum}, "
                f"num_channels={self.num_channels}, channel_dim={self.channel_dim}"
            )

    def _find_direction_coeffs(
        self, x: Tensor, prev_direction: Tensor
    ) -> Tuple[Tensor, Tensor]:
        """
        One power-method step estimating the dominant eigen-direction of x's
        covariance.  (Return annotation fixed: this returns two tensors.)

        Args:
            x: a Tensor of shape (num_frames, num_channels), with num_frames > 1.
            prev_direction: a Tensor of shape (num_channels,), that is our previous estimate
               of the top eigen-direction, or a random direction if this is the first
               iteration.  Does not have to be normalized, but should be nonzero.

        Returns: (cur_direction, coeffs), where:
             cur_direction: a Tensor of shape (num_channels,) that is the current
                estimate of the top eigen-direction.
             coeffs: a Tensor of shape (num_frames, 1) that minimizes, or
                approximately minimizes, (x - coeffs * cur_direction).norm()
        """
        (num_frames, num_channels) = x.shape
        assert num_channels > 1 and num_frames > 1
        assert prev_direction.shape == (num_channels,)
        # `coeffs` are the coefficients of `prev_direction` in x.
        # actually represent the coeffs up to a constant positive factor.
        coeffs = (x * prev_direction).sum(dim=1, keepdim=True) + 1.0e-10
        cur_direction = (x * coeffs).sum(dim=0) / ((coeffs**2).sum() + 1.0e-20)
        return cur_direction, coeffs
def _test_max_eig():
    """Smoke-test MaxEig: gradients should be (nearly) untouched when no
    direction dominates the covariance, and visibly modified when one does."""
    num_frames, num_channels = 100, 128
    for strength in [0.1, 0.5, 10.0]:
        logging.info(f"proportion = {strength}")
        # Build features with one direction injected at the given strength.
        feats = torch.randn(num_frames, num_channels)
        dominant = torch.randn(num_channels)
        amounts = torch.randn(num_frames, 1)
        feats += strength * dominant * amounts
        feats.requires_grad = True

        limiter = MaxEig(
            num_channels, 1, 0.5, scale=0.1  # channel_dim  # max_var_per_eig
        )  # grad_scale

        for _ in range(4):
            out = limiter(feats)
            grad_in = torch.randn_like(feats)
            out.backward(gradient=grad_in)

        if strength < 0.2:
            assert torch.allclose(feats.grad, grad_in, atol=1.0e-02)
        elif strength > 1.0:
            assert not torch.allclose(feats.grad, grad_in)
17,485 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class Whiten(nn.Module):
    def __init__(
        self,
        num_groups: int,
        whitening_limit: float,
        prob: Union[float, Tuple[float, float]],
        grad_scale: float,
    ):
        """
        Args:
          num_groups: the number of groups to divide the channel dim into before
            whitening.  We will attempt to make the feature covariance
            within each group, after mean subtraction, as "white" as possible,
            while having the same trace across all groups.
          whitening_limit: a value greater than 1.0, that dictates how much
            freedom we have to violate the constraints.  1.0 would mean perfectly
            white, with exactly the same trace across groups; larger values
            give more freedom.  E.g. 2.0.
          prob: the probability with which we apply the gradient modification
            (also affects the grad scale).  May be supplied as a float,
            or as a pair (min_prob, max_prob)
          grad_scale: determines the scale on the gradient term from this object,
            relative to the rest of the gradient on the attention weights.
            E.g. 0.02 (you may want to use smaller values than this if prob is large)
        """
        super(Whiten, self).__init__()
        assert num_groups >= 1
        assert whitening_limit >= 1
        assert grad_scale >= 0
        self.num_groups = num_groups
        self.whitening_limit = whitening_limit
        if isinstance(prob, float):
            # Fixed probability for the whole lifetime of the module.
            assert 0 < prob <= 1
            self.prob = prob
        else:
            # Adaptive probability: forward() switches self.prob between
            # min_prob and max_prob depending on whether the whitening
            # constraint is currently being violated.
            (self.min_prob, self.max_prob) = prob
            assert 0 < self.min_prob < self.max_prob <= 1
            self.prob = self.max_prob
        self.grad_scale = grad_scale

    def forward(self, x: Tensor) -> Tensor:
        """
        In the forward pass, this function just returns the input unmodified.
        In the backward pass, it will modify the gradients to ensure that the
        distribution in each group has close to (lambda times I) as the covariance
        after mean subtraction, with the same lambda across groups.
        For whitening_limit > 1, there will be more freedom to violate this
        constraint.

        Args:
           x: the input of shape (*, num_channels)

        Returns:
            x, unmodified.   You should make sure
            you use the returned value, or the graph will be freed
            and nothing will happen in backprop.
        """
        # Skip when no grad is flowing, when randomly not selected, or when
        # the penalty is disabled via grad_scale == 0.
        if not x.requires_grad or random.random() > self.prob or self.grad_scale == 0:
            return _no_op(x)
        else:
            if hasattr(self, "min_prob") and random.random() < 0.25:
                # occasionally switch between min_prob and max_prob, based on whether
                # we are above or below the threshold.
                if (
                    _whitening_metric(x.to(torch.float32), self.num_groups)
                    > self.whitening_limit
                ):
                    # there would be a change to the grad.
                    self.prob = self.max_prob
                else:
                    self.prob = self.min_prob

            return WhiteningPenaltyFunction.apply(
                x, self.num_groups, self.whitening_limit, self.grad_scale
            )
def _test_whiten():
    """Smoke-test Whiten: gradients should stay untouched when the feature
    covariance is near-white, and be modified when one direction dominates."""
    for strength in [0.1, 0.5, 10.0]:
        logging.info(f"_test_whiten(): proportion = {strength}")
        feats = torch.randn(100, 128)
        dominant = torch.randn(128)
        amounts = torch.randn(100, 1)
        feats += strength * dominant * amounts
        feats.requires_grad = True

        whitener = Whiten(
            1, 5.0, prob=1.0, grad_scale=0.1  # num_groups  # whitening_limit,
        )  # grad_scale

        for _ in range(4):
            out = whitener(feats)
            grad_in = torch.randn_like(feats)
            out.backward(gradient=grad_in)

        if strength < 0.2:
            assert torch.allclose(feats.grad, grad_in)
        elif strength > 1.0:
            assert not torch.allclose(feats.grad, grad_in)
17,486 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class ActivationBalancer(torch.nn.Module):
    """Extraction stub of ActivationBalancer.

    NOTE(review): the original snippet carried only the method signatures with
    empty bodies, which is a syntax error in Python.  The bodies below raise
    so any accidental use of this stub fails fast; the full implementation of
    this class (with the same interface) appears later in this file and
    overrides this name at module level.
    """

    def __init__(
        self,
        num_channels: int,
        channel_dim: int,
        min_positive: float = 0.05,
        max_positive: float = 0.95,
        max_factor: float = 0.04,
        sign_gain_factor: float = 0.01,
        scale_gain_factor: float = 0.02,
        min_abs: float = 0.2,
        max_abs: float = 100.0,
        min_prob: float = 0.1,
    ):
        # Stub only: intentionally not implemented in this duplicate.
        raise NotImplementedError("stub ActivationBalancer; see full definition below")

    def forward(self, x: Tensor) -> Tensor:
        raise NotImplementedError("stub ActivationBalancer; see full definition below")
def _test_activation_balancer_sign():
    """Eyeball-check ActivationBalancer's sign constraint: channels whose
    proportion of positive values is extreme should get modified gradients."""
    channel_probs = torch.arange(0, 1, 0.01)
    N = 1000
    # +/-1 entries where channel i is positive with probability channel_probs[i].
    x = 1.0 * ((2.0 * (torch.rand(channel_probs.numel(), N) < channel_probs.unsqueeze(-1))) - 1.0)
    x = x.detach()
    x.requires_grad = True
    balancer = ActivationBalancer(
        channel_probs.numel(),
        channel_dim=0,
        min_positive=0.05,
        max_positive=0.95,
        max_factor=0.2,
        min_abs=0.0,
    )

    y_grad = torch.sign(torch.randn(channel_probs.numel(), N))

    y = balancer(x)
    y.backward(gradient=y_grad)
    print("_test_activation_balancer_sign: x = ", x)
    print("_test_activation_balancer_sign: y grad = ", y_grad)
    print("_test_activation_balancer_sign: x grad = ", x.grad)
17,487 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class ActivationBalancer(torch.nn.Module):
    """
    Modifies the backpropped derivatives of a function to try to encourage, for
    each channel, that it is positive at least a proportion `threshold` of the
    time.  It does this by multiplying negative derivative values by up to
    (1+max_factor), and positive derivative values by up to (1-max_factor),
    interpolated from 1 at the threshold to those extremal values when none
    of the inputs are positive.

    Args:
       num_channels: the number of channels
       channel_dim: the dimension/axis corresponding to the channel, e.g.
           -1, 0, 1, 2; will be interpreted as an offset from x.ndim if negative.
       min_positive: the minimum, per channel, of the proportion of the time
           that (x > 0), below which we start to modify the derivatives.
       max_positive: the maximum, per channel, of the proportion of the time
           that (x > 0), above which we start to modify the derivatives.
       max_factor: the maximum factor by which we modify the derivatives for
          either the sign constraint or the magnitude constraint;
          e.g. with max_factor=0.02, the derivatives would be multiplied by
          values in the range [0.98..1.02].
       sign_gain_factor: determines the 'gain' with which we increase the
          change in gradient once the constraints on min_positive and max_positive
          are violated.
       scale_gain_factor: determines the 'gain' with which we increase the
          change in gradient once the constraints on min_abs and max_abs
          are violated.
       min_abs: the minimum average-absolute-value difference from the mean
           value per channel, which we allow, before we start to modify
           the derivatives to prevent this.
       max_abs: the maximum average-absolute-value difference from the mean
           value per channel, which we allow, before we start to modify
           the derivatives to prevent this.
       min_prob: determines the minimum probability with which we modify the
           gradients for the {min,max}_positive and {min,max}_abs constraints,
           on each forward().  This is done randomly to prevent all layers
           from doing it at the same time.  Early in training we may use
           higher probabilities than this; it will decay to this value.
    """

    def __init__(
        self,
        num_channels: int,
        channel_dim: int,
        min_positive: float = 0.05,
        max_positive: float = 0.95,
        max_factor: float = 0.04,
        sign_gain_factor: float = 0.01,
        scale_gain_factor: float = 0.02,
        min_abs: float = 0.2,
        max_abs: float = 100.0,
        min_prob: float = 0.1,
    ):
        super(ActivationBalancer, self).__init__()
        self.num_channels = num_channels
        self.channel_dim = channel_dim
        self.min_positive = min_positive
        self.max_positive = max_positive
        self.max_factor = max_factor
        self.min_abs = min_abs
        self.max_abs = max_abs
        self.min_prob = min_prob
        self.sign_gain_factor = sign_gain_factor
        self.scale_gain_factor = scale_gain_factor

        # count measures how many times the forward() function has been called.
        # We occasionally sync this to a tensor called `count`, that exists to
        # make sure it is synced to disk when we load and save the model.
        self.cpu_count = 0
        self.register_buffer("count", torch.tensor(0, dtype=torch.int64))

    def forward(self, x: Tensor) -> Tensor:
        if torch.jit.is_scripting() or not x.requires_grad or torch.jit.is_tracing():
            return _no_op(x)

        count = self.cpu_count
        self.cpu_count += 1

        if random.random() < 0.01:
            # Occasionally sync self.cpu_count with self.count.
            # count affects the decay of 'prob'.  don't do this on every iter,
            # because syncing with the GPU is slow.
            self.cpu_count = max(self.cpu_count, self.count.item())
            self.count.fill_(self.cpu_count)

        # the prob of doing some work exponentially decreases from 0.5 till it hits
        # a floor at min_prob (==0.1, by default)
        prob = max(self.min_prob, 0.5 ** (1 + (count / 4000.0)))

        if random.random() < prob:
            # NOTE(review): a dead local assignment `sign_gain_factor = 0.5`
            # was removed here; the gain actually used below is
            # self.sign_gain_factor.
            if self.min_positive != 0.0 or self.max_positive != 1.0:
                sign_factor = _compute_sign_factor(
                    x,
                    self.channel_dim,
                    self.min_positive,
                    self.max_positive,
                    gain_factor=self.sign_gain_factor / prob,
                    max_factor=self.max_factor,
                )
            else:
                # Sign constraint disabled; only the magnitude constraint applies.
                sign_factor = None

            scale_factor = _compute_scale_factor(
                x.detach(),
                self.channel_dim,
                min_abs=self.min_abs,
                max_abs=self.max_abs,
                gain_factor=self.scale_gain_factor / prob,
                max_factor=self.max_factor,
            )
            return ActivationBalancerFunction.apply(
                x,
                scale_factor,
                sign_factor,
                self.channel_dim,
            )
        else:
            return _no_op(x)
def _test_activation_balancer_magnitude():
    """Eyeball-check ActivationBalancer's magnitude (min_abs/max_abs) constraint."""
    magnitudes = torch.arange(0, 1, 0.01)
    N = 1000
    # Random signs scaled so channel i has constant magnitude magnitudes[i].
    x = torch.sign(torch.randn(magnitudes.numel(), N)) * magnitudes.unsqueeze(-1)
    x = x.detach()
    x.requires_grad = True
    balancer = ActivationBalancer(
        magnitudes.numel(),
        channel_dim=0,
        min_positive=0.0,
        max_positive=1.0,
        max_factor=0.2,
        min_abs=0.2,
        max_abs=0.8,
        min_prob=1.0,
    )

    y_grad = torch.sign(torch.randn(magnitudes.numel(), N))

    y = balancer(x)
    y.backward(gradient=y_grad)
    print("_test_activation_balancer_magnitude: x = ", x)
    print("_test_activation_balancer_magnitude: y grad = ", y_grad)
    print("_test_activation_balancer_magnitude: x grad = ", x.grad)
17,488 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class BasicNorm(torch.nn.Module):
    """
    A simpler, and hopefully cheaper, replacement for LayerNorm.

    The observation this is based on: Transformer-type networks, especially
    with pre-norm, sometimes seem to set one feature dimension to a large
    constant value (e.g. 50), which "defeats" the LayerNorm because the output
    magnitude is then not strongly dependent on the other (useful) features.
    Presumably the weight and bias of the LayerNorm are required to allow it
    to do this.

    The idea here is to introduce that large constant value as an explicit,
    learnable parameter that plays the role of the "eps" in LayerNorm, so the
    network doesn't have to do the trick itself.  The normalization is:

        scale = ((input_vec**2).mean() + epsilon) ** -0.5

    Args:
       num_channels: the number of channels, e.g. 512.
       channel_dim: the axis/dimension corresponding to the channel,
         interpreted as an offset from the input's ndim if negative.
         This is NOT the num_channels; it should typically be one of
         {-2, -1, 0, 1, 2, 3}.
       eps: the initial "epsilon" used as ballast (stored internally in log
         space).  Note: our epsilon is actually large, but we keep the name to
         indicate the connection with conventional LayerNorm.
       learn_eps: if true, epsilon is learned; if false, it stays at the
         initial value.
       eps_min: lower clamp bound (in log space) occasionally applied during
         training.
       eps_max: upper clamp bound (in log space) occasionally applied during
         training.
    """

    def __init__(
        self,
        num_channels: int,
        channel_dim: int = -1,  # CAUTION: see documentation.
        eps: float = 0.25,
        learn_eps: bool = True,
        eps_min: float = -3.0,
        eps_max: float = 3.0,
    ) -> None:
        super(BasicNorm, self).__init__()
        self.num_channels = num_channels
        self.channel_dim = channel_dim
        # eps is kept in log space so it stays positive after exp().
        log_eps = torch.tensor(eps).log().detach()
        if learn_eps:
            self.eps = nn.Parameter(log_eps)
        else:
            self.register_buffer("eps", log_eps)
        self.eps_min = eps_min
        self.eps_max = eps_max

    def forward(self, x: Tensor) -> Tensor:
        assert x.shape[self.channel_dim] == self.num_channels
        log_eps = self.eps
        if self.training and random.random() < 0.25:
            # With probability 0.25 in training mode, clamp eps between the
            # min and max; this makes out-of-range parameters noisy, which
            # encourages the model to keep eps within the allowed range.
            log_eps = log_eps.clamp(min=self.eps_min, max=self.eps_max)
        mean_sq = torch.mean(x**2, dim=self.channel_dim, keepdim=True)
        return x * (mean_sq + log_eps.exp()) ** -0.5
def _test_basic_norm():
    """BasicNorm should shrink the overall RMS (but not below half) while
    preserving the input shape."""
    num_channels = 128
    norm = BasicNorm(num_channels=num_channels, channel_dim=1)

    inputs = torch.randn(500, num_channels)
    outputs = norm(inputs)
    assert outputs.shape == inputs.shape

    in_rms = (inputs**2).mean().sqrt()
    out_rms = (outputs**2).mean().sqrt()
    print("x rms = ", in_rms)
    print("y rms = ", out_rms)
    assert out_rms < in_rms
    assert out_rms > 0.5 * in_rms
17,489 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
class DoubleSwish(torch.nn.Module):
    def forward(self, x: Tensor) -> Tensor:
        """Compute double-swish, x * sigmoid(x - 1), a close approximation to
        Swish(Swish(x)).  The custom autograd op (defined elsewhere in this
        module) is used in eager mode only.
        """
        under_jit = torch.jit.is_scripting() or torch.jit.is_tracing()
        return x * torch.sigmoid(x - 1.0) if under_jit else DoubleSwishFunction.apply(x)
def _test_double_swish_deriv():
    """Numerically check DoubleSwish gradients with torch.autograd.gradcheck."""
    inputs = torch.randn(10, 12, dtype=torch.double) * 3.0
    inputs.requires_grad = True
    activation = DoubleSwish()

    # presumably sized to the quantization step of the backward approximation
    # over its value range -- TODO confirm against DoubleSwishFunction.
    tol = (1.2 - (-0.043637)) / 255.0
    torch.autograd.gradcheck(activation, inputs, atol=tol)

    # for self-test.
    inputs = torch.randn(1000, 1000, dtype=torch.double) * 3.0
    inputs.requires_grad = True
    _ = activation(inputs)
17,490 | import logging
import random
import math
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from torch import Tensor
def softmax(x: Tensor, dim: int):
    """Softmax along `dim`, routed through the custom SoftmaxFunction autograd
    op (defined elsewhere in this module) except under scripting/tracing,
    where the plain Tensor.softmax is used."""
    under_jit = torch.jit.is_scripting() or torch.jit.is_tracing()
    if under_jit:
        return x.softmax(dim)
    return SoftmaxFunction.apply(x, dim)
def _test_softmax():
    """The custom softmax must produce the same gradients as Tensor.softmax."""
    ref = torch.randn(2, 10, dtype=torch.float64)
    alt = ref.clone()
    ref.requires_grad = True
    alt.requires_grad = True

    ref.softmax(dim=1)[:, 0].sum().backward()
    print("a grad = ", ref.grad)

    softmax(alt, dim=1)[:, 0].sum().backward()
    print("b grad = ", alt.grad)
    assert torch.allclose(ref.grad, alt.grad)
17,491 | import math
import torch
from torch import nn
from torch.nn import Parameter
import torch.nn.functional as F
import numpy as np
def pad(input_ele, mel_max_length=None):
    """Right-pad a list of 1-D or 2-D tensors to a common length and stack them.

    Args:
        input_ele: list of tensors, each of shape (T_i,) or (T_i, D); the
            padding is applied along the first dimension.
        mel_max_length: if truthy, pad every entry to this length; otherwise
            pad to the longest entry in the list.

    Returns:
        A stacked tensor of shape (B, T_max) or (B, T_max, D), zero-padded.

    Raises:
        ValueError: if an entry is not 1-D or 2-D.  (The original code fell
            through to a NameError in that case.)
    """
    if mel_max_length:
        max_len = mel_max_length
    else:
        max_len = max(ele.size(0) for ele in input_ele)

    out_list = []
    for batch in input_ele:
        if batch.dim() == 1:
            one_batch_padded = F.pad(
                batch, (0, max_len - batch.size(0)), "constant", 0.0
            )
        elif batch.dim() == 2:
            # F.pad's pad tuple is given from the last dimension backwards:
            # (0, 0) leaves the feature dim alone, (0, pad) pads dim 0.
            one_batch_padded = F.pad(
                batch, (0, 0, 0, max_len - batch.size(0)), "constant", 0.0
            )
        else:
            raise ValueError(
                f"pad() expects 1-D or 2-D tensors, got {batch.dim()}-D"
            )
        out_list.append(one_batch_padded)
    return torch.stack(out_list)
17,492 | import torch
from torch.nn import functional as F
import numpy as np
DEFAULT_MIN_BIN_WIDTH = 1e-3
DEFAULT_MIN_BIN_HEIGHT = 1e-3
DEFAULT_MIN_DERIVATIVE = 1e-3
def unconstrained_rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails="linear",
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Rational-quadratic spline transform with identity ("linear") tails
    outside [-tail_bound, tail_bound].

    Inside the interval the spline from rational_quadratic_spline() is
    applied; outside it the transform is the identity, contributing 0 to the
    log-abs-determinant.

    Returns:
        (outputs, logabsdet), each shaped like `inputs`.

    Raises:
        RuntimeError: if `tails` is anything other than "linear".
    """
    inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
    outside_interval_mask = ~inside_interval_mask

    outputs = torch.zeros_like(inputs)
    logabsdet = torch.zeros_like(inputs)

    if tails == "linear":
        # Add one derivative parameter on each side; `constant` is chosen so
        # that min_derivative + softplus(constant) == 1, i.e. slope 1 at the
        # boundary knots, matching the identity tails (see the `derivatives`
        # formula in rational_quadratic_spline).
        unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
        constant = np.log(np.exp(1 - min_derivative) - 1)
        unnormalized_derivatives[..., 0] = constant
        unnormalized_derivatives[..., -1] = constant

        # Identity transform (log-det 0) outside the interval.
        outputs[outside_interval_mask] = inputs[outside_interval_mask]
        logabsdet[outside_interval_mask] = 0
    else:
        raise RuntimeError("{} tails are not implemented.".format(tails))

    (
        outputs[inside_interval_mask],
        logabsdet[inside_interval_mask],
    ) = rational_quadratic_spline(
        inputs=inputs[inside_interval_mask],
        unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
        unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
        unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
        inverse=inverse,
        left=-tail_bound,
        right=tail_bound,
        bottom=-tail_bound,
        top=tail_bound,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )

    return outputs, logabsdet
def rational_quadratic_spline(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    left=0.0,
    right=1.0,
    bottom=0.0,
    top=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Monotonic piecewise rational-quadratic spline mapping [left, right]
    onto [bottom, top] (presumably the construction from "Neural Spline
    Flows", Durkan et al. 2019 -- confirm against the original source).

    Bin widths/heights come from softmaxes of the unnormalized parameters
    (floored at min_bin_width/min_bin_height), and knot derivatives from
    min_derivative + softplus(unnormalized_derivatives).

    Args:
        inputs: values to transform; every value must lie in [left, right]
            (note: the same check is applied even when inverse=True).
        inverse: if True, apply the inverse transform and negate logabsdet.

    Returns:
        (outputs, logabsdet), each shaped like `inputs`.

    Raises:
        ValueError: if inputs fall outside the domain, or if the minimum bin
            width/height is infeasible for the number of bins.
    """
    if torch.min(inputs) < left or torch.max(inputs) > right:
        raise ValueError("Input to a transform is not within its domain")

    num_bins = unnormalized_widths.shape[-1]

    if min_bin_width * num_bins > 1.0:
        raise ValueError("Minimal bin width too large for the number of bins")
    if min_bin_height * num_bins > 1.0:
        raise ValueError("Minimal bin height too large for the number of bins")

    # Bin widths: softmax, floored at min_bin_width, accumulated into knot
    # x-positions and rescaled onto [left, right].
    widths = F.softmax(unnormalized_widths, dim=-1)
    widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
    cumwidths = torch.cumsum(widths, dim=-1)
    cumwidths = F.pad(cumwidths, pad=(1, 0), mode="constant", value=0.0)
    cumwidths = (right - left) * cumwidths + left
    # Pin the endpoints exactly despite floating-point rounding.
    cumwidths[..., 0] = left
    cumwidths[..., -1] = right
    widths = cumwidths[..., 1:] - cumwidths[..., :-1]

    derivatives = min_derivative + F.softplus(unnormalized_derivatives)

    # Bin heights: same construction as widths, on [bottom, top].
    heights = F.softmax(unnormalized_heights, dim=-1)
    heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
    cumheights = torch.cumsum(heights, dim=-1)
    cumheights = F.pad(cumheights, pad=(1, 0), mode="constant", value=0.0)
    cumheights = (top - bottom) * cumheights + bottom
    cumheights[..., 0] = bottom
    cumheights[..., -1] = top
    heights = cumheights[..., 1:] - cumheights[..., :-1]

    # Locate each input's bin: by knot y-positions when inverting, x otherwise.
    if inverse:
        bin_idx = searchsorted(cumheights, inputs)[..., None]
    else:
        bin_idx = searchsorted(cumwidths, inputs)[..., None]

    # Gather the per-input bin parameters.
    input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
    input_bin_widths = widths.gather(-1, bin_idx)[..., 0]

    input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
    delta = heights / widths  # average slope of each bin
    input_delta = delta.gather(-1, bin_idx)[..., 0]

    # Derivatives at the bin's left and right knots.
    input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
    input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]

    input_heights = heights.gather(-1, bin_idx)[..., 0]

    if inverse:
        # Solve a*theta^2 + b*theta + c = 0 for the bin-local coordinate
        # theta, using the root form 2c / (-b - sqrt(b^2 - 4ac)).
        a = (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        ) + input_heights * (input_delta - input_derivatives)
        b = input_heights * input_derivatives - (inputs - input_cumheights) * (
            input_derivatives + input_derivatives_plus_one - 2 * input_delta
        )
        c = -input_delta * (inputs - input_cumheights)

        discriminant = b.pow(2) - 4 * a * c
        assert (discriminant >= 0).all()

        root = (2 * c) / (-b - torch.sqrt(discriminant))
        outputs = root * input_bin_widths + input_cumwidths

        theta_one_minus_theta = root * (1 - root)
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * root.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - root).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        # Inverse direction: negate the forward log-determinant.
        return outputs, -logabsdet
    else:
        # theta is the relative position within the bin, in [0, 1].
        theta = (inputs - input_cumwidths) / input_bin_widths
        theta_one_minus_theta = theta * (1 - theta)

        numerator = input_heights * (
            input_delta * theta.pow(2) + input_derivatives * theta_one_minus_theta
        )
        denominator = input_delta + (
            (input_derivatives + input_derivatives_plus_one - 2 * input_delta)
            * theta_one_minus_theta
        )
        outputs = input_cumheights + numerator / denominator

        derivative_numerator = input_delta.pow(2) * (
            input_derivatives_plus_one * theta.pow(2)
            + 2 * input_delta * theta_one_minus_theta
            + input_derivatives * (1 - theta).pow(2)
        )
        logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)

        return outputs, logabsdet
def piecewise_rational_quadratic_transform(
    inputs,
    unnormalized_widths,
    unnormalized_heights,
    unnormalized_derivatives,
    inverse=False,
    tails=None,
    tail_bound=1.0,
    min_bin_width=DEFAULT_MIN_BIN_WIDTH,
    min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
    min_derivative=DEFAULT_MIN_DERIVATIVE,
):
    """Dispatch to the plain spline (tails is None) or the tail-extended
    spline (tails given, e.g. "linear"), forwarding all shared parameters.

    Returns:
        (outputs, logabsdet) from the selected spline function.
    """
    common_kwargs = dict(
        inputs=inputs,
        unnormalized_widths=unnormalized_widths,
        unnormalized_heights=unnormalized_heights,
        unnormalized_derivatives=unnormalized_derivatives,
        inverse=inverse,
        min_bin_width=min_bin_width,
        min_bin_height=min_bin_height,
        min_derivative=min_derivative,
    )
    if tails is None:
        return rational_quadratic_spline(**common_kwargs)
    # Tail-extended variant additionally needs the tail configuration.
    return unconstrained_rational_quadratic_spline(
        tails=tails, tail_bound=tail_bound, **common_kwargs
    )
17,493 | import copy
from functools import partial
from typing import Any, Callable, List, Optional, Union
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from modules.norms import AdaptiveLayerNorm, LayerNorm, BalancedBasicNorm, IdentityNorm
from modules.transformer import MultiheadAttention
from modules.general.scaling import BalancedDoubleSwish
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) | null |
17,494 | import copy
from functools import partial
from typing import Any, Callable, List, Optional, Union
import torch
from torch import Tensor, nn
from torch.nn import functional as F
from modules.norms import AdaptiveLayerNorm, LayerNorm, BalancedBasicNorm, IdentityNorm
from modules.transformer import MultiheadAttention
from modules.general.scaling import BalancedDoubleSwish
def _get_activation_fn(activation: str) -> Callable[[Tensor], Tensor]:
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) | null |
17,495 | import torch
import torch.nn as nn
import numpy as np
from .Layers import FFTBlock
from text.symbols import symbols
The provided code snippet includes necessary dependencies for implementing the `get_sinusoid_encoding_table` function. Write a Python function `def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None)` to solve the following problem:
Sinusoid position encoding table
Here is the function:
def get_sinusoid_encoding_table(n_position, d_hid, padding_idx=None):
    """Sinusoid position encoding table.

    Vectorized with NumPy broadcasting (the original built the table with a
    nested Python comprehension); the produced values are identical.

    Args:
        n_position: number of positions (rows).
        d_hid: encoding dimensionality (columns).
        padding_idx: if given, that row is zeroed out.

    Returns:
        A float32 tensor of shape (n_position, d_hid) with sin on even
        columns and cos on odd columns.
    """
    positions = np.arange(n_position, dtype=np.float64)[:, np.newaxis]
    dim_indices = np.arange(d_hid)
    # Dimension pair (2i, 2i+1) shares the frequency 10000^(-2i/d_hid).
    angles = positions / np.power(10000, 2 * (dim_indices // 2) / d_hid)

    sinusoid_table = np.empty_like(angles)
    sinusoid_table[:, 0::2] = np.sin(angles[:, 0::2])  # dim 2i
    sinusoid_table[:, 1::2] = np.cos(angles[:, 1::2])  # dim 2i+1

    if padding_idx is not None:
        # zero vector for padding dimension
        sinusoid_table[padding_idx] = 0.0

    return torch.FloatTensor(sinusoid_table)
17,496 | from abc import ABC, abstractmethod
import numpy as np
import torch as th
from scipy.stats import norm
import torch.distributed as dist
class UniformSampler(ScheduleSampler):
    """Schedule sampler in which every diffusion timestep is equally likely."""

    def __init__(self, diffusion):
        self.diffusion = diffusion
        # Unnormalized per-timestep weights; all ones -> uniform sampling.
        self._weights = np.full(diffusion.num_timesteps, 1.0)

    def weights(self):
        return self._weights
class LossSecondMomentResampler(LossAwareSampler):
    """Importance-sample timesteps in proportion to the RMS of recent losses.

    Keeps the last `history_per_term` loss values per timestep.  Until every
    timestep has a full history, sampling stays uniform; afterwards, a small
    uniform floor `uniform_prob` keeps every timestep reachable.
    """

    def __init__(self, diffusion, history_per_term=10, uniform_prob=0.001):
        self.diffusion = diffusion
        self.history_per_term = history_per_term
        self.uniform_prob = uniform_prob
        self._loss_history = np.zeros(
            [diffusion.num_timesteps, history_per_term], dtype=np.float64
        )
        # Fixed: `np.int` (deprecated NumPy 1.20, removed in 1.24) replaced
        # with an explicit integer dtype.
        self._loss_counts = np.zeros([diffusion.num_timesteps], dtype=np.int64)

    def weights(self):
        """Return unnormalized per-timestep sampling weights."""
        if not self._warmed_up():
            # Uniform until every timestep has a full loss history.
            return np.ones([self.diffusion.num_timesteps], dtype=np.float64)
        weights = np.sqrt(np.mean(self._loss_history**2, axis=-1))
        weights /= np.sum(weights)
        weights *= 1 - self.uniform_prob
        weights += self.uniform_prob / len(weights)
        return weights

    def update_with_all_losses(self, ts, losses):
        """Record (timestep, loss) pairs, evicting the oldest history entry
        once a timestep's buffer is full."""
        for t, loss in zip(ts, losses):
            if self._loss_counts[t] == self.history_per_term:
                # Shift out the oldest loss term.
                self._loss_history[t, :-1] = self._loss_history[t, 1:]
                self._loss_history[t, -1] = loss
            else:
                self._loss_history[t, self._loss_counts[t]] = loss
                self._loss_counts[t] += 1

    def _warmed_up(self):
        # True once every timestep has recorded history_per_term losses.
        return (self._loss_counts == self.history_per_term).all()
class LogNormalSampler:
    """Draw diffusion noise levels sigma with log(sigma) ~ N(p_mean, p_std).

    With `even=True`, stratified sampling across distributed ranks is used via
    the inverse CDF; this path requires torch.distributed to be initialized.
    """

    def __init__(self, p_mean=-1.2, p_std=1.2, even=False):
        self.p_mean = p_mean
        self.p_std = p_std
        self.even = even
        if self.even:
            # Inverse CDF of N(p_mean, p_std), used for stratified sampling.
            self.inv_cdf = lambda x: norm.ppf(x, loc=p_mean, scale=p_std)
            self.rank, self.size = dist.get_rank(), dist.get_world_size()

    def sample(self, bs, device):
        """Return (sigmas, weights) for a batch of size `bs`; weights are all
        ones (no importance re-weighting)."""
        if self.even:
            # buckets = [1/G]
            lo, hi = self.rank * bs, (self.rank + 1) * bs
            world_bs = self.size * bs
            # One jittered sample per global stratum owned by this rank.
            quantiles = (th.arange(lo, hi) + th.rand(bs)) / world_bs
            log_sigmas = th.tensor(
                self.inv_cdf(quantiles), dtype=th.float32, device=device
            )
        else:
            log_sigmas = self.p_mean + self.p_std * th.randn(bs, device=device)
        sigmas = th.exp(log_sigmas)
        weights = th.ones_like(sigmas)
        return sigmas, weights
The provided code snippet includes necessary dependencies for implementing the `create_named_schedule_sampler` function. Write a Python function `def create_named_schedule_sampler(name, diffusion)` to solve the following problem:
Create a ScheduleSampler from a library of pre-defined samplers. :param name: the name of the sampler. :param diffusion: the diffusion object to sample for.
Here is the function:
def create_named_schedule_sampler(name, diffusion):
    """
    Create a ScheduleSampler from a library of pre-defined samplers.

    :param name: the name of the sampler.
    :param diffusion: the diffusion object to sample for.
    """
    if name == "uniform":
        return UniformSampler(diffusion)
    if name == "loss-second-moment":
        return LossSecondMomentResampler(diffusion)
    if name == "lognormal":
        # Note: the lognormal sampler does not use the diffusion object.
        return LogNormalSampler()
    raise NotImplementedError(f"unknown schedule sampler: {name}")
17,497 | import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from utils.ssim import SSIM
from modules.diffusion.karras.random_utils import get_generator
The provided code snippet includes necessary dependencies for implementing the `mean_flat` function. Write a Python function `def mean_flat(tensor)` to solve the following problem:
Take the mean over all non-batch dimensions.
Here is the function:
def mean_flat(tensor):
    """
    Take the mean over all non-batch dimensions.
    """
    non_batch_dims = list(range(1, tensor.dim()))
    return tensor.mean(dim=non_batch_dims)
17,498 | import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from utils.ssim import SSIM
from modules.diffusion.karras.random_utils import get_generator
def get_weightings(weight_schedule, snrs, sigma_data):
    """Return per-sample loss weights for the named weighting schedule.

    Args:
        weight_schedule: one of "snr", "snr+1", "karras", "truncated-snr",
            "uniform".
        snrs: tensor of signal-to-noise ratios.
        sigma_data: data sigma; only used by the "karras" schedule.

    Raises:
        NotImplementedError: for an unknown schedule name.
    """
    if weight_schedule == "snr":
        return snrs
    if weight_schedule == "snr+1":
        return snrs + 1
    if weight_schedule == "karras":
        return snrs + 1.0 / sigma_data**2
    if weight_schedule == "truncated-snr":
        return th.clamp(snrs, min=1.0)
    if weight_schedule == "uniform":
        return th.ones_like(snrs)
    raise NotImplementedError()
17,499 | import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from utils.ssim import SSIM
from modules.diffusion.karras.random_utils import get_generator
def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
    """Constructs the noise schedule of Karras et al. (2022)."""
    # Interpolate linearly in sigma**(1/rho) space from sigma_max down to
    # sigma_min, then raise back to the rho power.
    ramp = th.linspace(0, 1, n)
    lo = sigma_min ** (1 / rho)
    hi = sigma_max ** (1 / rho)
    sigmas = (hi + ramp * (lo - hi)) ** rho
    # append_zero adds the terminal sigma=0 entry the samplers expect.
    return append_zero(sigmas).to(device)
def sample_euler_ancestral(model, x, sigmas, generator, progress=False, callback=None):
    """Ancestral sampling with Euler method steps."""
    # Per-sample ones vector broadcasts each scalar sigma over the batch.
    ones = x.new_ones([x.shape[0]])
    steps = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm

        steps = tqdm(steps)
    for i in steps:
        denoised = model(x, sigmas[i] * ones)
        sigma_down, sigma_up = get_ancestral_step(sigmas[i], sigmas[i + 1])
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigmas[i],
                    "denoised": denoised,
                }
            )
        # Deterministic Euler move down to sigma_down, then re-inject noise
        # so the sample sits at noise level sigma_up.
        direction = to_d(x, sigmas[i], denoised)
        x = x + direction * (sigma_down - sigmas[i])
        x = x + generator.randn_like(x) * sigma_up
    return x
def sample_heun(
    denoiser,
    x,
    sigmas,
    generator,
    progress=False,
    callback=None,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """Implements Algorithm 2 (Heun steps) from Karras et al. (2022)."""
    # Per-sample vector of ones, used to broadcast a scalar sigma over the batch.
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # Stochastic "churn": temporarily raise the noise level while the
        # current sigma lies in [s_tmin, s_tmax]; gamma == 0 disables it.
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
            if s_tmin <= sigmas[i] <= s_tmax
            else 0.0
        )
        eps = generator.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            # Inject exactly enough noise to lift x from level sigmas[i] to sigma_hat.
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        dt = sigmas[i + 1] - sigma_hat
        if sigmas[i + 1] == 0:
            # Euler method
            x = x + d * dt
        else:
            # Heun's method
            # Probe step to the next sigma, then average the two slopes
            # (trapezoidal / second-order correction).
            x_2 = x + d * dt
            denoised_2 = denoiser(x_2, sigmas[i + 1] * s_in)
            d_2 = to_d(x_2, sigmas[i + 1], denoised_2)
            d_prime = (d + d_2) / 2
            x = x + d_prime * dt
    return x
def sample_euler(
    denoiser,
    x,
    sigmas,
    generator,
    progress=False,
    callback=None,
):
    """First-order Euler ODE sampling over a decreasing sigma schedule.

    NOTE(review): the original docstring said "Algorithm 2 (Heun steps)" —
    copy-pasted from sample_heun; this routine performs plain Euler updates.
    """
    ones = x.new_ones([x.shape[0]])
    steps = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm

        steps = tqdm(steps)
    for i in steps:
        cur_sigma = sigmas[i]
        denoised = denoiser(x, cur_sigma * ones)
        direction = to_d(x, cur_sigma, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "denoised": denoised,
                }
            )
        # Single Euler step from cur_sigma down to the next sigma level.
        x = x + direction * (sigmas[i + 1] - cur_sigma)
    return x
def sample_dpm(
    denoiser,
    x,
    sigmas,
    generator,
    progress=False,
    callback=None,
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
):
    """A sampler inspired by DPM-Solver-2 and Algorithm 2 from Karras et al. (2022)."""
    # Per-sample vector of ones, used to broadcast a scalar sigma over the batch.
    s_in = x.new_ones([x.shape[0]])
    indices = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm
        indices = tqdm(indices)
    for i in indices:
        # Stochastic "churn": temporarily raise the noise level while the
        # current sigma lies in [s_tmin, s_tmax]; gamma == 0 disables it.
        gamma = (
            min(s_churn / (len(sigmas) - 1), 2**0.5 - 1)
            if s_tmin <= sigmas[i] <= s_tmax
            else 0.0
        )
        eps = generator.randn_like(x) * s_noise
        sigma_hat = sigmas[i] * (gamma + 1)
        if gamma > 0:
            # Inject exactly enough noise to lift x from level sigmas[i] to sigma_hat.
            x = x + eps * (sigma_hat**2 - sigmas[i] ** 2) ** 0.5
        denoised = denoiser(x, sigma_hat * s_in)
        d = to_d(x, sigma_hat, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": sigmas[i],
                    "sigma_hat": sigma_hat,
                    "denoised": denoised,
                }
            )
        # Midpoint method, where the midpoint is chosen according to a rho=3 Karras schedule
        sigma_mid = ((sigma_hat ** (1 / 3) + sigmas[i + 1] ** (1 / 3)) / 2) ** 3
        dt_1 = sigma_mid - sigma_hat
        dt_2 = sigmas[i + 1] - sigma_hat
        # Euler probe step to the midpoint, then use the midpoint slope for
        # the full step (second-order update).
        x_2 = x + d * dt_1
        denoised_2 = denoiser(x_2, sigma_mid * s_in)
        d_2 = to_d(x_2, sigma_mid, denoised_2)
        x = x + d_2 * dt_2
    return x
def sample_onestep(
    distiller,
    x,
    sigmas,
    generator=None,
    progress=False,
    callback=None,
):
    """Single-step generation from a distilled model."""
    # One network evaluation at the initial (largest) sigma; the remaining
    # arguments exist only for signature compatibility with other samplers.
    batch_ones = x.new_ones([x.shape[0]])
    return distiller(x, sigmas[0] * batch_ones)
def stochastic_iterative_sampler(
    distiller,
    x,
    sigmas,
    generator,
    ts,
    progress=False,
    callback=None,
    t_min=0.002,
    t_max=80.0,
    rho=7.0,
    steps=40,
):
    """Multistep consistency sampling: denoise fully, then re-noise to the
    next (lower) noise level for each consecutive pair in ``ts``.

    ``sigmas`` is unused; kept for signature compatibility with the other
    samplers dispatched from karras_sample.
    """
    hi = t_max ** (1 / rho)
    lo = t_min ** (1 / rho)
    ones = x.new_ones([x.shape[0]])
    for idx in range(len(ts) - 1):
        # Map the integer timestep index into sigma space via the Karras
        # rho-schedule interpolation.
        t = (hi + ts[idx] / (steps - 1) * (lo - hi)) ** rho
        x0 = distiller(x, t * ones)
        next_t = (hi + ts[idx + 1] / (steps - 1) * (lo - hi)) ** rho
        next_t = np.clip(next_t, t_min, t_max)
        # Re-inject just enough Gaussian noise to land at next_t's noise level.
        x = x0 + generator.randn_like(x) * np.sqrt(next_t**2 - t_min**2)
    return x
def sample_progdist(
    denoiser,
    x,
    sigmas,
    generator=None,
    progress=False,
    callback=None,
):
    """Progressive-distillation sampling: plain Euler steps over the schedule,
    excluding the terminal sigma=0 entry."""
    ones = x.new_ones([x.shape[0]])
    sigmas = sigmas[:-1]  # drop the appended zero sigma
    steps = range(len(sigmas) - 1)
    if progress:
        from tqdm.auto import tqdm

        steps = tqdm(steps)
    for i in steps:
        cur_sigma = sigmas[i]
        denoised = denoiser(x, cur_sigma * ones)
        direction = to_d(x, cur_sigma, denoised)
        if callback is not None:
            callback(
                {
                    "x": x,
                    "i": i,
                    "sigma": cur_sigma,
                    "denoised": denoised,
                }
            )
        # Euler step down to the next sigma level.
        x = x + direction * (sigmas[i + 1] - cur_sigma)
    return x
def get_generator(generator, num_samples=0, seed=0):
    """Look up a noise-generator implementation by name.

    ``num_samples`` and ``seed`` are only used by the deterministic variants.
    """
    if generator == "dummy":
        return DummyGenerator()
    if generator == "determ":
        return DeterministicGenerator(num_samples, seed)
    if generator == "determ-indiv":
        return DeterministicIndividualGenerator(num_samples, seed)
    raise NotImplementedError
def karras_sample(
    diffusion,
    model,
    shape,
    steps,
    clip_denoised=True,
    progress=True,
    callback=None,
    # model_kwargs=None,
    condition=None,
    device=None,
    sigma_min=0.002,
    sigma_max=80,  # higher for highres?
    rho=7.0,
    sampler="heun",
    s_churn=0.0,
    s_tmin=0.0,
    s_tmax=float("inf"),
    s_noise=1.0,
    generator=None,
    ts=None,
):
    """Draw samples from ``model`` via ``diffusion`` using a Karras-style sampler.

    Builds the Karras sigma schedule, draws initial noise at ``sigma_max``,
    dispatches to the sampler named by ``sampler``, and returns the result
    clamped to [-1, 1].

    :param diffusion: object providing ``denoise(model, x_t, sigma, condition)``
        (and ``rho`` when ``sampler == "multistep"``).
    :param shape: output tensor shape (batch first).
    :param steps: number of sigma levels in the schedule.
    :param clip_denoised: clamp intermediate denoised predictions to [-1, 1].
    :param sampler: one of "heun", "dpm", "ancestral", "onestep", "progdist",
        "euler", "multistep".
    :param s_churn/s_tmin/s_tmax/s_noise: churn knobs, used by heun/dpm only.
    :param generator: noise generator; defaults to ``get_generator("dummy")``.
    :param ts: integer timestep indices, used by the multistep sampler only.
    """
    if generator is None:
        generator = get_generator("dummy")
    # progdist skips the terminal sigma inside its loop, so it needs one
    # extra level to perform the same number of steps.
    if sampler == "progdist":
        sigmas = get_sigmas_karras(steps + 1, sigma_min, sigma_max, rho, device=device)
    else:
        sigmas = get_sigmas_karras(steps, sigma_min, sigma_max, rho, device=device)
    # NOTE(review): hard-coded global seed makes every call produce identical
    # noise — confirm this is intentional and not leftover debugging.
    th.manual_seed(42)
    x_T = generator.randn(*shape, device=device) * sigma_max
    sigmas = sigmas.unsqueeze(-1)
    sample_fn = {
        "heun": sample_heun,
        "dpm": sample_dpm,
        "ancestral": sample_euler_ancestral,
        "onestep": sample_onestep,
        "progdist": sample_progdist,
        "euler": sample_euler,
        "multistep": stochastic_iterative_sampler,
    }[sampler]
    # Only churn-capable and multistep samplers take extra keyword arguments.
    if sampler in ["heun", "dpm"]:
        sampler_args = dict(
            s_churn=s_churn, s_tmin=s_tmin, s_tmax=s_tmax, s_noise=s_noise
        )
    elif sampler == "multistep":
        sampler_args = dict(
            ts=ts, t_min=sigma_min, t_max=sigma_max, rho=diffusion.rho, steps=steps
        )
    else:
        sampler_args = {}
    def denoiser(x_t, sigma):
        # Adapt diffusion.denoise to the (x_t, sigma) -> denoised interface
        # that every sampler above expects.
        _, denoised = diffusion.denoise(model, x_t, sigma, condition)
        if clip_denoised:
            denoised = denoised.clamp(-1, 1)
        return denoised
    x_0 = sample_fn(
        denoiser,
        x_T,
        sigmas,
        generator,
        progress=progress,
        callback=callback,
        **sampler_args,
    )
    return x_0.clamp(-1, 1)
17,500 | import random
import numpy as np
import torch as th
import torch.nn as nn
import torch.nn.functional as F
from utils.ssim import SSIM
from modules.diffusion.karras.random_utils import get_generator
The provided code snippet includes necessary dependencies for implementing the `sample_midpoint_ancestral` function. Write a Python function `def sample_midpoint_ancestral(model, x, ts, generator, progress=False, callback=None)` to solve the following problem:
Ancestral sampling with midpoint method steps.
Here is the function:
def sample_midpoint_ancestral(model, x, ts, generator, progress=False, callback=None):
    """Ancestral sampling with midpoint method steps."""
    ones = x.new_ones([x.shape[0]])
    # Fixed step size derived from the number of time points.
    h = 1 / len(ts)
    if progress:
        from tqdm.auto import tqdm

        ts = tqdm(ts)
    for tn in ts:
        # Midpoint (RK2) update: evaluate the slope at tn, step half-way,
        # then advance the full step using the midpoint slope.
        dn = model(x, tn * ones)
        dn_2 = model(x + (h / 2) * dn, (tn + h / 2) * ones)
        x = x + h * dn_2
        if callback is not None:
            callback({"x": x, "tn": tn, "dn": dn, "dn_2": dn_2})
    return x
17,502 | import re
from g2p_en import G2p
from string import punctuation
def read_lexicon(lex_path):
    """Load a pronunciation lexicon mapping lower-cased words to phoneme lists.

    Each line is expected to be ``WORD PH1 PH2 ...``; only the first
    occurrence of a word is kept.
    """
    lexicon = {}
    with open(lex_path) as f:
        for line in f:
            tokens = re.split(r"\s+", line.strip("\n"))
            word, phones = tokens[0], tokens[1:]
            # setdefault keeps the first pronunciation seen for a word.
            lexicon.setdefault(word.lower(), phones)
    return lexicon
17,503 | import re
from g2p_en import G2p
from string import punctuation
def preprocess_english(text, lexicon):
    """Convert English text to a space-separated phoneme string.

    Words present in *lexicon* use their lexicon pronunciation; anything
    else falls back to g2p_en grapheme-to-phoneme conversion.
    """
    text = text.rstrip(punctuation)
    g2p = G2p()
    phones = []
    # NOTE: inside the character class, ``\s+`` matches a whitespace char or
    # a literal '+'; the capturing group keeps separators as their own tokens.
    for w in re.split(r"([,;.\-\?\!\s+])", text):
        if w.lower() in lexicon:
            phones.extend(lexicon[w.lower()])
        else:
            phones.extend(p for p in g2p(w) if p != " ")
    joined = "}{".join(phones)
    # Replace lone punctuation tokens (e.g. "{,}") with a short-pause marker.
    joined = re.sub(r"\{[^\w\s]?\}", "{sp}", joined)
    return joined.replace("}{", " ")
17,504 | import re
from unidecode import unidecode
from .numbers import normalize_numbers
def lowercase(text):
    """Return *text* converted to lower case."""
    return text.lower()
def collapse_whitespace(text):
    """Replace each whitespace run in *text* with a single space.

    Relies on the module-level pattern ``_whitespace_re``.
    """
    return re.sub(_whitespace_re, " ", text)
The provided code snippet includes necessary dependencies for implementing the `basic_cleaners` function. Write a Python function `def basic_cleaners(text)` to solve the following problem:
Basic pipeline that lowercases and collapses whitespace without transliteration.
Here is the function:
def basic_cleaners(text):
    """Basic pipeline that lowercases and collapses whitespace without transliteration."""
    return collapse_whitespace(lowercase(text))
17,505 | import re
from unidecode import unidecode
from .numbers import normalize_numbers
def lowercase(text):
    """Return *text* converted to lower case."""
    return text.lower()
def collapse_whitespace(text):
    """Replace each whitespace run in *text* with a single space.

    Relies on the module-level pattern ``_whitespace_re``.
    """
    return re.sub(_whitespace_re, " ", text)
def convert_to_ascii(text):
    """Transliterate *text* to its closest ASCII form via unidecode."""
    return unidecode(text)
The provided code snippet includes necessary dependencies for implementing the `transliteration_cleaners` function. Write a Python function `def transliteration_cleaners(text)` to solve the following problem:
Pipeline for non-English text that transliterates to ASCII.
Here is the function:
def transliteration_cleaners(text):
    """Pipeline for non-English text that transliterates to ASCII."""
    return collapse_whitespace(lowercase(convert_to_ascii(text)))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.