| repo | file | code | file_length | avg_line_length | max_line_length | extension_type |
|---|---|---|---|---|---|---|
archive-query-log
|
archive-query-log-main/archive_query_log/results/test/test_utils.py
|
from pathlib import Path
from webbrowser import open_new_tab
from approvaltests import verify_as_json, ApprovalException
from approvaltests.core.options import Options
from approvaltests.namer.cli_namer import CliNamer
from slugify import slugify
from tqdm.auto import tqdm
from archive_query_log import PROJECT_DIRECTORY_PATH
from archive_query_log.config import SERVICES
from archive_query_log.download.iterable import ArchivedRawSerps
from archive_query_log.model import ArchivedParsedSerp, ArchivedRawSerp
from archive_query_log.results.parse import ArchivedParsedSerpParser
_expected_dir = PROJECT_DIRECTORY_PATH / \
"data/manual-annotations/" \
"archived-raw-serps/expected/"
_warc_dir = PROJECT_DIRECTORY_PATH / \
"data/manual-annotations/" \
"archived-raw-serps/warcs/"
def verify_serp_parsing(
wayback_raw_url: str,
service_name: str | None = None,
) -> None:
if service_name is None:
services = SERVICES.values()
else:
services = [SERVICES[service_name]]
result_parsers = []
interpreted_query_parsers = []
for service in services:
result_parsers += service.results_parsers
interpreted_query_parsers += service.interpreted_query_parsers
parser = ArchivedParsedSerpParser(
result_parsers,
interpreted_query_parsers,
)
archived_raw_serp = _find_archived_raw_serp(wayback_raw_url)
archived_parsed_serp = parser.parse_single(archived_raw_serp)
try:
_verify_archived_parsed_serp_results(
archived_raw_serp,
archived_parsed_serp,
service_name,
)
except ApprovalException as e:
open_new_tab(archived_raw_serp.raw_archive_url)
raise e
def _find_archived_raw_serp(wayback_raw_url: str) -> ArchivedRawSerp:
num_files = sum(1 for _ in _warc_dir.glob("*.warc.gz"))
print(
f"Searching for record with URL {wayback_raw_url} in {_warc_dir} "
f"({num_files} files)"
)
records = ArchivedRawSerps(path=Path(_warc_dir))
records = tqdm(
records,
desc="Find record for URL",
unit="record",
)
for record in records:
if record.raw_archive_url == wayback_raw_url:
return record
raise ValueError(
f'Could not find record with URL {wayback_raw_url} in {_warc_dir}')
_schema = ArchivedParsedSerp.schema()
def _verify_archived_parsed_serp_results(
archived_raw_serp: ArchivedRawSerp,
archived_parsed_serp: ArchivedParsedSerp | None,
service: str | None = None,
) -> None:
if archived_parsed_serp is not None:
actual = _schema.dump(archived_parsed_serp)
else:
actual = None
query = archived_raw_serp.query
query = slugify(query)
query = query[:100]
name = f"{query}-{archived_raw_serp.timestamp}"
if service is not None:
name = f"{service}-{name}"
name = slugify(name)
verify_as_json(
actual,
options=Options().with_namer(
CliNamer(f"{_expected_dir}/{name}")
)
)
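# Hedged usage sketch (not part of the original module): a test would call
# verify_serp_parsing with the raw Wayback Machine URL of an annotated SERP;
# the URL below is a hypothetical placeholder.
# def test_google_serp_example():
#     verify_serp_parsing(
#         "https://web.archive.org/web/20220101000000id_/"
#         "https://www.google.com/search?q=example",
#         service_name="google",
#     )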
| 3,114
| 29.539216
| 75
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/test_services.py
|
from archive_query_log.config import SERVICES_PATH
from archive_query_log.services import read_services
def test_services_can_be_parsed():
assert read_services(
SERVICES_PATH,
ignore_parsing_errors=False
) is not None
| 244
| 23.5
| 52
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/search_forms.py
|
import re
import pandas as pd
from tqdm import tqdm
from requests_html import HTMLSession
from bs4 import BeautifulSoup
import argparse
pattern = re.compile(r'(?i).*search.*')
# noinspection PyUnresolvedReferences,PyBroadException
class SearchFormIdentifier:
"""
Class that takes in a CSV file containing services. The file needs
to have the following columns:
- rank
- service (name with TLD, e.g. 'amazon.com')
- TLD
The process_services method will look for input fields, search forms,
and search divs on the corresponding website,
indicate whether the service has such a field, and document
any HTML snippet found during the search.
"""
def __init__(self, csv_file='./ranked_services.csv', outfile_num=0,
start_row=0, end_row=None):
self.out_df = None
self.df = pd.read_csv(csv_file, header=None)
self.df.columns = ['rank', 'service', 'tld']
if end_row is None:
end_row = len(self.df)
self.df = self.df[start_row:end_row]
self.out_file = f'./search_forms_{outfile_num}.csv'
self.session = HTMLSession()
def process_services(self):
"""
Use pd.DataFrame.progress_apply to run the method check_url
on all services.
Store the results in the attribute out_df and save it to a CSV file.
"""
tqdm.pandas()
self.out_df = self.df.copy(deep=True)
# Search for the HTML elements and split the results
# into separate columns
self.out_df['tmp'] = self.df['service'].progress_apply(
func=self.check_url)
self.out_df[['input', 'search_form', 'search_div', 'input_snippets',
'form_snippets', 'div_snippets']] = \
pd.DataFrame(self.out_df['tmp'].tolist(), index=self.out_df.index)
self.out_df.drop('tmp', axis=1, inplace=True)
self.out_df.to_csv(self.out_file)
def check_url(self, url: str):
"""
Method to take in a service URL and look for
any relevant tags in the HTML.
It will look for any input tag and div or form with an id that
has the term 'search' in it
:param url: Service URL to be processed
:return: found_input: Boolean
found_search_form: Boolean
found_search_div: Boolean
input_snippets: List of identified HTML-snippets
form_snippets: List of identified HTML-snippets
div_snippets: List of identified HTML-snippets
"""
url = url if url.startswith('http') else 'https://' + url
# If regular requesting fails, try to get a recent snapshot
# in the internet archive
try:
response = self.session.get(url, timeout=10)
except Exception:
try:
response = self.get_internet_archive_html(url=url)
except Exception:
return None, None, None
try:
html = response.html.html
except Exception:
return None, None, None
# Render JavaScript if necessary
if '<script>' in html:
try:
response.html.render(timeout=10)
html = response.html.html
except Exception:
return None, None, None
# Look for elements with the pattern in them and save the snippets
soup = BeautifulSoup(html, 'html.parser')
found_input, input_snippets = find_input_tag(soup=soup)
found_search_form, form_snippets = find_search_tag(
soup=soup, tag='form')
found_search_div, div_snippets = find_search_tag(soup=soup, tag='div')
return found_input, found_search_form, found_search_div, \
input_snippets, form_snippets, div_snippets
def get_internet_archive_html(self, url: str, year=2022, byte_digits=4):
"""
Method to get the response for a URL from the Internet Archive.
It will return the most recent snapshot from the specified year
with a minimum size
:param url: URL for which to find a snapshot
:param year: The desired year for snapshots
:param byte_digits: Defines the minimum size via a leading digit of at
least 5 followed by at least byte_digits - 1 further digits.
Examples: byte_digits=4 -> bytes >= 5,000;
byte_digits=5 -> bytes >= 50,000
:return: The response from the Internet Archive Snapshot
"""
# Get the snapshots with a minimum number of bytes
# from the specified year and extract the timestamp of the most recent
search_url = f'https://web.archive.org/cdx/search/cdx' \
f'?url={url}&fl=original,timestamp,length&from={year}' \
f'&filter=mimetype:text/html&filter=statuscode:200' \
f'&filter=length:.*[5-9][0-9]%7B{byte_digits - 1},%7D.*'
try:
response = self.session.get(search_url, timeout=10)
snapshot_list = response.html.html.split('\n')
timestamp = snapshot_list[-2].split(" ")[1]
# Request the corresponding HTML
ia_url = f'https://web.archive.org/web/{timestamp}/{url}/'
return self.session.get(ia_url, timeout=10)
except Exception:
raise RuntimeError(
'Failed to request an internet archive snapshot')
def services_no_search(self):
# Element-wise comparison is needed here; `is False` always evaluates
# to False for a pandas Series.
return self.out_df[(self.out_df['input'] == False) & (  # noqa: E712
self.out_df['search_form'] == False) &
(self.out_df['search_div'] == False)]
def find_input_tag(soup: BeautifulSoup):
"""
Function to look for HTML-Elements with the input tag
:param soup: A bs4.BeautifulSoup instance with the HTML
to conduct tag search on
:return: found: Boolean to indicate if search was successful
snippet_list: List of matching HTML snippets that were found
"""
snippet_list = soup.findAll(re.compile(r'(?i).*input.*'))
found = len(snippet_list) > 0
return found, snippet_list
def find_search_tag(soup: BeautifulSoup, tag='form'):
"""
Function to look for HTML-Elements with the specified tag
that contain the pattern "search" in some way
:param soup: A bs4.BeautifulSoup instance with the HTML
to conduct tag search on
tag: Name of relevant tags
:return: found: Boolean to indicate if search was successful
snippet_list: List of matching HTML snippets that were found
"""
snippet_list = soup.findAll(tag, {"id": pattern})
found = len(snippet_list) > 0
return found, snippet_list
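# Hedged usage sketch (illustrative only):
# soup = BeautifulSoup('<form id="searchForm"><input></form>', 'html.parser')
# find_input_tag(soup)               # -> (True, [<input/>])
# find_search_tag(soup, tag='form')  # -> (True, [<form id="searchForm">...])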
if __name__ == "__main__":
# Parse input
parser = argparse.ArgumentParser(
prog='Search form identification',
description='Takes in a CSV-File of services '
'and looks for search forms in their HTML')
parser.add_argument('-f', '--csv_file', type=str)
parser.add_argument('-o', '--outfile_num', type=str)
parser.add_argument('-s', '--start_row', type=int)
parser.add_argument('-e', '--end_row', type=int)
args = parser.parse_args()
# Set/Update default values
csv_file = './alexa-top-1m-fused-domains-rrf-top-10000.csv' \
if args.csv_file is None else args.csv_file
outfile_num = "0" if args.outfile_num is None else args.outfile_num
start_row = 0 if args.start_row is None else args.start_row
end_row = None if args.end_row is None else args.end_row
# Run the search for specified services
identifier = SearchFormIdentifier(
csv_file=csv_file, outfile_num=outfile_num,
start_row=start_row, end_row=end_row
)
identifier.process_services()
| 7,993
| 38.97
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/alexa.py
|
from asyncio import run
from csv import reader
from dataclasses import dataclass
from datetime import datetime
from functools import cached_property
from io import TextIOWrapper
from itertools import islice
from math import floor, log10
from pathlib import Path
from tempfile import gettempdir
from typing import Sized, Iterable, Any, Iterator, Mapping, Set
from zipfile import ZipFile
from publicsuffixlist import PublicSuffixList
from ranx import Run, fuse
from requests import get, HTTPError
from requests.exceptions import ChunkedEncodingError
from tqdm.auto import tqdm
from archive_query_log.model import ArchivedUrl
from archive_query_log.download.raw import WebArchiveRawDownloader
from archive_query_log.util.http_session import backoff_session
@dataclass(frozen=True)
class AlexaTop1MArchivedUrls(Sized, Iterable[ArchivedUrl]):
"""
Get all archived URLs of Alexa top-1M rankings.
"""
output_path: Path
cdx_api_url: str
@cached_property
def _params(self) -> Iterable[tuple[str, Any]]:
return (
("url", "s3.amazonaws.com/alexa-static/top-1m.csv.zip"),
("fl", "timestamp,original"),
("filter", "mimetype:application/zip"),
("filter", "statuscode:200"),
)
@cached_property
def _result_path(self) -> Path:
return self.output_path
@cached_property
def _cache_path(self) -> Path:
cache_path = Path(gettempdir()) / self.output_path.stem
cache_path.mkdir(exist_ok=True)
return cache_path
@cached_property
def num_pages(self) -> int:
num_pages_response = get(
self.cdx_api_url,
params=[
*self._params,
("showNumPages", True),
],
)
return int(num_pages_response.text)
def _page_cache_path(self, page: int) -> Path:
num_digits = floor(log10(self.num_pages)) + 1
return self._cache_path / f"page_{page:{num_digits}}.jsonl"
def _fetch_page(self, page: int) -> Path | None:
path = self._page_cache_path(page)
if path.exists():
# Page was already downloaded, skip it.
assert path.is_file()
return path
session = backoff_session()
try:
response = session.get(
self.cdx_api_url,
params=[
*self._params,
("page", page),
],
timeout=10 * 60 # 10 minutes, better safe than sorry ;)
)
except HTTPError:
print(f"Failed to load page: {page}")
return None
except ChunkedEncodingError:
print(f"Failed to read page contents: {page}")
return None
schema = ArchivedUrl.schema()
with path.open("wt") as file:
for line in response.text.splitlines(keepends=False):
timestamp_string, url = line.split()
timestamp = datetime.strptime(
timestamp_string,
"%Y%m%d%H%M%S"
)
archived_url = ArchivedUrl(url, int(timestamp.timestamp()))
file.write(schema.dumps(archived_url))
file.write("\n")
return path
def _fetch_pages(self) -> None:
"""
Fetch archived URLs from each individual CDX page.
"""
for page in tqdm(
range(self.num_pages),
desc="Fetch urls",
unit="page",
):
self._fetch_page(page)
def _missing_pages(self) -> set[int]:
"""
Find missing pages.
Most often, the missing pages are caused by request timeouts.
"""
missing_pages = set()
for page in range(self.num_pages):
path = self._page_cache_path(page)
if not path.exists() or not path.is_file():
missing_pages.add(page)
return missing_pages
def _merge_cached_pages(self) -> None:
"""
Merge archived URLs from all cached pages.
"""
with self.output_path.open("wt") as file:
for page in tqdm(
range(self.num_pages),
desc="Merge urls",
unit="page",
):
path = self._page_cache_path(page)
with path.open("rt") as page_file:
lines = page_file
for line in lines:
file.write(line)
def fetch(self) -> None:
if self.output_path.exists():
assert self.output_path.is_file()
return
print(f"Storing temporary files at: {self._cache_path}")
self._fetch_pages()
missing_pages = self._missing_pages()
if len(missing_pages) > 0:
raise RuntimeError(
f"Pages missing: {missing_pages}\n"
f"Consider retrying the download, as some requests "
f"might only have timed out."
)
self._merge_cached_pages()
for path in self._cache_path.iterdir():
path.unlink()
self._cache_path.rmdir()
def __len__(self) -> int:
self.fetch()
with self.output_path.open("rt") as file:
return sum(1 for _ in file)
def __iter__(self) -> Iterator[ArchivedUrl]:
self.fetch()
schema = ArchivedUrl.schema()
with self.output_path.open("rt") as file:
for line in file:
yield schema.loads(line)
@dataclass(frozen=True)
class AlexaTop1MFusedDomains(Sized, Iterable[Path]):
"""
Fuse the top-1,000 domains of all archived Alexa top-1M rankings.
"""
data_directory_path: Path
cdx_api_url: str
fusion_method: str = "rrf"
max_domains_per_ranking: int | None = 1000
deduplicate_per_ranking: bool = True
deduplicate_fused_ranking: bool = False
@cached_property
def _urls(self) -> Set[ArchivedUrl]:
alexa_path = self.data_directory_path / \
"alexa-top-1m-archived-urls.jsonl"
urls = AlexaTop1MArchivedUrls(
output_path=alexa_path,
cdx_api_url=self.cdx_api_url
)
return set(urls)
@cached_property
def _result_path(self) -> Path:
name = f"alexa-top-1m-fused-domains-{self.fusion_method}" \
f"-top-{self.max_domains_per_ranking}.csv"
return self.data_directory_path / name
@cached_property
def _cache_path(self) -> Path:
cache_path = Path(gettempdir()) / "alexa-top-1m-fused-domains"
cache_path.mkdir(exist_ok=True)
return cache_path
def _fetch_rankings(self) -> Iterable[Path]:
downloader = WebArchiveRawDownloader()
paths: Mapping[ArchivedUrl, Path] = run(downloader.download(
self._urls,
self._cache_path,
lambda url: f"{url.timestamp}.zip",
))
if len(paths) < len(self._urls):
raise RuntimeError("Some downloads were unsuccessful. Try again.")
return paths.values()
def _iter_deduplicated(self, domains: Iterable[str]) -> Iterator[str]:
public_suffix_list = PublicSuffixList()
second_level_domains = set()
for domain in domains:
public_suffix = public_suffix_list.publicsuffix(domain)
second_level_domain = public_suffix_list.subdomain(domain, 0)
if second_level_domain is None:
second_level_domain = public_suffix
second_level_domain = second_level_domain.removesuffix(
f".{public_suffix}"
)
if second_level_domain in second_level_domains:
continue
second_level_domains.add(second_level_domain)
yield domain
def _fuse_cached_rankings(self) -> None:
runs: list[Run] = []
num_runs = sum(1 for _ in self._cache_path.iterdir())
for path in tqdm(
self._cache_path.iterdir(),
total=num_runs,
desc="Read ranking",
unit="ranking",
):
with path.open("rb") as file:
with ZipFile(file) as zip_file:
with zip_file.open("top-1m.csv", "r") as csv_file:
with TextIOWrapper(csv_file) as lines:
domains = (line[1] for line in reader(lines))
if self.deduplicate_per_ranking:
domains = self._iter_deduplicated(domains)
if self.max_domains_per_ranking is not None:
domains = islice(
domains,
self.max_domains_per_ranking,
)
scores: dict[str, float] = {
domain: 1_000_000 - index
for index, domain in enumerate(domains)
}
run = Run({"_": scores})
runs.append(run)
print(f"Fusing {len(runs)} rankings.")
combined_run = fuse(
runs=runs,
norm="min-max",
method=self.fusion_method,
).to_dict()
items = sorted(
combined_run["_"].items(),
key=lambda item: item[1],
reverse=True,
)
domains = (domain for domain, _ in items)
if self.deduplicate_fused_ranking and not self.deduplicate_per_ranking:
domains = self._iter_deduplicated(domains)
public_suffix_list = PublicSuffixList(only_icann=True)
with self._result_path.open("wt") as file:
for index, domain in enumerate(domains):
public_suffix = public_suffix_list.publicsuffix(domain)
file.write(f"{index + 1},{domain},{public_suffix}\n")
def fetch(self) -> None:
if self._result_path.exists():
assert self._result_path.is_file()
return
print(f"Storing temporary files at: {self._cache_path}")
self._fetch_rankings()
self._fuse_cached_rankings()
# for path in self._cache_path.iterdir():
# path.unlink()
# self._cache_path.rmdir()
def __len__(self) -> int:
self.fetch()
with self._result_path.open("rt") as file:
return sum(1 for _ in file)
def __iter__(self) -> Iterator[ArchivedUrl]:
self.fetch()
schema = ArchivedUrl.schema()
with self._result_path.open("rt") as file:
for line in file:
yield schema.loads(line)
| 10,694
| 34.413907
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/__init__.py
|
from pathlib import Path
from typing import Mapping
from yaml import safe_load
from archive_query_log.model import Service
def read_services(
path: Path, ignore_parsing_errors=True
) -> Mapping[str, Service]:
with path.open("r") as file:
services_dict = safe_load(file)
services = []
for service_dict in services_dict:
try:
service = Service.schema().load(service_dict)
services += [(service.name, service)]
except Exception as exception:
if not ignore_parsing_errors:
raise ValueError(
f"Could not parse service {service_dict['name']}"
) from exception
if not ignore_parsing_errors:
service_names = set()
for name, service in services:
if name in service_names:
raise ValueError(f"Duplicate service name {name}")
service_names.add(name)
return {
name: service
for name, service in services
}
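# Hedged usage sketch (not part of the original module):
# from archive_query_log.config import SERVICES_PATH
# services = read_services(SERVICES_PATH, ignore_parsing_errors=False)
# print(f"Parsed {len(services)} services")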
| 1,114
| 29.972222
| 74
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/update_yaml.py
|
from typing import Sequence
import pandas as pd
import yaml
from pandas import concat, DataFrame
from archive_query_log import DATA_DIRECTORY_PATH
from archive_query_log.cli.external import load_services, load_domains, \
service_domains, load_url_prefixes, \
load_query_parsers, query_parser, load_page_offset_parsers, \
page_offset_parser_series
services_file = DATA_DIRECTORY_PATH / "selected-services_overwrite.yaml"
def get_spreadsheet_data(
first_service="google", last_service="hrblock"
) -> DataFrame:
"""
Get parser information from the Google spreadsheet as a dataframe
:param first_service: First service to obtain data for (including)
:param last_service: Last service to obtain data for (including)
:return: Dataframe with parser information for each service
"""
services = load_services()
idx_first = services["name"].ne(first_service).idxmin()
idx_last = services["name"].ne(last_service).idxmin()
services = services.loc[idx_first:idx_last, :]
domains = load_domains()
services["domains"] = [
service_domains(domains, row)
for _, row in services.iterrows()
]
query_parsers = concat(
[
load_url_prefixes(),
load_query_parsers()[["query_parser"]]
],
axis="columns")
query_parsers.dropna(inplace=True)
services["query_parsers"] = [
sorted((
query_parser(row)
for _, row in
query_parsers[
query_parsers["name"].str.fullmatch(service["name"])
].iterrows()
), key=lambda qp: str(qp["url_pattern"]))
for _, service in services.iterrows()
]
page_offset_parsers = concat(
[
load_url_prefixes(),
load_page_offset_parsers()[["page_offset_parser"]]
],
axis="columns")
services["page_parsers"] = page_offset_parser_series(
page_offset_parsers, services, count="pages")
services["offset_parsers"] = page_offset_parser_series(
page_offset_parsers, services, count="results")
return services
def update_yaml_file(
first_service="google", last_service="hrblock", overwrite=False
):
"""
Update the local yaml file with the data from the Google spreadsheet
:param first_service: First service to update (including)
:param last_service: Last service to update (including)
:param overwrite: False: Only add parsers for services
that don't have one.
True: Overwrite the yaml entries
using the spreadsheet.
"""
services = get_spreadsheet_data(
first_service=first_service, last_service=last_service)
with open(services_file, "r") as stream:
yaml_list = yaml.safe_load(stream)
update_func = overwrite_parsers if overwrite else update_empty_parsers
i = 0
start_update = False
while not start_update:
elem = yaml_list[i]
if elem["name"] == first_service:
start_update = True
else:
i += 1
while True:
elem = yaml_list[i]
name = elem["name"]
update_func(service_elem=elem, services=services)
i += 1
if name == last_service:
break
with services_file.open("wt") as file:
yaml.dump(yaml_list, stream=file, sort_keys=False)
def update_empty_parsers(service_elem: dict, services: DataFrame):
offset_parsers = service_elem["offset_parsers"]
page_parsers = service_elem["page_parsers"]
query_parsers = service_elem["query_parsers"]
# If no parsers are set, update the element using the services df
if len(offset_parsers) + len(page_parsers) < 1:
set_page_offset_parsers(service_elem=service_elem, services=services)
if len(query_parsers) == 0:
set_query_parsers(service_elem=service_elem, services=services)
def overwrite_parsers(service_elem: dict, services: DataFrame):
set_page_offset_parsers(service_elem=service_elem, services=services)
set_query_parsers(service_elem=service_elem, services=services)
def set_page_offset_parsers(service_elem: dict, services: DataFrame) -> None:
name = service_elem["name"]
row = services.loc[services["name"] == name, :]
service_elem.update({"page_parsers": row["page_parsers"].values[0],
"offset_parsers": row["offset_parsers"].values[0]})
def set_query_parsers(service_elem: dict, services: DataFrame) -> None:
name = service_elem["name"]
row = services.loc[services["name"] == name, :]
service_elem.update({"query_parsers": row["query_parsers"].values[0]})
def update_ranks(df: pd.DataFrame, yaml_list: Sequence[dict]):
for i, elem in enumerate(yaml_list):
name = elem["name"]
try:
rank = int(df.loc[df["service"] == name, "rank"].values[0])
except Exception:
rank = 999999
yaml_list[i]["alexa_rank"] = rank
return yaml_list
def sort_by_rank(yaml_list: Sequence[dict]):
return sorted(yaml_list, key=lambda d: d['alexa_rank'])
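# Hedged usage sketch (assumes the Google spreadsheet helpers in
# archive_query_log.cli.external are configured): update only services
# that have no parsers yet, over the default service range.
# update_yaml_file(first_service="google", last_service="hrblock",
#                  overwrite=False)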
| 5,168
| 33.925676
| 78
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/services/aggregate_services.py
|
import pandas as pd
def combine_alexa_manual(manual_csv: str, alexa_csv: str):
"""
Function to combine the CSV created by search_forms.py
with a manual collection of search engines
:param manual_csv: Path to the CSV containing a manual collection
of search engines
:param alexa_csv: Path to the CSV containing the automatically
created list of services
:return: A pd.DataFrame that combined both source CSVs
"""
m_df = pd.read_csv(manual_csv)
a_df = pd.read_csv(alexa_csv)
a_df.drop("Unnamed: 0", axis=1, inplace=True)
# Get identically formatted service names for both
m_df["Service"] = m_df["Service"].str.lower()
m_df.rename({"Service": "service"}, axis=1, inplace=True)
a_df["service"] = a_df["service"].str.split(".").str[0]
# Create new alexa df with unique service names and store
# the duplicates to be added later
a_df_unique = a_df.drop_duplicates(subset="service")
a_df_merged = a_df.merge(a_df_unique, on="rank", how="left",
indicator=True)
a_df_dup = a_df_merged[a_df_merged["_merge"] == "left_only"]
a_df_dup = a_df_dup.loc[:, ~a_df_dup.columns.str.contains("_y")]
a_df_dup.columns = a_df_dup.columns.str.rstrip("_x")
a_df_dup.drop("_merge", axis=1, inplace=True)
# Create merged df by performing an outer join on both df
merged_df = pd.concat([a_df_unique.set_index("service"),
m_df.set_index("service")], axis=1,
join="outer").reset_index()
merged_df = pd.concat([merged_df, a_df_dup], ignore_index=True)
cols_reordered = ["rank", "service", "tld", "Search Category", "input",
"search_form", "search_div",
'input_snippets', 'form_snippets', 'div_snippets']
merged_df = merged_df[cols_reordered]
merged_df["rank"].fillna(99999, inplace=True)
merged_df.sort_values("rank", inplace=True)
return merged_df
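# Hedged usage sketch; the CSV file names are hypothetical placeholders.
# combined = combine_alexa_manual(
#     manual_csv="manual_search_engines.csv",
#     alexa_csv="search_forms_0.csv",
# )
# combined.to_csv("aggregated_services.csv", index=False)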
| 2,010
| 42.717391
| 75
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/http_session.py
|
from requests import Session
from requests.adapters import HTTPAdapter
from urllib3 import Retry
def backoff_session() -> Session:
session = Session()
retries = Retry(
total=10,
backoff_factor=1,
status_forcelist=[502, 503, 504],
)
session.mount("https://", HTTPAdapter(max_retries=retries))
return session
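# Hedged usage sketch: the session transparently retries requests that fail
# with 502/503/504, backing off exponentially between attempts.
# session = backoff_session()
# response = session.get("https://web.archive.org/", timeout=60)
# response.raise_for_status()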
| 353
| 22.6
| 63
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/html.py
|
from bleach import clean
from bs4 import Tag
_HIGHLIGHT_TAGS = ["em", "strong", "mark", "b", "i", "u"]
def clean_html(html: str | Tag) -> str:
if isinstance(html, Tag):
html = html.decode_contents()
html = clean(
html,
tags=_HIGHLIGHT_TAGS,
attributes=[],
protocols=[],
strip=True,
strip_comments=True,
)
for tag in _HIGHLIGHT_TAGS:
html = html.replace(f"<{tag}>", "<em>")
html = html.replace(f"</{tag}>", "</em>")
html = html.strip()
return html
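# Hedged usage sketch: highlight tags are normalized to <em>, all other
# markup is stripped (expected output shown approximately).
# clean_html("<strong>Foo</strong> <span>bar</span>")  # -> "<em>Foo</em> bar"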
| 546
| 22.782609
| 57
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/archive_http.py
|
from contextlib import asynccontextmanager
from aiohttp import ClientSession, TCPConnector, ClientTimeout, \
ClientConnectorError, ServerTimeoutError, ClientPayloadError
from aiohttp_retry import RetryClient, JitterRetry
@asynccontextmanager
async def archive_http_session(limit: int = 10) -> ClientSession:
# The Wayback Machine doesn't seem to support more than 10
# parallel connections from the same IP.
connector = TCPConnector(
limit_per_host=limit,
)
# Graceful timeout as the Wayback Machine is sometimes very slow.
timeout = ClientTimeout(
total=15 * 60,
connect=5 * 60, # Setting up a connection is especially slow.
sock_read=5 * 60,
)
async with ClientSession(
connector=connector,
timeout=timeout,
) as session:
yield session
@asynccontextmanager
async def archive_http_client(limit: int = 10) -> RetryClient:
retry_options = JitterRetry(
attempts=10,
start_timeout=10, # 10 seconds
max_timeout=15 * 60, # 15 minutes
statuses={429, 502, 503, 504}, # server errors
exceptions={
ClientConnectorError,
ServerTimeoutError,
ClientPayloadError,
},
)
async with archive_http_session(limit) as session:
retry_client = RetryClient(
client_session=session,
retry_options=retry_options,
)
yield retry_client
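# Hedged usage sketch (not part of the original module): fetch a page with
# retries; the URL is a hypothetical placeholder.
# from asyncio import run
# async def main():
#     async with archive_http_client(limit=10) as client:
#         async with client.get("https://web.archive.org/") as response:
#             print(response.status)
# run(main())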
| 1,466
| 30.212766
| 70
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/iterable.py
|
from typing import runtime_checkable, TypeVar, Protocol, Sized, Iterable
T = TypeVar("T", covariant=True)
@runtime_checkable
class SizedIterable(Sized, Iterable[T], Protocol):
pass
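# Hedged usage sketch: any object providing __len__ and __iter__ matches
# the protocol at runtime.
# isinstance([1, 2, 3], SizedIterable)  # -> True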
| 188
| 20
| 72
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/text.py
|
from typing import IO, Iterator
_LINE_COUNT_BUFFER_SIZE = 1024 * 1024
def _chunks(reader: IO[bytes]) -> Iterator[bytes]:
buffer = reader.read(_LINE_COUNT_BUFFER_SIZE)
while buffer:
yield buffer
buffer = reader.read(_LINE_COUNT_BUFFER_SIZE)
def count_lines(file: IO[bytes]) -> int:
return sum(buffer.count(b"\n") for buffer in _chunks(file))
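# Hedged usage sketch: count_lines expects a binary file object; the file
# name is a hypothetical placeholder.
# with open("archived-urls.jsonl", "rb") as file:
#     print(count_lines(file))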
| 374
| 24
| 63
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/urls.py
|
from urllib.parse import quote
def safe_quote_url(url: str) -> str:
return quote(url, safe="")
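# Hedged usage sketch:
# safe_quote_url("https://example.org/?q=test")
# # -> 'https%3A%2F%2Fexample.org%2F%3Fq%3Dtest'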
| 101
| 16
| 36
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/util/serialization.py
|
from marshmallow.fields import Field
from archive_query_log.model import HighlightedText
class HighlightedTextField(Field):
"""
Field that serializes to an HTML string and deserializes
to a highlighted text.
"""
def _serialize(self, value: HighlightedText, attr, obj, **kwargs):
if value is None:
return None
return value.html
def _deserialize(self, value: str, attr, data, **kwargs):
if value is None:
return None
return HighlightedText(value)
| 530
| 24.285714
| 70
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/queries/iterable.py
|
from dataclasses import dataclass
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from typing import Sized, Iterable, Iterator, IO
from archive_query_log.model import ArchivedQueryUrl
from archive_query_log.util.text import count_lines
@dataclass(frozen=True)
class ArchivedQueryUrls(Sized, Iterable[ArchivedQueryUrl]):
"""
Read archived query URLs from a JSONL file.
"""
path: Path
"""
Path where the query URLs are stored in JSONL format.
"""
def __post_init__(self):
self._check_urls_path()
def _check_urls_path(self):
if not self.path.exists() or not self.path.is_file():
raise ValueError(
f"URLs path must be a file: {self.path}"
)
def __len__(self) -> int:
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
return count_lines(gzip_file)
def __iter__(self) -> Iterator[ArchivedQueryUrl]:
schema = ArchivedQueryUrl.schema()
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
with TextIOWrapper(gzip_file) as text_file:
for line in text_file:
yield schema.loads(line)
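# Hedged usage sketch; the path below is a hypothetical placeholder.
# urls = ArchivedQueryUrls(
#     Path("archived-query-urls/example/0000000000.jsonl.gz"))
# print(len(urls))
# for url in urls:
#     print(url.query, url.page, url.offset)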
| 1,384
| 29.777778
| 64
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/queries/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/archive_query_log/queries/parse.py
|
from dataclasses import dataclass
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from typing import Sequence, NamedTuple, Pattern
from urllib.parse import parse_qsl, unquote, quote
from tqdm.auto import tqdm
from archive_query_log.model import ArchivedQueryUrl, \
ArchivedUrl, PageParser, QueryParser, OffsetParser, Service
from archive_query_log.urls.iterable import ArchivedUrls
@dataclass(frozen=True)
class QueryParameterQueryParser(QueryParser):
url_pattern: Pattern[str]
parameter: str
def parse(self, url: ArchivedUrl) -> str | None:
if self.url_pattern.search(url.url) is None:
return None
for key, value in parse_qsl(url.split_url.query):
if key == self.parameter:
return value.strip()
return None
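# Hedged usage sketch (requires `import re`; assumes the QueryParser base
# class adds no required fields of its own):
# parser = QueryParameterQueryParser(
#     url_pattern=re.compile(r"^https?://www\.example\.org/search\?"),
#     parameter="q",
# )
# parser.parse(ArchivedUrl("https://www.example.org/search?q=hello", 0))
# # -> "hello"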
@dataclass(frozen=True)
class FragmentParameterQueryParser(QueryParser):
url_pattern: Pattern[str]
parameter: str
def parse(self, url: ArchivedUrl) -> str | None:
if self.url_pattern.search(url.url) is None:
return None
for key, value in parse_qsl(url.split_url.fragment):
if key == self.parameter:
return value.strip()
return None
@dataclass(frozen=True)
class PathSegmentQueryParser(QueryParser):
url_pattern: Pattern[str]
segment: int
delimiter: str = "/"
remove_patterns: Sequence[Pattern[str]] = tuple()
space_patterns: Sequence[Pattern[str]] = tuple()
def parse(self, url: ArchivedUrl) -> str | None:
if self.url_pattern.search(url.url) is None:
return None
path = url.split_url.path
path_segments = path.split(self.delimiter)
if len(path_segments) <= self.segment:
return None
path_segment = path_segments[self.segment]
for pattern in self.remove_patterns:
path_segment = pattern.sub("", path_segment)
if len(self.space_patterns) > 0:
for pattern in self.space_patterns:
path_segment = pattern.sub(" ", path_segment)
path_segment = path_segment.replace(" ", " ")
return unquote(path_segment).strip()
@dataclass(frozen=True)
class FragmentSegmentQueryParser(QueryParser):
url_pattern: Pattern[str]
segment: int
delimiter: str = "/"
remove_patterns: Sequence[Pattern[str]] = tuple()
space_patterns: Sequence[Pattern[str]] = tuple()
def parse(self, url: ArchivedUrl) -> str | None:
if self.url_pattern.search(url.url) is None:
return None
path = url.split_url.fragment
path_segments = path.split(self.delimiter)
if len(path_segments) <= self.segment:
return None
path_segment = path_segments[self.segment]
for pattern in self.remove_patterns:
path_segment = pattern.sub("", path_segment)
if len(self.space_patterns) > 0:
for pattern in self.space_patterns:
path_segment = pattern.sub(" ", path_segment)
path_segment = path_segment.replace(" ", " ")
return unquote(path_segment).strip()
@dataclass(frozen=True)
class QueryParameterPageOffsetParser(PageParser, OffsetParser):
url_pattern: Pattern[str]
parameter: str
def parse(self, url: ArchivedUrl) -> int | None:
if self.url_pattern.search(url.url) is None:
return None
for key, value in parse_qsl(url.split_url.query):
if key == self.parameter and value.isdigit():
return int(value)
return None
@dataclass(frozen=True)
class FragmentParameterPageOffsetParser(PageParser, OffsetParser):
url_pattern: Pattern[str]
parameter: str
def parse(self, url: ArchivedUrl) -> int | None:
if self.url_pattern.search(url.url) is None:
return None
for key, value in parse_qsl(url.split_url.fragment):
if key == self.parameter and value.isdigit():
return int(value)
return None
@dataclass(frozen=True)
class PathSegmentPageOffsetParser(PageParser, OffsetParser):
url_pattern: Pattern[str]
segment: int
delimiter: str = "/"
remove_patterns: Sequence[Pattern[str]] = tuple()
def parse(self, url: ArchivedUrl) -> int | None:
if self.url_pattern.search(url.url) is None:
return None
path = url.split_url.path
path_segments = path.split(self.delimiter)
if len(path_segments) <= self.segment:
return None
path_segment = path_segments[self.segment]
for pattern in self.remove_patterns:
path_segment = pattern.sub("", path_segment)
return int(path_segment)
@dataclass(frozen=True)
class FragmentSegmentPageOffsetParser(PageParser, OffsetParser):
url_pattern: Pattern[str]
segment: int
delimiter: str = "/"
remove_patterns: Sequence[Pattern[str]] = tuple()
def parse(self, url: ArchivedUrl) -> int | None:
if self.url_pattern.search(url.url) is None:
return None
path = url.split_url.fragment
path_segments = path.split(self.delimiter)
if len(path_segments) <= self.segment:
return None
path_segment = path_segments[self.segment]
for pattern in self.remove_patterns:
path_segment = pattern.sub("", path_segment)
return int(path_segment)
class _CdxPage(NamedTuple):
input_path: Path
output_path: Path
@dataclass(frozen=True)
class ArchivedQueryUrlParser:
query_parsers: Sequence[QueryParser]
page_parsers: Sequence[PageParser]
offset_parsers: Sequence[OffsetParser]
overwrite: bool = False
verbose: bool = False
def parse(
self,
input_path: Path,
output_path: Path,
focused: bool = False,
) -> None:
if output_path.exists() and not self.overwrite:
return
output_path.parent.mkdir(parents=True, exist_ok=True)
archived_urls = ArchivedUrls(input_path)
if self.verbose:
archived_urls = tqdm(
archived_urls,
desc="Parse SERP URLs",
unit="URL",
)
archived_serp_urls = (
self._parse_single(archived_url, focused)
for archived_url in archived_urls
)
archived_serp_urls = (
archived_serp_url
for archived_serp_url in archived_serp_urls
if archived_serp_url is not None
)
output_schema = ArchivedQueryUrl.schema()
# noinspection PyTypeChecker
with output_path.open("wb") as file, \
GzipFile(fileobj=file, mode="wb") as gzip_file, \
TextIOWrapper(gzip_file) as text_file:
for archived_serp_url in archived_serp_urls:
text_file.write(output_schema.dumps(archived_serp_url))
text_file.write("\n")
def _parse_single(
self,
archived_url: ArchivedUrl,
focused: bool,
) -> ArchivedQueryUrl | None:
query: str | None = None
for parser in self.query_parsers:
query = parser.parse(archived_url)
if query is not None:
break
if query is None:
return None
page: int | None = None
for parser in self.page_parsers:
page = parser.parse(archived_url)
if page is not None:
break
if focused and page is not None and page != 0:
return None
offset: int | None = None
for parser in self.offset_parsers:
offset = parser.parse(archived_url)
if offset is not None:
break
if focused and offset is not None and offset != 0:
return None
return ArchivedQueryUrl(
url=archived_url.url,
timestamp=archived_url.timestamp,
query=query,
page=page,
offset=offset,
)
def _service_pages(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None,
cdx_page: int | None,
) -> Sequence[_CdxPage]:
"""
List all CDX pages that need to be parsed.
"""
input_format_path = data_directory / "archived-urls"
output_format_path = data_directory / "archived-query-urls"
service_path = input_format_path / service.name
if domain is not None:
domain_paths = [service_path / domain]
else:
domain_paths = [
path
for path in service_path.iterdir()
if path.is_dir()
]
if focused:
domain_paths = [
path
for path in domain_paths
if any(
path.name.endswith(quote(prefix, safe=""))
for prefix in service.focused_url_prefixes
)
]
if cdx_page is not None:
assert domain is not None
assert len(domain_paths) == 1
cdx_page_paths = [domain_paths[0] / f"{cdx_page:010}.jsonl.gz"]
else:
cdx_page_paths = [
path
for domain_path in domain_paths
for path in domain_path.iterdir()
if (
path.is_file() and
len(path.name.removesuffix(".jsonl.gz")) == 10 and
path.name.removesuffix(".jsonl.gz").isdigit()
)
]
return [
_CdxPage(
input_path=cdx_page_path,
output_path=output_format_path / cdx_page_path.relative_to(
input_format_path
),
)
for cdx_page_path in cdx_page_paths
]
def parse_service(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None = None,
cdx_page: int | None = None,
):
pages = self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=cdx_page,
)
if len(pages) == 0:
return
if len(pages) > 1:
pages = tqdm(
pages,
desc="Parse archived SERP URLs",
unit="page",
)
for page in pages:
self.parse(page.input_path, page.output_path, focused)
| 10,716
| 30.895833
| 75
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/serps/iterable.py
|
from dataclasses import dataclass
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from typing import Sized, Iterable, Iterator, IO
from archive_query_log.model import ArchivedParsedSerp
from archive_query_log.util.text import count_lines
@dataclass(frozen=True)
class ArchivedParsedSerps(Sized, Iterable[ArchivedParsedSerp]):
"""
Read archived parsed SERPs from a JSONL file.
"""
path: Path
"""
Path where the parsed SERPs are stored in JSONL format.
"""
def __post_init__(self):
self._check_urls_path()
def _check_urls_path(self):
if not self.path.exists() or not self.path.is_file():
raise ValueError(
f"URLs path must be a file: {self.path}"
)
def __len__(self) -> int:
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
return count_lines(gzip_file)
def __iter__(self) -> Iterator[ArchivedParsedSerp]:
schema = ArchivedParsedSerp.schema()
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
with TextIOWrapper(gzip_file) as text_file:
for line in text_file:
yield schema.loads(line)
| 1,398
| 30.088889
| 64
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/serps/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/archive_query_log/urls/fetch.py
|
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from functools import cached_property
from gzip import GzipFile
from io import TextIOWrapper
from itertools import chain
from pathlib import Path
from typing import AbstractSet, Sequence, Any, Iterable, Iterator, NamedTuple
from urllib.parse import urlencode, quote
from aiohttp import ClientResponseError
from aiohttp_retry import RetryClient
from asyncio_pool import AioPool
from diskcache import Cache
from marshmallow import Schema
from tqdm.auto import tqdm
from archive_query_log import CDX_API_URL
from archive_query_log.model import ArchivedUrl, Service
from archive_query_log.util.archive_http import archive_http_client
class UrlMatchScope(Enum):
EXACT = "exact"
PREFIX = "prefix"
HOST = "host"
DOMAIN = "domain"
class _CdxPage(NamedTuple):
url: str
page: int
path: Path
@dataclass(frozen=True)
class ArchivedUrlsFetcher:
"""
Fetch archived URLs from the Wayback Machine's CDX API and
store them in a JSONL file.
"""
match_scope: UrlMatchScope = UrlMatchScope.EXACT
include_status_codes: AbstractSet[int] = frozenset({200})
exclude_status_codes: AbstractSet[int] = frozenset({})
include_mime_types: AbstractSet[str] = frozenset({"text/html"})
exclude_mime_types: AbstractSet[str] = frozenset({})
cdx_api_url: str = CDX_API_URL
overwrite: bool = False
@cached_property
def _base_params(self) -> Sequence[tuple[Any, Any]]:
params = [
("matchType", self.match_scope.value),
("fl", "timestamp,original"),
]
if len(self.include_mime_types) > 0:
pattern = "|".join(self.include_mime_types)
params.append(("filter", f"mimetype:{pattern}"))
if len(self.exclude_mime_types) > 0:
pattern = "|".join(self.exclude_mime_types)
params.append(("filter", f"mimetype:{pattern}"))
if len(self.include_status_codes) > 0:
pattern = "|".join(str(code) for code in self.include_status_codes)
params.append(("filter", f"statuscode:{pattern}"))
if len(self.exclude_status_codes) > 0:
pattern = "|".join(str(code) for code in self.exclude_status_codes)
params.append(("filter", f"statuscode:{pattern}"))
return params
def _params(self, url: str) -> Sequence[tuple[Any, Any]]:
return [
("url", url),
*self._base_params,
]
async def _num_pages(
self,
cache: Cache,
url: str,
client: RetryClient,
) -> int:
params = self._params(url)
params_hash = urlencode(params)
if params_hash in cache:
return cache[params_hash]
num_pages_params = [
*params,
("showNumPages", True),
]
url = f"{self.cdx_api_url}?{urlencode(num_pages_params)}"
async with client.get(url) as response:
text = await response.text()
try:
num_pages = int(text)
except Exception:
num_pages = 0
cache[params_hash] = num_pages
return num_pages
@staticmethod
def _parse_response_lines(
lines: Iterable[str],
schema: Schema,
) -> Iterator[str]:
for line in lines:
line = line.strip()
timestamp_string, url = line.split()
timestamp = datetime.strptime(
timestamp_string,
"%Y%m%d%H%M%S"
)
archived_url = ArchivedUrl(
url=url,
timestamp=int(timestamp.timestamp()),
)
yield schema.dumps(archived_url)
async def _fetch_page(
self,
page: _CdxPage,
client: RetryClient,
progress: tqdm | None = None,
) -> None:
if page.path.exists() and not self.overwrite:
if progress is not None:
progress.update()
return
params = [
*self._params(page.url),
("page", page.page),
]
url = f"{self.cdx_api_url}?{urlencode(params)}"
try:
async with client.get(url) as response:
response.raise_for_status()
text = await response.text()
schema = ArchivedUrl.schema()
lines = self._parse_response_lines(
text.splitlines(keepends=False),
schema,
)
page.path.parent.mkdir(parents=True, exist_ok=True)
# noinspection PyTypeChecker
with page.path.open("wb") as file, \
GzipFile(fileobj=file, mode="wb") as gzip_file, \
TextIOWrapper(gzip_file) as text_file:
for line in lines:
text_file.write(line)
text_file.write("\n")
return
except ClientResponseError as e:
page.path.unlink(missing_ok=True)
print(
f"HTTP error {e.status} when fetching {url}. "
f"Please try again later. Continuing with next URL."
)
return None
except BaseException as e:
page.path.unlink(missing_ok=True)
raise e
finally:
if progress is not None:
progress.update()
async def _service_pages(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None,
cdx_page: int | None,
client: RetryClient,
) -> Sequence[_CdxPage]:
"""
List all items that need to be downloaded.
"""
output_format_path = data_directory / "archived-urls"
output_format_path.mkdir(parents=True, exist_ok=True)
if cdx_page is not None:
assert domain is not None
service_path = output_format_path / service.name
domain_path = service_path / quote(domain, safe="")
cdx_page_path = domain_path / f"{cdx_page:010}.jsonl.gz"
return [
_CdxPage(
path=cdx_page_path,
url=domain,
page=cdx_page,
)
]
elif domain is not None:
async def cdx_page_pages(_cdx_page: int) -> Sequence[_CdxPage]:
return await self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=_cdx_page,
client=client,
)
with Cache(str(output_format_path / ".pages")) as cache:
num_cdx_pages = await self._num_pages(
cache,
domain,
client,
)
pool = AioPool(size=1)
if num_cdx_pages <= 0:
return []
return list(chain.from_iterable(
await pool.map(cdx_page_pages, range(num_cdx_pages))
))
else:
domains = service.domains
if focused:
domains = [
f"{domain}{url_prefix}"
for domain in domains
for url_prefix in service.focused_url_prefixes
]
else:
suffix_free_domains = []
for domain in sorted(domains, key=len):
if not any(
domain.endswith(suffix)
for suffix in suffix_free_domains
):
suffix_free_domains.append(domain)
domains = suffix_free_domains
if len(domains) == 0:
return []
domains = sorted(domains)
progress = tqdm(
domains,
total=len(domains),
desc="Fetching number of pages",
unit="domain",
)
async def domain_pages(_domain: str) -> Sequence[_CdxPage]:
res = await self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=_domain,
cdx_page=None,
client=client,
)
progress.update()
return res
pool = AioPool(size=100)
res: Sequence[Sequence[_CdxPage]]
res = await pool.map(domain_pages, domains)
for val in res:
if isinstance(val, Exception):
raise val
res = sorted(res, key=len, reverse=True)
return list(chain.from_iterable(res))
async def fetch_service(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None = None,
cdx_page: int | None = None,
):
async with archive_http_client(limit=5) as client:
pages = await self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=cdx_page,
client=client,
)
if len(pages) == 0:
return
progress = None
if len(pages) > 1:
progress = tqdm(
total=len(pages),
desc="Fetch archived URLs",
unit="page",
)
async def fetch_page(page: _CdxPage):
return await self._fetch_page(
page=page,
client=client,
progress=progress,
)
pool = AioPool(size=1000)
await pool.map(fetch_page, pages)
async def num_service_pages(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None = None,
cdx_page: int | None = None,
) -> int:
async with archive_http_client(limit=2) as client:
pages = await self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=cdx_page,
client=client,
)
return len(pages)
| 10,716
| 31.874233
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/urls/iterable.py
|
from dataclasses import dataclass
from gzip import GzipFile
from io import TextIOWrapper
from pathlib import Path
from typing import Sized, Iterable, Iterator, IO
from archive_query_log.model import ArchivedUrl
from archive_query_log.util.text import count_lines
@dataclass(frozen=True)
class ArchivedUrls(Sized, Iterable[ArchivedUrl]):
"""
Read archived URLs from a JSONL file.
"""
path: Path
"""
Path where the URLs are stored in JSONL format.
"""
def __post_init__(self):
self._check_urls_path()
def _check_urls_path(self):
if not self.path.exists() or not self.path.is_file():
raise ValueError(
f"URLs path must be a file: {self.path}"
)
def __len__(self) -> int:
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
return count_lines(gzip_file)
def __iter__(self) -> Iterator[ArchivedUrl]:
schema = ArchivedUrl.schema()
with self.path.open("rb") as file:
with GzipFile(fileobj=file, mode="rb") as gzip_file:
gzip_file: IO[bytes]
with TextIOWrapper(gzip_file) as text_file:
for line in text_file:
yield schema.loads(line)
| 1,347
| 28.955556
| 64
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/urls/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/archive_query_log/model/highlight.py
|
from dataclasses import dataclass
from functools import cached_property
from html.parser import HTMLParser
from typing import List, Union, Optional
class Highlight(str):
depth: int
def __new__(cls, value: str, depth: int):
result = super().__new__(cls, value)
result.depth = depth
return result
@dataclass(frozen=True, unsafe_hash=True)
class HighlightedText(str):
"""
Text with highlighting using ``<em>`` tags.
Other tags are not allowed.
"""
html: str
@property
def text(self):
return "".join(self.sequence)
def __str__(self) -> str:
return self.text
@cached_property
def sequence(self) -> List[Union[str, Highlight]]:
parser = _HighlightParser()
parser.feed(self.html)
sequence = parser.sequence
parser.close()
return sequence
class _HighlightParser(HTMLParser):
_sequence: List[Union[str, Highlight]]
_data: Optional[str]
_highlight_depth: int
def __init__(self):
super().__init__(convert_charrefs=True)
self._sequence = []
self._data = None
self._highlight_depth = 0
def _flush_data(self):
if self._data is not None:
if self._highlight_depth > 0:
self._sequence.append(
Highlight(
self._data,
depth=self._highlight_depth
)
)
else:
self._sequence.append(self._data)
self._data = None
@property
def sequence(self) -> List[Union[str, Highlight]]:
self._flush_data()
return self._sequence
def handle_starttag(self, tag: str, attrs):
if tag != "em":
raise SyntaxError("Can only parse <em> tags.")
if attrs:
raise SyntaxError("Cannot parse attributes.")
self._flush_data()
self._highlight_depth += 1
# Overridable -- handle end tag
def handle_endtag(self, tag: str):
if tag != "em":
raise SyntaxError("Can only parse <em> tags.")
self._flush_data()
self._highlight_depth -= 1
def handle_charref(self, name: str):
raise AssertionError(
"Should never be called because convert_charrefs is True."
)
def handle_entityref(self, name: str):
raise AssertionError(
"Should never be called because convert_charrefs is True."
)
def handle_data(self, data: str):
if self._data is None:
self._data = data
else:
self._data += data
def handle_comment(self, data: str):
raise SyntaxError("Comments are not supported.")
def handle_decl(self, decl: str):
raise SyntaxError("Doctype declarations are not supported.")
def handle_pi(self, data: str):
raise SyntaxError("Processing instructions are not supported.")
| 2,943
| 26.259259
| 71
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/model/__init__.py
|
from dataclasses import dataclass, field
from datetime import datetime, timezone
from functools import cached_property
from hashlib import md5
from pathlib import Path
from typing import Sequence, Any
from urllib.parse import SplitResult, urlsplit
from uuid import UUID, uuid5, NAMESPACE_URL
from dataclasses_json import DataClassJsonMixin, config
from marshmallow.fields import List, Nested, String, Field
from archive_query_log.model.highlight import HighlightedText
from archive_query_log.util.serialization import HighlightedTextField
@dataclass(frozen=True, slots=True)
class ArchivedUrl(DataClassJsonMixin):
"""
A URL that is archived in the Wayback Machine (https://web.archive.org/).
The archived snapshot can be retrieved using the ``archive_url``
and ``raw_archive_url`` properties.
Output of: 2-archived-urls
Input of: 3-archived-query-urls
"""
url: str
"""
Original URL that was archived.
"""
timestamp: int
"""
Timestamp of the archived snapshot in the Wayback Machine.
"""
@cached_property
def id(self) -> UUID:
"""
Unique ID for this archived URL.
"""
return uuid5(NAMESPACE_URL, f"{self.timestamp}:{self.url}")
@cached_property
def split_url(self) -> SplitResult:
"""
Original URL split into its components.
"""
return urlsplit(self.url)
@cached_property
def url_domain(self) -> str:
"""
Domain of the original URL.
"""
return self.split_url.netloc
@cached_property
def url_md5(self) -> str:
"""
MD5 hash of the original URL.
"""
return md5(self.url.encode()).hexdigest()
@cached_property
def datetime(self) -> datetime:
"""
Snapshot timestamp as a ``datetime`` object.
"""
return datetime.fromtimestamp(self.timestamp, timezone.utc)
@cached_property
def archive_timestamp(self) -> str:
"""
Snapshot timestamp as a string in the format used
by the Wayback Machine (``YYYYmmddHHMMSS``).
"""
return f"{self.datetime.year:04d}{self.datetime.month:02d}" \
f"{self.datetime.day:02d}{self.datetime.hour:02d}" \
f"{self.datetime.minute:02d}{self.datetime.second:02d}"
@property
def archive_url(self) -> str:
"""
URL of the archived snapshot in the Wayback Machine.
"""
return f"https://web.archive.org/web/" \
f"{self.archive_timestamp}/{self.url}"
@property
def raw_archive_url(self) -> str:
"""
URL of the archived snapshot's raw contents in the Wayback Machine.
"""
return f"https://web.archive.org/web/" \
f"{self.archive_timestamp}id_/{self.url}"
@dataclass(frozen=True, slots=True)
class ArchivedQueryUrl(ArchivedUrl, DataClassJsonMixin):
"""
Archived URL of a search engine result page (SERP) for a query.
Output of: 3-archived-query-urls
Input of: 4-archived-raw-serps, 8-ir-corpus
"""
query: str
"""
Query that was used to retrieve the SERP.
"""
page: int | None
"""
Page number of the SERP, e.g., 1, 2, 3.
Note: the page number should be zero-indexed, i.e.,
the first result page has the page number 0.
See also: ``offset``.
"""
offset: int | None
"""
Offset (start position) of the first result in the SERP, e.g., 10, 20.
Note: the offset should be zero-indexed, i.e.,
the first result page has the offset 0.
See also: ``page``.
"""
@dataclass(frozen=True, slots=True)
class ArchivedRawSerp(ArchivedQueryUrl, DataClassJsonMixin):
"""
Raw snapshot content of an archived SERP.
Output of: 4-archived-raw-serps
Input of: 5-archived-parsed-serps
"""
content: bytes
"""
Raw byte content of the archived SERP's snapshot.
"""
encoding: str
"""
Encoding of the archived SERP's snapshot.
"""
@dataclass(frozen=True, slots=True)
class ArchivedSearchResultSnippet(ArchivedUrl, DataClassJsonMixin):
"""
Single retrieved result from a query's archived SERP.
"""
rank: int
"""
Rank of the result in the SERP.
"""
title: HighlightedText | str = field(metadata=config(
encoder=str,
decoder=HighlightedText,
mm_field=HighlightedTextField(),
))
"""
Title of the result with optional highlighting.
"""
snippet: HighlightedText | str | None = field(metadata=config(
encoder=str,
decoder=HighlightedText,
mm_field=HighlightedTextField(allow_none=True),
))
"""
Snippet of the result.
Highlighting should be normalized to ``<em>`` tags.
Other HTML tags are removed.
"""
@cached_property
def id(self) -> UUID:
"""
Unique ID for this archived URL.
"""
return uuid5(NAMESPACE_URL, f"{self.rank}:{self.timestamp}:{self.url}")
@dataclass(frozen=True, slots=True)
class ArchivedParsedSerp(ArchivedQueryUrl, DataClassJsonMixin):
"""
Archived search engine result page (SERP) corresponding to a query.
Output of: 5-archived-parsed-serps
Input of: 6-archived-raw-search-results, 8-ir-corpus
"""
results: Sequence[ArchivedSearchResultSnippet] = field(
metadata=config(
encoder=list,
decoder=tuple,
mm_field=List(Nested(ArchivedSearchResultSnippet.schema()))
)
)
"""
Retrieved results from the SERP in the same order as they appear.
"""
interpreted_query: str | None
"""
Interpreted query that is displayed or otherwise included in the SERP.
Note: the interpreted result query can be different from the original query
due to spelling correction etc.
"""
@dataclass(frozen=True, slots=True)
class ArchivedRawSearchResult(ArchivedSearchResultSnippet, DataClassJsonMixin):
"""
Raw content of an archived search result.
Output of: 6-archived-raw-search-results
Input of: 7-archived-parsed-search-results
"""
content: bytes
"""
Raw byte content of the archived search result's snapshot.
"""
encoding: str
"""
Encoding of the archived search result's snapshot.
"""
@dataclass(frozen=True, slots=True)
class ArchivedParsedSearchResult(ArchivedSearchResultSnippet,
DataClassJsonMixin):
"""
Parsed representation of an archived search result.
Output of: 7-archived-parsed-search-results
Input of: 8-ir-corpus
"""
content_title: str | None
"""
Title of the archived SERP's snapshot content.
Note: the content title can be different from the snippet title due to ellipses etc.
"""
content_plaintext: str | None
"""
Plaintext of the archived SERP's snapshot content.
"""
# TODO
pass
# flake8: noqa: E402
from archive_query_log.model.parse import QueryParser, \
PageParser, OffsetParser, QueryParserField, PageOffsetParserField, \
ResultsParserField, InterpretedQueryParserField, InterpretedQueryParser, \
ResultsParser
@dataclass(frozen=True, slots=True)
class Service(DataClassJsonMixin):
"""
An online service that has a search interface.
Output of: service collection, service domains
Input of: service URLs, query extraction
"""
name: str
"""
Service name (corresponds to ``alexa_domain`` without
    the ``public_suffix``).
"""
public_suffix: str
"""
Public suffix (https://publicsuffix.org/) of ``alexa_domain``.
"""
alexa_domain: str
"""
Domain as it appears in Alexa top-1M ranks.
"""
alexa_rank: int | None
"""
Rank from fused Alexa top-1M rankings.
"""
category: str | None
"""
Category of the service (manual annotation).
"""
notes: str | None
"""
Notes about the service (manual annotation).
"""
input_field: bool | None
"""
Whether the service has an input field.
"""
search_form: bool | None
"""
Whether the service has a search form element.
"""
search_div: bool | None
"""
Whether the service has a search div element.
"""
domains: Sequence[str] = field(
metadata=config(
decoder=tuple,
mm_field=List(String())
)
)
"""
Known domains of the service, including the main domain.
"""
query_parsers: Sequence[QueryParser] = field(
metadata=config(
decoder=tuple,
mm_field=List(QueryParserField())
)
)
"""
Query parsers in order of precedence.
"""
page_parsers: Sequence[PageParser] = field(
metadata=config(
decoder=tuple,
mm_field=List(PageOffsetParserField())
)
)
"""
Page number parsers in order of precedence.
"""
offset_parsers: Sequence[OffsetParser] = field(
metadata=config(
decoder=tuple,
mm_field=List(PageOffsetParserField())
)
)
"""
    Offset parsers in order of precedence.
"""
interpreted_query_parsers: Sequence[InterpretedQueryParser] = field(
metadata=config(
decoder=tuple,
mm_field=List(InterpretedQueryParserField())
)
)
"""
Interpreted query parsers in order of precedence.
The interpreted query is the query that is displayed
or otherwise included in the SERP.
Note: the interpreted result query can be different from the original query
due to spelling correction etc.
"""
results_parsers: Sequence[ResultsParser] = field(
metadata=config(
decoder=tuple,
mm_field=List(ResultsParserField())
)
)
"""
SERP parsers in order of precedence.
"""
focused_url_prefixes: Sequence[str] = field(
metadata=config(
decoder=tuple,
mm_field=List(String())
)
)
"""
URL prefixes for a more focused pipeline which might miss some queries
but executes faster.
"""
class PathField(Field):
def _serialize(
self, value: Any, attr: str | None, obj: Any, **kwargs
) -> str:
return str(value)
def _deserialize(
self, value: str, attr: str, data: Any, **kwargs: Any
) -> Path:
return Path(value)
@dataclass(frozen=True, slots=True)
class CorpusJsonlLocation(DataClassJsonMixin):
relative_path: Path = field(
metadata=config(
encoder=str,
decoder=Path,
mm_field=PathField(),
)
)
"""
Path of the JSONL file relative to the corpus root path.
"""
byte_offset: int
"""
Position of the JSONL line's first byte in the decompressed JSONL file.
"""
@dataclass(frozen=True, slots=True)
class CorpusJsonlSnippetLocation(CorpusJsonlLocation, DataClassJsonMixin):
index: int
"""
Index of the snippet in the JSONL file entry's results list.
"""
@dataclass(frozen=True, slots=True)
class CorpusWarcLocation(DataClassJsonMixin):
relative_path: Path = field(
metadata=config(
encoder=str,
decoder=Path,
mm_field=PathField(),
)
)
"""
Path of the WARC file relative to the corpus root path.
"""
byte_offset: int
"""
Position of the WARC record's first byte in the compressed WARC file.
"""
@dataclass(frozen=True, slots=True)
class CorpusQueryUrl(DataClassJsonMixin):
id: UUID
"""
Unique ID based on the URL and timestamp.
"""
url: str
"""
Original URL that was archived.
"""
timestamp: int
"""
POSIX timestamp of the URL's archived snapshot in the Wayback Machine.
"""
wayback_url: str
"""
URL to access the archived snapshot in the Wayback Machine.
"""
wayback_raw_url: str
"""
URL to access the archived snapshot's raw contents in the Wayback Machine.
"""
url_query: str | None
"""
Query that was parsed from the URL.
"""
url_page: int | None
"""
SERP page number that was parsed from the URL, e.g., 1, 2, 3.
Note: the page number should be zero-indexed, i.e.,
the first result page has the page number 0.
"""
url_offset: int | None
"""
SERP results offset (start position) that was parsed from the URL,
e.g., 10, 20.
Note: the offset should be zero-indexed, i.e.,
the first result page has the offset 0.
"""
serp_query: str | None
"""
Interpreted query as displayed on (or otherwise included in) the SERP.
Note: the interpreted result query can be different from the original query
due to spelling correction etc.
"""
archived_url_location: CorpusJsonlLocation
"""
Location of the corresponding URL entry and JSONL file.
"""
archived_query_url_location: CorpusJsonlLocation | None
"""
Location of the corresponding query URL entry and JSONL file.
"""
archived_raw_serp_location: CorpusWarcLocation | None
"""
Location of the corresponding raw SERP entry and WARC file.
"""
archived_parsed_serp_location: CorpusJsonlLocation | None
"""
Location of the corresponding parsed SERP entry in the JSONL file.
"""
@dataclass(frozen=True, slots=True)
class CorpusSearchResult(DataClassJsonMixin):
id: UUID
"""
Unique ID for this archived URL.
"""
url: str
"""
Original URL that was archived.
"""
timestamp: int
"""
POSIX timestamp of the archived snapshot in the Wayback Machine.
Note that there might not be a snapshot for the exact timestamp,
but the Wayback Machine will instead redirect
to the nearest available snapshot.
"""
wayback_url: str
"""
URL of the archived snapshot in the Wayback Machine.
Note that there might not be a snapshot for the exact timestamp,
but the Wayback Machine will instead redirect
to the nearest available snapshot.
"""
wayback_raw_url: str
"""
URL of the archived snapshot's raw contents in the Wayback Machine.
Note that there might not be a snapshot for the exact timestamp,
but the Wayback Machine will instead redirect
to the nearest available snapshot.
"""
snippet_rank: int
"""
Rank of the result in the SERP.
"""
snippet_title: HighlightedText | str = field(metadata=config(
encoder=str,
decoder=HighlightedText,
mm_field=HighlightedTextField(),
))
"""
Snippet title of the result with optional highlighting.
Highlighting should be normalized to ``<em>`` tags.
Other HTML tags are removed.
"""
snippet_text: HighlightedText | str | None = field(metadata=config(
encoder=str,
decoder=HighlightedText,
mm_field=HighlightedTextField(allow_none=True),
))
"""
Snippet text of the result with optional highlighting.
Highlighting should be normalized to ``<em>`` tags.
Other HTML tags are removed.
"""
archived_snippet_location: CorpusJsonlSnippetLocation
"""
Location of the corresponding snippet entry in the JSONL file.
"""
archived_raw_search_result_location: CorpusWarcLocation | None
"""
Location of the corresponding raw search result entry and WARC file.
"""
archived_parsed_search_result_location: CorpusJsonlLocation | None
"""
Location of the corresponding parsed search result entry in the JSONL file.
"""
@dataclass(frozen=True, slots=True)
class CorpusQuery(CorpusQueryUrl, DataClassJsonMixin):
service: str
"""
Name of the search engine service from which the query was fetched.
"""
results: Sequence[CorpusSearchResult] | None = field(
metadata=config(
encoder=list,
decoder=tuple,
mm_field=List(Nested(CorpusSearchResult.schema()))
)
)
"""
Retrieved results from the SERP in the same order as they appear.
"""
@dataclass(frozen=True, slots=True)
class CorpusDocument(CorpusSearchResult, DataClassJsonMixin):
service: str
"""
Name of the search engine service from which the snippet was fetched.
"""
query: CorpusQueryUrl
"""
Query and SERP that was used to retrieve this result.
"""
| 16,441
| 26.221854
| 88
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/model/parse.py
|
from re import compile, IGNORECASE
from typing import Sequence, Protocol, runtime_checkable, Any, Mapping, Union
from marshmallow.fields import Field
from archive_query_log.model import ArchivedUrl, ArchivedSearchResultSnippet, \
ArchivedRawSerp
@runtime_checkable
class QueryParser(Protocol):
def parse(self, url: "ArchivedUrl") -> str | None:
...
@runtime_checkable
class PageParser(Protocol):
def parse(self, url: "ArchivedUrl") -> int | None:
...
@runtime_checkable
class OffsetParser(Protocol):
def parse(self, url: "ArchivedUrl") -> int | None:
...
@runtime_checkable
class InterpretedQueryParser(Protocol):
def parse(self, content: "ArchivedRawSerp") -> str | None:
...
@runtime_checkable
class ResultsParser(Protocol):
def parse(
self,
raw_serp: "ArchivedRawSerp",
) -> Sequence["ArchivedSearchResultSnippet"] | None:
...
class QueryParserField(Field):
def _deserialize(
self,
value: Any,
attr: str | None,
data: Mapping[str, Any] | None,
**kwargs,
) -> QueryParser:
value: Mapping[str, Any]
parser_type = value["type"]
if parser_type == "query_parameter":
from archive_query_log.queries.parse import \
QueryParameterQueryParser
return QueryParameterQueryParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
parameter=value["parameter"],
)
elif parser_type == "fragment_parameter":
from archive_query_log.queries.parse import \
FragmentParameterQueryParser
return FragmentParameterQueryParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
parameter=value["parameter"],
)
elif parser_type == "path_segment":
from archive_query_log.queries.parse import \
PathSegmentQueryParser
return PathSegmentQueryParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
segment=value["segment"],
remove_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["remove_patterns"]
]
if "remove_patterns" in value
else []
),
space_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["space_patterns"]
]
if "space_patterns" in value
else []
),
)
elif parser_type == "fragment_segment":
from archive_query_log.queries.parse import \
FragmentSegmentQueryParser
return FragmentSegmentQueryParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
segment=value["segment"],
remove_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["remove_patterns"]
]
if "remove_patterns" in value
else []
),
space_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["space_patterns"]
]
if "space_patterns" in value
else []
),
)
else:
raise ValueError(f"Unknown parser type: {parser_type}")
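# --- Hedged configuration sketch (not part of the original module) ---
# ``QueryParserField._deserialize`` expects mappings shaped like the ones
# below (typically loaded from the services YAML). The URL patterns,
# parameter name, and segment index are illustrative assumptions only.
_EXAMPLE_QUERY_PARSER_CONFIGS = (
    {
        # Query in a URL parameter, e.g. ...?q=<query>
        "type": "query_parameter",
        "url_pattern": r"^https?://example\.org/search\?",
        "parameter": "q",
    },
    {
        # Query as a URL path segment, e.g. /search/<query>
        "type": "path_segment",
        "url_pattern": r"^https?://example\.org/search/",
        "segment": 2,
        "remove_patterns": [r"\.html$"],  # optional
        "space_patterns": [r"[+_-]"],     # optional
    },
)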
class PageOffsetParserField(Field):
def _deserialize(
self,
value: Any,
attr: str | None,
data: Mapping[str, Any] | None,
**kwargs,
    ) -> Union[PageParser, OffsetParser]:
value: Mapping[str, Any]
parser_type = value["type"]
if parser_type == "query_parameter":
from archive_query_log.queries.parse import \
QueryParameterPageOffsetParser
return QueryParameterPageOffsetParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
parameter=value["parameter"],
)
elif parser_type == "fragment_parameter":
from archive_query_log.queries.parse import \
FragmentParameterPageOffsetParser
return FragmentParameterPageOffsetParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
parameter=value["parameter"],
)
elif parser_type == "path_segment":
from archive_query_log.queries.parse import \
PathSegmentPageOffsetParser
return PathSegmentPageOffsetParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
segment=value["segment"],
delimiter=value["delimiter"] if "delimiter" in value else "/",
remove_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["remove_patterns"]
]
if "remove_patterns" in value
else []
),
)
elif parser_type == "fragment_segment":
from archive_query_log.queries.parse import \
FragmentSegmentPageOffsetParser
return FragmentSegmentPageOffsetParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
segment=value["segment"],
delimiter=value["delimiter"] if "delimiter" in value else "/",
remove_patterns=(
[
compile(pattern, IGNORECASE)
for pattern in value["remove_patterns"]
]
if "remove_patterns" in value
else []
),
)
else:
raise ValueError(f"Unknown parser type: {parser_type}")
class InterpretedQueryParserField(Field):
def _deserialize(
self,
value: Any,
attr: str | None,
data: Mapping[str, Any] | None,
**kwargs,
) -> InterpretedQueryParser:
value: Mapping[str, Any]
parser_type = value["type"]
if parser_type == "html_selector":
from archive_query_log.results.parse import \
HtmlSelectorInterpretedQueryParser
return HtmlSelectorInterpretedQueryParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
query_selector=value["query_selector"],
query_attribute=(
value["query_attribute"]
if "query_attribute" in value
else "value"
),
query_text=(
value["query_text"]
if "query_text" in value
else False
),
)
else:
raise ValueError(f"Unknown parser type: {parser_type}")
class ResultsParserField(Field):
def _deserialize(
self,
value: Any,
attr: str | None,
data: Mapping[str, Any] | None,
**kwargs,
) -> ResultsParser:
value: Mapping[str, Any]
parser_type = value["type"]
if parser_type == "html_selector":
from archive_query_log.results.parse import \
HtmlSelectorResultsParser
return HtmlSelectorResultsParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
results_selector=value["results_selector"],
url_selector=value["url_selector"],
url_attribute=(
value["url_attribute"]
if "url_attribute" in value
else "href"
),
title_selector=value["title_selector"],
snippet_selector=(
value["snippet_selector"]
if "snippet_selector" in value
else None
),
)
elif parser_type == "chatnoir":
from archive_query_log.results.chatnoir import \
ChatNoirResultsParser
return ChatNoirResultsParser(
url_pattern=compile(value["url_pattern"], IGNORECASE),
)
else:
raise ValueError(f"Unknown parser type: {parser_type}")
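# --- Hedged configuration sketch (not part of the original module) ---
# ``ResultsParserField._deserialize`` expects a mapping like the one below
# for an "html_selector" parser. The URL pattern and CSS selectors are
# illustrative assumptions, not taken from a real service definition.
_EXAMPLE_RESULTS_PARSER_CONFIG = {
    "type": "html_selector",
    "url_pattern": r"^https?://example\.org/search\?",
    "results_selector": "div.result",        # one element per result
    "url_selector": "a.result-link",         # "href" is the default attribute
    "title_selector": "a.result-link",
    "snippet_selector": "p.result-snippet",  # optional
}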
| 8,728
| 34.628571
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/download/warc.py
|
from dataclasses import dataclass
from io import BytesIO
from itertools import count, groupby, chain
from pathlib import Path
from random import Random
from tempfile import TemporaryFile
from typing import Sequence, NamedTuple, Iterable
from urllib.parse import quote, parse_qsl
from aiohttp import ClientResponseError
from aiohttp_retry import RetryClient
from asyncio_pool import AioPool
from tqdm.auto import tqdm
from warcio import WARCWriter, StatusAndHeaders
from archive_query_log.model import ArchivedUrl, Service
from archive_query_log.queries.iterable import ArchivedQueryUrls
from archive_query_log.serps.iterable import ArchivedParsedSerps
from archive_query_log.util.archive_http import archive_http_client
class _CdxPage(NamedTuple):
input_path: Path
output_path: Path
class _CdxUrl(NamedTuple):
archived_url: ArchivedUrl
output_path: Path
@dataclass(frozen=True)
class WebArchiveWarcDownloader:
"""
Download WARC files for archived URLs from the Web Archive.
The downloader will retry requests with a backoff and will continue
downloading URLs even if some URLs fail.
"""
max_file_size: int = 1_000_000_000 # 1GB
"""
Maximum number of bytes to write to a single WARC file.
"""
verbose: bool = False
@staticmethod
def _check_download_path(download_path: Path):
download_path.mkdir(parents=True, exist_ok=True)
if not download_path.is_dir():
raise ValueError(
f"Download path must be a directory: {download_path}"
)
@staticmethod
def _lock_path(download_path: Path) -> Path:
path = download_path / ".lock"
path.touch(exist_ok=True)
return path
def _is_url_downloaded(self, url: _CdxUrl) -> bool:
if not url.output_path.exists():
return False
archive_url = url.archived_url.raw_archive_url
with self._lock_path(url.output_path).open("rt") as file:
return any(
line.strip() == archive_url
for line in file
)
def _set_url_downloaded(self, url: _CdxUrl):
archive_url = url.archived_url.raw_archive_url
with self._lock_path(url.output_path).open("at") as file:
file.write(f"{archive_url}\n")
def _next_available_file_path(
self,
download_path: Path,
buffer_size: int,
) -> Path:
WebArchiveWarcDownloader._check_download_path(download_path)
for index in count():
name = f"{index:010}.warc.gz"
path = download_path / name
if not path.exists():
path.touch()
return path
else:
file_size = path.stat().st_size
if file_size + buffer_size <= self.max_file_size:
return path
async def _download(
self,
urls: Iterable[_CdxUrl],
) -> None:
"""
Download WARC files for archived URLs from the Web Archive.
"""
urls = [
url
for url in urls
if not self._is_url_downloaded(url)
]
if len(urls) == 0:
return
progress = None
if self.verbose:
progress = tqdm(
total=len(urls),
desc="Download archived URLs",
unit="URL",
)
async with archive_http_client(limit=100) as client:
pool = AioPool(size=100) # avoid creating too many tasks at once
async def download_single(url: _CdxUrl):
return await self._download_single(client, url, progress)
await pool.map(download_single, urls)
async def download(
self,
download_path: Path,
archived_urls: Iterable[ArchivedUrl],
) -> None:
"""
Download WARC files for archived URLs from the Web Archive.
"""
await self._download([
_CdxUrl(url, download_path)
for url in archived_urls
])
async def _download_single(
self,
client: RetryClient,
url: _CdxUrl,
progress: tqdm | None = None,
) -> bool:
if self._is_url_downloaded(url):
if progress is not None:
progress.update()
return True
# FIXME: This won't work for snippet URLs. Instead,
# we need to fetch the closest archived URL if any.
archive_url = url.archived_url.raw_archive_url
url_headers = {
"Archived-URL": url.archived_url.schema().dumps(url.archived_url),
}
try:
async with client.get(archive_url) as response:
response.raise_for_status()
with TemporaryFile() as tmp_file:
writer = WARCWriter(tmp_file, gzip=True)
# noinspection PyProtectedMember
version = client._client.version
protocol = f"HTTP/{version[0]}.{version[1]}"
request_record = writer.create_warc_record(
uri=str(response.request_info.url),
record_type="request",
http_headers=StatusAndHeaders(
statusline=" ".join((
response.request_info.method,
response.request_info.url.path,
protocol,
)),
headers=response.request_info.headers,
protocol=protocol,
),
warc_headers_dict={**url_headers},
)
writer.write_record(request_record)
response_record = writer.create_warc_record(
uri=str(response.url),
record_type="response",
http_headers=StatusAndHeaders(
statusline=" ".join((
protocol,
str(response.status),
response.reason,
)),
headers=response.headers,
protocol=protocol
),
payload=BytesIO(await response.content.read()),
length=response.content_length,
warc_headers_dict={**url_headers},
)
writer.write_record(response_record)
tmp_file.flush()
tmp_size = tmp_file.tell()
tmp_file.seek(0)
file_path = self._next_available_file_path(
url.output_path,
tmp_size,
)
with file_path.open("ab") as file:
tmp = tmp_file.read()
if not len(tmp) == tmp_size:
raise RuntimeError("Invalid buffer size.")
file.write(tmp)
self._set_url_downloaded(url)
return True
except ClientResponseError:
return False
except BaseException as e:
raise e
finally:
if progress is not None:
progress.update()
def _service_pages(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None,
cdx_page: int | None,
snippets: bool = False,
) -> Sequence[_CdxPage]:
"""
List all items that need to be downloaded.
"""
if snippets:
input_format_path = data_directory / "archived-parsed-serps"
output_format_path = data_directory / "archived-raw-search-results"
else:
input_format_path = data_directory / "archived-query-urls"
output_format_path = data_directory / "archived-raw-serps"
service_path = input_format_path / service.name
if domain is not None:
domain_paths = [service_path / domain]
else:
domain_paths = [
path
for path in service_path.iterdir()
if path.is_dir()
]
if focused:
domain_paths = [
path
for path in domain_paths
if any(
path.name.endswith(quote(prefix, safe=""))
for prefix in service.focused_url_prefixes
)
]
if cdx_page is not None:
assert domain is not None
assert len(domain_paths) == 1
cdx_page_paths = [domain_paths[0] / f"{cdx_page:010}.jsonl.gz"]
else:
cdx_page_paths = [
path
for domain_path in domain_paths
for path in domain_path.iterdir()
if (
path.is_file() and
len(path.name.removesuffix(".jsonl.gz")) == 10 and
path.name.removesuffix(".jsonl.gz").isdigit()
)
]
pages = (
_CdxPage(
input_path=cdx_page_path,
output_path=output_format_path / cdx_page_path.relative_to(
input_format_path
).with_name(cdx_page_path.name.removesuffix(".jsonl.gz")),
)
for cdx_page_path in cdx_page_paths
)
return [page for page in pages if page.input_path.exists()]
@staticmethod
def _canonical_url(urls: Iterable[_CdxUrl]) -> _CdxUrl:
"""
First URL, sorted by URL query string length, then by URL length,
then by URL.
"""
urls = sorted(
urls,
key=lambda url: url.archived_url.url
)
urls = sorted(
urls,
key=lambda url: len(url.archived_url.url)
)
urls = sorted(
urls,
key=lambda url: len(parse_qsl(url.archived_url.split_url.query))
)
return urls[0]
@staticmethod
def _deduplicate_urls(
urls: Iterable[_CdxUrl],
snippets: bool,
) -> list[_CdxUrl]:
if snippets:
return list(urls)
urls = sorted(
urls,
key=lambda url: url.archived_url.query
)
grouped_query_urls = groupby(
urls,
key=lambda url: url.archived_url.query
)
return [
WebArchiveWarcDownloader._canonical_url(urls)
for query, urls in grouped_query_urls
]
@staticmethod
def _page_urls(
page: _CdxPage,
focused: bool,
snippets: bool,
) -> Iterable[_CdxUrl]:
if snippets:
urls = (
_CdxUrl(url, page.output_path)
for serp in ArchivedParsedSerps(page.input_path)
for url in serp.results
)
else:
urls = (
_CdxUrl(url, page.output_path)
for url in ArchivedQueryUrls(page.input_path)
)
if focused:
urls = WebArchiveWarcDownloader._deduplicate_urls(urls, snippets)
return urls
async def download_service(
self,
data_directory: Path,
focused: bool,
service: Service,
domain: str | None = None,
cdx_page: int | None = None,
snippets: bool = False,
):
pages = self._service_pages(
data_directory=data_directory,
focused=focused,
service=service,
domain=domain,
cdx_page=cdx_page,
snippets=snippets,
)
if len(pages) == 0:
return
if focused:
pages = tqdm(
pages,
desc="Deduplicate query URLs",
unit="page",
)
archived_urls = chain.from_iterable(
self._page_urls(page, focused, snippets)
for page in pages
)
if focused:
archived_urls = self._deduplicate_urls(archived_urls, snippets)
archived_urls = Random(0).sample(
archived_urls,
min(len(archived_urls), 75_000)
)
await self._download(archived_urls)
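# --- Hedged usage sketch (not part of the original module) ---
async def _example_download_serp_warcs() -> None:
    """
    Download raw SERP WARC records for a handful of archived URLs. The
    download path and URL are placeholders, and ``ArchivedUrl`` is assumed
    to require only ``url`` and a POSIX ``timestamp``.
    """
    downloader = WebArchiveWarcDownloader(verbose=True)
    archived_urls = [
        ArchivedUrl(
            url="https://example.org/search?q=web+archives",  # hypothetical
            timestamp=1577836800,
        ),
    ]
    await downloader.download(Path("data/example-warcs"), archived_urls)
# Run the sketch with: asyncio.run(_example_download_serp_warcs())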
| 12,668
| 31.992188
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/download/iterable.py
|
from dataclasses import dataclass
from functools import cached_property
from json import JSONDecodeError
from pathlib import Path
from typing import Sized, Iterable, Iterator
from fastwarc import GZipStream, FileStream, ArchiveIterator, WarcRecordType, \
WarcRecord
from marshmallow import Schema
from archive_query_log import LOGGER
from archive_query_log.model import ArchivedQueryUrl, ArchivedRawSerp
@dataclass(frozen=True)
class ArchivedRawSerps(Sized, Iterable[ArchivedRawSerp]):
"""
Read archived raw SERP contents from a directory of WARC files.
"""
path: Path
"""
Path where the raw SERP contents are stored in WARC format.
"""
def __post_init__(self):
self._check_raw_serps_paths()
def _check_raw_serps_paths(self):
if not self.path.exists() or not self.path.is_dir():
raise ValueError(
f"Raw SERPs path must be a directory: {self.path}"
)
def _streams(self) -> Iterator[tuple[Path, GZipStream]]:
files = self.path.glob("*.warc.gz")
for file in files:
yield file, GZipStream(FileStream(str(file), "rb"))
def __len__(self) -> int:
return sum(
1
for _, stream in self._streams()
for _ in ArchiveIterator(
stream,
record_types=WarcRecordType.response,
parse_http=False,
)
)
@cached_property
def _archived_serp_url_schema(self) -> Schema:
return ArchivedQueryUrl.schema()
def _read_serp_content(self, record: WarcRecord) -> ArchivedRawSerp | None:
archived_serp_url: ArchivedQueryUrl
record_url_header = record.headers["Archived-URL"]
try:
archived_serp_url = self._archived_serp_url_schema.loads(
record_url_header
)
except JSONDecodeError:
LOGGER.warning(f"Could not index {record_url_header} "
f"from record {record.record_id}.")
return None
content_type = record.http_charset
if content_type is None:
content_type = "utf8"
return ArchivedRawSerp(
url=archived_serp_url.url,
timestamp=archived_serp_url.timestamp,
query=archived_serp_url.query,
page=archived_serp_url.page,
offset=archived_serp_url.offset,
content=record.reader.read(),
encoding=content_type,
)
def __iter__(self) -> Iterator[ArchivedRawSerp]:
for path, stream in self._streams():
failures = False
for record in ArchiveIterator(
stream,
record_types=WarcRecordType.response,
parse_http=True,
):
serp = self._read_serp_content(record)
if serp is None:
failures = True
continue
yield serp
if failures:
LOGGER.warning(f"Failed to index some records from {path}.")
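# --- Hedged usage sketch (not part of the original module) ---
def _example_iterate_raw_serps() -> None:
    """
    Iterate raw SERP snapshots from a directory of WARC files and decode
    their content. The directory path is a hypothetical placeholder.
    """
    raw_serps = ArchivedRawSerps(Path("data/archived-raw-serps/example"))
    for raw_serp in raw_serps:
        html = raw_serp.content.decode(raw_serp.encoding, errors="replace")
        print(raw_serp.query, raw_serp.timestamp, len(html))
        break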
| 3,088
| 31.861702
| 79
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/download/raw.py
|
from asyncio import sleep
from pathlib import Path
from random import random
from typing import Iterable, Callable, Mapping
from aiohttp import ClientResponseError
from aiohttp_retry import RetryClient
from asyncio_pool import AioPool
from tqdm.auto import tqdm
from archive_query_log.model import ArchivedUrl
from archive_query_log.util.archive_http import archive_http_client
class WebArchiveRawDownloader:
async def download(
self,
archived_urls: Iterable[ArchivedUrl],
download_directory_path: Path,
file_name: Callable[[ArchivedUrl], str],
) -> Mapping[ArchivedUrl, Path]:
"""
Download the original HTML of a collection of archived URLs
from the Internet Archive's Wayback Machine and
return the paths to the individual downloaded files.
The downloader will retry requests with a backoff and will continue
downloading URLs even if some URLs fail.
:param archived_urls: The archived URLs to download.
:param download_directory_path: Path to the directory in which the
downloaded HTML files should be stored.
:param file_name: Function for specifying each URL's file name
in the download directory.
:return: A mapping of the successfully downloaded URLs to their
corresponding download file paths. Failed downloads will not appear
in the mapping.
"""
archived_urls = list(archived_urls)
progress = tqdm(
total=len(archived_urls),
desc="Download archived URLs",
unit="URL",
)
async with archive_http_client(limit=10) as client:
pool = AioPool(size=100) # avoid creating too many tasks at once
async def download_single(archived_url: ArchivedUrl):
return await self._download_single_progress(
archived_url,
download_directory_path,
file_name,
client,
progress,
)
responses = await pool.map(download_single, archived_urls)
return {
archived_url: response
for archived_url, response in zip(archived_urls, responses)
if response is not None
}
async def download_single(
self,
archived_url: ArchivedUrl,
download_directory_path: Path,
file_name: Callable[[ArchivedUrl], str],
) -> Path | None:
"""
Download the original HTML of a single archived URL
from the Internet Archive's Wayback Machine and
return the path to the downloaded file.
The downloader will retry requests with a backoff.
:param archived_url: The archived URL to download.
:param download_directory_path: Path to the directory in which the
downloaded HTML file should be stored.
:param file_name: Function for specifying each URL's file name
in the download directory.
:return: The download file path if the request was successful,
and None otherwise.
"""
async with archive_http_client(limit=1) as client:
return await self._download_single(
archived_url=archived_url,
download_directory_path=download_directory_path,
file_name=file_name,
client=client,
)
@staticmethod
async def _download_single(
archived_url: ArchivedUrl,
download_directory_path: Path,
file_name: Callable[[ArchivedUrl], str],
client: RetryClient,
) -> Path | None:
file_path = download_directory_path / file_name(archived_url)
if file_path.exists():
return file_path
url = archived_url.raw_archive_url
await sleep(1.0 * random())
try:
async with client.get(url) as response:
response.raise_for_status()
with file_path.open("wb") as file:
async for data, _ in response.content.iter_chunks():
file.write(data)
return file_path
except ClientResponseError:
file_path.unlink(missing_ok=True)
return None
except BaseException as e:
file_path.unlink(missing_ok=True)
raise e
@staticmethod
async def _download_single_progress(
archived_url: ArchivedUrl,
download_directory_path: Path,
file_name: Callable[[ArchivedUrl], str],
client: RetryClient,
progress: tqdm,
) -> Path | None:
res = await WebArchiveRawDownloader._download_single(
archived_url=archived_url,
download_directory_path=download_directory_path,
file_name=file_name,
client=client,
)
progress.update(1)
return res
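# --- Hedged usage sketch (not part of the original module) ---
async def _example_download_raw_html() -> None:
    """
    Download the raw HTML of a single archived URL. The output directory
    (assumed to exist) and URL are placeholders, and ``ArchivedUrl`` is
    assumed to require only ``url`` and a POSIX ``timestamp``.
    """
    downloader = WebArchiveRawDownloader()
    archived_url = ArchivedUrl(
        url="https://example.org/search?q=web+archives",  # hypothetical
        timestamp=1577836800,
    )
    path = await downloader.download_single(
        archived_url=archived_url,
        download_directory_path=Path("data/raw-html"),
        file_name=lambda url: f"{url.timestamp}.html",
    )
    print(path)
# Run the sketch with: asyncio.run(_example_download_raw_html())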
| 4,981
| 35.632353
| 77
|
py
|
archive-query-log
|
archive-query-log-main/archive_query_log/download/__init__.py
| 0
| 0
| 0
|
py
|
|
archive-query-log
|
archive-query-log-main/scripts/create_corpus.py
|
from argparse import ArgumentParser
from datetime import datetime
from gzip import GzipFile
from json import loads, JSONDecodeError, dumps
from pathlib import Path
from random import shuffle
from typing import Optional, Iterator, Literal
from urllib.parse import urlparse
from uuid import uuid5, NAMESPACE_URL
from fastwarc import FileStream, ArchiveIterator, WarcRecordType, WarcRecord
from publicsuffixlist import PublicSuffixList
from pyspark.sql import SparkSession
from tqdm.auto import tqdm
from yaml import safe_load
_CEPH_DIR = Path("/mnt/ceph/storage")
_RESEARCH_DIR = _CEPH_DIR / "data-in-progress" / "data-research"
_GLOBAL_DATA_DIR = _RESEARCH_DIR / "web-search" / "web-archive-query-log"
_DATA_DIR = _GLOBAL_DATA_DIR / "focused"
_PUBLIC_SUFFIX_LIST = PublicSuffixList()
def _load_services(path: Path) -> dict:
with path.open("r") as file:
services_dict = safe_load(file)
services_list = [(service["name"], service) for service in services_dict]
assert len({name for name, service in services_list}) == len(services_list)
services = {
name: service
for name, service in services_list
}
print(f"Found {len(services)} services.")
return services
_SERVICES = _load_services(_GLOBAL_DATA_DIR / "selected-services.yaml")
def _detect_language(text: str) -> Optional[str]:
text = text.replace("\n", " ")
from cld3 import get_language
language_prediction = get_language(text)
if language_prediction is None:
return None
return language_prediction.language.split("-")[0] \
if language_prediction.is_reliable else None
def _read_jsonl(path: Path, base_type: str) -> dict:
jsonl_path = _DATA_DIR / base_type / path.with_suffix(".jsonl.gz")
if not jsonl_path.exists():
return {}
index = {}
try:
with GzipFile(jsonl_path, "r") as gzip_file:
# noinspection PyTypeChecker
for line in tqdm(gzip_file, desc="Index JSONL"):
try:
# noinspection PyTypeChecker
record = loads(line)
                except Exception:
print(f"Could not index {line} at {path}.")
continue
record_id = uuid5(
NAMESPACE_URL,
f"{record['timestamp']}:{record['url']}",
)
index[record_id] = record
return index
    except Exception:
print(f"Could not read JSONL file at {path}.")
return {}
def _index_warc(path: Path, base_type: str) -> dict:
warc_path = _DATA_DIR / base_type / path
if not warc_path.exists():
return {}
index = {}
for warc_child_path in warc_path.iterdir():
if warc_child_path.name.startswith("."):
continue
try:
stream = FileStream(str(warc_child_path.absolute()))
records = ArchiveIterator(
stream,
record_types=WarcRecordType.response,
parse_http=False,
)
# noinspection PyTypeChecker
for record in tqdm(records, desc="Index WARC"):
record: WarcRecord
offset = record.stream_pos
record_url_header = record.headers["Archived-URL"]
try:
record_url = loads(record_url_header)
except JSONDecodeError:
print(f"Could not index {record_url_header} at {path}.")
continue
record_id = uuid5(
NAMESPACE_URL,
f"{record_url['timestamp']}:{record_url['url']}",
)
index[record_id] = (
warc_child_path,
offset,
)
        except Exception:
print(f"Could not read WARC file at {warc_child_path}.")
continue
return index
def _iter_relative_path_records(relative_path: Path) -> Iterator[tuple]:
service = relative_path.parts[0]
print(f"Reading files in {relative_path}.")
archived_urls_index = _read_jsonl(relative_path, "archived-urls")
print("Finished reading archived URLs.")
archived_query_urls_index = _read_jsonl(relative_path,
"archived-query-urls")
print("Finished reading archived query URLs.")
archived_raw_serps_index = _index_warc(relative_path, "archived-raw-serps")
print("Finished reading archived raw SERPs (pointers).")
archived_parsed_serps_index = _read_jsonl(relative_path,
"archived-parsed-serps")
print("Finished reading archived parsed SERPs.")
    for record_id, archived_url in archived_urls_index.items():
archived_query_url = archived_query_urls_index.get(record_id, None)
archived_raw_serp_location = archived_raw_serps_index.get(record_id,
None)
archived_parsed_serp = archived_parsed_serps_index.get(record_id, None)
yield service, relative_path, record_id, archived_url, archived_query_url, archived_raw_serp_location, archived_parsed_serp
def _iter_results(
archived_url: dict, archived_parsed_serp: dict
) -> Iterator[dict]:
if archived_parsed_serp is None:
return
for snippet in archived_parsed_serp["results"]:
url = snippet["url"]
domain = urlparse(url).hostname
public_suffix = _PUBLIC_SUFFIX_LIST.publicsuffix(domain) \
if domain is not None else None
timestamp = archived_url["timestamp"]
wayback_timestamp = \
datetime.fromtimestamp(timestamp).strftime("%Y%m%d%H%M%S")
wayback_url = f"https://web.archive.org/web/{wayback_timestamp}/{url}"
wayback_raw_url = \
f"https://web.archive.org/web/{wayback_timestamp}id_/{url}"
record_id = uuid5(
NAMESPACE_URL,
f"{snippet['rank']}:{snippet['timestamp']}:{snippet['url']}"
)
print(f"Yield result: {record_id}")
yield {
"result_id": str(record_id),
"result_url": url,
"result_domain": domain,
"result_domain_public_suffix": public_suffix,
"result_wayback_url": wayback_url,
"result_wayback_raw_url": wayback_raw_url,
"result_snippet_rank": snippet['rank'],
"result_snippet_title": snippet["title"],
"result_snippet_text": snippet["snippet"],
"result_warc_relative_path": None,
"result_warc_byte_offset": None,
}
def _record_to_query(relative_path_record: tuple) -> Optional[str]:
service, relative_path, record_id, archived_url, archived_query_url, \
archived_raw_serp_location, archived_parsed_serp = relative_path_record
if archived_query_url is None:
print(f"Archived query URL not found for ID {record_id}.")
return None
url = archived_url["url"]
domain = urlparse(url).hostname
public_suffix = _PUBLIC_SUFFIX_LIST.publicsuffix(domain) \
if domain is not None else None
timestamp = archived_url["timestamp"]
wayback_timestamp = \
datetime.fromtimestamp(timestamp).strftime("%Y%m%d%H%M%S")
wayback_url = f"https://web.archive.org/web/{wayback_timestamp}/{url}"
wayback_raw_url = \
f"https://web.archive.org/web/{wayback_timestamp}id_/{url}"
query = archived_query_url["query"]
language = _detect_language(query)
service_info = _SERVICES[service]
documents = list(_iter_results(archived_url, archived_parsed_serp))
print(f"Yield SERP: {record_id}")
return dumps({
"serp_id": str(record_id),
"serp_url": url,
"serp_domain": domain,
"serp_domain_public_suffix": public_suffix,
"serp_timestamp": timestamp,
"serp_wayback_url": wayback_url,
"serp_wayback_raw_url": wayback_raw_url,
"serp_page": archived_query_url["page"],
"serp_offset": archived_query_url["offset"],
"serp_query_text_url": query,
"serp_query_text_url_language": language,
"serp_query_text_html": (
archived_parsed_serp["interpreted_query"]
if archived_parsed_serp is not None else None
),
"serp_warc_relative_path": (
str(archived_raw_serp_location[0].relative_to(_GLOBAL_DATA_DIR))
if archived_raw_serp_location is not None else None
),
"serp_warc_byte_offset": (
archived_raw_serp_location[1]
if archived_raw_serp_location is not None else None
),
"serp_results": documents,
"search_provider_name": service,
"search_provider_alexa_domain": service_info["alexa_domain"],
"search_provider_alexa_domain_public_suffix":
_SERVICES[service]["public_suffix"],
"search_provider_alexa_rank": service_info["alexa_rank"],
"search_provider_category": service_info["category"],
})
def _iter_query_documents(query: str) -> Iterator[dict]:
query = loads(query)
for result in query["serp_results"]:
print(f"Yield result: {result['result_id']}")
yield dumps({
"result_id": result["result_id"],
"result_url": result["result_url"],
"result_domain": result["result_domain"],
"result_domain_public_suffix":
result["result_domain_public_suffix"],
"result_wayback_url": result["result_wayback_url"],
"result_wayback_raw_url": result["result_wayback_raw_url"],
"result_snippet_rank": result["result_snippet_rank"],
"result_snippet_title": result["result_snippet_title"],
"result_snippet_text": result["result_snippet_text"],
"result_warc_relative_path": result["result_warc_relative_path"],
"result_warc_byte_offset": result["result_warc_byte_offset"],
"serp_id": query["serp_id"],
"serp_url": query["serp_url"],
"serp_domain": query["serp_domain"],
"serp_domain_public_suffix": query["serp_domain_public_suffix"],
"serp_timestamp": query["serp_timestamp"],
"serp_wayback_url": query["serp_wayback_url"],
"serp_wayback_raw_url": query["serp_wayback_raw_url"],
"serp_page": query["serp_page"],
"serp_offset": query["serp_offset"],
"serp_query_text_url": query["serp_query_text_url"],
"serp_query_text_url_language":
query["serp_query_text_url_language"],
"serp_query_text_html": query["serp_query_text_html"],
"serp_warc_relative_path": query["serp_warc_relative_path"],
"serp_warc_byte_offset": query["serp_warc_byte_offset"],
"search_provider_name": query["search_provider_name"],
"search_provider_alexa_domain":
query["search_provider_alexa_domain"],
"search_provider_alexa_domain_public_suffix":
query["search_provider_alexa_domain_public_suffix"],
"search_provider_alexa_rank": query["search_provider_alexa_rank"],
"search_provider_category": query["search_provider_category"],
})
def main(variant: Literal["small", "medium", "full"]):
session = SparkSession.builder.getOrCreate()
sc = session.sparkContext
relative_paths = [
path
.relative_to(_DATA_DIR / "archived-urls")
.with_name(path.name[:-len(".jsonl.gz")])
for path in _DATA_DIR.glob("archived-urls/*/*/*.jsonl.gz")
]
print(f"Found {len(relative_paths)} paths.")
shuffle(relative_paths)
if variant == "small":
relative_paths = relative_paths[:100]
elif variant == "medium":
relative_paths = relative_paths[:2500]
print(f"Selected {len(relative_paths)} paths for corpus creation.")
print("Start corpus creation.")
print("Export SERPs.")
sc.parallelize(relative_paths) \
.repartition(1_000) \
.flatMap(_iter_relative_path_records) \
.map(_record_to_query) \
.filter(lambda json: json is not None) \
.saveAsTextFile(f"archive-query-log/{variant}/serps/",
compressionCodecClass=
"org.apache.hadoop.io.compress.GzipCodec")
print("Export results.")
sc.textFile(f"archive-query-log/{variant}/serps/") \
.flatMap(_iter_query_documents) \
.map(dumps) \
.repartition(1_000) \
.saveAsTextFile(f"archive-query-log/{variant}/results/",
compressionCodecClass=
"org.apache.hadoop.io.compress.GzipCodec")
print("Done.")
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument(
"--small",
dest="variant",
action="store_const",
const="small",
default="full",
)
parser.add_argument(
"--medium",
dest="variant",
action="store_const",
const="medium",
default="full",
)
parser.add_argument(
"--full",
dest="variant",
action="store_const",
const="full",
default="full",
)
args = parser.parse_args()
print(f"Creating corpus (variant: {args.variant}).")
main(args.variant)
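# --- Hedged usage sketch (not part of the original script) ---
def _example_read_exported_serps() -> None:
    """
    Read one exported SERP line from the Spark output. The part file path
    is a hypothetical placeholder.
    """
    from gzip import open as gzip_open
    with gzip_open("archive-query-log/small/serps/part-00000.gz", "rt") as file:
        serp = loads(next(file))
    print(serp["serp_query_text_url"], len(serp["serp_results"]))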
| 13,418
| 37.560345
| 131
|
py
|
archive-query-log
|
archive-query-log-main/scripts/create_url_list.py
|
from datetime import datetime
from gzip import GzipFile
from json import loads, JSONDecodeError, dumps
from pathlib import Path
from random import shuffle
from typing import Optional, Iterator
from urllib.parse import urlparse
from uuid import uuid5, NAMESPACE_URL
from fastwarc import FileStream, ArchiveIterator, WarcRecordType, WarcRecord
from pyspark.sql import SparkSession
from tqdm.auto import tqdm
_CEPH_DIR = Path("/mnt/ceph/storage")
_RESEARCH_DIR = _CEPH_DIR / "data-in-progress" / "data-research"
_GLOBAL_DATA_DIR = _RESEARCH_DIR / "web-search" / "web-archive-query-log"
_DATA_DIR = _GLOBAL_DATA_DIR / "focused"
def _read_jsonl(path: Path, base_type: str) -> dict:
jsonl_path = _DATA_DIR / base_type / path.with_suffix(".jsonl.gz")
if not jsonl_path.exists():
return {}
index = {}
try:
with GzipFile(jsonl_path, "r") as gzip_file:
# noinspection PyTypeChecker
for line in tqdm(gzip_file, desc="Index JSONL"):
try:
# noinspection PyTypeChecker
record = loads(line)
                except Exception:
print(f"Could not index {line} at {path}.")
continue
record_id = uuid5(
NAMESPACE_URL,
f"{record['timestamp']}:{record['url']}",
)
index[record_id] = record
return index
    except Exception:
print(f"Could not read JSONL file at {path}.")
return {}
def _index_warc(path: Path, base_type: str) -> dict:
warc_path = _DATA_DIR / base_type / path
if not warc_path.exists():
return {}
index = {}
for warc_child_path in warc_path.iterdir():
if warc_child_path.name.startswith("."):
continue
try:
stream = FileStream(str(warc_child_path.absolute()))
records = ArchiveIterator(
stream,
record_types=WarcRecordType.response,
parse_http=False,
)
# noinspection PyTypeChecker
for record in tqdm(records, desc="Index WARC"):
record: WarcRecord
offset = record.stream_pos
record_url_header = record.headers["Archived-URL"]
try:
record_url = loads(record_url_header)
except JSONDecodeError:
print(f"Could not index {record_url_header} at {path}.")
continue
record_id = uuid5(
NAMESPACE_URL,
f"{record_url['timestamp']}:{record_url['url']}",
)
index[record_id] = (
warc_child_path,
offset,
)
        except Exception:
print(f"Could not read WARC file at {warc_child_path}.")
continue
return index
def _iter_relative_path_records(relative_path: Path) -> Iterator[tuple]:
print("Finished reading archived URLs.")
archived_query_urls_index = _read_jsonl(relative_path,
"archived-query-urls")
print("Finished reading archived query URLs.")
archived_raw_serps_index = _index_warc(relative_path, "archived-raw-serps")
print("Finished reading archived raw SERPs (pointers).")
    for record_id, archived_query_url in archived_query_urls_index.items():
archived_raw_serp_location = archived_raw_serps_index.get(record_id,
None)
yield relative_path, record_id, archived_query_url, \
archived_raw_serp_location
def _record_to_query(relative_path_record: tuple) -> Optional[str]:
relative_path, record_id, archived_query_url, archived_raw_serp_location \
= relative_path_record
if archived_raw_serp_location is not None:
print("SERP was already downloaded.")
url = archived_query_url["url"]
domain = urlparse(url).hostname
timestamp = archived_query_url["timestamp"]
wayback_timestamp = \
datetime.fromtimestamp(timestamp).strftime("%Y%m%d%H%M%S")
wayback_raw_url = \
f"https://web.archive.org/web/{wayback_timestamp}id_/{url}"
task = {
"download_url": wayback_raw_url,
"output_path": str(_GLOBAL_DATA_DIR / "archived-raw-serps" / relative_path / "*.warc.gz"),
}
return dumps(task)
def main():
session = SparkSession.builder.getOrCreate()
sc = session.sparkContext
relative_paths = [
path
.relative_to(_DATA_DIR / "archived-urls")
.with_name(path.name[:-len(".jsonl.gz")])
for path in _DATA_DIR.glob("archived-urls/*/*/*.jsonl.gz")
]
print(f"Found {len(relative_paths)} paths.")
shuffle(relative_paths)
print(f"Selected {len(relative_paths)} paths "
f"for finding downloadable SERP URLs.")
print("Export downloadable SERP URL list at archive-query-log-urls/.")
sc.parallelize(relative_paths, 100) \
.flatMap(_iter_relative_path_records) \
.map(_record_to_query) \
.filter(lambda json: json is not None) \
.repartition(1) \
.saveAsTextFile(f"archive-query-log-urls/",
compressionCodecClass=
"org.apache.hadoop.io.compress.GzipCodec")
print("Done.")
if __name__ == "__main__":
main()
| 5,481
| 33.049689
| 98
|
py
|
archive-query-log
|
archive-query-log-main/integrations/tira/aql-experiment-baseline.py
|
#!/usr/bin/env python3
from argparse import ArgumentParser
from json import dump
from pathlib import Path
from pandas import concat, read_json
def parse_args():
parser = ArgumentParser(description="Archive Query Log Demo")
parser.add_argument(
'--input',
type=Path,
help="The directory with the input data "
"(i.e., a directory containing serps and results).",
required=True,
)
parser.add_argument(
"--output",
type=Path,
help="The output will be stored in this directory.",
required=True,
)
return parser.parse_args()
def main(input_dir: Path, output_dir: Path):
paths = input_dir.glob("serps/part*.gz")
df = concat([read_json(path, lines=True) for path in paths])
with (output_dir / "results.json").open("w") as file:
dump(df["search_provider_name"].value_counts().to_dict(), file)
if __name__ == "__main__":
args = parse_args()
main(args.input, args.output)
| 1,000
| 24.025
| 71
|
py
|
archive-query-log
|
archive-query-log-main/integrations/ir_datasets/archive_query_log_ir_datasets_integration.py
|
"""
This script registers the Archive Query Log dataset in ir_datasets.
See: https://github.com/allenai/ir_datasets/
Note: This script is intended to be executed with the Docker image provided.
"""
from pathlib import Path
from typing import NamedTuple, List, Mapping, Optional, Iterator
from ir_datasets import registry
from ir_datasets.datasets.base import Dataset
from ir_datasets.formats import BaseDocs, BaseQueries, BaseQrels, GenericDoc
from ir_datasets.indices import PickleLz4FullStore, Docstore
from pandas import DataFrame, read_json, concat
from tqdm import tqdm
NAME = "archive-query-log"
DATA_DIR = Path('/data/')
def _extract_non_empty_results(serp: Mapping) -> List[dict]:
if 'serp_results' not in serp or serp['serp_results'] is None:
return []
results = []
for result in serp['serp_results']:
if ('result_snippet_title' not in result or
result['result_snippet_title'] is None):
continue
results += [result]
return results
def _parse_serps(base_path: Path, min_results_on_serp: int = 1) -> DataFrame:
result: DataFrame = concat([
read_json(path, lines=True)
for path in tqdm(base_path.glob('serps/part*.gz'), 'Load SERPs')
])
original_length = len(result)
num_results = result['serp_results'].map(
lambda serp_results: len(
_extract_non_empty_results({'serp_results': serp_results})
)
)
mask = num_results >= min_results_on_serp
result = result[mask]
print(f'Processed {original_length} SERPs, '
f'found {len(result)} non-empty SERPs.')
return result
def _persist_run_file(serps: DataFrame, lang: str = "en") -> None:
run_data = []
for _, serp in serps.iterrows():
results = _extract_non_empty_results(serp)
max_rank = max(
result['result_snippet_rank']
for result in results
)
run_data += [
{
'query': serp.serp_id,
'q0': 0,
'docid': result['result_id'],
'rank': result['result_snippet_rank'],
'score': max_rank - result['result_snippet_rank'],
'system': 'aql'
}
for result in results
if serp['serp_query_text_url_language'] == lang
]
run = DataFrame(run_data)
run.to_csv(DATA_DIR / 'run.txt', sep=" ", header=False, index=False)
class ArchiveQueryLogQuery(NamedTuple):
query_id: str
text: str
search_provider_name: str
num_results: int
lang: str
def default_text(self):
return self.text
class ArchiveQueryLogQrel(NamedTuple):
query_id: str
doc_id: str
relevance: int
iteration: int
search_provider_name: str
num_results: int
lang: str
class ArchiveQueryLogQueries(BaseQueries):
serps: DataFrame
lang: Optional[str]
def __init__(self, serps: DataFrame, lang: Optional[str] = "en"):
self.serps = serps
self.lang = lang
def queries_iter(self) -> Iterator[ArchiveQueryLogQuery]:
for _, serp in self.serps.iterrows():
if (self.lang is not None and
self.lang != serp["serp_query_text_url_language"]):
continue
num_results = len(_extract_non_empty_results(serp))
if num_results <= 0:
continue
yield ArchiveQueryLogQuery(
query_id=serp["serp_id"],
text=serp["serp_query_text_html"],
lang=serp["serp_query_text_url_language"],
search_provider_name=serp["search_provider_name"],
num_results=num_results,
)
def queries_cls(self):
return ArchiveQueryLogQuery
def queries_lang(self) -> Optional[str]:
return self.lang
class ArchiveQueryLogQrels(BaseQrels):
serps: DataFrame
lang: Optional[str]
def __init__(self, serps: DataFrame, lang: Optional[str] = "en"):
self.serps = serps
self.lang = lang
def qrels_iter(self) -> Iterator[ArchiveQueryLogQrel]:
for _, serp in self.serps.iterrows():
if (self.lang is not None and
self.lang != serp["serp_query_text_url_language"]):
continue
results = _extract_non_empty_results(serp)
max_rank = max(
result['result_snippet_rank']
for result in results
)
for result in results:
yield ArchiveQueryLogQrel(
query_id=serp["serp_id"],
doc_id=result['result_id'],
relevance=max_rank - result['result_snippet_rank'],
iteration=0,
search_provider_name=serp["search_provider_name"],
num_results=len(results),
lang=serp["serp_query_text_url_language"],
)
def qrels_cls(self):
return ArchiveQueryLogQrel
class ArchiveQueryLogDocs(BaseDocs):
serps: DataFrame
lang: Optional[str]
def __init__(self, serps: DataFrame, lang: Optional[str] = "en"):
self.serps = serps
self.lang = lang
def docs_iter(self) -> Iterator[GenericDoc]:
for _, serp in self.serps.iterrows():
if (self.lang is not None and
self.lang != serp["serp_query_text_url_language"]):
continue
for result in _extract_non_empty_results(serp):
yield GenericDoc(
doc_id=result['result_id'],
text=f"{result['result_snippet_title']} "
f"{result['result_snippet_text']}".strip()
)
def docs_count(self) -> int:
return sum(
len(_extract_non_empty_results(serp))
for _, serp in self.serps.iterrows()
if (self.lang is None or
self.lang == serp["serp_query_text_url_language"])
)
def docs_cls(self):
return GenericDoc
def docs_lang(self) -> Optional[str]:
return self.lang
def docs_store(self) -> Docstore:
return PickleLz4FullStore(
path=f'{DATA_DIR}/docs.pklz4',
init_iter_fn=self.docs_iter,
data_cls=self.docs_cls(),
lookup_field='doc_id',
index_fields=['doc_id'],
)
_serps = _parse_serps(DATA_DIR)
dataset = Dataset(
ArchiveQueryLogDocs(_serps),
ArchiveQueryLogQueries(_serps),
ArchiveQueryLogQrels(_serps),
)
assert dataset.has_docs(), "dataset has no documents"
assert dataset.has_queries(), "dataset has no queries"
if NAME in registry:
print(f"Dataset '{NAME}' already registered.")
else:
registry.register(NAME, dataset)
if __name__ == '__main__':
_persist_run_file(_serps)
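# --- Hedged usage sketch (not part of the original integration) ---
def _example_use_registered_dataset() -> None:
    """
    Once this module has been imported (and the dataset thereby registered),
    it is available through the regular ir_datasets API.
    """
    from ir_datasets import load
    aql = load(NAME)
    for query in aql.queries_iter():
        print(query.query_id, query.text)
        break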
| 6,855
| 29.471111
| 77
|
py
|
anticipatr
|
anticipatr-main/src/main.py
|
import os
import argparse
import random
import numpy as np
import time
from pathlib import Path
import json
import datetime
import pickle
import torch
from torch.utils.data import DataLoader
import datasets
import utils.misc as utils
from datasets import build_dataset
from models import build_model
from engine import train_one_epoch, evaluate
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str,default="bf")
parser.add_argument('--root',type=str,help='Path to data root directory')
parser.add_argument('--num_nouns',type=int,default=1)
parser.add_argument('--num_verbs',type=int,default=48)
parser.add_argument('--num_actions',type=int,default=1)
parser.add_argument('--task',type=str,default='anticipation',choices=['anticipation','recognition'])
parser.add_argument('--anticipation',type=str,default='longfuture',choices=['nearfuture','longfuture'])
parser.add_argument('--pretraining_task',type=str,default='snippet_longfuture_anticipation')
parser.add_argument('--fps',type=int,default=60)
parser.add_argument('--label_type',type=str,default='verb',choices=['verb','noun','action'])
parser.add_argument('--action_repr',type=str,default='actionset',choices=['actionset'])
parser.add_argument('--train_many_shot',action='store_true',default=False,help='training with many shot verbs')
parser.add_argument('--split',type=int,default=1)
parser.add_argument('--train_timestamps',type=str,default='0.2,0.3,0.4,0.5,0.6,0.7,0.8')
parser.add_argument('--val_timestamps',type=str,default='0.25,0.5,0.75')
# model parameters
parser.add_argument('--model',type=str,default='antr')
parser.add_argument('--matcher_type',type=str,default='greedy', choices=['hungarian','greedy'])
parser.add_argument('--num_queries',type=int,default=10)
parser.add_argument('--num_pos_embed_dict',type=int,default=50000)
parser.add_argument('--dim_latent',type=int,default=128)
parser.add_argument('--hidden_dim',type=int,default=256)
parser.add_argument('--position_embedding',type=str,default='sine')
parser.add_argument('--num_decoder_embedding',type=int,default=10000)
parser.add_argument('--position_type',type=str,default='index',choices=['time','index'])
parser.add_argument('--dropout',type=float,default=0.1,help='transformer dropout')
parser.add_argument('--nheads',type=int,default=8)
parser.add_argument('--dim_feedforward',type=int,default=2048)
parser.add_argument('--encoder',type=str,default='parallel')
parser.add_argument('--decoder',type=str,default='parallel')
parser.add_argument('--enc_layers',type=int,default=2)
parser.add_argument('--dec_layers',type=int,default=2)
parser.add_argument('--pretrained_enc_layers',type=int,default=2)
parser.add_argument('--pretrained_dec_layers',type=int,default=2)
parser.add_argument('--snippet_window',type=int,default=16)
parser.add_argument('--pretrained_path',type=str,default='')
parser.add_argument('--pre_norm',action='store_true')
parser.add_argument('--aux_loss',action='store_true')
parser.add_argument('--cuda',action='store_true',help='gpu mode')
parser.add_argument('--eval',action='store_true',help='evaluation mode')
parser.add_argument('--norm_type',type=str,choices=['gn','bn'],default='bn',help="normalization type")
parser.add_argument('--activation',type=str,default='leaky_relu',help="transformer activation type")
parser.add_argument('--set_cost_class',type=float,default=1,help='classification coefficient in the matching cost')
parser.add_argument('--set_cost_segment',type=float,default=5,help='segment coefficient in the matching cost')
parser.add_argument('--set_cost_siou',type=float,default=3,help='segment IoU coefficient in the matching cost')
parser.add_argument('--loss_coef_segment',type=float,default=5,help='segment loss coefficient')
parser.add_argument('--loss_coef_siou',type=float,default=3,help='segment IoU loss coefficient')
parser.add_argument('--eos_coef',type=float,default=0.1,help='relative classification weight of the no-object class')
# * Training
parser.add_argument('--resume',type=str,default='',help='resume from a checkpoint')
parser.add_argument('--save_checkpoint_every',type=int,default=1000,help='checkpoint saving frequency')
parser.add_argument('--evaluate_every',type=int,default=5,help='evaluation frequency')
parser.add_argument('--evaluate_every_epoch',type=int,default=5,help='evaluation frequency (in epochs)')
parser.add_argument('--num_workers',type=int,default=0,help='number of workers')
parser.add_argument('--batch_size',type=int,default=2,help='batch_size')
parser.add_argument('--epochs',type=int,default=10,help='number of epochs')
parser.add_argument('--step_size',type=int,default=64,help='number of steps before backpropagation')
parser.add_argument('--start_epoch',type=int,default=0,help='starting epoch')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_joiner', default=0, type=float)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=1, type=float,help='gradient clipping max norm')
parser.add_argument('--output_dir', type=str,default='./experiments/checkpoints/',help='path to save intermediate checkpoints')
# * Distributed Training
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--device', default='cuda',help='device to use for training / testing')
args = parser.parse_args()
print(args)
def main(args):
bz = args.batch_size
lr = args.lr
if args.cuda:
if torch.cuda.device_count() >= 1:
utils.init_distributed_mode(args)
device = torch.device(args.device)
else:
device = torch.device('cpu')
# fix the seed for reproducibility
if args.cuda:
seed = args.seed + utils.get_rank()
else:
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# datasets build
dataset_train = build_dataset(args=args, mode="train")
dataset_test = build_dataset(args=args, mode="val")
if args.cuda and args.distributed:
sampler_train = torch.utils.data.distributed.DistributedSampler(dataset_train,shuffle=True)
sampler_test = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_test = DataLoader(dataset_test, 1, sampler=sampler_test, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
# set up model
model, criterion = build_model(args)
model.to(device)
criterion.to(device)
model_without_ddp = model
if args.cuda and args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],find_unused_parameters=True)
        model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
# set up model training
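    # two parameter groups: parameters whose name contains "joiner" (the positional-encoding joiner)
    # get their own learning rate (--lr_joiner, default 0, i.e. effectively frozen); all others use --lr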
param_dicts = [{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" not in n and p.requires_grad]},
{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" in n and p.requires_grad], "lr": args.lr_joiner,},]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# output and checkpoints directory
checkpoint_dir = args.output_dir
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except OSError:
pass
if args.resume:
checkpoint = Path(args.resume)
assert checkpoint.exists()
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
print("Start Training")
start_time = time.time()
optimizer.zero_grad()
for epoch in range(args.start_epoch, args.epochs):
if args.cuda and args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(epoch, args.clip_max_norm, model, criterion, data_loader_train, optimizer, lr_scheduler, device)
if args.output_dir:
checkpoint_dir = Path(checkpoint_dir)
checkpoint_paths = [checkpoint_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and every 100 epochs
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % args.save_checkpoint_every == 0:
checkpoint_paths.append(checkpoint_dir / f'checkpoint{epoch:05}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args,}, checkpoint_path)
# evaluation
if epoch % args.evaluate_every_epoch == 0:
test_stats = evaluate(epoch, model, criterion, data_loader_test, args.dataset, args.evaluate_every, device)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (checkpoint_dir / 'log.json').open("a") as f:
f.write(json.dumps(log_stats) + "\n")
lr_scheduler.step()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
main(args)
| 10,477
| 46.627273
| 208
|
py
|
anticipatr
|
anticipatr-main/src/engine.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import os,sys
import copy
import numpy as np
import math
from typing import Iterable
import time
import utils.misc as utils
import datasets
from metrics.longfuture_metrics import AnticipationEvaluator
def train_one_epoch(epoch, max_norm, model, criterion, data_loader, optimizer, scheduler, device):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 50
step = 0
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
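        # the reduced/scaled sum is only used for the finiteness check and for logging;
        # gradients are taken from loss_value, computed from the local (unreduced) losses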
        loss = losses_reduced_scaled.item()
        if not math.isfinite(loss):
            print("Loss is {}, stopping training".format(loss))
            print(loss_dict_reduced)
            sys.exit(1)
optimizer.zero_grad()
loss_value.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
train_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items() if 'AP' not in k}
print("Train epoch:", epoch, "Averaged stats:", train_stats)
return train_stats
def evaluate(epoch, model, criterion, data_loader, dataset, evaluate_every, device):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask,targets, tgt_mask)
losses = criterion(outputs, targets)
losses_metric = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
# convert dict[key, tensor (b,x,x)] to list of length b with dict(str, tensor (x,x))
losses_metric = [{k:v[i] for k,v in losses_metric.items()} for i in range(samples.tensors.size(0))]
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
metric_logger.update(loss=losses_reduced_scaled.item(), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist()): output for target, output in zip(targets,losses_metric)}
predictions.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation need to gather all data###########
all_predictions = utils.all_gather(predictions)
    stats = {}
    if epoch % evaluate_every == 0:
        evaluator = AnticipationEvaluator(dataset)
        stats = evaluator.evaluate(all_predictions)
        print("Test epoch:", epoch, "Averaged test stats:", stats)
    return stats
| 4,794
| 37.36
| 141
|
py
|
anticipatr
|
anticipatr-main/src/snippet_models/model.py
|
import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
import numpy as np
from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class EncoderSnippetLongfutureAnticipation(nn.Module):
def __init__(self, joiner, transformer, dim_feedforward, num_classes, num_queries, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
num_queries: number of action queries, ie decoder outputs.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
self.aux_loss = aux_loss
def forward(self, samples, mask, targets=None, tgt_mask=None):
""" The forward expects two inputs:
- samples.tensor: batched videos features, of shape [batch_size x 2048 x T]
            - samples.mask: a binary mask of shape [batch_size x T], containing 1 on padded positions
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-action) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized segments coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions=torch.empty_like(mask)
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
hs = self.transformer(input,mask, tgt_mask, self.query_embed.weight,pos)[0]
outputs_class = self.class_embed(hs)
return outputs_class
class CriterionSnippetLongfutureAnticipation(nn.Module):
def __init__(self, num_classes, weight_dict, eos_coef, losses,fps):
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.losses = losses
self.eos_coef = eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def get_mAP(self,pred,labels,label_mask):
mAPs = dict()
for i in range(label_mask.shape[0]):
pred_i = pred[:, label_mask[i]]
labels_i = labels[:, label_mask[i]]
mAPs['mAP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
if mAPs['mAP_{}'.format(i)].ndim == 1:
mAPs['mAP_{}'.format(i)] = mAPs['mAP_{}'.format(i)].unsqueeze(0)
return mAPs
def loss_labels(self,outputs, targets,log=True):
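        # multi-label classification for the snippet encoder: logits are averaged over the temporal
        # dimension and trained with binary cross-entropy against the multi-hot action labels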
src_logits = torch.sigmoid(outputs['pred_logits'].mean(1))
target_classes = torch.cat([t['labels'].unsqueeze(0) for t in targets])
loss_ce = F.binary_cross_entropy(src_logits, target_classes,reduction='mean')
losses = {'loss_ce': loss_ce}
losses.update(self.get_mAP(src_logits, target_classes, targets[0]['label_mask']))
return losses
def get_loss(self, loss, outputs, targets, **kwargs):
loss_map = {
'labels': self.loss_labels
}
assert loss in loss_map, f'{loss} loss not defined'
return loss_map[loss](outputs,targets,**kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets))
if 'aux_outputs' in outputs:
for i,aux_outputs in enumerate(outputs['aux_outputs']):
for loss in self.losses:
kwargs = {}
kwargs = {'log' : False}
l_dict = self.get_loss(loss,aux_outputs,targets,**kwargs)
l_dict = {k + f'_{i}': v for k,v in l_dict.items()}
losses.update(l_dict)
return losses
class Identity(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
def replace_last_layer(model):
model.class_embed = Identity()
return model
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
losses = ['labels']
weight_dict = {'loss_ce':1}
model = EncoderSnippetLongfutureAnticipation(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
num_classes=num_classes,
num_queries=1,
aux_loss=args.aux_loss,
)
criterion = CriterionSnippetLongfutureAnticipation(num_classes=num_classes, weight_dict=weight_dict, eos_coef=args.eos_coef, losses=losses,fps=args.fps)
model = replace_last_layer(model)
print(model)
return model
| 6,287
| 35.77193
| 156
|
py
|
anticipatr
|
anticipatr-main/src/snippet_models/position_encoding.py
|
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
"""
Sinusoidal positional encodings based on sequence timestamps
"""
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
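        # cumulative sum over the inverted padding mask gives the (1-based) position of each valid
        # timestep; positions are scaled to [0, 2*pi] (when normalize=True) before the sine/cosine encoding below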
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| 1,693
| 33.571429
| 97
|
py
|
anticipatr
|
anticipatr-main/src/snippet_models/transformer.py
|
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, encoding='parallel', decoding='no_decoder', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if decoding != 'no_decoder':
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, src_mask, tgt_mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, t = src.shape
src = src.permute(2, 0, 1)
pos_embed = pos_embed.permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory = self.encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
tgt_mask = None
if self.decoding != 'no_decoder':
hs = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_key_padding_mask=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0)
elif self.decoding == 'no_decoder':
return memory.permute(1,0,2), torch.empty(memory.size()).to(memory.device)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
encoding=args.encoder,
decoding="no_decoder",
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 12,622
| 39.458333
| 139
|
py
|
anticipatr
|
anticipatr-main/src/snippet_models/joiner.py
|
"""
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
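# Note: the Joiner here only attaches positional encodings to pre-extracted video features
# (e.g. the I3D features loaded by the datasets); there is no backbone network to run.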
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
if self.position_type == 'index' and self.position_encoding_type=='sine':
pos = self[0](x,mask)
return x, pos
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 959
| 29.967742
| 118
|
py
|
anticipatr
|
anticipatr-main/src/snippet_models/__init__.py
|
from .model import build
def build_snippet_model(args):
return build(args)
| 83
| 9.5
| 30
|
py
|
anticipatr
|
anticipatr-main/src/models/matcher.py
|
import torch
from scipy.optimize import linear_sum_assignment
from torch import nn
import numpy as np
import utils.segment_utils as segment_utils
class GreedyMatcher(nn.Module):
"""This class computes an assignment between the targets and the predictions of the network
For efficiency reasons, the targets don't include the no_action. Because of this, in general,
there are more predictions than targets. In this case, we do a 1-to-1 matching of the best predictions,
while the others are un-matched (and thus treated as non-actions).
"""
def __init__(self):
"""Creates the matcher
"""
super().__init__()
@torch.no_grad()
def forward(self, outputs, targets):
""" Performs the matching
Params:
outputs: This is a dict that contains at least these entries:
"pred_logits": Tensor of dim [batch_size, num_queries, num_classes] with the classification logits
"pred_segments": Tensor of dim [batch_size, num_queries, 2] with the predicted segment timestamps
targets: This is a list of targets (len(targets) = batch_size), where each target is a dict containing:
"labels": Tensor of dim [num_target_segments] (where num_target_segments is the number of ground-truth
actions in the target) containing the class labels
"segmentes": Tensor of dim [num_target_segments, 2] containing the target segment timestamps
Returns:
A list of size batch_size, containing tuples of (index_i, index_j) where:
- index_i is the indices of the selected predictions (in order)
- index_j is the indices of the corresponding selected targets (in order)
For each batch element, it holds:
               len(index_i) = len(index_j) = min(num_queries, num_target_segments)
"""
bs, num_queries = outputs["pred_logits"].shape[:2]
# We flatten to compute the cost matrices in a batch
out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1) # [batch_size * num_queries, num_classes]
out_segment = outputs["pred_segments"].flatten(0, 1) # [batch_size * num_queries, 2]
scale_factor = torch.stack([t["prediction_duration"] for t in targets], dim=0)
out_segment_scaled = out_segment * scale_factor.unsqueeze(1).repeat(1,num_queries,1).flatten(0,1).repeat(1,2)
tgt_segment = torch.cat([v["segments"] for v in targets])
tgt_segment_scaled = torch.cat([v["segments"] * v['prediction_duration'] for v in targets])
indices = []
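        # greedy matching: targets are visited longest-first and each takes the still-unassigned
        # prediction with the highest segment IoU; leftover predictions get target index -1 and are
        # later treated as the no-action class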
for i in range(bs):
targets_i = targets[i]['segments'] * targets[i]['prediction_duration']
targets_i = targets_i[torch.sort(targets_i[:,1]-targets_i[:,0], descending=True)[1]]
preds_i = outputs['pred_segments'][i] * scale_factor[i][0]
tgt_i = []
p_i = []
seen_pidx = []
for tidx, tgt in enumerate(targets_i):
sorted_iou = torch.sort(segment_utils.generalized_segment_iou(tgt.unsqueeze(0), preds_i), descending=True,dim=0,stable=True)[1]
for s in sorted_iou:
if s not in seen_pidx:
pidx = s
seen_pidx.append(s)
break
tgt_i.append(tidx)
p_i.append(pidx)
unseen_pidx = [p for p in range(num_queries) if p not in seen_pidx]
for up_idx in unseen_pidx:
p_i.append(up_idx)
tgt_i.append(-1)
indices.append(torch.cat((torch.as_tensor(p_i,dtype=torch.int64).unsqueeze(0),torch.as_tensor(tgt_i,dtype=torch.int64).unsqueeze(0)),dim=0))
sizes = [len(v["segments"]) for v in targets]
return [torch.as_tensor(i, dtype=torch.int64) for i in indices]
def build_matcher(args):
return GreedyMatcher()
| 3,980
| 46.392857
| 152
|
py
|
anticipatr
|
anticipatr-main/src/models/antr.py
|
import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
from .matcher import build_matcher
import snippet_models
import numpy as np
from utils.misc import accuracy, get_world_size, get_rank,is_dist_avail_and_initialized
from utils import segment_utils as segment_utils
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
class ANTR(nn.Module):
def __init__(self, joiner, transformer, output_type, dim_feedforward, num_classes, num_queries, num_decoder_embedding, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
            num_queries: number of anticipation queries, ie, decoder outputs. This is the maximal number of actions the model can predict given a video.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.hidden_dim = hidden_dim
self.output_type = output_type
self.num_queries = num_queries
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.query_time_embed = nn.Linear(hidden_dim + 1, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes + 1)
self.segments_embed = MLP(hidden_dim, hidden_dim, 2, 3)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
self.aux_loss = aux_loss
def forward(self, samples, mask, targets,tgt_mask=None):
""" The forward expects two inputs:
- samples: batched videos features, of shape [batch_size x 2048 x T]
            - mask: a binary mask of shape [batch_size x T], containing 1 on padded positions
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-action) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized segments coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
                               relative to the prediction duration (disregarding possible padding).
            - "aux_outputs": Optional, only returned when auxiliary losses are activated. It is a list of
                             dictionaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions = torch.empty_like(mask).to(samples.device) ## for positional encodings
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
b, l, c = input.size()
query_pos = self.query_embed.weight.unsqueeze(1).repeat(1, b, 1)
nq = query_pos.size(0)
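        # condition each anticipation query on the prediction duration: the duration is appended to the
        # learned query embedding and projected back to hidden_dim by query_time_embed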
prediction_times = torch.stack([t['prediction_duration'] for t in targets], axis=0).squeeze(1).repeat(1, nq, 1).permute(1,2,0)
query_and_prediction_times = torch.cat([query_pos, prediction_times], axis=2)
decoder_pos = self.query_time_embed(query_and_prediction_times.reshape(b * nq, self.hidden_dim + 1)).reshape(b,nq,-1).permute(1,0,2)
hs = self.transformer(input,src, mask, tgt_mask, decoder_pos,pos)[0]
outputs_class = self.class_embed(hs)
outputs_segments = self.segments_embed(hs)
outputs_segments = F.relu(outputs_segments) + 0.1
out = {'pred_logits': outputs_class[-1], 'pred_segments': outputs_segments[-1]}
if self.aux_loss:
out['aux_outputs'] = self._set_aux_loss(outputs_class, outputs_segments)
return out
@torch.jit.unused
def _set_aux_loss(self, outputs_class, outputs_segments):
# this is a workaround to make torchscript happy, as torchscript
# doesn't support dictionary with non-homogeneous values, such
# as a dict having both a Tensor and a list.
return [{'pred_logits': a, 'pred_segments': b} for a, b in zip(outputs_class[:-1], outputs_segments[:-1])]
class CriterionGreedyMatcher(nn.Module):
""" This class computes the loss for ANTICIPATR.
The process happens in two steps:
1) we compute greedy assignment between ground truth segments and the outputs of the model
2) we supervise each pair of matched ground-truth / prediction (supervise class and segment)
"""
def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
""" Create the criterion.
Parameters:
num_classes: number of action categories, omitting the special no-action category
matcher: module able to compute a matching between targets and proposals
weight_dict: dict containing as key the names of the losses and as values their relative weight.
eos_coef: relative classification weight applied to the no-action category
losses: list of all the losses to be applied. See get_loss for list of available losses.
"""
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.matcher = matcher
self.losses = losses
self.eos_coef=eos_coef
empty_weight = torch.ones(self.num_classes + 1)
empty_weight[-1] = self.eos_coef
self.register_buffer('empty_weight', empty_weight)
def _get_src_permutation_idx(self, indices):
# permute predictions following indices
batch_idx = torch.cat([torch.full_like(src, i) for i, (src,_) in enumerate(indices)])
src_idx = torch.cat([src for (src, _) in indices])
return batch_idx, src_idx
def _get_tgt_permutation_idx(self, indices):
# permute targets following indices
batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
tgt_idx = torch.cat([tgt for (_, tgt) in indices])
return batch_idx, tgt_idx
def loss_labels(self, outputs, targets, indices, num_segments, log=True):
"""Classification loss (NLL)
targets dicts must contain the key "labels" containing a tensor of dim [nb_target_segments]
"""
assert 'pred_logits' in outputs
src_logits = outputs['pred_logits']
idx = self._get_src_permutation_idx(indices)
target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
target_classes = torch.full(src_logits.shape[:2], self.num_classes,
dtype=torch.int64, device=src_logits.device)
target_classes[idx] = target_classes_o
loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
losses = {'loss_ce': loss_ce}
return losses
def loss_segments(self, outputs, targets, indices, num_segments):
"""Compute the losses related to the segments, the L1 regression loss and the IoU loss
targets dicts must contain the key "segments" containing a tensor of dim [num_segments, 2]
"""
assert 'pred_segments' in outputs
idx = self._get_src_permutation_idx(indices)
src_segments = outputs['pred_segments'][idx].squeeze(1)
target_segments = torch.cat([t['segments'][i] for t, (_, i) in zip(targets, indices)], dim=0).squeeze(1)
loss_segment = F.l1_loss(src_segments, target_segments, reduction='none')
losses = {}
losses['loss_segment'] = loss_segment.sum()/num_segments
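        # segment IoU loss: 1 - generalized IoU of each matched prediction/target pair
        # (diagonal of the pairwise IoU matrix)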
loss_siou = 1 - torch.diag(segment_utils.generalized_segment_iou(src_segments,target_segments))
losses['loss_siou'] = loss_siou.sum()/num_segments
return losses
def get_unrolled_timeline(self, outputs, targets):
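        # rasterize predicted segments onto a per-timestep timeline: every timestep covered by a segment
        # keeps the element-wise maximum of the class scores; the per-timestep argmax is then one-hot
        # encoded and compared against the one-hot ground-truth timeline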
src_logits = F.softmax(outputs['pred_logits'],dim=2)
b,q,c = src_logits.size()
src_segments = outputs['pred_segments']
scale_factor = torch.cat([t['prediction_duration'].unsqueeze(0) for t in targets]).repeat(1,2)
src_segments_scaled = src_segments * scale_factor[:,None,:]
fps = targets[0]['fps']
out_logits = torch.zeros(b,int(torch.round(torch.max(torch.cat([t['prediction_duration'] for t in targets])))),self.num_classes+1).to(src_logits.device)
for bidx in range(b):
for sidx in range(len(src_segments_scaled[bidx])):
s = max(int(src_segments_scaled[bidx][sidx][0]), 0)
e = min(int(src_segments_scaled[bidx][sidx][1]), out_logits.size(1))
for tidx in range(s,e):
out_logits[bidx,tidx,:] = torch.max(out_logits[bidx,tidx,:], src_logits[bidx][sidx])
output_classes_onehot = torch.tensor(F.one_hot(torch.argmax(out_logits[:,:,:-1],dim=2),num_classes=self.num_classes),dtype=torch.float32)
target_classes = torch.zeros(b,out_logits.size(1),c-1).to(src_logits.device)
for idx, t in enumerate(targets):
target_classes[idx,:, :t['labels_onehot'].size(0)] = t['labels_onehot']
return output_classes_onehot, target_classes
@torch.no_grad()
def loss_cardinality(self, outputs, targets, indices, num_segments):
""" Compute the cardinality error, ie the absolute error in the number of predicted non-empty segments
This is not really a loss, it is intended for logging purposes only. It doesn't propagate gradients
"""
pred_logits = outputs['pred_logits']
device = pred_logits.device
tgt_lengths = torch.as_tensor([len(v["labels"]) for v in targets], device=device)
# Count the number of predictions that are NOT "no-action" (which is the last class)
card_pred = (pred_logits.argmax(-1) != pred_logits.shape[-1] - 1).sum(1)
card_err = F.l1_loss(card_pred.float(), tgt_lengths.float())
losses = {'cardinality_error': card_err}
return losses
def get_mAP(self, pred, labels, label_mask):
mAPs = dict()
pred = torch.clip(pred.sum(1), min=0.0,max=1.0)
labels = torch.clip(labels.sum(1), min=0.0,max=1.0)
for i in range(label_mask.shape[1]):
if torch.sum(label_mask[0][i]) > 0:
pred_i = pred[:, label_mask[0][i]].squeeze(1)
labels_i = labels[:, label_mask[0][i]].squeeze(1)
mAPs['AP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
return mAPs
def get_accuracy(self,pred,labels, outputs, targets):
acc = dict()
for i in range(pred.shape[0]):
            k_r_p = 'acc_{}_{}'.format(int(targets[i]['ratio_idx']*100), int(targets[i]['prediction_idx']*100))
            entry = torch.cat((pred[i].detach().cpu(), labels[i].detach().cpu()), 1)
            # check if the key already exists in the output dict and accumulate along the time dimension
            if k_r_p in acc:
                acc[k_r_p] = torch.cat((acc[k_r_p], entry), 0)
            # if it doesn't exist create a key and value pair
            else:
                acc[k_r_p] = entry
return acc
def get_loss(self, loss, outputs, targets, indices, num_segments, **kwargs):
loss_map = {
'labels': self.loss_labels,
'cardinality': self.loss_cardinality,
'segments': self.loss_segments,
}
assert loss in loss_map, f'do you really want to compute {loss} loss?'
return loss_map[loss](outputs, targets, indices, num_segments, **kwargs)
def forward(self, outputs, targets):
""" This performs the loss computation.
Parameters:
outputs: dict of tensors, see the output specification of the model for the format
targets: list of dicts, such that len(targets) == batch_size.
The expected keys in each dict depends on the losses applied, see each loss' doc
"""
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
# Retrieve the matching between the outputs of the last layer and the targets
all_indices = self.matcher(outputs_without_aux, targets)
indices = [idx[:,(idx[1,:] + 1).nonzero(as_tuple=False)] for idx in all_indices]
# Compute the average number of target segments accross all nodes, for normalization purposes
num_segments = sum(len(t["labels"]) for t in targets)
num_segments = torch.as_tensor([num_segments], dtype=torch.float, device=next(iter(outputs.values())).device)
if is_dist_avail_and_initialized():
torch.distributed.all_reduce(num_segments)
num_segments = torch.clamp(num_segments / get_world_size(), min=1).item()
# Compute all the requested losses
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets, indices, num_segments))
# In case of auxiliary losses, we repeat this process with the output of each intermediate layer.
if 'aux_outputs' in outputs:
for i, aux_outputs in enumerate(outputs['aux_outputs']):
indices = self.matcher(aux_outputs, targets)
for loss in self.losses:
kwargs = {}
if loss == 'labels':
# Logging is enabled only for the last layer
kwargs = {'log': False}
l_dict = self.get_loss(loss, aux_outputs, targets, indices, num_segments, **kwargs)
l_dict = {k + f'_{i}': v for k, v in l_dict.items()}
losses.update(l_dict)
pred,labels = self.get_unrolled_timeline(outputs, targets)
losses.update(self.get_mAP(pred,labels, targets[0]['label_mask']))
losses.update(self.get_accuracy(pred, labels, outputs, targets))
return losses
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
num_queries = args.num_queries
model = ANTR(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
output_type=args.action_repr,
num_classes=num_classes,
num_queries=num_queries,
num_decoder_embedding=args.num_decoder_embedding,
aux_loss=args.aux_loss,
)
    weight_dict = {'loss_ce': 1, 'loss_segment': args.loss_coef_segment, 'loss_siou': args.loss_coef_siou}
    if args.aux_loss:
        # auxiliary decoder layers reuse the same per-loss weights with an _{i} suffix
        aux_weight_dict = {}
        for i in range(args.dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)
    losses = ['labels', 'segments', 'cardinality']
    matcher = build_matcher(args)
criterion = CriterionGreedyMatcher(num_classes, matcher, weight_dict=weight_dict, eos_coef=args.eos_coef, losses=losses)
print(model)
return model, criterion
| 15,660
| 46.457576
| 163
|
py
|
anticipatr
|
anticipatr-main/src/models/position_encoding.py
|
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| 1,604
| 33.891304
| 97
|
py
|
anticipatr
|
anticipatr-main/src/models/transformer.py
|
"""
Transformer class.
Code inspired by torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* decoder handles multiple encoders
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import os, sys
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from snippet_models import build_snippet_model
class TransformerMultipleEncoder(nn.Module):
def __init__(self, snippet_model, d_model=512, nhead=8, num_encoder_layers=6,num_pretrained_layers=4,snippet_window=32,
num_decoder_layers=6, encoding='parallel', decoding='parallel', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False,pretrained_path=''):
super().__init__()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
self.snippet_window = snippet_window
## construct video encoder
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.video_encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
self.snippet_encoder = snippet_model
for param in self.snippet_encoder.parameters():
param.requires_grad = False
# load pretrained snippet encoder
if pretrained_path != '' and os.path.exists(pretrained_path):
self.snippet_encoder.eval()
model_dict = self.snippet_encoder.state_dict()
pretrained_dict = torch.load(pretrained_path,map_location=torch.device('cpu'))['model']
pretrained_dict = {k:v for k,v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.snippet_encoder.load_state_dict(model_dict)
## construct decoder
decoder_layer = TransformerMultipleEncoderDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerMultipleEncoderDecoder(decoder_layer, num_decoder_layers, decoder_norm)
self._reset_parameters()
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, orig_src, src_mask, tgt_mask, query_embed, pos_embed):
# flatten NxCxHxW to HWxNxC
bs, c, t = src.shape
src = src.permute(2, 0, 1)
orig_src = orig_src.permute(2,0,1)
pos_embed = pos_embed.permute(2, 0, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory_snippet = None
## extracting snippet representations and handling overflow properly
## overflow needs to be handled as video length might not be a multiple
## of the size of snippet length used in snippet encoder
if t % self.snippet_window == 0:
memory_snippet = self.snippet_encoder(orig_src.reshape(bs * (t//self.snippet_window), -1, self.snippet_window),
mask=torch.zeros((bs * (t//self.snippet_window),self.snippet_window),dtype=torch.bool).to(orig_src.device)).reshape(bs,-1,t)
else:
overflow = t % self.snippet_window
windows_length = t - (t % self.snippet_window)
windows_memory = self.snippet_encoder(orig_src[:windows_length,:,:].reshape(bs * (windows_length//self.snippet_window), -1, self.snippet_window),
mask=torch.zeros((bs * (windows_length//self.snippet_window), self.snippet_window),dtype=torch.bool).to(orig_src.device))
overflow_memory = self.snippet_encoder(orig_src[-overflow:,:,:].reshape(bs, -1, overflow), mask=torch.zeros((bs,overflow),dtype=torch.bool).to(orig_src.device))
memory_snippet = torch.cat((windows_memory.reshape(bs,-1,windows_length), overflow_memory.reshape(bs, -1, overflow)), dim=2)
memory_video = self.video_encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
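        # the decoder cross-attends to two memories: the frozen snippet-level representations and the
        # trainable video-level encoder output (see TransformerMultipleEncoderDecoderLayer.forward_post)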
memory_snippet = memory_snippet.reshape(t,bs,c)
tgt_mask = None
hs = self.decoder(tgt, memory_snippet, memory_video, tgt_mask, memory_key_padding_mask_1=src_mask, memory_key_padding_mask_2=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory_video.permute(1, 2, 0)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerMultipleEncoderDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory_1, memory_2, tgt_mask=tgt_mask,
memory_mask_1=memory_mask_1, memory_mask_2=memory_mask_2,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask_1=memory_key_padding_mask_1,
memory_key_padding_mask_2=memory_key_padding_mask_2,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerMultipleEncoderDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn1 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn2 = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.norm4 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.dropout4 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn1(query=self.with_pos_embed(tgt, query_pos),
key=memory_1,
value=memory_1, attn_mask=memory_mask_1,
key_padding_mask=memory_key_padding_mask_1)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.multihead_attn2(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory_2, pos),
value=memory_2, attn_mask=memory_mask_2,
key_padding_mask=memory_key_padding_mask_2)[0]
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout4(tgt2)
tgt = self.norm4(tgt)
return tgt
def forward(self, tgt, memory_1, memory_2,
tgt_mask: Optional[Tensor] = None,
memory_mask_1: Optional[Tensor] = None,
memory_mask_2: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask_1: Optional[Tensor] = None,
memory_key_padding_mask_2: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
return self.forward_post(tgt, memory_1, memory_2, tgt_mask, memory_mask_1, memory_mask_2,
tgt_key_padding_mask, memory_key_padding_mask_1,
memory_key_padding_mask_2, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
snippet_model = build_snippet_model(args)
return TransformerMultipleEncoder(
snippet_model,
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_pretrained_layers=args.pretrained_enc_layers,
num_decoder_layers=args.dec_layers,
snippet_window=args.snippet_window,
encoding=args.encoder,
decoding=args.decoder,
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
pretrained_path=args.pretrained_path,
#combination_mode=args.combination
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 14,850
| 43.199405
| 180
|
py
|
anticipatr
|
anticipatr-main/src/models/joiner.py
|
"""
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
pos = self[0](x,mask)
return x, pos
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 873
| 28.133333
| 118
|
py
|
anticipatr
|
anticipatr-main/src/models/__init__.py
|
from .antr import build
def build_model(args):
return build(args)
| 72
| 11.166667
| 23
|
py
|
anticipatr
|
anticipatr-main/src/metrics/__init__.py
| 0
| 0
| 0
|
py
|
|
anticipatr
|
anticipatr-main/src/metrics/longfuture_metrics.py
|
"""
Evaluator class for action anticipation benchmarks
"""
import math
import numpy as np
import torch
import warnings
from collections import OrderedDict
warnings.filterwarnings("ignore", category=UserWarning)
import sklearn.metrics as skmetrics
class AnticipationEvaluator(object):
def __init__(self,dataset):
self.apmeter = OrderedDict()
self.output = OrderedDict()
if dataset in ['ek','egtea']:
prediction_type = 'time_independent'
elif dataset in ['bf','salads']:
prediction_type = 'time_conditioned'
else:
raise ValueError(f"unsupported dataset: {dataset}")
self.prediction_type = prediction_type
self.accmeter = OrderedDict()
self.output['mAP_micro'] = []
self.output['mAP_macro'] = []
def get_AP_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
targets = {}
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
if 'AP' in k_ap:
# each value concatenates predictions and targets along dim 0; split and accumulate per key
preds.setdefault(k_ap, []).append(v_ap[:v_ap.size(0)//2].numpy())
targets.setdefault(k_ap, []).append(v_ap[v_ap.size(0)//2:].numpy())
for k_ap, v_ap in preds.items():
y_true = np.asarray(targets[k_ap])
y_pred = np.asarray(preds[k_ap])
if 'AP' in k_ap:
self.output['mAP_macro'].append(skmetrics.average_precision_score(y_true, y_pred, average='macro'))
self.output['mAP_micro'].append(skmetrics.average_precision_score(y_true, y_pred, average='micro'))
def get_accuracy_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
targets = {}
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
if 'acc' in k_ap:
if k_ap not in self.accmeter:
preds[k_ap] = []
targets[k_ap] = []
if v_ap.ndim == 1:
v_ap = v_ap.unsqueeze(0)
preds[k_ap].append(v_ap[:,:v_ap.size(1)//2].numpy())
targets[k_ap].append(v_ap[:,v_ap.size(1)//2:].numpy())
for k,v in predictions[0].items():
for k_ap,v_ap in v.items():
if 'acc' in k_ap and v_ap.size(0) > 0:
self.output[k_ap] = []
preds[k_ap] = np.asarray(preds[k_ap])
preds[k_ap] = preds[k_ap].reshape(-1, preds[k_ap].shape[-1])
targets[k_ap] = np.asarray(targets[k_ap])
targets[k_ap] = targets[k_ap].reshape(-1, targets[k_ap].shape[-1])
for cls in range(targets[k_ap].shape[1]):
preds_logits = preds[k_ap][:,cls]
max_prob_cls = np.argmax(preds_logits, axis=1) # obtaining max probability class
preds_i = np.zeros_like(preds_logits)
preds_i[max_prob_cls] = 1 # get most likely predicted class
labels_i = targets[k_ap][:,cls]
self.output[k_ap].append((1-skmetrics.hamming_loss(labels_i,preds_i)) * 100)
def evaluate(self,predictions):
## Epic-Kitchens-55 and EGTEA Gaze+ evaluation
if self.prediction_type == 'time_independent':
self.get_AP_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if k in ['mAP_macro', 'mAP_micro']:
metrics[k] = np.mean(np.asarray(v))
return metrics
## Breakfast and 50Salads evaluation
if self.prediction_type == 'time_conditioned':
self.get_accuracy_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if 'acc' in k:
metrics[k] = np.mean(np.asarray(v))
return metrics
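# Usage sketch (names and values are illustrative): the evaluator is keyed by the dataset
# name and consumes the per-video dictionaries built during training/evaluation, where each
# 'AP'/'acc' entry concatenates predictions and targets into a single tensor.
# evaluator = AnticipationEvaluator('ek') # EK-55 / EGTEA -> mAP_macro / mAP_micro
# metrics = evaluator.evaluate(predictions) # e.g. {'mAP_macro': 0.12, 'mAP_micro': 0.34}
# evaluator = AnticipationEvaluator('bf') # Breakfast / 50Salads -> per-class accuracy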
| 4,064
| 40.907216
| 151
|
py
|
anticipatr
|
anticipatr-main/src/datasets/bf.py
|
"""
Constructs a dataloader for the Breakfast dataset for the task of long-term action anticipation.
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_longfuture import SequenceDatasetLongFuture
def build_bf_anticipation(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl"
label_type = 'verb'
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/bf_verbs.csv'}
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
timestamps = '0.2,0.3'
val_timestamps = [float(t) for t in timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'action_repr': args.action_repr,
'prediction_type': 'time_conditioned',
'train_timestamps': train_timestamps,
'val_timestamps': val_timestamps,
'num_verbs': args.num_verbs,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,600
| 33.804348
| 126
|
py
|
anticipatr
|
anticipatr-main/src/datasets/ek.py
|
"""
Constructs a dataloader for Epic-Kitchens-55 for the task of long term action anticipation.
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_longfuture import SequenceDatasetLongFuture
#verbs, nouns,action: 125,3522,3806
#train_many_shot --verb,noun,action: 26,32,250
def build_ek_anticipation(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "{}_lfb_s30_{}.pkl".format(mode,'verb')
label_type = '' if args.label_type == 'action' else args.label_type
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/EPIC_many_shot_verbs.csv', 'noun':'data/ek/longfuture/annotations/EPIC_many_shot_nouns.csv'}
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
timestamps = '0.25,0.5,0.75'
val_timestamps = [float(t) for t in timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'action_repr': args.action_repr,
'prediction_type': 'time_independent',
'train_timestamps': train_timestamps,
'val_timestamps': val_timestamps,
'num_verbs': args.num_verbs ,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,834
| 38.042553
| 153
|
py
|
anticipatr
|
anticipatr-main/src/datasets/ds_utils.py
|
import numpy as np
import os,sys
def getVideoId(dataset,vidname):
if dataset == 'ek':
return getVideoId_ek(vidname)
elif dataset == 'bf':
return getVideoId_bf(vidname)
def getVideoName(dataset,vidid):
if dataset == 'ek':
return getVideoName_ek(vidid)
elif dataset == 'bf':
return getVideoName_bf(vidid)
def getVideoId_ek(video_name):
video_name = video_name.split('/')[-1]
video_id = int(video_name.split('_')[1])
person_id = int(video_name.split('_')[0][1:])
video_id = [person_id, video_id]
return video_id
def getVideoName_ek(video_id):
video_name = "P" + str(video_id[0]).zfill(2) + "_" + str(video_id[1]).zfill(2)
return video_name
def breakfast_name_dicts():
person_dict = {}
src_dict = {}
recipe_dict = {}
for i in range(0,55):
if i in list(range(0,10)):
person_dict['P0' + str(i)] = i
else:
person_dict['P'+str(i)] = i
src_dict = {'cam01':1, 'cam02':2, 'stereo01':3, 'webcam01':4, 'webcam02':5}
recipe_dict = {'cereals':1, 'coffee':2, 'friedegg':3, 'juice':4, 'milk':5, 'pancake':6,'salat':7 , 'sandwich':8 , 'scrambledegg':9 , 'tea':10}
return person_dict, src_dict, recipe_dict
def getVideoId_bf(video_name):
video_name = video_name.split('.')[0]
person_name = video_name.split('_')[0]
src_name = video_name.split('_')[1]
recipe_name = video_name.split('_')[3]
person_dict, src_dict, recipe_dict = breakfast_name_dicts()
video_id = [person_dict[person_name], src_dict[src_name], recipe_dict[recipe_name]]
return video_id
def getVideoName_bf(video_id):
person_dict, src_dict, recipe_dict = breakfast_name_dicts()
person_dict = {v:k for k,v in person_dict.items()}
src_dict = {v:k for k,v in src_dict.items()}
recipe_dict = {v:k for k,v in recipe_dict.items()}
video_name = person_dict[video_id[0]] + "_" + src_dict[video_id[1]] + "_" + person_dict[video_id[0]] + "_" + recipe_dict[video_id[2]]
return video_name
if __name__ == "__main__":
vid = getVideoId(sys.argv[1],sys.argv[2])
reverse = getVideoName(sys.argv[1],vid)
print(sys.argv[1], vid, reverse)
| 2,211
| 28.891892
| 146
|
py
|
anticipatr
|
anticipatr-main/src/datasets/__init__.py
|
import torch.utils.data
import torchvision
def build_dataset(args, mode):
if args.dataset == 'ek':
from datasets.ek import build_ek_anticipation
return build_ek_anticipation(args=args, mode=mode)
elif args.dataset == 'bf':
from datasets.bf import build_bf_anticipation
return build_bf_anticipation(args=args, mode=mode)
| 363
| 27
| 58
|
py
|
anticipatr
|
anticipatr-main/src/datasets/baseds_longfuture.py
|
import bisect
import copy
import os
import os.path as osp
import random
from functools import partial
import itertools
import numpy as np
import pickle as pkl
import collections
from collections.abc import Sequence
import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from datasets import ds_utils
class DatasetSegmentRecord(object):
def __init__(self, row, clip_range=None):
self._data = row
self.clip_range = clip_range
@property
def path(self):
return self._data[0]
@property
def start_frame(self):
return int(self._data[1])
@property
def end_frame(self):
return int(self._data[2])
@property
def label(self):
return [int(x) for x in self._data[3:]]
@property
def num_frames(self):
return self.end_frame - self.start_frame + 1
@property
def clip_start_frame(self):
return int(self._data[1]) if self.clip_range is None else int(self.clip_range[0])
@property
def clip_end_frame(self):
return int(self._data[2]) if self.clip_range is None else int(self.clip_range[1])
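# A record row is tab-separated as [path, start_frame, end_frame, label_0, label_1, ...].
# Minimal sketch with illustrative values:
# r = DatasetSegmentRecord(['P01_01', '30', '90', '2', '7'])
# r.num_frames # 61 (end - start + 1)
# r.label # [2, 7]
# r.clip_start_frame, r.clip_end_frame # (30, 90) because clip_range is None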
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
def get_many_shot(fin):
with open(fin, "r") as f:
lines = f.readlines()[1:]
classes = [int(line.split(',')[0]) for line in lines]
return classes
class SequenceDatasetLongFuture(Dataset):
def __init__(self, feature_file, ann_file, label_type, test_mode, task, fps, dset, action_repr, prediction_type, train_timestamps, val_timestamps, num_verbs, num_nouns, num_actions, train_many_shot=False, manyshot_annotations={}, **kwargs):
self.feature_file = feature_file
self.ann_file = ann_file
self.test_mode = test_mode
self.label = label_type
self.task = task
self.dset = dset
self.action_repr = action_repr
self.prediction_type = prediction_type
self.train_many_shot = train_many_shot
self.train_timestamps = train_timestamps
self.val_timestamps = val_timestamps
self.num_verbs = num_verbs
self.num_nouns = num_nouns
self.num_actions = num_actions
self.train_prediction_interval = 10 ## time in seconds ; used only in training
self.fps = fps
if train_many_shot:
manyshot_verbs = sorted(get_many_shot(manyshot_annotations['verb']))
manyshot_nouns = sorted(get_many_shot(manyshot_annotations['noun']))
self.num_verbs, self.num_nouns = len(manyshot_verbs), len(manyshot_nouns)
self.manyshot_verbs, self.manyshot_nouns = manyshot_verbs, manyshot_nouns
else:
manyshot_nouns, manyshot_verbs = [],[]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(self.ann_file)]
if self.dset in ['ek','egtea']:
int_counts = [(record.label[0], record.label[1]) for record in records]
int_counts = collections.Counter(int_counts).items()
int_counts = sorted(int_counts, key=lambda x: -x[1])[0:self.num_actions]
self.int_to_idx = {interact:idx for idx, (interact, count) in enumerate(int_counts)}
else:
self.int_to_idx = {}
if prediction_type=='time_independent':
self.data = self.load_annotations_anticipation_time_independent(ann_file)
elif prediction_type=='time_conditioned':
self.data = self.load_annotations_anticipation_time_conditioned(ann_file)
if train_many_shot:
for record in self.data:
record.verbs = [manyshot_verbs.index(x) for x in record.verbs if x in manyshot_verbs]
record.nouns = [manyshot_nouns.index(x) for x in record.nouns if x in manyshot_nouns]
# Only a few nouns/ints will actually have gt positives
# Pass these as part of the batch to evaluate mAP
# Don't know how to pass these in the config
eval_ints = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_ints |= set(record.ints)
eval_set = torch.zeros(1, self.num_actions)
eval_set[0, list(eval_ints)] = 1
self.eval_ints = eval_set.byte()
else:
self.eval_ints = torch.zeros(1, self.num_actions).byte()
eval_nouns = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_nouns |= set(record.nouns)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
self.eval_nouns = eval_set.byte()
else:
eval_set = torch.zeros(3, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
manyshot = eval_nouns & set(manyshot_nouns)
rareshot = eval_nouns - set(manyshot_nouns)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_nouns = eval_set.byte()
else:
self.eval_nouns = torch.zeros(1, self.num_actions).byte()
eval_verbs = set()
for record in self.data:
eval_verbs |= set(record.verbs)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
else:
eval_set = torch.zeros(3, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
manyshot = eval_verbs & set(manyshot_verbs)
rareshot = eval_verbs - set(manyshot_verbs)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_verbs = eval_set.byte()
self.prepare = RecordAnticipationData(self.action_repr, self.prediction_type, self.feature_file, self.dset, self.num_nouns, self.num_verbs, self.num_actions, self.int_to_idx, self.fps, self.label, self.eval_verbs, self.eval_nouns, self.eval_ints)
def load_annotations_anticipation_time_independent(self, ann_file):
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if self.test_mode:
timestamps = self.val_timestamps
else:
timestamps = self.train_timestamps # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
timestamps = [int(frac*length) for frac in timestamps]
for i, t in enumerate(timestamps):
past_records = [record for record in vrecords if record.end_frame<=t]
future_records = [record for record in vrecords if record.start_frame>t]
if len(past_records)<3 or len(future_records)<3:
continue
record = DatasetSegmentRecord([vid, 0, t, -1, -1])
if self.dset in ['ek','egtea']:
record.instances = [dict(segment=[record.start_frame,record.end_frame], verb=record.label[0], noun=record.label[1], action=self.int_to_idx[(record.label[0],record.label[1])]) for record in future_records if (record.label[0],record.label[1]) in self.int_to_idx]
record.nouns = sorted(set([record.label[1] for record in future_records]))
record.ints = sorted(set([self.int_to_idx[(record.label[0], record.label[1])] for record in future_records if (record.label[0], record.label[1]) in self.int_to_idx]))
record.verbs =sorted(set([record.label[0] for record in future_records]))
record.fps = self.fps
record.ratio_idx = i
record.prediction_idx = 1
record.duration = length
record.prediction_duration = length - t
record.observation_duration = t
records.append(record)
print(self.dset, ": time-independent anticipation", len(records))
return records
def load_annotations_anticipation_time_conditioned(self, ann_file):
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if self.test_mode:
timestamps = self.val_timestamps
unseen_timestamps = [0.1, 0.2, 0.3, 0.4, 0.5]
else:
timestamps = self.train_timestamps # [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8]
unseen_timestamps = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
seen_timestamps = [int(frac*length) for frac in timestamps]
for i, t in enumerate(seen_timestamps):
past_records = [record for record in vrecords if record.end_frame<=t]
prediction_timestamps = [int(frac*(length - t)) + t for frac in unseen_timestamps]
# prediction_timestamps = [min(t,length-t) for pt in prediction_timestamps]
for j, pred_t in enumerate(prediction_timestamps):
future_records = [record for record in vrecords if record.start_frame>t and record.end_frame<=pred_t]
if len(past_records)<3 or len(future_records)<3:
continue
record = DatasetSegmentRecord([vid, 0, t, -1, -1])
record.instances = [dict(segment=[record.start_frame,record.end_frame], verb=record.label[0]) for record in future_records]
record.verbs =sorted(set([record.label[0] for record in future_records]))
record.fps = self.fps
record.ratio_idx = timestamps[i]
record.prediction_idx = unseen_timestamps[j]
record.duration = length
record.prediction_duration = pred_t - t
record.observation_duration = t
records.append(record)
print(self.dset,": time-conditioned anticipation", len(records))
return records
def get_ann_info(self, idx):
return {
'path': self.data[idx].path,
'num_frames': self.data[idx].num_frames,
'label': self.data[idx].label
}
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
vrecord = self.data[idx]
inputs, targets = self.prepare(vrecord)
return inputs, targets
class RecordAnticipationData(object):
def __init__(self, action_repr, prediction_type, feature_file, dset, num_nouns, num_verbs, num_actions, int_to_idx, fps, label_type, eval_verbs, eval_nouns, eval_actions):
self.action_repr = action_repr
self.prediction_type = prediction_type
self.feature_file = feature_file
self.dset = dset
self.num_nouns = num_nouns
self.num_verbs = num_verbs
self.num_actions = num_actions
self.int_to_idx = int_to_idx
self.fps = fps
self.label_type = label_type
self.eval_verbs = eval_verbs
self.eval_nouns = eval_nouns
self.eval_actions = eval_actions
with open(feature_file,'rb') as f:
self.feature_data = pkl.load(f)
def __call__(self, vrecord):
## features of past records
vidname = vrecord.path
duration = vrecord.duration
features = []
observation_positions = []
for idx in range(vrecord.start_frame-31,vrecord.end_frame+31):
if idx in self.feature_data[vidname].keys():
# set fps to choose the sampling rate (TODO: set as argument)
fps = 1
if idx% fps ==0:
features.append(self.feature_data[vidname][idx])
observation_positions.append(idx)
features = torch.tensor(features,dtype=torch.float32).permute(1,0)
observation_positions = torch.tensor(observation_positions,dtype=torch.float32)
video_id = ds_utils.getVideoId(self.dset, vidname)
## output representation
set_targets = {}
set_targets['video_id'] = torch.tensor(video_id)
if self.label_type == 'action':
label = torch.zeros(self.num_actions)
label[vrecord.ints] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['action'] for instance in vrecord.instances])
num_classes = self.num_actions
set_targets['label_mask'] = to_tensor(self.eval_actions)
elif self.label_type == 'verb':
label = torch.zeros(self.num_verbs)
label[vrecord.verbs] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['verb'] for instance in vrecord.instances])
num_classes = self.num_verbs
set_targets['label_mask'] = to_tensor(self.eval_verbs)
elif self.label_type == 'noun':
label = torch.zeros(self.num_nouns)
label[vrecord.nouns] = 1
set_targets['labels_onehot'] = to_tensor(label)
set_targets['labels'] = torch.tensor([instance['noun'] for instance in vrecord.instances])
num_classes = self.num_nouns
set_targets['label_mask'] = to_tensor(self.eval_nouns)
set_targets['segments'] = [(np.asarray(instance['segment']) - vrecord.observation_duration)/vrecord.prediction_duration for instance in vrecord.instances]
set_targets['segments'] = torch.tensor(set_targets['segments'],dtype=torch.float32)
set_targets['labels_onehot'] = torch.tensor(set_targets['labels_onehot'], dtype=torch.float32)
set_targets['duration'] = torch.tensor([vrecord.duration/self.fps],dtype=torch.float32)
set_targets['prediction_duration'] = torch.tensor([vrecord.prediction_duration],dtype=torch.float32)
set_targets['observation_duration'] = torch.tensor([(vrecord.end_frame - vrecord.start_frame)],dtype=torch.float32)
set_targets['ratio_idx'] = torch.tensor([vrecord.ratio_idx],dtype=torch.float32)
set_targets['prediction_idx'] = torch.tensor([vrecord.prediction_idx],dtype=torch.float32)
set_targets['observation_positions'] = observation_positions
set_targets['fps'] = torch.tensor([vrecord.fps],dtype=torch.float32)
return features, set_targets
| 16,321
| 42.641711
| 280
|
py
|
anticipatr
|
anticipatr-main/src/utils/misc.py
|
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references and
https://github.com/facebookresearch/detr
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[:3]) < 0.7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
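# Minimal usage sketch (illustrative values):
# sv = SmoothedValue(window_size=3)
# for v in (1.0, 2.0, 3.0): sv.update(v)
# sv.avg, sv.global_avg, sv.max # (2.0, 2.0, 3.0)
# str(sv) # '2.0000 (2.0000)' with the default fmt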
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
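# Sketch (illustrative): with world_size == 2 and each rank contributing a small picklable object,
# all_gather({'rank': get_rank()}) # -> [{'rank': 0}, {'rank': 1}] on every rank
# and with a single process it simply returns [data].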
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
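# Sketch (illustrative): with world_size == 2 and per-rank values 1.0 and 3.0,
# reduce_dict({'loss_ce': torch.tensor(1.0, device='cuda')}) # -> {'loss_ce': tensor(2.)} when average=True
# and with a single process the input dict is returned unchanged.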
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
max_size = _max_by_axis([list(feat.shape) for feat in tensor_list])
batch_shape = [len(tensor_list)] + max_size
b, c, t = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, t), dtype=torch.bool, device=device)
for feat, pad_feat, m in zip(tensor_list, tensor, mask):
pad_feat[: feat.shape[0], : feat.shape[1]].copy_(feat)
m[: feat.shape[1]] = False
return NestedTensor(tensor, mask)
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
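# Shape sketch (illustrative): two clips with feature maps of shape (2048, 4) and (2048, 7)
# nt = nested_tensor_from_tensor_list([torch.randn(2048, 4), torch.randn(2048, 7)])
# nt.tensors.shape # torch.Size([2, 2048, 7]) -- zero-padded along time
# nt.mask[0] # tensor([False, False, False, False, True, True, True]); True marks padded steps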
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,world_size=args.world_size, rank=args.rank)
if not torch.cuda.is_available():
torch.distributed.barrier()
#torch.distributed.barrier(group=torch.distributed.group.WORLD)
setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(5,10,20)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
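# Worked sketch (illustrative): batch of 2, 25 classes, with targets set to the argmax class
# output = torch.randn(2, 25); target = output.argmax(dim=1)
# accuracy(output, target, topk=(1, 5)) # -> [tensor(100.), tensor(100.)]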
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 13,447
| 30.716981
| 137
|
py
|
anticipatr
|
anticipatr-main/src/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
anticipatr
|
anticipatr-main/src/utils/segment_utils.py
|
import torch
import numpy as np
def segment_iou(target_segment,candidate_segments):
tt1 = torch.max(target_segment[0], candidate_segments[:, 0])
tt2 = torch.min(target_segment[1], candidate_segments[:, 1])
# Intersection including Non-negative overlap score.
segments_intersection = (tt2 - tt1).clamp(min=0)
# Segment union.
segments_union = (candidate_segments[:,1] - candidate_segments[:,0]) + (target_segment[1] - target_segment[0]) - segments_intersection
tIoU = segments_intersection / segments_union
tIoU[torch.isnan(tIoU)] = 0
tIoU[torch.isinf(tIoU)] = 0
return tIoU
def generalized_segment_iou(target_segments,candidate_segments):
if candidate_segments.ndim !=2 or target_segments.ndim != 2:
raise ValueError('Dimension of arguments is incorrect')
n, m = candidate_segments.shape[0], target_segments.shape[0]
tiou = torch.zeros(n, m)
for i in range(m):
tiou[:, i] = segment_iou(target_segments[i,:], candidate_segments)
tiou[torch.isnan(tiou)] = 0
tiou[torch.isinf(tiou)] = 0
return tiou.to(candidate_segments.device)
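# Worked sketch: target segment [2, 6] against candidates [[4, 8], [0, 1]]
# segment_iou(torch.tensor([2., 6.]), torch.tensor([[4., 8.], [0., 1.]]))
# -> intersections (2, 0), unions (6, 5), tIoU tensor([0.3333, 0.0000])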
| 1,137
| 33.484848
| 139
|
py
|
anticipatr
|
anticipatr-main/pretraining/main_pretraining.py
|
import os
import argparse
import random
import numpy as np
import time
from pathlib import Path
import json
import datetime
import pickle
import torch
from torch.utils.data import DataLoader
import utils.misc as utils
from tasks import build_task
from engine_pretraining import train_one_epoch, evaluate
parser = argparse.ArgumentParser()
# dataset parameter
parser.add_argument('--dataset', type=str,default="bf")
parser.add_argument('--root',type=str,help='Path to data root directory')
parser.add_argument('--num_nouns',type=int,default=300)
parser.add_argument('--num_verbs',type=int,default=48)
parser.add_argument('--num_actions',type=int,default=100)
parser.add_argument('--num_future_labels',type=int,default=-1)
parser.add_argument('--task',type=str,default='anticipation',choices=['anticipation'])
parser.add_argument('--anticipation',type=str,default='longfuture',choices=['longfuture'])
parser.add_argument('--fps',type=int,default=60)
parser.add_argument('--label_type',type=str,default='verb',choices=['verb','noun','action'])
parser.add_argument('--train_many_shot',action='store_true',default=False,help='training with many shot verbs')
parser.add_argument('--split',type=int,default=1)
parser.add_argument('--train_timestamps',type=str,default='0.2,0.3,0.4,0.5,0.6,0.7,0.8')
parser.add_argument('--val_timestamps',type=str,default='0.25,0.5,0.75')
# Model parameters
parser.add_argument('--model',type=str,default='antr')
parser.add_argument('--num_queries',type=int,default=10)
parser.add_argument('--num_pos_embed_dict',type=int,default=256)
parser.add_argument('--dim_latent',type=int,default=128)
parser.add_argument('--hidden_dim',type=int,default=256)
parser.add_argument('--position_embedding',type=str,default='sine')
parser.add_argument('--num_decoder_embedding',type=int,default=10000)
parser.add_argument('--position_type',type=str,default='index',choices=['index'])
parser.add_argument('--dropout',type=float,default=0.1,help='transformer droput')
parser.add_argument('--nheads',type=int,default=8)
parser.add_argument('--dim_feedforward',type=int,default=2048)
parser.add_argument('--encoder',type=str,default='parallel')
parser.add_argument('--decoder',type=str,default='no_decoder')
parser.add_argument('--enc_layers',type=int,default=3)
parser.add_argument('--dec_layers',type=int,default=3)
parser.add_argument('--pre_norm',action='store_true')
parser.add_argument('--aux_loss',action='store_true')
parser.add_argument('--cuda',default=False,action='store_true',help='gpu mode')
parser.add_argument('--mp',action='store_true',help='gpu mode')
parser.add_argument('--eval',action='store_true',help='evaluation mode')
parser.add_argument('--norm_type',type=str,choices=['gn','bn'],default='bn',help="normalization type")
parser.add_argument('--activation',type=str,default='leaky_relu',help="transformer activation type")
# * Training
parser.add_argument('--pretraining_task',type=str)
parser.add_argument('--resume',type=str,default='',help='resume from a checkpoint')
parser.add_argument('--save_checkpoint_every',type=int,default=1000,help='checkpoint saving frequency')
parser.add_argument('--evaluate_every',type=int,default=5,help='checkpoint saving frequency')
parser.add_argument('--evaluate_every_epoch',type=int,default=5,help='checkpoint saving frequency')
parser.add_argument('--num_workers',type=int,default=0,help='number of workers')
parser.add_argument('--batch_size',type=int,default=8,help='batch_size')
parser.add_argument('--epochs',type=int,default=10,help='number of epochs')
parser.add_argument('--step_size',type=int,default=64,help='number of steps before backpropagation')
parser.add_argument('--start_epoch',type=int,default=0,help='starting epoch')
parser.add_argument('--seed', default=42, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--lr_joiner', default=0, type=float)
parser.add_argument('--weight_decay', default=1e-4, type=float)
parser.add_argument('--lr_drop', default=100, type=int)
parser.add_argument('--clip_max_norm', default=1, type=float,help='gradient clipping max norm')
parser.add_argument('--output_dir', type=str,default='./pretraining_expts/checkpoints/',help='path to save intermediate checkpoints')
# * Distributed Training
parser.add_argument('--dist_url', default='env://', help='url used to set up distributed training')
parser.add_argument('--device', default='cuda',help='device to use for training / testing')
args = parser.parse_args()
print(args)
def main(args):
bz = args.batch_size
lr = args.lr
if args.cuda:
if torch.cuda.device_count() >= 1:
utils.init_distributed_mode(args)
device = torch.device(args.device)
else:
device = torch.device('cpu')
# fix the seed for reproducibility
if args.cuda:
seed = args.seed + utils.get_rank()
else:
seed = args.seed
torch.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
# get task setup -- datasets, model, loss
dataset_train, dataset_test, model, criterion = build_task(args)
if args.cuda and args.distributed:
sampler_train = torch.utils.data.distributed.DistributedSampler(dataset_train,shuffle=True)
sampler_test = torch.utils.data.distributed.DistributedSampler(dataset_test, shuffle=False)
else:
sampler_train = torch.utils.data.RandomSampler(dataset_train)
sampler_test = torch.utils.data.SequentialSampler(dataset_test)
batch_sampler_train = torch.utils.data.BatchSampler(sampler_train, args.batch_size, drop_last=True)
data_loader_train = DataLoader(dataset_train, batch_sampler=batch_sampler_train, collate_fn=utils.collate_fn, num_workers=args.num_workers)
data_loader_test = DataLoader(dataset_test, 1, sampler=sampler_test, drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)
model.to(device)
criterion.to(device)
model_without_ddp = model
if args.cuda and args.distributed:
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu],find_unused_parameters=True)
model_without_ddp = model.module
n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
print('number of params:', n_parameters)
# set up model training
param_dicts = [{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" not in n and p.requires_grad]},
{"params": [p for n, p in model_without_ddp.named_parameters() if "joiner" in n and p.requires_grad], "lr": args.lr_joiner,},]
optimizer = torch.optim.AdamW(param_dicts, lr=args.lr, weight_decay=args.weight_decay)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
# output and checkpoints directory
checkpoint_dir = args.output_dir
if not os.path.exists(checkpoint_dir):
try:
os.makedirs(checkpoint_dir)
except OSError:
pass
if args.resume:
checkpoint = Path(args.resume)
assert checkpoint.exists()
checkpoint = torch.load(args.resume, map_location='cpu')
model_without_ddp.load_state_dict(checkpoint['model'])
if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
args.start_epoch = checkpoint['epoch'] + 1
print("Start Training")
start_time = time.time()
optimizer.zero_grad()
for epoch in range(args.start_epoch, args.epochs):
if args.cuda and args.distributed:
sampler_train.set_epoch(epoch)
train_stats = train_one_epoch(epoch, args.clip_max_norm, model, criterion, data_loader_train, optimizer, lr_scheduler, args.dataset, device)
if args.output_dir:
checkpoint_dir = Path(checkpoint_dir)
checkpoint_paths = [checkpoint_dir / 'checkpoint.pth']
# extra checkpoint before LR drop and a given frequency
if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % args.save_checkpoint_every == 0:
checkpoint_paths.append(checkpoint_dir / f'checkpoint{epoch:05}.pth')
for checkpoint_path in checkpoint_paths:
utils.save_on_master({'model': model_without_ddp.state_dict(), 'optimizer': optimizer.state_dict(), 'lr_scheduler': lr_scheduler.state_dict(), 'epoch': epoch, 'args': args,}, checkpoint_path)
# evaluation
if epoch % args.evaluate_every_epoch == 0:
test_stats = evaluate(epoch, model, criterion, data_loader_test, args.dataset, args.evaluate_every, device)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()}, **{f'test_{k}': v for k, v in test_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
else:
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},'epoch': epoch, 'n_parameters': n_parameters}
if args.output_dir and utils.is_main_process():
with (checkpoint_dir / 'log.json').open("a") as f:
f.write(json.dumps(log_stats) + "\n")
lr_scheduler.step()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('Training time {}'.format(total_time_str))
if __name__ == "__main__":
main(args)
| 9,503
| 45.817734
| 208
|
py
|
anticipatr
|
anticipatr-main/pretraining/engine_pretraining.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import os,sys
import copy
import numpy as np
import math
from typing import Iterable
import time
import utils.misc as utils
import datasets
from metrics.longfuture_metrics import AnticipationEvaluator
def train_one_epoch(epoch, max_norm, model, criterion, data_loader, optimizer, scheduler, dataset, device):
model.train()
criterion.train()
metric_logger = utils.MetricLogger(delimiter=" ")
metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
header = 'Epoch: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
losses_mAP = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
losses_mAP = [{k:v[i] for k,v in losses_mAP.items()} for i in range(samples.tensors.size(0))]
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
loss = losses_reduced_scaled.item()
if not math.isfinite(loss_value):
print("Loss is {}, stopping training".format(loss_value))
print(loss_dict_reduced)
sys.exit(1)
optimizer.zero_grad()
loss_value.backward()
if max_norm > 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
optimizer.step()
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist())+'_'+str(int(target['start_frame']))+'_'+str(int(target['end_frame'])): output for target, output in zip(targets,losses_mAP)}
predictions.update(res)
metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
metric_logger.update(lr=optimizer.param_groups[0]["lr"])
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation on training data###########
all_predictions = utils.all_gather(predictions)
stats = {}
if epoch % 5 == 0:
evaluator = AnticipationEvaluator(dataset)
eval_stats = evaluator.evaluate(all_predictions)
stats = {k:v for k,v in eval_stats.items()}
train_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
train_stats.update(**stats)
print("Train epoch:", epoch, "Averaged stats:", train_stats)
return train_stats
def evaluate(epoch, model, criterion, data_loader, dataset, evaluate_every, device):
model.eval()
criterion.eval()
metric_logger = utils.MetricLogger(delimiter=" ")
header = 'Test: [{}]'.format(epoch)
print_freq = 50
step = 0
predictions = {}
for samples, targets in metric_logger.log_every(data_loader, print_freq, header):
step += 1
samples = samples.to(device)
targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
tgt_mask = None
outputs = model(samples.tensors, samples.mask, targets, tgt_mask)
losses = criterion(outputs, targets)
losses_mAP = {k:v for k,v in losses.items() if 'AP' in k or 'acc' in k}
losses_mAP = [{k:v[i] for k,v in losses_mAP.items()} for i in range(samples.tensors.size(0))]
loss_dict = {k:v for k,v in losses.items() if 'loss' in k}
weight_dict = criterion.weight_dict
loss_value = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
# reduce losses over all GPUs for logging purposes
loss_dict_reduced = utils.reduce_dict(loss_dict)
loss_dict_reduced_unscaled = {f'{k}_unscaled': v for k, v in loss_dict_reduced.items()}
loss_dict_reduced_scaled = {k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict}
losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())
metric_logger.update(loss=losses_reduced_scaled.item(), **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
res = {datasets.ds_utils.getVideoName(dataset, target['video_id'].tolist())+'_'+str(int(target['start_frame']))+'_'+str(int(target['end_frame'])): output for target, output in zip(targets,losses_mAP)}
predictions.update(res)
# gather the stats from all processes
metric_logger.synchronize_between_processes()
######For mAP calculation need to gather all data###########
all_predictions = utils.all_gather(predictions)
stats = {}
if epoch % evaluate_every == 0:
evaluator = AnticipationEvaluator(dataset)
test_stats = evaluator.evaluate(all_predictions)
test_loss_stats = {k: meter.global_avg for k, meter in metric_logger.meters.items() if 'mAP' not in k}
test_stats.update(**test_loss_stats)
print("Test epoch:", epoch, "Averaged test stats:", test_stats)
return test_stats
| 5,666
| 39.769784
| 208
|
py
|
anticipatr
|
anticipatr-main/pretraining/models/model.py
|
import torch
import torch.nn.functional as F
from torch import nn
from .transformer import build_transformer
from .joiner import build_joiner
import numpy as np
class MLP(nn.Module):
""" Very simple multi-layer perceptron (also called FFN)"""
def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
super().__init__()
self.num_layers = num_layers
h = [hidden_dim] * (num_layers - 1)
self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
def forward(self, x):
for i, layer in enumerate(self.layers):
x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
return x
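# Shape sketch (illustrative): a 3-layer head mapping 256-d features to 4 outputs
# mlp = MLP(input_dim=256, hidden_dim=256, output_dim=4, num_layers=3)
# mlp(torch.randn(8, 10, 256)).shape # torch.Size([8, 10, 4])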
class EncoderSnippetLongfutureAnticipation(nn.Module):
def __init__(self, joiner, transformer, dim_feedforward, num_classes, num_queries, aux_loss = True):
""" Initializes the model.
Parameters:
transformer: torch module of the transformer architecture. See transformer.py
num_classes: number of action classes
num_queries: number of action queries, i.e. prediction slots. This is the maximal
number of actions the model can predict for a single video.
"""
super().__init__()
self.num_queries = num_queries
self.transformer = transformer
self.joiner = joiner
hidden_dim = transformer.d_model
self.query_embed = nn.Embedding(num_queries, hidden_dim)
self.class_embed = nn.Linear(hidden_dim, num_classes)
self.input_proj = nn.Conv1d(2048, hidden_dim, kernel_size=1)
self.aux_loss = aux_loss
def forward(self, samples, mask, targets, tgt_mask=None):
""" The forward expects two inputs:
- samples: batched videos features, of shape [batch_size x 2048 x T]
- mask: a binary mask of shape [batch_size x T], containing 1 on padded pixels
It returns a dict with the following elements:
- "pred_logits": the classification logits (including no-object) for all queries.
Shape= [batch_size x num_queries x (num_classes + 1)]
- "pred_segments": The normalized boxes coordinates for all queries, represented as
(start_time, end_time). These values are normalized in [0, 1],
relative to the size of each individual image (disregarding possible padding).
See PostProcess for information on how to retrieve the unnormalized bounding box.
- "aux_outputs": Optional, only returned when auxilary losses are activated. It is a list of
dictionnaries containing the two above keys for each decoder layer.
"""
assert mask is not None
sample_positions = torch.empty_like(mask) ## set for positional encodings
src, pos = self.joiner(samples,mask,sample_positions)
input = self.input_proj(src)
hs = self.transformer(input,mask, tgt_mask, self.query_embed.weight,pos)[0]
outputs_class = self.class_embed(hs)
out = {'pred_logits': outputs_class}
return out
class CriterionSnippetLongfutureAnticipation(nn.Module):
"""
This class is the implementation of multilabel classification loss.
"""
def __init__(self, num_classes, weight_dict, losses,fps):
super().__init__()
self.num_classes = num_classes
self.weight_dict = weight_dict
self.losses = losses
def get_mAP(self,pred,labels,label_mask):
mAPs = dict()
mAPs['mAP'] = torch.cat((pred[:,label_mask[0]].detach().cpu(), labels[:,label_mask[0]].detach().cpu()),1)
for i in range(label_mask.shape[1]):
pred_i = pred[:, label_mask[0][i]].squeeze(1)
labels_i = labels[:, label_mask[0][i]].squeeze(1)
mAPs['AP_{}'.format(i)] = torch.cat((pred_i.detach().cpu(), labels_i.detach().cpu()),1)
return mAPs
def loss_labels(self,outputs, targets,log=True):
src_logits = torch.sigmoid(outputs['pred_logits'].mean(1))
target_classes = torch.cat([t['labels'].unsqueeze(0) for t in targets])
loss_ce = F.binary_cross_entropy(src_logits, target_classes,reduction='mean')
losses = {'loss_ce': loss_ce}
losses.update(self.get_mAP(src_logits, target_classes, targets[0]['label_mask']))
return losses
def get_loss(self, loss, outputs, targets, **kwargs):
loss_map = {
'labels': self.loss_labels
}
assert loss in loss_map, f'{loss} loss not defined'
return loss_map[loss](outputs,targets,**kwargs)
def forward(self, outputs, targets):
outputs_without_aux = {k: v for k, v in outputs.items() if k != 'aux_outputs'}
losses = {}
for loss in self.losses:
losses.update(self.get_loss(loss, outputs, targets))
if 'aux_outputs' in outputs:
for i,aux_outputs in enumerate(outputs['aux_outputs']):
for loss in self.losses:
kwargs = {'log': False}
l_dict = self.get_loss(loss,aux_outputs,targets,**kwargs)
l_dict = {k + f'_{i}': v for k,v in l_dict.items()}
losses.update(l_dict)
return losses
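# Loss sketch (illustrative, 5 verb classes): targets are multi-hot over the label
# vocabulary and the logits are averaged over the sequence before the sigmoid,
# p = torch.sigmoid(outputs['pred_logits'].mean(1)) # (batch, num_classes) in [0, 1]
# target = torch.tensor([[0., 1., 0., 0., 1.]]) # verbs 1 and 4 occur in the future
# loss_ce = F.binary_cross_entropy(p, target)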
def build(args):
joiner = build_joiner(args)
transformer = build_transformer(args)
if args.label_type == 'verb':
num_classes = args.num_verbs
if args.label_type == 'noun':
num_classes = args.num_nouns
if args.label_type == 'action':
num_classes = args.num_actions
if args.pretraining_task == 'snippet_longfuture_anticipation':
model = EncoderSnippetLongfutureAnticipation(
joiner,
transformer,
dim_feedforward=args.dim_feedforward,
num_classes=num_classes,
num_queries=1,
aux_loss=args.aux_loss,
)
losses = ['labels']
weight_dict = {'loss_ce': 1}
criterion = CriterionSnippetLongfutureAnticipation(num_classes=num_classes, weight_dict=weight_dict, losses=losses,fps=args.fps)
else:
print("unindentified pretraining task")
print(model)
return model, criterion
| 6,367
| 38.308642
| 136
|
py
|
anticipatr
|
anticipatr-main/pretraining/models/position_encoding.py
|
"""
Various positional encodings for the transformer.
"""
import math
import torch
from torch import nn
class PositionEmbeddingSineIndex(nn.Module):
"""
Sinusoidal positional encodings based on sequence timestamps
"""
def __init__(self, num_pos_feats, temperature=10000, normalize=True, scale=None):
super().__init__()
self.num_pos_feats = num_pos_feats
self.temperature = temperature
self.normalize = normalize
if scale is not None and normalize is False:
raise ValueError("normalize should be True if scale is passed")
if scale is None:
scale = 2 * math.pi
self.scale = scale
def forward(self, x,mask):
assert mask is not None
not_mask = ~mask
not_mask = not_mask.to(mask.device)
x_embed = not_mask.cumsum(1, dtype=torch.float32)
if self.normalize:
eps = 1e-6
x_embed = x_embed / (x_embed[:, -1:] + eps) * self.scale
dim_t = torch.arange(self.num_pos_feats, dtype=torch.float32, device=x.device)
dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)
pos_x = x_embed[:, :, None] / dim_t
pos_x = torch.stack((pos_x[:, :, 0::2].sin(), pos_x[:, :, 1::2].cos()), dim=2).flatten(2)
pos = pos_x.permute(0, 2, 1)
return pos
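# Sketch of the encoding: positions are the cumulative indices of non-padded steps,
# normalized to [0, 2*pi], and mapped through interleaved sine/cosine channels,
# pos[:, 2i, t] = sin(x_t / temperature**(2i / num_pos_feats))
# pos[:, 2i + 1, t] = cos(x_t / temperature**(2i / num_pos_feats))
# so with num_pos_feats = 256 and a mask of shape (B, T) the output has shape (B, 256, T).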
def build_position_encoding(args):
N_steps = args.hidden_dim
if args.position_embedding == 'sine' and args.position_type=='index':
position_embedding = PositionEmbeddingSineIndex(N_steps, normalize=True)
else:
raise ValueError(f"not supported {args.position_embedding}")
return position_embedding
| 1,693
| 33.571429
| 97
|
py
|
anticipatr
|
anticipatr-main/pretraining/models/transformer.py
|
"""
Transformer class.
Copy-paste from torch.nn.Transformer with modifications:
* positional encodings are passed in MHattention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import Optional, List
import torch
import torch.nn.functional as F
from torch import nn, Tensor
class Transformer(nn.Module):
def __init__(self, d_model=512, nhead=8, num_encoder_layers=6,
num_decoder_layers=6, encoding='parallel', decoding='no_decoder', dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False,
return_intermediate_dec=False):
super().__init__()
encoder_layer = TransformerEncoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)
if decoding != 'no_decoder':
decoder_layer = TransformerDecoderLayer(d_model, nhead, dim_feedforward,
dropout, activation, normalize_before)
decoder_norm = nn.LayerNorm(d_model)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, decoder_norm, return_intermediate=return_intermediate_dec)
self._reset_parameters()
self.encoding = encoding
self.decoding = decoding
self.d_model = d_model
self.nhead = nhead
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, src, src_mask, tgt_mask, query_embed, pos_embed):
# permute (N, C, T) to (T, N, C)
bs, c, t = src.shape
src = src.permute(2, 0, 1)
pos_embed = pos_embed.permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
tgt = torch.zeros_like(query_embed)
encoder_mask = None
memory = self.encoder(src, mask=encoder_mask, src_key_padding_mask=src_mask, pos=pos_embed)
tgt_mask = None
if self.decoding != 'no_decoder':
hs = self.decoder(tgt, memory, tgt_mask=tgt_mask, memory_key_padding_mask=src_mask, pos=pos_embed, query_pos=query_embed)
return hs.transpose(1, 2), memory.permute(1, 2, 0)
elif self.decoding == 'no_decoder':
return memory.permute(1,0,2), torch.empty(memory.size()).to(memory.device)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask,
src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1,
activation="leaky_relu", normalize_before=False):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
def build_transformer(args):
return Transformer(
d_model=args.hidden_dim,
dropout=args.dropout,
nhead=args.nheads,
dim_feedforward=args.dim_feedforward,
num_encoder_layers=args.enc_layers,
num_decoder_layers=args.dec_layers,
encoding=args.encoder,
decoding=args.decoder,
activation=args.activation,
normalize_before=args.pre_norm,
return_intermediate_dec=True,
)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "elu":
return F.elu
if activation == "leaky_relu":
return F.leaky_relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
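# Hedged usage sketch (illustrative, not from the original repo): build_transformer
# reads an argparse-style namespace; the fields it touches are roughly:
#   args = argparse.Namespace(hidden_dim=512, dropout=0.1, nheads=8,
#                             dim_feedforward=2048, enc_layers=6, dec_layers=6,
#                             encoder='parallel', decoder='no_decoder',
#                             activation='leaky_relu', pre_norm=False)
#   model = build_transformer(args)
# With decoder='no_decoder', forward() returns the encoder memory plus an empty
# placeholder tensor instead of decoder activations.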
| 12,622
| 39.458333
| 139
|
py
|
anticipatr
|
anticipatr-main/pretraining/models/joiner.py
|
"""
Joiner modules.
"""
from collections import OrderedDict
import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List
from .position_encoding import build_position_encoding
class Joiner(nn.Sequential):
def __init__(self,position_embedding,position_type,position_encoding_type):
super().__init__(position_embedding)
self.position_type = position_type
self.position_encoding_type = position_encoding_type
def forward(self, x, mask,positions):
if self.position_type == 'index' and self.position_encoding_type=='sine':
pos = self[0](x,mask)
return x, pos
def build_joiner(args):
position_embedding = build_position_encoding(args)
model = Joiner(position_embedding,position_type=args.position_type,position_encoding_type=args.position_embedding)
return model
| 959
| 29.967742
| 118
|
py
|
anticipatr
|
anticipatr-main/pretraining/models/__init__.py
|
from .model import build
def build_model(args):
return build(args)
| 75
| 8.5
| 24
|
py
|
anticipatr
|
anticipatr-main/pretraining/metrics/__init__.py
| 0
| 0
| 0
|
py
|
|
anticipatr
|
anticipatr-main/pretraining/metrics/longfuture_metrics.py
|
import math
import numpy as np
import torch
import warnings
from collections import OrderedDict
warnings.filterwarnings("ignore", category=UserWarning)
import sklearn.metrics as skmetrics
class AnticipationEvaluator(object):
""" The pretraining task is multilabel classification problem."""
def __init__(self):
self.apmeter = OrderedDict()
self.output = OrderedDict()
self.accmeter = OrderedDict()
self.output['mAP_micro'] = []
self.output['mAP_macro'] = []
def get_AP_perclass(self, predictions):
if isinstance(predictions,dict):
predictions = [predictions]
preds = {}
preds['mAP'] = []
targets = {}
targets['mAP'] = []
for p in predictions:
for k,v in p.items():
for k_ap,v_ap in v.items():
if 'mAP' in k_ap:
preds[k_ap].append(v_ap[:v_ap.size(0)//2].numpy())
targets[k_ap].append(v_ap[v_ap.size(0)//2:].numpy())
        for k_ap, v_ap in preds.items():
            y_true = np.asarray(targets[k_ap])
            y_pred = np.asarray(preds[k_ap])
            if 'mAP' in k_ap:
                self.output['mAP_macro'].append(skmetrics.average_precision_score(y_true, y_pred, average='macro'))
                self.output['mAP_micro'].append(skmetrics.average_precision_score(y_true, y_pred, average='micro'))
def evaluate(self,predictions):
self.get_AP_perclass(predictions)
metrics = {}
for k,v in self.output.items():
if 'mAP' in k:
metrics[k] = v
return metrics
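# Hedged sketch of the expected prediction format (inferred from get_AP_perclass above):
# `predictions` is a dict, or list of dicts, keyed by sample id; each value maps a metric
# name containing 'mAP' to a 1-D tensor whose first half holds the predicted scores and
# whose second half holds the binary targets, e.g.
#   preds_and_targets = torch.cat([scores, labels])   # both of length num_classes
#   evaluator.evaluate({'video_0': {'mAP': preds_and_targets}})
# evaluate() then returns the accumulated 'mAP_macro' and 'mAP_micro' lists.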
| 1,809
| 31.909091
| 151
|
py
|
anticipatr
|
anticipatr-main/pretraining/datasets/bf.py
|
"""
Builds a dataloader class for snippet-level anticipation task
"""
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_snippetprediction import SequenceDatasetLongFuture
def build_bf_pretraining(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl"
label_type = 'verb'
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/bf/longfuture/annotations/bf_verbs.csv'}
pretraining_train_vids = "pretraining_data/bf/train_videos.txt"
pretraining_val_vids = "pretraining_data/bf/val_videos.txt"
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
val_timestamps = [float(t) for t in args.val_timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'train_vid_list': pretraining_train_vids,
'val_vid_list': pretraining_val_vids,
'num_verbs': 48,
'num_nouns': 1,
'num_actions': 1,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns,
'pretraining_task': args.pretraining_task,
'num_future_labels': args.num_future_labels
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,770
| 36.680851
| 130
|
py
|
anticipatr
|
anticipatr-main/pretraining/datasets/ek.py
|
import numpy as np
import lmdb
from tqdm import tqdm
from torch.utils.data import Dataset
import pandas as pd
from .baseds_snippetprediction import SequenceDatasetLongFuture
def build_ek_pretraining(args,mode,override_modality=None):
path_to_features = "{}/{}/{}/features/".format(args.root, args.dataset, args.anticipation) + "i3d_feats.pkl")
label_type = '' if args.label_type == 'action' else args.label_type
path_to_csv = '{}/{}/{}/split/{}_S{}.csv'.format(args.root, args.dataset, args.anticipation, mode, args.split, label_type)
manyshot_anns = {'verb':'data/ek/longfuture/annotations/EPIC_many_shot_verbs.csv', 'noun':'data/ek/longfuture/annotations/EPIC_many_shot_nouns.csv'}
pretraining_train_vids = "pretraining_data/ek/train_videos.txt"
pretraining_val_vids = "pretraining_data/ek/val_videos.txt"
train_timestamps = [float(t) for t in args.train_timestamps.split(',')]
val_timestamps = [float(t) for t in args.val_timestamps.split(',')]
kwargs = {
'feature_file': path_to_features,
'ann_file': path_to_csv,
'label_type': args.label_type,
'test_mode': False if mode == 'train' else True,
'task': args.task,
'fps': args.fps,
'dset': args.dataset,
'train_vid_list': pretraining_train_vids,
'val_vid_list': pretraining_val_vids,
'num_verbs': args.num_verbs ,
'num_nouns': args.num_nouns,
'num_actions': args.num_actions,
'train_many_shot': args.train_many_shot,
'manyshot_annotations': manyshot_anns,
'pretraining_task': args.pretraining_task,
'num_future_labels': args.num_future_labels
}
dataset = SequenceDatasetLongFuture(**kwargs)
return dataset
| 1,866
| 41.431818
| 157
|
py
|
anticipatr
|
anticipatr-main/pretraining/datasets/baseds_snippetprediction.py
|
"""
Implementation of dataloader for snippet anticipation.
Code inspired by: https://github.com/facebookresearch/ego-topo
"""
import bisect
import copy
import os
import os.path as osp
import random
from functools import partial
import itertools
import numpy as np
import pickle as pkl
import collections
from collections.abc import Sequence
import tqdm
import torch
from torch.utils.data import Dataset
from torchvision import transforms
from PIL import Image
from datasets import ds_utils
class DatasetSegmentRecord(object):
def __init__(self, row, clip_range=None):
self._data = row
self.clip_range = clip_range
@property
def path(self):
return self._data[0]
@property
def start_frame(self):
return int(self._data[1])
@property
def end_frame(self):
return int(self._data[2])
@property
def label(self):
return [int(x) for x in self._data[3:]]
@property
def num_frames(self):
return self.end_frame - self.start_frame + 1
@property
def clip_start_frame(self):
return int(self._data[1]) if self.clip_range is None else int(self.clip_range[0])
@property
def clip_end_frame(self):
return int(self._data[2]) if self.clip_range is None else int(self.clip_range[1])
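# Each annotation row handled by DatasetSegmentRecord is tab-separated:
# path, start_frame, end_frame, followed by one or more integer labels
# (see the `x.strip().split('\t')` parsing further below).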
def to_tensor(data):
"""Convert objects of various python types to :obj:`torch.Tensor`.
Supported types are: :class:`numpy.ndarray`, :class:`torch.Tensor`,
:class:`Sequence`, :class:`int` and :class:`float`.
"""
if isinstance(data, torch.Tensor):
return data
elif isinstance(data, np.ndarray):
return torch.from_numpy(data)
elif isinstance(data, int):
return torch.LongTensor([data])
elif isinstance(data, float):
return torch.FloatTensor([data])
else:
raise TypeError('type {} cannot be converted to tensor.'.format(
type(data)))
class SequenceDatasetLongFuture(Dataset):
def __init__(self, feature_file, ann_file, label_type, test_mode, task, fps, dset, train_vid_list, val_vid_list, num_verbs, num_nouns, num_actions, train_many_shot=False, manyshot_annotations={}, num_future_labels=-1,**kwargs):
self.feature_file = feature_file
self.ann_file = ann_file
self.test_mode = test_mode
self.label = label_type
self.task = task
self.dset = dset
self.train_many_shot = train_many_shot
self.train_vid_list = train_vid_list
self.val_vid_list = val_vid_list
self.num_verbs = num_verbs
self.num_nouns = num_nouns
self.num_actions = num_actions
self.fps = fps
self.num_future_labels = num_future_labels
with open(feature_file,'rb') as f:
self.feature_data = pkl.load(f)
if train_many_shot:
manyshot_verbs = sorted(get_many_shot(manyshot_annotations['verb']))
manyshot_nouns = sorted(get_many_shot(manyshot_annotations['noun']))
if train_many_shot:
self.num_verbs, self.num_nouns = len(manyshot_verbs), len(manyshot_nouns)
self.manyshot_verbs, self.manyshot_nouns = manyshot_verbs, manyshot_nouns
else:
manyshot_nouns, manyshot_verbs = [],[]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(self.ann_file)]
if self.dset in ['ek','egtea']:
int_counts = [(record.label[0], record.label[1]) for record in records]
int_counts = collections.Counter(int_counts).items()
int_counts = sorted(int_counts, key=lambda x: -x[1])[0:self.num_actions]
self.int_to_idx = {interact:idx for idx, (interact, count) in enumerate(int_counts)}
else:
self.int_to_idx = {}
self.data = self.load_longfuture_anticipation_annotations(ann_file)
if train_many_shot:
for record in self.data:
record.verbs = [manyshot_verbs.index(x) for x in record.verbs if x in manyshot_verbs]
record.nouns = [manyshot_nouns.index(x) for x in record.nouns if x in manyshot_nouns]
# Only a few nouns/ints will actually have gt positives
# Pass these as part of the batch to evaluate mAP
# Don't know how to pass these in the config
eval_ints = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_ints |= set(record.ints)
eval_set = torch.zeros(1, self.num_actions)
eval_set[0, list(eval_ints)] = 1
self.eval_ints = eval_set.byte()
eval_nouns = set()
if self.dset in ['ek','egtea']:
for record in self.data:
eval_nouns |= set(record.nouns)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
self.eval_nouns = eval_set.byte()
else:
eval_set = torch.zeros(3, self.num_nouns)
eval_set[0, list(eval_nouns)] = 1
manyshot = eval_nouns & set(manyshot_nouns)
rareshot = eval_nouns - set(manyshot_nouns)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_nouns = eval_set.byte()
else:
self.eval_ints = torch.zeros(1, self.num_actions).byte()
self.eval_nouns = torch.zeros(1, self.num_nouns).byte()
eval_verbs = set()
for record in self.data:
eval_verbs |= set(record.verbs)
if not train_many_shot:
eval_set = torch.zeros(1, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
else:
eval_set = torch.zeros(3, self.num_verbs)
eval_set[0, list(eval_verbs)] = 1
manyshot = eval_verbs & set(manyshot_verbs)
rareshot = eval_verbs - set(manyshot_verbs)
eval_set[1, list(manyshot)] = 1
eval_set[2, list(rareshot)] = 1
self.eval_verbs = eval_set.byte()
self.prepare = RecordSnippetLongfutureAnticipationData(self.feature_data, self.dset, self.num_nouns, self.num_verbs, self.num_actions, self.int_to_idx, self.fps, self.label, self.eval_verbs, self.eval_nouns, self.eval_ints,self.test_mode)
def load_longfuture_anticipation_annotations(self, ann_file):
print("Loading longfuture anticipation annotations")
vid_lengths = open(self.ann_file.replace('.csv', '_nframes.csv')).read().strip().split('\n')
vid_lengths = [line.split('\t') for line in vid_lengths]
vid_lengths = {k:int(v) for k,v in vid_lengths}
if self.test_mode:
vidfile = self.val_vid_list
else:
vidfile = self.train_vid_list
with open(vidfile,'rb') as f:
vid_list = [line.rstrip().decode() for line in f]
records = [DatasetSegmentRecord(x.strip().split('\t')) for x in open(ann_file)]
records_by_vid = collections.defaultdict(list)
for record in records:
if self.dset=='ek':
path = record.path.split('/')[-1]
else:
path = record.path
if path in vid_list:
record.uid = '%s_%s_%s'%(record.path, record.start_frame, record.end_frame)
records_by_vid[record.path].append(record)
records = []
for vid in records_by_vid:
vrecords = sorted(records_by_vid[vid], key=lambda record: record.end_frame)
length = vid_lengths[vid]
if vid not in self.feature_data:
continue
for segment_idx, segment_record in enumerate(vrecords[:-2]):
record_length = segment_record.end_frame - segment_record.start_frame + 1
if not any(x in self.feature_data[vid].keys() for x in list(range(segment_record.start_frame, segment_record.end_frame+1))):
continue
if self.dset in ['bf']:
invalid_verbs = [0]
if record_length <= 15:
continue
if segment_record.label[0] == 0:
continue
if self.dset in ['salads']:
if record_length <= 15:
continue
invalid_verbs = [17, 18]
if segment_record.label[0] in [17,18]:
continue
else:
if record_length <= 1:
continue
invalid_verbs = []
# create snippet record: label has to be future labels
future_records = [record for record in vrecords[segment_idx+1:-1]]
record = segment_record
record.verbs = sorted(set([frec.label[0] for frec in future_records]))
if self.num_future_labels > 0:
                    record.verbs = record.verbs[:self.num_future_labels]
if self.dset in ['ek', 'egtea']:
record.nouns = sorted(set([frec.label[1] for frec in future_records]))
record.ints = sorted(set([self.int_to_idx[(frec.label[0], frec.label[1])] for frec in future_records if (frec.label[0], frec.label[1]) in self.int_to_idx]))
if self.num_future_labels > 0:
                        record.nouns = record.nouns[:self.num_future_labels]
                        record.ints = record.ints[:self.num_future_labels]
record.duration = record.end_frame - record.start_frame
record.fps = self.fps
records.append(record)
#if len(records) == 8: return records
print("Snippet based longfuture anticipation", len(records))
return records
def get_ann_info(self, idx):
return {
'path': self.data[idx].path,
'num_frames': self.data[idx].num_frames,
'label': self.data[idx].label
}
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
vrecord = self.data[idx]
inputs, targets = self.prepare(vrecord)
return inputs, targets
class RecordSnippetLongfutureAnticipationData(object):
def __init__(self, feature_data, dset, num_nouns, num_verbs, num_actions, int_to_idx, fps, label_type, eval_verbs, eval_nouns, eval_actions,test_mode):
self.feature_data = feature_data
self.dset = dset
self.num_nouns = num_nouns
self.num_verbs = num_verbs
self.num_actions = num_actions
self.int_to_idx = int_to_idx
self.fps = fps
self.label_type = label_type
self.eval_verbs = eval_verbs
self.eval_nouns = eval_nouns
self.eval_actions = eval_actions
self.test_mode = test_mode
def __call__(self, vrecord):
## features of past records
vidname = vrecord.path
duration = vrecord.duration
features = []
for idx in range(vrecord.start_frame,vrecord.end_frame+1):
if idx in self.feature_data[vidname].keys():
if self.dset in ['ek', 'egtea']:
features.append(torch.tensor(self.feature_data[vidname][idx]))
if self.dset in ['bf','salads']:
# set snippet_fps to choose the sampling rate
snippet_fps = 1
if idx% snippet_fps ==0:
features.append(torch.tensor(self.feature_data[vidname][idx]))
        features = torch.stack(features).to(dtype=torch.float32).permute(1, 0)
video_id = ds_utils.getVideoId(self.dset, vidname)
## output representation
set_targets = {}
set_targets['video_id'] = torch.tensor(video_id)
if self.label_type == 'action':
label = torch.zeros(self.num_actions)
label[vrecord.ints] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_actions)
elif self.label_type == 'noun':
label = torch.zeros(self.num_nouns)
label[vrecord.nouns] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_nouns)
elif self.label_type == 'verb':
label = torch.zeros(self.num_verbs)
label[vrecord.verbs] = 1
set_targets['labels'] = to_tensor(label)
set_targets['label_mask'] = to_tensor(self.eval_verbs)
set_targets['fps'] = torch.tensor([vrecord.fps],dtype=torch.float32)
set_targets['duration'] = torch.tensor([vrecord.duration/vrecord.fps],dtype=torch.float32)
set_targets['start_frame'] = torch.tensor([vrecord.start_frame],dtype=torch.float32)
set_targets['end_frame'] = torch.tensor([vrecord.end_frame],dtype=torch.float32)
return features, set_targets
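# Summary of the returned sample (for reference, matching the code above): `features`
# is a (feature_dim, num_snippet_frames) float tensor of stacked per-frame features,
# and `set_targets` carries the multi-hot future-label vector ('labels'), its evaluation
# mask ('label_mask'), plus 'video_id', 'fps', 'duration', 'start_frame' and 'end_frame'.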
| 12,985
| 38.956923
| 246
|
py
|
anticipatr
|
anticipatr-main/pretraining/datasets/ds_utils.py
|
import numpy as np
import os,sys
def getVideoId(dataset,vidname):
if dataset == 'ek':
return getVideoId_ek(vidname)
elif dataset == 'bf':
return getVideoId_bf(vidname)
def getVideoName(dataset,vidid):
if dataset == 'ek':
return getVideoName_ek(vidid)
elif dataset == 'bf':
return getVideoName_bf(vidid)
def getVideoId_ek(video_name):
video_name = video_name.split('/')[-1]
video_id = int(video_name.split('_')[1])
person_id = int(video_name.split('_')[0][1:])
video_id = [person_id, video_id]
return video_id
def getVideoName_ek(video_id):
video_name = "P" + str(video_id[0]).zfill(2) + "_" + str(video_id[1]).zfill(2)
return video_name
def breakfast_name_dicts():
person_dict = {}
src_dict = {}
recipe_dict = {}
for i in range(0,55):
if i in list(range(0,10)):
person_dict['P0' + str(i)] = i
else:
person_dict['P'+str(i)] = i
src_dict = {'cam01':1, 'cam02':2, 'stereo01':3, 'webcam01':4, 'webcam02':5}
recipe_dict = {'cereals':1, 'coffee':2, 'friedegg':3, 'juice':4, 'milk':5, 'pancake':6,'salat':7 , 'sandwich':8 , 'scrambledegg':9 , 'tea':10}
return person_dict, src_dict, recipe_dict
def getVideoId_bf(video_name):
video_name = video_name.split('.')[0]
person_name = video_name.split('_')[0]
src_name = video_name.split('_')[1]
recipe_name = video_name.split('_')[3]
person_dict, src_dict, recipe_dict = breakfast_name_dicts()
video_id = [person_dict[person_name], src_dict[src_name], recipe_dict[recipe_name]]
return video_id
def getVideoName_bf(video_id):
person_dict, src_dict, recipe_dict = breakfast_name_dicts()
person_dict = {v:k for k,v in person_dict.items()}
src_dict = {v:k for k,v in src_dict.items()}
recipe_dict = {v:k for k,v in recipe_dict.items()}
video_name = person_dict[video_id[0]] + "_" + src_dict[video_id[1]] + "_" + person_dict[video_id[0]] + "_" + recipe_dict[video_id[2]]
return video_name
if __name__ == "__main__":
vid = getVideoId(sys.argv[1],sys.argv[2])
reverse = getVideoName(sys.argv[1],vid)
print(sys.argv[1], vid, reverse)
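# Example invocation (assumed): `python ds_utils.py ek P01_02` prints
# "ek [1, 2] P01_02"; getVideoId_ek parses the person and video indices and
# getVideoName_ek zero-pads them back into the original video name.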
| 2,225
| 28.289474
| 146
|
py
|
anticipatr
|
anticipatr-main/pretraining/datasets/__init__.py
|
import torch.utils.data
import torchvision
def build_dataset(args):
if args.dataset == 'ek':
from datasets.ek import build_ek_pretraining
dataset_train = build_ek_pretraining(args,mode='train')
dataset_val = build_ek_pretraining(args,mode='val')
return dataset_train, dataset_val
elif args.dataset == 'bf':
from datasets.bf import build_bf_pretraining
dataset_train = build_bf_pretraining(args,mode='train')
dataset_val = build_bf_pretraining(args,mode='val')
return dataset_train, dataset_val
| 568
| 34.5625
| 63
|
py
|
anticipatr
|
anticipatr-main/pretraining/utils/misc.py
|
"""
Misc functions, including distributed helpers.
Mostly copy-paste from torchvision references and
https://github.com/facebookresearch/detr
"""
import os
import subprocess
import time
from collections import defaultdict, deque
import datetime
import pickle
from typing import Optional, List
import torch
import torch.distributed as dist
from torch import Tensor
# needed due to empty tensor bug in pytorch and torchvision 0.5
import torchvision
if float(torchvision.__version__[2:4]) < 7:
from torchvision.ops import _new_empty_tensor
from torchvision.ops.misc import _output_size
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / self.count
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(
median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
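# Minimal usage sketch (illustrative): SmoothedValue keeps a rolling window plus a
# global running total, e.g.
#   meter = SmoothedValue(window_size=20)
#   meter.update(0.5); meter.update(0.7)
#   str(meter)   # "median (global_avg)" formatted via `fmt`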
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.tensor([tensor.numel()], device="cuda")
size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
if local_size != max_size:
padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that all processes
have the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.all_reduce(values)
if average:
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
class MetricLogger(object):
def __init__(self, delimiter="\t"):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append(
"{}: {}".format(name, str(meter))
)
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
iter_time = SmoothedValue(fmt='{avg:.4f}')
data_time = SmoothedValue(fmt='{avg:.4f}')
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}',
'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header,
'[{0' + space_fmt + '}/{1}]',
'eta: {eta}',
'{meters}',
'time: {time}',
'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
data_time.update(time.time() - end)
yield obj
iter_time.update(time.time() - end)
if i % print_freq == 0 or i == len(iterable) - 1:
eta_seconds = iter_time.global_avg * (len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time),
memory=torch.cuda.max_memory_allocated() / MB))
else:
print(log_msg.format(
i, len(iterable), eta=eta_string,
meters=str(self),
time=str(iter_time), data=str(data_time)))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
print('{} Total time: {} ({:.4f} s / it)'.format(
header, total_time_str, total_time / len(iterable)))
def get_sha():
cwd = os.path.dirname(os.path.abspath(__file__))
def _run(command):
return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()
sha = 'N/A'
diff = "clean"
branch = 'N/A'
try:
sha = _run(['git', 'rev-parse', 'HEAD'])
subprocess.check_output(['git', 'diff'], cwd=cwd)
diff = _run(['git', 'diff-index', 'HEAD'])
diff = "has uncommited changes" if diff else "clean"
branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
except Exception:
pass
message = f"sha: {sha}, status: {diff}, branch: {branch}"
return message
def collate_fn(batch):
batch = list(zip(*batch))
batch[0] = nested_tensor_from_tensor_list(batch[0])
return tuple(batch)
def _max_by_axis(the_list):
# type: (List[List[int]]) -> List[int]
maxes = the_list[0]
for sublist in the_list[1:]:
for index, item in enumerate(sublist):
maxes[index] = max(maxes[index], item)
return maxes
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
max_size = _max_by_axis([list(feat.shape) for feat in tensor_list])
batch_shape = [len(tensor_list)] + max_size
b, c, t = batch_shape
dtype = tensor_list[0].dtype
device = tensor_list[0].device
tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
mask = torch.ones((b, t), dtype=torch.bool, device=device)
for feat, pad_feat, m in zip(tensor_list, tensor, mask):
pad_feat[: feat.shape[0], : feat.shape[1]].copy_(feat)
m[: feat.shape[1]] = False
return NestedTensor(tensor, mask)
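# In this codebase each sample is a (C, T_i) feature tensor; the helper above zero-pads
# all samples to the longest T_i and returns a NestedTensor whose boolean mask is True
# exactly at the padded time steps.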
class NestedTensor(object):
def __init__(self, tensors, mask: Optional[Tensor]):
self.tensors = tensors
self.mask = mask
def to(self, device):
# type: (Device) -> NestedTensor # noqa
cast_tensor = self.tensors.to(device)
mask = self.mask
if mask is not None:
assert mask is not None
cast_mask = mask.to(device)
else:
cast_mask = None
return NestedTensor(cast_tensor, cast_mask)
def decompose(self):
return self.tensors, self.mask
def __repr__(self):
return str(self.tensors)
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(args.rank, args.dist_url), flush=True)
#torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,world_size=args.world_size, rank=args.rank)
torch.distributed.init_process_group(backend=args.dist_backend)
if not torch.cuda.is_available():
torch.distributed.barrier()
#torch.distributed.barrier(group=torch.distributed.group.WORLD)
#setup_for_distributed(args.rank == 0)
@torch.no_grad()
def accuracy(output, target, topk=(5,10,20)):
"""Computes the precision@k for the specified values of k"""
if target.numel() == 0:
return [torch.zeros([], device=output.device)]
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
# type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
"""
Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
This will eventually be supported natively by PyTorch, and this
class can go away.
"""
if float(torchvision.__version__[:3]) < 0.7:
if input.numel() > 0:
return torch.nn.functional.interpolate(
input, size, scale_factor, mode, align_corners
)
output_shape = _output_size(2, input, size, scale_factor)
output_shape = list(input.shape[:-2]) + list(output_shape)
return _new_empty_tensor(input, output_shape)
else:
return torchvision.ops.misc.interpolate(input, size, scale_factor, mode, align_corners)
| 13,549
| 30.807512
| 138
|
py
|
anticipatr
|
anticipatr-main/pretraining/utils/__init__.py
| 0
| 0
| 0
|
py
|
|
anticipatr
|
anticipatr-main/pretraining/tasks/__init__.py
|
import torch
from datasets import build_dataset
from models import build_model
def build_task(args):
dataset_train,dataset_test = build_dataset(args)
model, criterion = build_model(args)
return dataset_train, dataset_test, model, criterion
| 257
| 18.846154
| 56
|
py
|
benchmarks
|
benchmarks-master/tools/run_distributed_benchmarks.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Builds docker images and runs benchmarks from benchmark_configs.yml file.
This script should only be run from the open-source repository.
"""
import argparse
from datetime import datetime
import logging
import os
from string import maketrans
import subprocess
import sys
import docker
import k8s_tensorflow_lib
import kubectl_util
import yaml
_DOCKER_IMAGE_PATTERN = 'gcr.io/tensorflow-testing/benchmarks/%s'
_OUTPUT_FILE_ENV_VAR = 'TF_DIST_BENCHMARK_RESULTS_FILE'
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
_PORT = 5000
def _ConvertToValidName(name):
"""Converts to name that we can use as a kubernetes job prefix.
Args:
name: benchmark name.
Returns:
Benchmark name that can be used as a kubernetes job prefix.
"""
return name.translate(maketrans('/:_', '---'))
def _RunBenchmark(name, yaml_file):
"""Runs a single distributed benchmark.
Args:
name: name of the benchmark to run.
yaml_file: path to kubernetes config file.
"""
kubectl_util.DeletePods(name, yaml_file)
kubectl_util.CreatePods(name, yaml_file)
success = kubectl_util.WaitForCompletion(name)
kubectl_util.DeletePods(name, yaml_file)
return success
def _BuildAndPushDockerImage(
docker_client, docker_file, name, tag, push_to_gcloud=False):
"""Builds a docker image and optionally pushes it to gcloud.
Args:
docker_client: docker.Client object.
docker_file: Dockerfile path.
name: name of the benchmark to build a docker image for.
tag: tag for docker image.
push_to_gcloud: whether to push the image to google cloud.
Returns:
Docker image identifier.
"""
local_docker_image_with_tag = '%s:%s' % (name, tag)
remote_docker_image = _DOCKER_IMAGE_PATTERN % name
remote_docker_image_with_tag = '%s:%s' % (remote_docker_image, tag)
if FLAGS.docker_context_dir:
docker_context = os.path.join(
os.path.dirname(__file__), FLAGS.docker_context_dir)
docker_file_name = docker_file
else:
docker_context = os.path.dirname(docker_file)
docker_file_name = os.path.basename(docker_file)
built_image = docker_client.images.build(
path=docker_context, dockerfile=docker_file_name,
tag=local_docker_image_with_tag,
pull=True)
built_image.tag(remote_docker_image, tag=tag)
if push_to_gcloud:
subprocess.check_call(
['gcloud', 'docker', '--', 'push', remote_docker_image_with_tag])
return remote_docker_image_with_tag
def _GetMostRecentDockerImageFromGcloud(docker_image):
"""Get most recent <docker_image>:tag for this docker_image.
Args:
docker_image: (string) docker image on Google Cloud.
Returns:
docker_image:tag if at least one tag was found for docker_image.
Otherwise, returns None.
"""
tag = subprocess.check_output(
['gcloud', 'container', 'images', 'list-tags',
docker_image, '--limit=1', '--format=value(tags[0])'])
tag = tag.strip()
if not tag:
return None
return '%s:%s' % (docker_image, tag)
def get_gpu_volume_mounts():
"""Get volume specs to add to Kubernetes config.
Returns:
Volume specs in the format: volume_name: (hostPath, podPath).
"""
volume_specs = {}
if FLAGS.nvidia_lib_dir:
volume_specs['nvidia-libraries'] = (FLAGS.nvidia_lib_dir, '/usr/lib/nvidia')
if FLAGS.cuda_lib_dir:
cuda_library_files = ['libcuda.so', 'libcuda.so.1', 'libcudart.so']
for cuda_library_file in cuda_library_files:
lib_name = cuda_library_file.split('.')[0]
volume_specs['cuda-libraries-%s' % lib_name] = (
os.path.join(FLAGS.cuda_lib_dir, cuda_library_file),
os.path.join('/usr/lib/cuda/', cuda_library_file))
return volume_specs
def main():
config_text = open(FLAGS.benchmark_configs_file, 'r').read()
configs = yaml.load(config_text)
docker_client = docker.from_env()
time_tag = datetime.now().strftime('%d_%m_%Y_%H_%M')
# Create directories to store kubernetes yaml configs in.
if not os.path.isdir(FLAGS.config_output_file_dir):
os.makedirs(FLAGS.config_output_file_dir)
# Keeps track of already built docker images in case multiple benchmarks
# use the same docker image.
benchmark_name_to_docker_image = {}
# TODO(annarev): run benchmarks in parallel instead of sequentially.
for config in configs:
name = _ConvertToValidName(str(config['benchmark_name']))
if name in benchmark_name_to_docker_image:
docker_image = benchmark_name_to_docker_image[name]
elif FLAGS.build_docker_image:
docker_image = _BuildAndPushDockerImage(
docker_client, config['docker_file'], name, time_tag,
FLAGS.store_docker_image_in_gcloud)
benchmark_name_to_docker_image[name] = docker_image
else:
docker_image = _GetMostRecentDockerImageFromGcloud(
_DOCKER_IMAGE_PATTERN % name)
if not docker_image:
raise NoImageFoundError('No tags found for image %s.' % docker_image)
env_vars = {
_OUTPUT_FILE_ENV_VAR: os.path.join(
FLAGS.benchmark_results_dir, name + '.json'),
_TEST_NAME_ENV_VAR: name
}
gpu_count = (0 if 'gpus_per_machine' not in config
else config['gpus_per_machine'])
volumes = {}
if gpu_count > 0:
volumes = get_gpu_volume_mounts()
env_vars['LD_LIBRARY_PATH'] = (
'/usr/lib/cuda:/usr/lib/nvidia:/usr/lib/x86_64-linux-gnu')
env_vars.update(config.get('env_vars', {}))
args = config.get('args', {})
kubernetes_config = k8s_tensorflow_lib.GenerateConfig(
config['worker_count'],
config['ps_count'],
_PORT,
request_load_balancer=False,
docker_image=docker_image,
name_prefix=name,
additional_args=args,
env_vars=env_vars,
volumes=volumes,
use_shared_volume=False,
use_cluster_spec=False,
gpu_limit=gpu_count)
kubernetes_config_path = os.path.join(
FLAGS.config_output_file_dir, name + '.yaml')
with open(kubernetes_config_path, 'w') as output_config_file:
output_config_file.write(kubernetes_config)
success = _RunBenchmark(name, kubernetes_config_path)
if not success:
sys.exit(1)
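# Hedged sketch of one entry in --benchmark_configs_file, inferred from the keys read
# above (names and values are illustrative only):
#   - benchmark_name: example_benchmark
#     docker_file: benchmarks/example/Dockerfile
#     worker_count: 2
#     ps_count: 1
#     gpus_per_machine: 8               # optional
#     env_vars: {EXAMPLE_FLAG: "1"}     # optional
#     args: {model: example_model}      # optional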
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register(
'type', 'bool', lambda v: v.lower() in ('true', 't', 'y', 'yes'))
parser.add_argument(
'--benchmark_configs_file', type=str, default=None, required=True,
help='YAML file with benchmark configs.')
parser.add_argument(
'--config_output_file_dir', type=str, default=None, required=True,
help='Directory to write generated kubernetes configs to.')
parser.add_argument(
'--benchmark_results_dir', type=str, default=None, required=True,
help='Directory to store benchmark results at.')
parser.add_argument(
'--docker_context_dir', type=str, default='',
help='Directory to use as a docker context. By default, docker context '
'will be set to the directory containing a docker file.')
parser.add_argument(
'--build_docker_image', type='bool', nargs='?', const=True, default=True,
help='Whether to build a new docker image or try to use existing one.')
parser.add_argument(
'--store_docker_image_in_gcloud', type='bool', nargs='?', const=True,
default=False, help='Push docker images to google cloud.')
parser.add_argument(
'--cuda_lib_dir', type=str, default=None, required=False,
help='Directory where cuda library files are located on gcloud node.')
parser.add_argument(
'--nvidia_lib_dir', type=str, default=None, required=False,
help='Directory where nvidia library files are located on gcloud node.')
FLAGS, _ = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG)
main()
| 8,449
| 33.917355
| 80
|
py
|
benchmarks
|
benchmarks-master/tools/kubectl_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for kubectl_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import subprocess
import unittest
import kubectl_util
kubectl_util.WAIT_PERIOD_SECONDS = 1
class KubectlUtilTest(unittest.TestCase):
@mock.patch.object(subprocess, 'check_output')
@mock.patch.object(subprocess, 'check_call')
def testCreatePods(self, mock_check_call, mock_check_output):
mock_check_output.return_value = 'nonempty'
kubectl_util.CreatePods('test_pod', 'test.yaml')
mock_check_call.assert_called_once_with(
['kubectl', 'create', '--filename=test.yaml'])
mock_check_output.assert_called_once_with(
['kubectl', 'get', 'pods', '-o', 'name', '-a', '-l',
'name-prefix in (test_pod)'], universal_newlines=True)
@mock.patch.object(subprocess, 'check_output')
@mock.patch.object(subprocess, 'call')
def testDeletePods(self, mock_check_call, mock_check_output):
mock_check_output.return_value = ''
kubectl_util.DeletePods('test_pod', 'test.yaml')
mock_check_call.assert_called_once_with(
['kubectl', 'delete', '--filename=test.yaml'])
mock_check_output.assert_called_once_with(
['kubectl', 'get', 'pods', '-o', 'name', '-a', '-l',
'name-prefix in (test_pod)'], universal_newlines=True)
@mock.patch.object(subprocess, 'check_output')
def testWaitForCompletion(self, mock_check_output):
# Test success
mock_check_output.return_value = '\'0,0,\''
self.assertTrue(kubectl_util.WaitForCompletion('test_pod'))
# Test failure
mock_check_output.return_value = '\'0,1,\''
self.assertFalse(kubectl_util.WaitForCompletion('test_pod'))
# Test timeout
with self.assertRaises(kubectl_util.TimeoutError):
mock_check_output.return_value = '\'0,,\''
kubectl_util.WaitForCompletion('test_pod', timeout=5)
if __name__ == '__main__':
unittest.main()
| 2,633
| 35.082192
| 80
|
py
|
benchmarks
|
benchmarks-master/tools/k8s_tensorflow_lib.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates YAML configuration files for distributed TensorFlow workers.
The workers will be run in a Kubernetes (k8s) container cluster.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Note: It is intentional that we do not import tensorflow in this script. The
# machine that launches a TensorFlow k8s cluster does not have to have the
# Python package of TensorFlow installed on it.
# Worker pods will mount host volume /shared, as a convenient way to create
# shared storage among workers during local tests.
WORKER_POD = (
"""apiVersion: v1
kind: Pod
metadata:
name: {name_prefix}-worker{worker_id}
labels:
tf-worker: "{worker_id}"
name-prefix: "{name_prefix}"
job: "worker"
spec:
restartPolicy: OnFailure
containers:
- name: tf-worker{worker_id}
image: {docker_image}
args: [{args}]
ports:
- containerPort: {port}
env: [{env_vars}]
resources: {{limits: {resource_limits} }}
volumeMounts: [{volume_mounts}]
volumes: [{volumes}]
""")
WORKER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: {name_prefix}-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
ports:
- port: {port}
targetPort: {port}
selector:
tf-worker: "{worker_id}"
""")
WORKER_LB_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: {name_prefix}-worker{worker_id}
labels:
tf-worker: "{worker_id}"
spec:
type: LoadBalancer
ports:
- port: {port}
selector:
tf-worker: "{worker_id}"
""")
PARAM_SERVER_POD = (
"""apiVersion: v1
kind: Pod
metadata:
name: {name_prefix}-ps{param_server_id}
labels:
tf-ps: "{param_server_id}"
name-prefix: "{name_prefix}"
job: "ps"
spec:
restartPolicy: OnFailure
containers:
- name: tf-ps{param_server_id}
image: {docker_image}
args: [{args}]
ports:
- containerPort: {port}
env: [{env_vars}]
volumeMounts: [{volume_mounts}]
volumes: [{volumes}]
""")
PARAM_SERVER_SVC = (
"""apiVersion: v1
kind: Service
metadata:
name: {name_prefix}-ps{param_server_id}
labels:
tf-ps: "{param_server_id}"
spec:
ports:
- port: {port}
selector:
tf-ps: "{param_server_id}"
""")
PARAM_LB_SVC = ("""apiVersion: v1
kind: Service
metadata:
name: {name_prefix}-ps{param_server_id}
labels:
tf-ps: "{param_server_id}"
spec:
type: LoadBalancer
ports:
- port: {port}
selector:
tf-ps: "{param_server_id}"
""")
_ENV_VAR_TEMPLATE = '{name: "%s", value: "%s"}'
_ARG_TEMPLATE = '"--%s=%s"'
_SHARED_VOLUME_INFO = {'shared': ('/shared', '/shared')}
_VOLUME_MOUNT_TEMPLATE = '{name: %s, mountPath: %s}'
_VOLUME_TEMPLATE = '{name: %s, hostPath: {path: %s}}'
_GPU_TEMPLATE = '{alpha.kubernetes.io/nvidia-gpu: %d}'
def GenerateConfig(num_workers,
num_param_servers,
port,
request_load_balancer,
docker_image,
name_prefix,
additional_args=None,
env_vars=None,
volumes=None,
use_shared_volume=True,
use_cluster_spec=True,
gpu_limit=0):
"""Generate configuration strings.
Args:
num_workers: number of worker jobs.
num_param_servers: number of ps server jobs.
port: GRPC server port.
request_load_balancer: request worker0 to be exposed on a public IP
address via an external load balancer.
docker_image: docker image to use.
name_prefix: name to prepend to pod job names.
additional_args: dictionary mapping argument names to argument values
to pass to pods.
env_vars: dictionary of environment variables to set.
volumes: dictionary mapping from volume name to a tuple of values
      (src_location, dst_location). These volumes will be added in addition
to /shared volume if use_shared_volume is True.
use_shared_volume: whether to add hostPath to /shared directory
to the kubernetes config.
use_cluster_spec: if true, pass --cluster_spec to worker and ps jobs.
If false, pass --worker_hosts and --ps_hosts to worker and ps jobs.
gpu_limit: Add a requirement to worker jobs for this many gpu's.
Returns:
Kubernetes yaml config.
"""
if env_vars is None:
env_vars = {}
env_str = ', '.join([_ENV_VAR_TEMPLATE % (name, value)
for name, value in env_vars.items()])
if additional_args is None:
additional_args = {}
config = ''
common_args = GetCommonArgs(
num_workers, num_param_servers, port, name_prefix, use_cluster_spec)
if volumes is None:
volumes = {}
if use_shared_volume:
volumes.update(_SHARED_VOLUME_INFO)
volumes_str = ', '.join([_VOLUME_TEMPLATE % (name, location[0])
for name, location in volumes.items()])
volume_mounts_str = ', '.join([_VOLUME_MOUNT_TEMPLATE % (name, location[1])
for name, location in volumes.items()])
for worker in range(num_workers):
worker_args = {
'job_name': 'worker',
'task_index': worker
}
worker_args.update(common_args)
worker_args.update(additional_args)
arg_str = ', '.join([_ARG_TEMPLATE % (name, value)
for name, value in worker_args.items()])
config += WORKER_POD.format(
port=port,
worker_id=worker,
docker_image=docker_image,
name_prefix=name_prefix,
volume_mounts=volume_mounts_str,
volumes=volumes_str,
args=arg_str,
env_vars=env_str,
resource_limits=_GPU_TEMPLATE % gpu_limit if gpu_limit > 0 else '')
config += '---\n'
if request_load_balancer:
config += WORKER_LB_SVC.format(port=port,
worker_id=worker,
name_prefix=name_prefix)
else:
config += WORKER_SVC.format(port=port,
worker_id=worker,
name_prefix=name_prefix)
config += '---\n'
for param_server in range(num_param_servers):
ps_args = {
'job_name': 'ps',
'task_index': param_server
}
ps_args.update(common_args)
ps_args.update(additional_args)
arg_str = ', '.join([_ARG_TEMPLATE % (name, value)
for name, value in ps_args.items()])
config += PARAM_SERVER_POD.format(
port=port,
param_server_id=param_server,
docker_image=docker_image,
name_prefix=name_prefix,
volume_mounts=volume_mounts_str,
volumes=volumes_str,
args=arg_str,
env_vars=env_str)
config += '---\n'
if request_load_balancer:
config += PARAM_LB_SVC.format(
port=port, param_server_id=param_server, name_prefix=name_prefix)
else:
config += PARAM_SERVER_SVC.format(
port=port, param_server_id=param_server, name_prefix=name_prefix)
config += '---\n'
return config
def WorkerClusterSpecString(num_workers,
num_param_servers,
port,
name_prefix):
"""Generates worker cluster spec."""
return ClusterSpecString(num_workers, num_param_servers, port, name_prefix)
def ParamServerClusterSpecString(num_workers,
num_param_servers,
port,
name_prefix):
"""Generates parameter server spec."""
return ClusterSpecString(num_workers, num_param_servers, port,
name_prefix)
def ClusterSpecString(num_workers,
num_param_servers,
port,
name_prefix):
"""Generates general cluster spec."""
spec = 'worker|'
for worker in range(num_workers):
spec += '%s-worker%d:%d' % (name_prefix, worker, port)
if worker != num_workers-1:
spec += ';'
spec += ',ps|'
for param_server in range(num_param_servers):
spec += '%s-ps%d:%d' % (name_prefix, param_server, port)
if param_server != num_param_servers-1:
spec += ';'
return spec
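# For example, ClusterSpecString(1, 1, 5000, 'abc') yields
# 'worker|abc-worker0:5000,ps|abc-ps0:5000' (the same string asserted in
# k8s_tensorflow_test.py).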
def GetCommonArgs(num_workers,
num_param_servers,
port,
name_prefix,
use_cluster_spec):
"""Get arguments common to both worker and ps jobs.
Args:
num_workers: number of workers.
num_param_servers: number of ps servers.
port: worker and ps port number.
name_prefix: prefix to prepend to job names.
use_cluster_spec: if true, pass --cluster_spec argument.
      If false, pass --worker_hosts and --ps_hosts arguments.
Returns:
A dictionary of argument names mapping to argument values.
"""
common_args = {}
if use_cluster_spec:
common_args['cluster_spec'] = WorkerClusterSpecString(
num_workers,
num_param_servers,
port,
name_prefix)
else:
common_args['worker_hosts'] = WorkerHosts(num_workers, port, name_prefix)
common_args['ps_hosts'] = PsHosts(num_param_servers, port, name_prefix)
return common_args
def WorkerHosts(num_workers, port, name_prefix):
worker_hosts = ['%s-worker%d:%d' % (name_prefix, i, port)
for i in range(num_workers)]
return ','.join(worker_hosts)
def PsHosts(num_ps, port, name_prefix):
ps_hosts = ['%s-ps%d:%d' % (name_prefix, i, port)
for i in range(num_ps)]
return ','.join(ps_hosts)
| 10,159
| 29.881459
| 80
|
py
|
benchmarks
|
benchmarks-master/tools/k8s_tensorflow_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for k8s_tensorflow_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import k8s_tensorflow_lib
class K8sTensorflowTest(unittest.TestCase):
def testGenerateConfig_LoadBalancer(self):
# Use loadbalancer
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=True,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False)
self.assertTrue('LoadBalancer' in config)
# Don't use loadbalancer
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=False,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False)
self.assertFalse('LoadBalancer' in config)
def testGenerateConfig_SharedVolume(self):
# Use shared directory
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=False,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=True)
self.assertTrue('/shared' in config)
# Don't use shared directory
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=False,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False)
self.assertFalse('/shared' in config)
def testEnvVar(self):
# Use loadbalancer
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=True,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False,
env_vars={'test1': 'test1_value', 'test2': 'test2_value'})
self.assertTrue('{name: "test1", value: "test1_value"}' in config)
self.assertTrue('{name: "test2", value: "test2_value"}' in config)
def testClusterSpec(self):
# Use cluster_spec
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=True,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False,
use_cluster_spec=True)
self.assertFalse('worker_hosts' in config)
self.assertFalse('ps_hosts' in config)
self.assertTrue(
'"--cluster_spec=worker|abc-worker0:5000,ps|abc-ps0:5000"' in config)
# Don't use cluster_spec
config = k8s_tensorflow_lib.GenerateConfig(
num_workers=1,
num_param_servers=1,
port=5000,
request_load_balancer=True,
docker_image='test_image',
name_prefix='abc',
use_shared_volume=False,
use_cluster_spec=False)
self.assertFalse('cluster_spec' in config)
self.assertTrue('"--worker_hosts=abc-worker0:5000"' in config)
self.assertTrue('"--ps_hosts=abc-ps0:5000"' in config)
def testWorkerHosts(self):
self.assertEqual(
'test_prefix-worker0:1234',
k8s_tensorflow_lib.WorkerHosts(1, 1234, 'test_prefix'))
self.assertEqual(
'test_prefix-worker0:1234,test_prefix-worker1:1234',
k8s_tensorflow_lib.WorkerHosts(2, 1234, 'test_prefix'))
def testPsHosts(self):
self.assertEqual(
'test_prefix-ps0:1234,test_prefix-ps1:1234',
k8s_tensorflow_lib.PsHosts(2, 1234, 'test_prefix'))
if __name__ == '__main__':
unittest.main()
| 4,331
| 31.328358
| 80
|
py
|
benchmarks
|
benchmarks-master/tools/kubectl_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for running, waiting and stopping benchmark jobs on kubernetes.
Functions in this file assume kubernetes jobs have 'name-prefix' and 'job'
selectors set.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import subprocess
import time
_KUBECTL = 'kubectl'
WAIT_PERIOD_SECONDS = 20
class TimeoutError(Exception):
pass
def _WaitUntil(timeout, predicate, *args):
start_time = time.time()
while time.time() - start_time < timeout:
time.sleep(WAIT_PERIOD_SECONDS)
if predicate(*args):
return True
return False
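# Usage sketch (hypothetical pod name): _WaitUntil polls the predicate every
# WAIT_PERIOD_SECONDS until it returns a truthy value or the timeout elapses,
# e.g. _WaitUntil(100, _GetPodNames, 'my-benchmark') becomes True as soon as
# pods with that name-prefix exist; CreatePods below uses it exactly this way.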
def _GetPodNames(pod_name_prefix, job_name=None):
"""Get pod names based on the pod_name_prefix and job_name.
Args:
pod_name_prefix: value of 'name-prefix' selector.
job_name: value of 'job' selector. If None, pod names will be
selected only based on 'name-prefix' selector.
Returns:
List of pod names.
"""
pod_list_command = [
_KUBECTL, 'get', 'pods', '-o', 'name', '-a',
'-l', _GetJobSelector(pod_name_prefix, job_name)]
logging.info('Command to get pod names: %s', ' '.join(pod_list_command))
output = subprocess.check_output(pod_list_command, universal_newlines=True)
pod_names = [name for name in output.strip().split('\n') if name]
logging.info('Pod names: "%s"', ','.join(pod_names))
return pod_names
def CreatePods(pod_name, yaml_file):
"""Creates pods based on the given kubernetes config.
Args:
pod_name: 'name-prefix' selector for the pods.
yaml_file: kubernetes yaml config.
Raises:
TimeoutError: if jobs didn't come up for a long time.
"""
command = [_KUBECTL, 'create', '--filename=%s' % yaml_file]
logging.info('Creating pods: %s', subprocess.list2cmdline(command))
subprocess.check_call(command)
if not _WaitUntil(100, _GetPodNames, pod_name):
raise TimeoutError(
'Timed out waiting for %s pod to come up.' % pod_name)
def DeletePods(pod_name, yaml_file):
"""Deletes pods based on the given kubernetes config.
Args:
pod_name: 'name-prefix' selector for the pods.
yaml_file: kubernetes yaml config.
Raises:
TimeoutError: if jobs didn't terminate for a long time.
"""
command = [_KUBECTL, 'delete', '--filename=%s' % yaml_file]
logging.info('Deleting pods: %s', ' '.join(command))
subprocess.call(command)
def CheckPodsAreTerminated():
return not _GetPodNames(pod_name)
if not _WaitUntil(100, CheckPodsAreTerminated):
raise TimeoutError(
'Timed out waiting for %s pod to terminate.' % pod_name)
def _GetJobSelector(pod_name_prefix, job_name=None):
selector = 'name-prefix in (%s)' % pod_name_prefix
if job_name:
selector += ',job in (%s)' % job_name
return selector
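# Illustrative example (hypothetical values): _GetJobSelector('tf-bench', 'worker')
# returns 'name-prefix in (tf-bench),job in (worker)', the label selector passed
# to kubectl via its -l flag.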
def WaitForCompletion(pod_name_prefix, job_name='worker', timeout=2*60*60):
"""Waits until jobs matching pod_name and job_name are terminated.
Args:
pod_name_prefix: value of 'name-prefix' selector.
job_name: value of 'job' selector.
timeout: how long to wait for jobs to terminate before timing out.
Returns:
True if jobs terminated with success, False otherwise.
Raises:
TimeoutError: if jobs haven't terminated after timeout.
ValueError: if we couldn't find jobs matching pod_name and job_name.
"""
# Jsonpath that selects comma-separated exit codes (followed by extra comma
# at the end).
# If a job doesn't have an exit code yet, empty string will be returned
# instead. For ex. output for 2 jobs where one is missing an exit code
# and the other one has an exit code of 0 would look like: ,0,
last_state_query = (
'jsonpath=\'{range .items[*]}'
'{.status.containerStatuses[?(@.state.terminated)]'
'.state.terminated.exitCode},{end}\'')
status_command = [
_KUBECTL, 'get', '-o', last_state_query,
'pods', '-l', _GetJobSelector(pod_name_prefix, job_name), '-a'
]
exit_codes = []
start_time = time.time()
while time.time() - start_time < timeout:
# Output of check_output is a string that starts and ends with '.
output = subprocess.check_output(
status_command, universal_newlines=True).strip('\'')
logging.debug('Pod status: %s', output)
if not output:
raise ValueError(
'Query did not match any data. Query: %s' % ' '.join(status_command))
# Output will end with an extra comma. So, we remove it before splitting.
exit_codes = output[:-1].split(',')
if '' not in exit_codes: # fetched all exit codes
break
time.sleep(WAIT_PERIOD_SECONDS)
if '' in exit_codes:
raise TimeoutError(
'Timed out waiting for %s %s jobs to finish.' %
(pod_name_prefix, job_name))
_PrintLogs(pod_name_prefix, job_name)
failed_job_count = sum(code != '0' for code in exit_codes)
if failed_job_count > 0:
logging.error('%d out of %d jobs failed. Exit codes: %s',
failed_job_count, len(exit_codes), ','.join(exit_codes))
return False
return True
def _PrintLogs(pod_name_prefix, job_name):
"""Prints pod logs.
If a pod has been restarted, prints logs from previous run. Otherwise,
prints the logs from current run. We print logs for pods selected
based on pod_name_prefix and job_name.
Args:
pod_name_prefix: value of 'name-prefix' selector.
job_name: value of 'job' selector.
"""
for pod_name in _GetPodNames(pod_name_prefix, job_name):
try:
# Get previous logs.
logs_command = [_KUBECTL, 'logs', '-p', pod_name]
logging.info('Command to get logs: %s', ' '.join(logs_command))
output = subprocess.check_output(logs_command, universal_newlines=True)
except subprocess.CalledProcessError:
# We couldn't get previous logs, so we will try to get current logs.
logs_command = [_KUBECTL, 'logs', pod_name]
logging.info('Command to get logs: %s', ' '.join(logs_command))
output = subprocess.check_output(logs_command, universal_newlines=True)
print('%s logs:' % pod_name)
print(output)
| 6,712
| 33.076142
| 80
|
py
|
benchmarks
|
benchmarks-master/tools/run_all_CNN_benchmarks.py
|
# Copyright 2017 Ioannis Athanasiadis(supernlogn). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""runs all benchmarks from benchmark_configs.yml file,
except vggXX, because of malfunction.
This script should only be run from opensource repository.
"""
from __future__ import print_function
import argparse
from datetime import datetime
import logging
import os
from string import maketrans
import subprocess
import sys
import yaml
import tensorflow as tf
import numpy as np
import benchmark_cnn
import cnn_util
from cnn_util import log_fn
_OUTPUT_FILE_ENV_VAR = 'TF_DIST_BENCHMARK_RESULTS_FILE'
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
_PORT = 5000
def _RunBenchmark(name, yaml_file):
pass
benchmark_cnn.define_flags()
def main(_):
params = benchmark_cnn.make_params_from_flags()
models = ['alexnet', ]
if __name__ == '__main__':
tf.app.run()
| 1,482
| 24.135593
| 80
|
py
|
benchmarks
|
benchmarks-master/dashboard_app/main.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Flask application file for benchmark dashboard."""
from collections import namedtuple
from datetime import datetime
from datetime import timedelta
import json
import logging
from operator import itemgetter
import re
import urllib.parse
from flask import Flask, render_template, request
from google.cloud import datastore
app = Flask(__name__)
# How much data to fetch for graphing.
_DAYS_TO_FETCH = 90
# Don't show a benchmark in benchmark list if it hasn't been run
# for this many days.
_MAX_DAYS_WITHOUT_RUN = 14
# Arguments in this list will not be displayed on the dashboard.
_ARGUMENTS_TO_EXCLUDE = set(
['job_name', 'result_storage', 'task_index'])
def argument_name(argument):
"""Gets argument name from string in the form "--arg_name=value".
Args:
argument: String argument in the form --arg_name=value.
Returns:
String argument name.
"""
if len(argument) < 4 or argument[:2] != '--' or '=' not in argument:
logging.error('Invalid argument: %s. Argument must be in the form '
'--name=value', argument)
return ''
return argument[2:argument.index('=')]
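# Illustrative example (mirrors the unit tests in main_test.py further down in
# this dump):
#   argument_name('--batch_size=32')  -> 'batch_size'
#   argument_name('batch_size=32')    -> '' (logged as an invalid argument)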
@app.route('/')
@app.route('/<pattern>')
def index(pattern=None):
"""Renders index.html page with a list of benchmarks."""
filter_regex = None
if pattern:
filter_regex = re.compile(urllib.parse.unquote(pattern))
min_time_to_lookup = datetime.now() - timedelta(days=_MAX_DAYS_WITHOUT_RUN)
client = datastore.Client()
query = client.query(kind='Test')
query.add_filter('start', '>', min_time_to_lookup)
fetched = list(query.fetch())
test_names = {} # maps test name to encoded test name
for fetched_result in fetched:
if fetched_result['test'] in test_names:
continue # already added
if not filter_regex or filter_regex.search(fetched_result['test']):
test_names[fetched_result['test']] = urllib.parse.quote(
fetched_result['test'], safe='')
# convert test_names to list and sort
test_names = sorted(test_names.items(), key=itemgetter(1), reverse=True)
return render_template('index.html', tests=test_names)
@app.route('/test/<test_id>')
def test(test_id):
"""Renders test.html page with a graph for each benchmark entry."""
test_id = urllib.parse.unquote(test_id)
min_time_to_lookup = datetime.now() - timedelta(days=2)
client = datastore.Client()
# Get most recent start time for this test
query = client.query(kind='Test')
query.add_filter('test', '=', test_id)
query.order = ['-start']
test_results = list(query.fetch(limit=1))
if not test_results:
return 'No data for benchmark %s' % test_id
start_time = test_results[0]['start']
# Get a list of entry ids
query = client.query(kind='Entry')
query.add_filter('test', '=', test_id)
query.add_filter('start', '=', start_time)
try:
test_info = json.loads(test_results[0]['info'])
except ValueError as e:
logging.exception('Failed to parse "info" in test_results: %s', e)
test_info = None
arguments = []
if (test_info and 'runConfiguration' in test_info and
'argument' in test_info['runConfiguration']):
arguments = test_info['runConfiguration']['argument']
arguments = [
arg for arg in arguments
if argument_name(arg) not in _ARGUMENTS_TO_EXCLUDE]
arguments = ' '.join(arguments)
entries = []
Entry = namedtuple('Entry', ['id', 'latest_value'])
for entry in query.fetch():
info = json.loads(entry['info'])
entries.append(Entry(entry['entry'], info['wallTime']))
return render_template(
'test.html', test_id=test_id, entries=entries,
latest_time=start_time.strftime('%Y-%m-%d %H:%M'),
arguments=arguments)
@app.route('/benchmark_data/')
def benchmark_data():
"""Returns benchmark data in json format for graphing."""
test_id = urllib.parse.unquote(request.args.get('test'))
entry_id = urllib.parse.unquote(request.args.get('entry'))
min_time_to_lookup = datetime.now() - timedelta(days=_DAYS_TO_FETCH)
client = datastore.Client()
timing_query = client.query(kind='Entry')
timing_query.add_filter('test', '=', test_id)
timing_query.add_filter('entry', '=', entry_id)
timing_query.add_filter('start', '>', min_time_to_lookup)
timing_query.projection = ['start', 'timing']
start_and_timing = [
{'start': data['start'], 'timing': data['timing']}
for data in timing_query.fetch()]
start_and_timing_json = json.dumps(start_and_timing)
return start_and_timing_json
@app.errorhandler(500)
def server_error(e):
logging.exception('An error occurred during a request: %s', e)
return 'An internal error occurred.', 500
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, threaded=True)
| 5,504
| 32.981481
| 80
|
py
|
benchmarks
|
benchmarks-master/dashboard_app/main_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import json
import main
import unittest
import urllib
class TestMain(unittest.TestCase):
def testArgumentInvalidFormat(self):
self.assertEqual('', main.argument_name(''))
self.assertEqual('', main.argument_name('arg=val'))
self.assertEqual('', main.argument_name('-arg=val'))
self.assertEqual('', main.argument_name('--argval'))
self.assertEqual('', main.argument_name('--=val'))
self.assertEqual('', main.argument_name('--='))
def testArgumentValidFormat(self):
self.assertEqual('abc', main.argument_name('--abc=123'))
self.assertEqual('a', main.argument_name('--a=123'))
def testIndexPage(self):
main.app.testing = True
client = main.app.test_client()
r = client.get('/')
self.assertEqual(200, r.status_code)
self.assertIn('sample_logged_benchmark', r.data.decode('utf-8'))
def testTestPage_InvalidTest(self):
main.app.testing = True
client = main.app.test_client()
r = client.get('/test/abc')
self.assertEqual(200, r.status_code)
self.assertIn('No data for benchmark', str(r.data))
def testTestPage_SampleTest(self):
main.app.testing = True
client = main.app.test_client()
sample_benchmark_name = '//tensorflow/examples/benchmark:sample_logged_benchmark'
r = client.get(
'/test/%252F%252Ftensorflow%252Fexamples%252Fbenchmark%253Asample_logged_benchmark')
self.assertEqual(200, r.status_code)
self.assertIn(
'Performance plots for %s' % sample_benchmark_name, str(r.data))
def testFetchBenchmarkData_InvalidTest(self):
main.app.testing = True
client = main.app.test_client()
r = client.get('/benchmark_data/?test=abc&entry=cde')
self.assertEqual(200, r.status_code)
self.assertEqual(b'[]', r.data)
def testFetchBenchmarkData_SampleTest(self):
main.app.testing = True
client = main.app.test_client()
encoded_benchmark_name = (
'/test/%252F%252Ftensorflow%252Fexamples%252Fbenchmark%253Asample_logged_benchmark')
r = client.get('/benchmark_data/?test=%s&entry=SampleBenchmark.sum_wall_time' %
encoded_benchmark_name)
self.assertEqual(200, r.status_code)
self.assertEqual(b'[]', r.data)
if __name__ == '__main__':
unittest.main()
| 2,934
| 33.940476
| 92
|
py
|
benchmarks
|
benchmarks-master/my_tests/reportLmdbError.py
|
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from six.moves import xrange # pylint: disable=redefined-builtin
import lmdb
import PIL.Image
from StringIO import StringIO
# specify dataset path
path_prefix = '/mnt/terabyte/datasets/imagenet/caffe/ilsvrc12_'
path_postfix = '_lmdb'
supported_modes = ['train', 'val']
mode = supported_modes[0]
full_path = path_prefix + mode + path_postfix
# specify how many datums to read at once
batch_length = 11
# set numpy array print options
np.set_printoptions(threshold=21)
reader = tf.LMDBReader(name='reader')
keys_queue = tf.FIFOQueue(
capacity=32,
dtypes=[dtypes.string],
shapes=())
# scenario 1 (buggy)
keys1, values1 = reader.read_up_to(keys_queue, batch_length)
jpg_buffer1 = tf.decode_raw(values1, out_type=tf.uint8)
# scenario 2 (good)
keys2, values2 = reader.read_up_to(keys_queue, 11)
jpg_buffer2 = tf.decode_raw(values2, out_type=tf.uint8)
with tf.Session() as sess:
keys_queue.enqueue([full_path]).run()
keys_queue.close().run()
buffer2 = sess.run(jpg_buffer2)
print(buffer2.shape)
print(buffer2[0:20])
buffer1 = sess.run(jpg_buffer1)
print(buffer1.shape)
print(buffer1[:,0:20])
| 1,388
| 27.9375
| 65
|
py
|
benchmarks
|
benchmarks-master/my_tests/LmdbInputImagePreprocessor.py
|
# Copyright 2017 Ioannis Athanasiadis(supernlogn). All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import unicode_literals
from __future__ import absolute_import
from __future__ import division
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import dtypes
from six.moves import xrange # pylint: disable=redefined-builtin
import lmdb
import PIL.Image
from StringIO import StringIO
path_prefix = '/mnt/terabyte/datasets/imagenet/caffe/ilsvrc12_'
path_postfix = '_lmdb'
supported_modes = ['train', 'val']
mode = supported_modes[0]
full_path = path_prefix + mode + path_postfix
# def read_lmdb(lmdb_file):
# cursor = lmdb.open(lmdb_file, readonly=True).begin().cursor()
# datum = caffe.proto.caffe_pb2.Datum()
# for _, value in cursor:
# datum.ParseFromString(value)
# s = StringIO()
# s.write(datum.data)
# s.seek(0)
# yield np.array(PIL.Image.open(s)), datum.label
# for im, label in read_lmdb(full_path):
# print label, im
np.set_printoptions(threshold=np.inf)
# env = lmdb.open(full_path, readonly=True)
keys_list = []
i = 1
# with env.begin() as txn:
# cursor = txn.cursor()
# for key, value in cursor:
# val = np.fromstring(value, dtype=np.uint8)
# print("env print: ", val.shape)
# label = val[12:17]
# print([bin(x)[2:].zfill(8) for x in label])
# keys_list.append(key)
# i = i +1
# if i >= 10:
# break
# print(key)
# env.close()
np.set_printoptions(threshold=20)
# print(int(value[0]))
# I1 = value.index( '\xFF\xD8' )
# I2 = value.index( '',I1)
# print(I1)
# value = np.fromstring(value,dtype=np.uint8)
# # print(value[I1:])
# header_data = value[0:17]
# print(np.size(value) - 256*256*3)
# img = value[17:]
# img = np.reshape(img, [256, 256, 3])
# imgToShow = PIL.Image.fromarray(img, 'RGB')
# imgToShow.save('tensImg.png')
# path_tensor = tf.convert_to_tensor(len(full_path), dtype=tf.int32)
# tf.random_shuffle(keys_tensor)
# tf.train.add_queue_runner(tf.train.QueueRunner(keys_queue,[kq_enqueue_op] * 1))
print(len(full_path))
reader = tf.LMDBReader(name='reader')
keys_queue = tf.FIFOQueue(
capacity=2,
dtypes=[dtypes.string],
shapes=())
# i = tf.Variable(initial_value=0, trainable=False, name="lmdb_iterator_var")
datum_size = 196625
vals = tf.zeros(shape=(1, datum_size), dtype=tf.uint8)
def in_body(in_iterator, vals):
vals = tf.concat(axis=0,
values=[vals,
tf.expand_dims(axis=0,
input=tf.decode_raw(reader.read(keys_queue)[1],
out_type=tf.uint8)[:])])
return in_iterator + 1, vals
in_i = []
in_while_reader = []
for i in range(0, 3):
in_i.append(tf.constant(0))
in_while_reader.append(tf.while_loop(cond=lambda i, vals: tf.less(i, 10),
body=in_body,
loop_vars=[in_i[-1], vals],
shape_invariants=[
in_i[-1].get_shape(), tf.TensorShape((None, datum_size))],
parallel_iterations=1))
def out_body(out_iterator, vals):
out_case = []
for i in range(0, 3):
out_case.append((tf.equal(out_iterator, i),
lambda i=i: in_while_reader[i]))  # bind i at definition time to avoid the late-binding closure bug
r = tf.case(out_case, default=lambda: in_while_reader[0])
vals = tf.concat(axis=0,
values=[vals, r[1]])
return out_iterator + 1, vals
out_i = tf.constant(0)
out_while_reader = tf.while_loop(cond=lambda out_i, vals: tf.less(out_i, 2),
body=out_body,
loop_vars=[out_i, vals],
shape_invariants=[
out_i.get_shape(), tf.TensorShape((None, datum_size))],
parallel_iterations=1)
keys, values = reader.read(keys_queue)
jpg_buffer = tf.decode_raw(values, out_type=tf.uint8)
enqueue_op = keys_queue.enqueue([values])
# jpg_label = jpg_buffer[:,-5:-1]
# jpg_img = jpg_buffer[:,12:-5]
# jpg_img = tf.reshape(jpg_img, [32, 3, 256, 256])
# jpg_img = tf.transpose(jpg_img, [0,2,3,1])
# rev = tf.constant([2], dtype=tf.int32)
# jpg_img = tf.reverse(jpg_img, rev)
with tf.Session() as sess:
# keys_queue.enqueue([full_path]).run()
# keys_queue.close().run()
w = sess.run(enqueue_op)
print(w.shape)
# print(w)
# search if two rows are the same
for it1 in range(w.shape[0]):
ans = False
for it2 in range(it1 + 1, w.shape[0]):
if (np.array_equal(w[it1, :], w[it2, :])):
print("Found them: %d, %d" % (it1, it2))
# # coord = tf.train.Coordinator()
# # threads = tf.train.start_queue_runners(coord=coord)
# imgToShow = PIL.Image.fromarray(img, 'RGB')
# imgToShow.save('tensImg2.jpg')
# k,v = sess.run([keys, values])
# # print(k, v)
# print((len(v) - 2556*256*3))
# b = np.array(v)
# np.reshape(b,[256, 256, 3])
# # coord.request_stop()
# # coord.join(threads)
| 5,750
| 29.754011
| 101
|
py
|
benchmarks
|
benchmarks-master/scripts/util/convert_csv_to_json.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Convert CSV benchmark data to JSON format.
CSV benchmark data has the format:
Description,timestamp,num_batches,time mean value,time sd
JSON benchmark data is in the format of the TestResults proto
converted to JSON.
https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/util/test_log.proto.
"""
import argparse
import csv
from datetime import datetime
import benchmark_util
def get_data_from_csv(csv_reader):
"""Creates a list of StatEntry objects based on data in CSV data.
Input CSV data must be in the format:
Description,timestamp,num_batches,time mean value,time sd
Args:
csv_reader: csv.reader instance.
Returns:
A tuple of datetime timestamp and list of benchmark_util.StatEntry objects.
Raises:
ValueError: if CSV is invalid.
"""
timestamp = None
stat_entries = []
for row in csv_reader:
if len(row) != 5:
raise ValueError('Expected 5 entries per line in the input CSV file, '
'but found %d entries.' % len(row))
if '' in row:
raise ValueError('Found empty entries in row: %s' % row)
# Set timestamp based on the first line in CSV file.
if timestamp is None:
# Example of time formatting: 2017-06-26 02:59:29.325579
timestamp = datetime.strptime(row[1], "%Y-%m-%d %H:%M:%S.%f")
stat_entries.append(
benchmark_util.StatEntry(row[0], float(row[3]), 1))
return timestamp, stat_entries
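# Illustrative example (hypothetical row): the CSV line
#   'alexnet,2017-06-26 02:59:29.325579,10,2.15,0.1'
# yields timestamp datetime(2017, 6, 26, 2, 59, 29, 325579) and a single
# StatEntry('alexnet', 2.15, 1); the num_batches and standard-deviation columns
# are checked for presence but not stored in the StatEntry.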
def main():
with open(FLAGS.input_csv_file, 'r') as csvfile:
csv_reader = csv.reader(csvfile)
timestamp, stat_entries = get_data_from_csv(csv_reader)
benchmark_util.store_data_in_json(
stat_entries, timestamp,
output_file=FLAGS.output_json_file,
test_name=FLAGS.test_name)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.register(
'type', 'bool', lambda v: v.lower() in ('true', 't', 'y', 'yes'))
parser.add_argument(
'--test_name', type=str, default=None, required=True,
help='Name of the test.')
parser.add_argument(
'--input_csv_file', type=str, default=None, required=True,
help='Path to the CSV file.')
parser.add_argument(
'--output_json_file', type=str, default=None, required=True,
help='Path to output JSON file.')
FLAGS, _ = parser.parse_known_args()
main()
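# Usage sketch (hypothetical paths):
#   python convert_csv_to_json.py --test_name=my_benchmark \
#       --input_csv_file=/tmp/results.csv --output_json_file=/tmp/results.json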
| 3,034
| 32.351648
| 89
|
py
|
benchmarks
|
benchmarks-master/scripts/util/convert_csv_to_json_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convert_csv_to_json."""
import csv
import datetime
import unittest
import convert_csv_to_json
class ConvertCsvToJsonTest(unittest.TestCase):
def testSingleEntryCSV(self):
# Description,timestamp,num_batches,time mean value,time sd
csv_reader = csv.reader(
['abc,2017-06-26 02:59:29.325579,10,2.15,0.1'])
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(csv_reader)
self.assertEqual(
datetime.datetime(2017, 6, 26, 2, 59, 29, 325579),
timestamp)
self.assertEqual(1, len(stat_entries))
self.assertEqual('abc', stat_entries[0].name)
self.assertEqual(2.15, stat_entries[0].stat_value)
def testTwoEntryCSV(self):
# Description,timestamp,num_batches,time mean value,time sd
csv_reader = csv.reader(
['abc,2017-06-26 02:59:35.425579,10,2.15,0.1',
'def,2017-06-26 02:59:29.325579,10,10.1,0.1'])
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(csv_reader)
self.assertEqual(
datetime.datetime(2017, 6, 26, 2, 59, 35, 425579),
timestamp)
self.assertEqual(2, len(stat_entries))
self.assertEqual('abc', stat_entries[0].name)
self.assertEqual(2.15, stat_entries[0].stat_value)
self.assertEqual('def', stat_entries[1].name)
self.assertEqual(10.1, stat_entries[1].stat_value)
def testInvalidCSV_LessEntries(self):
csv_reader = csv.reader(
['abc,2017-06-26 02:59:29.325579,10,2.15'])
with self.assertRaises(ValueError):
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(
csv_reader)
def testInvalidCSV_MoreEntries(self):
csv_reader = csv.reader(
['abc,2017-06-26 02:59:29.325579,10,2.15,0.1,extra_entry'])
with self.assertRaises(ValueError):
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(
csv_reader)
def testInvalidCSV_EmptyEntry(self):
csv_reader = csv.reader(
[',2017-06-26 02:59:29.325579,10,2.15,0.1'])
with self.assertRaises(ValueError):
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(
csv_reader)
def testInvalidCSV_InvalidDate(self):
csv_reader = csv.reader(['abc,invaliddate,10,2.15,0.1'])
with self.assertRaises(ValueError):
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(
csv_reader)
def testInvalidCSV_InvalidValue(self):
csv_reader = csv.reader(
['abc,2017-06-26 02:59:29.325579,10,invalidfloat,0.1'])
with self.assertRaises(ValueError):
timestamp, stat_entries = convert_csv_to_json.get_data_from_csv(
csv_reader)
if __name__ == '__main__':
unittest.main()
| 3,356
| 36.719101
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/util/benchmark_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Provides helper functions for distributed benchmarks running on Jenkins."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import calendar
from collections import namedtuple
import logging
import os
from google.protobuf import json_format
from tensorflow.core.util import test_log_pb2
from tensorflow.python.platform import gfile
_OUTPUT_FILE_ENV_VAR = 'TF_DIST_BENCHMARK_RESULTS_FILE'
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
# Represents a single stat_value entry where
# - name is a unique identifier for this specific measurement.
# - stat_value is the measurement to track (for e.g. mean time per iter).
# - num_samples is the number of samples that stat_value is averaged over.
StatEntry = namedtuple(
'StatEntry', ['name', 'stat_value', 'num_samples'])
def store_data_in_json(
stat_entries, timestamp, output_file=None, test_name=None):
"""Stores benchmark results in JSON format.
Args:
stat_entries: list of StatEntry objects.
timestamp: (datetime) start time of the test run.
output_file: if specified, writes benchmark results to output_file.
Otherwise, if TF_DIST_BENCHMARK_RESULTS_FILE environment variable is set,
writes to file specified by this environment variable. If neither
output_file is passed in, nor TF_DIST_BENCHMARK_RESULTS_FILE is set,
does nothing.
test_name: benchmark name. This argument is required if
TF_DIST_BENCHMARK_NAME environment variable is not set.
Raises:
ValueError: when neither test_name is passed in nor
TF_DIST_BENCHMARK_NAME is set.
"""
test_result = test_log_pb2.TestResults(
start_time=calendar.timegm(timestamp.timetuple()))
if not output_file:
if _OUTPUT_FILE_ENV_VAR not in os.environ:
logging.warning(
'Skipping storing json output, since we could not determine '
'location to store results at. Either output_file argument or '
'%s environment variable needs to be set.', _OUTPUT_FILE_ENV_VAR)
return
output_file = os.environ[_OUTPUT_FILE_ENV_VAR]
if test_name is not None:
test_result.name = test_name
elif _TEST_NAME_ENV_VAR in os.environ:
test_result.name = os.environ[_TEST_NAME_ENV_VAR]
else:
raise ValueError(
'Could not determine test name. test_name argument is not passed in '
'and TF_DIST_BENCHMARK_NAME environment variable is not set.')
for stat_entry in stat_entries:
test_result.entries.entry.add(
name=stat_entry.name,
iters=stat_entry.num_samples,
wall_time=stat_entry.stat_value
)
json_test_results = json_format.MessageToJson(test_result)
with gfile.Open(output_file, 'wb') as jsonfile:
jsonfile.write(json_test_results)
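# Usage sketch (hypothetical values): store one measurement under an explicit
# benchmark name instead of relying on the environment variables:
#   store_data_in_json([StatEntry('images_per_sec', 1250.0, 1)],
#                      datetime.datetime(2017, 1, 1),
#                      output_file='/tmp/result.json',
#                      test_name='resnet50_synthetic')
# The written JSON mirrors the TestResults proto: a startTime plus one
# entries.entry carrying name, iters and wallTime (see benchmark_util_test.py).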
| 3,476
| 36.387097
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/util/benchmark_util_test.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for benchmark_util."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import tempfile
import unittest
import benchmark_util
class BenchmarkUtilTest(unittest.TestCase):
def testStoreDataWithNoEntries(self):
with tempfile.NamedTemporaryFile() as temp_file:
timing_entries = []
benchmark_util.store_data_in_json(
timing_entries, datetime.date(2017, 1, 1), temp_file.name)
with open(temp_file.name, 'r') as json_file:
json_output = json.loads(json_file.read())
self.assertEqual('TestBenchmark', json_output['name'])
self.assertEqual(u'1483228800', json_output['startTime'])
def testStoreDataWithEntries(self):
with tempfile.NamedTemporaryFile() as temp_file:
timing_entries = [benchmark_util.StatEntry('test', 0.1, 1)]
benchmark_util.store_data_in_json(
timing_entries, datetime.date(2017, 1, 1), temp_file.name)
with open(temp_file.name, 'r') as json_file:
json_output = json.loads(json_file.read())
self.assertEqual(1, len(json_output['entries']['entry']))
self.assertEqual('test', json_output['entries']['entry'][0]['name'])
self.assertEqual(0.1, json_output['entries']['entry'][0]['wallTime'])
self.assertEqual(u'1', json_output['entries']['entry'][0]['iters'])
self.assertEqual(u'1483228800', json_output['startTime'])
self.assertEqual('TestBenchmark', json_output['name'])
if __name__ == '__main__':
unittest.main()
| 2,276
| 36.95
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/util/__init__.py
| 0
| 0
| 0
|
py
|
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/preprocessing.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Image pre-processing utilities.
"""
import math
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.contrib.image.python.ops import distort_image_ops
from tensorflow.python.layers import utils
from tensorflow.python.ops import data_flow_ops
import cnn_util
import data_utils
def parse_example_proto(example_serialized):
"""Parses an Example proto containing a training example of an image.
The output of the build_image_data.py image preprocessing script is a dataset
containing serialized Example protocol buffers. Each Example proto contains
the following fields:
image/height: 462
image/width: 581
image/colorspace: 'RGB'
image/channels: 3
image/class/label: 615
image/class/synset: 'n03623198'
image/class/text: 'knee pad'
image/object/bbox/xmin: 0.1
image/object/bbox/xmax: 0.9
image/object/bbox/ymin: 0.2
image/object/bbox/ymax: 0.6
image/object/bbox/label: 615
image/format: 'JPEG'
image/filename: 'ILSVRC2012_val_00041207.JPEG'
image/encoded: <JPEG encoded string>
Args:
example_serialized: scalar Tensor tf.string containing a serialized
Example protocol buffer.
Returns:
image_buffer: Tensor tf.string containing the contents of a JPEG file.
label: Tensor tf.int32 containing the label.
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged as
[ymin, xmin, ymax, xmax].
text: Tensor tf.string containing the human-readable label.
"""
# Dense features in Example proto.
feature_map = {
'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
default_value=-1),
'image/class/text': tf.FixedLenFeature([], dtype=tf.string,
default_value=''),
}
sparse_float32 = tf.VarLenFeature(dtype=tf.float32)
# Sparse features in Example proto.
feature_map.update(
{k: sparse_float32 for k in ['image/object/bbox/xmin',
'image/object/bbox/ymin',
'image/object/bbox/xmax',
'image/object/bbox/ymax']})
features = tf.parse_single_example(example_serialized, feature_map)
label = tf.cast(features['image/class/label'], dtype=tf.int32)
xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0)
ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0)
xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0)
ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0)
# Note that we impose an ordering of (y, x) just to make life difficult.
bbox = tf.concat([ymin, xmin, ymax, xmax], 0)
# Force the variable number of bounding boxes into the shape
# [1, num_boxes, coords].
bbox = tf.expand_dims(bbox, 0)
bbox = tf.transpose(bbox, [0, 2, 1])
return features['image/encoded'], label, bbox, features['image/class/text']
_RESIZE_METHOD_MAP = {
'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
'bilinear': tf.image.ResizeMethod.BILINEAR,
'bicubic': tf.image.ResizeMethod.BICUBIC,
'area': tf.image.ResizeMethod.AREA
}
def get_image_resize_method(resize_method, batch_position=0):
"""Get tensorflow resize method.
If resize_method is 'round_robin', return different methods based on batch
position in a round-robin fashion. NOTE: If the batch size is not a multiple
of the number of methods, then the distribution of methods will not be
uniform.
Args:
resize_method: (string) nearest, bilinear, bicubic, area, or round_robin.
batch_position: position of the image in a batch. NOTE: this argument can
be an integer or a tensor
Returns:
one of resize type defined in tf.image.ResizeMethod.
"""
if resize_method != 'round_robin':
return _RESIZE_METHOD_MAP[resize_method]
# return a resize method based on batch position in a round-robin fashion.
resize_methods = list(_RESIZE_METHOD_MAP.values())
def lookup(index):
return resize_methods[index]
def resize_method_0():
return utils.smart_cond(batch_position % len(resize_methods) == 0,
lambda: lookup(0), resize_method_1)
def resize_method_1():
return utils.smart_cond(batch_position % len(resize_methods) == 1,
lambda: lookup(1), resize_method_2)
def resize_method_2():
return utils.smart_cond(batch_position % len(resize_methods) == 2,
lambda: lookup(2), lambda: lookup(3))
# NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
# because TF would not be able to construct a finite graph.
return resize_method_0()
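# Illustrative example: with resize_method='round_robin', the method used for an
# image is chosen by batch_position modulo the number of entries in
# _RESIZE_METHOD_MAP (four), so consecutive batch positions cycle through the
# nearest, bilinear, bicubic and area kernels in the map's iteration order.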
def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32):
"""Decode a JPEG string into one 3-D float image Tensor.
Args:
image_buffer: scalar string Tensor.
scope: Optional scope for op_scope.
Returns:
3-D float Tensor with values ranging from [0, 1).
"""
# with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
# with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
with tf.name_scope(scope or 'decode_jpeg'):
# Decode the string as an RGB JPEG.
# Note that the resulting image contains an unknown height and width
# that is set dynamically by decode_jpeg. In other words, the height
# and width of image is unknown at compile-time.
image = tf.image.decode_jpeg(image_buffer, channels=3,
fancy_upscaling=False,
dct_method='INTEGER_FAST')
# image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
return image
def normalized_image(images):
# Rescale from [0, 255] to [0, 2]
images = tf.multiply(images, 1. / 127.5)
# Rescale to [-1, 1]
return tf.subtract(images, 1.0)
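# Illustrative example: a pixel value of 0 maps to -1.0, 127.5 maps to 0.0 and
# 255 maps to 1.0, i.e. [0, 255] inputs are rescaled to [-1, 1].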
def eval_image(image,
height,
width,
batch_position,
resize_method,
summary_verbosity=0):
"""Get the image for model evaluation.
We preprocess the image similarly to Slim, see
https://github.com/tensorflow/models/blob/master/slim/preprocessing/vgg_preprocessing.py
Validation images do not have bounding boxes, so to crop the image, we first
resize the image such that the aspect ratio is maintained and the resized
height and width are both at least 1.15 times `height` and `width`
respectively. Then, we do a central crop to size (`height`, `width`).
Args:
image: 3-D float Tensor representing the image.
height: The height of the image that will be returned.
width: The width of the image that will be returned.
batch_position: position of the image in a batch, which affects how images
are distorted and resized. NOTE: this argument can be an integer or a
tensor
resize_method: one of the strings 'round_robin', 'nearest', 'bilinear',
'bicubic', or 'area'.
summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
summaries and checkpoints.
Returns:
An image of size (output_height, output_width, 3) that is resized and
cropped as described above.
"""
# TODO(reedwm): Currently we resize then crop. Investigate if it's faster to
# crop then resize.
with tf.name_scope('eval_image'):
if summary_verbosity >= 3:
tf.summary.image(
'original_image', tf.expand_dims(image, 0))
shape = tf.shape(image)
image_height = shape[0]
image_width = shape[1]
image_height_float = tf.cast(image_height, tf.float32)
image_width_float = tf.cast(image_width, tf.float32)
scale_factor = 1.15
# Compute resize_height and resize_width to be the minimum values such that
# 1. The aspect ratio is maintained (i.e. resize_height / resize_width is
# image_height / image_width), and
# 2. resize_height >= height * `scale_factor`, and
# 3. resize_width >= width * `scale_factor`
max_ratio = tf.maximum(height / image_height_float,
width / image_width_float)
resize_height = tf.cast(image_height_float * max_ratio * scale_factor,
tf.int32)
resize_width = tf.cast(image_width_float * max_ratio * scale_factor,
tf.int32)
# Resize the image to shape (`resize_height`, `resize_width`)
image_resize_method = get_image_resize_method(resize_method, batch_position)
distorted_image = tf.image.resize_images(image,
[resize_height, resize_width],
image_resize_method,
align_corners=False)
# Do a central crop of the image to size (height, width).
total_crop_height = (resize_height - height)
crop_top = total_crop_height // 2
total_crop_width = (resize_width - width)
crop_left = total_crop_width // 2
distorted_image = tf.slice(distorted_image, [crop_top, crop_left, 0],
[height, width, 3])
distorted_image.set_shape([height, width, 3])
if summary_verbosity >= 3:
tf.summary.image(
'cropped_resized_image', tf.expand_dims(distorted_image, 0))
image = distorted_image
return image
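# Worked example (illustrative numbers): for a 400x600 source image and a
# 224x224 target, max_ratio = max(224/400, 224/600) = 0.56, so the image is
# resized to roughly int(400*0.56*1.15) x int(600*0.56*1.15) = 257x386
# (aspect ratio preserved) before the central 224x224 crop.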
def train_image(image_buffer,
height,
width,
bbox,
batch_position,
resize_method,
distortions,
scope=None,
summary_verbosity=0,
distort_color_in_yiq=False,
fuse_decode_and_crop=False):
"""Distort one image for training a network.
Distorting images provides a useful technique for augmenting the data
set during training in order to make the network invariant to aspects
of the image that do not affect the label.
Args:
image_buffer: scalar string Tensor representing the raw JPEG image buffer.
height: integer
width: integer
bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
where each coordinate is [0, 1) and the coordinates are arranged
as [ymin, xmin, ymax, xmax].
batch_position: position of the image in a batch, which affects how images
are distorted and resized. NOTE: this argument can be an integer or a
tensor
resize_method: round_robin, nearest, bilinear, bicubic, or area.
distortions: If true, apply full distortions for image colors.
scope: Optional scope for op_scope.
summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
summaries and checkpoints.
distort_color_in_yiq: distort color of input images in YIQ space.
fuse_decode_and_crop: fuse the decode/crop operation.
Returns:
3-D float Tensor of distorted image used for training.
"""
# with tf.op_scope([image, height, width, bbox], scope, 'distort_image'):
# with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
with tf.name_scope(scope or 'distort_image'):
# A large fraction of image datasets contain a human-annotated bounding box
# delineating the region of the image containing the object of interest. We
# choose to create a new bounding box for the object which is a randomly
# distorted version of the human-annotated bounding box that obeys an
# allowed range of aspect ratios, sizes and overlap with the human-annotated
# bounding box. If no box is supplied, then we assume the bounding box is
# the entire image.
sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
tf.image.extract_jpeg_shape(image_buffer),
bounding_boxes=bbox,
min_object_covered=0.1,
aspect_ratio_range=[0.75, 1.33],
area_range=[0.05, 1.0],
max_attempts=100,
use_image_if_no_bounding_boxes=True)
bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
if summary_verbosity >= 3:
image = tf.image.decode_jpeg(image_buffer, channels=3,
dct_method='INTEGER_FAST')
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image_with_distorted_box = tf.image.draw_bounding_boxes(
tf.expand_dims(image, 0), distort_bbox)
tf.summary.image(
'images_with_distorted_bounding_box',
image_with_distorted_box)
# Crop the image to the specified bounding box.
if fuse_decode_and_crop:
offset_y, offset_x, _ = tf.unstack(bbox_begin)
target_height, target_width, _ = tf.unstack(bbox_size)
crop_window = tf.stack([offset_y, offset_x, target_height, target_width])
image = tf.image.decode_and_crop_jpeg(
image_buffer, crop_window, channels=3)
else:
image = tf.image.decode_jpeg(image_buffer, channels=3,
dct_method='INTEGER_FAST')
image = tf.slice(image, bbox_begin, bbox_size)
distorted_image = tf.image.random_flip_left_right(image)
# This resizing operation may distort the images because the aspect
# ratio is not respected.
image_resize_method = get_image_resize_method(resize_method, batch_position)
distorted_image = tf.image.resize_images(
distorted_image, [height, width],
image_resize_method,
align_corners=False)
# Restore the shape since the dynamic slice based upon the bbox_size loses
# the third dimension.
distorted_image.set_shape([height, width, 3])
if summary_verbosity >= 3:
tf.summary.image('cropped_resized_maybe_flipped_image',
tf.expand_dims(distorted_image, 0))
if distortions:
distorted_image = tf.cast(distorted_image, dtype=tf.float32)
# Images values are expected to be in [0,1] for color distortion.
distorted_image /= 255.
# Randomly distort the colors.
distorted_image = distort_color(distorted_image, batch_position,
distort_color_in_yiq=distort_color_in_yiq)
# Note: This ensures the scaling matches the output of eval_image
distorted_image *= 255
if summary_verbosity >= 3:
tf.summary.image(
'final_distorted_image',
tf.expand_dims(distorted_image, 0))
return distorted_image
def distort_color(image, batch_position=0, distort_color_in_yiq=False,
scope=None):
"""Distort the color of the image.
Each color distortion is non-commutative and thus ordering of the color ops
matters. Ideally we would randomly permute the ordering of the color ops.
Rather then adding that level of complication, we select a distinct ordering
of color ops based on the position of the image in a batch.
Args:
image: float32 Tensor containing single image. Tensor values should be in
range [0, 1].
batch_position: the position of the image in a batch. NOTE: this argument
can be an integer or a tensor
distort_color_in_yiq: distort color of input images in YIQ space.
scope: Optional scope for op_scope.
Returns:
color-distorted image
"""
with tf.name_scope(scope or 'distort_color'):
def distort_fn_0(image=image):
"""Variant 0 of distort function."""
image = tf.image.random_brightness(image, max_delta=32. / 255.)
if distort_color_in_yiq:
image = distort_image_ops.random_hsv_in_yiq(
image, lower_saturation=0.5, upper_saturation=1.5,
max_delta_hue=0.2 * math.pi)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
return image
def distort_fn_1(image=image):
"""Variant 1 of distort function."""
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
if distort_color_in_yiq:
image = distort_image_ops.random_hsv_in_yiq(
image, lower_saturation=0.5, upper_saturation=1.5,
max_delta_hue=0.2 * math.pi)
else:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
return image
image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
distort_fn_1)
# The random_* ops do not necessarily clamp.
image = tf.clip_by_value(image, 0.0, 1.0)
return image
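# Illustrative example: images at even batch positions are distorted with
# distort_fn_0 (brightness, then saturation/hue, then contrast) and images at
# odd positions with distort_fn_1 (brightness, then contrast, then
# saturation/hue), per the smart_cond above.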
class BaseImagePreprocess(object):
"""Base class for all image preprocessors."""
def __init__(self,
height,
width,
batch_size,
num_splits,
dtype,
train,
distortions,
resize_method,
shift_ratio=-1,
summary_verbosity=0,
distort_color_in_yiq=True,
fuse_decode_and_crop=True,
depth=3):
self.height = height
self.width = width
self.batch_size = batch_size
self.num_splits = num_splits
self.dtype = dtype
self.train = train
self.resize_method = resize_method
self.shift_ratio = shift_ratio
self.distortions = distortions
self.distort_color_in_yiq = distort_color_in_yiq
self.fuse_decode_and_crop = fuse_decode_and_crop
if self.batch_size % self.num_splits != 0:
raise ValueError(
('batch_size must be a multiple of num_splits: '
'batch_size %d, num_splits: %d') %
(self.batch_size, self.num_splits))
self.batch_size_per_split = self.batch_size // self.num_splits
self.summary_verbosity = summary_verbosity
self.depth = depth
def preprocess(self, image_buffer, bbox, batch_position):
raise NotImplementedError('Must be implemented by subclass.')
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio):
raise NotImplementedError('Must be implemented by subclass.')
def supports_datasets(self):
return False
class RecordInputImagePreprocessor(BaseImagePreprocess):
"""Preprocessor for images with RecordInput format."""
def preprocess(self, image_buffer, bbox, batch_position):
"""Preprocessing image_buffer as a function of its batch position."""
if self.train:
image = train_image(image_buffer, self.height, self.width, bbox,
batch_position, self.resize_method, self.distortions,
None, summary_verbosity=self.summary_verbosity,
distort_color_in_yiq=self.distort_color_in_yiq,
fuse_decode_and_crop=self.fuse_decode_and_crop)
else:
image = tf.image.decode_jpeg(
image_buffer, channels=3, dct_method='INTEGER_FAST')
image = eval_image(image, self.height, self.width, batch_position,
self.resize_method,
summary_verbosity=self.summary_verbosity)
# Note: image is now float32 [height,width,3] with range [0, 255]
# image = tf.cast(image, tf.uint8) # HACK TESTING
normalized = normalized_image(image)
return tf.cast(normalized, self.dtype)
def parse_and_preprocess(self, value, batch_position):
image_buffer, label_index, bbox, _ = parse_example_proto(value)
image = self.preprocess(image_buffer, bbox, batch_position)
return (label_index, image)
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio=-1):
if shift_ratio < 0:
shift_ratio = self.shift_ratio
with tf.name_scope('batch_processing'):
# Build final results per split.
images = [[] for _ in range(self.num_splits)]
labels = [[] for _ in range(self.num_splits)]
if use_datasets:
ds_iterator = data_utils.create_iterator(
self.batch_size, self.num_splits, self.batch_size_per_split,
self.parse_and_preprocess, dataset, subset, self.train, cache_data)
for d in xrange(self.num_splits):
labels[d], images[d] = ds_iterator.get_next()
else:
record_input = data_flow_ops.RecordInput(
file_pattern=dataset.tf_record_pattern(subset),
seed=301,
parallelism=64,
buffer_size=10000,
batch_size=self.batch_size,
shift_ratio=shift_ratio,
name='record_input')
records = record_input.get_yield_op()
records = tf.split(records, self.batch_size, 0)
records = [tf.reshape(record, []) for record in records]
for idx in xrange(self.batch_size):
value = records[idx]
(label, image) = self.parse_and_preprocess(value, idx)
split_index = idx % self.num_splits
labels[split_index].append(label)
images[split_index].append(image)
for split_index in xrange(self.num_splits):
if not use_datasets:
images[split_index] = tf.parallel_stack(images[split_index])
labels[split_index] = tf.concat(labels[split_index], 0)
images[split_index] = tf.reshape(
images[split_index],
shape=[self.batch_size_per_split, self.height, self.width,
self.depth])
labels[split_index] = tf.reshape(labels[split_index],
[self.batch_size_per_split])
return images, labels
def supports_datasets(self):
return True
class ImagenetPreprocessor(RecordInputImagePreprocessor):
def preprocess(self, image_buffer, bbox, batch_position):
# pylint: disable=g-import-not-at-top
try:
from official.resnet.imagenet_preprocessing import preprocess_image
except ImportError:
tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.')
raise
if self.train:
image = preprocess_image(
image_buffer, bbox, self.height, self.width, self.depth,
is_training=True)
else:
image = preprocess_image(
image_buffer, bbox, self.height, self.width, self.depth,
is_training=False)
return tf.cast(image, self.dtype)
class Cifar10ImagePreprocessor(BaseImagePreprocess):
"""Preprocessor for Cifar10 input images."""
def _distort_image(self, image):
"""Distort one image for training a network.
Adopted the standard data augmentation scheme that is widely used for
this dataset: the images are first zero-padded with 4 pixels on each side,
then randomly cropped to again produce distorted images; half of the images
are then horizontally mirrored.
Args:
image: input image.
Returns:
distorted image.
"""
image = tf.image.resize_image_with_crop_or_pad(
image, self.height + 8, self.width + 8)
distorted_image = tf.random_crop(image,
[self.height, self.width, self.depth])
# Randomly flip the image horizontally.
distorted_image = tf.image.random_flip_left_right(distorted_image)
if self.summary_verbosity >= 3:
tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0))
return distorted_image
def _eval_image(self, image):
"""Get the image for model evaluation."""
distorted_image = tf.image.resize_image_with_crop_or_pad(
image, self.width, self.height)
if self.summary_verbosity >= 3:
tf.summary.image('cropped.image', tf.expand_dims(distorted_image, 0))
return distorted_image
def preprocess(self, raw_image):
"""Preprocessing raw image."""
if self.summary_verbosity >= 3:
tf.summary.image('raw.image', tf.expand_dims(raw_image, 0))
if self.train and self.distortions:
image = self._distort_image(raw_image)
else:
image = self._eval_image(raw_image)
normalized = normalized_image(image)
return tf.cast(normalized, self.dtype)
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio=-1):
# TODO(jsimsa): Implement datasets code path
del use_datasets, cache_data, shift_ratio
with tf.name_scope('batch_processing'):
all_images, all_labels = dataset.read_data_files(subset)
all_images = tf.constant(all_images)
all_labels = tf.constant(all_labels)
input_image, input_label = tf.train.slice_input_producer(
[all_images, all_labels])
input_image = tf.cast(input_image, self.dtype)
input_label = tf.cast(input_label, tf.int32)
# Ensure that the random shuffling has good mixing properties.
min_fraction_of_examples_in_queue = 0.4
min_queue_examples = int(dataset.num_examples_per_epoch(subset) *
min_fraction_of_examples_in_queue)
raw_images, raw_labels = tf.train.shuffle_batch(
[input_image, input_label], batch_size=self.batch_size,
capacity=min_queue_examples + 3 * self.batch_size,
min_after_dequeue=min_queue_examples)
images = [[] for i in range(self.num_splits)]
labels = [[] for i in range(self.num_splits)]
# Create a list of size batch_size, each containing one image of the
# batch. Without the unstack call, raw_images[i] would still access the
# same image via a strided_slice op, but would be slower.
raw_images = tf.unstack(raw_images, axis=0)
raw_labels = tf.unstack(raw_labels, axis=0)
for i in xrange(self.batch_size):
split_index = i % self.num_splits
# The raw image read from data has the format [depth, height, width]
# reshape to the format returned by minibatch.
raw_image = tf.reshape(raw_images[i],
[dataset.depth, dataset.height, dataset.width])
raw_image = tf.transpose(raw_image, [1, 2, 0])
image = self.preprocess(raw_image)
images[split_index].append(image)
labels[split_index].append(raw_labels[i])
for split_index in xrange(self.num_splits):
images[split_index] = tf.parallel_stack(images[split_index])
labels[split_index] = tf.parallel_stack(labels[split_index])
return images, labels
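# Illustrative sketch (not part of the benchmark code): the _distort_image()
# docstring above describes the standard CIFAR-10 augmentation (zero-pad 4
# pixels per side, random crop back to the original size, random horizontal
# flip). The NumPy version below shows the same scheme outside TensorFlow;
# the function name and the `rng` parameter are invented for this example.
import numpy as np  # assumed available; used only for this illustration
def _example_cifar_augment(image, rng=np.random):
  """Pad 4 px per side, random-crop to the original size, flip half the time."""
  height, width, _ = image.shape
  padded = np.pad(image, ((4, 4), (4, 4), (0, 0)), mode='constant')
  top = rng.randint(0, 9)    # crop offset within the 8 extra padded rows
  left = rng.randint(0, 9)   # crop offset within the 8 extra padded columns
  crop = padded[top:top + height, left:left + width, :]
  if rng.rand() < 0.5:       # mirror half of the images horizontally
    crop = crop[:, ::-1, :]
  return crop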
class SyntheticImagePreprocessor(BaseImagePreprocess):
"""Preprocessor used for images and labels."""
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio=-1):
"""Get synthetic image batches."""
del subset, use_datasets, cache_data, shift_ratio
input_shape = [self.batch_size, self.height, self.width, self.depth]
images = tf.truncated_normal(
input_shape,
dtype=self.dtype,
stddev=1e-1,
name='synthetic_images')
labels = tf.random_uniform(
[self.batch_size],
minval=0,
maxval=dataset.num_classes - 1,
dtype=tf.int32,
name='synthetic_labels')
# Note: This results in a H2D copy, but no computation
# Note: This avoids recomputation of the random values, but still
# results in a H2D copy.
images = tf.contrib.framework.local_variable(images, name='images')
labels = tf.contrib.framework.local_variable(labels, name='labels')
if self.num_splits == 1:
images_splits = [images]
labels_splits = [labels]
else:
images_splits = tf.split(images, self.num_splits, 0)
labels_splits = tf.split(labels, self.num_splits, 0)
return images_splits, labels_splits
class TestImagePreprocessor(BaseImagePreprocess):
"""Preprocessor used for testing.
set_fake_data() sets which images and labels will be output by minibatch(),
and must be called before minibatch(). This allows tests to easily specify
a set of images to use for training, without having to create any files.
Queue runners must be started for this preprocessor to work.
"""
def __init__(self,
height,
width,
batch_size,
num_splits,
dtype,
train=None,
distortions=None,
resize_method=None,
shift_ratio=0,
summary_verbosity=0,
distort_color_in_yiq=False,
fuse_decode_and_crop=False):
super(TestImagePreprocessor, self).__init__(
height, width, batch_size, num_splits, dtype, train, distortions,
resize_method, shift_ratio, summary_verbosity=summary_verbosity,
distort_color_in_yiq=distort_color_in_yiq,
fuse_decode_and_crop=fuse_decode_and_crop)
self.expected_subset = None
def set_fake_data(self, fake_images, fake_labels):
assert len(fake_images.shape) == 4
assert len(fake_labels.shape) == 1
num_images = fake_images.shape[0]
assert num_images == fake_labels.shape[0]
assert num_images % self.batch_size == 0
self.fake_images = fake_images
self.fake_labels = fake_labels
def minibatch(self, dataset, subset, use_datasets, cache_data,
shift_ratio=0):
"""Get test image batches."""
del dataset, use_datasets, cache_data
if (not hasattr(self, 'fake_images') or
not hasattr(self, 'fake_labels')):
raise ValueError('Must call set_fake_data() before calling minibatch '
'on TestImagePreprocessor')
if self.expected_subset is not None:
assert subset == self.expected_subset
shift_ratio = shift_ratio or self.shift_ratio
fake_images = cnn_util.roll_numpy_batches(self.fake_images, self.batch_size,
shift_ratio)
fake_labels = cnn_util.roll_numpy_batches(self.fake_labels, self.batch_size,
shift_ratio)
with tf.name_scope('batch_processing'):
image_slice, label_slice = tf.train.slice_input_producer(
[fake_images, fake_labels],
shuffle=False,
name='image_slice')
raw_images, raw_labels = tf.train.batch(
[image_slice, label_slice], batch_size=self.batch_size,
name='image_batch')
images = [[] for _ in range(self.num_splits)]
labels = [[] for _ in range(self.num_splits)]
for i in xrange(self.batch_size):
split_index = i % self.num_splits
raw_image = tf.cast(raw_images[i], self.dtype)
images[split_index].append(raw_image)
labels[split_index].append(raw_labels[i])
for split_index in xrange(self.num_splits):
images[split_index] = tf.parallel_stack(images[split_index])
labels[split_index] = tf.parallel_stack(labels[split_index])
normalized = normalized_image(images)
return tf.cast(normalized, self.dtype), labels
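# Illustrative sketch (not part of the benchmark code): set_fake_data() above
# requires a 4-D image array and a 1-D label array with matching first
# dimensions whose length is a multiple of batch_size. The helper below builds
# arrays that satisfy those assertions; the shapes are arbitrary example values
# and the `_example_*` name is invented here.
import numpy as np  # assumed available; used only for this illustration
def _example_fake_data(batch_size=4, num_batches=2, height=8, width=8, depth=3):
  """Return (fake_images, fake_labels) shaped to satisfy set_fake_data()."""
  num_images = batch_size * num_batches
  fake_images = np.zeros((num_images, height, width, depth), dtype=np.float32)
  fake_labels = np.zeros((num_images,), dtype=np.int32)
  assert len(fake_images.shape) == 4 and len(fake_labels.shape) == 1
  assert fake_images.shape[0] == fake_labels.shape[0]
  assert fake_images.shape[0] % batch_size == 0
  return fake_images, fake_labels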
| 31,472
| 39.505792
| 90
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/power_logger.py
|
""" Descr:
A power consumption logger for nvidia gpus in a system using python.
author:
Ioannis Athanasiadis (supernlogn)
"""
import numpy as np
import subprocess
import threading
import time, datetime
import re
from json import dump
class NvidiaPowerReader(object):
def __init__(self, time_step=1, num_gpus=1):
self.p = subprocess.Popen("nvidia-smi -l " + str(time_step), shell=True, stdout=subprocess.PIPE)
self.pattern = re.compile(r"[0-9]*?[W]{1,1}[\s]{1,1}[/]{1,1}[\s]{1,1}[0-9]*?[W]{1,1}")  # raw string so \s is not treated as a string escape
self.measurements = []
self.time_step = time_step
self.num_gpus = num_gpus
self.last_measurements = []
def read_once(self):
"""
read one measurement from all available gpus
"""
time_start = time.time()
i = 0
s = "123123123123123"
measurement_record = []
while( len(s) >= 10 and time.time() - time_start < self.time_step):
s = self.p.stdout.readline()
m = re.findall(self.pattern, s)
if( len(m) != 0 ):
measurement = float(m[0].partition('W /')[0])
if( i == 0 ):
measurement_record.append(datetime.datetime.now())
measurement_record.append(measurement)
else:
measurement_record.append(measurement)
i += 1
if i == self.num_gpus:
break
while( len(measurement_record) != self.num_gpus + 1 ):
measurement_record.append(-1.0)
return measurement_record
def read_multi_synch(self, num_reads):
measurements = [self.read_once() for _ in range(num_reads)]
self.last_measurements = measurements
return measurements
def read_multi_asynch(self, num_reads, delay=5):
self.t = threading.Thread(target=self.read_multi_synch, args=(num_reads,))
timer = threading.Timer(delay, self.t.start)
timer.start()
# results will be in self.last_measurements
return
def filter_measurements(self, measurements):
filtered_measurements = []
for m in measurements:
if -1.0 not in m[1:]:  # m is a plain list; list == scalar is not element-wise, so test membership
filtered_measurements.append(m)
return np.array(filtered_measurements)
def power_stats(self, measurements=[], filter=True):
if(measurements == []):
measurements = self.last_measurements
if filter:
filtered_measurements = self.filter_measurements(measurements)
else:
filtered_measurements = np.array(measurements)
means = [np.mean(filtered_measurements[:,i]) for i in range(1, self.num_gpus+1)]
vars = [np.std(filtered_measurements[:,i]) for i in range(1, self.num_gpus+1)]
return means, vars
def write_results_to_file(self, logname, measurements=[], filter=True):
if(measurements == []):
measurements = self.last_measurements
if filter:
filtered_measurements = self.filter_measurements(measurements)
else:
filtered_measurements = np.array(measurements)
with open(logname + "_power_log.json", "w") as f:
obj_to_dump = {"times": [str(timestamp) for timestamp in filtered_measurements[:,0].tolist()]}
for i in range(1, self.num_gpus+1):
obj_to_dump["gpu_%d" % i] = filtered_measurements[:,i].tolist()
means, vars = self.power_stats(filtered_measurements, filter=False)
obj_to_dump["mean_powers"] = means
obj_to_dump["vars"] = vars
dump(obj_to_dump, f)
def stop(self):
self.t.join()
self.p.kill()
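# Illustrative usage sketch (assumes nvidia-smi is on the PATH and at least one
# GPU is present; the log name "example" and the numbers below are arbitrary).
if __name__ == '__main__':
  reader = NvidiaPowerReader(time_step=1, num_gpus=1)
  # Take 10 synchronous readings (roughly one per second), then report and log.
  measurements = reader.read_multi_synch(10)
  means, stds = reader.power_stats(measurements)
  print("mean power per GPU (W): %s" % means)
  # Writes example_power_log.json with the raw samples and the summary stats.
  reader.write_results_to_file("example", measurements)
  # stop() also joins the reader thread created by read_multi_asynch(); after a
  # purely synchronous run it is enough to terminate the nvidia-smi process.
  reader.p.kill()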
| 3,372
| 33.418367
| 102
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/constants.py
|
"""Constants used in tf_cnn_benchmarks."""
from enum import Enum
class NetworkTopology(str, Enum):
"""Network topology describes how multiple GPUs are inter-connected.
"""
# DGX-1 uses hybrid cube mesh topology with the following device peer to peer
# matrix:
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y Y N N N
# 1: Y Y Y Y N Y N N
# 2: Y Y Y Y N N Y N
# 3: Y Y Y Y N N N Y
# 4: Y N N N Y Y Y Y
# 5: N Y N N Y Y Y Y
# 6: N N Y N Y Y Y Y
# 7: N N N Y Y Y Y Y
DGX1 = "dgx1"
# V100 in GCP are connected with the following device peer to peer matrix.
# In this topology, bandwidth of the connection depends on if it uses NVLink
# or PCIe link.
# DMA: 0 1 2 3 4 5 6 7
# 0: Y Y Y Y N Y N N
# 1: Y Y Y Y N N N N
# 2: Y Y Y Y N N N Y
# 3: Y Y Y Y N N N N
# 4: N N N N Y Y Y Y
# 5: Y N N N Y Y Y Y
# 6: N N N N Y Y Y Y
# 7: N N Y N Y Y Y Y
GCP_V100 = "gcp_v100"
| 936
| 25.771429
| 79
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/tf_cnn_benchmarks.py
|
"""Benchmark script for TensorFlow.
See the README for more information.
"""
from __future__ import print_function
from absl import app
from absl import flags as absl_flags
import tensorflow as tf
import benchmark_cnn
import cnn_util
import flags
import sys
from cnn_util import log_fn
sys.path.append("/usr/local/cuda/extras/CUPTI/lib64/")
flags.define_flags()
for name in flags.param_specs.keys():
absl_flags.declare_key_flag(name)
def main(positional_arguments):
# Command-line arguments like '--distortions False' are equivalent to
# '--distortions=True False', where False is a positional argument. To prevent
# this from silently running with distortions, we do not allow positional
# arguments.
assert len(positional_arguments) >= 1
if len(positional_arguments) > 1:
raise ValueError('Received unknown positional arguments: %s'
% positional_arguments[1:])
params = benchmark_cnn.make_params_from_flags()
params = benchmark_cnn.setup(params)
bench = benchmark_cnn.BenchmarkCNN(params)
tfversion = cnn_util.tensorflow_version_tuple()
log_fn('TensorFlow: %i.%i' % (tfversion[0], tfversion[1]))
bench.print_info()
bench.run()
if __name__ == '__main__':
app.run(main) # Raises error on invalid flags, unlike tf.app.run()
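# Example invocation (illustrative only; the flag values are arbitrary and
# assume the standard flags defined in flags.py / benchmark_cnn.py):
#
#   python tf_cnn_benchmarks.py --num_gpus=1 --batch_size=32 --model=resnet50 \
#       --variable_update=parameter_server
#
# Because main() above rejects positional arguments, boolean flags such as
# --distortions must be passed as --distortions=False rather than
# '--distortions False'.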
| 1,290
| 27.065217
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/variable_mgr_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for VariableMgr."""
from __future__ import print_function
import collections as pycoll
import operator
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import data_flow_ops
PS_SHADOW_VAR_PREFIX = 'ps_var'
AutoLossScaleParams = pycoll.namedtuple(
'AutoLossScaleParams',
[
# If true, enable automatic loss scaling.
'enable_auto_loss_scale',
# The value to scale the loss before computing gradients.
'loss_scale',
# Number of normal steps with the current `loss_scale`.
'loss_scale_normal_steps',
# Increase loss scale every n steps.
'inc_loss_scale_every_n',
# If true, the current worker is chief. The current implementation
# relies on the chief to update loss_scale value, but in future, we
# might change this to ask the parameter server to update loss_scales
# for better performance.
# TODO(tanmingxing): remove this if loss_scale is updated in ps.
'is_chief',
])
def get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n):
"""Returns the update op for loss scaling variables.
We maintain the counter `loss_scale_normal_steps` to count the number of steps
we have been using the current `loss_scale`. In most cases, this function
increments `loss_scale_normal_steps`. However, if `loss_scale_normal_steps` is
greater than the threshold `inc_loss_scale_every_n`, we double `loss_scale`
and reset `loss_scale_normal_steps` to zero.
This op is only called if the gradients don't have any infs or nans. Instead,
if infs or nans occur in the gradients, we immediately halve `loss_scale` and
reset `loss_scale_normal_steps` to zero.
Args:
loss_scale: a tf.Variable representing the loss_scale value.
loss_scale_normal_steps: a tf.Variable representing the number of training
steps that have run since the loss_scale last changed.
inc_loss_scale_every_n: a Python integer threshold. `loss_scale` is
increased every `inc_loss_scale_every_n` steps, unless the gradients have
infs or nans.
Returns:
An op for updating `loss_scale` and `loss_scale_normal_steps`.
"""
def increment_loss_scale_normal_steps_func():
return tf.group(loss_scale_normal_steps.assign_add(1))
def increase_loss_scale_func():
return tf.group(
tf.assign(loss_scale_normal_steps, 0),
tf.assign(loss_scale, loss_scale * 2))
# true_fn and false_fn must have the same type.
return tf.cond(loss_scale_normal_steps < inc_loss_scale_every_n,
increment_loss_scale_normal_steps_func,
increase_loss_scale_func)
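# Illustrative sketch (not part of the original file): the same update policy
# as get_loss_scale_update_op() above, plus the halving branch described in its
# docstring, expressed with plain Python numbers instead of TF ops. The
# `_example_*` name is invented for this illustration.
def _example_loss_scale_step(loss_scale, normal_steps, inc_every_n,
                             grad_has_inf_nan):
  """One step of the automatic loss-scaling policy on plain Python values."""
  if grad_has_inf_nan:
    # Bad gradients: halve the scale and restart the counter.
    return loss_scale / 2., 0
  if normal_steps < inc_every_n:
    # Healthy step: just count it.
    return loss_scale, normal_steps + 1
  # Enough healthy steps in a row: double the scale and restart the counter.
  return loss_scale * 2., 0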
def append_gradients_with_loss_scale(training_ops, get_apply_gradients_ops_func,
loss_scale_params, grad_has_inf_nan):
"""Selectively appends gradients update ops with loss scaling.
Args:
training_ops: a list of training ops to be executed.
get_apply_gradients_ops_func: a function that returns a list of ops for
applying gradients. Here, we must pass a function instead of the actual
list of ops; otherwise, those ops would be executed unconditionally due to
the semantics of tf.cond.
loss_scale_params: An AutoLossScaleParams tuple.
grad_has_inf_nan: Boolean tensor indicating whether the gradients have infs
or nans.
"""
is_chief = loss_scale_params.is_chief
loss_scale = loss_scale_params.loss_scale
loss_scale_normal_steps = loss_scale_params.loss_scale_normal_steps
inc_loss_scale_every_n = loss_scale_params.inc_loss_scale_every_n
enable_auto_loss_scale = loss_scale_params.enable_auto_loss_scale
if loss_scale is None or not enable_auto_loss_scale or not is_chief:
training_ops.extend(get_apply_gradients_ops_func())
else:
# If nans/infs occurred, skip applying gradients and instead update
# loss_scale (halve loss_scale and reset loss_scale_normal_steps to zero).
def update_op_if_nan_or_inf():
"""Update loss_scale and discard gradients if nans/infs occurred."""
return tf.group(
tf.assign(loss_scale, loss_scale / 2.),
tf.assign(loss_scale_normal_steps, 0))
# Otherwise, apply gradients, and update loss_scale and
# loss_scale_normal_steps.
def update_op_if_no_nan_or_inf():
"""Apply gradients, and update loss scaling."""
return tf.group(
get_loss_scale_update_op(loss_scale, loss_scale_normal_steps,
inc_loss_scale_every_n),
*get_apply_gradients_ops_func())
# TODO(tanmingxing): Add support for independent and distributed all_reduce.
assert grad_has_inf_nan is not None
update_op = tf.cond(
grad_has_inf_nan,
update_op_if_nan_or_inf,
update_op_if_no_nan_or_inf,
)
training_ops.append(update_op)
# To be used with custom_getter on tf.get_variable.
class OverrideCachingDevice(object):
"""Variable getter which caches variables on the least loaded device.
Variables smaller than a certain threshold are cached on a single specific
device, as specified in the constructor. All other variables are load balanced
across a pool of devices, by caching each variable on the least loaded device.
"""
def __init__(self, devices, device_for_small_variables,
small_variable_size_threshold):
self.devices = devices
self.sizes = [0] * len(self.devices)
self.device_for_small_variables = device_for_small_variables
self.small_variable_size_threshold = small_variable_size_threshold
def __call__(self, getter, *args, **kwargs):
size = tf.TensorShape(kwargs['shape']).num_elements()
if size < self.small_variable_size_threshold:
device_name = self.device_for_small_variables
else:
device_index, _ = min(enumerate(self.sizes), key=operator.itemgetter(1))
device_name = self.devices[device_index]
self.sizes[device_index] += size
kwargs['caching_device'] = device_name
var = getter(*args, **kwargs)
return var
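# Illustrative sketch (not part of the original file): the load-balancing rule
# used by OverrideCachingDevice above, reduced to plain Python. The sizes and
# the `_example_*` name are made up for this example.
def _example_pick_least_loaded(sizes, new_size):
  """Return the index of the least-loaded device and update its running load."""
  device_index = min(range(len(sizes)), key=lambda i: sizes[i])
  sizes[device_index] += new_size
  return device_index
# With loads [0, 10, 5], a 4-unit variable is cached on device 0, leaving the
# loads at [4, 10, 5]:
#   _example_pick_least_loaded([0, 10, 5], 4) == 0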
# To be used with custom_getter on tf.get_variable. Ensures the created variable
# is in LOCAL_VARIABLES and not GLOBAL_VARIABLES collection.
class OverrideToLocalVariableIfNotPsVar(object):
# args and kwargs come from the custom_getter interface for Tensorflow
# variables, and matches tf.get_variable's signature, with the addition of
# 'getter' at the beginning.
def __call__(self, getter, name, *args, **kwargs):
if name.startswith(PS_SHADOW_VAR_PREFIX):
return getter(*args, **kwargs)
if 'collections' in kwargs:
collections = kwargs['collections']
if not collections:
collections = [tf.GraphKeys.GLOBAL_VARIABLES]
else:
collections = collections[:]
collections.remove(tf.GraphKeys.GLOBAL_VARIABLES)
collections.append(tf.GraphKeys.LOCAL_VARIABLES)
kwargs['collections'] = list(collections)
return getter(name, *args, **kwargs)
class ParamServerDeviceSetter(object):
"""Helper class to assign variables on the least loaded ps-device."""
def __init__(self, worker_device, ps_devices):
"""Initializer for ParamServerDevicSetter.
Args:
worker_device: the device to use for computer ops.
ps_devices: a list of device to use for Variable ops. Each variable is
assigned to the least loaded device.
"""
self.ps_devices = ps_devices
self.worker_device = worker_device
self.ps_sizes = [0] * len(self.ps_devices)
def __call__(self, op):
if op.device:
return op.device
if op.type not in ['Variable', 'VariableV2']:
return self.worker_device
device_index, _ = min(enumerate(self.ps_sizes), key=operator.itemgetter(1))
device_name = self.ps_devices[device_index]
var_size = op.outputs[0].get_shape().num_elements()
self.ps_sizes[device_index] += var_size
return device_name
class StagedModelVariable(object):
"""Staging variable wrapper that decouples reads and updates.
This class represents a variable through a staging buffer. Reads from this
variable directly gets from the staging buffer. Updates are stacked into
another staging buffer, and will be processed later.
"""
def __init__(self, real_var, var_stage_get, variable_mgr):
"""Initializer for the model variables through a staging buffer.
Args:
real_var: the underlying real variable.
var_stage_get: the read op from the staging buffer.
variable_mgr: the parent variable-manager.
"""
self.real_var = real_var
self.var_stage_get = var_stage_get
self.variable_mgr = variable_mgr
def _value(self):
"""The read access of this variable. The content from the staging buffer."""
return self.var_stage_get
def _ref(self):
"""Return the underlying variable ref, required by tf.colocate_with."""
return self.real_var._ref() # pylint: disable=protected-access
def read_value(self):
"""Mimics tf.Variable.read_value()."""
return tf.identity(self.var_stage_get, name='read')
@property
def dtype(self):
"""Return the non-reference dtype."""
return self.var_stage_get.dtype
def assign_sub(self, delta, name=None):
"""Mimic the updates to the variable.
Args:
delta: is pushed into a staging buffer and will be pumped later.
name: currently ignored; the names of the ops and of the StagingArea are
computed without using this passed name.
Returns:
The actual updates. The colocation constraint will be reapplied.
"""
# This parameter is ignored: the StagingArea only supports setting
# the shared name, not the names of individual ops it uses.
del name
# colocate_with(None, True) clears the colocation constraints.
# Push the delta into a staging buffer.
with ops.colocate_with(None, True), tf.device(self.var_stage_get.device):
delta_staging_area = data_flow_ops.StagingArea(
[self.var_stage_get.dtype], shapes=[self.var_stage_get.shape])
delta_put_op = delta_staging_area.put([delta])
self.variable_mgr.staging_delta_ops.append(delta_put_op)
delta_get_op = delta_staging_area.get()[0]
# Return the actual updates. The colocation constraint will be reapplied.
return self.real_var.assign_sub(delta_get_op)
@staticmethod
# pylint: disable=bad-staticmethod-argument,invalid-name
def _TensorConversionFunction(self, dtype=None, name=None, as_ref=False):
"""Utility function for converting a StagedModelVariable to a Tensor."""
del dtype, name # unused: this function returns the cached ref or value.
if as_ref:
return self._ref()
else:
return self._value()
ops.register_tensor_conversion_function(
StagedModelVariable, StagedModelVariable._TensorConversionFunction) # pylint: disable=protected-access
class StagedVariableGetter(object):
"""A variable getter through staging buffers on devices.
Instead of a caching device, this getter tracks where the variable is used.
And on each device, it goes through a staging buffer.
"""
def __init__(self, device_num, devices, cpu_device, variable_mgr):
"""Initializer for StagedVariableGetter.
Args:
device_num: the current device index.
devices: a list of all the devices to build towers.
cpu_device: a cpu_device for this replica. If None, no cpu-caching is
done.
variable_mgr: the parent variable manager.
"""
self.device_num = device_num
self.devices = devices
self.cpu_device = cpu_device
self.variable_mgr = variable_mgr
def __call__(self, getter, name, *args, **kwargs):
staging_ops = self.variable_mgr.staging_vars_on_devices[self.device_num]
if name in staging_ops:
put_op, get_op = staging_ops[name]
return get_op
real_var = getter(name, *args, **kwargs)
shape = kwargs['shape']
dtype = kwargs['dtype']
trainable = kwargs['trainable']
if self.cpu_device:
with tf.device(self.cpu_device):
# This helps copying the weights from the parameter server to this server
# only once.
if name in self.variable_mgr.staged_vars_on_cpu:
cpu_var = self.variable_mgr.staged_vars_on_cpu[name]
else:
cpu_var = tf.identity(real_var)
self.variable_mgr.staged_vars_on_cpu[name] = cpu_var
var_to_stage = cpu_var
else:
var_to_stage = tf.identity(real_var) # de-reference the variable.
with tf.device(self.devices[self.device_num]):
staging_area = data_flow_ops.StagingArea([dtype], shapes=[shape])
put_op = staging_area.put([var_to_stage])
get_op = staging_area.get()[0]
staging_ops[name] = (put_op, get_op)
if trainable:
# For trainable variables, they are managed separately through
# apply_gradients.
return get_op
else:
# For other shadow variables, the access is decoupled through a wrapper
# class.
return StagedModelVariable(real_var, get_op, self.variable_mgr)
def trainable_variables_on_device(self, rel_device_num, abs_device_num,
writable):
"""Return the set of trainable variables on the specified device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether the returned variables is writable or read-only.
Returns:
Return the set of trainable variables on the specified device.
"""
del abs_device_num
params_refs = tf.trainable_variables()
if writable:
return params_refs
params = []
for param in params_refs:
var_name = param.name.split(':')[0]
_, var_get_op = self.variable_mgr.staging_vars_on_devices[rel_device_num][
var_name]
params.append(var_get_op)
return params
def aggregate_gradients_using_copy_with_device_selection(
benchmark_cnn, tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, controlling device for the aggregation.
Args:
benchmark_cnn: benchmark_cnn class.
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. has_nan_or_inf indicates whether the grads contain nans or infs.
"""
if benchmark_cnn.local_parameter_device_flag == 'gpu':
avail_devices = benchmark_cnn.raw_devices
else:
avail_devices = [benchmark_cnn.param_server_device]
agg_grads = []
has_nan_or_inf_list = []
for i, single_grads in enumerate(zip(*tower_grads)):
with tf.device(avail_devices[i % len(avail_devices)]):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy_with_variable_colocation(
tower_grads, use_mean, check_inf_nan):
"""Aggregate gradients, colocating computation with the gradient's variable.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients. All variables
of the same gradient across towers must be the same (that is,
tower_grads[x][a][1] == tower_grads[y][a][1] for all indices x, y, and a)
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: If true, check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. has_nan_or_inf indicates whether the grads contain nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
# Note that each single_grads looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
var = single_grads[0][1]
for _, v in single_grads:
assert v == var
with tf.device(var.device):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_gradients_using_copy(tower_grads, use_mean, check_inf_nan):
"""Calculate the average gradient for each shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over towers. The inner list is over individual gradients.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. has_nan_or_inf indicates whether the grads contain nans or infs.
"""
agg_grads = []
has_nan_or_inf_list = []
for single_grads in zip(*tower_grads):
grad_and_var, has_nan_or_inf = aggregate_single_gradient_using_copy(
single_grads, use_mean, check_inf_nan)
agg_grads.append(grad_and_var)
has_nan_or_inf_list.append(has_nan_or_inf)
if check_inf_nan:
return agg_grads, tf.reduce_any(has_nan_or_inf_list)
else:
return agg_grads, None
def aggregate_single_gradient_using_copy(grad_and_vars, use_mean,
check_inf_nan):
"""Calculate the average gradient for a shared variable across all towers.
Note that this function provides a synchronization point across all towers.
Args:
grad_and_vars: A list or tuple of (gradient, variable) tuples. Each
(gradient, variable) pair within the outer list represents the gradient
of the variable calculated for a single tower, and the number of pairs
equals the number of towers.
use_mean: if True, mean is taken, else sum of gradients is taken.
check_inf_nan: check grads for nans and infs.
Returns:
The tuple ([(average_gradient, variable),], has_nan_or_inf) where the
gradient has been averaged across all towers. The variable is chosen from
the first tower. has_nan_or_inf indicates whether the grads contain nans or infs.
"""
grads = [g for g, _ in grad_and_vars]
grad = tf.add_n(grads)
if use_mean and len(grads) > 1:
grad = tf.multiply(grad, 1.0 / len(grads))
v = grad_and_vars[0][1]
if check_inf_nan:
has_nan_or_inf = tf.logical_not(tf.reduce_all(tf.is_finite(grads)))
return (grad, v), has_nan_or_inf
else:
return (grad, v), None
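# Illustrative sketch (not part of the original file): the same averaging that
# aggregate_single_gradient_using_copy() performs with tf.add_n, shown on plain
# NumPy arrays. Names and values are invented for this example.
import numpy as np  # assumed available; used only for this illustration
def _example_average_gradients(tower_gradients):
  """Sum the per-tower gradients of one variable and divide by the tower count."""
  summed = np.sum(tower_gradients, axis=0)
  return summed / float(len(tower_gradients))
# Two towers produce gradients [1, 2] and [3, 4] for the same variable, so the
# aggregated (mean) gradient is [2, 3]:
#   _example_average_gradients([np.array([1., 2.]), np.array([3., 4.])])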
| 19,904
| 37.500967
| 107
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/convnet_builder.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""CNN builder."""
from __future__ import print_function
from collections import defaultdict
import contextlib
import numpy as np
import tensorflow as tf
from tensorflow.python.layers import convolutional as conv_layers
from tensorflow.python.layers import core as core_layers
from tensorflow.python.layers import pooling as pooling_layers
from tensorflow.python.training import moving_averages
class ConvNetBuilder(object):
"""Builder of cnn net."""
def __init__(self,
input_op,
input_nchan,
phase_train,
use_tf_layers,
data_format='NCHW',
dtype=tf.float32,
variable_dtype=tf.float32):
self.top_layer = input_op
self.top_size = input_nchan
self.phase_train = phase_train
self.use_tf_layers = use_tf_layers
self.data_format = data_format
self.dtype = dtype
self.variable_dtype = variable_dtype
self.counts = defaultdict(lambda: 0)
self.n_parameters = 0
self.use_batch_norm = False
self.batch_norm_config = {} # 'decay': 0.997, 'scale': True}
self.channel_pos = ('channels_last'
if data_format == 'NHWC' else 'channels_first')
self.aux_top_layer = None
self.aux_top_size = 0
def get_custom_getter(self):
"""Returns a custom getter that this class's methods must be called under.
All methods of this class must be called under a variable scope that was
passed this custom getter. Example:
```python
network = ConvNetBuilder(...)
with tf.variable_scope('cg', custom_getter=network.get_custom_getter()):
network.conv(...)
# Call more methods of network here
```
Currently, this custom getter only does anything if self.use_tf_layers is
True. In that case, it causes variables to be stored as dtype
self.variable_dtype, then cast to the requested dtype, instead of directly
storing the variable as the requested dtype.
"""
def inner_custom_getter(getter, *args, **kwargs):
"""Custom getter that forces variables to have type self.variable_type."""
if not self.use_tf_layers:
return getter(*args, **kwargs)
requested_dtype = kwargs['dtype']
if not (requested_dtype == tf.float32 and
self.variable_dtype == tf.float16):
# Only change the variable dtype if doing so does not decrease variable
# precision.
kwargs['dtype'] = self.variable_dtype
var = getter(*args, **kwargs)
# This if statement is needed to guard the cast, because batch norm
# assigns directly to the return value of this custom getter. The cast
# makes the return value not a variable so it cannot be assigned. Batch
# norm variables are always in fp32 so this if statement is never
# triggered for them.
if var.dtype.base_dtype != requested_dtype:
var = tf.cast(var, requested_dtype)
return var
return inner_custom_getter
@contextlib.contextmanager
def switch_to_aux_top_layer(self):
"""Context that construct cnn in the auxiliary arm."""
if self.aux_top_layer is None:
raise RuntimeError('Empty auxiliary top layer in the network.')
saved_top_layer = self.top_layer
saved_top_size = self.top_size
self.top_layer = self.aux_top_layer
self.top_size = self.aux_top_size
yield
self.aux_top_layer = self.top_layer
self.aux_top_size = self.top_size
self.top_layer = saved_top_layer
self.top_size = saved_top_size
def get_variable(self, name, shape, dtype, cast_dtype, *args, **kwargs):
# TODO(reedwm): Currently variables and gradients are transferred to other
# devices and machines as type `dtype`, not `cast_dtype`. In particular,
# this means in fp16 mode, variables are transferred as fp32 values, not
# fp16 values, which uses extra bandwidth.
var = tf.get_variable(name, shape, dtype, *args, **kwargs)
return tf.cast(var, cast_dtype)
def _conv2d_impl(self, input_layer, num_channels_in, filters, kernel_size,
strides, padding, kernel_initializer):
if self.use_tf_layers:
return conv_layers.conv2d(input_layer, filters, kernel_size, strides,
padding, self.channel_pos,
kernel_initializer=kernel_initializer,
use_bias=False)
else:
weights_shape = [kernel_size[0], kernel_size[1], num_channels_in, filters]
# We use the name 'conv2d/kernel' so the variable has the same name as its
# tf.layers equivalent. This way, if a checkpoint is written when
# self.use_tf_layers == True, it can be loaded when
# self.use_tf_layers == False, and vice versa.
weights = self.get_variable('conv2d/kernel', weights_shape,
self.variable_dtype, self.dtype,
initializer=kernel_initializer)
if self.data_format == 'NHWC':
strides = [1] + strides + [1]
else:
strides = [1, 1] + strides
return tf.nn.conv2d(input_layer, weights, strides, padding,
data_format=self.data_format)
def conv(self,
num_out_channels,
k_height,
k_width,
d_height=1,
d_width=1,
mode='SAME',
input_layer=None,
num_channels_in=None,
use_batch_norm=None,
stddev=None,
activation='relu',
bias=0.0,
kernel_initializer=None):
"""Construct a conv2d layer on top of cnn.
Args:
num_out_channels: number of output channels produced by the convolution.
k_height: kernel height.
k_width: kernel width.
d_height: stride height.
d_width: stride width.
mode: padding mode; see the TensorFlow convolution padding modes ('SAME' keeps the spatial dimensions of the input when the stride is 1).
input_layer: the input layer of the convolution. If not specified the network's top layer is used.
num_channels_in: the number of channels of the input layer. If not specified, the number of channels of the network's top layer is used.
use_batch_norm: whether to apply batch normalization to the convolution output instead of adding a bias. Defaults to self.use_batch_norm.
stddev: standard deviation of the truncated normal distribution used to initialize the convolution weights.
activation: string defining the activation after convolving. Possible values are:
relu, linear, tanh
bias: initial value of the per-channel bias added after the convolution (ignored when batch normalization is used).
"""
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
if stddev is not None and kernel_initializer is None:
kernel_initializer = tf.truncated_normal_initializer(stddev=stddev)
name = 'conv' + str(self.counts['conv'])
self.counts['conv'] += 1
self.n_parameters += k_height * k_width * int(num_channels_in) * num_out_channels  # kernel weights: k_h * k_w * C_in * C_out
with tf.variable_scope(name):
strides = [1, d_height, d_width, 1]
if self.data_format == 'NCHW':
strides = [strides[0], strides[3], strides[1], strides[2]]
if mode != 'SAME_RESNET':
conv = self._conv2d_impl(input_layer, num_channels_in, num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width], padding=mode,
kernel_initializer=kernel_initializer)
else: # Special padding mode for ResNet models
if d_height == 1 and d_width == 1:
conv = self._conv2d_impl(input_layer, num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width], padding='SAME',
kernel_initializer=kernel_initializer)
else:
rate = 1 # Unused (for 'a trous' convolutions)
kernel_size_effective = k_height + (k_width - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
padding = [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
if self.data_format == 'NCHW':
padding = [padding[0], padding[3], padding[1], padding[2]]
input_layer = tf.pad(input_layer, padding)
conv = self._conv2d_impl(input_layer, num_channels_in,
num_out_channels,
kernel_size=[k_height, k_width],
strides=[d_height, d_width], padding='VALID',
kernel_initializer=kernel_initializer)
if use_batch_norm is None:
use_batch_norm = self.use_batch_norm
if not use_batch_norm:
if bias is not None:
biases = self.get_variable('biases', [num_out_channels],
self.variable_dtype, self.dtype,
initializer=tf.constant_initializer(bias))
biased = tf.reshape(
tf.nn.bias_add(conv, biases, data_format=self.data_format),
conv.get_shape())
else:
biased = conv
else:
self.top_layer = conv
self.top_size = num_out_channels
biased = self.batch_norm(**self.batch_norm_config)
if activation == 'relu':
conv1 = tf.nn.relu(biased)
elif activation == 'linear' or activation is None:
conv1 = biased
elif activation == 'tanh':
conv1 = tf.nn.tanh(biased)
else:
raise KeyError('Invalid activation type \'%s\'' % activation)
self.top_layer = conv1
self.top_size = num_out_channels
return conv1
def _pool(self,
pool_name,
pool_function,
k_height,
k_width,
d_height,
d_width,
mode,
input_layer,
num_channels_in):
"""Construct a pooling layer."""
if input_layer is None:
input_layer = self.top_layer
else:
self.top_size = num_channels_in
name = pool_name + str(self.counts[pool_name])
self.counts[pool_name] += 1
if self.use_tf_layers:
pool = pool_function(
input_layer, [k_height, k_width], [d_height, d_width],
padding=mode,
data_format=self.channel_pos,
name=name)
else:
if self.data_format == 'NHWC':
ksize = [1, k_height, k_width, 1]
strides = [1, d_height, d_width, 1]
else:
ksize = [1, 1, k_height, k_width]
strides = [1, 1, d_height, d_width]
pool = tf.nn.max_pool(input_layer, ksize, strides, padding=mode,
data_format=self.data_format, name=name)
self.top_layer = pool
self.n_parameters += k_height * k_width
return pool
def mpool(self,
k_height,
k_width,
d_height=2,
d_width=2,
mode='VALID',
input_layer=None,
num_channels_in=None):
"""Construct a max pooling layer."""
return self._pool('mpool', pooling_layers.max_pooling2d, k_height, k_width,
d_height, d_width, mode, input_layer, num_channels_in)
def apool(self,
k_height,
k_width,
d_height=2,
d_width=2,
mode='VALID',
input_layer=None,
num_channels_in=None):
"""Construct an average pooling layer."""
return self._pool('apool', pooling_layers.average_pooling2d, k_height,
k_width, d_height, d_width, mode, input_layer,
num_channels_in)
def reshape(self, shape, input_layer=None):
if input_layer is None:
input_layer = self.top_layer
self.top_layer = tf.reshape(input_layer, shape)
if self.data_format == 'NHWC':
self.top_size = shape[-1]
else: # 'NCHW'
self.top_size = shape[1]
return self.top_layer
def affine(self,
num_out_channels,
input_layer=None,
num_channels_in=None,
bias=0.0,
stddev=None,
activation='relu'):
if input_layer is None:
input_layer = self.top_layer
if num_channels_in is None:
num_channels_in = self.top_size
name = 'affine' + str(self.counts['affine'])
self.counts['affine'] += 1
with tf.variable_scope(name):
init_factor = 2. if activation == 'relu' else 1.
stddev = stddev or np.sqrt(init_factor / num_channels_in)
kernel = self.get_variable(
'weights', [num_channels_in, num_out_channels],
self.variable_dtype, self.dtype,
initializer=tf.truncated_normal_initializer(stddev=stddev))
biases = self.get_variable('biases', [num_out_channels],
self.variable_dtype, self.dtype,
initializer=tf.constant_initializer(bias))
self.n_parameters += num_channels_in * num_out_channels + num_out_channels
logits = tf.nn.xw_plus_b(input_layer, kernel, biases)
if activation == 'relu':
affine1 = tf.nn.relu(logits, name=name)
elif activation == 'linear' or activation is None:
affine1 = logits
else:
raise KeyError('Invalid activation type \'%s\'' % activation)
self.top_layer = affine1
self.top_size = num_out_channels
return affine1
def inception_module(self, name, cols, input_layer=None, in_size=None):
if input_layer is None:
input_layer = self.top_layer
if in_size is None:
in_size = self.top_size
name += str(self.counts[name])
self.counts[name] += 1
with tf.variable_scope(name):
col_layers = []
col_layer_sizes = []
for c, col in enumerate(cols):
col_layers.append([])
col_layer_sizes.append([])
for l, layer in enumerate(col):
ltype, args = layer[0], layer[1:]
kwargs = {
'input_layer': input_layer,
'num_channels_in': in_size
} if l == 0 else {}
if ltype == 'conv':
self.conv(*args, **kwargs)
elif ltype == 'mpool':
self.mpool(*args, **kwargs)
elif ltype == 'apool':
self.apool(*args, **kwargs)
elif ltype == 'share': # Share matching layer from previous column
self.top_layer = col_layers[c - 1][l]
self.top_size = col_layer_sizes[c - 1][l]
else:
raise KeyError(
'Invalid layer type for inception module: \'%s\'' % ltype)
col_layers[c].append(self.top_layer)
col_layer_sizes[c].append(self.top_size)
catdim = 3 if self.data_format == 'NHWC' else 1
self.top_layer = tf.concat([layers[-1] for layers in col_layers], catdim)
self.top_size = sum([sizes[-1] for sizes in col_layer_sizes])
return self.top_layer
def spatial_mean(self, keep_dims=False):
name = 'spatial_mean' + str(self.counts['spatial_mean'])
self.counts['spatial_mean'] += 1
axes = [1, 2] if self.data_format == 'NHWC' else [2, 3]
self.top_layer = tf.reduce_mean(
self.top_layer, axes, keep_dims=keep_dims, name=name)
return self.top_layer
def dropout(self, keep_prob=0.5, input_layer=None):
if input_layer is None:
input_layer = self.top_layer
else:
self.top_size = None
name = 'dropout' + str(self.counts['dropout'])
with tf.variable_scope(name):
if not self.phase_train:
keep_prob = 1.0
if self.use_tf_layers:
dropout = core_layers.dropout(input_layer, 1. - keep_prob)
else:
dropout = tf.nn.dropout(input_layer, keep_prob)
self.top_layer = dropout
return dropout
def _batch_norm_without_layers(self, input_layer, decay, use_scale, epsilon):
"""Batch normalization on `input_layer` without tf.layers."""
# We make this function as similar as possible to the
# tf.contrib.layers.batch_norm, to minimize the differences between using
# layers and not using layers.
shape = input_layer.shape
num_channels = shape[3] if self.data_format == 'NHWC' else shape[1]
beta = self.get_variable('beta', [num_channels], tf.float32, tf.float32,
initializer=tf.zeros_initializer())
if use_scale:
gamma = self.get_variable('gamma', [num_channels], tf.float32,
tf.float32, initializer=tf.ones_initializer())
else:
gamma = tf.constant(1.0, tf.float32, [num_channels])
# For moving variables, we use tf.get_variable instead of self.get_variable,
# since self.get_variable returns the result of tf.cast which we cannot
# assign to.
moving_mean = tf.get_variable('moving_mean', [num_channels],
tf.float32,
initializer=tf.zeros_initializer(),
trainable=False)
moving_variance = tf.get_variable('moving_variance', [num_channels],
tf.float32,
initializer=tf.ones_initializer(),
trainable=False)
if self.phase_train:
bn, batch_mean, batch_variance = tf.nn.fused_batch_norm(
input_layer, gamma, beta, epsilon=epsilon,
data_format=self.data_format, is_training=True)
mean_update = moving_averages.assign_moving_average(
moving_mean, batch_mean, decay=decay, zero_debias=False)
variance_update = moving_averages.assign_moving_average(
moving_variance, batch_variance, decay=decay, zero_debias=False)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, mean_update)
tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, variance_update)
else:
bn, _, _ = tf.nn.fused_batch_norm(
input_layer, gamma, beta, mean=moving_mean,
variance=moving_variance, epsilon=epsilon,
data_format=self.data_format, is_training=False)
return bn
def batch_norm(self, input_layer=None, decay=0.999, scale=False,
epsilon=0.001):
"""Adds a Batch Normalization layer."""
if input_layer is None:
input_layer = self.top_layer
else:
self.top_size = None
name = 'batchnorm' + str(self.counts['batchnorm'])
self.counts['batchnorm'] += 1
with tf.variable_scope(name) as scope:
if self.use_tf_layers:
bn = tf.contrib.layers.batch_norm(
input_layer,
decay=decay,
scale=scale,
epsilon=epsilon,
is_training=self.phase_train,
fused=True,
data_format=self.data_format,
scope=scope)
else:
bn = self._batch_norm_without_layers(input_layer, decay, scale, epsilon)
self.top_layer = bn
self.top_size = bn.shape[3] if self.data_format == 'NHWC' else bn.shape[1]
self.top_size = int(self.top_size)
return bn
def lrn(self, depth_radius, bias, alpha, beta):
"""Adds a local response normalization layer."""
name = 'lrn' + str(self.counts['lrn'])
self.counts['lrn'] += 1
self.top_layer = tf.nn.lrn(
self.top_layer, depth_radius, bias, alpha, beta, name=name)
return self.top_layer
def concat_layers(self, list_of_layers=None):
""" Concatenates 2 or more layers. """
catdim = 3 if self.data_format == 'NHWC' else 1
if list_of_layers is None:
return # get the last 2 layers
# list_of_layers = [self.lay]
self.top_layer = tf.concat(axis = catdim, values=list_of_layers)
self.top_size = np.sum([layer.get_shape()[catdim] for layer in list_of_layers])
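# Illustrative sketch (not part of the original file): the explicit padding that
# conv() above computes for mode='SAME_RESNET' when the stride is larger than 1
# (with the unused dilation rate fixed at 1). The helper name and the example
# kernel sizes are invented for this illustration.
def _example_resnet_pad_amounts(kernel_size):
  """Return (pad_begin, pad_end) applied before the strided VALID convolution."""
  pad_total = kernel_size - 1
  pad_beg = pad_total // 2
  pad_end = pad_total - pad_beg
  return pad_beg, pad_end
# A 7x7 kernel pads 3 pixels on each side; a 3x3 kernel pads 1 on each side:
#   _example_resnet_pad_amounts(7) == (3, 3)
#   _example_resnet_pad_amounts(3) == (1, 1)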
| 20,618
| 40.403614
| 133
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/batch_allreduce.py
|
# # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
# #
# # Licensed under the Apache License, Version 2.0 (the "License");
# # you may not use this file except in compliance with the License.
# # You may obtain a copy of the License at
# #
# # http://www.apache.org/licenses/LICENSE-2.0
# #
# # Unless required by applicable law or agreed to in writing, software
# # distributed under the License is distributed on an "AS IS" BASIS,
# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# # See the License for the specific language governing permissions and
# # limitations under the License.
# # ==============================================================================
# """Contains classes and functions for doing a single-machine batch all-reduce.
# An all-reduce is taking the reduction (typically a sum) of a list of tensors,
# each on a different device. The result must end up back on each device, which is
# where the word "all" comes from. In summary, each device starts with a single
# tensor, and ends up with the reduction of all tensors.
# A batch all-reduce is doing several independent all-reduces. When doing a batch
# all-reduce, care is taken to evenly distribute the reduction computations
# across devices and inter-device tensor transfers across device links.
# """
# from __future__ import absolute_import
# from __future__ import division
# from __future__ import print_function
# # TODO(reedwm): Support distributed all-reduces in this file.
# # TODO(reedwm): Merge this code with allreduce.py, which contains some batch
# # all-reduce code that this file calls. allreduce.py also supports distributed
# # batch-reduce while this file only supports single-machine all-reduce.
# import abc
# from collections import namedtuple
# import six
# import tensorflow as tf
# from tensorflow.python.ops import data_flow_ops
# import allreduce
# import constants
# def _all_reduce_using_copy(tensors_across_devices, use_mean):
# """Does an all-reduce of a list of tensors by copying to the current device.
# The tensors are copied to the current device and then reduced.
# Args:
# tensors_across_devices: A list of tensors, each on a different device.
# use_mean: Whether to take the mean of the tensors instead of a sum:
# Returns:
# A reduced tensor on the current device.
# """
# reduced_tensor = tf.add_n(tensors_across_devices)
# if use_mean:
# reduced_tensor *= 1 / len(tensors_across_devices)
# return reduced_tensor
# @six.add_metaclass(abc.ABCMeta)
# class BatchAllReduceAlgorithm(object):
# """Represents an algorithm for performing a batch all-reduce operation."""
# def batch_all_reduce(self, all_device_tensors, num_splits, compact_tensors,
# defer_tensors):
# """Performs a batch all-reduce.
# The reduction done is a sum.
# `all_device_tensors` is a list of list of tensors that will be batch
# all-reduced. All tensors within a single inner list must be on the same
# device. The nth element in each list, for any n, will be reduced together.
# The return value is in the same form as `all_device_tensors`, except that
# each tensor is reduced.
# For example, if `all_device_tensors` is:
# [[ A, B ], # A and B are on GPU 0
# [ C, D ]] # C and D are on GPU 1
# Then the return value will be:
# [[ A+C, B+D ], # These two tensors are on GPU 0
# [ A+C, B+D ]] # These two tensors are on GPU 1
# Arguments:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
# is a tensor where `i` is the device index and `j` is the tensor index.
# num_splits: If not None, tensors will be concatenated and split into this
# many pieces during the all-reduce, then split back into their original
# shapes afterwards. Has no impact on correctness and can improve
# performance. Requires all tensors to be the same type.
# compact_tensors: If True, tensors are casted to fp16 before being all-
# reduced. Improves performance, but hurts numerical stability.
# defer_tensors: If True, every time the return value
# `reduced_all_device_tensors` is evaluated, the result will be the
# reduced tensors values of `all_device_tensors` from the previous session
# run instead of the current session run, or zero on the first session
# run. This can improve performance. When training neural networks,
# deferring gradients often does not harm training, so this can be used to
# improve performance.
# Returns:
# reduced_all_device_tensors: A list in the same form as
# `all_device_tensors`, except each tensor has been reduced.
# warmup_ops: A list of ops needed to be run once before the all-reduce can
# occur.
# """
# # Before all-reducing tensors, we do several preprocessing functions that
# # can speed up the all-reduce. We undo these functions after all-reducing
# # the tensors.
# warmup_ops = []
# if num_splits:
# packer = _TensorPacker(num_splits)
# all_device_tensors = packer.concat_all_device_tensors(all_device_tensors)
# # If enabled, we compact and defer tensors in between concatenating them
# # and splitting them, because it is faster to do operations on a single
# # concatenated tensor than on multiple smaller tensors.
# if compact_tensors:
# all_device_tensors_before_compact = all_device_tensors
# all_device_tensors = _compact_all_device_tensors(all_device_tensors)
# if defer_tensors:
# all_device_tensors, put_ops, warmup_ops = _defer_all_device_tensors(
# all_device_tensors)
# if num_splits:
# all_device_tensors = packer.split_all_device_tensors(all_device_tensors)
# all_device_tensors = self._do_batch_all_reduce(all_device_tensors)
# # Undo the preprocessing operations in opposite order as we applied them.
# if num_splits:
# all_device_tensors = packer.undo_split_all_device_tensors(
# all_device_tensors)
# # Note: There is no undo operation for deferring tensors. But we do need to
# # call _add_put_op_control_deps at the end if we deferred the tensors.
# if compact_tensors:
# all_device_tensors = _undo_compact_all_device_tensors(
# all_device_tensors, all_device_tensors_before_compact)
# if num_splits:
# all_device_tensors = packer.undo_concat_all_device_tensors(
# all_device_tensors)
# if defer_tensors:
# all_device_tensors = _add_put_op_control_deps(all_device_tensors,
# num_splits, put_ops)
# return all_device_tensors, warmup_ops
# @abc.abstractmethod
# def _do_batch_all_reduce(self, all_device_tensors):
# """Performs a batch all-reduce.
# Unlike `self.batch_all_reduce`, this does not do any preprocessing of the
# tensors.
# Args:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
# is a tensor where `i` is the device index and `j` is the tensor index.
# Returns:
# reduced_all_device_tensors: A list in the same form as
# `all_device_tensors`, except each tensor has been reduced.
# """
# pass
# class CopyToDeviceAlgorithm(BatchAllReduceAlgorithm):
# """An algorithm that copies tensors to be reduced to a specific device."""
# def __init__(self, devices_to_reduce_on, use_mean=False):
# self._devices = devices_to_reduce_on
# self._use_mean = use_mean
# def _do_batch_all_reduce(self, all_device_tensors):
# reduced_tensors = []
# for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
# with tf.device(self._devices[i % len(self._devices)]):
# reduced_tensor = _all_reduce_using_copy(tensors_across_devices,
# self._use_mean)
# reduced_tensors.append(reduced_tensor)
# # The tensors will be brought back to each device once they are used.
# return [reduced_tensors] * len(all_device_tensors)
# class HierarchicalCopyAlgorithm(BatchAllReduceAlgorithm):
# """An algorithm that uses hierarchical copies. This is only optimized for
# eight devices connected in NetworkTopology.DGX1 or NetworkTopology.GCP_V100
# topology.
# """
# def __init__(self, network_topology):
# """Initializer for HierarchicalCopyAlgorithm.
# Args:
# network_topology: An instance of Enum class constants.NetworkTopology.
# """
# self._network_topology = network_topology
# def _do_batch_all_reduce(self, all_device_tensors):
# avail_devices = [device_tensors[0].device
# for device_tensors in all_device_tensors]
# reduced_tensors = []
# num_devices = len(avail_devices)
# group_size = num_devices // 2
# for i, tensors_across_devices in enumerate(zip(*all_device_tensors)):
# group_0_main_device, group_1_main_device = self.__get_main_devices(
# i, num_devices)
# if group_0_main_device < group_size:
# group_0_begin = 0
# group_1_begin = group_size
# else:
# group_0_begin = group_size
# group_1_begin = 0
# # Reduce the first group.
# group_0_tensors = tensors_across_devices[group_0_begin:
# group_0_begin + group_size]
# with tf.device(avail_devices[group_0_main_device]):
# group_0_reduced_tensor = _all_reduce_using_copy(group_0_tensors, False)
# # Reduce the second group.
# group_1_tensors = tensors_across_devices[group_1_begin:
# group_1_begin + group_size]
# with tf.device(avail_devices[group_1_main_device]):
# group_1_reduced_tensor = _all_reduce_using_copy(group_1_tensors, False)
# # Reduce between the groups.
# with tf.device(avail_devices[group_0_main_device]):
# total_reduced_tensor = _all_reduce_using_copy(
# [group_0_reduced_tensor, group_1_reduced_tensor], False)
# # Broadcast the result back into the root of each group.
# with tf.device(avail_devices[group_0_main_device]):
# group_0_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
# with tf.device(avail_devices[group_1_main_device]):
# group_1_reduced_tensor_bcast = tf.identity(total_reduced_tensor)
# reduced_tensors_bcast = []
# for j in range(len(tensors_across_devices)):
# with tf.device(avail_devices[j]):
# # Broadcast the result back to each member in the group from the root.
# if (group_0_main_device < group_size) == (j < group_size):
# src_device_tensor = group_0_reduced_tensor_bcast
# else:
# src_device_tensor = group_1_reduced_tensor_bcast
# reduced_tensors_bcast.append(tf.identity(src_device_tensor))
# reduced_tensors.append(reduced_tensors_bcast)
# reduced_tensors = list(zip(*reduced_tensors))
# return reduced_tensors
# def __get_main_devices(self, tensor_index, num_devices):
# """Returns the pair of main devices to use for initial reduction.
# Args:
# tensor_index: Index of the current tensor in the list of tensors to copy.
# num_devices: Total number of devices.
# Returns:
#       A tuple containing a pair of main device indices for the initial
# reduction. Then, the first element of the tuple should be used for the
# final reduction.
# Raises:
# ValueError: Invalid input arguments.
# """
# if self._network_topology == constants.NetworkTopology.DGX1:
# return tensor_index % num_devices, (tensor_index +
# (num_devices // 2)) % num_devices
# elif self._network_topology == constants.NetworkTopology.GCP_V100:
# if num_devices != 8:
# raise ValueError('HierarchicalCopy only supports eight devices in %s.' %
# self._network_topology)
# # TODO(hinsu): Generalize main device indices to handle any other
# # isomorphic connection graph that connects two cliques using connections
# # other than 0-5 and 2-7.
# main_device_pairs = [(0, 5), (2, 7), (5, 0), (7, 2)]
# return main_device_pairs[tensor_index % len(main_device_pairs)]
# else:
# # TODO(reedwm): make this logic more general for arbitrary topology.
# raise ValueError(
# 'HierarchicalCopy is not supported for %s network topology.' %
# self._network_topology)
# class AllReduceSpecAlgorithm(BatchAllReduceAlgorithm):
# """An algorithm that uses an all reduce spec."""
# def __init__(self, all_reduce_spec, gpu_indices, agg_small_grads_max_bytes,
# agg_small_grads_max_group):
# spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
# if len(spec) != 1:
# raise ValueError(
# 'Replicated mode does not support hybrid all-reduce strategies')
# self._all_reduce_spec = spec[0]
# self._gpu_indices = gpu_indices
# self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
# self._agg_small_grads_max_group = agg_small_grads_max_group
# def _do_batch_all_reduce(self, all_device_tensors):
# # TODO(reedwm): Merge allreduce.sum_gradients_all_reduce with the other
# # gradient aggregation code, since gradient aggregation is doing an all
# # reduce. Currently, we do gradient repacking in two different places.
# # TODO(reedwm): Change the allreduce code to reduce tensors instead of
# # tower_grads.
# tower_grads = [[(t, None) for t in device_tensors]
# for device_tensors in all_device_tensors]
# aggregated_device_grads = allreduce.sum_gradients_all_reduce(
# False, # single_session
# ['/job:localhost'],
# tower_grads,
# 1,
# self._all_reduce_spec.alg,
# self._all_reduce_spec.shards,
# self._gpu_indices,
# agg_small_grads_max_bytes=self._agg_small_grads_max_bytes,
# agg_small_grads_max_group=self._agg_small_grads_max_group)
# return [[t for t, _ in grad_vars] for grad_vars in aggregated_device_grads]
# def algorithm_from_params(params):
# """Returns a BatchAllReduceAlgorithm from a Params tuple."""
# if params.all_reduce_spec:
# if params.gpu_indices:
# gpu_indices = [int(x) for x in params.gpu_indices.split(',')]
# else:
# gpu_indices = [x for x in range(params.num_gpus)]
# return AllReduceSpecAlgorithm(params.all_reduce_spec, gpu_indices,
# params.agg_small_grads_max_bytes,
# params.agg_small_grads_max_group)
# elif params.hierarchical_copy:
# return HierarchicalCopyAlgorithm(params.network_topology)
# else:
# if params.local_parameter_device == 'gpu':
# devices_to_reduce_on = ['/gpu:%d' % i for i in range(params.num_gpus)]
# else:
# devices_to_reduce_on = ['/cpu:0']
# return CopyToDeviceAlgorithm(devices_to_reduce_on)
# def _apply_to_all_device_tensors(all_device_tensors, apply_func, colocate=True):
# """Applies a function to each tensor in `all_device_tensors`.
# A new list of lists of tensors is returned, where every tensor in
# `all_device_tensors` has had `apply_func` called on it. `all_device_tensors`
# is not modified.
# Args:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
# a tensor where `i` is the device index and `j` is the tensor index.
# apply_func: A function taking in three arguments: tensor, device_index,
# tensor_index, and returning a modified tensor.
# `tensor` is `all_device_tensors[device_index][tensor_index]`.
# colocate: If True, apply_func will be run under context manager colocated
#       with its input tensor.
# Returns:
# A list in the same form as `all_device_tensors`, except each tensor has had
# `apply_func` called on it.
# """
# new_all_device_tensors = []
# for device_index, device_tensors in enumerate(all_device_tensors):
# new_device_tensors = []
# for tensor_index, t in enumerate(device_tensors):
# if colocate:
# with tf.colocate_with(t):
# new_t = apply_func(t, device_index, tensor_index)
# else:
# new_t = apply_func(t, device_index, tensor_index)
# new_device_tensors.append(new_t)
# new_all_device_tensors.append(new_device_tensors)
# return new_all_device_tensors
# def _defer_tensor(tensor):
# """Defers the retrieval of a tensor.
# The tensor is put into a StagingArea, and the return value is the
# retrieval of the tensor from the StagingArea. The effect is that the
# tensor returned from this function is the tensor that was put in the
# StagingArea for the previous Session.run() call.
# Args:
# tensor: The tensor to defer for one step.
# Returns:
# deferred_tensor: The tensor deferred for one step.
# put_op: An op to put `tensor` in the StagingArea. Must be run every step
# that `deferred_tensor` is run.
# warmup_op: A warmup op that should be called before the first step. Puts
# a zero tensor into the StagingArea.
# """
# tensor_stage = data_flow_ops.StagingArea([tensor.dtype], [tensor.shape])
# put_op = tensor_stage.put([tensor])
# warmup_op = tensor_stage.put([tf.zeros(tensor.shape, dtype=tensor.dtype)])
# # Fetch the next tensor to use.
# (tensor,) = tensor_stage.get()
# return tensor, put_op, warmup_op
# def _defer_all_device_tensors(all_device_tensors):
# """Defers every tensor in `all_device_tensors`."""
# put_ops = [[] for _ in all_device_tensors]
# warmup_ops = [[] for _ in all_device_tensors]
# def apply_func(tensor, device_index, tensor_index):
# del tensor_index
# tensor, put_op, warmup_op = _defer_tensor(tensor)
# put_ops[device_index].append(put_op)
# warmup_ops[device_index].append(warmup_op)
# return tensor
# new_all_device_tensors = _apply_to_all_device_tensors(all_device_tensors,
# apply_func)
# return new_all_device_tensors, put_ops, warmup_ops
# def _add_put_op_control_deps(all_device_tensors, num_splits, put_ops):
# """Add control dependencies from `put_ops` to `all_device_tensors`.
# This should only be called when deferred tensors are being used.
# The control dependencies are added so that the put ops are run whenever
# `all_device_tensors` is run. That way, the caller does not have to explicitly
# run the put ops.
# Args:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]` is
# a tensor where `i` is the device index and `j` is the tensor index.
# num_splits: The number of splits that were used for the all-reduce.
# put_ops: A list of put ops from deferring the tensors.
# Returns:
# A list in the same form as `all_device_tensors`, except each tensor has a
# control dependency on an op in `put_ops`.
# """
# def apply_func(tensor, device_index, tensor_index):
# if num_splits == 0:
# deps = [put_ops[device_index][tensor_index]]
# else:
# deps = put_ops[device_index]
# assert len(deps) == 1
# with tf.control_dependencies(deps):
# return tf.identity(tensor, name='control_dependency')
# return _apply_to_all_device_tensors(all_device_tensors, apply_func)
# def _compact_all_device_tensors(all_device_tensors):
# """Compacts each tensor by casting to fp16."""
# def apply_func(tensor, device_index, tensor_index):
# del device_index, tensor_index
# return tf.cast(tensor, tf.float16)
# return _apply_to_all_device_tensors(all_device_tensors, apply_func)
# def _undo_compact_all_device_tensors(all_device_tensors,
# orig_all_device_tensors):
# """Uncompacts each tensor by casting to it's original dtype."""
# def apply_func(tensor, device_index, tensor_index):
# orig_tensor = orig_all_device_tensors[device_index][tensor_index]
# with tf.colocate_with(orig_tensor):
# return tf.cast(tensor, orig_tensor.dtype)
# return _apply_to_all_device_tensors(all_device_tensors, apply_func,
# colocate=False)
# class _TensorPacker(object):
# """Packs and unpacks tensors into groups.
#   This class first concatenates a set of tensors, then splits the concatenated
# tensor into a small number of chunks. This is useful for all-reducing tensors,
# as doing a small number of all-reduces on large tensors can be faster than
# doing a large number of all-reduces on small tensors.
# """
# def __init__(self, num_splits):
# """Initializes the _TensorPacker.
# Args:
# num_splits: The number of tensors to split the concatenated tensor into.
# The batch all-reduce will consist of `num_splits` all-reduces.
# """
# assert num_splits > 0
# self._num_splits = num_splits
# self._next_method = 'concat'
# _concat_tensor_state = namedtuple('_concat_tensor_state',
# ['orig_shapes', 'orig_sizes'])
# def _concat_tensors(self, device_tensors):
# """Concatenate tensors into a single tensor."""
# flat_tensors = [tf.reshape(t, [-1]) for t in device_tensors]
# orig_shapes = [t.shape for t in device_tensors]
# orig_sizes = [s.num_elements() for s in orig_shapes]
# # All shapes must be fully defined.
# assert None not in orig_sizes
# concatenated_grad = tf.concat(flat_tensors, 0)
# return concatenated_grad, self._concat_tensor_state(orig_shapes, orig_sizes)
# def _split_tensors(self, concatenated_tensor):
# """Splits concatenated tensor into `num_splits` pieces."""
# # TODO(zhengxq): it is possible to optimize away the additional
# # data movement by copying along the original tensor boundary.
# # TODO(zhengxq): it is also possible to optimize away all the concat
# # as well.
# total_tensor_size = concatenated_tensor.shape.num_elements()
# split_size = total_tensor_size // self._num_splits
# split_size_last = total_tensor_size - split_size * (self._num_splits - 1)
# split_sizes = [split_size] * (self._num_splits - 1) + [split_size_last]
# tensor_packs = tf.split(concatenated_tensor, split_sizes)
# return tensor_packs
# def _undo_split_tensors(self, tensor_packs):
# """Undoes self._split_tensors()."""
# return tf.concat(tensor_packs, 0)
# def _undo_concat_tensors(self, concatenated_tensor, concat_tensor_state):
# """Undoes self._concat_tensors()."""
# tensors_with_sizes = tf.split(concatenated_tensor,
# concat_tensor_state.orig_sizes)
# tensors_with_shapes = [
# tf.reshape(grad, shape)
# for grad, shape in zip(tensors_with_sizes,
# concat_tensor_state.orig_shapes)
# ]
# return tensors_with_shapes
# def concat_all_device_tensors(self, all_device_tensors):
# """For each device, concatenate the device's tensors into a single tensor.
# Args:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
# is a tensor where `i` is the device index and `j` is the tensor index.
# Returns:
# A list of list of tensors in a similar form as all_device_tensors, except
# the tensors on each device have been concatenated. Each inner list
# consists of a single concatenated tensor.
# """
# assert self._next_method == 'concat'
# new_all_device_tensors = []
# tensor_states = []
# for device_tensors in all_device_tensors:
# with tf.colocate_with(device_tensors[0]):
# concat_tensor, tensor_state = self._concat_tensors(device_tensors)
# new_all_device_tensors.append([concat_tensor])
# tensor_states.append(tensor_state)
# self._tensor_states = tensor_states
# self._next_method = 'split'
# return new_all_device_tensors
# def split_all_device_tensors(self, all_device_tensors):
# """Splits concatenated tensors into `num_splits` pieces.
# `num_splits` is specified in the constructor. In the case where the total
# size of a concatenated tensor is not divisible by `num_splits`, the last
# split tensor gets more elements.
# Args:
# all_device_tensors: A list of list of tensors. `all_device_tensors[i][j]`
# is a tensor where `i` is the device index and `j` is the tensor index.
# For each i, `all_device_tensors[i]` must be a list of length 1 of a
# single concatenated tensor.
# Returns:
# A list of list of tensors in a similar form as all_device_tensors, except
#       the concatenated tensor on each device has been split. Each inner list
# is a list of length `num_splits`.
# """
# assert self._next_method == 'split'
# new_all_device_tensors = []
# for [concat_tensor] in all_device_tensors:
# with tf.colocate_with(concat_tensor):
# new_all_device_tensors.append(self._split_tensors(concat_tensor))
# self._orig_concat_all_device_tensors = all_device_tensors
# self._next_method = 'undo_split'
# return new_all_device_tensors
# def undo_split_all_device_tensors(self, all_device_tensors):
# """Undoes the effects of `split_all_device_tensors`."""
# assert self._next_method == 'undo_split'
# new_all_device_tensors = []
# for i, device_tensors in enumerate(all_device_tensors):
# [orig_tensor] = self._orig_concat_all_device_tensors[i]
# with tf.colocate_with(orig_tensor):
# new_all_device_tensors.append(
# [self._undo_split_tensors(device_tensors)])
# self._next_method = 'undo_concat'
# return new_all_device_tensors
# def undo_concat_all_device_tensors(self, all_device_tensors):
# """Undoes the effects of `concat_all_device_tensors`."""
# assert self._next_method == 'undo_concat'
# new_all_device_tensors = []
# for [concat_tensor], tensor_state in zip(all_device_tensors,
# self._tensor_states):
# with tf.colocate_with(concat_tensor):
# new_all_device_tensors.append(self._undo_concat_tensors(concat_tensor,
# tensor_state))
# self._next_method = None
# return new_all_device_tensors
| 26,549
| 43.176373
| 82
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/data_utils.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data utility methods.
Collection of utility methods that make CNN benchmark code use tf.data easier.
"""
import tensorflow as tf
from tensorflow.contrib.data.python.ops import batching
from tensorflow.contrib.data.python.ops import interleave_ops
from tensorflow.contrib.data.python.ops import prefetching_ops
from tensorflow.contrib.data.python.ops import threadpool
from tensorflow.python.framework import function
from tensorflow.python.platform import gfile
def build_prefetch_image_processing(height, width, batch_size, num_splits,
preprocess_fn, cpu_device, params,
gpu_devices, dataset):
""""Returns FunctionBufferingResources that do image pre(processing)."""
with tf.device(cpu_device):
if params.eval:
subset = 'validation'
else:
subset = 'train'
function_buffering_resources = []
remote_fn, args = minibatch_fn(
height=height,
width=width,
batch_size=batch_size,
num_splits=num_splits,
preprocess_fn=preprocess_fn,
dataset=dataset,
subset=subset,
train=(not params.eval),
cache_data=params.cache_data,
num_threads=params.datasets_num_private_threads)
for device_num in range(len(gpu_devices)):
with tf.device(gpu_devices[device_num]):
buffer_resource_handle = prefetching_ops.function_buffering_resource(
f=remote_fn,
target_device=cpu_device,
string_arg=args[0],
buffer_size=params.datasets_prefetch_buffer_size,
shared_name=None)
function_buffering_resources.append(buffer_resource_handle)
return function_buffering_resources
def get_images_and_labels(function_buffering_resource, data_type):
"""Given a FunctionBufferingResource obtains images and labels from it."""
return prefetching_ops.function_buffering_resource_get_next(
function_buffer_resource=function_buffering_resource,
output_types=[data_type, tf.int32])
def create_iterator(batch_size,
num_splits,
batch_size_per_split,
preprocess_fn,
dataset,
subset,
train,
cache_data,
num_threads=None):
"""Creates a dataset iterator for the benchmark."""
glob_pattern = dataset.tf_record_pattern(subset)
file_names = gfile.Glob(glob_pattern)
if not file_names:
raise ValueError('Found no files in --data_dir matching: {}'
.format(glob_pattern))
ds = tf.data.TFRecordDataset.list_files(file_names)
ds = ds.apply(
interleave_ops.parallel_interleave(
tf.data.TFRecordDataset, cycle_length=10))
if cache_data:
ds = ds.take(1).cache().repeat()
counter = tf.data.Dataset.range(batch_size)
counter = counter.repeat()
ds = tf.data.Dataset.zip((ds, counter))
ds = ds.prefetch(buffer_size=batch_size)
if train:
ds = ds.shuffle(buffer_size=10000)
ds = ds.repeat()
ds = ds.apply(
batching.map_and_batch(
map_func=preprocess_fn,
batch_size=batch_size_per_split,
num_parallel_batches=num_splits))
ds = ds.prefetch(buffer_size=num_splits)
if num_threads:
ds = threadpool.override_threadpool(
ds,
threadpool.PrivateThreadPool(
num_threads, display_name='input_pipeline_thread_pool'))
ds_iterator = ds.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS,
ds_iterator.initializer)
else:
ds_iterator = ds.make_one_shot_iterator()
return ds_iterator
def minibatch_fn(height, width, batch_size, num_splits, preprocess_fn, dataset,
subset, train, cache_data, num_threads):
"""Returns a function and list of args for the fn to create a minibatch."""
batch_size_per_split = batch_size // num_splits
with tf.name_scope('batch_processing'):
ds_iterator = create_iterator(batch_size, num_splits, batch_size_per_split,
preprocess_fn, dataset, subset, train,
cache_data, num_threads)
ds_iterator_string_handle = ds_iterator.string_handle()
@function.Defun(tf.string)
def _fn(h):
depth = 3
remote_iterator = tf.data.Iterator.from_string_handle(
h, ds_iterator.output_types, ds_iterator.output_shapes)
labels, images = remote_iterator.get_next()
images = tf.reshape(
images, shape=[batch_size_per_split, height, width, depth])
labels = tf.reshape(labels, [batch_size_per_split])
return images, labels
return _fn, [ds_iterator_string_handle]
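# A minimal, uncalled sketch of how the helpers above fit together: one
# FunctionBufferingResource is built per GPU, and each tower then pulls its
# own (images, labels) pair from its resource. The `params`, `preprocess_fn`
# and `dataset` arguments are assumed to be the same objects benchmark_cnn.py
# passes in; the GPU device names below are hypothetical.
def _example_prefetch_pipeline(height, width, batch_size, num_splits,
                               preprocess_fn, params, dataset):
  gpu_devices = ['/gpu:0', '/gpu:1']  # Hypothetical device list.
  resources = build_prefetch_image_processing(
      height, width, batch_size, num_splits, preprocess_fn,
      '/cpu:0', params, gpu_devices, dataset)
  # One (images, labels) pair per GPU tower, fetched from its own buffer.
  return [get_images_and_labels(r, tf.float32) for r in resources]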
| 5,421
| 38.007194
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/cnn_util.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities for CNN benchmarks."""
from __future__ import print_function
import sys
import threading
import numpy as np
import tensorflow as tf
def tensorflow_version_tuple():
v = tf.__version__
major, minor, patch = v.split('.')
return (int(major), int(minor), patch)
def tensorflow_version():
vt = tensorflow_version_tuple()
return vt[0] * 1000 + vt[1]
log_file = open("logfile.txt", "w")
def log_fn(log):
log_file.write(log + "\n")
log_file.flush()
def roll_numpy_batches(array, batch_size, shift_ratio):
"""Moves a proportion of batches from start to the end of the array.
This function moves a proportion of batches, specified by `shift_ratio`, from
the starts of the array to the end. The number of batches moved is rounded
down to the nearest integer. For example,
```
roll_numpy_batches([1, 2, 3, 4, 5, 6], 2, 0.34) == [3, 4, 5, 6, 1, 2]
```
Args:
array: A Numpy array whose first dimension is the batch dimension.
batch_size: The batch size.
shift_ratio: Proportion of batches to move from the start of the array to
the end of the array.
Returns:
A new Numpy array, with a proportion of the batches at the start of `array`
moved to the end.
"""
num_items = array.shape[0]
assert num_items % batch_size == 0
num_batches = num_items // batch_size
starting_batch = int(num_batches * shift_ratio)
starting_item = starting_batch * batch_size
return np.roll(array, -starting_item, axis=0)
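# A small, uncalled sketch that reproduces the docstring example above, so the
# rounding behaviour of `shift_ratio` is easy to follow.
def _example_roll_numpy_batches():
  array = np.array([1, 2, 3, 4, 5, 6])
  # num_batches = 3, so int(3 * 0.34) = 1 batch (two items) moves to the end,
  # giving array([3, 4, 5, 6, 1, 2]).
  return roll_numpy_batches(array, batch_size=2, shift_ratio=0.34)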
# For Python 2.7 compatibility, we do not use threading.Barrier.
class Barrier(object):
"""Implements a lightweight Barrier.
Useful for synchronizing a fixed number of threads at known synchronization
points. Threads block on 'wait()' and simultaneously return once they have
all made that call.
  # Implementation adapted from boost/thread/barrier.hpp
"""
def __init__(self, parties):
"""Create a barrier, initialised to 'parties' threads."""
self.cond = threading.Condition(threading.Lock())
self.parties = parties
# Indicates the number of waiting parties.
self.waiting = 0
# generation is needed to deal with spurious wakeups. If self.cond.wait()
    # wakes up for other reasons, generation will force it to go back to wait().
self.generation = 0
self.broken = False
def wait(self):
"""Wait for the barrier."""
with self.cond:
# Check if the barrier has been disabled or not.
if self.broken:
return
gen = self.generation
self.waiting += 1
if self.waiting == self.parties:
self.waiting = 0
self.generation += 1
self.cond.notify_all()
# loop because of spurious wakeups
while gen == self.generation:
self.cond.wait()
# TODO(huangyp): Remove this method once we find a way to know which step
# is the last barrier.
def abort(self):
"""Clear existing barrier and disable this barrier."""
with self.cond:
if self.waiting > 0:
self.generation += 1
self.cond.notify_all()
self.broken = True
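# An uncalled usage sketch for the Barrier above: two worker threads block on
# wait() and are released together once both have arrived. The thread names
# and the recorded messages are arbitrary.
def _example_barrier_usage():
  barrier = Barrier(2)
  results = []
  def worker(name):
    results.append('%s before barrier' % name)
    barrier.wait()  # Blocks until both workers have called wait().
    results.append('%s after barrier' % name)
  threads = [threading.Thread(target=worker, args=(n,)) for n in ('a', 'b')]
  for t in threads:
    t.start()
  for t in threads:
    t.join()
  return results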
class ImageProducer(object):
"""An image producer that puts images into a staging area periodically.
This class is useful for periodically running a set of ops, `put_ops` on a
different thread every `batch_group_size` steps.
The notify_image_consumption() method is used to increment an internal counter
so that every `batch_group_size` times it is called, `put_ops` is executed. A
barrier is placed so that notify_image_consumption() will block until
the previous call to `put_ops` has been executed.
The start() method is used to start the thread that runs `put_ops`.
The done() method waits until the last put_ops is executed and stops the
thread.
The purpose of this class is to fill an image input pipeline every
`batch_group_size` steps. Suppose `put_ops` supplies `batch_group_size` images
to the input pipeline when run, and that every step, 1 batch of images is
consumed. Then, by calling notify_image_consumption() every step, images are
  supplied to the input pipeline at the same rate as they are consumed.
Example usage:
```
put_ops = ... # Enqueues `batch_group_size` batches to a StagingArea
get_op = ... # Dequeues 1 batch, and does some operations on it
batch_group_size = 4
with tf.Session() as sess:
image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size)
image_producer.start()
for _ in range(100):
sess.run(get_op)
image_producer.notify_image_consumption()
```
"""
def __init__(self, sess, put_ops, batch_group_size, use_python32_barrier):
self.sess = sess
self.num_gets = 0
self.put_ops = put_ops
self.batch_group_size = batch_group_size
self.done_event = threading.Event()
if (use_python32_barrier and
sys.version_info[0] == 3 and sys.version_info[1] >= 2):
self.put_barrier = threading.Barrier(2)
else:
self.put_barrier = Barrier(2)
def _should_put(self):
return (self.num_gets + 1) % self.batch_group_size == 0
def done(self):
"""Stop the image producer."""
self.done_event.set()
self.put_barrier.abort()
self.thread.join()
def start(self):
"""Start the image producer."""
self.sess.run([self.put_ops])
self.thread = threading.Thread(target=self._loop_producer)
# Set daemon to true to allow Ctrl + C to terminate all threads.
self.thread.daemon = True
self.thread.start()
def notify_image_consumption(self):
"""Increment the counter of image_producer by 1.
This should only be called by the main thread that consumes images and runs
the model computation. One batch of images should be consumed between
calling start() and the first call to this method. Then, one batch of images
should be consumed between any two successive calls to this method.
"""
if self._should_put():
self.put_barrier.wait()
self.num_gets += 1
def _loop_producer(self):
while not self.done_event.isSet():
self.sess.run([self.put_ops])
self.put_barrier.wait()
class BaseClusterManager(object):
"""The manager for the cluster of servers running the benchmark."""
def __init__(self, params):
worker_hosts = params.worker_hosts.split(',')
ps_hosts = params.ps_hosts.split(',') if params.ps_hosts else []
cluster = {'worker': worker_hosts}
if ps_hosts:
cluster['ps'] = ps_hosts
self._cluster_spec = tf.train.ClusterSpec(cluster)
def get_target(self):
"""Returns a target to be passed to tf.Session()."""
raise NotImplementedError('get_target must be implemented by subclass')
def join_server(self):
raise NotImplementedError('join must be implemented by subclass')
def get_cluster_spec(self):
return self._cluster_spec
def num_workers(self):
return len(self._cluster_spec.job_tasks('worker'))
def num_ps(self):
if 'ps' in self._cluster_spec.jobs:
return len(self._cluster_spec.job_tasks('ps'))
else:
return 0
class GrpcClusterManager(BaseClusterManager):
"""A cluster manager for a cluster networked with gRPC."""
def __init__(self, params, config_proto):
super(GrpcClusterManager, self).__init__(params)
if params.job_name == 'controller':
self._target = 'grpc://%s' % self._cluster_spec.job_tasks('worker')[0]
else:
self._server = tf.train.Server(self._cluster_spec,
job_name=params.job_name,
task_index=params.task_index,
config=config_proto,
protocol=params.server_protocol)
self._target = self._server.target
def get_target(self):
return self._target
def join_server(self):
return self._server.join()
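# An uncalled sketch of the ClusterSpec that BaseClusterManager builds from
# the comma-separated host flags. The host addresses are hypothetical; when
# --ps_hosts is empty, the 'ps' job is simply omitted, as in the constructor
# above.
def _example_cluster_spec():
  worker_hosts = 'host1:2222,host2:2222'.split(',')
  ps_hosts = 'host3:2222'.split(',')
  return tf.train.ClusterSpec({'worker': worker_hosts, 'ps': ps_hosts})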
| 8,485
| 32.541502
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/datasets.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmark dataset utilities.
"""
from abc import abstractmethod
import os
import numpy as np
from six.moves import cPickle
from six.moves import xrange # pylint: disable=redefined-builtin
import tensorflow as tf
from tensorflow.python.platform import gfile
import preprocessing
IMAGENET_NUM_TRAIN_IMAGES = 1281167
IMAGENET_NUM_VAL_IMAGES = 50000
class Dataset(object):
"""Abstract class for cnn benchmarks dataset."""
def __init__(self, name, height=None, width=None, depth=None, data_dir=None,
queue_runner_required=False, num_classes=1000):
self.name = name
self.height = height
self.width = width
self.depth = depth or 3
self.data_dir = data_dir
self._queue_runner_required = queue_runner_required
self._num_classes = num_classes
def tf_record_pattern(self, subset):
return os.path.join(self.data_dir, '%s-*-of-*' % subset)
def reader(self):
return tf.TFRecordReader()
@property
def num_classes(self):
return self._num_classes
@num_classes.setter
def num_classes(self, val):
self._num_classes = val
@abstractmethod
def num_examples_per_epoch(self, subset):
pass
def __str__(self):
return self.name
def get_image_preprocessor(self, input_preprocessor='default'):
if self.use_synthetic_gpu_images():
return preprocessing.SyntheticImagePreprocessor
return _SUPPORTED_INPUT_PREPROCESSORS[self.name][input_preprocessor]
def queue_runner_required(self):
return self._queue_runner_required
def use_synthetic_gpu_images(self):
return not self.data_dir
class ImagenetData(Dataset):
"""Configuration for Imagenet dataset."""
def __init__(self, data_dir=None):
if data_dir is None:
raise ValueError('Data directory not specified')
super(ImagenetData, self).__init__('imagenet', 300, 300, data_dir=data_dir)
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return IMAGENET_NUM_TRAIN_IMAGES
elif subset == 'validation':
return IMAGENET_NUM_VAL_IMAGES
else:
raise ValueError('Invalid data subset "%s"' % subset)
def get_image_preprocessor(self):
print("I choose preprocessor in get_image_preprocessor")
if 'lmdb' in self.data_dir[-4:]:
return preprocessing.lmdbImagePreprocessor
else:
return preprocessing.RecordInputImagePreprocessor
class SyntheticData(Dataset):
"""Configuration for synthetic dataset."""
def __init__(self, unused_data_dir):
super(SyntheticData, self).__init__('synthetic')
def use_synthetic_gpu_images(self):
return True
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return 50000
elif subset == 'validation':
return 10000
else:
raise ValueError('Invalid data subset "%s"' % subset)
class Cifar10Data(Dataset):
"""Configuration for cifar 10 dataset.
It will mount all the input images to memory.
"""
def __init__(self, data_dir=None):
if data_dir is None:
raise ValueError('Data directory not specified')
super(Cifar10Data, self).__init__('cifar10', 32, 32, data_dir=data_dir,
queue_runner_required=True,
num_classes=11)
def read_data_files(self, subset='train'):
"""Reads from data file and return images and labels in a numpy array."""
if subset == 'train':
filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
for i in xrange(1, 6)]
elif subset == 'validation':
filenames = [os.path.join(self.data_dir, 'test_batch')]
else:
raise ValueError('Invalid data subset "%s"' % subset)
inputs = []
for filename in filenames:
with gfile.Open(filename, 'r') as f:
inputs.append(cPickle.load(f))
# See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
# input format.
all_images = np.concatenate(
[each_input['data'] for each_input in inputs]).astype(np.float32)
all_labels = np.concatenate(
[each_input['labels'] for each_input in inputs])
return all_images, all_labels
def num_examples_per_epoch(self, subset='train'):
if subset == 'train':
return 50000
elif subset == 'validation':
return 10000
else:
raise ValueError('Invalid data subset "%s"' % subset)
_SUPPORTED_DATASETS = {
'imagenet': ImagenetData,
'cifar10': Cifar10Data,
'synthetic': SyntheticData
}
_SUPPORTED_INPUT_PREPROCESSORS = {
'imagenet': {
'default': preprocessing.RecordInputImagePreprocessor,
'official_models_imagenet': preprocessing.ImagenetPreprocessor,
},
'cifar10': {
'default': preprocessing.Cifar10ImagePreprocessor
}
}
def create_dataset(data_dir, data_name):
"""Create a Dataset instance based on data_dir and data_name."""
if not data_dir and not data_name:
# When using synthetic data, use synthetic imagenet images by default.
data_name = 'synthetic'
  # Infer dataset name from data_dir if data_name is not provided.
if data_name is None:
for supported_name in _SUPPORTED_DATASETS:
if supported_name in data_dir:
data_name = supported_name
break
else: # Failed to identify dataset name from data dir.
raise ValueError('Could not identify name of dataset. '
'Please specify with --data_name option.')
if data_name not in _SUPPORTED_DATASETS:
    raise ValueError('Unknown dataset. Must be one of %s' % ', '.join(
        sorted(_SUPPORTED_DATASETS.keys())))
return _SUPPORTED_DATASETS[data_name](data_dir)
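# Uncalled usage sketches for create_dataset above. The data directories are
# hypothetical; ImagenetData and SyntheticData only record the path at
# construction time, so nothing is read from disk here.
def _example_create_dataset():
  # Name inferred from the directory because it contains 'imagenet'.
  imagenet = create_dataset('/datasets/imagenet-tfrecords', None)
  # No data_dir and no data_name falls back to synthetic images.
  synthetic = create_dataset(None, None)
  return imagenet, synthetic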
# def create_dataset(data_dir, data_name):
# """Create a Dataset instance based on data_dir and data_name."""
# supported_datasets = {
# 'synthetic': SyntheticData,
# 'imagenet': ImagenetData,
# 'cifar10': Cifar10Data,
# }
# if not data_dir:
# data_name = 'synthetic'
# if not data_name:
# for supported_name in supported_datasets:
# if supported_name in data_dir:
# data_name = supported_name
# break
# if not data_name:
# raise ValueError('Could not identify name of dataset. '
# 'Please specify with --data_name option.')
# if data_name not in supported_datasets:
# raise ValueError('Unknown dataset. Must be one of %s', ', '.join(
# [key for key in sorted(supported_datasets.keys())]))
# return supported_datasets[data_name](data_dir)
| 7,179
| 30.353712
| 80
|
py
|
benchmarks
|
benchmarks-master/scripts/tf_cnn_benchmarks/variable_mgr.py
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Defines VariableMgr and subclasses used to manage variables.
"""
from __future__ import print_function
import re
import tensorflow as tf
import allreduce
import batch_allreduce
import variable_mgr_util
class VariableMgr(object):
"""Abstract superclass for class used by BenchmarkCNN to control variables.
Functions on this class are used to control how variables are created and
managed, and how gradients are computed and applied.
"""
def __init__(self, benchmark_cnn):
self.benchmark_cnn = benchmark_cnn
self.staging_delta_ops = []
self.use_resource_vars = benchmark_cnn.params.use_resource_vars
# A variable for automatic loss scaling.
self.grad_has_inf_nan = None
def each_tower_has_variables(self):
"""Returns True if each GPU tower of the model has separate variables."""
assert False, 'Must be implemented in subclass'
def supports_staged_vars(self):
"""Whether staged variable management is supported."""
return False
def create_outer_variable_scope(self, device_num):
"""Create the tf.variable_scope around all model graph operations."""
del device_num # unused by this implementation
assert False, 'Must be implemented in subclass'
def preprocess_device_grads(self, device_grads):
"""Preprocess the device gradients prior to applying them.
Args:
device_grads: List of lists of (gradient, variable) tuples.
device_grads[t][g] = (gradient, variable), where t is the index of the
tower and g is the index of the gradient-variable pair.
Returns: a tuple of (apply_gradients_devices, gradient_state).
gradient_state is an opaque structure that should be passed to
get_gradients_to_apply() and append_apply_gradients_ops() (in that order).
apply_gradients_devices is a list of devices where the gradients will be
applied with get_gradients_to_apply() and append_apply_gradients_ops().
"""
del device_grads # unused by this implementation
assert False, 'Must be implemented in subclass'
def get_gradients_to_apply(self, device_num, gradient_state):
"""Returns the [(gradient, variable)] list to apply for device_num.
Args:
device_num: indexes into apply_gradients_devices, which was returned by an
earlier call to preprocess_device_grads.
      gradient_state: the second result of a previous call to
        preprocess_device_grads().
"""
del device_num, gradient_state # unused by this implementation
assert False, 'Must be implemented in subclass'
def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops,
loss_scale_params):
"""Adds training ops for grads to 'training_ops'.
Args:
      gradient_state: the second result of preprocess_device_grads().
opt: the underlying optimizer
grads: [(grad, var)] to apply
training_ops: list to which to add ops
loss_scale_params: parameters for loss scaling.
"""
del gradient_state # unused by this implementation
def get_apply_gradients_ops_func():
"""Returns the apply_gradients op."""
return [opt.apply_gradients(grads)]
variable_mgr_util.append_gradients_with_loss_scale(
training_ops, get_apply_gradients_ops_func, loss_scale_params,
self.grad_has_inf_nan)
def get_post_init_ops(self):
"""Returns ops that should run post-initialization."""
return []
def get_devices(self):
"""Returns devices to use for computation; includes replica selection."""
assert False, 'Must be implemented in subclass'
def savable_variables(self):
"""Returns a list/dict of savable variables to pass to tf.train.Saver."""
return tf.global_variables()
def trainable_variables_on_device(self,
rel_device_num,
abs_device_num,
writable=False):
"""Return the set of trainable variables on device.
Args:
rel_device_num: local worker device index.
abs_device_num: global graph device index.
writable: whether to get a reference to the underlying variable.
Returns:
The set of trainable variables on the specified device.
"""
del rel_device_num, writable
if self.each_tower_has_variables():
params = [
v for v in tf.trainable_variables()
if v.name.startswith('v%s/' % abs_device_num)
]
else:
params = tf.trainable_variables()
return params
class VariableMgrIndependent(VariableMgr):
"""VariableMgr that implements the --independent mode for local jobs.
Each GPU has its own copy of the variables, and gradients are
not shared between towers. This can be used to check
performance when no data is moved between GPUs.
"""
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
return tf.variable_scope('v%s' % device_num,
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
return (self.benchmark_cnn.devices, device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
tower_grad = device_grads[device_num]
if self.benchmark_cnn.enable_auto_loss_scale and device_num == 0:
# Since we don't aggregate variables in --independent mode, we cannot tell
# if there are NaNs on all GPUs. So we arbitrarily choose to only check
# NaNs on the first GPU.
has_inf_nan_list = []
for grad, _ in tower_grad:
has_inf_nan_list.append(tf.reduce_all(tf.is_finite(grad)))
self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(has_inf_nan_list))
return tower_grad
def get_devices(self):
return self.benchmark_cnn.raw_devices
class VariableMgrLocalFetchFromPS(VariableMgr):
"""VariableMgr that implements the --parameter_server mode for local jobs.
Variables are stored on a parameter server. For each step, each tower gets
a copy of the variables from the parameter server, and sends its gradients
to the param server.
"""
def each_tower_has_variables(self):
return False
def create_outer_variable_scope(self, device_num):
return tf.variable_scope('v', reuse=bool(device_num),
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
return ([self.benchmark_cnn.param_server_device], device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
assert device_num == 0
device_grads = gradient_state
agg_grads, self.grad_has_inf_nan = (
variable_mgr_util.
aggregate_gradients_using_copy_with_variable_colocation(
device_grads,
use_mean=True,
check_inf_nan=self.benchmark_cnn.enable_auto_loss_scale))
return agg_grads
def get_devices(self):
raw_devices = self.benchmark_cnn.raw_devices
if self.benchmark_cnn.local_parameter_device_flag == 'gpu':
return [
variable_mgr_util.ParamServerDeviceSetter(d, raw_devices)
for d in raw_devices
]
else:
return [
tf.train.replica_device_setter(
worker_device=d,
ps_device=self.benchmark_cnn.param_server_device,
ps_tasks=1) for d in raw_devices
]
class VariableMgrLocalFetchFromStagedPS(VariableMgrLocalFetchFromPS):
"""Implements fetching a local variable through staging buffers.
"""
def __init__(self, benchmark_cnn):
super(VariableMgrLocalFetchFromStagedPS, self).__init__(benchmark_cnn)
# A data structure to track where the variables are used on each device.
# Indexed by device_num and var_name, each entry stores the "put" and "get"
# ops used for that variable on that device:
# staging_vars_on_devices[device_num][var_name] == (put_op, get_op)
self.staging_vars_on_devices = [
dict() for _ in self.benchmark_cnn.raw_devices
]
def supports_staged_vars(self):
return True
def create_outer_variable_scope(self, device_num):
self._custom_getter = variable_mgr_util.StagedVariableGetter(
device_num, self.benchmark_cnn.raw_devices, None, self)
return tf.variable_scope(
'v', reuse=bool(device_num), custom_getter=self._custom_getter,
use_resource=self.use_resource_vars)
def trainable_variables_on_device(self,
rel_device_num,
abs_device_num,
writable=False):
return self._custom_getter.trainable_variables_on_device(
rel_device_num, abs_device_num, writable=writable)
class VariableMgrLocalReplicated(VariableMgr):
"""VariableMgr that implements the --replicated mode for local jobs.
Each GPU has its own copy of the variables. To apply gradients,
either a local all-reduce algorithm is applied or a regular
cross-device aggregation is used to replicate the combined
gradients to all towers.
"""
def __init__(self, benchmark_cnn, all_reduce_spec,
agg_small_grads_max_bytes, agg_small_grads_max_group,
allreduce_merge_scope):
super(VariableMgrLocalReplicated, self).__init__(benchmark_cnn)
if all_reduce_spec:
spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
if len(spec) != 1:
raise ValueError(
'replicated mode does not support hybrid all-reduce strategies')
self._all_reduce_spec = spec[0]
else:
self._all_reduce_spec = None
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
self._warmup_ops = []
self._allreduce_merge_scope = allreduce_merge_scope
self._gradient_put_ops = None
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
return tf.variable_scope('v%s' % device_num,
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
compact_grads = (self.benchmark_cnn.params.use_fp16 and
self.benchmark_cnn.params.compact_gradient_transfer)
defer_grads = (self.benchmark_cnn.params.variable_consistency == 'relaxed')
grads_to_reduce = [[g for g, _ in grad_vars] for grad_vars in device_grads]
algorithm = batch_allreduce.algorithm_from_params(self.benchmark_cnn.params)
reduced_grads, self._warmup_ops = algorithm.batch_all_reduce(
grads_to_reduce, self.benchmark_cnn.params.gradient_repacking,
compact_grads, defer_grads)
if self.benchmark_cnn.enable_auto_loss_scale:
# Check for infs or nans
is_finite_list = []
for tower_grads in reduced_grads:
with tf.colocate_with(tower_grads[0]):
# TODO(tanmingxing): Create fused op that takes in a list of tensors
# as input and returns scalar boolean True if there are any infs/nans.
is_finite_list.append(tf.reduce_all(
[tf.reduce_all(tf.is_finite(g)) for g in tower_grads]))
self.grad_has_inf_nan = tf.logical_not(tf.reduce_all(is_finite_list))
reduced_device_grads = [[
(g, v) for g, (_, v) in zip(grads, grad_vars)
] for grads, grad_vars in zip(reduced_grads, device_grads)]
return self.benchmark_cnn.devices, reduced_device_grads
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
return device_grads[device_num]
def get_post_init_ops(self):
# Copy initialized values for variables on GPU 0 to other GPUs.
global_vars = tf.global_variables()
var_by_name = dict([(v.name, v) for v in global_vars])
post_init_ops = []
for v in global_vars:
split_name = v.name.split('/')
# TODO(b/62630508): use more specific prefix than v or v0.
if split_name[0] == 'v0' or not v.name.startswith('v'):
continue
split_name[0] = 'v0'
copy_from = var_by_name['/'.join(split_name)]
post_init_ops.append(v.assign(copy_from.read_value()))
post_init_ops += self._warmup_ops
return post_init_ops
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
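# An uncalled sketch of the variable-name rewrite that get_post_init_ops above
# relies on in replicated mode: each tower copy 'v<N>/...' is initialized from
# the corresponding 'v0/...' variable. The variable name used here is
# hypothetical.
def _example_replicated_copy_source(var_name='v1/cg/conv0/conv2d/kernel:0'):
  split_name = var_name.split('/')
  if not var_name.startswith('v') or split_name[0] == 'v0':
    return None  # Already the master copy (or not a tower variable).
  split_name[0] = 'v0'
  return '/'.join(split_name)  # -> 'v0/cg/conv0/conv2d/kernel:0'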
class VariableMgrDistributedAllReduce(VariableMgr):
"""VariableMgr that implements the --distributed_all_reduce mode.
Each GPU has its own copy of the variables. To apply gradients,
the specified all-reduce algorithm is used to reduce the gradients
and replicate the final value to all GPUs.
"""
def __init__(self, benchmark_cnn, all_reduce_spec, job_name,
num_workers, agg_small_grads_max_bytes,
agg_small_grads_max_group, allreduce_merge_scope):
super(VariableMgrDistributedAllReduce, self).__init__(benchmark_cnn)
if not all_reduce_spec:
raise ValueError(
'distributed_all_reduce requires a non-empty all_reduce_spec')
self._all_reduce_spec = allreduce.parse_all_reduce_spec(all_reduce_spec)
self._all_reduce_device_prefixes = (
allreduce.build_all_reduce_device_prefixes(job_name, num_workers))
self._num_workers = num_workers
self._agg_small_grads_max_bytes = agg_small_grads_max_bytes
self._agg_small_grads_max_group = agg_small_grads_max_group
self._allreduce_merge_scope = allreduce_merge_scope
if not self._all_reduce_spec:
raise ValueError('all_reduce_spec must be specified')
self._single_session = True
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
"""Create a scope for the named device.
Args:
device_num: index of device for variable scope. (Note that
device_num spans all processes in cluster since a single global
graph is used.)
Returns:
the requested variable_scope
"""
return tf.variable_scope('v%s' % device_num,
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
remaining_grads = device_grads
aggregated_grads = []
for spec_tuple in self._all_reduce_spec:
if spec_tuple.limit < 0:
this_grads = remaining_grads
remaining_grads = []
else:
(this_grads, remaining_grads) = allreduce.split_grads_by_size(
spec_tuple.limit, remaining_grads)
if this_grads:
range_agg_grads = allreduce.sum_gradients_all_reduce(
self._single_session,
self._all_reduce_device_prefixes,
this_grads,
self._num_workers,
spec_tuple.alg,
spec_tuple.shards,
self.benchmark_cnn.gpu_indices,
agg_small_grads_max_bytes=self._agg_small_grads_max_bytes,
agg_small_grads_max_group=self._agg_small_grads_max_group,
allreduce_merge_scope=self._allreduce_merge_scope)
if not aggregated_grads:
aggregated_grads = range_agg_grads
else:
assert len(aggregated_grads) == len(range_agg_grads)
for i in range(len(aggregated_grads)):
aggregated_grads[i] += range_agg_grads[i]
assert not remaining_grads
full_device_set = []
for grads in device_grads:
g, v = grads[0]
del v
full_device_set.append(g.device)
return (full_device_set, aggregated_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
if device_num >= len(device_grads):
raise ValueError('device_num %d exceeds length of device_grads (%d)' %
(device_num, len(device_grads)))
return device_grads[device_num]
def get_post_init_ops(self):
"""Copy initialized values for variables to other devices."""
global_vars = tf.global_variables()
var_by_name = dict([(v.name, v) for v in global_vars])
post_init_ops = []
for v in global_vars:
split_name = v.name.split('/')
# TODO(b/62630508): use more specific prefix than v or v0.
if split_name[0] == 'v0' or not v.name.startswith('v'):
continue
split_name[0] = 'v0'
copy_from = var_by_name['/'.join(split_name)]
post_init_ops.append(v.assign(copy_from.read_value()))
return post_init_ops
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
# TODO(tucker): Merge this mode with DistributedAllReduce.
class VariableMgrCollectiveAllReduce(VariableMgr):
"""VariableMgr that implements the --collective_all_reduce mode.
Each GPU has its own copy of the variables. To apply gradients
the TF native collective all-reduce op is used to reduce the gradients
and replicate the final value to all GPUs.
"""
def __init__(self, benchmark_cnn, all_reduce_spec,
num_workers, num_gpus, task_id, allreduce_merge_scope):
super(VariableMgrCollectiveAllReduce, self).__init__(benchmark_cnn)
if all_reduce_spec:
assert all_reduce_spec == 'collective'
self._all_reduce_spec = 'collective'
self._num_workers = num_workers
self._num_gpus = num_gpus
self._task_id = task_id
self._allreduce_merge_scope = allreduce_merge_scope
self._instance_key_counter = 10000
self._instance_key_table = dict()
self._single_session = False
# List of prefixes for generating PS devices, unused here.
self._all_reduce_device_prefixes = None
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
"""Create a scope for the named device.
Args:
device_num: index of device for variable scope.
Returns:
the requested variable_scope
"""
return tf.variable_scope('v%s' % device_num)
def preprocess_device_grads(self, device_grads):
reduced_grads = allreduce.sum_gradients_all_reduce(
self._single_session,
self._all_reduce_device_prefixes,
device_grads,
self._num_workers,
'collective',
1, # spec_tuple.shards,
self.benchmark_cnn.gpu_indices,
allreduce_merge_scope=self._allreduce_merge_scope)
assert len(reduced_grads) == len(device_grads)
full_device_set = []
for grads in device_grads:
g, _ = grads[0]
full_device_set.append(g.device)
return (full_device_set, reduced_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state
if device_num >= len(device_grads):
raise ValueError('device_num %d exceeds length of device_grads (%d)' %
(device_num, len(device_grads)))
return device_grads[device_num]
def _get_instance_key(self, name):
if name not in self._instance_key_table.keys():
self._instance_key_counter += 1
self._instance_key_table[name] = self._instance_key_counter
return self._instance_key_table[name]
def get_post_init_ops(self):
"""Broadcast initialized values of variables to other devices.
Returns:
At task 0 device 0, broadcast_send.
At all other devices and tasks, broadcast_recv.
"""
global_vars = tf.global_variables()
group_size = self._num_workers * self._num_gpus
post_init_ops = []
# Gather variables into same-var-different-device groups.
vars_by_suffix = dict()
for v in global_vars:
split_name = v.name.split('/')
mo = re.match(r'v(\d+)$', split_name[0])
if mo:
device_id = int(mo.group(1))
suffix = '/'.join(split_name[1:])
if suffix in vars_by_suffix.keys():
vars_by_suffix[suffix].append(v)
else:
vars_by_suffix[suffix] = [v]
# Generate broadcast ops for each such group.
for suffix in sorted(vars_by_suffix):
vlist = vars_by_suffix[suffix]
assert self._num_gpus == len(vlist)
devices = [v.device for v in vlist]
# NOTE: this key should generate the same value for all tasks
group_key = allreduce.collective_group_key(devices)
group_size = self._num_workers * len(devices)
instance_key = self._get_instance_key(suffix)
for v in vlist:
split_name = v.name.split('/')
mo = re.match(r'v(\d+)$', split_name[0])
if mo:
device_id = int(mo.group(1))
if (self._task_id == 0 and device_id == 0):
with tf.device(v.device):
bcast_send = allreduce.broadcast_send(
v, v.shape, v.dtype, group_size, group_key, instance_key)
post_init_ops.append(v.assign(bcast_send))
else:
with tf.device(v.device):
bcast_recv = allreduce.broadcast_recv(
v.shape, v.dtype, group_size, group_key, instance_key)
post_init_ops.append(v.assign(bcast_recv))
return post_init_ops
def savable_variables(self):
"""Return the set of variables used for saving/loading the model."""
params = []
if self._task_id == 0:
for v in tf.global_variables():
split_name = v.name.split('/')
if split_name[0] == 'v0' or not v.name.startswith('v'):
params.append(v)
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
class VariableMgrDistributedFetchFromPS(VariableMgr):
"""Implements --variable_update=parameter_server mode for distributed jobs.
Variables are stored on a parameter server. For each step, each tower gets
a copy of the variables from the parameter server, and sends its gradients
to the param server.
"""
def each_tower_has_variables(self):
return False
def create_outer_variable_scope(self, device_num):
if self.benchmark_cnn.local_parameter_device_flag == 'gpu':
caching_devices = self.benchmark_cnn.raw_devices
else:
caching_devices = [self.benchmark_cnn.cpu_device]
custom_getter = variable_mgr_util.OverrideCachingDevice(
caching_devices, self.benchmark_cnn.cpu_device, 1024 * 64)
return tf.variable_scope(
'v', reuse=bool(device_num), custom_getter=custom_getter,
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
# Returns (gradient_devices, gradient_state)
return ([self.benchmark_cnn.param_server_device], device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
assert device_num == 0
agg_grads, self.grad_has_inf_nan = (
variable_mgr_util.aggregate_gradients_using_copy(
gradient_state,
use_mean=True,
check_inf_nan=self.benchmark_cnn.enable_auto_loss_scale))
return agg_grads
def get_devices(self):
ps_strategy = tf.contrib.training.GreedyLoadBalancingStrategy(
self.benchmark_cnn.num_ps, tf.contrib.training.byte_size_load_fn)
return [
tf.train.replica_device_setter(
worker_device=d,
cluster=self.benchmark_cnn.cluster_manager.get_cluster_spec(),
ps_strategy=ps_strategy) for d in self.benchmark_cnn.raw_devices
]
class VariableMgrDistributedFetchFromStagedPS(
VariableMgrDistributedFetchFromPS):
"""Extends VariableMgrDistributedFetchFromPS for --staged_vars."""
def __init__(self, benchmark_cnn):
super(VariableMgrDistributedFetchFromStagedPS, self).__init__(benchmark_cnn)
self.staging_vars_on_devices = [
dict() for _ in self.benchmark_cnn.raw_devices
]
self.staged_vars_on_cpu = {}
def create_outer_variable_scope(self, device_num):
self._custom_getter = variable_mgr_util.StagedVariableGetter(
device_num, self.benchmark_cnn.raw_devices,
self.benchmark_cnn.cpu_device, self)
return tf.variable_scope(
'v', reuse=bool(device_num), custom_getter=self._custom_getter,
use_resource=self.use_resource_vars)
def supports_staged_vars(self):
return True
def trainable_variables_on_device(self,
rel_device_num,
abs_device_num,
writable=False):
return self._custom_getter.trainable_variables_on_device(
rel_device_num, abs_device_num, writable=writable)
class VariableMgrDistributedReplicated(VariableMgr):
"""VariableMgr that implements the --distributed_replicated mode.
Each GPU has a copy of the variables, and updates its copy after the
parameter servers are all updated with the gradients from all servers. Only
works with cross_replica_sync=true. Unlike 'replicated', does not use nccl
all-reduce for replicating within a server.
"""
def each_tower_has_variables(self):
return True
def create_outer_variable_scope(self, device_num):
return tf.variable_scope(
'v%s' % device_num,
custom_getter=variable_mgr_util.OverrideToLocalVariableIfNotPsVar(),
use_resource=self.use_resource_vars)
def preprocess_device_grads(self, device_grads):
return ([self.benchmark_cnn.param_server_device], device_grads)
def get_gradients_to_apply(self, device_num, gradient_state):
device_grads = gradient_state # From 2nd result of preprocess_device_grads.
avg_grads, self.grad_has_inf_nan = (
variable_mgr_util.aggregate_gradients_using_copy_with_device_selection(
self.benchmark_cnn,
device_grads,
use_mean=True,
check_inf_nan=self.benchmark_cnn.enable_auto_loss_scale))
# Make shadow variable on a parameter server for each original trainable
# variable.
for i, (g, v) in enumerate(avg_grads):
my_name = variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/' + v.name
if my_name.endswith(':0'):
my_name = my_name[:-2]
new_v = tf.get_variable(
my_name,
dtype=v.dtype.base_dtype,
initializer=v.initial_value,
trainable=True)
avg_grads[i] = (g, new_v)
return avg_grads
def append_apply_gradients_ops(self, gradient_state, opt, grads, training_ops,
loss_scale_params):
device_grads = gradient_state # From 2nd result of preprocess_device_grads.
def get_apply_gradients_ops_func():
"""Returns a list of ops for updating gradients."""
apply_gradients_ops = []
# For each variable, apply the combined gradients for this server on
# the parameter server, and then wait for all other servers to do this.
for i, (g, v) in enumerate(grads):
apply_gradient_op = opt.apply_gradients([(g, v)])
barrier = self.benchmark_cnn.add_sync_queues_and_barrier(
'replicate_variable_%s' % i, [apply_gradient_op])
with tf.control_dependencies([barrier]):
with tf.device(self.benchmark_cnn.cpu_device):
updated_value = v.read_value()
for my_d in range(len(self.benchmark_cnn.devices)):
apply_gradients_ops.append(
device_grads[my_d][i][1].assign(updated_value))
return apply_gradients_ops
variable_mgr_util.append_gradients_with_loss_scale(
training_ops, get_apply_gradients_ops_func, loss_scale_params,
self.grad_has_inf_nan)
def _strip_port(self, s):
if s.endswith(':0'):
return s[:-2]
return s
def get_post_init_ops(self):
# Copy initialized variables for variables on the parameter server
# to the local copy of the variable.
local_vars = tf.local_variables()
local_var_by_name = dict(
[(self._strip_port(v.name), v) for v in local_vars])
post_init_ops = []
for v in tf.global_variables():
if v.name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/'):
prefix = self._strip_port(
v.name[len(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0'):])
for i in range(self.benchmark_cnn.num_gpus):
name = 'v%s%s' % (i, prefix)
if name in local_var_by_name:
copy_to = local_var_by_name[name]
post_init_ops.append(copy_to.assign(v.read_value()))
return post_init_ops
def _remove_shadow_var_prefix_if_present(self, var_name):
if var_name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/'):
return var_name[len(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/'):]
else:
return var_name
def var_dict_name(self, v):
return self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
def savable_variables(self):
"""Returns a list/dict of savable variables to pass to tf.train.Saver."""
params = {}
for v in tf.global_variables():
assert (v.name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/')
or v.name in ('global_step:0', 'loss_scale:0',
'loss_scale_normal_steps:0')), (
'Invalid global variable: %s' % v)
# We store variables in the checkpoint with the shadow variable prefix
# removed so we can evaluate checkpoints in non-distributed replicated
# mode. The checkpoints can also be loaded for training in
# distributed_replicated mode.
name = self._strip_port(self._remove_shadow_var_prefix_if_present(v.name))
params[name] = v
for v in tf.local_variables():
# Non-trainable variables, such as batch norm moving averages, do not have
# corresponding global shadow variables, so we add them here. Trainable
# local variables have corresponding global shadow variables, which were
# added in the global variable loop above.
if v.name.startswith('v0/') and v not in tf.trainable_variables():
params[self._strip_port(v.name)] = v
return params
def get_devices(self):
return self.benchmark_cnn.raw_devices
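# An uncalled sketch of the checkpoint-name mapping used by
# VariableMgrDistributedReplicated.savable_variables above: the shadow-variable
# prefix and the ':0' port suffix are stripped so checkpoints can also be read
# in non-distributed replicated mode. The variable name is hypothetical.
def _example_distributed_replicated_checkpoint_name(
    var_name=variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/v0/cg/conv0/kernel:0'):
  if var_name.startswith(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/'):
    var_name = var_name[len(variable_mgr_util.PS_SHADOW_VAR_PREFIX + '/'):]
  if var_name.endswith(':0'):
    var_name = var_name[:-2]
  return var_name  # -> 'v0/cg/conv0/kernel'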
| 30,926
| 37.610487
| 80
|
py
|