repo stringlengths 2 99 | file stringlengths 14 239 | code stringlengths 20 3.99M | file_length int64 20 3.99M | avg_line_length float64 9.73 128 | max_line_length int64 11 86.4k | extension_type stringclasses 1 value |
|---|---|---|---|---|---|---|
ColBERT | ColBERT-master/colbert/ranking/reranking.py | import os
import time
import faiss
import random
import torch
from colbert.utils.runs import Run
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.evaluation.ranking_logger import RankingLogger
from colbert.utils.utils import print_message, batch
from colbert.ranking.rankers import Ranker
def rerank(args):
    """Re-rank each query's top-K candidate passages with ColBERT.

    Expects `args` to carry the model (`args.colbert`), the queries
    (`args.queries`, an ordered mapping qid -> text) and the candidate pids
    per query (`args.topK_pids`). Results are written via a RankingLogger.
    """
    inference = ModelInference(args.colbert, amp=args.amp)
    # faiss_depth=None disables candidate generation: we only re-rank.
    ranker = Ranker(args, inference, faiss_depth=None)

    ranking_logger = RankingLogger(Run.path, qrels=None)
    milliseconds = 0

    with ranking_logger.context('ranking.tsv', also_save_annotations=False) as rlogger:
        queries = args.queries
        qids_in_order = list(queries.keys())

        for qoffset, qbatch in batch(qids_in_order, 100, provide_offset=True):
            qbatch_text = [queries[qid] for qid in qbatch]
            qbatch_pids = [args.topK_pids[qid] for qid in qbatch]

            batch_rankings = []
            for query_idx, (q, pids) in enumerate(zip(qbatch_text, qbatch_pids)):
                # Synchronize so the wall-clock timing below is meaningful.
                torch.cuda.synchronize('cuda:0')
                s = time.time()

                Q = ranker.encode([q])
                pids, scores = ranker.rank(Q, pids=pids)

                torch.cuda.synchronize()
                milliseconds += (time.time() - s) * 1000.0

                if len(pids):
                    # Report the top hit plus the running mean latency per query.
                    print(qoffset+query_idx, q, len(scores), len(pids), scores[0], pids[0],
                          milliseconds / (qoffset+query_idx+1), 'ms')

                batch_rankings.append(zip(pids, scores))

            for query_idx, (qid, ranking) in enumerate(zip(qbatch, batch_rankings)):
                query_idx = qoffset + query_idx

                if query_idx % 100 == 0:
                    print_message(f"#> Logging query #{query_idx} (qid {qid}) now...")

                ranking = [(score, pid, None) for pid, score in ranking]
                rlogger.log(qid, ranking, is_ranked=True)

    print('\n\n')
    print(ranking_logger.filename)
    print("#> Done.")
    print('\n\n')
| 2,042 | 31.951613 | 91 | py |
ColBERT | ColBERT-master/colbert/ranking/faiss_index.py | import os
import time
import faiss
import random
import torch
from multiprocessing import Pool
from colbert.modeling.inference import ModelInference
from colbert.utils.utils import print_message, flatten, batch
from colbert.indexing.loaders import load_doclens
# First-stage retriever: searches a FAISS index over token embeddings and maps
# the returned embedding ids back to passage ids (pids).
class FaissIndex():
# Loads the FAISS index and builds the embedding-id -> pid lookup.
# `part_range` optionally restricts retrieval to a sub-range of index partitions.
def __init__(self, index_path, faiss_index_path, nprobe, part_range=None):
print_message("#> Loading the FAISS index from", faiss_index_path, "..")
# The partition range this FAISS file covers is encoded in its filename,
# e.g. "....100-150.faiss" -> ['100', '150'].
faiss_part_range = os.path.basename(faiss_index_path).split('.')[-2].split('-')
if len(faiss_part_range) == 2:
faiss_part_range = range(*map(int, faiss_part_range))
# The requested part_range must lie within what this FAISS file covers.
assert part_range[0] in faiss_part_range, (part_range, faiss_part_range)
assert part_range[-1] in faiss_part_range, (part_range, faiss_part_range)
else:
faiss_part_range = None
self.part_range = part_range
self.faiss_part_range = faiss_part_range
self.faiss_index = faiss.read_index(faiss_index_path)
# FAISS search width; higher nprobe trades speed for recall.
self.faiss_index.nprobe = nprobe
print_message("#> Building the emb2pid mapping..")
# Per-partition lists of passage lengths (number of embeddings per passage).
all_doclens = load_doclens(index_path, flatten=False)
pid_offset = 0
if faiss_part_range is not None:
print(f"#> Restricting all_doclens to the range {faiss_part_range}.")
# pids are global, so count the passages preceding this partition range.
pid_offset = len(flatten(all_doclens[:faiss_part_range.start]))
all_doclens = all_doclens[faiss_part_range.start:faiss_part_range.stop]
self.relative_range = None
if self.part_range is not None:
# Translate part_range into a range of pids relative to this FAISS file's
# first partition; retrieve() filters candidates against it.
start = self.faiss_part_range.start if self.faiss_part_range is not None else 0
a = len(flatten(all_doclens[:self.part_range.start - start]))
b = len(flatten(all_doclens[:self.part_range.stop - start]))
self.relative_range = range(a, b)
print(f"self.relative_range = {self.relative_range}")
all_doclens = flatten(all_doclens)
total_num_embeddings = sum(all_doclens)
# emb2pid[i] = pid of the passage that embedding i belongs to.
self.emb2pid = torch.zeros(total_num_embeddings, dtype=torch.int)
offset_doclens = 0
for pid, dlength in enumerate(all_doclens):
self.emb2pid[offset_doclens: offset_doclens + dlength] = pid_offset + pid
offset_doclens += dlength
print_message("len(self.emb2pid) =", len(self.emb2pid))
# Worker pool used to deduplicate large pid lists in embedding_ids_to_pids().
self.parallel_pool = Pool(16)
# Returns a list (one entry per query in Q) of candidate pid lists.
def retrieve(self, faiss_depth, Q, verbose=False):
embedding_ids = self.queries_to_embedding_ids(faiss_depth, Q, verbose=verbose)
pids = self.embedding_ids_to_pids(embedding_ids, verbose=verbose)
if self.relative_range is not None:
# Keep only pids assigned to this instance's partition range.
pids = [[pid for pid in pids_ if pid in self.relative_range] for pids_ in pids]
return pids
# Runs the FAISS search for every query token, returning the nearest
# embedding ids per query (faiss_depth per token, concatenated per query).
def queries_to_embedding_ids(self, faiss_depth, Q, verbose=True):
# Flatten into a matrix for the faiss search.
num_queries, embeddings_per_query, dim = Q.size()
Q_faiss = Q.view(num_queries * embeddings_per_query, dim).cpu().contiguous()
# Search in large batches with faiss.
print_message("#> Search in batches with faiss. \t\t",
f"Q.size() = {Q.size()}, Q_faiss.size() = {Q_faiss.size()}",
condition=verbose)
embeddings_ids = []
# Batch size is a multiple of tokens-per-query so queries are never split.
faiss_bsize = embeddings_per_query * 5000
for offset in range(0, Q_faiss.size(0), faiss_bsize):
endpos = min(offset + faiss_bsize, Q_faiss.size(0))
print_message("#> Searching from {} to {}...".format(offset, endpos), condition=verbose)
some_Q_faiss = Q_faiss[offset:endpos].float().numpy()
_, some_embedding_ids = self.faiss_index.search(some_Q_faiss, faiss_depth)
embeddings_ids.append(torch.from_numpy(some_embedding_ids))
embedding_ids = torch.cat(embeddings_ids)
# Reshape to (number of queries, non-unique embedding IDs per query)
embedding_ids = embedding_ids.view(num_queries, embeddings_per_query * embedding_ids.size(1))
return embedding_ids
# Maps embedding ids to pids via emb2pid and deduplicates per query.
def embedding_ids_to_pids(self, embedding_ids, verbose=True):
# Find unique PIDs per query.
print_message("#> Lookup the PIDs..", condition=verbose)
all_pids = self.emb2pid[embedding_ids]
print_message(f"#> Converting to a list [shape = {all_pids.size()}]..", condition=verbose)
all_pids = all_pids.tolist()
print_message("#> Removing duplicates (in parallel if large enough)..", condition=verbose)
if len(all_pids) > 5000:
all_pids = list(self.parallel_pool.map(uniq, all_pids))
else:
all_pids = list(map(uniq, all_pids))
print_message("#> Done with embedding_ids_to_pids().", condition=verbose)
return all_pids
def uniq(l):
    """Return the distinct elements of *l* as a list (order not preserved)."""
    distinct = set(l)
    return list(distinct)
| 4,820 | 38.195122 | 101 | py |
ColBERT | ColBERT-master/colbert/ranking/rankers.py | import torch
from functools import partial
from colbert.ranking.index_part import IndexPart
from colbert.ranking.faiss_index import FaissIndex
from colbert.utils.utils import flatten, zipstar
class Ranker():
    """Scores candidate passages for queries; optionally generates candidates
    with a FAISS index when a retrieval depth is given."""

    def __init__(self, args, inference, faiss_depth=1024):
        self.inference = inference
        self.faiss_depth = faiss_depth

        if faiss_depth is not None:
            # Candidate generation enabled: bind retrieve() to this depth.
            self.faiss_index = FaissIndex(args.index_path, args.faiss_index_path,
                                          args.nprobe, part_range=args.part_range)
            self.retrieve = partial(self.faiss_index.retrieve, self.faiss_depth)

        self.index = IndexPart(args.index_path, dim=inference.colbert.dim,
                               part_range=args.part_range, verbose=True)

    def encode(self, queries):
        """Encode a list/tuple of query strings into ColBERT query embeddings."""
        assert type(queries) in [list, tuple], type(queries)

        # Only ask the encoder to batch when there are many queries.
        bsize = 512 if len(queries) > 512 else None
        return self.inference.queryFromText(queries, bsize=bsize)

    def rank(self, Q, pids=None):
        """Rank candidates for a single encoded query Q.

        When *pids* is None, candidates are first retrieved via FAISS.
        Returns (pids, scores) sorted by descending score.
        """
        pids = self.retrieve(Q, verbose=False)[0] if pids is None else pids

        assert type(pids) in [list, tuple], type(pids)
        assert Q.size(0) == 1, (len(pids), Q.size())
        assert all(type(pid) is int for pid in pids)

        scores = []
        if len(pids) > 0:
            Q = Q.permute(0, 2, 1)
            scores = self.index.rank(Q, pids)

            order = torch.tensor(scores).sort(descending=True)
            pids = torch.tensor(pids)[order.indices].tolist()
            scores = order.values.tolist()

        return pids, scores
| 1,520 | 33.568182 | 122 | py |
ColBERT | ColBERT-master/colbert/modeling/inference.py | import torch
from colbert.modeling.colbert import ColBERT
from colbert.modeling.tokenization import QueryTokenizer, DocTokenizer
from colbert.utils.amp import MixedPrecisionManager
from colbert.parameters import DEVICE
class ModelInference():
    """Thin inference-time wrapper around a trained ColBERT model.

    Bundles query/document tokenization, no-grad encoding under an optional
    mixed-precision (AMP) context, and late-interaction scoring.
    """

    # NOTE: the annotation is a string so importing this module does not
    # require ColBERT to be resolvable at class-creation time.
    def __init__(self, colbert: "ColBERT", amp=False):
        # Inference assumes eval mode (no dropout etc.).
        assert colbert.training is False

        self.colbert = colbert
        self.query_tokenizer = QueryTokenizer(colbert.query_maxlen)
        self.doc_tokenizer = DocTokenizer(colbert.doc_maxlen)

        self.amp_manager = MixedPrecisionManager(amp)

    def query(self, *args, to_cpu=False, **kw_args):
        """Encode pre-tokenized queries; move the result to CPU if requested."""
        with torch.no_grad():
            with self.amp_manager.context():
                Q = self.colbert.query(*args, **kw_args)
                return Q.cpu() if to_cpu else Q

    def doc(self, *args, to_cpu=False, **kw_args):
        """Encode pre-tokenized documents; move the result to CPU if requested."""
        with torch.no_grad():
            with self.amp_manager.context():
                D = self.colbert.doc(*args, **kw_args)
                return D.cpu() if to_cpu else D

    def queryFromText(self, queries, bsize=None, to_cpu=False):
        """Tokenize and encode a list of query strings, optionally in batches of *bsize*."""
        if bsize:
            batches = self.query_tokenizer.tensorize(queries, bsize=bsize)
            batches = [self.query(input_ids, attention_mask, to_cpu=to_cpu)
                       for input_ids, attention_mask in batches]
            return torch.cat(batches)

        input_ids, attention_mask = self.query_tokenizer.tensorize(queries)
        # Fix: honor to_cpu on the unbatched path too (it was silently ignored).
        return self.query(input_ids, attention_mask, to_cpu=to_cpu)

    def docFromText(self, docs, bsize=None, keep_dims=True, to_cpu=False):
        """Tokenize and encode a list of document strings.

        With keep_dims=True a single padded 3D tensor is returned; otherwise a
        list of per-document 2D tensors, restored to the original input order.
        """
        if bsize:
            batches, reverse_indices = self.doc_tokenizer.tensorize(docs, bsize=bsize)

            batches = [self.doc(input_ids, attention_mask, keep_dims=keep_dims, to_cpu=to_cpu)
                       for input_ids, attention_mask in batches]

            if keep_dims:
                D = _stack_3D_tensors(batches)
                # Undo the length-based sorting performed during tensorization.
                return D[reverse_indices]

            D = [d for batch in batches for d in batch]
            return [D[idx] for idx in reverse_indices.tolist()]

        input_ids, attention_mask = self.doc_tokenizer.tensorize(docs)
        # Fix: honor to_cpu on the unbatched path too (it was silently ignored).
        return self.doc(input_ids, attention_mask, keep_dims=keep_dims, to_cpu=to_cpu)

    def score(self, Q, D, mask=None, lengths=None, explain=False):
        """Late-interaction score: for each query token take the max similarity
        over document tokens, then sum over query tokens.

        Expects Q as (batch, dim, query_len) and D as (batch, doc_len, dim).
        *lengths* may be given instead of *mask* to mask padded doc positions.
        """
        if lengths is not None:
            assert mask is None, "don't supply both mask and lengths"

            # Build a (batch, doc_len) boolean mask: True for real tokens.
            mask = torch.arange(D.size(1), device=DEVICE) + 1
            mask = mask.unsqueeze(0) <= lengths.to(DEVICE).unsqueeze(-1)

        scores = (D @ Q)
        scores = scores if mask is None else scores * mask.unsqueeze(-1)
        scores = scores.max(1)

        if explain:
            assert False, "TODO"

        return scores.values.sum(-1).cpu()
def _stack_3D_tensors(groups):
bsize = sum([x.size(0) for x in groups])
maxlen = max([x.size(1) for x in groups])
hdim = groups[0].size(2)
output = torch.zeros(bsize, maxlen, hdim, device=groups[0].device, dtype=groups[0].dtype)
offset = 0
for x in groups:
endpos = offset + x.size(0)
output[offset:endpos, :x.size(1)] = x
offset = endpos
return output
| 3,132 | 34.602273 | 117 | py |
ColBERT | ColBERT-master/colbert/modeling/colbert.py | import string
import torch
import torch.nn as nn
from transformers import BertPreTrainedModel, BertModel, BertTokenizerFast
from colbert.parameters import DEVICE
# ColBERT model: BERT encoder + linear projection down to `dim`, with
# late-interaction (MaxSim) scoring between query and document token embeddings.
class ColBERT(BertPreTrainedModel):
def __init__(self, config, query_maxlen, doc_maxlen, mask_punctuation, dim=128, similarity_metric='cosine'):
super(ColBERT, self).__init__(config)
self.query_maxlen = query_maxlen
self.doc_maxlen = doc_maxlen
# Either 'cosine' or 'l2'; see score().
self.similarity_metric = similarity_metric
self.dim = dim
self.mask_punctuation = mask_punctuation
self.skiplist = {}
if self.mask_punctuation:
self.tokenizer = BertTokenizerFast.from_pretrained('bert-base-uncased')
# Punctuation symbols and their token ids, zeroed out in doc() via mask().
self.skiplist = {w: True
for symbol in string.punctuation
for w in [symbol, self.tokenizer.encode(symbol, add_special_tokens=False)[0]]}
self.bert = BertModel(config)
# Projects BERT hidden states to the ColBERT embedding dimension (no bias).
self.linear = nn.Linear(config.hidden_size, dim, bias=False)
self.init_weights()
# Score a batch of (query, document) pairs; Q and D are tokenized input tuples.
def forward(self, Q, D):
return self.score(self.query(*Q), self.doc(*D))
# Encode queries into L2-normalized token embeddings of shape (batch, qlen, dim).
def query(self, input_ids, attention_mask):
input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
Q = self.bert(input_ids, attention_mask=attention_mask)[0]
Q = self.linear(Q)
return torch.nn.functional.normalize(Q, p=2, dim=2)
# Encode documents; punctuation and padding embeddings are zeroed via mask().
# With keep_dims=False, returns a list of per-document tensors (fp16, on CPU)
# with the masked positions dropped.
def doc(self, input_ids, attention_mask, keep_dims=True):
input_ids, attention_mask = input_ids.to(DEVICE), attention_mask.to(DEVICE)
D = self.bert(input_ids, attention_mask=attention_mask)[0]
D = self.linear(D)
mask = torch.tensor(self.mask(input_ids), device=DEVICE).unsqueeze(2).float()
D = D * mask
D = torch.nn.functional.normalize(D, p=2, dim=2)
if not keep_dims:
D, mask = D.cpu().to(dtype=torch.float16), mask.cpu().bool().squeeze(-1)
D = [d[mask[idx]] for idx, d in enumerate(D)]
return D
# Late interaction: for each query token, take the maximum similarity over all
# document tokens, then sum over query tokens.
def score(self, Q, D):
if self.similarity_metric == 'cosine':
return (Q @ D.permute(0, 2, 1)).max(2).values.sum(1)
assert self.similarity_metric == 'l2'
# Negated squared euclidean distance, so larger is still better.
return (-1.0 * ((Q.unsqueeze(2) - D.unsqueeze(1))**2).sum(-1)).max(-1).values.sum(-1)
# 1 for tokens to keep; 0 for punctuation (skiplist) and padding (token id 0).
def mask(self, input_ids):
mask = [[(x not in self.skiplist) and (x != 0) for x in d] for d in input_ids.cpu().tolist()]
return mask
| 2,458 | 34.637681 | 112 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/doc_tokenization.py | import torch
from transformers import BertTokenizerFast
from colbert.modeling.tokenization.utils import _split_into_batches, _sort_by_length
class DocTokenizer():
    """BERT tokenizer for ColBERT documents: prepends a [D] marker token and
    truncates/pads to doc_maxlen."""

    def __init__(self, doc_maxlen):
        self.tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
        self.doc_maxlen = doc_maxlen

        # [D] is mapped onto BERT's [unused1] vocabulary slot.
        self.D_marker_token, self.D_marker_token_id = '[D]', self.tok.convert_tokens_to_ids('[unused1]')
        self.cls_token, self.cls_token_id = self.tok.cls_token, self.tok.cls_token_id
        self.sep_token, self.sep_token_id = self.tok.sep_token, self.tok.sep_token_id

        assert self.D_marker_token_id == 2

    def tokenize(self, batch_text, add_special_tokens=False):
        """Tokenize each string; optionally wrap as [CLS] [D] ... [SEP]."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        token_lists = [self.tok.tokenize(text, add_special_tokens=False) for text in batch_text]
        if not add_special_tokens:
            return token_lists

        prefix, suffix = [self.cls_token, self.D_marker_token], [self.sep_token]
        return [prefix + tokens + suffix for tokens in token_lists]

    def encode(self, batch_text, add_special_tokens=False):
        """Encode each string to ids; optionally wrap as [CLS] [D] ... [SEP] ids."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        id_lists = self.tok(batch_text, add_special_tokens=False)['input_ids']
        if not add_special_tokens:
            return id_lists

        prefix, suffix = [self.cls_token_id, self.D_marker_token_id], [self.sep_token_id]
        return [prefix + ids + suffix for ids in id_lists]

    def tensorize(self, batch_text, bsize=None):
        """Tensorize a batch of documents, with the [D] marker at position 1.

        When *bsize* is given, the rows are sorted by length and split into
        batches; the permutation undoing the sort is returned as well.
        """
        assert type(batch_text) in [list, tuple], (type(batch_text))

        # Add a placeholder token that will be overwritten by the [D] marker.
        batch_text = ['. ' + text for text in batch_text]

        obj = self.tok(batch_text, padding='longest', truncation='longest_first',
                       return_tensors='pt', max_length=self.doc_maxlen)
        ids, mask = obj['input_ids'], obj['attention_mask']

        # Postprocess: position 1 becomes the [D] marker.
        ids[:, 1] = self.D_marker_token_id

        if bsize:
            ids, mask, reverse_indices = _sort_by_length(ids, mask, bsize)
            batches = _split_into_batches(ids, mask, bsize)
            return batches, reverse_indices

        return ids, mask
| 2,248 | 34.140625 | 104 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/query_tokenization.py | import torch
from transformers import BertTokenizerFast
from colbert.modeling.tokenization.utils import _split_into_batches
class QueryTokenizer():
    """BERT tokenizer for ColBERT queries: prepends a [Q] marker, pads to a
    fixed query_maxlen, and replaces padding with [MASK] (query augmentation)."""

    def __init__(self, query_maxlen):
        self.tok = BertTokenizerFast.from_pretrained('bert-base-uncased')
        self.query_maxlen = query_maxlen

        # [Q] is mapped onto BERT's [unused0] vocabulary slot.
        self.Q_marker_token, self.Q_marker_token_id = '[Q]', self.tok.convert_tokens_to_ids('[unused0]')
        self.cls_token, self.cls_token_id = self.tok.cls_token, self.tok.cls_token_id
        self.sep_token, self.sep_token_id = self.tok.sep_token, self.tok.sep_token_id
        self.mask_token, self.mask_token_id = self.tok.mask_token, self.tok.mask_token_id

        assert self.Q_marker_token_id == 1 and self.mask_token_id == 103

    def tokenize(self, batch_text, add_special_tokens=False):
        """Tokenize each string; optionally wrap as [CLS] [Q] ... [SEP] and pad
        with [MASK] tokens up to query_maxlen."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        token_lists = [self.tok.tokenize(text, add_special_tokens=False) for text in batch_text]
        if not add_special_tokens:
            return token_lists

        prefix, suffix = [self.cls_token, self.Q_marker_token], [self.sep_token]
        # 3 accounts for [CLS], [Q] and [SEP].
        return [prefix + tokens + suffix + [self.mask_token] * (self.query_maxlen - (len(tokens)+3))
                for tokens in token_lists]

    def encode(self, batch_text, add_special_tokens=False):
        """Encode each string to ids; optionally wrap and [MASK]-pad as in tokenize()."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        id_lists = self.tok(batch_text, add_special_tokens=False)['input_ids']
        if not add_special_tokens:
            return id_lists

        prefix, suffix = [self.cls_token_id, self.Q_marker_token_id], [self.sep_token_id]
        return [prefix + ids + suffix + [self.mask_token_id] * (self.query_maxlen - (len(ids)+3))
                for ids in id_lists]

    def tensorize(self, batch_text, bsize=None):
        """Tensorize a batch of queries to fixed length, with the [Q] marker at
        position 1 and padding replaced by [MASK]; optionally split into batches."""
        assert type(batch_text) in [list, tuple], (type(batch_text))

        # Add a placeholder token that will be overwritten by the [Q] marker.
        batch_text = ['. ' + text for text in batch_text]

        obj = self.tok(batch_text, padding='max_length', truncation=True,
                       return_tensors='pt', max_length=self.query_maxlen)
        ids, mask = obj['input_ids'], obj['attention_mask']

        # Postprocess: position 1 becomes [Q]; padding (id 0) becomes [MASK].
        ids[:, 1] = self.Q_marker_token_id
        ids[ids == 0] = self.mask_token_id

        if bsize:
            return _split_into_batches(ids, mask, bsize)

        return ids, mask
| 2,449 | 36.692308 | 115 | py |
ColBERT | ColBERT-master/colbert/modeling/tokenization/utils.py | import torch
def tensorize_triples(query_tokenizer, doc_tokenizer, queries, positives, negatives, bsize):
    """Tensorize (query, positive, negative) training triples.

    Triples are sorted by the longer of their two documents to reduce padding,
    then split into batches where queries are duplicated so each copy aligns
    with its positive and negative document respectively.
    """
    assert len(queries) == len(positives) == len(negatives)
    assert bsize is None or len(queries) % bsize == 0

    N = len(queries)
    Q_ids, Q_mask = query_tokenizer.tensorize(queries)
    D_ids, D_mask = doc_tokenizer.tensorize(positives + negatives)
    # Axis 0 separates positives (index 0) from negatives (index 1).
    D_ids, D_mask = D_ids.view(2, N, -1), D_mask.view(2, N, -1)

    # For each triple, the length of its longer document ...
    maxlens = D_mask.sum(-1).max(0).values

    # ... determines the (ascending) processing order.
    order = maxlens.sort().indices
    Q_ids, Q_mask = Q_ids[order], Q_mask[order]
    D_ids, D_mask = D_ids[:, order], D_mask[:, order]
    (positive_ids, negative_ids), (positive_mask, negative_mask) = D_ids, D_mask

    query_batches = _split_into_batches(Q_ids, Q_mask, bsize)
    positive_batches = _split_into_batches(positive_ids, positive_mask, bsize)
    negative_batches = _split_into_batches(negative_ids, negative_mask, bsize)

    batches = []
    for (q_ids, q_mask), (p_ids, p_mask), (n_ids, n_mask) in zip(query_batches, positive_batches, negative_batches):
        # Duplicate the queries: first half pairs with positives, second with negatives.
        Q = (torch.cat((q_ids, q_ids)), torch.cat((q_mask, q_mask)))
        D = (torch.cat((p_ids, n_ids)), torch.cat((p_mask, n_mask)))
        batches.append((Q, D))

    return batches
def _sort_by_length(ids, mask, bsize):
if ids.size(0) <= bsize:
return ids, mask, torch.arange(ids.size(0))
indices = mask.sum(-1).sort().indices
reverse_indices = indices.sort().indices
return ids[indices], mask[indices], reverse_indices
def _split_into_batches(ids, mask, bsize):
batches = []
for offset in range(0, ids.size(0), bsize):
batches.append((ids[offset:offset+bsize], mask[offset:offset+bsize]))
return batches
| 1,833 | 34.269231 | 116 | py |
cili | cili-master/make-tsv.py |
"""
Script to produce a TSV file for a release of CILI.
The mappings to the Princeton WordNet generally don't need to be
released regularly as they are unlikely to change and are already
included in WN-LMF releases of the PWN, so this script reduces the
ili.ttl file to a two-column tab-separated-value file containing only
the ILI inventory and their definitions. This assumes that every ILI
has a definition, which is true by design. The resulting .tsv file is
less than half the size of the .ttl file when uncompressed, but
roughly the same size when compressed. TSV is generally much faster to
parse, however, and doesn't require an RDF library, so it is more
appealing for downstream applications.
Requirements:
- Python 3.6+
- rdflib
Usage:
python3 make-tsv.py > cili.tsv
"""
import sys

from rdflib import Graph
from rdflib.namespace import SKOS

graph = Graph()
graph.parse("ili.ttl", format='ttl')

# Pair each ILI identifier (the final URL component) with its definition.
data = [(subj.rpartition('/')[2], obj)
        for subj, obj in graph.subject_objects(predicate=SKOS.definition)]

# Sort numerically by the ILI number (identifiers look like "i12345").
data.sort(key=lambda pair: int(pair[0].lstrip('i')))

print('ILI\tDefinition')
for ili, definition in data:
    print(f'{ili}\t{definition}')
| 1,284 | 26.934783 | 70 | py |
cili | cili-master/make-html.py |
"""
Requirements:
- Python 3.6+
- rdflib
Usage:
python3 make-html.py OUTDIR
"""
from typing import Dict
import sys
from pathlib import Path
from rdflib import Graph
from rdflib.namespace import RDF, DC, SKOS, Namespace
# Require exactly one CLI argument: the output directory, which must not exist.
if len(sys.argv) != 2:
sys.exit('usage: python3 make-html.py OUTDIR')
OUTDIR = Path(sys.argv[1])
if OUTDIR.exists():
sys.exit(f'{OUTDIR!s} already exists; remove or rename it, then try again')
OUTDIR.mkdir()
# Stylesheet written verbatim to OUTDIR/_static/style.css later on.
css = '''\
:root {
--text-color: #111;
--background-color: white;
}
body {
width: 100%;
color: var(--text-color);
margin: 0;
background-color: var(--background-color);
font-family: "Roboto", "Fira Sans", sans-serif;
}
header {
width: 100%;
margin: 0;
padding: 10px;
background-color: black;
color: #eee;
}
header h1 { margin-top: 0; text-align: center; }
article {
width: 800px;
margin: 10px auto;
padding: 10px;
border-radius: 10px;
}
article.ili { background-color: rgba(128,128,128,.1); }
article footer {
margin: 10px;
text-align: right;
}
blockquote {
margin: 10px 0;
padding: 10px;
border-left: 4px solid #888;
background-color: rgba(128,128,128,.1)
}
dl {
display: grid;
grid-template-columns: max-content auto;
}
dt { grid-column-start: 1; }
dd { grid-column-start: 2; }
.ili-type, dd { font-weight: bold; }
a { color: rgb(90, 170, 255); text-decoration: none; }
a:hover { text-decoration: underline; }
a:active { color: rgb(120, 200, 255); }
@media screen and (max-width: 799px) {
article {
width: 400px;
}
}
@media (prefers-color-scheme: dark) {
body {
--text-color: #eee;
--background-color: black;
}
}
'''
# Page shell; {title} and {content} are filled in via str.format.
base = '''\
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<link href="_static/style.css" rel="stylesheet">
<title>{title}</title>
</head>
<body>
<header>
<h1>Global WordNet Association: Interlingual Index</h1>
</header>
{content}
</body>
</html>
'''
# Per-ILI article template with microdata (itemscope/itemprop) attributes.
article = '''\
<article class="ili" itemscope itemtype="{type!s}" itemid="{subject!s}">
<h1>{ili}</h1>
<div class="ili-type">{short_type!s}</div>
<blockquote itemprop="http://www.w3.org/2004/02/skos/core#definition">
{definition!s}
</blockquote>
<dl>
<dt>Status</dt>
<dd itemprop="status">{status!s}</dd>
<dt>Source</dt>
<dd><a href="{source_info[url]}">{source_info[name]}</a>
–
<a itemprop="http://purl.org/dc/elements/1.1/source" href="{source!s}">{source_info[local]}</a>
</dd>
</dl>
<footer>Part of <a href="https://github.com/globalwordnet/cili/">globalwordnet/cili</a></footer>
</article>
'''
# RDF namespace for ILI-specific classes and predicates.
ILI = Namespace('http://globalwordnet.org/ili/')
# Known definition sources: URL prefix -> (display name, project homepage).
sources = {
'http://wordnet-rdf.princeton.edu/wn30/': ('Princeton WordNet 3.0',
'https://wordnet.princeton.edu/'),
}
def source_info(url: str) -> Dict[str, str]:
    """Resolve *url* against the known source prefixes.

    Returns a dict with the source's display name, project URL, and the local
    (per-entry) identifier. Raises LookupError for an unrecognised source.
    """
    for prefix in sources:
        if url.startswith(prefix):
            name, project_url = sources[prefix]
            return {
                'name': name,
                'url': project_url,
                'local': url.removeprefix(prefix).lstrip('/#'),
            }
    raise LookupError(f'source info not found for {url!s}')
def short_name(s: str) -> str:
    """Return the final path component of an URL/IRI (text after the last '/')."""
    return s.rsplit('/', 1)[-1]
# Load the ILI graph and emit one HTML page per Concept/Instance subject.
g = Graph()
g.parse("ili.ttl", format='ttl')
for subj in g.subjects():
# NOTE: `type` shadows the builtin within this loop.
type = g.value(subject=subj, predicate=RDF.type)
if type not in (ILI.Concept, ILI.Instance):
continue
ili = short_name(subj)
source = g.value(subject=subj, predicate=DC.source)
# Values feeding the `article` template placeholders.
data = {
'ili': ili,
'subject': subj,
'type': type,
'short_type': short_name(type),
'definition': g.value(subject=subj, predicate=SKOS.definition),
'status': g.value(subject=subj, predicate=ILI.status, default='active'),
'source': source,
'source_info': source_info(source),
}
content = base.format(title=f'ILI: {ili}', content=article.format(**data))
(OUTDIR / f'{ili}.html').write_text(content)
# Site-level assets: GitHub Pages marker, stylesheet, and a minimal index page.
(OUTDIR / '.nojekyll').touch() # for GitHub pages
(OUTDIR / '_static').mkdir()
(OUTDIR / '_static' / 'style.css').write_text(css)
(OUTDIR / 'index.html').write_text(base.format(
title='Interlingual Index',
content='''\
<article>
<a href="https://github.com/globalwordnet/cili">https://github.com/globalwordnet/cili</a>
</article>
'''))
| 4,438 | 22.363158 | 105 | py |
gate-teamware | gate-teamware-master/version.py | import json
import yaml
import sys
# Files whose version fields must stay in sync with the VERSION file.
PACKAGE_JSON_FILE_PATH = "package.json"
DOCS_PACKAGE_JSON_FILE_PATH = "docs/package.json"
CITATION_FILE_PATH = "CITATION.cff"
# Single source of truth for the project's version number.
MASTER_VERSION_FILE = "VERSION"
def check():
    """Verify every tracked file's version matches the VERSION file.

    Intended for use in CI pipelines; exits with a non-zero status code if
    any version is out of sync.
    """
    js_version = get_package_json_version(PACKAGE_JSON_FILE_PATH)
    print(f"package.json version is {js_version}")

    docs_js_version = get_package_json_version(DOCS_PACKAGE_JSON_FILE_PATH)
    print(f"docs package.json version is {docs_js_version}")

    with open(CITATION_FILE_PATH, "r") as f:
        citation_version = yaml.safe_load(f)['version']
    print(f"CITATION.cff version is {citation_version}")

    master_version = get_master_version()
    print(f"VERSION file version is {master_version}")

    if all(v == master_version for v in (js_version, docs_js_version, citation_version)):
        print("All versions match!")
    else:
        print("One or more versions does not match")
        sys.exit(1)
def get_package_json_version(file_path: str) -> str:
    """Read and return the "version" field of a package.json-style file."""
    with open(file_path, "r") as f:
        return json.load(f)['version']
def get_master_version():
    """Return the version string from the first line of the VERSION file."""
    with open(MASTER_VERSION_FILE, "r") as f:
        return f.readline().strip()
def update():
    """Sync the version fields of all tracked files to the VERSION file."""
    master_version = get_master_version()

    update_package_json_version(PACKAGE_JSON_FILE_PATH, master_version)
    update_package_json_version(DOCS_PACKAGE_JSON_FILE_PATH, master_version)

    with open(CITATION_FILE_PATH, "r") as f:
        citation_file = yaml.safe_load(f)

    print(f"Writing master version {master_version} to {CITATION_FILE_PATH}")
    with open(CITATION_FILE_PATH, "w") as f:
        citation_file['version'] = master_version
        yaml.dump(citation_file, f)

    # Re-verify, so the command fails loudly if anything is still out of sync.
    check()
def update_package_json_version(file_path: str, version_no: str):
    """Rewrite *file_path* (a package.json-style file) with its "version"
    field set to *version_no*, pretty-printed with a 2-space indent."""
    with open(file_path, "r") as f:
        package_json = json.load(f)

    print(f"Writing master version {version_no} to {file_path}")

    package_json['version'] = version_no
    with open(file_path, "w") as f:
        json.dump(package_json, f, indent=2)
if __name__ == "__main__":
    # Fix: the original indexed sys.argv[1] unconditionally and crashed with
    # an IndexError when no argument was supplied; fall back to the usage
    # message instead.
    command = sys.argv[1] if len(sys.argv) > 1 else None
    if command == 'check':
        print("Checking versions...")
        check()
    elif command == 'update':
        print("Updating versions...")
        update()
    else:
        print(f"Unknown function {command}, available functions are 'check' and 'update'.")
| 2,693 | 30.325581 | 117 | py |
gate-teamware | gate-teamware-master/manage.py | """Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
# Default settings module; an externally-set DJANGO_SETTINGS_MODULE wins.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings.base')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
# Re-raise with a friendlier hint; `from exc` preserves the original cause.
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
# Dispatch to Django's management commands (runserver, migrate, ...).
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 669 | 28.130435 | 77 | py |
gate-teamware | gate-teamware-master/backend/views.py | import tempfile
import json
import math
import csv
from zipfile import ZipFile
from django.conf import settings
from django.http import StreamingHttpResponse, HttpResponse
from django.shortcuts import render
from django.views import View
from backend.models import Project, Document, DocumentType
class MainView(View):
    """
    The main view of the app (index page)
    """

    template_page = "index.html"

    def get(self, request, *args, **kwargs):
        """Render the app's index page, exposing Django settings to the template."""
        return render(request, self.template_page, context={"settings": settings})
# Streams a project's documents and annotations to managers/admins as a zip
# archive of JSON, JSONL or CSV files.
class DownloadAnnotationsView(View):
# Permission-gated entry point; streams the generated zip as an attachment.
# `anonymize` is a URL path component: any value except "deanonymize" anonymizes.
def get(self, request, project_id, doc_type, export_type, json_format, entries_per_file, anonymize="anonymize"):
anonymize = False if anonymize=="deanonymize" else True
if request.user.is_manager or request.user.is_staff or request.user.is_superuser:
response = StreamingHttpResponse(self.generate_download(project_id, doc_type, export_type, json_format, anonymize, documents_per_file=entries_per_file))
# Build a filename suffix like "json-raw", "jsonl-gate" or "csv".
export_format_extension = ""
if export_type == "json" or export_type == "jsonl":
export_format_extension += export_type
if json_format == "raw" or json_format == "gate":
export_format_extension += "-"+json_format
elif export_type == "csv":
export_format_extension = export_type
response['Content-Type'] = 'application/zip'
response['Content-Disposition'] = f'attachment;filename="project{project_id:04d}-{export_format_extension}.zip"'
return response
return HttpResponse("No permission to access this endpoint", status=401)
# Generator: builds the zip inside a temporary file, splitting documents into
# archive members of at most `documents_per_file` entries, then yields the
# finished archive in `chunk_size`-byte chunks.
def generate_download(self, project_id, doc_type="all", export_type="json", json_format="raw", anonymize=True, chunk_size=512, documents_per_file=500):
project = Project.objects.get(pk=project_id)
with tempfile.TemporaryFile() as z:
with ZipFile(z, "w") as zip:
# Select which class of documents to export; default is all of them.
all_docs = project.documents.all()
if doc_type == "training":
all_docs = project.documents.filter(doc_type=DocumentType.TRAINING)
elif doc_type == "test":
all_docs = project.documents.filter(doc_type=DocumentType.TEST)
elif doc_type == "annotation":
all_docs = project.documents.filter(doc_type=DocumentType.ANNOTATION)
num_docs = all_docs.count()
num_slices = math.ceil(num_docs/documents_per_file)
for slice_index in range(num_slices):
start_index = slice_index*documents_per_file
end_index = ((slice_index+1)*documents_per_file)
if end_index >= num_docs:
end_index = num_docs
slice_docs = all_docs[start_index:end_index]
# Write each slice to a named temp file so ZipFile can add it by path.
with tempfile.NamedTemporaryFile("w+") as f:
self.write_docs_to_file(f, slice_docs, export_type, json_format, anonymize)
zip.write(f.name, f"project-{project_id}-{doc_type}-{slice_index:04d}.{export_type}")
# Stream file output
z.seek(0)
while True:
c = z.read(chunk_size)
if c:
yield c
else:
break
# Dispatches to the writer matching `export_type`.
def write_docs_to_file(self, file, documents, export_type, json_format, anonymize):
if export_type == "json":
self.write_docs_as_json(file, documents, json_format, anonymize)
elif export_type == "jsonl":
self.write_docs_as_jsonl(file, documents, json_format, anonymize)
elif export_type == "csv":
self.write_docs_as_csv(file, documents, anonymize)
# Writes all documents as one JSON array.
def write_docs_as_json(self, file, documents, json_format, anonymize):
doc_dict_list = []
for document in documents:
doc_dict_list.append(document.get_doc_annotation_dict(json_format, anonymize))
file.write(json.dumps(doc_dict_list))
file.flush()
# Writes one JSON object per line (JSON Lines format).
def write_docs_as_jsonl(self, file, documents, json_format, anonymize):
for document in documents:
doc_dict = document.get_doc_annotation_dict(json_format, anonymize)
file.write(json.dumps(doc_dict) + "\n")
file.flush()
# Flattens each document's nested dict into dotted column names, computes the
# union of all columns, and writes a CSV with missing cells left empty.
def write_docs_as_csv(self, file, documents, anonymize):
doc_dict_list = []
keys_list = []
for document in documents:
doc_dict_list.append(self.flatten_json(document.get_doc_annotation_dict("csv", anonymize), "."))
for doc_dict in doc_dict_list:
keys_list = self.insert_missing_key(keys_list, doc_dict)
writer = csv.writer(file, delimiter=",", quotechar='"')
# Header row
writer.writerow(keys_list)
# Data
for doc_dict in doc_dict_list:
row = []
for key in keys_list:
if key in doc_dict:
row.append(doc_dict[key])
else:
row.append(None)
writer.writerow(row)
file.flush()
# Recursively flattens nested dicts/lists into a single-level dict whose keys
# are the original paths joined by `delim` (list indices become path parts).
def flatten_json(self, b, delim):
val = {}
for i in b.keys():
if isinstance(b[i], dict):
get = self.flatten_json(b[i], delim)
for j in get.keys():
val[i + delim + j] = get[j]
elif isinstance(b[i], list):
for index, obj in enumerate(b[i]):
if isinstance(obj, dict):
get = self.flatten_json(obj, delim)
for j in get.keys():
val[i + delim + str(index) + delim + j] = get[j]
else:
val[i + delim + str(index)] = obj
else:
val[i] = b[i]
return val
# Merges obj_dict's keys into key_list, inserting each new key next to the
# neighbour it follows/precedes in obj_dict so the column order stays stable.
def insert_missing_key(self, key_list, obj_dict):
key_list = list(key_list)
key_set = set(key_list)
obj_keys = list(obj_dict.keys())
obj_key_set = set(obj_keys)
diff_set = obj_key_set.difference(key_set)
num_obj_keys = len(obj_keys)
# Do key filling in order
missing_keys_list = [key for key in obj_keys if key in diff_set]
for missing_key in missing_keys_list:
prev_key = None
next_key = None
# Locate the missing key's immediate neighbours in obj_dict's order.
for i, item in enumerate(obj_keys):
if obj_keys[i] == missing_key:
prev_key = obj_keys[i-1] if i > 0 else None
next_key = obj_keys[i+1] if i < num_obj_keys - 1 else None
break
if prev_key in key_set:
prev_key_index = key_list.index(prev_key)
key_list.insert(prev_key_index + 1, missing_key)
elif next_key in key_set:
next_key_index = key_list.index(next_key)
key_list.insert(next_key_index, missing_key)
else:
key_list.insert(-1, missing_key)
# Refresh the membership set so later keys can anchor on ones just added.
key_set = set(key_list)
return key_list
| 7,294 | 34.585366 | 164 | py |
gate-teamware | gate-teamware-master/backend/signals.py | from django.db.models.signals import pre_delete
from django.dispatch import receiver
from backend.models import ServiceUser, Annotation
| 137 | 26.6 | 50 | py |
gate-teamware | gate-teamware-master/backend/errors.py | class AuthError(PermissionError):
pass
| 43 | 13.666667 | 33 | py |
gate-teamware | gate-teamware-master/backend/rpcserver.py | import json
import logging
import inspect
from json.decoder import JSONDecodeError
from django.http import JsonResponse, HttpRequest
from django.views import View
from backend.errors import AuthError
log = logging.getLogger(__name__)
REGISTERED_RPC_METHODS = {}
PARSE_ERROR = -32700
INVALID_REQUEST = -32600
METHOD_NOT_FOUND = -32601
INVALID_PARAMS = -32602
INTERNAL_ERROR = -32603
AUTHENTICATION_ERROR = -32000
UNAUTHORIZED_ERROR = -32001
class RPCMethod:
def __init__(self, function, authenticate, requires_manager=False, requires_admin=False):
self.function = function
self.authenticate = authenticate
self.requires_manager = requires_manager
self.requires_admin = requires_admin
class JSONRPCEndpoint(View):
@staticmethod
def endpoint_listing():
endpoints_list = {}
for func_name, rmethod in REGISTERED_RPC_METHODS.items():
argspec = inspect.getfullargspec(rmethod.function)
args_list = []
if len(argspec.args) > 1:
args_list = argspec.args[1:]
endpoints_list[func_name] = {
"description": rmethod.function.__doc__,
"arguments": args_list,
"defaults": argspec.defaults,
"require_authentication": rmethod.authenticate,
"require_manager": rmethod.requires_manager,
"require_admin": rmethod.requires_admin
}
return endpoints_list
def success_response(self, data, msg_id=None, http_status=200):
context = {
"jsonrpc": "2.0",
"result": data
}
if msg_id is not None:
context["id"] = msg_id
return JsonResponse(context, status=http_status)
def error_response(self, code, message, msg_id=None, http_status=400):
context = {
"jsonrpc": "2.0",
"error":
{
"code": code,
"message": message,
}
}
if msg_id is not None:
context["id"] = msg_id
return JsonResponse(context, status=http_status)
def post(self, request: HttpRequest, *args, **kwargs):
msg_id = None
method_name = None
params = []
try:
# Parse message
msg = json.loads(request.body)
# Check id
if "id" in msg:
msg_id = msg["id"]
# Check protocol header
if "jsonrpc" not in msg or msg["jsonrpc"] != "2.0":
log.warning(f"No jsonrpc field in request")
return self.error_response(INVALID_REQUEST, "Not json rpc 2.0", msg_id, http_status=400)
# Get method name
if "method" in msg:
method_name = msg["method"]
if method_name not in REGISTERED_RPC_METHODS:
log.warning(f"No method name {method_name} in request")
return self.error_response(METHOD_NOT_FOUND, f"Method {method_name} was not found", http_status=405)
# Get params
if "params" in msg:
params = msg["params"]
# Get and call method
method = REGISTERED_RPC_METHODS[method_name]
# Check user role
if method.authenticate and not request.user.is_authenticated:
raise AuthError("Must be logged in to perform this operation.")
if method.requires_manager and not (request.user.is_manager or request.user.is_staff or request.user.is_superuser):
raise PermissionError("Must be a manager to perform this operation.")
if method.requires_admin and not (request.user.is_staff or request.user.is_superuser):
raise PermissionError("Must be a admin to perform this operation.")
result = method.function(request, *params)
log.info(f"Called {method_name}")
return self.success_response(result, msg_id)
except JSONDecodeError as e:
log.exception(f"Unable to parse json string from request body {request.body}")
return self.error_response(PARSE_ERROR, "Invalid JSON format in request")
except ValueError as e:
log.exception(f"Value error on rpc function {method_name}")
return self.error_response(INVALID_REQUEST, f"{e}", http_status=400)
except TypeError as e:
log.exception(f"Type error on rpc function {method_name}")
return self.error_response(INVALID_PARAMS, f"{e}", http_status=400)
except RuntimeError as e:
log.exception(f"Runtime error on rpc function {method_name}")
return self.error_response(INVALID_REQUEST, f"{e}", http_status=400)
except AuthError as e:
log.exception(f"Authentication failed trying to access {method_name}")
return self.error_response(AUTHENTICATION_ERROR, f"{e}", http_status=401)
except PermissionError as e:
log.exception(f"Not allowed to use rpc function {method_name}")
return self.error_response(UNAUTHORIZED_ERROR, f"Permission Denied: {e}", http_status=401)
except Exception as e:
log.exception(f"Unknown rpc exception on method {method_name}")
return self.error_response(INTERNAL_ERROR, f"Unknown error: {e}", http_status=500)
def rpc_method(func):
"""
Used as a decorator. Register the method to the list of RPC functions available.
The decorated function can throw PermissionError or AuthError which will be converted
to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, False)
return func
def rpc_method_auth(func):
"""
Used as a decorator. Register the method to the list of RPC functions available,
authentication check is performed automatically.
The decorated function can throw PermissionError or AuthError which will be converted
to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True)
return func
def rpc_method_manager(func):
"""
Used as a decorator. Register the method to the list of RPC functions available,
authentication check is performed automatically.
The decorated function can throw PermissionError or AuthError which will be converted
to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True, requires_manager=True)
return func
def rpc_method_admin(func):
"""
Used as a decorator. Register the method to the list of RPC functions available,
authentication check is performed automatically.
The decorated function can throw PermissionError or AuthError which will be converted
to the correct error code automatically.
"""
REGISTERED_RPC_METHODS[func.__name__] = RPCMethod(func, True, requires_manager=True, requires_admin=True)
return func
| 7,035 | 32.826923 | 127 | py |
gate-teamware | gate-teamware-master/backend/rpc.py | import secrets
import logging
import datetime
import json
import os
from urllib.parse import urljoin
from django.conf import settings
from django.contrib.auth import authenticate, get_user_model, login as djlogin, logout as djlogout
from django.contrib.auth.decorators import permission_required
from django.contrib.admin.views.decorators import staff_member_required
from django.core.exceptions import ObjectDoesNotExist
from django.db import transaction
from django.db.models import manager
from django.core import mail
from django.db.models import Q
from django.http import JsonResponse, HttpRequest
from django.shortcuts import redirect, render
from django.template.loader import render_to_string
from django.utils import timezone
import gatenlp
from django.utils.html import strip_tags
from gatenlp import annotation_set
# https://pypi.org/project/gatenlp/
from backend.errors import AuthError
from backend.rpcserver import rpc_method, rpc_method_auth, rpc_method_manager, rpc_method_admin
from backend.models import Project, Document, DocumentType, Annotation, AnnotatorProject, AnnotationChangeHistory, \
UserDocumentFormatPreference, document_preference_str
from backend.utils.misc import get_value_from_key_path, insert_value_to_key_path, read_custom_document
from backend.utils.serialize import ModelSerializer
log = logging.getLogger(__name__)
serializer = ModelSerializer()
User = get_user_model()
@rpc_method
def initialise(request):
    """
    Provide the initial context information to initialise the Teamware app

    context_object:
        user:
            isAuthenticated: bool
            isManager: bool
            isAdmin: bool
        configs:
            docFormatPref: bool
        global_configs:
            allowUserDelete: bool
    """
    user_context = is_authenticated(request)
    configs = {"docFormatPref": get_user_document_pref_from_request(request)}
    global_configs = {"allowUserDelete": settings.ALLOW_USER_DELETE}
    return {
        "user": user_context,
        "configs": configs,
        "global_configs": global_configs,
    }
def get_user_document_pref_from_request(request):
    """Return the document-format preference string for the requesting user.

    Anonymous users fall back to the JSON format.
    """
    if not request.user.is_authenticated:
        return document_preference_str(UserDocumentFormatPreference.JSON)
    return document_preference_str(request.user.doc_format_pref)
@rpc_method
def is_authenticated(request):
    """
    Checks that the current user has logged in.
    """
    user = request.user
    context = {
        "isAuthenticated": False,
        "isManager": False,
        "isAdmin": False,
    }
    if user.is_authenticated:
        context["isAuthenticated"] = True
        context["isActivated"] = user.is_activated
        context["username"] = user.username
    if not user.is_anonymous:
        # Staff users are implicitly managers
        context["isManager"] = bool(user.is_manager or user.is_staff)
        context["isAdmin"] = bool(user.is_staff)
    return context
@rpc_method
def login(request, payload):
    """Authenticate a user from a {username, password} payload and start a session."""
    for required in ("username", "password"):
        if required not in payload:
            raise RuntimeError(f"No {required} provided")
    user = authenticate(username=payload["username"], password=payload["password"])
    if user is None:
        raise AuthError("Invalid username or password.")
    if user.is_deleted:
        raise AuthError("Cannot login with a deleted account")
    djlogin(request, user)
    return {
        "username": user.username,
        "isAuthenticated": user.is_authenticated,
        "isManager": user.is_manager or user.is_staff,
        "isAdmin": user.is_staff,
        "isActivated": user.is_activated,
    }
@rpc_method
def logout(request):
    """End the current user's session."""
    djlogout(request)
@rpc_method
def register(request, payload):
    """Create a new user account, trigger activation and log the new user in.

    Raises ValueError if the username is already taken.
    """
    username = payload.get("username")
    password = payload.get("password")
    email = payload.get("email")
    # Registering through the app implies acceptance of the privacy policy
    agreed_privacy_policy = True
    if get_user_model().objects.filter(username=username).exists():
        raise ValueError("Username already exists")
    user = get_user_model().objects.create_user(username=username, password=password, email=email,
                                                agreed_privacy_policy=agreed_privacy_policy)
    _generate_user_activation(user)
    djlogin(request, user)
    return {
        "username": payload["username"],
        "isAuthenticated": True,
        "isActivated": user.is_activated,
    }
@rpc_method
def generate_user_activation(request, username):
    """(Re-)issue an account-activation token for *username*.

    Raises ValueError if the user does not exist or is already activated.
    """
    try:
        user = get_user_model().objects.get(username=username)
    except User.DoesNotExist:
        log.exception(f"Trying to generate activation code for user: {username} that doesn't exist")
        raise ValueError("User does not exist.")
    if user.is_activated:
        raise ValueError(f"User {username}'s account is already activated.")
    _generate_user_activation(user)
def _generate_user_activation(user):
    """Issue an activation token for *user* and e-mail them the activation link.

    If ACTIVATION_WITH_EMAIL is disabled in settings, the account is activated
    immediately instead of sending mail.
    """
    if settings.ACTIVATION_WITH_EMAIL:
        # Cryptographically secure, URL-safe one-time token with a limited lifetime
        register_token = secrets.token_urlsafe(settings.ACTIVATION_TOKEN_LENGTH)
        user.activate_account_token = register_token
        user.activate_account_token_expire = timezone.now() + \
            datetime.timedelta(days=settings.ACTIVATION_EMAIL_TIMEOUT_DAYS)
        user.save()
        app_name = settings.APP_NAME
        activate_url_base = urljoin(settings.APP_URL, settings.ACTIVATION_URL_PATH)
        activate_url = f"{activate_url_base}?username={user.username}&token={user.activate_account_token}"
        context = {
            "app_name": app_name,
            "activate_url": activate_url,
        }
        # Send both a plain-text and an HTML version of the activation mail
        message = render_to_string("registration_mail.html", context)
        num_sent = mail.send_mail(subject=f"Activate your account at {app_name}",
                                  message=strip_tags(message),
                                  html_message=message,
                                  from_email=settings.ADMIN_EMAIL,
                                  recipient_list=[user.email],
                                  fail_silently=False
                                  )
        if num_sent < 1:
            log.warning(f"Could not send registration email for user {user.username}")
    else:
        # No e-mail verification configured: activate the account straight away
        user.is_account_activated = True
        user.save()
@rpc_method
def activate_account(request, username, token):
    """Activate *username*'s account using the e-mailed activation token.

    Raises ValueError for a missing/too-short/unknown/expired token.
    """
    try:
        if token is None or len(token) < settings.ACTIVATION_TOKEN_LENGTH:
            log.error(f"Token of invalid length provided: {token} username: {username}")
            raise ValueError("Invalid token provided")
        user = get_user_model().objects.get(username=username, activate_account_token=token)
        if user.activate_account_token_expire < timezone.now():
            raise ValueError("Token has expired")
        # Activation succeeded: clear the one-time token
        user.is_account_activated = True
        user.activate_account_token = None
        user.activate_account_token_expire = None
        user.save()
    except User.DoesNotExist as e:
        log.exception(f"Activate account, invalid token provided: {token}")
        raise ValueError("Invalid token provided")
@rpc_method
def generate_password_reset(request, username):
    """Issue a password-reset token for *username* and e-mail them the reset link.

    Raises ValueError if the username does not exist.
    """
    user = None
    try:
        user = get_user_model().objects.get(username=username)
        # Cryptographically secure, URL-safe one-time token with a limited lifetime
        register_token = secrets.token_urlsafe(settings.PASSWORD_RESET_TOKEN_LENGTH)
        user.reset_password_token = register_token
        user.reset_password_token_expire = timezone.now() + \
            datetime.timedelta(hours=settings.PASSWORD_RESET_TIMEOUT_HOURS)
        user.save()
        app_name = settings.APP_NAME
        reset_url_base = urljoin(settings.APP_URL, settings.PASSWORD_RESET_URL_PATH)
        reset_url = f"{reset_url_base}?username={user.username}&token={user.reset_password_token}"
        context = {
            "app_name": app_name,
            "reset_url": reset_url,
        }
        # Send both a plain-text and an HTML version of the reset mail
        message = render_to_string("password_reset_mail.html", context)
        num_sent = mail.send_mail(subject=f"Reset your password at {app_name}",
                                  message=strip_tags(message),
                                  html_message=message,
                                  from_email=settings.ADMIN_EMAIL,
                                  recipient_list=[user.email],
                                  fail_silently=False
                                  )
        if num_sent < 1:
            log.warning(f"Could not send password reset email for user {user.username}")
    except User.DoesNotExist as e:
        raise ValueError("Username does not exist.")
@rpc_method
def reset_password(request, username, token, new_password):
    """Set *new_password* for *username* given a valid, unexpired reset token.

    Raises ValueError for a missing/too-short/unknown/expired token.
    """
    try:
        if token is None or len(token) < settings.PASSWORD_RESET_TOKEN_LENGTH:
            log.error(f"Token of invalid length provided: {token} username: {username}")
            raise ValueError("Invalid token provided")
        user = get_user_model().objects.get(username=username, reset_password_token=token)
        if user.reset_password_token_expire < timezone.now():
            raise ValueError("Token has expired")
        # Reset succeeded: store the new password hash and clear the one-time token
        user.set_password(new_password)
        user.reset_password_token = None
        user.reset_password_token_expire = None
        user.save()
    except User.DoesNotExist as e:
        log.exception(f"Reset password, invalid token provided: {token}")
        raise ValueError("Invalid token provided")
@rpc_method_auth
def change_password(request, payload):
    """Set a new password for the logged-in user."""
    request.user.set_password(payload.get("password"))
    request.user.save()
@rpc_method_auth
def change_email(request, payload):
    """Change the logged-in user's e-mail address and require re-verification."""
    user = request.user
    user.email = payload.get("email")
    # A new address must be verified again before the account counts as activated
    user.is_account_activated = False
    user.save()
    _generate_user_activation(user)
@rpc_method_auth
def set_user_receive_mail_notifications(request, do_receive_notifications):
    """Toggle whether the logged-in user receives e-mail notifications."""
    request.user.receive_mail_notifications = do_receive_notifications
    request.user.save()
@rpc_method_auth
def set_user_document_format_preference(request, doc_preference):
    """Set the logged-in user's preferred document download format.

    Accepts "JSON" or "CSV"; raises ValueError for anything else.
    """
    preference_map = {
        "JSON": UserDocumentFormatPreference.JSON,
        "CSV": UserDocumentFormatPreference.CSV,
    }
    if doc_preference not in preference_map:
        raise ValueError(f"Document preference value {doc_preference} is invalid")
    user = request.user
    user.doc_format_pref = preference_map[doc_preference]
    user.save()
@rpc_method_auth
def get_user_details(request):
    """Return the logged-in user's profile details, role and document-format preference."""
    user = request.user
    # Highest applicable role wins: admin > manager > annotator
    if user.is_staff:
        user_role = "admin"
    elif user.is_manager:
        user_role = "manager"
    else:
        user_role = "annotator"
    return {
        "username": user.username,
        "email": user.email,
        "created": user.created,
        "receive_mail_notifications": user.receive_mail_notifications,
        "user_role": user_role,
        "doc_format_pref": document_preference_str(user.doc_format_pref),
    }
@rpc_method_auth
def get_user_annotated_projects(request):
    """
    Gets a list of projects that the user has annotated
    """
    annotated = Project.objects.filter(
        documents__annotations__user_id=request.user.pk).distinct().order_by("-id")
    return [
        {
            "id": project.pk,
            "name": project.name,
            "allow_annotation_change": project.allow_annotation_change,
            "configuration": project.configuration,
        }
        for project in annotated
    ]
@rpc_method_auth
def get_user_annotations_in_project(request, project_id, current_page=1, page_size=None):
    """
    Gets a list of documents in a project where the user has performed annotations in.

    :param project_id: The id of the project to query
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    user = request.user
    if project_id is None:
        raise Exception("Must have project_id")
    if current_page < 1:
        raise Exception("Page must start from 1")
    current_page = current_page - 1  # Change to zero index
    project = Project.objects.get(pk=project_id)
    user_annotated_docs = project.documents.filter(doc_type=DocumentType.ANNOTATION,
                                                   annotations__user_id=user.pk).distinct()
    total_count = user_annotated_docs.count()
    # Fix: reuse total_count instead of issuing a second, identical COUNT query
    if total_count < 1:
        raise Exception(f"No annotations in this project {project.pk}:{project.name}")
    if page_size is not None:
        start_index = current_page * page_size
        end_index = (current_page + 1) * page_size
        paginated_docs = user_annotated_docs[start_index:end_index]
    else:
        paginated_docs = user_annotated_docs
    # Each document listing carries only this user's annotations
    documents_out = []
    for document in paginated_docs:
        annotations_list = [annotation.get_listing() for annotation in document.annotations.filter(user=user)]
        documents_out.append(document.get_listing(annotations_list))
    return {"items": documents_out, "total_count": total_count}
@rpc_method_auth
def user_delete_personal_information(request):
    """Strip the logged-in user's personal information from their account."""
    request.user.delete_user_personal_information()
@rpc_method_auth
def user_delete_account(request):
    """Delete the logged-in user's account, if the deployment allows it."""
    if not settings.ALLOW_USER_DELETE:
        raise Exception("Teamware's current configuration does not allow user accounts to be deleted.")
    request.user.delete()
@rpc_method_manager
def create_project(request):
    """Create an empty project owned by the requesting user; return its serialisation."""
    with transaction.atomic():
        project = Project.objects.create()
        project.owner = request.user
        project.save()
        data = serializer.serialize(project, exclude_fields=set(["annotators", "annotatorproject"]))
        data["annotators"] = get_project_annotators(request, project.id)
        return data
@rpc_method_manager
def delete_project(request, project_id):
    """Delete the project with the given id. Returns True on success."""
    with transaction.atomic():
        Project.objects.get(pk=project_id).delete()
    return True
@rpc_method_manager
def update_project(request, project_dict):
    """Update a project from its serialised dict. Returns True on success.

    Annotator relations are managed through dedicated RPC calls, so they are
    excluded from deserialisation here.
    """
    with transaction.atomic():
        # Fix: drop the unused local binding of the deserialised project
        serializer.deserialize(Project, project_dict, exclude_fields=set(["annotators", "annotatorproject"]))
    return True
@rpc_method_manager
def get_project(request, project_id):
    """Return the project's serialisation merged with its annotators and statistics."""
    project = Project.objects.get(pk=project_id)
    result = serializer.serialize(project, exclude_fields=set(["annotators", "annotatorproject"]))
    result.update(project.get_annotators_dict())
    result.update(project.get_project_stats())
    return result
@rpc_method_manager
def clone_project(request, project_id):
    """Duplicate an existing project; the requesting user owns the clone."""
    with transaction.atomic():
        source_project = Project.objects.get(pk=project_id)
        cloned_project = source_project.clone(owner=request.user)
        return serializer.serialize(cloned_project, exclude_fields=set(["annotators", "annotatorproject"]))
@rpc_method_manager
def import_project_config(request, pk, project_dict):
    """Overwrite project *pk*'s exportable configuration fields from a config dict."""
    with transaction.atomic():
        payload = {"id": pk, **project_dict}
        serializer.deserialize(Project, payload, Project.get_project_export_field_names())
@rpc_method_manager
def export_project_config(request, pk):
    """Serialise project *pk*'s exportable configuration fields."""
    return serializer.serialize(Project.objects.get(pk=pk), Project.get_project_export_field_names())
@rpc_method_manager
def get_projects(request, current_page=1, page_size=None, filters=None):
    """
    Gets the list of projects. Query result can be limited by using current_page and page_size and sorted
    by using filters.

    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter option used to search project, currently only string is used to search
        for project title
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    if current_page < 1:
        raise Exception("Page index starts from 1")
    current_page = current_page - 1  # Change to 0 index for query
    # Perform filtering
    if isinstance(filters, str):
        # Search project title if the filter is a string only
        projects_query = Project.objects.filter(name__contains=filters.strip())
    else:
        projects_query = Project.objects.all()
    # Fix: count computed once, not duplicated inside both filter branches
    total_count = projects_query.count()
    # Perform pagination. The whole (unpaginated) result is returned when page
    # limits are missing or current_page points past the end of the result set
    if current_page is None or page_size is None or current_page * page_size >= total_count:
        projects = projects_query
    else:
        start_index = current_page * page_size
        end_index = (current_page + 1) * page_size
        projects = projects_query[start_index:end_index]
    # Serialize
    output_projects = []
    for proj in projects:
        output_projects.append({
            **serializer.serialize(proj, {"id", "name", "created"}),
            **proj.get_annotators_dict(),
            **proj.get_project_stats()
        })
    return {"items": output_projects, "total_count": total_count}
def _get_project_documents(project_id, current_page=1, page_size=None, filters=None, doc_type=DocumentType.ANNOTATION):
    """
    Gets the list of documents and its annotations. Query result can be limited by using current_page and page_size
    and sorted by using filters

    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for ID of documents
    :param doc_type: Integer enum representation of document type Document.[ANNOTATION, TRAINING, TEST]
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    if project_id is None:
        raise Exception("project_id must be provided in the options")
    if current_page < 1:
        raise Exception("Page index starts from 1")
    current_page = current_page - 1  # Change to 0 index for query
    project = Project.objects.get(pk=project_id)
    # Filter: a string filter is interpreted as a document ID to look up
    if isinstance(filters, str):
        documents_query = project.documents.filter(pk=filters.strip(), doc_type=doc_type)
    else:
        documents_query = project.documents.filter(doc_type=doc_type).all()
    # Fix: count computed once, not duplicated inside both filter branches
    total_count = documents_query.count()
    # Paginate. The whole (unpaginated) result is returned when page limits are
    # missing or current_page points past the end of the result set
    if current_page is None or page_size is None or current_page * page_size >= total_count:
        documents = documents_query.all()
    else:
        start_index = current_page * page_size
        end_index = (current_page + 1) * page_size
        documents = documents_query[start_index:end_index]
    # Serialize each document together with the listings of all its annotations
    documents_out = []
    for document in documents:
        annotations_list = [a.get_listing() for a in document.annotations.all()]
        documents_out.append(document.get_listing(annotations_list))
    return {"items": documents_out, "total_count": total_count}
@rpc_method_manager
def get_project_documents(request, project_id, current_page=1, page_size=None, filters=None):
    """
    Gets the list of annotation documents (and their annotations) of a project.

    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for ID of documents
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.ANNOTATION)
@rpc_method_manager
def get_project_test_documents(request, project_id, current_page=1, page_size=None, filters=None):
    """
    Gets the list of test documents (and their annotations) of a project.

    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for ID of documents
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.TEST)
@rpc_method_manager
def get_project_training_documents(request, project_id, current_page=1, page_size=None, filters=None):
    """
    Gets the list of training documents (and their annotations) of a project.

    :param project_id: The id of the project that the documents belong to, is a required variable
    :param current_page: A 1-indexed page count
    :param page_size: The maximum number of items to return per query
    :param filters: Filter currently only searches for ID of documents
    :returns: Dictionary of items and total count after filter is applied {"items": [], "total_count": int}
    """
    return _get_project_documents(project_id, current_page, page_size, filters, DocumentType.TRAINING)
def _add_project_document(project_id, document_data, doc_type=DocumentType.ANNOTATION):
    """Create a document of *doc_type* under the given project; return its pk."""
    parent_project = Project.objects.get(pk=project_id)
    new_document = Document.objects.create(project=parent_project, doc_type=doc_type)
    new_document.data = document_data
    new_document.save()
    return new_document.pk
@rpc_method_manager
def add_project_document(request, project_id, document_data):
    """Add an annotation document to a project; returns the new document's pk."""
    with transaction.atomic():
        return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.ANNOTATION)
@rpc_method_manager
def add_project_test_document(request, project_id, document_data):
    """Add a test document to a project; returns the new document's pk."""
    with transaction.atomic():
        return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.TEST)
@rpc_method_manager
def add_project_training_document(request, project_id, document_data):
    """Add a training document to a project; returns the new document's pk."""
    with transaction.atomic():
        return _add_project_document(project_id, document_data=document_data, doc_type=DocumentType.TRAINING)
@rpc_method_manager
def add_document_annotation(request, doc_id, annotation_data):
    """Create an annotation owned by the requesting user on document *doc_id*.

    Returns the new annotation's pk.
    """
    with transaction.atomic():
        document = Document.objects.get(pk=doc_id)
        annotation = Annotation.objects.create(document=document, user=request.user)
        # NOTE(review): unlike _add_project_document there is no save() after this
        # assignment — presumably Annotation.data is a property that persists
        # itself; verify against the model, otherwise the data is never stored.
        annotation.data = annotation_data
        return annotation.pk
@rpc_method_manager
def get_annotations(request, project_id):
    """
    Serialize project annotations as GATENLP format JSON using the python-gatenlp interface.

    :param project_id: The project whose documents (with all their annotations) are exported
    :returns: A list with one bdocjs-format JSON string per document
    """
    project = Project.objects.get(pk=project_id)
    annotations = []
    for document in project.documents.all():
        # create a GateNLP Document instance
        doc = gatenlp.Document(text=document.data['text'])
        doc.name = str(document.pk)
        for annotation in document.annotations.all():
            # add an Annotation_Set named as the annotation user
            annset = doc.annset(name=annotation.user.username)
            # add the annotation to the annotation set, spanning the whole text
            annset.add(start=0,
                       end=len(document.data['text']),
                       anntype="Document",
                       features=dict(label=annotation.data, _id=annotation.pk),
                       )
        # For each document, append the annotations
        annotations.append(doc.save_mem(fmt="bdocjs"))
    return annotations
@rpc_method_manager
def delete_documents_and_annotations(request, doc_id_ary, anno_id_ary):
    """Delete the listed annotations first, then the listed documents. Returns True."""
    for annotation_id in anno_id_ary:
        Annotation.objects.filter(pk=annotation_id).delete()
    for document_id in doc_id_ary:
        Document.objects.filter(pk=document_id).delete()
    return True
@rpc_method_manager
def get_possible_annotators(request, proj_id):
    """List users who could be added as annotators to the project: not deleted,
    not already attached to this project and not currently active on any project."""
    project = Project.objects.get(pk=proj_id)
    # IDs of annotators that are currently active in any project
    active_ids = User.objects.filter(annotatorproject__status=AnnotatorProject.ACTIVE).values_list('id', flat=True)
    # IDs of annotators already attached to this project
    current_ids = project.annotators.all().values_list('id', flat=True)
    candidates = User.objects.filter(is_deleted=False).exclude(id__in=active_ids).exclude(id__in=current_ids)
    return [serializer.serialize(candidate, {"id", "username", "email"}) for candidate in candidates]
@rpc_method_manager
def get_project_annotators(request, proj_id):
    """Return each annotator of the project merged with their membership record
    and annotation statistics."""
    return [
        {
            **serializer.serialize(membership.annotator, {"id", "username", "email"}),
            **serializer.serialize(membership, exclude_fields={"annotator", "project"}),
            **membership.get_stats()
        }
        for membership in AnnotatorProject.objects.filter(project_id=proj_id)
    ]
@rpc_method_manager
def add_project_annotator(request, proj_id, username):
    """Attach user *username* as an annotator of the project. Returns True."""
    with transaction.atomic():
        project = Project.objects.get(pk=proj_id)
        project.add_annotator(User.objects.get(username=username))
        project.save()
        return True
@rpc_method_manager
def make_project_annotator_active(request, proj_id, username):
    """Mark annotator *username* as active on the project. Returns True."""
    with transaction.atomic():
        project = Project.objects.get(pk=proj_id)
        project.make_annotator_active(User.objects.get(username=username))
        return True
@rpc_method_manager
def project_annotator_allow_annotation(request, proj_id, username):
    """Grant annotator *username* permission to annotate on the project."""
    with transaction.atomic():
        project = Project.objects.get(pk=proj_id)
        project.annotator_set_allowed_to_annotate(User.objects.get(username=username))
@rpc_method_manager
def remove_project_annotator(request, proj_id, username):
    """Detach annotator *username* from the project. Returns True."""
    with transaction.atomic():
        project = Project.objects.get(pk=proj_id)
        project.remove_annotator(User.objects.get(username=username))
        project.save()
        return True
@rpc_method_manager
def reject_project_annotator(request, proj_id, username):
    """Reject annotator *username*'s membership of the project. Returns True."""
    with transaction.atomic():
        project = Project.objects.get(pk=proj_id)
        project.reject_annotator(User.objects.get(username=username))
        project.save()
        return True
@rpc_method_manager
def get_annotation_timings(request, proj_id):
    """Collect time-to-complete values of the project's annotations as chart
    points {x: seconds, y: 0}; annotations without a timing are skipped."""
    project = Project.objects.get(pk=proj_id)
    documents = project.documents.select_related("project").all()
    return [
        {'x': annotation.time_to_complete, 'y': 0}
        for document in documents
        for annotation in document.annotations.all()
        if annotation.time_to_complete
    ]
@rpc_method_manager
def delete_annotation_change_history(request, annotation_change_history_id):
    """Delete one annotation change-history entry.

    The requester must be associated with the annotation, and at least one
    history entry must remain after deletion.
    """
    history_entry = AnnotationChangeHistory.objects.get(pk=annotation_change_history_id)
    if not request.user.is_associated_with_annotation(history_entry.annotation):
        raise PermissionError("No permission to access the annotation history")
    if history_entry.annotation.change_history.all().count() <= 1:
        raise RuntimeError("Must have at least a single annotation change history for a completed annotation.")
    history_entry.delete()
@rpc_method_auth
def get_annotation_task(request):
    """
    Gets the annotator's current task, returns a dictionary about the annotation task that contains all the information
    needed to render the Annotate view.
    """
    with transaction.atomic():
        # Expire any pending annotation whose time allowance has run out
        Annotation.check_for_timed_out_annotations()
        user = request.user
        # The project this user is currently an active annotator of, if any
        project = user.annotates.filter(annotatorproject__status=AnnotatorProject.ACTIVE).distinct().first()
        if not project:
            return None
        # The task dictionary, or None if there is nothing to annotate
        return project.get_annotator_task(user)
@rpc_method_auth
def get_annotation_task_with_id(request, annotation_id):
    """
    Get annotation task dictionary for a specific annotation_id, must belong to the annotator (or is a manager or above)
    """
    with transaction.atomic():
        user = request.user
        annotation = Annotation.objects.get(pk=annotation_id)
        if not annotation.user_allowed_to_annotate(user):
            raise PermissionError(
                f"User {user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
        document = annotation.document
        if document and document.project:
            return document.project.get_annotation_task_dict(annotation,
                                                             include_task_history_in_project=False)
        raise RuntimeError(f"Could not get the annotation task with id {annotation_id}")
@rpc_method_auth
def complete_annotation_task(request, annotation_id, annotation_data, elapsed_time=None):
    """
    Complete the annotator's current task
    """
    with transaction.atomic():
        annotation = Annotation.objects.get(pk=annotation_id)
        if not annotation.user_allowed_to_annotate(request.user):
            raise PermissionError(
                f"User {request.user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
        # objects.get() raises on a missing row, so `annotation` is always valid here
        annotation.complete_annotation(annotation_data, elapsed_time)
@rpc_method_auth
def reject_annotation_task(request, annotation_id):
    """
    Reject the annotator's current task
    """
    with transaction.atomic():
        annotation = Annotation.objects.get(pk=annotation_id)
        if not annotation.user_allowed_to_annotate(request.user):
            raise PermissionError(
                f"User {request.user.username} trying to complete annotation id {annotation_id} that doesn't belong to them")
        # objects.get() raises on a missing row, so `annotation` is always valid here
        annotation.reject_annotation()
@rpc_method_auth
def change_annotation(request, annotation_id, new_data):
    """
    Replace an annotation's data, recording the previous data in its change
    history.

    Only annotations of ANNOTATION-type documents may be changed; test and
    training annotations are immutable. The caller must be allowed to
    annotate the annotation, or be a manager or above.

    :raises RuntimeError: if the annotation does not exist or belongs to a
        test/training document.
    """
    try:
        annotation = Annotation.objects.get(pk=annotation_id)
        # Bug fix: was `is not DocumentType.ANNOTATION` — identity comparison
        # against an int constant, which only works by accident of CPython's
        # small-int caching. Equality is the correct comparison.
        if annotation.document.doc_type != DocumentType.ANNOTATION:
            raise RuntimeError("It is not possible to change annotations created for testing or training documents.")
        if annotation.user_allowed_to_annotate(request.user) or request.user.is_manager_or_above():
            annotation.change_annotation(new_data, request.user)
    except Annotation.DoesNotExist:
        raise RuntimeError(f"Annotation with ID {annotation_id} does not exist")
@rpc_method_auth
def get_document(request, document_id):
    """ Obsolete: to be deleted"""
    document = Document.objects.get(pk=document_id)
    if not request.user.is_associated_with_document(document):
        raise PermissionError("No permission to access the document")
    annotation_listings = [anno.get_listing() for anno in document.annotations.all()]
    return document.get_listing(annotation_list=annotation_listings)
@rpc_method_auth
def get_annotation(request, annotation_id):
    """ Obsolete: to be deleted"""
    annotation = Annotation.objects.get(pk=annotation_id)
    if not request.user.is_associated_with_annotation(annotation):
        raise PermissionError("No permission to access the annotation")
    return annotation.get_listing()
@rpc_method_auth
def annotator_leave_project(request):
    """ Allow annotator to leave their currently associated project. """
    current_project = request.user.active_project
    if current_project is None:
        raise Exception("No current active project")
    # Re-fetch through the user model so remove_annotator receives a fresh instance
    annotator = get_user_model().objects.get(pk=request.user.id)
    current_project.remove_annotator(annotator)
@rpc_method_admin
def get_all_users(request):
    """Return a serialized listing of every user account (admin only)."""
    wanted_fields = {"id", "username", "email", "is_manager", "is_staff"}
    return [serializer.serialize(account, wanted_fields) for account in User.objects.all()]
@rpc_method_admin
def get_user(request, username):
    """Return account details for a single user, looked up by username (admin only)."""
    account = User.objects.get(username=username)
    return {
        "id": account.id,
        "username": account.username,
        "email": account.email,
        "is_manager": account.is_manager,
        "is_admin": account.is_staff,
        "is_activated": account.is_activated,
    }
@rpc_method_admin
def admin_update_user(request, user_dict):
    """Update a user account from a dict of properties and echo the dict back (admin only)."""
    account = User.objects.get(id=user_dict["id"])
    # Maps incoming dict keys to the model attributes they populate
    field_map = {
        "username": "username",
        "email": "email",
        "is_manager": "is_manager",
        "is_admin": "is_staff",
        "is_activated": "is_account_activated",
    }
    for key, attr in field_map.items():
        setattr(account, attr, user_dict[key])
    account.save()
    return user_dict
@rpc_method_admin
def admin_update_user_password(request, username, password):
    """Set a new password for the given user (admin only)."""
    target = User.objects.get(username=username)
    target.set_password(password)
    target.save()
@rpc_method_admin
def admin_delete_user_personal_information(request, username):
    """Scrub personal data from the given user's account (admin only)."""
    User.objects.get(username=username).delete_user_personal_information()
@rpc_method_admin
def admin_delete_user(request, username):
    """Permanently delete a user account, if the deployment permits it (admin only)."""
    if not settings.ALLOW_USER_DELETE:
        raise Exception("Teamware's current configuration does not allow the deleting of users")
    User.objects.get(username=username).delete()
@rpc_method
def get_privacy_policy_details(request):
    """
    Return the privacy policy / terms & conditions settings merged with any
    custom policy documents and the host URL of the current request.
    """
    # Bug fix: copy the settings mapping before updating it. The original
    # code called .update() directly on settings.PRIVACY_POLICY, mutating the
    # process-wide settings object and leaking per-request keys
    # (CUSTOM_* documents, URL) into every subsequent request.
    details = dict(settings.PRIVACY_POLICY)
    custom_docs = {
        'CUSTOM_PP_DOCUMENT': read_custom_document(settings.CUSTOM_PP_DOCUMENT_PATH) if os.path.isfile(
            settings.CUSTOM_PP_DOCUMENT_PATH) else None,
        'CUSTOM_TC_DOCUMENT': read_custom_document(settings.CUSTOM_TC_DOCUMENT_PATH) if os.path.isfile(
            settings.CUSTOM_TC_DOCUMENT_PATH) else None
    }
    details.update(custom_docs)
    details['URL'] = request.headers['Host']
    return details
@rpc_method
def get_endpoint_listing(request):
    """Return a listing of every JSON-RPC endpoint registered with the server."""
    # Imported lazily to avoid a circular import with the RPC server module.
    from .rpcserver import JSONRPCEndpoint
    return JSONRPCEndpoint.endpoint_listing()
| 36,144 | 33.754808 | 147 | py |
gate-teamware | gate-teamware-master/backend/admin.py | from django.contrib import admin
from django.contrib.auth import get_user_model
from .models import Project, Document, Annotation
# Register your models here.
@admin.register(get_user_model())
class UserAdmin(admin.ModelAdmin):
    """Default Django-admin interface for user accounts."""
    pass
@admin.register(Project)
class ProjectAdmin(admin.ModelAdmin):
    """Default Django-admin interface for annotation projects."""
    pass
@admin.register(Document)
class DocumentAdmin(admin.ModelAdmin):
    """Default Django-admin interface for documents."""
    pass
@admin.register(Annotation)
class AnnotationAdmin(admin.ModelAdmin):
    """Default Django-admin interface for annotations."""
    pass
| 466 | 20.227273 | 49 | py |
gate-teamware | gate-teamware-master/backend/models.py | import math
import uuid
from django.conf import settings
import logging
import django
from datetime import timedelta
from django.contrib.auth.models import AbstractUser
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import timezone
from django.db.models import Q, F, Count
from backend.utils.misc import get_value_from_key_path, insert_value_to_key_path, generate_random_string
from backend.utils.telemetry import TelemetrySender
# Module-level logger for backend.models.
log = logging.getLogger(__name__)
class UserDocumentFormatPreference:
    """Constants for a user's preferred document download format (Django choices style)."""
    JSON = 0
    CSV = 1
    # Choices tuple suitable for a Django model field's `choices` option.
    USER_DOC_FORMAT_PREF = (
        (JSON, 'JSON'),
        (CSV, 'CSV')
    )
def document_preference_str(pref: int) -> str:
    """
    Return the display name ("JSON" or "CSV") for a document format
    preference constant.

    Fix: the parameter was previously annotated with the choices tuple
    ``UserDocumentFormatPreference.USER_DOC_FORMAT_PREF``, which is a value,
    not a type; the argument is the integer constant itself.
    """
    if pref == UserDocumentFormatPreference.JSON:
        return "JSON"
    return "CSV"
class DocumentType:
    """Constants describing a document's role within a project (Django choices style)."""
    ANNOTATION = 0  # regular document to be annotated
    TRAINING = 1  # document used in the annotator training stage
    TEST = 2  # document used in the annotator qualification test
    # Choices tuple suitable for a Django model field's `choices` option.
    DOCUMENT_TYPE = (
        (ANNOTATION, 'Annotation'),
        (TRAINING, 'Training'),
        (TEST, 'Test')
    )
class ServiceUser(AbstractUser):
    """
    Custom user class.
    """
    is_manager = models.BooleanField(default=False)
    created = models.DateTimeField(default=timezone.now)
    # Account e-mail activation state and its one-time token
    is_account_activated = models.BooleanField(default=False)
    activate_account_token = models.TextField(null=True, blank=True)
    activate_account_token_expire = models.DateTimeField(null=True, blank=True)
    # Password-reset token and its expiry
    reset_password_token = models.TextField(null=True, blank=True)
    reset_password_token_expire = models.DateTimeField(null=True, blank=True)
    receive_mail_notifications = models.BooleanField(default=True)
    # Preferred format for document/annotation downloads (JSON or CSV)
    doc_format_pref = models.IntegerField(choices=UserDocumentFormatPreference.USER_DOC_FORMAT_PREF,
                                          default=UserDocumentFormatPreference.JSON)
    agreed_privacy_policy = models.BooleanField(default=False)
    # Soft-delete marker: set once personal data has been scrubbed
    # (see delete_user_personal_information)
    is_deleted = models.BooleanField(default=False)
    @property
    def has_active_project(self):
        # True if the user has at least one ACTIVE AnnotatorProject link
        return self.annotatorproject_set.filter(status=AnnotatorProject.ACTIVE).count() > 0
    @property
    def active_project(self):
        """
        Gets the project that user's currently active in
        :returns: Project object that user's active in, None if not active in any project
        """
        active_annotator_project = self.annotatorproject_set.filter(status=AnnotatorProject.ACTIVE).first()
        if active_annotator_project:
            return active_annotator_project.project
        return None
    @property
    def is_activated(self):
        """
        Checks whether the user has activated their account, but also takes into account
        of the REGISTER_WITH_EMAIL_ACTIVATION settings.
        """
        if settings.ACTIVATION_WITH_EMAIL:
            return self.is_account_activated
        else:
            # Deployments without e-mail activation treat every account as activated
            return True
    @is_activated.setter
    def is_activated(self, value):
        self.is_account_activated = value
    def is_associated_with_document(self, document):
        # Managers/staff/superusers can access every document.
        if self.is_manager or self.is_staff or self.is_superuser:
            return True
        # Direct association: the user has annotated this document.
        if self.annotations.filter(document_id=document.pk).count() > 0:
            return True
        # Indirect association: the user annotates the document's project.
        # NOTE(review): if `self.annotates` were falsy the method would fall
        # through and implicitly return None (falsy) — confirm intended.
        if self.annotates:
            if not self.annotates.filter(pk=document.project.pk).first():
                return False
            if self.annotates.filter(pk=document.project.pk).first().documents.count() > 0:
                return True
            else:
                # If user is no longer active on a project, but has annotations from that project, this should have been caught above
                return False
    def is_associated_with_annotation(self, annotation):
        # Managers and above can access every annotation; otherwise the
        # annotation must belong to this user.
        if self.is_manager or self.is_staff or self.is_superuser:
            return True
        return self.annotations.filter(pk=annotation.pk).count() > 0
    def is_manager_or_above(self):
        # True for managers, staff and superusers.
        if self.is_manager or self.is_staff or self.is_superuser:
            return True
        else:
            return False
    def clear_pending_annotations(self) -> None:
        """
        Clear all of the user's pending annotation in the system to allow other annotators
        to take up the task slot.
        """
        pending_annotations = self.annotations.filter(status=Annotation.PENDING)
        pending_annotations.delete()
    def delete_user_personal_information(self) -> None:
        """
        Replace user's personal data with placeholder
        """
        self.is_deleted = True
        # Generate a unique anonymised username, retrying on (unlikely) collisions
        retry_limit = 1000
        retry_counter = 0
        while retry_counter < retry_limit:
            random_suffix = generate_random_string(settings.DELETED_USER_USERNAME_HASH_LENGTH)
            deleted_username = f"{settings.DELETED_USER_USERNAME_PREFIX}_{random_suffix}"
            if not get_user_model().objects.filter(username=deleted_username).exists():
                break
            retry_counter += 1
        if retry_counter >= retry_limit:
            raise Exception("Could not delete user, reached hash generation retries limit")
        # Overwrite all identifying fields with placeholders
        self.username = deleted_username
        self.first_name = settings.DELETED_USER_FIRSTNAME
        self.last_name = settings.DELETED_USER_LASTNAME
        self.email = f"{self.username}@{settings.DELETED_USER_EMAIL_DOMAIN}"
        self.save()
        # Also clear all pending annotations
        self.clear_pending_annotations()
def default_document_input_preview():
    """Default JSON document used to preview the annotation renderer."""
    preview_doc = {
        "text": "<p>Some html text <strong>in bold</strong>.</p><p>Paragraph 2.</p>"
    }
    return preview_doc
class Project(models.Model):
    """
    Model to store annotation projects.
    """
    name = models.TextField(default="New project")
    uuid = models.UUIDField(primary_key=False, default=uuid.uuid4, editable=False)
    description = models.TextField(default="")
    annotator_guideline = models.TextField(default="")
    created = models.DateTimeField(default=timezone.now)
    # Annotation widget configuration rendered by the frontend (list of widget dicts)
    configuration = models.JSONField(default=list)
    owner = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, null=True, related_name="owns")
    # Number of completed annotations wanted per document
    annotations_per_doc = models.IntegerField(default=3)
    # Fraction of the project's documents a single annotator may annotate
    # (see max_num_task_per_annotator)
    annotator_max_annotation = models.FloatField(default=0.6)
    # Allow annotators to reject document
    allow_document_reject = models.BooleanField(default=True)
    # Allow annotators to change their annotation after it's been submitted
    allow_annotation_change = models.BooleanField(default=True)
    # Time it takes for user annotation to timeout (minutes)
    annotation_timeout = models.IntegerField(default=60)
    # Stores a document that's used for previewing in the AnnotationRenderer
    document_input_preview = models.JSONField(default=default_document_input_preview)
    # Stores a csv document that's used for previewing in the AnnotationRenderer
    document_input_preview_csv = models.TextField(default="")
    # Key path inside a document's JSON data that holds its display ID
    document_id_field = models.TextField(default="name")
    annotators = models.ManyToManyField(get_user_model(), through='AnnotatorProject', related_name="annotates")
    # Optional annotator training and qualification-test stages
    has_training_stage = models.BooleanField(default=False)
    has_test_stage = models.BooleanField(default=False)
    can_annotate_after_passing_training_and_test = models.BooleanField(default=True)
    # Minimum proportion of test documents answered correctly to pass the test stage
    min_test_pass_threshold = models.FloatField(default=1.0, null=True)
    # Key path inside test/training documents holding the gold-standard answers
    document_gold_standard_field = models.TextField(default="gold")
    # Key path inside documents holding pre-annotation data (empty = none)
    document_pre_annotation_field = models.TextField(default="")
@classmethod
def get_project_config_fields(cls, exclude_fields: set = set()):
exclude_field_types = {
models.ManyToOneRel,
models.ManyToManyField,
models.ManyToManyRel,
}
fields = Project._meta.get_fields()
config_fields = []
for field in fields:
if field.__class__ not in exclude_field_types and field.name not in exclude_fields:
config_fields.append(field)
return config_fields
@classmethod
def get_project_export_field_names(cls):
fields = Project.get_project_config_fields({"owner", "id", "created", "uuid"})
return [field.name for field in fields]
def clone(self, new_name=None, clone_name_prefix="Copy of ", owner=None):
"""
Clones the Project object, does not retain documents and annotator membership
"""
exclude_fields = {"name", "owner", "id", "created", "uuid"}
# Setting project name
new_project_name = new_name if new_name is not None else ""
if clone_name_prefix:
new_project_name = clone_name_prefix + self.name
new_project = Project.objects.create(name=new_project_name)
# Setting owner
new_project.owner = owner
# Copy all config over
config_fields = self.get_project_config_fields(exclude_fields)
for field in config_fields:
setattr(new_project, field.name, getattr(self, field.name))
new_project.save()
return new_project
    @property
    def num_documents(self):
        # Count of regular (annotation) documents in the project
        return self.documents.filter(doc_type=DocumentType.ANNOTATION).count()
    @property
    def num_test_documents(self):
        # Count of test-stage documents in the project
        return self.documents.filter(doc_type=DocumentType.TEST).count()
    @property
    def num_training_documents(self):
        # Count of training-stage documents in the project
        return self.documents.filter(doc_type=DocumentType.TRAINING).count()
    @property
    def num_annotation_tasks_total(self):
        # Every annotation document needs annotations_per_doc completed annotations
        return self.num_documents * self.annotations_per_doc
    @property
    def num_completed_tasks(self):
        return self._get_project_annotations_query(status=Annotation.COMPLETED).count()
    @property
    def num_pending_tasks(self):
        return self._get_project_annotations_query(status=Annotation.PENDING).count()
    @property
    def num_rejected_tasks(self):
        return self._get_project_annotations_query(status=Annotation.REJECTED).count()
    @property
    def num_timed_out_tasks(self):
        return self._get_project_annotations_query(status=Annotation.TIMED_OUT).count()
    @property
    def num_aborted_tasks(self):
        return Annotation.objects.filter(document__project_id=self.pk,
                                         status=Annotation.ABORTED,
                                         document__doc_type=DocumentType.ANNOTATION).count()
    @property
    def num_occupied_tasks(self):
        # Completed plus currently in-progress (pending) annotations
        return (self._get_project_annotations_query(Annotation.COMPLETED) |
                self._get_project_annotations_query(Annotation.PENDING)).count()
    @property
    def num_annotation_tasks_remaining(self):
        return self.num_annotation_tasks_total - self.num_occupied_tasks
def _get_project_annotations_query(self, status=None, doc_type=DocumentType.ANNOTATION):
if status is None:
return Annotation.objects.filter(document__project_id=self.pk,
document__doc_type=doc_type)
else:
return Annotation.objects.filter(document__project_id=self.pk,
status=status,
document__doc_type=doc_type)
@property
def is_completed(self):
# Project must have documents to be completed
if self.num_annotation_tasks_total <= 0:
return False
return self.num_annotation_tasks_total - self.num_completed_tasks < 1
    @property
    def max_num_task_per_annotator(self):
        # Per-annotator quota: the configured fraction of annotation documents,
        # rounded up so small projects still allow at least one task
        return math.ceil(
            self.annotator_max_annotation * self.documents.filter(doc_type=DocumentType.ANNOTATION).count())
    @property
    def num_annotators(self):
        # Annotators currently ACTIVE on the project
        return self.annotators.filter(annotatorproject__status=AnnotatorProject.ACTIVE).count()
    @property
    def num_all_annotators(self) -> int:
        """Count of all annotators associated with project."""
        return self.annotators.filter().count()
    @property
    def is_project_configured(self):
        # A usable project needs at least one widget and one document
        return len(self.configuration) > 0 and self.num_documents > 0
    @property
    def project_configuration_error_message(self):
        # Human-readable reasons why is_project_configured is False
        errors = []
        if len(self.configuration) < 1:
            errors.append("No annotation widgets defined in the configuration")
        if self.num_documents < 1:
            errors.append("No documents to annotate")
        return errors
    def delete(self):
        """
        Overloaded delete method to optionally send project telemetry stats prior to deletion.
        """
        # NOTE(review): try/finally guarantees the row is deleted even if
        # telemetry fails, but a telemetry exception would still propagate to
        # the caller after deletion — confirm this is the intended behaviour.
        try:
            if settings.TELEMETRY_ON and self.num_all_annotators > 0:
                self.send_telemetry("deleted")
        finally:
            super().delete()
def add_annotator(self, user):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
except ObjectDoesNotExist:
allowed_to_annotate = not self.has_test_stage and not self.has_training_stage
annotator_project = AnnotatorProject.objects.create(annotator=user,
project=self,
status=AnnotatorProject.ACTIVE,
allowed_to_annotate=allowed_to_annotate)
return annotator_project
def make_annotator_active(self, user):
"""
Makes the user active in the project again. An user can be made inactive from the project as a
result of completing all annotation task, manager marking them as completed the project,
rejecting them from the project or the user has left the project themselves.
"""
# Check that user is not active in another project
active_project = user.active_project
if active_project == self:
raise Exception("User already active in this project")
if active_project is not None:
raise Exception(f"User is already active in project {active_project.name}")
if self.annotator_reached_quota(user):
raise Exception(f"User is already reached annotation quota")
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.status = AnnotatorProject.ACTIVE
annotator_project.rejected = False
annotator_project.save()
except ObjectDoesNotExist:
raise Exception("User must be added to the project before they can be made active.")
def annotator_completed_training(self, user, finished_time=timezone.now()):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.training_completed = finished_time
annotator_project.training_score = self.get_annotator_document_score(user, DocumentType.TRAINING)
if annotator_project.project.can_annotate_after_passing_training_and_test and not annotator_project.project.has_test_stage:
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
    def get_annotator_document_score(self, user, doc_type):
        """
        Count how many documents of the given type the user annotated
        correctly, judged against each document's gold-standard field.

        :raises Exception: if the user has more than one annotation on a
            document, or a document lacks the gold-standard field.
        """
        test_docs = self.documents.filter(doc_type=doc_type)
        score = 0
        for document in test_docs:
            # Checks answers for all test documents
            user_annotations = document.annotations.filter(user_id=user.pk)
            if user_annotations.count() > 1:
                # User should not have more than 1 annotation per document
                raise Exception(f"User {user.username} has more than 1 annotation in document")
            annotation = user_annotations.first()
            # Skip if there's no annotation
            if not annotation:
                continue
            # Check that answer key exists in document
            answers = get_value_from_key_path(document.data, self.document_gold_standard_field)
            if answers is None:
                raise Exception(f"No gold standard (answer) field inside test document")
            if self.check_annotation_answer(annotation.data, answers):
                score += 1
        return score
def check_annotation_answer(self, annotation_data, answers):
"""
Compare answers between the annotation.data and document's gold standard field with answers
"""
is_correct = True
for label in answers:
if label not in annotation_data:
return False # Label does not exist in annotation
annotation_val = annotation_data[label]
answer_val = answers[label]["value"]
if isinstance(annotation_val, str) and isinstance(answer_val, str):
if annotation_val != answer_val:
is_correct = False
elif isinstance(annotation_val, list) and isinstance(answer_val, list):
comparison_set = set(annotation_val) & set(answer_val)
if len(answer_val) != len(annotation_val) or len(comparison_set) != len(answer_val):
is_correct = False
else:
is_correct = False
return is_correct
def annotator_completed_test(self, user, finished_time=timezone.now()):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.test_completed = finished_time
annotator_project.test_score = self.get_annotator_document_score(user, DocumentType.TEST)
annotator_test_score_proportion = annotator_project.test_score / self.num_test_documents if self.num_test_documents > 0 else 0
if self.can_annotate_after_passing_training_and_test and \
annotator_test_score_proportion >= self.min_test_pass_threshold:
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def annotator_set_allowed_to_annotate(self, user, finished_time=timezone.now()):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.allowed_to_annotate = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def reject_annotator(self, user, finished_time=timezone.now()):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.annotations_completed = finished_time
annotator_project.status = AnnotatorProject.COMPLETED
annotator_project.rejected = True
annotator_project.save()
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def remove_annotator(self, user, finished_time=timezone.now()):
try:
annotator_project = AnnotatorProject.objects.get(project=self, annotator=user)
annotator_project.annotations_completed = finished_time
annotator_project.status = AnnotatorProject.COMPLETED
annotator_project.save()
Annotation.clear_all_pending_user_annotations(user)
except ObjectDoesNotExist:
raise Exception(f"User {user.username} is not an annotator of the project.")
def num_annotator_task_remaining(self, user):
num_annotable = self.get_annotator_annotatable_documents_query(user).count()
num_completed_by_user = self.get_annotator_completed_documents_query(user).count()
max_num_docs_user_can_annotate = self.max_num_task_per_annotator
remaining_docs_in_quota = max_num_docs_user_can_annotate - num_completed_by_user
if remaining_docs_in_quota < num_annotable:
return remaining_docs_in_quota
else:
return num_annotable
    def get_annotator_annotatable_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        """
        Queryset of documents of the given type that the user can still
        annotate: documents they have no annotation on (completed, pending or
        rejected) and, for ANNOTATION documents, that have not yet reached
        the per-document annotation quota.
        """
        # Filter to get the count of occupied annotations in the document
        # (annotations with COMPLETED and PENDING status)
        occupied_filter = (Q(annotations__status=Annotation.COMPLETED) |
                           Q(annotations__status=Annotation.PENDING))
        occupied_count = Count('annotations', filter=occupied_filter)
        # Filter to get the count of user occupied annotation in the document
        # (annotations with COMPLETED, PENDING, and REJECTED status)
        user_occupied_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED) |
                                Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING) |
                                Q(annotations__user_id=user.pk, annotations__status=Annotation.REJECTED))
        user_occupied_count = Count('annotations', filter=user_occupied_filter)
        # All remaining documents that user can annotate
        annotatable_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
            .annotate(num_occupied=occupied_count) \
            .annotate(num_user_occupied=user_occupied_count) \
            .filter(num_user_occupied__lt=1)
        if doc_type == DocumentType.ANNOTATION:
            # Enforce the max number of annotations per document for ANNOTATION docs only (not
            # for TRAINING or TEST, which can be annotated by everyone)
            annotatable_docs = annotatable_docs.filter(num_occupied__lt=self.annotations_per_doc)
        return annotatable_docs
    def get_annotator_occupied_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        """Queryset of documents the user has any annotation on (completed, pending or rejected)."""
        # Filter to get the count of user occupied annotation in the document
        # (annotations with COMPLETED, PENDING, and REJECTED status)
        user_occupied_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED) |
                                Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING) |
                                Q(annotations__user_id=user.pk, annotations__status=Annotation.REJECTED))
        user_occupied_count = Count('annotations', filter=user_occupied_filter)
        # Number of user annotated docs in the project
        occupied_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
            .annotate(num_user_occupied=user_occupied_count) \
            .filter(num_user_occupied__gt=0)
        return occupied_docs
    def get_annotator_completed_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        """Queryset of documents the user has a COMPLETED annotation on."""
        # Filter to get the count of the user's COMPLETED annotations in the document
        completed_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.COMPLETED))
        completed_count = Count('annotations', filter=completed_filter)
        # Number of user completed annotated docs in the project
        completed_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
            .annotate(num_user_occupied=completed_count) \
            .filter(num_user_occupied__gt=0)
        return completed_docs
    def get_annotator_pending_documents_query(self, user, doc_type=DocumentType.ANNOTATION):
        """Queryset of documents the user has a PENDING annotation on."""
        # Filter to get the count of the user's PENDING annotations in the document
        pending_filter = (Q(annotations__user_id=user.pk, annotations__status=Annotation.PENDING))
        pending_count = Count('annotations', filter=pending_filter)
        # Number of docs in the project with an annotation pending for this user
        pending_docs = Document.objects.filter(project_id=self.pk, doc_type=doc_type) \
            .annotate(num_user_occupied=pending_count) \
            .filter(num_user_occupied__gt=0)
        return pending_docs
def get_annotator_task(self, user):
"""
Gets or creates a new annotation task for user (annotator).
:returns: Dictionary with all information to complete an annotation task. Only project information
is returned if user is waiting to be approved as an annotator. Returns None and removes
user from annotator list if there's no more tasks or user reached quota.
"""
annotation = self.get_current_annotator_task(user)
if annotation:
# User has existing task
return self.get_annotation_task_dict(annotation)
else:
# Tries to generate new task if there's no existing task
if self.annotator_reached_quota(user):
self.remove_annotator(user)
return None # Also return None as we've completed all the task
else:
return self.decide_annotator_task_type_and_assign(user)
def annotator_reached_quota(self, user):
num_user_annotated_docs = (self.get_annotator_completed_documents_query(user) |
self.get_annotator_pending_documents_query(user)).count()
return num_user_annotated_docs >= self.max_num_task_per_annotator
def get_current_annotator_task(self, user):
"""
Gets annotator's current pending task in the project.
"""
current_annotations = user.annotations.filter(status=Annotation.PENDING)
num_annotations = current_annotations.count()
if num_annotations > 1:
raise RuntimeError("Working on more than one annotation at a time! Should not be possible!")
if num_annotations <= 0:
return None
annotation = current_annotations.first()
if annotation.document.project != self:
return RuntimeError(
"The annotation doesn't belong to this project! Annotator should only work on one project at a time")
return annotation
def get_annotation_task_dict(self, annotation, include_task_history_in_project=True):
"""
Returns a dictionary with all information required rendering an annotation task
annotation:Annotation - The annotation to create an annotation task dictionary for
task_history_in_project:bool - Returns a list of annotation ids for this user in the project
"""
document = annotation.document
output = {
**self.get_annotation_task_project_dict(),
"document_id": document.pk,
"document_field_id": get_value_from_key_path(document.data, self.document_id_field),
"document_data": document.data,
"document_type": document.doc_type_str,
"annotation_id": annotation.pk,
"annotation_data": annotation.data,
"allow_document_reject": self.allow_document_reject,
"annotation_timeout": annotation.times_out_at,
"annotator_remaining_tasks": self.num_annotator_task_remaining(user=annotation.user),
"annotator_completed_tasks": self.get_annotator_completed_documents_query(user=annotation.user).count(),
"annotator_completed_training_tasks": self.get_annotator_completed_documents_query(user=annotation.user,
doc_type=DocumentType.TRAINING).count(),
"annotator_completed_test_tasks": self.get_annotator_completed_documents_query(user=annotation.user,
doc_type=DocumentType.TEST).count(),
"document_gold_standard_field": self.document_gold_standard_field,
"document_pre_annotation_field": self.document_pre_annotation_field,
}
if include_task_history_in_project and document.doc_type is DocumentType.ANNOTATION:
# If specified, also returns a list of annotation ids for this user in the project
output["task_history"] = [annotation.pk for annotation in
Annotation.get_annotations_for_user_in_project(annotation.user.pk, self.pk)]
return output
def get_annotation_task_project_dict(self):
return {
"project_name": self.name,
"project_description": self.description,
"project_annotator_guideline": self.annotator_guideline,
"project_config": self.configuration,
"project_id": self.pk,
}
    def decide_annotator_task_type_and_assign(self, user):
        """
        Assign an available annotation task to a user

        Progresses the annotator through the project's stages: first marks
        training/test stages complete when all their documents are done, then
        assigns an annotation, training or test task as appropriate. Returns
        a task dict, a project-only dict (when the annotator is waiting to be
        approved), or None when no annotation tasks remain.
        """
        annotator_proj = AnnotatorProject.objects.get(annotator=user, project=self)
        # Not yet allowed to annotate: progress their training/test stages first
        if not annotator_proj.allowed_to_annotate:
            # Check whether annotator is in test or training
            if self.has_training_stage and not annotator_proj.training_completed:
                # Check whether the annotator's completed all training tasks, mark complete if so
                if self.get_annotator_annotatable_documents_query(user, doc_type=DocumentType.TRAINING).count() == 0:
                    self.annotator_completed_training(user)
            if self.has_test_stage and not annotator_proj.test_completed:
                # Check whether annotator's completed all test tasks, mark complete if so
                if self.get_annotator_annotatable_documents_query(user, doc_type=DocumentType.TEST).count() == 0:
                    self.annotator_completed_test(user)
            # Refresh object to ensure the phase changes are picked up
            annotator_proj.refresh_from_db()
        # Assign task
        if annotator_proj.allowed_to_annotate:
            # If allowed to annotate then skip over testing and training stage
            annotation = self.assign_annotator_task(user)
            if annotation:
                return self.get_annotation_task_dict(annotation)
            else:
                # Remove annotator from project if there's no more tasks
                annotator_proj.annotations_completed = timezone.now()
                annotator_proj.save()
                self.remove_annotator(user)
                return None
        elif self.has_training_stage and not annotator_proj.training_completed:
            # Tries to assign training task
            return self.get_annotation_task_dict(self.assign_annotator_task(user, DocumentType.TRAINING))
        elif self.has_test_stage and not annotator_proj.test_completed:
            # Tries to assign test task
            return self.get_annotation_task_dict(self.assign_annotator_task(user, DocumentType.TEST))
        else:
            # Stages done but not approved to annotate: return project info only
            return self.get_annotation_task_project_dict()
def assign_annotator_task(self, user, doc_type=DocumentType.ANNOTATION):
"""
Assigns an annotation task to the annotator, works for testing, training and annotation tasks.
Annotation task performs an extra check for remaining annotation task (num_annotation_tasks_remaining),
testing and training does not do this check as the annotator must annotate all documents.
"""
if (DocumentType.ANNOTATION and self.num_annotation_tasks_remaining > 0) or \
DocumentType.TEST or DocumentType.TRAINING:
for doc in self.documents.filter(doc_type=doc_type).order_by('?'):
# Check that annotator hasn't annotated and that
# doc hasn't been fully annotated
if doc.user_can_annotate_document(user):
# Returns a new annotation (task) if so
return Annotation.objects.create(user=user,
document=doc,
times_out_at=timezone.now() + timedelta(
minutes=self.annotation_timeout))
return None
def check_project_complete(self):
"""
Checks that all annotations have been completed, release all annotators from project.
If complete, also send telemetry data.
"""
if self.is_completed:
for annotator in self.annotators.all():
self.remove_annotator(annotator)
if settings.TELEMETRY_ON:
self.send_telemetry(status="complete")
def send_telemetry(self, status: str):
"""
Sends telemetry data for the project depending on the status.
"""
if settings.TELEMETRY_ON:
ts = TelemetrySender(status=status, data=self.get_telemetry_stats())
ts.send()
else:
log.info(f"Telemetry is switched off. Not sending telemetry data for project {self.pk}.")
def get_annotators_dict(self):
return {
"annotators": [{"id": ann.id, "username": ann.username, "email": ann.email} for ann in
self.annotators.filter(annotatorproject__status=AnnotatorProject.ACTIVE).all()]
}
    def get_project_stats(self):
        """Return a dict of project statistics and configuration state for display."""
        return {
            "owned_by": self.owner.username,
            "documents": self.num_documents,
            "training_documents": self.num_training_documents,
            "test_documents": self.num_test_documents,
            "completed_tasks": self.num_completed_tasks,
            "pending_tasks": self.num_pending_tasks,
            "rejected_tasks": self.num_rejected_tasks,
            "timed_out_tasks": self.num_timed_out_tasks,
            "aborted_tasks": self.num_aborted_tasks,
            "total_tasks": self.num_annotation_tasks_total,
            "is_configured": self.is_project_configured,
            # Error message is only populated when the configuration is invalid
            "configuration_error": None if self.is_project_configured else self.project_configuration_error_message,
            "is_completed": self.is_completed,
            "num_annotators": self.num_annotators,
        }
    def get_telemetry_stats(self) -> dict:
        """
        Returns a dict of stats specifically for telemetry including no identifying information.
        """
        # The project's random uuid is the only identifier sent; no usernames,
        # e-mail addresses or document contents are included in the payload.
        return {
            "uuid": str(self.uuid),
            "documents": self.num_documents,
            "training_documents": self.num_training_documents,
            "test_documents": self.num_test_documents,
            "completed_tasks": self.num_completed_tasks,
            "pending_tasks": self.num_pending_tasks,
            "rejected_tasks": self.num_rejected_tasks,
            "timed_out_tasks": self.num_timed_out_tasks,
            "aborted_tasks": self.num_aborted_tasks,
            "total_tasks": self.num_annotation_tasks_total,
            "is_configured": self.is_project_configured,
            "is_completed": self.is_completed,
            "num_annotators": self.num_all_annotators,
        }
class AnnotatorProject(models.Model):
    """
    Intermediate class to represent annotator-project relationship
    """
    # Membership status values
    ACTIVE = 0
    COMPLETED = 1
    STATUS = (
        (ACTIVE, 'Active'),
        (COMPLETED, 'Completed')
    )
    annotator = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, null=True, blank=True)
    project = models.ForeignKey(Project, on_delete=models.SET_NULL, null=True, blank=True)
    # Scores achieved in the project's training and test phases
    training_score = models.FloatField(default=0)
    test_score = models.FloatField(default=0)
    # Timestamps for each completed project stage (None while the stage is incomplete)
    training_completed = models.DateTimeField(null=True)
    test_completed = models.DateTimeField(null=True)
    annotations_completed = models.DateTimeField(null=True)
    # Whether this annotator has been approved to do real annotation work
    allowed_to_annotate = models.BooleanField(default=False)
    status = models.IntegerField(choices=STATUS, default=ACTIVE)
    rejected = models.BooleanField(default=False)

    @property
    def num_annotations(self):
        """Number of annotations completed by this annotator in this project"""
        count = 0
        for d in self.project.documents.filter(doc_type=DocumentType.ANNOTATION):
            count += d.annotations.filter(user=self.annotator).count()
        return count

    def set_status(self, status):
        # Persist the membership status change immediately
        self.status = status
        self.save()

    def get_stats(self):
        """Return per-annotator statistics for display."""
        return {
            "annotations": self.num_annotations,
        }
class Document(models.Model):
    """
    Model to represent a document.
    """
    project = models.ForeignKey(Project, on_delete=models.CASCADE, related_name="documents")
    data = models.JSONField(default=dict)
    created = models.DateTimeField(default=timezone.now)
    doc_type = models.IntegerField(choices=DocumentType.DOCUMENT_TYPE, default=DocumentType.ANNOTATION)

    @property
    def num_completed_annotations(self):
        """Number of COMPLETED annotations on this document."""
        return self.annotations.filter(status=Annotation.COMPLETED).count()

    @property
    def num_rejected_annotations(self):
        """Number of REJECTED annotations on this document."""
        return self.annotations.filter(status=Annotation.REJECTED).count()

    @property
    def num_timed_out_annotations(self):
        """Number of TIMED_OUT annotations on this document."""
        return self.annotations.filter(status=Annotation.TIMED_OUT).count()

    @property
    def num_pending_annotations(self):
        """Number of PENDING annotations on this document."""
        return self.annotations.filter(status=Annotation.PENDING).count()

    @property
    def num_aborted_annotations(self):
        """Number of ABORTED annotations on this document."""
        return self.annotations.filter(status=Annotation.ABORTED).count()

    @property
    def num_completed_and_pending_annotations(self):
        """Annotations counting towards the per-document quota (completed or in progress)."""
        return self.annotations.filter(
            Q(status=Annotation.COMPLETED) | Q(status=Annotation.PENDING)).count()

    @property
    def doc_type_str(self):
        """Human-readable name of the document type."""
        if self.doc_type == DocumentType.ANNOTATION:
            return "Annotation"
        elif self.doc_type == DocumentType.TRAINING:
            return "Training"
        elif self.doc_type == DocumentType.TEST:
            return "Test"
        else:
            raise Exception("Unknown document type")

    def user_can_annotate_document(self, user):
        """ User must not have completed, pending or rejected the document,
        and if the document is not a training or test document then it must
        not already be fully annotated."""
        num_user_annotation_in_doc = self.annotations.filter(
            Q(user_id=user.pk, status=Annotation.COMPLETED) |
            Q(user_id=user.pk, status=Annotation.PENDING) |
            Q(user_id=user.pk, status=Annotation.REJECTED)).count()
        if num_user_annotation_in_doc > 1:
            # Invariant violation: a user should hold at most one live annotation per doc
            raise RuntimeError(
                f"The user {user.username} has more than one annotation ({num_user_annotation_in_doc}) in the document.")
        return num_user_annotation_in_doc < 1 and (
            self.doc_type in (DocumentType.TRAINING, DocumentType.TEST) or
            self.num_completed_and_pending_annotations < self.project.annotations_per_doc
        )

    def num_user_completed_annotations(self, user):
        """Number of this user's COMPLETED annotations on the document."""
        return self.annotations.filter(user_id=user.pk, status=Annotation.COMPLETED).count()

    def num_user_pending_annotations(self, user):
        """Number of this user's PENDING annotations on the document."""
        return self.annotations.filter(user_id=user.pk, status=Annotation.PENDING).count()

    def num_user_rejected_annotations(self, user):
        """Number of this user's REJECTED annotations on the document."""
        return self.annotations.filter(user_id=user.pk, status=Annotation.REJECTED).count()

    def num_user_timed_out_annotations(self, user):
        """Number of this user's TIMED_OUT annotations on the document."""
        return self.annotations.filter(user_id=user.pk, status=Annotation.TIMED_OUT).count()

    def num_user_aborted_annotations(self, user):
        """Number of this user's ABORTED annotations on the document."""
        return self.annotations.filter(user_id=user.pk, status=Annotation.ABORTED).count()

    def user_completed_annotation_of_document(self, user):
        """True if the user has at least one completed annotation on this document."""
        return self.num_user_completed_annotations(user) > 0

    def get_listing(self, annotation_list=None):
        """
        Get a dictionary representation of document for rendering.

        :param annotation_list: Optional pre-built list of annotation listings to embed.
        """
        # Bug fix: the previous default of annotation_list=[] was a shared mutable
        # default argument; use None as the sentinel instead.
        doc_out = {
            "id": self.pk,
            "annotations": annotation_list if annotation_list is not None else [],
            "created": self.created,
            "completed": self.num_completed_annotations,
            "rejected": self.num_rejected_annotations,
            "timed_out": self.num_timed_out_annotations,
            "pending": self.num_pending_annotations,
            "aborted": self.num_aborted_annotations,
            "doc_id": get_value_from_key_path(self.data, self.project.document_id_field),
            "project_id": self.project.id,
            "data": self.data,
            "doc_type": self.doc_type_str,
        }
        return doc_out

    def get_doc_annotation_dict(self, json_format="raw", anonymize=True):
        """
        Get dictionary of document and its annotations for export.

        :param json_format: One of "raw", "csv" or "gate".
        :param anonymize: If True, annotators are identified by numeric user id
            instead of username.
        :raises ValueError: If json_format is not a supported format.
        """
        # Create dictionary for document
        if json_format == "raw" or json_format == "csv":
            doc_dict = self.data
        elif json_format == "gate":
            ignore_keys = {"text", self.project.document_id_field}
            features_dict = {key: value for key, value in self.data.items() if key not in ignore_keys}
            doc_dict = {
                "text": self.data["text"],
                "features": features_dict,
                "offset_type": "p",
                "name": get_value_from_key_path(self.data, self.project.document_id_field)
            }
        else:
            # Fail fast with a clear error instead of crashing later with a
            # TypeError when subscripting a None doc_dict
            raise ValueError(f"Unknown json_format: {json_format}")

        # Insert annotation sets into the doc dict
        annotations = self.annotations.filter(status=Annotation.COMPLETED)
        if json_format == "csv":
            # Format annotations for CSV export
            annotation_sets = {}
            for annotation in annotations:
                a_data = annotation.data
                annotation_dict = {}
                # Format for csv, flatten list values
                for a_key, a_value in a_data.items():
                    if isinstance(a_value, list):
                        annotation_dict[a_key] = ",".join(a_value)
                    else:
                        annotation_dict[a_key] = a_value
                annotation_dict["duration_seconds"] = annotation.time_to_complete
                if anonymize:
                    annotation_sets[str(annotation.user.id)] = annotation_dict
                else:
                    annotation_sets[annotation.user.username] = annotation_dict
            doc_dict["annotations"] = annotation_sets
        else:
            # Format for JSON in line with GATE formatting
            annotation_sets = {}
            for annotation in annotations:
                a_data = annotation.data
                annotation_set = {
                    "name": annotation.user.id if anonymize else annotation.user.username,
                    "annotations": [
                        {
                            "type": "Document",
                            "start": 0,
                            "end": 0,
                            "id": 0,
                            "duration_seconds": annotation.time_to_complete,
                            "features": {
                                "label": a_data
                            }
                        }
                    ],
                    "next_annid": 1,
                }
                # Bug fix: the set was previously always keyed by username, leaking
                # usernames even when anonymize=True (the CSV branch already keys
                # by user id in that case)
                if anonymize:
                    annotation_sets[str(annotation.user.id)] = annotation_set
                else:
                    annotation_sets[annotation.user.username] = annotation_set
            doc_dict["annotation_sets"] = annotation_sets
        return doc_dict
class Annotation(models.Model):
    """
    Model to represent a single annotation.
    """
    PENDING = 0
    COMPLETED = 1
    REJECTED = 2
    TIMED_OUT = 3
    ABORTED = 4
    ANNOTATION_STATUS = (
        (PENDING, 'Pending'),
        (COMPLETED, 'Completed'),
        (REJECTED, 'Rejected'),
        (TIMED_OUT, 'Timed out'),
        (ABORTED, 'Aborted')
    )
    user = models.ForeignKey(get_user_model(), on_delete=models.CASCADE, related_name="annotations", null=True)
    document = models.ForeignKey(Document, on_delete=models.CASCADE, related_name="annotations")
    _data = models.JSONField(default=dict)

    @property
    def data(self):
        """Latest annotation payload from the change history, or None if there is none."""
        ann_history = self.latest_annotation_history()
        if ann_history:
            return ann_history.data
        return None

    @data.setter
    def data(self, value):
        # Setting data records a new change-history entry rather than mutating in place
        self._append_annotation_history(value)

    times_out_at = models.DateTimeField(default=None, null=True)
    created = models.DateTimeField(default=timezone.now)
    status = models.IntegerField(choices=ANNOTATION_STATUS, default=PENDING)
    status_time = models.DateTimeField(default=None, null=True)
    time_to_complete = models.FloatField(default=None, null=True)

    def _set_new_status(self, status, time=None):
        # Bug fix: the previous default of time=timezone.now() was evaluated once
        # at import time, stamping every status change with server start-up time.
        if time is None:
            time = timezone.now()
        self.ensure_status_pending()
        self.status = status
        self.status_time = time

    def complete_annotation(self, data, elapsed_time=None, time=None):
        """Record the annotation payload and mark the task COMPLETED."""
        self.data = data
        self._set_new_status(Annotation.COMPLETED, time)
        self.time_to_complete = elapsed_time
        self.save()
        # Also check whether the project has been completed
        self.document.project.check_project_complete()

    def reject_annotation(self, time=None):
        """Mark the task REJECTED by the annotator."""
        self._set_new_status(Annotation.REJECTED, time)
        self.save()

    def timeout_annotation(self, time=None):
        """Mark the task TIMED_OUT."""
        self._set_new_status(Annotation.TIMED_OUT, time)
        self.save()

    def abort_annotation(self, time=None):
        """Mark the task ABORTED."""
        self._set_new_status(Annotation.ABORTED, time)
        self.save()

    def ensure_status_pending(self):
        """Raise RuntimeError unless the annotation is still pending."""
        if self.status == Annotation.PENDING and self.status_time is None:
            # Ok if still pending and doesn't have status time
            return
        if self.status == Annotation.COMPLETED:
            log.warning(f"Annotation id {self.id} is already completed.")
            raise RuntimeError("The annotation is already completed.")
        if self.status == Annotation.REJECTED:
            log.warning(f"Annotation id {self.id} is already rejected.")
            raise RuntimeError("The annotation is already rejected.")
        if self.status == Annotation.TIMED_OUT:
            log.warning(f"Annotation id {self.id} is already timed out.")
            raise RuntimeError("The annotation is already timed out.")
        if self.status == Annotation.ABORTED:
            log.warning(f"Annotation id {self.id} is already aborted.")
            # Bug fix: previously raised with the "timed out" message (copy-paste)
            raise RuntimeError("The annotation is already aborted.")

    def user_allowed_to_annotate(self, user):
        """True if the given user owns this annotation task."""
        return self.user.id == user.id

    def change_annotation(self, data, by_user=None, time=None):
        """Append a revised payload to a COMPLETED annotation's change history."""
        if self.status != Annotation.COMPLETED:
            raise RuntimeError("The annotation must be completed before it can be changed")
        self._append_annotation_history(data, by_user, time)

    def _append_annotation_history(self, data, by_user=None, time=None):
        # Default the change author to the annotation's owner and the timestamp
        # to now (evaluated per call, not at import time)
        if by_user is None:
            by_user = self.user
        if time is None:
            time = timezone.now()
        AnnotationChangeHistory.objects.create(data=data,
                                               time=time,
                                               annotation=self,
                                               changed_by=by_user)

    def latest_annotation_history(self):
        """
        Convenience function for getting the latest annotation data from the change history.
        Returns None if there's no annotations.
        """
        # QuerySet.last() returns None for an empty queryset rather than raising,
        # so the previous try/except ObjectDoesNotExist was dead code.
        return self.change_history.last()

    def get_listing(self):
        """
        Get a dictionary representation of the annotation for rendering.
        """
        output = {
            "id": self.pk,
            "annotated_by": self.user.username,
            "created": self.created,
            # Exactly one of the four timestamps below is set, based on status
            "completed": self.status_time if self.status == Annotation.COMPLETED else None,
            "rejected": self.status_time if self.status == Annotation.REJECTED else None,
            "timed_out": self.status_time if self.status == Annotation.TIMED_OUT else None,
            "aborted": self.status_time if self.status == Annotation.ABORTED else None,
            "times_out_at": self.times_out_at,
            "change_list": [change_history.get_listing() for change_history in self.change_history.all()],
        }
        return output

    @staticmethod
    def check_for_timed_out_annotations(current_time=None):
        """
        Checks for any annotation that has timed out (times_out_at < current_time) and marks
        it as timed out at current_time (defaults to now, evaluated per call).

        Returns the number of annotations that have become timed out.
        """
        if current_time is None:
            current_time = timezone.now()
        timed_out_annotations = Annotation.objects.filter(times_out_at__lt=current_time, status=Annotation.PENDING)
        for annotation in timed_out_annotations:
            annotation.timeout_annotation(current_time)
        return len(timed_out_annotations)

    @staticmethod
    def clear_all_pending_user_annotations(user):
        """Abort every pending annotation held by the user (at most one is expected)."""
        pending_annotations = Annotation.objects.filter(user_id=user.pk, status=Annotation.PENDING)
        if pending_annotations.count() > 1:
            raise RuntimeError("More than one pending annotation has been created for the user")
        for annotation in pending_annotations:
            annotation.abort_annotation()

    @staticmethod
    def get_annotations_for_user_in_project(user_id, project_id, doc_type=DocumentType.ANNOTATION):
        """
        Gets a list of all completed and pending annotation tasks in the project project_id that belong to the
        annotator with user_id.

        Ordered by descending date and PK so the most recent entry is placed first.
        """
        return Annotation.objects.filter(document__project_id=project_id,
                                         document__doc_type=doc_type,
                                         user_id=user_id).distinct().filter(
            Q(status=Annotation.COMPLETED) | Q(status=Annotation.PENDING)).order_by("-created", "-pk")
class AnnotationChangeHistory(models.Model):
    """
    Model to store the changes in annotation when an annotator makes a change after initial submission
    """
    data = models.JSONField(default=dict)
    time = models.DateTimeField(default=timezone.now)
    annotation = models.ForeignKey(Annotation, on_delete=models.CASCADE, related_name="change_history", null=False)
    # Kept separate from annotation.user: a manager may change someone else's annotation
    changed_by = models.ForeignKey(get_user_model(), on_delete=models.SET_NULL, related_name="changed_annotations",
                                   null=True)

    def get_listing(self):
        """Dictionary representation of this history entry for rendering."""
        return {
            "id": self.pk,
            "data": self.data,
            "time": self.time,
            "changed_by": self.changed_by.username,
        }
| 50,946 | 40.931687 | 138 | py |
gate-teamware | gate-teamware-master/backend/apps.py | from django.apps import AppConfig
import logging
log = logging.getLogger(__name__)
class BackendConfig(AppConfig):
    """Django app configuration for the backend application."""
    default_auto_field = 'django.db.models.BigAutoField'
    name = 'backend'

    def ready(self):
        # This needs to be imported in order to
        # pick up all the registered rpc methods
        import backend.rpc
| 343 | 20.5 | 56 | py |
gate-teamware | gate-teamware-master/backend/migrations/0020a_training_score_not_null.py | from django.db import migrations, models
class Migration(migrations.Migration):
    """Make AnnotatorProject.training_score non-nullable with a default of 0."""

    dependencies = [
        ('backend', '0020_auto_20220330_2021'),
    ]

    operations = [
        migrations.AlterField(
            model_name='annotatorproject',
            name='training_score',
            field=models.FloatField(default=0),
        ),
    ]
| 353 | 18.666667 | 47 | py |
gate-teamware | gate-teamware-master/backend/management/commands/check_create_superuser.py | import sys, os
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
    help = "If no superusers in database, create one from credentials supplied in environment variables"

    def handle(self, *args, **options):
        """
        Create a superuser from SUPERUSER_USERNAME / SUPERUSER_PASSWORD /
        SUPERUSER_EMAIL environment variables, but only when the database
        currently has no superuser at all.

        :raises CommandError: If the credentials are missing or the username
            is already taken.
        """
        User = get_user_model()
        su_count = User.objects.filter(is_superuser=True).count()
        if su_count > 0:
            self.stdout.write(f'{su_count} Superusers found in database.')
            return

        username = os.environ.get("SUPERUSER_USERNAME")
        password = os.environ.get("SUPERUSER_PASSWORD")
        email = os.environ.get("SUPERUSER_EMAIL")
        # Fail fast with a clear message instead of passing None values into
        # create_superuser, which would fail with a confusing error.
        if not username or not password:
            raise CommandError(
                "No superusers found and SUPERUSER_USERNAME and/or SUPERUSER_PASSWORD "
                "environment variables are not set; cannot create a superuser.")
        if User.objects.filter(username=username).exists():
            self.stdout.write(self.style.ERROR('Username already exists'))
            raise CommandError("Username already exists")
        User.objects.create_superuser(username=username, password=password, email=email,
                                      is_account_activated=True)
        self.stdout.write(f'No superusers found in database.\nSuperuser created with username {username}')
| 1,211 | 35.727273 | 114 | py |
gate-teamware | gate-teamware-master/backend/management/commands/build_api_docs.py | import json
from django.core.management.base import BaseCommand, CommandError
from django.template.loader import render_to_string
from backend.rpcserver import JSONRPCEndpoint
class Command(BaseCommand):
    # NOTE(review): the help text says "JSON file" but the command renders a
    # markdown template -- confirm which is intended.
    help = "Generate a JSON file listing API endpoints"

    def add_arguments(self, parser):
        # Destination path for the rendered API documentation
        parser.add_argument('output_dest', type=str)

    def handle(self, *args, **options):
        output_dest = options["output_dest"]
        # All registered JSON-RPC endpoints and their metadata
        listing = JSONRPCEndpoint.endpoint_listing()
        for name, props in listing.items():
            # Pre-join the argument names for convenient use in the template
            listing[name]["all_args"] = ','.join(props["arguments"])
        context = {
            "api_dict": listing
        }
        with open(output_dest, "w") as f:
            f.write(render_to_string("api_docs_template.md", context))
| 788 | 24.451613 | 70 | py |
gate-teamware | gate-teamware-master/backend/utils/telemetry.py | import json
import logging
from threading import Thread
import requests
from urllib.parse import urljoin
from django.conf import settings
log = logging.getLogger(__name__)
class TelemetrySender:
    """Posts anonymous telemetry payloads to the configured telemetry endpoint."""

    def __init__(self, status: str, data: dict) -> None:
        """
        :param status: Project status string added to the payload (e.g. "complete").
        :param data: Telemetry statistics dict; mutated in place to add product/status.
        """
        self.url = urljoin(settings.TELEMETRY_BASE_URL, settings.TELEMETRY_PATH)
        self.data = data
        self.data.update({"product": "teamware", "status": status})
        # Set by _post_request once the request has completed
        self.http_status_code = None

    def send(self):
        """
        Makes a post request to the telemetry server containing a dict as json data, if telemetry is switched on.
        """
        if settings.TELEMETRY_ON:
            self.thread = Thread(target=self._post_request)
            # Bug fix: Thread.run() executes the target synchronously in the
            # calling thread; start() is required to actually post in the
            # background. Note http_status_code is therefore only available
            # after the thread finishes.
            self.thread.start()
        else:
            log.info(f"Telemetry is switched off. Not sending telemetry data for project {self.data['uuid']}.")

    def _post_request(self):
        log.info(f"Sending telemetry data for project {self.data['uuid']} to {self.url}.")
        r = requests.post(self.url, json=self.data)
        self.http_status_code = r.status_code
        log.info(f"{self.http_status_code}: {r.text}")
| 1,165 | 34.333333 | 113 | py |
gate-teamware | gate-teamware-master/backend/utils/misc.py | import string
import random
def get_value_from_key_path(obj_dict, key_path, delimiter="."):
    """
    Gets value from a dictionary following a delimited key_path. Does not work for path with array elements.

    :returns: None if path does not exist.
    """
    if key_path is None:
        return None
    current = obj_dict
    for key in key_path.split(delimiter):
        # Stop as soon as the walk leaves plain-dict territory or the key is absent
        if type(current) is not dict or key not in current:
            return None
        current = current[key]
    return current
def insert_value_to_key_path(obj_dict, key_path, value, delimiter="."):
    """
    Insert value into a dictionary following a delimited key_path. Does not work for path with array elements.

    Returns True if the value could be inserted, False otherwise.
    """
    keys = key_path.split(delimiter)
    current_dict = obj_dict
    # Walk (creating as needed) the intermediate dicts for all but the last key
    for key in keys[:-1]:
        if key not in current_dict:
            current_dict[key] = {}
        if type(current_dict[key]) is not dict:
            # An existing non-dict value blocks the path
            return False
        current_dict = current_dict[key]
    current_dict[keys[-1]] = value
    return True
def read_custom_document(path):
    """
    Reads in a text file and returns its contents as a string.

    Primarily used for reading in custom privacy policy and/or terms & conditions documents.

    :param path: Path of the text file to read.
    :returns: The file's contents as a string.
    """
    # Read as UTF-8 explicitly: the platform default encoding is locale
    # dependent and could mangle non-ASCII characters in policy documents.
    with open(path, encoding="utf-8") as file:
        return file.read()
def generate_random_string(length: int) -> str:
    """
    Generates a random ASCII string of lowercase, uppercase and digits of the given length.

    Note: uses the non-cryptographic ``random`` module; use the ``secrets``
    module instead if the value is security sensitive.

    :param length: Length of the generated random string.
    :returns: Generated random string.
    """
    # Bug fix: the return annotation was '-> string' (the module object, not a type)
    use_characters = string.ascii_letters + string.digits
    return ''.join(random.choice(use_characters) for _ in range(length))
| 2,038 | 29.893939 | 110 | py |
gate-teamware | gate-teamware-master/backend/utils/serialize.py | import logging
import json
from datetime import datetime
from django.db import models
from django.db.models import Model, ManyToOneRel, ManyToManyRel, ForeignKey
from django.utils import timezone
from backend.models import Project
log = logging.getLogger(__name__)
def dsl_val(attr_name, obj, data):
    """
    Insert value of `data` with key `attr_name` into the object `obj` with attribute name `attr_name`
    """
    if attr_name not in data:
        return
    setattr(obj, attr_name, data[attr_name])
def dsl_json(attr_name, obj, data):
    """
    Convert value of `data` with key `attr_name` into a JSON string and insert into the
    object `obj` with attribute name `attr_name`
    """
    if attr_name not in data:
        return
    setattr(obj, attr_name, json.dumps(data[attr_name]))
def dsl_date(attr_name, obj, data):
    """
    Convert value of `data` with key `attr_name` into a datetime object and insert into the
    object `obj` with attribute name `attr_name`
    """
    if attr_name not in data:
        return
    value = data[attr_name]
    if value is None:
        setattr(obj, attr_name, None)
    elif isinstance(value, str):
        # ISO-8601 strings are parsed into datetime objects
        setattr(obj, attr_name, datetime.fromisoformat(value))
    elif isinstance(value, datetime):
        setattr(obj, attr_name, value)
    else:
        raise ValueError("Date must be None, str or datetime object")
class FieldSerializer:
    """Default field serializer: reads/writes the model attribute named by the field verbatim."""

    def serialize(self, model_obj, field):
        # Return the raw attribute value with the field's name
        return getattr(model_obj, field.name)

    def deserialize(self, model_obj, val_input, field):
        # Assign the input value to the attribute with the field's name
        setattr(model_obj, field.name, val_input)
class ForeignKeySerializer(FieldSerializer):
    """Serializes a foreign key field as the related object's primary key."""

    def serialize(self, model_obj, field):
        """Return the related object's id, or None if the FK is unset."""
        related_obj = getattr(model_obj, field.name)
        return related_obj.id if related_obj else None

    def deserialize(self, model_obj, val_input, field):
        """Look up the related object by primary key; a falsy input clears the FK."""
        rel_obj = field.related_model.objects.get(pk=val_input) if val_input else None
        setattr(model_obj, field.name, rel_obj)
class RelationSerializer(FieldSerializer):
    """Serializes a to-many relation as a list of related object ids."""

    def serialize(self, model_obj, field):
        """Return the ids of every object in the relation manager for this field."""
        id_values = getattr(model_obj, field.name).all().values_list('id', flat=True)
        return list(id_values)

    def deserialize(self, model_obj, val_input, field):
        """Deserializing relations is currently not supported."""
        pass  # TODO ? Might be better to manage these relations in a separate field
class ModelSerializer:
    """
    Serializes/deserializes Django ``Model`` instances to/from plain dicts.

    Field handling can be customised in two ways:

    * ``field_serializers`` -- dict mapping a *field name* to a
      ``FieldSerializer`` instance used for that specific field, e.g.
      ``{"my_json_field": JSONFieldSerializer()}``.
    * ``serializer_dict`` -- maps Django field *classes* to serializer
      instances (relations and foreign keys by default).
    """

    def __init__(self):
        # Shared stateless serializer instances, reused for every field
        self._field_serializer = FieldSerializer()
        self._relation_serializer = RelationSerializer()
        self._foreign_key_serializer = ForeignKeySerializer()
        # Per-field-name overrides, populated by users of the class
        self.field_serializers = {}
        self.field_relation_spec = {}
        # Per-field-class serializers
        self.serializer_dict = {
            ManyToManyRel: self._relation_serializer,
            ManyToOneRel: self._relation_serializer,
            ForeignKey: self._foreign_key_serializer
        }

    def serialize(self, model_obj: Model, select_fields: set = None, exclude_fields: set = None):
        """
        Serialize ``model_obj`` to a dict.

        :param select_fields: If given and non-empty, only these field names are serialized.
        :param exclude_fields: Field names to omit from the output.
        :raises ValueError: If model_obj is not a Model instance.
        """
        if not model_obj or not isinstance(model_obj, Model):
            raise ValueError("Must provide an instance of a Model to serialize")
        output = {}
        fields_to_serialize = self.get_field_names_to_serialize(model_obj, select_fields, exclude_fields)
        # Value fields, foreign keys and fields with serializers
        for field in model_obj.__class__._meta.get_fields():
            if field.name in fields_to_serialize:
                output[field.name] = self.serialize_field(model_obj, field)
        return output

    def get_field_names_to_serialize(self, model_obj: Model, select_fields: set, exclude_fields: set):
        """Resolve the effective set of field names given the select/exclude options."""
        if select_fields:
            # Copy so the caller's set is not mutated by the exclusions below
            fields_to_serialize = set(select_fields)
        else:
            fields_to_serialize = {field.name for field in model_obj.__class__._meta.get_fields()}
        if exclude_fields:
            for exclude_name in exclude_fields:
                # discard() rather than remove(): excluding an absent name is not an error
                fields_to_serialize.discard(exclude_name)
        return fields_to_serialize

    def serialize_field(self, model_obj: Model, field):
        """Serialize a single field using the appropriate serializer."""
        field_serializer = self.get_field_serializer(field)
        return field_serializer.serialize(model_obj, field)

    def get_field_serializer(self, django_field):
        """
        Pick the serializer for a field: a per-name override from ``field_serializers``
        first, then a per-class serializer from ``serializer_dict``, falling back to
        the shared plain ``FieldSerializer``.
        """
        # Bug fix: field_serializers was documented in __init__ but never consulted,
        # and a new FieldSerializer was allocated on every call instead of reusing
        # the shared instance.
        if django_field.name in self.field_serializers:
            return self.field_serializers[django_field.name]
        if django_field.__class__ in self.serializer_dict:
            return self.serializer_dict[django_field.__class__]
        return self._field_serializer

    def deserialize(self, model_class, input_dict, select_fields: set = None, exclude_fields: set = None):
        """
        Update an existing model instance (looked up by ``input_dict["id"]``) from
        a dict, save it, and return it.

        :raises ValueError: If model_class is not a django Model subclass.
        """
        if not issubclass(model_class, Model):
            raise ValueError(f"{model_class} must be subclass of django Model")
        # .get() raises model_class.DoesNotExist for an unknown id
        model_obj = model_class.objects.get(pk=input_dict["id"])
        if not model_obj:
            raise ValueError(f"No object with id {input_dict['id']}")
        fields_to_serialize = self.get_field_names_to_serialize(model_obj, select_fields, exclude_fields)
        # Value fields, foreign keys and fields with serializers
        for field in model_obj.__class__._meta.get_fields():
            if field.name in fields_to_serialize and field.name in input_dict:
                self.deserialize_field(model_obj, input_dict[field.name], field)
        model_obj.save()
        return model_obj

    def deserialize_field(self, model_obj, input_field, field):
        """Deserialize a single field value into the model object."""
        field_serializer = self.get_field_serializer(field)
        return field_serializer.deserialize(model_obj, input_field, field)
| 6,068 | 35.125 | 155 | py |
gate-teamware | gate-teamware-master/teamware/wsgi.py | """
WSGI config for teamware project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os

from django.core.wsgi import get_wsgi_application

# Production entry point: default to the deployment settings module
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings.deployment')

application = get_wsgi_application()
| 404 | 22.823529 | 79 | py |
gate-teamware | gate-teamware-master/teamware/urls.py | """teamware URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, re_path
from backend import views
from backend.rpcserver import JSONRPCEndpoint

urlpatterns = [
    # Django admin interface
    path('admin/', admin.site.urls),
    # Single JSON-RPC endpoint used by the frontend for all API calls
    path('rpc/', JSONRPCEndpoint.as_view()),
    # Annotation export download; all export options are encoded in the URL
    path('download_annotations/<int:project_id>/<str:doc_type>/<str:export_type>/<str:json_format>/<int:entries_per_file>/<str:anonymize>/', views.DownloadAnnotationsView.as_view()),
    # Catch-all: serve the single-page application for every other URL
    re_path('^.*$', views.MainView.as_view(), name="index"),
]
| 1,121 | 39.071429 | 182 | py |
gate-teamware | gate-teamware-master/teamware/asgi.py | """
ASGI config for teamware project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/
"""
import os

from django.core.asgi import get_asgi_application

# NOTE(review): unlike wsgi.py, this defaults to 'teamware.settings' rather
# than 'teamware.settings.deployment' -- confirm the difference is intentional.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'teamware.settings')

application = get_asgi_application()
| 393 | 22.176471 | 78 | py |
gate-teamware | gate-teamware-master/teamware/settings/docker-integration.py | """
Settings for integration testing
Uses a clean database every time
"""
from .deployment import *

# Use a dedicated, disposable database for containerised integration test runs
DATABASES['default']['NAME'] = "teamware_integration_db"

# Turn off e-mail activation for testing
ACTIVATION_WITH_EMAIL = False

# No telemetry and no frontend dev-server proxying during integration tests
TELEMETRY_ON = False
FRONTEND_DEV_SERVER_USE = False
gate-teamware | gate-teamware-master/teamware/settings/deployment.py | import logging
import sys
import os

from .base import *

# Enable csrf in production
MIDDLEWARE.append(
    'django.middleware.csrf.CsrfViewMiddleware'
)

# Debug mode is off unless DJANGO_DEBUG is explicitly set to a truthy value
DEBUG = (os.environ.get('DJANGO_DEBUG', "false").lower() in ['true', 'yes', 'on', '1'])

if 'DJANGO_ALLOWED_HOSTS' in os.environ:
    # This looks a bit horrible, but the logic is split DJANGO_ALLOWED_HOSTS on
    # commas, strip surrounding whitespace off each element, and filter out any
    # remaining empty strings
    ALLOWED_HOSTS.extend(host for host in (h.strip() for h in os.environ['DJANGO_ALLOWED_HOSTS'].split(',')) if host)

# Log INFO and above from all loggers to stdout (captured by container logs)
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'formatters': {
        'verbose': {
            'format': '%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
        },
    },
    'handlers': {
        'console': {
            'level': 'INFO',
            'class': 'logging.StreamHandler',
            'stream': sys.stdout,
            'formatter': 'verbose'
        },
    },
    'loggers': {
        '': {
            'handlers': ['console'],
            'level': 'INFO',
            'propagate': True,
        },
    },
}

# PostgreSQL connection, configured entirely from environment variables
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': os.environ.get("DJANGO_DB_NAME", "teamware_db"),
        "USER": os.environ.get("DB_USERNAME", "user"),
        "PASSWORD": os.environ.get("DB_PASSWORD", "password"),
        "HOST": os.environ.get("DB_HOST", "db"),
        "PORT": os.environ.get("DB_PORT", "5432"),
    }
}

# Telemetry reporting is enabled in production deployments
TELEMETRY_ON = True

# Serve the built frontend assets instead of proxying to the webpack dev server
FRONTEND_DEV_SERVER_USE = False
| 1,550 | 25.741379 | 117 | py |
gate-teamware | gate-teamware-master/teamware/settings/integration.py | """
Settings for local integration testing
Uses a clean database every time
"""
from .base import *

# Use a dedicated, disposable database for local integration test runs
DATABASES['default']['NAME'] = "teamware_integration_db"

# Turn off e-mail activation for testing
ACTIVATION_WITH_EMAIL = False

# No telemetry during integration tests
TELEMETRY_ON = False
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/setup.py | import os
import os.path as osp
import shutil
import subprocess
import sys
import warnings
from setuptools import find_packages, setup
def readme():
    """Return the contents of README.md as a UTF-8 string (used as the long description)."""
    with open('README.md', encoding='utf-8') as readme_file:
        return readme_file.read()
version_file = 'mmedit/version.py'
def get_git_hash():
    """Return the full git SHA of HEAD, or 'unknown' if git cannot be run."""

    def _minimal_ext_cmd(cmd):
        # construct minimal environment
        env = {}
        for k in ['SYSTEMROOT', 'PATH', 'HOME']:
            v = os.environ.get(k)
            if v is not None:
                env[k] = v
        # LANGUAGE is used on win32
        env['LANGUAGE'] = 'C'
        env['LANG'] = 'C'
        env['LC_ALL'] = 'C'
        out = subprocess.Popen(
            cmd, stdout=subprocess.PIPE, env=env).communicate()[0]
        return out

    try:
        out = _minimal_ext_cmd(['git', 'rev-parse', 'HEAD'])
        sha = out.strip().decode('ascii')
    except OSError:
        # git binary not found / not executable
        sha = 'unknown'
    return sha
def get_hash():
    """Return a short git SHA for the working tree, the SHA recorded in an
    existing version file, or 'unknown' if neither is available."""
    if os.path.exists('.git'):
        # Working from a git checkout: ask git directly
        return get_git_hash()[:7]
    if os.path.exists(version_file):
        try:
            from mmedit.version import __version__
            return __version__.split('+')[-1]
        except ImportError:
            raise ImportError('Unable to get git version')
    return 'unknown'
def get_version():
    """Execute the version file and return the ``__version__`` it defines."""
    with open(version_file, 'r') as f:
        # The version file is part of this repository, so exec-ing it here is
        # the conventional setup.py trick for reading __version__ without
        # importing the (possibly not-yet-installed) package.
        exec(compile(f.read(), version_file, 'exec'))
    return locals()['__version__']
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.

    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs

    Returns:
        List[str]: list of requirements items

    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname

    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: the package name follows the #egg= fragment
                info['package'] = line.split('#egg=')[1]
            elif '@git+' in line:
                # Direct git URL requirement: keep the whole line as the package
                info['package'] = line
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '<=', '==', '>', '<']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info

    def parse_require_file(fpath):
        # Yield parsed info for every non-blank, non-comment line in the file
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info

    def gen_packages_items():
        # Re-assemble each requirement string from its parsed parts
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item

    packages = list(gen_packages_items())
    return packages
def add_mim_extention():
    """Add extra files that are required to support MIM into the package.

    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # Any other setup.py command: nothing to do.
        return
    filenames = ['tools', 'configs', 'demo', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    # `.mim` is the in-package mirror that MIM discovers at runtime.
    mim_path = osp.join(repo_path, 'mmedit', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/symlink/directory at the target first.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                try:
                    os.symlink(src_relpath, tar_path)
                except OSError:
                    # Creating a symbolic link on windows may raise an
                    # `OSError: [WinError 1314]` due to privilege. If
                    # the error happens, the src file will be copied
                    mode = 'copy'
                    warnings.warn(
                        f'Failed to create a symbolic link for {src_relpath}, '
                        f'and it will be copied to {tar_path}')
                else:
                    continue
            if mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                # Unreachable unless ``mode`` is corrupted above.
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Mirror tools/configs/demo into mmedit/.mim before packaging so MIM
    # can locate them from the installed package.
    add_mim_extention()
    setup(
        name='mmedit',
        version=get_version(),
        description='OpenMMLab Image and Video Editing Toolbox and Benchmark',
        long_description=readme(),
        long_description_content_type='text/markdown',
        maintainer='MMEditing Contributors',
        maintainer_email='openmmlab@gmail.com',
        keywords='computer vision, super resolution, video interpolation, '
        'inpainting, matting, SISR, RefSR, VSR, GAN, VFI',
        url='https://github.com/open-mmlab/mmediting',
        packages=find_packages(exclude=('configs', 'tools', 'demo')),
        include_package_data=True,
        classifiers=[
            'Development Status :: 4 - Beta',
            'Intended Audience :: Developers',
            'Intended Audience :: Education',
            'Intended Audience :: Science/Research',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
            'Programming Language :: Python :: 3.8',
            'Programming Language :: Python :: 3.9',
            'Programming Language :: Python :: 3.10',
            'Topic :: Scientific/Engineering :: Artificial Intelligence',
            'Topic :: Scientific/Engineering :: Image Processing',
        ],
        license='Apache License 2.0',
        # Runtime deps come from requirements/runtime.txt; extras pull in
        # the full / test requirement sets parsed by parse_requirements().
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
        },
        zip_safe=False)
| 8,503 | 34.286307 | 125 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/evaluate_comp1k.py | import argparse
import os.path as osp
import re
import mmcv
import numpy as np
from mmedit.core.evaluation import connectivity, gradient_error, mse, sad
from mmedit.utils import modify_args
def evaluate_one(args):
"""Function to evaluate one sample of data.
Args:
args (tuple): Information needed to evaluate one sample of data.
Returns:
dict: The evaluation results including sad, mse, gradient error and
connectivity error.
"""
pred_alpha_path, alpha_path, trimap_path = args
pred_alpha = mmcv.imread(pred_alpha_path, flag='grayscale')
alpha = mmcv.imread(alpha_path, flag='grayscale')
if trimap_path is None:
trimap = np.ones_like(alpha)
else:
trimap = mmcv.imread(trimap_path, flag='grayscale')
sad_result = sad(alpha, trimap, pred_alpha)
mse_result = mse(alpha, trimap, pred_alpha)
grad_result = gradient_error(alpha, trimap, pred_alpha)
conn_result = connectivity(alpha, trimap, pred_alpha)
return (sad_result, mse_result, grad_result, conn_result)
def evaluate(pred_root, gt_root, trimap_root, verbose, nproc):
    """Evaluate test results of Adobe composition-1k dataset.

    There are 50 different ground truth foregrounds and alpha mattes pairs,
    each of the foreground will be composited with 20 different backgrounds,
    producing 1000 images for testing. In some repo, the ground truth alpha
    matte will be copied 20 times and named the same as the images. This
    function accept both original alpha matte folder (contains 50 ground
    truth alpha mattes) and copied alpha matte folder (contains 1000 ground
    truth alpha mattes) for `gt_root`.
    Example of copied name:
    ```
    alpha_matte1.png -> alpha_matte1_0.png
                        alpha_matte1_1.png
                        ...
                        alpha_matte1_19.png
                        alpha_matte1_20.png
    ```
    Args:
        pred_root (str): Path to the predicted alpha matte folder.
        gt_root (str): Path to the ground truth alpha matte folder.
        trimap_root (str): Path to the trimap folder; if None, metrics are
            computed on the full image.
        verbose (bool): Whether print result for each predicted alpha matte.
        nproc (int): number of processers.
    """
    images = sorted(mmcv.scandir(pred_root))
    gt_files_num = len(list(mmcv.scandir(gt_root)))
    # If ground truth alpha mattes are not copied (number of files is 50), we
    # use the below pattern to recover the name of the original alpha matte.
    # NOTE: ``pattern`` is only defined in the 50-file case; the loop below
    # only dereferences it in that same case.
    if gt_files_num == 50:
        pattern = re.compile(r'(.+)_(?:\d+)(.png)')
    pairs = []
    for img in images:
        pred_alpha_path = osp.join(pred_root, img)
        # if ground truth alpha matte are not copied, recover the original name
        if gt_files_num == 50:
            groups = pattern.match(img).groups()
            alpha_path = osp.join(gt_root, ''.join(groups))
        # if ground truth alpha matte are copied, the name should be the same
        else: # gt_files_num == 1000
            alpha_path = osp.join(gt_root, img)
        trimap_path = (
            osp.join(trimap_root, img) if trimap_root is not None else None)
        pairs.append((pred_alpha_path, alpha_path, trimap_path))
    # Evaluate all pairs in parallel; results keep the order of ``pairs``.
    results = mmcv.track_parallel_progress(evaluate_one, pairs, nproc)
    if verbose:
        # for sad_result, mse_result, grad_result, conn_result in results:
        for i, img in enumerate(images):
            sad_result, mse_result, grad_result, conn_result = results[i]
            print(f'{img} SAD: {sad_result:.6g} MSE: {mse_result:.6g} '
                  f'GRAD: {grad_result:.6g} CONN: {conn_result:.6g}')
    sad_mean, mse_mean, grad_mean, conn_mean = np.mean(results, axis=0)
    print(f'MEAN: SAD: {sad_mean:.6g} MSE: {mse_mean:.6g} '
          f'GRAD: {grad_mean:.6g} CONN: {conn_mean:.6g}')
def parse_args():
    """Build the CLI for composition-1k evaluation and parse argv."""
    modify_args()
    ap = argparse.ArgumentParser(
        description='evaluate composition-1k prediction result')
    ap.add_argument(
        'pred_root', help='Path to the predicted alpha matte folder')
    ap.add_argument(
        'gt_root', help='Path to the ground truth alpha matte folder')
    ap.add_argument(
        '--trimap-root',
        help='Path to trimap folder. If not specified, '
        'results are calculated on the full image.')
    ap.add_argument(
        '-v', '--verbose', action='store_true',
        help='Whether print result for each predicted alpha matte')
    ap.add_argument(
        '--nproc', type=int, default=4, help='number of processers')
    return ap.parse_args()
def main():
    """Entry point: validate the CLI paths and run the evaluation."""
    args = parse_args()
    # Fail fast with a clear message if either input folder is missing.
    for name in ('pred_root', 'gt_root'):
        path = getattr(args, name)
        if not osp.exists(path):
            raise FileNotFoundError(f'{name} {path} not found')
    evaluate(args.pred_root, args.gt_root, args.trimap_root, args.verbose,
             args.nproc)


if __name__ == '__main__':
    main()
| 5,073 | 36.585185 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/get_flops.py | import argparse
from mmcv import Config
from mmcv.cnn.utils import get_model_complexity_info
from mmedit.models import build_model
def parse_args():
    """Parse CLI arguments for the FLOPs/params counting script.

    Returns:
        argparse.Namespace: Parsed arguments with ``config`` (model config
        path) and ``shape`` (input size as 1, 2, 3 or 4 ints).
    """
    # The description previously read 'Train a editor' -- copied from the
    # training script; describe this tool's actual purpose.
    parser = argparse.ArgumentParser(
        description='Get the FLOPs of an editor')
    parser.add_argument('config', help='train config file path')
    parser.add_argument(
        '--shape',
        type=int,
        nargs='+',
        default=[250, 250],
        help='input image size')
    return parser.parse_args()
def main():
    """Build the model from the config and report its FLOPs and params.

    The input shape comes from ``--shape``: one int (square image), two ints
    (H, W), or three/four ints used verbatim (four for video inputs in
    (t, c, h, w) order).
    """
    args = parse_args()
    if len(args.shape) == 1:
        input_shape = (3, args.shape[0], args.shape[0])
    elif len(args.shape) == 2:
        input_shape = (3, ) + tuple(args.shape)
    elif len(args.shape) in [3, 4]:  # 4 for video inputs (t, c, h, w)
        input_shape = tuple(args.shape)
    else:
        raise ValueError('invalid input shape')
    cfg = Config.fromfile(args.config)
    model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg).cuda()
    model.eval()
    # The complexity counter needs a plain single-tensor forward; models
    # expose that as ``forward_dummy``.
    if hasattr(model, 'forward_dummy'):
        model.forward = model.forward_dummy
    else:
        # Message fixed: used to read 'currently not currently supported'.
        raise NotImplementedError(
            'FLOPs counter is not currently supported '
            f'with {model.__class__.__name__}')
    flops, params = get_model_complexity_info(model, input_shape)
    split_line = '=' * 30
    print(f'{split_line}\nInput shape: {input_shape}\n'
          f'Flops: {flops}\nParams: {params}\n{split_line}')
    if len(input_shape) == 4:
        print('!!!If your network computes N frames in one forward pass, you '
              'may want to divide the FLOPs by N to get the average FLOPs '
              'for each frame.')
    print('!!!Please be cautious if you use the results in papers. '
          'You may need to check if all ops are supported and verify that the '
          'flops computation is correct.')


if __name__ == '__main__':
    main()
| 1,956 | 29.578125 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/onnx2tensorrt.py | import argparse
import os
import os.path as osp
import warnings
from typing import Iterable, Optional
import cv2
import mmcv
import numpy as np
import onnxruntime as ort
import torch
from mmcv.ops import get_onnxruntime_op_path
from mmcv.tensorrt import (TRTWrapper, is_tensorrt_plugin_loaded, onnx2trt,
save_trt_engine)
from mmedit.datasets.pipelines import Compose
def get_GiB(x: int):
    """Return ``x`` gibibytes expressed as a byte count."""
    return x * 1024 ** 3
def _prepare_input_img(model_type: str,
                       img_path: str,
                       config: dict,
                       rescale_shape: Optional[Iterable] = None) -> dict:
    """Prepare the input image.

    Args:
        model_type (str): which kind of model config belong to; only
            'restorer' is supported by this deployment path.
        img_path (str): image path to show or verify.
        config (dict): MMCV config, determined by the input config file.
        rescale_shape (Optional[Iterable]): to rescale the shape of the \
            input tensor.
    Returns:
        dict: {'imgs': imgs, 'img_metas': img_metas}
    Raises:
        ValueError: if ``model_type`` is neither 'mattor' nor 'restorer'.
        RuntimeError: for 'mattor', which is not supported here.
    """
    # Ground-truth keys are stripped from the test pipeline: they are not
    # available (nor needed) at deployment time.
    if model_type == 'mattor':
        keys_to_remove = ['alpha', 'ori_alpha']
    elif model_type == 'restorer':
        keys_to_remove = ['gt', 'gt_path']
    else:
        # Previously 'inpainting'/'synthesizer' fell through with
        # ``keys_to_remove`` unbound and crashed with a NameError; raise an
        # explicit error instead.
        raise ValueError('Invalid model_type!', model_type)
    for key in keys_to_remove:
        for pipeline in list(config.test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                config.test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                # Drop the pipeline step entirely once it has no keys left.
                if len(pipeline['keys']) == 0:
                    config.test_pipeline.remove(pipeline)
            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
                pipeline['meta_keys'].remove(key)
    # build the data pipeline
    test_pipeline = Compose(config.test_pipeline)
    # prepare data
    if model_type == 'mattor':
        raise RuntimeError('Invalid model_type!', model_type)
    if model_type == 'restorer':
        data = dict(lq_path=img_path)
    data = test_pipeline(data)
    if model_type == 'restorer':
        imgs = data['lq']
    else:
        imgs = data['img']
    img_metas = [data['meta']]
    if rescale_shape is not None:
        # Record the requested output shape (H, W, 3) in the metadata.
        for img_meta in img_metas:
            img_meta['ori_shape'] = tuple(rescale_shape) + (3, )
    mm_inputs = {'imgs': imgs, 'img_metas': img_metas}
    return mm_inputs
def onnx2tensorrt(onnx_file: str,
                  trt_file: str,
                  config: dict,
                  input_config: dict,
                  model_type: str,
                  img_path: str,
                  fp16: bool = False,
                  verify: bool = False,
                  show: bool = False,
                  workspace_size: int = 1,
                  verbose: bool = False):
    """Convert ONNX model to TensorRT model.

    Args:
        onnx_file (str): the path of the input ONNX file.
        trt_file (str): the path to output the TensorRT file.
        config (dict): MMCV configuration.
        input_config (dict): contains min_shape, max_shape and \
            input image path.
        model_type (str): which kind of model the config belongs to.
        img_path (str): image used for verification.
        fp16 (bool): whether to enable fp16 mode.
        verify (bool): whether to verify the outputs of TensorRT \
            and ONNX are same.
        show (bool): whether to show the outputs of TensorRT and ONNX.
        workspace_size (int): max TensorRT workspace size in GiB.
        verbose (bool): whether to print the log when generating \
            TensorRT model.
    """
    # Imported lazily so the module can be loaded without TensorRT installed.
    import tensorrt as trt
    min_shape = input_config['min_shape']
    max_shape = input_config['max_shape']
    # create trt engine and wrapper
    # The optimization profile uses min_shape as both min and opt shape.
    opt_shape_dict = {'input': [min_shape, min_shape, max_shape]}
    max_workspace_size = get_GiB(workspace_size)
    trt_engine = onnx2trt(
        onnx_file,
        opt_shape_dict,
        log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
        fp16_mode=fp16,
        max_workspace_size=max_workspace_size)
    save_dir, _ = osp.split(trt_file)
    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
    save_trt_engine(trt_engine, trt_file)
    print(f'Successfully created TensorRT engine: {trt_file}')
    if verify:
        inputs = _prepare_input_img(
            model_type=model_type, img_path=img_path, config=config)
        imgs = inputs['imgs']
        img_list = [imgs.unsqueeze(0)]
        if max_shape[0] > 1:
            # concate flip image for batch test
            flip_img_list = [_.flip(-1) for _ in img_list]
            img_list = [
                torch.cat((ori_img, flip_img), 0)
                for ori_img, flip_img in zip(img_list, flip_img_list)
            ]
        # Get results from ONNXRuntime
        ort_custom_op_path = get_onnxruntime_op_path()
        session_options = ort.SessionOptions()
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        sess.set_providers(['CPUExecutionProvider'], [{}])  # use cpu mode
        onnx_output = sess.run(['output'],
                               {'input': img_list[0].detach().numpy()})[0][0]
        # Get results from TensorRT (inference runs on the GPU).
        trt_model = TRTWrapper(trt_file, ['input'], ['output'])
        with torch.no_grad():
            trt_outputs = trt_model({'input': img_list[0].contiguous().cuda()})
        trt_output = trt_outputs['output'][0].cpu().detach().numpy()
        if show:
            # CHW -> HWC, clip to [0, 1] and RGB -> BGR for cv2 display.
            onnx_visualize = onnx_output.transpose(1, 2, 0)
            onnx_visualize = np.clip(onnx_visualize, 0, 1)[:, :, ::-1]
            trt_visualize = trt_output.transpose(1, 2, 0)
            trt_visualize = np.clip(trt_visualize, 0, 1)[:, :, ::-1]
            cv2.imshow('ONNXRuntime', onnx_visualize)
            cv2.imshow('TensorRT', trt_visualize)
            cv2.waitKey()
        np.testing.assert_allclose(
            onnx_output, trt_output, rtol=1e-03, atol=1e-05)
        print('TensorRT and ONNXRuntime output all close.')
def parse_args():
    """Build and parse the CLI options for ONNX -> TensorRT conversion."""
    ap = argparse.ArgumentParser(
        description='Convert MMSegmentation models from ONNX to TensorRT')
    ap.add_argument('config', help='Config file of the model')
    ap.add_argument(
        'model_type',
        choices=['inpainting', 'mattor', 'restorer', 'synthesizer'],
        help='what kind of model the config belong to.')
    ap.add_argument('img_path', type=str, help='Image for test')
    ap.add_argument('onnx_file', help='Path to the input ONNX model')
    ap.add_argument(
        '--trt-file', type=str, default='tmp.trt',
        help='Path to the output TensorRT engine')
    ap.add_argument(
        '--max-shape', type=int, nargs=4, default=[1, 3, 512, 512],
        help='Maximum shape of model input.')
    ap.add_argument(
        '--min-shape', type=int, nargs=4, default=[1, 3, 32, 32],
        help='Minimum shape of model input.')
    ap.add_argument(
        '--workspace-size', type=int, default=1,
        help='Max workspace size in GiB')
    ap.add_argument('--fp16', action='store_true', help='Enable fp16 mode')
    ap.add_argument(
        '--show', action='store_true', help='Whether to show output results')
    ap.add_argument(
        '--verify', action='store_true',
        help='Verify the outputs of ONNXRuntime and TensorRT')
    ap.add_argument(
        '--verbose', action='store_true',
        help='Whether to verbose logging messages while creating \
            TensorRT engine.')
    return ap.parse_args()
if __name__ == '__main__':
    assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
    args = parse_args()
    # check arguments
    assert osp.exists(args.config), 'Config {} not found.'.format(args.config)
    assert osp.exists(args.onnx_file), \
        'ONNX model {} not found.'.format(args.onnx_file)
    assert args.workspace_size >= 0, 'Workspace size less than 0.'
    # Each max dimension must dominate the matching min dimension for the
    # TensorRT optimization profile to be valid.
    for max_value, min_value in zip(args.max_shape, args.min_shape):
        assert max_value >= min_value, \
            'max_shape should be larger than min shape'
    config = mmcv.Config.fromfile(args.config)
    config.model.pretrained = None
    input_config = {
        'min_shape': args.min_shape,
        'max_shape': args.max_shape,
        'input_path': args.img_path
    }
    onnx2tensorrt(
        args.onnx_file,
        args.trt_file,
        config,
        input_config,
        model_type=args.model_type,
        img_path=args.img_path,
        fp16=args.fp16,
        verify=args.verify,
        show=args.show,
        workspace_size=args.workspace_size,
        verbose=args.verbose)
    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    # Emit a colored deprecation notice pointing users to MMDeploy.
    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
| 9,445 | 34.115242 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/publish_model.py | import argparse
import subprocess
import torch
from packaging import version
def parse_args():
    """Parse the input/output checkpoint paths from the command line."""
    ap = argparse.ArgumentParser(
        description='Process a checkpoint to be published')
    ap.add_argument('in_file', help='input checkpoint filename')
    ap.add_argument('out_file', help='output checkpoint filename')
    return ap.parse_args()
def process_checkpoint(in_file, out_file):
    """Strip training-only state from a checkpoint and publish it.

    Loads ``in_file`` on CPU, drops the optimizer state to shrink the file,
    saves it to ``out_file``, then renames the result to
    ``<out_file stem>-<first 8 sha256 hex chars>.pth``.

    Args:
        in_file (str): Path of the checkpoint to process.
        out_file (str): Path the published checkpoint is written to (it is
            subsequently renamed to embed the content hash).
    """
    checkpoint = torch.load(in_file, map_location='cpu')
    # remove optimizer for smaller file size
    if 'optimizer' in checkpoint:
        del checkpoint['optimizer']
    # if it is necessary to remove some sensitive data in checkpoint['meta'],
    # add the code here.
    if version.parse(torch.__version__) >= version.parse('1.6'):
        # Keep the legacy (non-zipfile) format so older torch can load it.
        torch.save(checkpoint, out_file, _use_new_zipfile_serialization=False)
    else:
        torch.save(checkpoint, out_file)
    sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # Bug fix: ``out_file.rstrip('.pth')`` strips *characters* from the set
    # {'.', 'p', 't', 'h'}, so e.g. 'depth.pth' became 'de'. Strip the
    # '.pth' suffix exactly instead.
    if out_file.endswith('.pth'):
        out_file_name = out_file[:-len('.pth')]
    else:
        out_file_name = out_file
    final_file = out_file_name + f'-{sha[:8]}.pth'
    subprocess.Popen(['mv', out_file, final_file])
def main():
    """CLI entry point: parse arguments and publish the checkpoint."""
    cli_args = parse_args()
    process_checkpoint(cli_args.in_file, cli_args.out_file)


if __name__ == '__main__':
    main()
| 1,256 | 29.658537 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/pytorch2onnx.py | import argparse
import warnings
import cv2
import mmcv
import numpy as np
import onnx
import onnxruntime as rt
import torch
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def pytorch2onnx(model,
                 input,
                 model_type,
                 opset_version=11,
                 show=False,
                 output_file='tmp.onnx',
                 verify=False,
                 dynamic_export=False):
    """Export Pytorch model to ONNX model and verify the outputs are same
    between Pytorch and ONNX.

    Args:
        model (nn.Module): Pytorch model we want to export.
        input (dict): We need to use this input to execute the model.
        model_type (str): 'mattor' or 'restorer'; selects how the input
            tensor is assembled from ``input``.
        opset_version (int): The onnx op version. Default: 11.
        show (bool): Whether print the computation graph. Default: False.
        output_file (string): The path to where we store the output ONNX model.
            Default: `tmp.onnx`.
        verify (bool): Whether compare the outputs between Pytorch and ONNX.
            Default: False.
        dynamic_export (bool): Whether to export with dynamic batch/height/
            width axes. Default: False.
    """
    model.cpu().eval()
    if model_type == 'mattor':
        # Mattors take the merged image and trimap concatenated on channels.
        merged = input['merged'].unsqueeze(0)
        trimap = input['trimap'].unsqueeze(0)
        data = torch.cat((merged, trimap), 1)
    elif model_type == 'restorer':
        data = input['lq'].unsqueeze(0)
    # Export through the single-tensor dummy forward.
    model.forward = model.forward_dummy
    # pytorch has some bug in pytorch1.3, we have to fix it
    # by replacing these existing op
    register_extra_symbolics(opset_version)
    dynamic_axes = None
    if dynamic_export:
        dynamic_axes = {
            'input': {
                0: 'batch',
                2: 'height',
                3: 'width'
            },
            'output': {
                0: 'batch',
                2: 'height',
                3: 'width'
            }
        }
    with torch.no_grad():
        torch.onnx.export(
            model,
            data,
            output_file,
            input_names=['input'],
            output_names=['output'],
            export_params=True,
            keep_initializers_as_inputs=False,
            verbose=show,
            opset_version=opset_version,
            dynamic_axes=dynamic_axes)
    print(f'Successfully exported ONNX model: {output_file}')
    if verify:
        # check by onnx
        onnx_model = onnx.load(output_file)
        onnx.checker.check_model(onnx_model)
        if dynamic_export:
            # scale image for dynamic shape test
            data = torch.nn.functional.interpolate(data, scale_factor=1.1)
            # concate flip image for batch test
            flip_data = data.flip(-1)
            data = torch.cat((data, flip_data), 0)
        # get pytorch output, only concern pred_alpha
        with torch.no_grad():
            pytorch_result = model(data)
        if isinstance(pytorch_result, (tuple, list)):
            pytorch_result = pytorch_result[0]
        pytorch_result = pytorch_result.detach().numpy()
        # get onnx output
        sess = rt.InferenceSession(output_file)
        onnx_result = sess.run(None, {
            'input': data.detach().numpy(),
        })
        # only concern pred_alpha value
        if isinstance(onnx_result, (tuple, list)):
            onnx_result = onnx_result[0]
        if show:
            # CHW -> HWC, clip to [0, 1] and RGB -> BGR for cv2 display.
            pytorch_visualize = pytorch_result[0].transpose(1, 2, 0)
            pytorch_visualize = np.clip(pytorch_visualize, 0, 1)[:, :, ::-1]
            onnx_visualize = onnx_result[0].transpose(1, 2, 0)
            onnx_visualize = np.clip(onnx_visualize, 0, 1)[:, :, ::-1]
            cv2.imshow('PyTorch', pytorch_visualize)
            cv2.imshow('ONNXRuntime', onnx_visualize)
            cv2.waitKey()
        # check the numerical value
        assert np.allclose(
            pytorch_result, onnx_result, rtol=1e-5,
            atol=1e-5), 'The outputs are different between Pytorch and ONNX'
        print('The numerical values are same between Pytorch and ONNX')
def parse_args():
    """Build and parse the CLI options for PyTorch -> ONNX conversion."""
    ap = argparse.ArgumentParser(description='Convert MMediting to ONNX')
    ap.add_argument('config', help='test config file path')
    ap.add_argument('checkpoint', help='checkpoint file')
    ap.add_argument(
        'model_type',
        choices=['inpainting', 'mattor', 'restorer', 'synthesizer'],
        help='what kind of model the config belong to.')
    ap.add_argument('img_path', help='path to input image file')
    ap.add_argument(
        '--trimap-path', default=None,
        help='path to input trimap file, used in mattor model')
    ap.add_argument('--show', action='store_true', help='show onnx graph')
    ap.add_argument('--output-file', type=str, default='tmp.onnx')
    ap.add_argument('--opset-version', type=int, default=11)
    ap.add_argument(
        '--verify', action='store_true',
        help='verify the onnx model output against pytorch output')
    ap.add_argument(
        '--dynamic-export', action='store_true',
        help='Whether to export onnx with dynamic axis.')
    return ap.parse_args()
if __name__ == '__main__':
    args = parse_args()
    model_type = args.model_type
    # Mattors need the trimap as part of the network input.
    if model_type == 'mattor' and args.trimap_path is None:
        raise ValueError('Please set `--trimap-path` to convert mattor model.')
    assert args.opset_version == 11, 'MMEditing only support opset 11 now'
    config = mmcv.Config.fromfile(args.config)
    config.model.pretrained = None
    # ONNX does not support spectral norm
    if model_type == 'mattor':
        if hasattr(config.model.backbone.encoder, 'with_spectral_norm'):
            config.model.backbone.encoder.with_spectral_norm = False
            config.model.backbone.decoder.with_spectral_norm = False
    config.test_cfg.metrics = None
    # build the model
    model = build_model(config.model, test_cfg=config.test_cfg)
    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
    # remove alpha from test_pipeline
    # Ground-truth keys are not available at export time, so strip them
    # from the test pipeline.
    if model_type == 'mattor':
        keys_to_remove = ['alpha', 'ori_alpha']
    elif model_type == 'restorer':
        keys_to_remove = ['gt', 'gt_path']
    for key in keys_to_remove:
        for pipeline in list(config.test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                config.test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                if len(pipeline['keys']) == 0:
                    config.test_pipeline.remove(pipeline)
            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
                pipeline['meta_keys'].remove(key)
    # build the data pipeline
    test_pipeline = Compose(config.test_pipeline)
    # prepare data
    if model_type == 'mattor':
        data = dict(merged_path=args.img_path, trimap_path=args.trimap_path)
    elif model_type == 'restorer':
        data = dict(lq_path=args.img_path)
    data = test_pipeline(data)
    # convert model to onnx file
    pytorch2onnx(
        model,
        data,
        model_type,
        opset_version=args.opset_version,
        show=args.show,
        output_file=args.output_file,
        verify=args.verify,
        dynamic_export=args.dynamic_export)
    # Following strings of text style are from colorama package
    bright_style, reset_style = '\x1b[1m', '\x1b[0m'
    red_text, blue_text = '\x1b[31m', '\x1b[34m'
    white_background = '\x1b[107m'
    # Emit a colored deprecation notice pointing users to MMDeploy.
    msg = white_background + bright_style + red_text
    msg += 'DeprecationWarning: This tool will be deprecated in future. '
    msg += blue_text + 'Welcome to use the unified model deployment toolbox '
    msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
    msg += reset_style
    warnings.warn(msg)
| 7,975 | 35.420091 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/train.py | import argparse
import copy
import os
import os.path as osp
import time
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import init_dist
from mmedit import __version__
from mmedit.apis import init_random_seed, set_random_seed, train_model
from mmedit.datasets import build_dataset
from mmedit.models import build_model
from mmedit.utils import collect_env, get_root_logger, setup_multi_processes
def parse_args():
    """Build and parse the CLI options for the training entry point."""
    ap = argparse.ArgumentParser(description='Train an editor')
    ap.add_argument('config', help='train config file path')
    ap.add_argument('--work-dir', help='the dir to save logs and models')
    ap.add_argument(
        '--resume-from', help='the checkpoint file to resume from')
    ap.add_argument(
        '--no-validate', action='store_true',
        help='whether not to evaluate the checkpoint during training')
    ap.add_argument(
        '--gpus', type=int, default=1,
        help='number of gpus to use '
        '(only applicable to non-distributed training)')
    ap.add_argument('--seed', type=int, default=None, help='random seed')
    ap.add_argument(
        '--diff_seed', action='store_true',
        help='Whether or not set different seeds for different ranks')
    ap.add_argument(
        '--deterministic', action='store_true',
        help='whether to set deterministic options for CUDNN backend.')
    ap.add_argument(
        '--cfg-options', nargs='+', action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    ap.add_argument(
        '--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'],
        default='none', help='job launcher')
    ap.add_argument('--local_rank', type=int, default=0)
    ap.add_argument(
        '--autoscale-lr', action='store_true',
        help='automatically scale lr with the number of gpus')
    args = ap.parse_args()
    # torch.distributed.launch passes --local_rank but may not export the
    # LOCAL_RANK env var; mirror it so downstream code can rely on it.
    if 'LOCAL_RANK' not in os.environ:
        os.environ['LOCAL_RANK'] = str(args.local_rank)
    return args
def main():
    """Training entry point: build config, model and datasets, then train."""
    args = parse_args()
    cfg = Config.fromfile(args.config)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    # set multi-process settings
    setup_multi_processes(cfg)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    # update configs according to CLI args
    if args.work_dir is not None:
        cfg.work_dir = args.work_dir
    if args.resume_from is not None:
        cfg.resume_from = args.resume_from
    cfg.gpus = args.gpus
    if args.autoscale_lr:
        # apply the linear scaling rule (https://arxiv.org/abs/1706.02677)
        cfg.optimizer['lr'] = cfg.optimizer['lr'] * cfg.gpus / 8
    # init distributed env first, since logger depends on the dist info.
    if args.launcher == 'none':
        distributed = False
    else:
        distributed = True
        init_dist(args.launcher, **cfg.dist_params)
    # create work_dir
    mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
    # init the logger before other steps
    timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
    log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
    logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
    # log env info
    env_info_dict = collect_env.collect_env()
    env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
    dash_line = '-' * 60 + '\n'
    logger.info('Environment info:\n' + dash_line + env_info + '\n' +
                dash_line)
    # log some basic info
    logger.info('Distributed training: {}'.format(distributed))
    logger.info('mmedit Version: {}'.format(__version__))
    logger.info('Config:\n{}'.format(cfg.text))
    # set random seeds
    seed = init_random_seed(args.seed)
    # Optionally decorrelate ranks by offsetting the seed with the rank id.
    seed = seed + dist.get_rank() if args.diff_seed else seed
    logger.info('Set random seed to {}, deterministic: {}'.format(
        seed, args.deterministic))
    set_random_seed(seed, deterministic=args.deterministic)
    cfg.seed = seed
    model = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    datasets = [build_dataset(cfg.data.train)]
    # A two-stage workflow also trains on the validation split, using the
    # training pipeline.
    if len(cfg.workflow) == 2:
        val_dataset = copy.deepcopy(cfg.data.val)
        val_dataset.pipeline = cfg.data.train.pipeline
        datasets.append(build_dataset(val_dataset))
    if cfg.checkpoint_config is not None:
        # save version, config file content and class names in
        # checkpoints as meta data
        cfg.checkpoint_config.meta = dict(
            mmedit_version=__version__,
            config=cfg.text,
        )
    # meta information
    meta = dict()
    if cfg.get('exp_name', None) is None:
        cfg['exp_name'] = osp.splitext(osp.basename(cfg.work_dir))[0]
    meta['exp_name'] = cfg.exp_name
    meta['mmedit Version'] = __version__
    meta['seed'] = seed
    meta['env_info'] = env_info
    # add an attribute for visualization convenience
    train_model(
        model,
        datasets,
        cfg,
        distributed=distributed,
        validate=(not args.no_validate),
        timestamp=timestamp,
        meta=meta)
if __name__ == '__main__':
    main()
| 5,738 | 32.758824 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deployment/mmedit_handler.py | import os
import random
import string
from io import BytesIO
import PIL.Image as Image
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmedit.apis import init_model, restoration_inference
from mmedit.core import tensor2img
class MMEditHandler(BaseHandler):
    """TorchServe handler that runs MMEditing restoration inference."""
    def initialize(self, context):
        """Load the model from the TorchServe model archive.

        Args:
            context: TorchServe context carrying ``system_properties`` and
                the model ``manifest``.
        """
        print('MMEditHandler.initialize is called')
        properties = context.system_properties
        self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        # The ternary is the single argument to torch.device: with CUDA it
        # becomes e.g. 'cuda:<gpu_id>', otherwise plain 'cpu'.
        self.device = torch.device(self.map_location + ':' +
                                   str(properties.get('gpu_id')) if torch.cuda.
                                   is_available() else self.map_location)
        self.manifest = context.manifest
        model_dir = properties.get('model_dir')
        serialized_file = self.manifest['model']['serializedFile']
        checkpoint = os.path.join(model_dir, serialized_file)
        # config.py is bundled alongside the checkpoint in the .mar archive.
        self.config_file = os.path.join(model_dir, 'config.py')
        self.model = init_model(self.config_file, checkpoint, self.device)
        self.initialized = True
    def preprocess(self, data, *args, **kwargs):
        """Decode the first request's raw body into a PIL image."""
        body = data[0].get('data') or data[0].get('body')
        result = Image.open(BytesIO(body))
        # data preprocess is in inference.
        return result
    def inference(self, data, *args, **kwargs):
        """Run restoration inference on the decoded image.

        The image is written to a random temp .png because
        restoration_inference takes a file path; the file is removed after.
        """
        # generate temp image path for restoration_inference
        temp_name = ''.join(
            random.sample(string.ascii_letters + string.digits, 18))
        temp_path = f'./{temp_name}.png'
        data.save(temp_path)
        results = restoration_inference(self.model, temp_path)
        # delete the temp image path
        os.remove(temp_path)
        return results
    def postprocess(self, data):
        """Convert each result tensor to an image and then to raw bytes."""
        # convert torch tensor to numpy and then convert to bytes
        output_list = []
        for data_ in data:
            data_np = tensor2img(data_)
            data_byte = data_np.tobytes()
            output_list.append(data_byte)
        return output_list
| 2,099 | 34 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tools/deployment/mmedit2torchserve.py | from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmedit2torchserve(
    config_file: str,
    checkpoint_file: str,
    output_folder: str,
    model_name: str,
    model_version: str = '1.0',
    force: bool = False,
):
    """Package an MMEditing model (config + checkpoint) into a `.mar`.

    Args:
        config_file: Path to a config in MMEditing format.
        checkpoint_file: Path to a checkpoint in MMEditing format.
        output_folder: Directory in which ``{model_name}.mar`` is created.
        model_name: Stem of the produced ``.mar`` file; when None,
            ``Path(checkpoint_file).stem`` is used instead.
        model_version: Version string recorded in the archive.
        force: When True, an existing ``{model_name}.mar`` under
            ``output_folder`` is overwritten.
    """
    mmcv.mkdir_or_exist(output_folder)

    config = mmcv.Config.fromfile(config_file)

    with TemporaryDirectory() as tmpdir:
        # dump a standalone copy of the config next to the weights
        config.dump(f'{tmpdir}/config.py')

        handler_path = f'{Path(__file__).parent}/mmedit_handler.py'
        package_args = Namespace(
            model_file=f'{tmpdir}/config.py',
            serialized_file=checkpoint_file,
            handler=handler_path,
            model_name=model_name or Path(checkpoint_file).stem,
            version=model_version,
            export_path=output_folder,
            force=force,
            requirements_file=None,
            extra_files=None,
            runtime='python',
            archive_format='default')
        print(package_args.model_name)
        manifest = ModelExportUtils.generate_manifest_json(package_args)
        package_model(package_args, manifest)
def parse_args():
    """Build the command-line interface and parse ``sys.argv``."""
    arg_parser = ArgumentParser(
        description='Convert MMEditing models to TorchServe `.mar` format.')
    arg_parser.add_argument('config', type=str, help='config file path')
    arg_parser.add_argument(
        'checkpoint', type=str, help='checkpoint file path')
    arg_parser.add_argument(
        '--output-folder',
        type=str,
        required=True,
        help='Folder where `{model_name}.mar` will be created.')
    arg_parser.add_argument(
        '--model-name',
        type=str,
        default=None,
        help='If not None, used for naming the `{model_name}.mar`'
        'file that will be created under `output_folder`.'
        'If None, `{Path(checkpoint_file).stem}` will be used.')
    arg_parser.add_argument(
        '--model-version',
        type=str,
        default='1.0',
        help='Number used for versioning.')
    arg_parser.add_argument(
        '-f',
        '--force',
        action='store_true',
        help='overwrite the existing `{model_name}.mar`')
    return arg_parser.parse_args()
# CLI entry point: package the given config/checkpoint into a `.mar` archive.
if __name__ == '__main__':
    args = parse_args()
    # `package_model` is set to None above when `torch-model-archiver`
    # could not be imported, so fail early with an actionable message.
    if package_model is None:
        # Bug fix: the two concatenated fragments had no separator and
        # rendered as "...is required.Try: pip install...".
        raise ImportError('`torch-model-archiver` is required. '
                          'Try: pip install torch-model-archiver')
    mmedit2torchserve(args.config, args.checkpoint, args.output_folder,
                      args.model_name, args.model_version, args.force)
| 3,725 | 32.567568 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/.dev_scripts/github/update_model_index.py |
# This tool is used to update model-index.yml which is required by MIM, and
# will be automatically called as a pre-commit hook. The updating will be
# triggered if any change of model information (.md files in configs/) has been
# detected before a commit.
import glob
import os
import posixpath as osp # Even on windows, use posixpath
import re
import sys
import warnings
from functools import reduce
import mmcv
# Repository root, resolved relative to this script's own location
# (.dev_scripts/github/ -> three dirname hops up).
MMEditing_ROOT = osp.dirname(osp.dirname(osp.dirname(__file__)))
# Known training-dataset keywords; config filenames are matched against these
# (lower-cased) to fill the 'Training Data' metadata field in parse_md().
all_training_data = [
    'div2k', 'celeba', 'places', 'comp1k', 'vimeo90k', 'reds', 'ffhq', 'cufed',
    'cat', 'facades', 'summer2winter', 'horse2zebra', 'maps', 'edges2shoes'
]
def dump_yaml_and_check_difference(obj, file):
    """Serialize ``obj`` as YAML and rewrite ``file`` only when it changed.

    Args:
        obj (any): The python object to be dumped.
        file (str): YAML filename to dump the object to.

    Returns:
        bool: True when ``file`` was (re)written, False when its previous
        content already matched the fresh dump.
    """
    # Force LF line breaks so the output is byte-stable across platforms.
    yaml_text = mmcv.dump(
        obj, None, file_format='yaml', sort_keys=True, line_break='\n')

    previous_text = None
    if osp.isfile(file):
        print(f'      exist {file}')
        with open(file, 'r', encoding='utf-8') as fin:
            previous_text = fin.read()

    if previous_text == yaml_text:
        return False

    print(f'      update {file}')
    with open(file, 'w', encoding='utf-8') as fout:
        fout.write(yaml_text)
    return True
def collate_metrics(keys):
    """Map metric names in a table header row to their column indices.

    Args:
        keys (List): Elements in the first row of the table.

    Returns:
        dict: Metric name -> column index; the non-metric 'Method' and
        'Download' columns are skipped.
    """
    return {
        key: idx
        for idx, key in enumerate(keys) if key not in ('Method', 'Download')
    }
def get_task_name(md_file):
    """Infer the task name from a README.md path.

    The task is the capitalized path component directly under ``configs``;
    'Unknown' is returned when no such component exists.

    Args:
        md_file (str): Path to .md file.

    Returns:
        str: Task name.
    """
    parts = re.split(r'[\\/]', md_file)
    # the last component (the filename itself) can never be 'configs' parent
    for idx, part in enumerate(parts[:-1]):
        if part == 'configs':
            return parts[idx + 1].capitalize()
    return 'Unknown'
def generate_unique_name(md_file):
    """Derive a short unique name for every config file next to ``md_file``.

    Underscore-separated tokens shared by *all* config filenames are treated
    as non-distinguishing; each file keeps its first token and, when
    possible, gets exactly one distinguishing token appended.

    Args:
        md_file (str): Path to .md file.

    Returns:
        dict: Mapping from config-file stem to its unique short name.
    """
    entries = os.listdir(osp.dirname(md_file))
    config_files = sorted(f[:-3] for f in entries if f.endswith('.py'))
    # stable secondary sort: shorter names first
    config_files.sort(key=len)
    token_lists = [stem.split('_') for stem in config_files]
    # tokens present in every filename carry no distinguishing information
    shared = reduce(lambda a, b: a & b, [set(t) for t in token_lists])
    unique_dict = {}
    taken = []
    for tokens, stem in zip(token_lists, config_files):
        base = tokens[0]
        unique_dict[stem] = base
        for tok in (t for t in tokens if t not in shared):
            candidate = f'{base}_{tok}'
            if candidate not in taken and base != tok:
                unique_dict[stem] = candidate
                taken.append(candidate)
                break
    return unique_dict
def parse_md(md_file):
    """Parse .md file and convert it to a .yml file which can be used for MIM.

    Scans a configs/<task>/README.md: the first line gives the collection
    name, `> [...](url)` lines give paper links, and markdown tables supply
    per-model config/checkpoint/metric information.

    Args:
        md_file (str): Path to .md file.

    Returns:
        Bool: If the target YAML file is different from the original.
    """
    # unique_dict = generate_unique_name(md_file)
    collection_name = osp.splitext(osp.basename(md_file))[0]
    readme = osp.relpath(md_file, MMEditing_ROOT)
    readme = readme.replace('\\', '/')  # for windows
    collection = dict(
        Name=collection_name,
        Metadata={'Architecture': []},
        README=readme,
        Paper=[])
    models = []
    with open(md_file, 'r', encoding='utf-8') as md:
        lines = md.readlines()
        i = 0
        # title line looks like "# Name (venue'year)"; strip the "# " prefix
        # and anything from the first '(' onwards
        name = lines[0][2:]
        name = name.split('(', 1)[0].strip()
        collection['Metadata']['Architecture'].append(name)
        collection['Name'] = name
        collection_name = name
        while i < len(lines):
            # parse reference
            if lines[i].startswith('> ['):
                url = re.match(r'> \[.*]\((.*)\)', lines[i])
                url = url.groups()[0]
                collection['Paper'].append(url)
                i += 1
            # parse table: a header row followed by a markdown separator row
            elif lines[i][0] == '|' and i + 1 < len(lines) and \
                    (lines[i + 1][:3] == '| :' or lines[i + 1][:2] == '|:'
                     or lines[i + 1][:2] == '|-'):
                cols = [col.strip() for col in lines[i].split('|')][1:-1]
                config_idx = cols.index('Method')
                checkpoint_idx = cols.index('Download')
                # FLOPs/Params columns are optional; -1 marks "absent"
                try:
                    flops_idx = cols.index('FLOPs')
                except ValueError:
                    flops_idx = -1
                try:
                    params_idx = cols.index('Params')
                except ValueError:
                    params_idx = -1
                used_metrics = collate_metrics(cols)
                # data rows start after header + separator
                j = i + 2
                while j < len(lines) and lines[j][0] == '|':
                    task = get_task_name(md_file)
                    line = lines[j].split('|')[1:-1]
                    if line[config_idx].find('](') >= 0:
                        # extract the target of the markdown link in 'Method'
                        left = line[config_idx].index('](') + 2
                        right = line[config_idx].index(')', left)
                        config = line[config_idx][left:right].strip('./')
                    elif line[config_idx].find('△') == -1:
                        # no config link and no '△' continuation marker:
                        # skip this row entirely
                        j += 1
                        continue
                    # NOTE(review): when a row has no 'model](' link, `config`
                    # and `checkpoint` keep the values from the previous row —
                    # appears intentional for '△' continuation rows; confirm.
                    if line[checkpoint_idx].find('](') >= 0:
                        left = line[checkpoint_idx].index('model](') + 7
                        right = line[checkpoint_idx].index(')', left)
                        checkpoint = line[checkpoint_idx][left:right]
                    name_key = osp.splitext(osp.basename(config))[0]
                    model_name = name_key
                    # disabled unique-name resolution (generate_unique_name);
                    # the raw config stem is used as the model name instead
                    # if name_key in unique_dict:
                    #     model_name = unique_dict[name_key]
                    # else:
                    #     model_name = name_key
                    #     warnings.warn(
                    #         f'Config file of {model_name} is not found,'
                    #         'please check it again.')
                    # find dataset in config file
                    dataset = 'Others'
                    config_low = config.lower()
                    for d in all_training_data:
                        if d in config_low:
                            dataset = d.upper()
                            break
                    metadata = {'Training Data': dataset}
                    if flops_idx != -1:
                        metadata['FLOPs'] = float(line[flops_idx])
                    if params_idx != -1:
                        metadata['Parameters'] = float(line[params_idx])
                    metrics = {}
                    for key in used_metrics:
                        metrics_data = line[used_metrics[key]]
                        metrics_data = metrics_data.replace('*', '')
                        if '/' not in metrics_data:
                            # plain scalar metric; unparsable cells are dropped
                            try:
                                metrics[key] = float(metrics_data)
                            except ValueError:
                                metrics_data = metrics_data.replace(' ', '')
                        else:
                            # 'a/b' cells are treated as PSNR/SSIM pairs
                            try:
                                metrics_data = [
                                    float(d) for d in metrics_data.split('/')
                                ]
                                metrics[key] = dict(
                                    PSNR=metrics_data[0], SSIM=metrics_data[1])
                            except ValueError:
                                pass
                    model = {
                        'Name':
                        model_name,
                        'In Collection':
                        collection_name,
                        'Config':
                        config,
                        'Metadata':
                        metadata,
                        'Results': [{
                            'Task': task,
                            'Dataset': dataset,
                            'Metrics': metrics
                        }],
                        'Weights':
                        checkpoint
                    }
                    models.append(model)
                    j += 1
                # resume scanning after the table
                i = j
            else:
                i += 1
    if len(models) == 0:
        warnings.warn('no model is found in this md file')
    result = {'Collections': [collection], 'Models': models}
    # metafile.yml is written next to the README it was generated from
    yml_file = md_file.replace('README.md', 'metafile.yml')
    is_different = dump_yaml_and_check_difference(result, yml_file)
    return is_different
def update_model_index():
    """Regenerate model-index.yml from every metafile under configs/.

    Returns:
        bool: True when model-index.yml was rewritten, False otherwise.
    """
    config_dir = osp.join(MMEditing_ROOT, 'configs')
    metafiles = sorted(
        glob.glob(osp.join(config_dir, '**', '*.yml'), recursive=True))
    # Paths are recorded relative to the repo root with '/' separators so
    # the generated file is identical on Windows.
    model_index = {
        'Import': [
            osp.relpath(path, MMEditing_ROOT).replace('\\', '/')
            for path in metafiles
        ]
    }
    index_file = osp.join(MMEditing_ROOT, 'model-index.yml')
    return dump_yaml_and_check_difference(model_index, index_file)
# Pre-commit entry point: with no CLI arguments, process every README.md
# under configs/; otherwise only the README.md files passed on argv.
if __name__ == '__main__':
    if len(sys.argv) <= 1:
        configs_root = osp.join(MMEditing_ROOT, 'configs')
        file_list = glob.glob(
            osp.join(configs_root, '**', '*README.md'), recursive=True)
        file_list.sort()
    else:
        file_list = [
            fn for fn in sys.argv[1:] if osp.basename(fn) == 'README.md'
        ]
    # nothing to do: exit 0 so the hook passes
    if not file_list:
        sys.exit(0)
    file_modified = False
    for fn in file_list:
        print(f'process {fn}')
        file_modified |= parse_md(fn)
    file_modified |= update_model_index()
    # exit 1 when any metafile/model-index was rewritten, failing the
    # pre-commit hook so the regenerated files get staged and committed
    sys.exit(1 if file_modified else 0)
| 11,096 | 33.039877 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/demo/restoration_video_demo.py | import argparse
import os
import cv2
import mmcv
import numpy as np
import torch
from mmedit.apis import init_model, restoration_video_inference
from mmedit.core import tensor2img
from mmedit.utils import modify_args
# Output/input paths with these extensions are treated as video files;
# anything else is treated as a directory of image frames.
VIDEO_EXTENSIONS = ('.mp4', '.mov')
def parse_args():
    """Parse command-line arguments for the video restoration demo."""
    modify_args()
    arg_parser = argparse.ArgumentParser(description='Restoration demo')
    arg_parser.add_argument('config', help='test config file path')
    arg_parser.add_argument('checkpoint', help='checkpoint file')
    arg_parser.add_argument('input_dir', help='directory of the input video')
    arg_parser.add_argument(
        'output_dir', help='directory of the output video')
    arg_parser.add_argument(
        '--start-idx',
        type=int,
        default=0,
        help='index corresponds to the first frame of the sequence')
    arg_parser.add_argument(
        '--filename-tmpl',
        default='{:08d}.png',
        help='template of the file names')
    arg_parser.add_argument(
        '--window-size',
        type=int,
        default=0,
        help='window size if sliding-window framework is used')
    arg_parser.add_argument(
        '--max-seq-len',
        type=int,
        default=None,
        help='maximum sequence length if recurrent framework is used')
    arg_parser.add_argument(
        '--device', type=int, default=0, help='CUDA device id')
    return arg_parser.parse_args()
def main():
    """ Demo for video restoration models.

    Note that we accept video as input/output, when 'input_dir'/'output_dir'
    is set to the path to the video. But using videos introduces video
    compression, which lowers the visual quality. If you want actual quality,
    please save them as separate images (.png).
    """
    args = parse_args()
    model = init_model(
        args.config, args.checkpoint, device=torch.device('cuda', args.device))
    # output: restored frames; indexed below as [:, t, :, :, :] over time
    output = restoration_video_inference(model, args.input_dir,
                                         args.window_size, args.start_idx,
                                         args.filename_tmpl, args.max_seq_len)
    file_extension = os.path.splitext(args.output_dir)[1]
    if file_extension in VIDEO_EXTENSIONS:  # save as video
        h, w = output.shape[-2:]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        # NOTE: output frame rate is hard-coded to 25
        video_writer = cv2.VideoWriter(args.output_dir, fourcc, 25, (w, h))
        # iterate over the temporal dimension (dim 1)
        for i in range(0, output.size(1)):
            img = tensor2img(output[:, i, :, :, :])
            video_writer.write(img.astype(np.uint8))
        cv2.destroyAllWindows()
        video_writer.release()
    else:
        # save as an image sequence, numbering frames from start_idx
        for i in range(args.start_idx, args.start_idx + output.size(1)):
            output_i = output[:, i - args.start_idx, :, :, :]
            output_i = tensor2img(output_i)
            save_path_i = f'{args.output_dir}/{args.filename_tmpl.format(i)}'
            mmcv.imwrite(output_i, save_path_i)
# Run the demo only when executed as a script.
if __name__ == '__main__':
    main()
| 2,938 | 32.781609 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/version.py |
# Human-readable package version; a structured tuple form is derived from it
# by parse_version_info() in this module.
__version__ = '0.14.0'
def parse_version_info(version_str):
    """Convert a version string like '0.14.0' or '1.0.0rc1' into a tuple.

    Purely numeric components become ints; a component containing 'rc' is
    split into its numeric part and an 'rcN' string, e.g.
    '1.0.0rc1' -> (1, 0, 0, 'rc1'). Components that are neither numeric nor
    contain 'rc' are silently dropped.
    """
    parts = []
    for chunk in version_str.split('.'):
        if chunk.isdigit():
            parts.append(int(chunk))
        elif 'rc' in chunk:
            major, _, rc_suffix = chunk.partition('rc')
            parts.extend((int(major), f'rc{rc_suffix}'))
    return tuple(parts)
# Tuple form of `__version__`, e.g. (0, 14, 0) or (1, 0, 0, 'rc1').
version_info = parse_version_info(__version__)
| 482 | 24.421053 | 52 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_face_inference.py | import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
try:
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
has_facexlib = True
except ImportError:
has_facexlib = False
def restoration_face_inference(model, img, upscale_factor=1, face_size=1024):
    """Inference image with the model.

    Detects and aligns faces with FaceXLib, restores each cropped face with
    the model, then pastes the restored faces back onto the input image.

    Args:
        model (nn.Module): The loaded model.
        img (str): File path of input image.
        upscale_factor (int, optional): The number of times the input image
            is upsampled. Default: 1.
        face_size (int, optional): The size of the cropped and aligned faces.
            Default: 1024.

    Returns:
        Tensor: The predicted restoration result.
    """
    device = next(model.parameters()).device  # model device
    # build the data pipeline: prefer demo_pipeline, then test_pipeline,
    # falling back to val_pipeline
    if model.cfg.get('demo_pipeline', None):
        test_pipeline = model.cfg.demo_pipeline
    elif model.cfg.get('test_pipeline', None):
        test_pipeline = model.cfg.test_pipeline
    else:
        test_pipeline = model.cfg.val_pipeline
    # remove gt from test_pipeline (no ground truth at inference time);
    # iterate over a copy since steps are removed in place
    keys_to_remove = ['gt', 'gt_path']
    for key in keys_to_remove:
        for pipeline in list(test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                # drop the step entirely once it has no keys left
                if len(pipeline['keys']) == 0:
                    test_pipeline.remove(pipeline)
            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
                pipeline['meta_keys'].remove(key)
    # build the data pipeline
    test_pipeline = Compose(test_pipeline)
    # face helper for detecting and aligning faces
    assert has_facexlib, 'Please install FaceXLib to use the demo.'
    face_helper = FaceRestoreHelper(
        upscale_factor,
        face_size=face_size,
        crop_ratio=(1, 1),
        det_model='retinaface_resnet50',
        template_3points=True,
        save_ext='png',
        device=device)
    face_helper.read_image(img)
    # get face landmarks for each face
    face_helper.get_face_landmarks_5(
        only_center_face=False, eye_dist_threshold=None)
    # align and warp each face
    face_helper.align_warp_face()
    # restore each cropped face independently
    for i, img in enumerate(face_helper.cropped_faces):
        # prepare data
        data = dict(lq=img.astype(np.float32))
        data = test_pipeline(data)
        data = scatter(collate([data], samples_per_gpu=1), [device])[0]
        with torch.no_grad():
            # clip_ clamps the output to [0, 1] in place
            output = model(test_mode=True, **data)['output'].clip_(0, 1)
        # CHW -> HWC, then reverse channel order (presumably RGB <-> BGR)
        output = output.squeeze(0).permute(1, 2, 0)[:, :, [2, 1, 0]]
        output = output.cpu().numpy() * 255  # (0, 255)
        face_helper.add_restored_face(output)
    face_helper.get_inverse_affine(None)
    # paste restored faces back onto the original image
    restored_img = face_helper.paste_faces_to_input_image(upsample_img=None)
    return restored_img
| 3,069 | 33.494382 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/generation_inference.py | import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmedit.core import tensor2img
from mmedit.datasets.pipelines import Compose
def generation_inference(model, img, img_unpaired=None):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): File path of input image.
        img_unpaired (str, optional): File path of the unpaired image.
            If not None, perform unpaired image generation. Default: None.

    Returns:
        np.ndarray: The predicted generation result.
    """
    device = next(model.parameters()).device  # model device
    # build the data pipeline
    test_pipeline = Compose(model.cfg.test_pipeline)
    # prepare data: paired vs unpaired generation use different keys
    if img_unpaired is None:
        data = dict(pair_path=img)
    else:
        data = dict(img_a_path=img, img_b_path=img_unpaired)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # forward the model
    with torch.no_grad():
        results = model(test_mode=True, **data)
    # assemble the output image(s)
    if model.show_input:
        # concatenate inputs and outputs side by side; unpaired mode also
        # shows the reverse-direction fake
        keys = ['real_a', 'fake_b', 'real_b']
        if img_unpaired is not None:
            keys.append('fake_a')
        panels = [tensor2img(results[k], min_max=(-1, 1)) for k in keys]
        output = np.concatenate(panels, axis=1)
    else:
        # single image: paired mode always returns fake_b; unpaired mode
        # picks the direction from model.test_direction
        if img_unpaired is None or model.test_direction == 'a2b':
            output = tensor2img(results['fake_b'], min_max=(-1, 1))
        else:
            output = tensor2img(results['fake_a'], min_max=(-1, 1))
    return output
| 2,229 | 34.967742 | 74 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/inpainting_inference.py | import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
def inpainting_inference(model, masked_img, mask):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        masked_img (str): File path of image with mask.
        mask (str): Mask file path.

    Returns:
        Tensor: The predicted inpainting result.
    """
    device = next(model.parameters()).device  # model device

    # hand-built pipeline: load image + mask, reflect-pad, normalize to
    # [-1, 1], apply the mask, and collect the tensors the model needs
    infer_pipeline = [
        dict(type='LoadImageFromFile', key='masked_img'),
        dict(type='LoadMask', mask_mode='file', mask_config=dict()),
        dict(type='Pad', keys=['masked_img', 'mask'], mode='reflect'),
        dict(
            type='Normalize',
            keys=['masked_img'],
            mean=[127.5] * 3,
            std=[127.5] * 3,
            to_rgb=False),
        dict(type='GetMaskedImage', img_name='masked_img'),
        dict(
            type='Collect',
            keys=['masked_img', 'mask'],
            meta_keys=['masked_img_path']),
        dict(type='ImageToTensor', keys=['masked_img', 'mask'])
    ]
    test_pipeline = Compose(infer_pipeline)

    # prepare and batch the sample, then move it onto the model's device
    sample = test_pipeline(dict(masked_img_path=masked_img, mask_path=mask))
    sample = scatter(collate([sample], samples_per_gpu=1), [device])[0]

    # forward the model
    with torch.no_grad():
        result = model(test_mode=True, **sample)
    return result['fake_img']
| 1,546 | 29.94 | 70 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_inference.py | import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
def restoration_inference(model, img, ref=None):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): File path of input image.
        ref (str | None): File path of reference image. Default: None.

    Returns:
        Tensor: The predicted restoration result.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # strip ground-truth loading steps from the test pipeline, since no GT
    # exists at inference time; iterate a copy because steps are removed
    # in place (note this mutates model.cfg)
    for unwanted in ('gt', 'gt_path'):
        for step in list(cfg.test_pipeline):
            if 'key' in step and unwanted == step['key']:
                cfg.test_pipeline.remove(step)
            if 'keys' in step and unwanted in step['keys']:
                step['keys'].remove(unwanted)
                # drop the step entirely once it has no keys left
                if not step['keys']:
                    cfg.test_pipeline.remove(step)
            if 'meta_keys' in step and unwanted in step['meta_keys']:
                step['meta_keys'].remove(unwanted)
    test_pipeline = Compose(cfg.test_pipeline)
    # reference-based SR takes an extra ref image; plain SISR does not
    data = dict(lq_path=img, ref_path=ref) if ref else dict(lq_path=img)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # forward the model
    with torch.no_grad():
        result = model(test_mode=True, **data)
    return result['output']
| 1,606 | 33.191489 | 72 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/matting_inference.py | import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def init_model(config, checkpoint=None, device='cuda:0'):
    """Initialize a model from config file.

    Args:
        config (str or :obj:`mmcv.Config`): Config file path or the config
            object.
        checkpoint (str, optional): Checkpoint path. If left as None, the model
            will not load any weights.
        device (str): Which device the model will deploy. Default: 'cuda:0'.

    Returns:
        nn.Module: The constructed model, in eval mode, on ``device``.
    """
    if isinstance(config, str):
        config = mmcv.Config.fromfile(config)
    elif not isinstance(config, mmcv.Config):
        raise TypeError('config must be a filename or Config object, '
                        f'but got {type(config)}')
    # weights are loaded explicitly below, never through the config
    config.model.pretrained = None
    config.test_cfg.metrics = None
    model = build_model(config.model, test_cfg=config.test_cfg)
    if checkpoint is not None:
        load_checkpoint(model, checkpoint)
    model.cfg = config  # save the config in the model for convenience
    model.to(device)
    model.eval()
    return model
def matting_inference(model, img, trimap):
    """Inference image(s) with the model.

    Args:
        model (nn.Module): The loaded model.
        img (str): Image file path.
        trimap (str): Trimap file path.

    Returns:
        np.ndarray: The predicted alpha matte.
    """
    cfg = model.cfg
    device = next(model.parameters()).device  # model device
    # remove alpha from test_pipeline: no ground-truth alpha at inference.
    # NOTE: this mutates model.cfg in place; iterate over a copy because
    # steps are removed while looping.
    keys_to_remove = ['alpha', 'ori_alpha']
    for key in keys_to_remove:
        for pipeline in list(cfg.test_pipeline):
            if 'key' in pipeline and key == pipeline['key']:
                cfg.test_pipeline.remove(pipeline)
            if 'keys' in pipeline and key in pipeline['keys']:
                pipeline['keys'].remove(key)
                # drop the step entirely once it has no keys left
                if len(pipeline['keys']) == 0:
                    cfg.test_pipeline.remove(pipeline)
            if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
                pipeline['meta_keys'].remove(key)
    # build the data pipeline
    test_pipeline = Compose(cfg.test_pipeline)
    # prepare data
    data = dict(merged_path=img, trimap_path=trimap)
    data = test_pipeline(data)
    data = scatter(collate([data], samples_per_gpu=1), [device])[0]
    # forward the model
    with torch.no_grad():
        result = model(test_mode=True, **data)

    return result['pred_alpha']
| 2,659 | 33.545455 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/video_interpolation_inference.py | import math
import os
import os.path as osp
import cv2
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from mmcv.parallel import collate
from mmedit.datasets.pipelines import Compose
# Paths with these extensions are treated as videos; everything else is
# treated as a directory of image frames.
VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')
# Module-level client for reading frame files from the local filesystem.
FILE_CLIENT = FileClient('disk')
def read_image(filepath):
    """Load a single image from disk as an RGB array.

    Args:
        filepath (str): File path.

    Returns:
        image (np.array): Decoded image in RGB channel order.
    """
    raw_bytes = FILE_CLIENT.get(filepath)
    return mmcv.imfrombytes(
        raw_bytes, flag='color', channel_order='rgb', backend='pillow')
def read_frames(source, start_index, num_frames, from_video, end_index):
    """Read a run of frames from a video reader or a list of image paths.

    Args:
        source (list | mmcv.VideoReader): Source of frames.
        start_index (int): Start index of frames.
        num_frames (int): frames number to be read.
        from_video (bool): Whether frames come from a video reader.
        end_index (int): The end index of frames.

    Returns:
        images (list[np.array]): Loaded frames.
    """
    stop = min(start_index + num_frames, end_index)
    if not from_video:
        # image-sequence input: load each file in the requested slice
        return [read_image(path) for path in source[start_index:stop]]
    frames = []
    for idx in range(start_index, stop):
        if idx >= source.frame_cnt:  # ran past the end of the video
            break
        # reverse channel order of each frame (presumably BGR -> RGB)
        frames.append(np.flip(source.get_frame(idx), axis=2))
    return frames
def video_interpolation_inference(model,
                                  input_dir,
                                  output_dir,
                                  start_idx=0,
                                  end_idx=None,
                                  batch_size=4,
                                  fps_multiplier=0,
                                  fps=0,
                                  filename_tmpl='{:08d}.png'):
    """Interpolate frames of a video or image sequence with the model.

    Args:
        model (nn.Module): The loaded model.
        input_dir (str): Directory of the input video.
        output_dir (str): Directory of the output video.
        start_idx (int): The index corresponding to the first frame in the
            sequence. Default: 0
        end_idx (int | None): The index corresponding to the last interpolated
            frame in the sequence. If it is None, interpolate to the last
            frame of video or sequence. Default: None
        batch_size (int): Batch size. Default: 4
        fps_multiplier (float): multiply the fps based on the input video.
            Default: 0.
        fps (float): frame rate of the output video. Default: 0.
        filename_tmpl (str): template of the file names. Default: '{:08d}.png'

    Returns:
        None. Results are written to ``output_dir`` (as a video file when
        its extension is in VIDEO_EXTENSIONS, otherwise as image frames);
        the previously documented return values were never returned.
    """
    device = next(model.parameters()).device  # model device
    # build the data pipeline: demo_pipeline > test_pipeline > val_pipeline
    if model.cfg.get('demo_pipeline', None):
        test_pipeline = model.cfg.demo_pipeline
    elif model.cfg.get('test_pipeline', None):
        test_pipeline = model.cfg.test_pipeline
    else:
        test_pipeline = model.cfg.val_pipeline
    # remove the data loading pipeline (frames are supplied in memory)
    tmp_pipeline = []
    for pipeline in test_pipeline:
        if pipeline['type'] not in [
                'GenerateSegmentIndices', 'LoadImageFromFileList',
                'LoadImageFromFile'
        ]:
            tmp_pipeline.append(pipeline)
    test_pipeline = tmp_pipeline
    # compose the pipeline
    test_pipeline = Compose(test_pipeline)
    # check if the input is a video
    input_file_extension = os.path.splitext(input_dir)[1]
    if input_file_extension in VIDEO_EXTENSIONS:
        source = mmcv.VideoReader(input_dir)
        input_fps = source.fps
        length = source.frame_cnt
        from_video = True
        h, w = source.height, source.width
        if fps_multiplier:
            assert fps_multiplier > 0, '`fps_multiplier` cannot be negative'
            output_fps = fps_multiplier * input_fps
        else:
            # explicit fps wins; otherwise default to doubling the input fps
            output_fps = fps if fps > 0 else input_fps * 2
    else:
        # image-sequence input: frames are the sorted files in the directory
        files = os.listdir(input_dir)
        files = [osp.join(input_dir, f) for f in files]
        files.sort()
        source = files
        length = files.__len__()
        from_video = False
        # probe one frame for the output resolution
        example_frame = read_image(files[0])
        h, w = example_frame.shape[:2]
        output_fps = fps
    # check if the output is a video
    output_file_extension = os.path.splitext(output_dir)[1]
    if output_file_extension in VIDEO_EXTENSIONS:
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        target = cv2.VideoWriter(output_dir, fourcc, output_fps, (w, h))
        to_video = True
    else:
        to_video = False
    end_idx = min(end_idx, length) if end_idx is not None else length
    # calculate step args (sic: 'lenth_per_step'): each step consumes
    # required_frames plus step_frames per extra batch element, and
    # consecutive steps overlap by repeat_frame frames
    step_size = model.step_frames * batch_size
    lenth_per_step = model.required_frames + model.step_frames * (
        batch_size - 1)
    repeat_frame = model.required_frames - model.step_frames
    prog_bar = mmcv.ProgressBar(
        math.ceil(
            (end_idx + step_size - lenth_per_step - start_idx) / step_size))
    output_index = start_idx
    for start_index in range(start_idx, end_idx, step_size):
        images = read_frames(
            source, start_index, lenth_per_step, from_video, end_index=end_idx)
        # data prepare
        data = dict(inputs=images, inputs_path=None, key=input_dir)
        data = [test_pipeline(data)]
        data = collate(data, samples_per_gpu=1)['inputs']
        # data.shape: [1, t, c, h, w]
        # forward the model
        data = model.split_frames(data)
        input_tensors = data.clone().detach()
        with torch.no_grad():
            output = model(data.to(device), test_mode=True)['output']
            # normalize 4-D outputs to 5-D by inserting a time dimension
            if len(output.shape) == 4:
                output = output.unsqueeze(1)
            output_tensors = output.cpu()
            if len(output_tensors.shape) == 4:
                output_tensors = output_tensors.unsqueeze(1)
            result = model.merge_frames(input_tensors, output_tensors)
        # all but the first step overlap the previous one; drop the
        # repeated leading frames
        if not start_idx == start_index:
            result = result[0 - repeat_frame:]
        prog_bar.update()
        # save frames
        if to_video:
            for frame in result:
                target.write(frame)
        else:
            for frame in result:
                save_path = osp.join(output_dir,
                                     filename_tmpl.format(output_index))
                mmcv.imwrite(frame, save_path)
                output_index += 1
        if start_index + lenth_per_step >= end_idx:
            break
    print()
    print(f'Output dir: {output_dir}')
    if to_video:
        target.release()
| 6,978 | 33.043902 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/train.py | import os
import os.path as osp
import random
import warnings
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel
from mmcv.runner import HOOKS, IterBasedRunner, get_dist_info
from mmcv.utils import build_from_cfg
from mmedit.core import DistEvalIterHook, EvalIterHook, build_optimizers
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets.builder import build_dataloader, build_dataset
from mmedit.utils import get_root_logger
def init_random_seed(seed=None, device='cuda'):
    """Initialize random seed.

    If the seed is not set, the seed will be automatically randomized,
    and then broadcast to all processes to prevent some potential bugs.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is not None:
        return seed

    # No seed given: draw one and make every rank agree on it, so that all
    # processes share the same random seed.
    rank, world_size = get_dist_info()
    seed = np.random.randint(2**31)
    if world_size == 1:
        return seed

    # rank 0 owns the drawn seed; every other rank receives it via broadcast
    if rank == 0:
        shared_seed = torch.tensor(seed, dtype=torch.int32, device=device)
    else:
        shared_seed = torch.tensor(0, dtype=torch.int32, device=device)
    dist.broadcast(shared_seed, src=0)
    return shared_seed.item()
def set_random_seed(seed, deterministic=False):
    """Seed every random number generator used during training.

    Args:
        seed (int): Seed to be used.
        deterministic (bool): Whether to set the deterministic option for
            CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
            to True and `torch.backends.cudnn.benchmark` to False.
            Default: False.
    """
    # seed python, numpy and torch (CPU + every GPU) with the same value
    seeders = (random.seed, np.random.seed, torch.manual_seed,
               torch.cuda.manual_seed, torch.cuda.manual_seed_all)
    for seeder in seeders:
        seeder(seed)
    os.environ['PYTHONHASHSEED'] = str(seed)
    if deterministic:
        # trade cuDNN speed for run-to-run reproducibility
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
def train_model(model,
                dataset,
                cfg,
                distributed=False,
                validate=False,
                timestamp=None,
                meta=None):
    """Train model entry function.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        distributed (bool): Whether to use distributed training.
            Default: False.
        validate (bool): Whether to do evaluation. Default: False.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None
    """
    logger = get_root_logger(log_level=cfg.log_level)

    # dispatch to the distributed or single-process training loop; both
    # take the same arguments apart from `distributed` itself
    train_fn = _dist_train if distributed else _non_dist_train
    train_fn(
        model,
        dataset,
        cfg,
        validate=validate,
        logger=logger,
        timestamp=timestamp,
        meta=meta)
def _dist_train(model,
                dataset,
                cfg,
                validate=False,
                logger=None,
                timestamp=None,
                meta=None):
    """Distributed training function.

    Builds one dataloader per training dataset, wraps the model with
    ``DistributedDataParallelWrapper``, sets up an ``IterBasedRunner``
    with the configured hooks (LR schedule, checkpointing, logging,
    optional visualization / evaluation / custom hooks), optionally
    resumes or loads a checkpoint, and runs the training workflow.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        validate (bool): Whether to do evaluation. Default: False.
        logger (logging.Logger | None): Logger for training. Default: None.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None.
    """
    # Promote a single dataset to a one-element list so a dataloader is
    # built per dataset uniformly below.
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    # step 1: give default values and override (if exist) from cfg.data
    loader_cfg = {
        **dict(seed=cfg.get('seed'), drop_last=False, dist=True),
        # Extra defaults only apply under the parrots build of torch.
        **({} if torch.__version__ != 'parrots' else dict(
            prefetch_num=2,
            pin_memory=False,
        )),
        # Whitelisted keys from cfg.data override the defaults above.
        **dict((k, cfg.data[k]) for k in [
            'samples_per_gpu',
            'workers_per_gpu',
            'shuffle',
            'seed',
            'drop_last',
            'prefetch_num',
            'pin_memory',
        ] if k in cfg.data)
    }
    # step 2: cfg.data.train_dataloader has highest priority
    train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    # put model on gpus
    find_unused_parameters = cfg.get('find_unused_parameters', False)
    # Sub-modules are wrapped separately so GAN-style models (generator +
    # discriminator) train correctly under DDP.
    model = DistributedDataParallelWrapper(
        model,
        device_ids=[torch.cuda.current_device()],
        broadcast_buffers=False,
        find_unused_parameters=find_unused_parameters)
    # build runner
    optimizer = build_optimizers(model, cfg.optimizers)
    runner = IterBasedRunner(
        model,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        checkpoint_config=cfg.checkpoint_config,
        log_config=cfg.log_config)
    # visual hook
    if cfg.get('visual_config', None) is not None:
        # Output dir for visualizations is resolved relative to work_dir.
        cfg.visual_config['output_dir'] = os.path.join(
            cfg.work_dir, cfg.visual_config['output_dir'])
        runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
    # evaluation hook
    if validate and cfg.get('evaluation', None) is not None:
        dataset = build_dataset(cfg.data.val)
        if ('val_samples_per_gpu' in cfg.data
                or 'val_workers_per_gpu' in cfg.data):
            warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
                          'been deprecated. Please use '
                          '"val_dataloader=dict(samples_per_gpu=1)" instead. '
                          'Details see '
                          'https://github.com/open-mmlab/mmediting/pull/201')
        val_loader_cfg = {
            **loader_cfg,
            **dict(shuffle=False, drop_last=False),
            # Map the deprecated val_* keys onto the new names.
            **dict((newk, cfg.data[oldk]) for oldk, newk in [
                ('val_samples_per_gpu', 'samples_per_gpu'),
                ('val_workers_per_gpu', 'workers_per_gpu'),
            ] if oldk in cfg.data),
            **cfg.data.get('val_dataloader', {})
        }
        data_loader = build_dataloader(dataset, **val_loader_cfg)
        save_path = osp.join(cfg.work_dir, 'val_visuals')
        runner.register_hook(
            DistEvalIterHook(
                data_loader, save_path=save_path, **cfg.evaluation),
            priority='LOW')
    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            # Copy so popping 'priority' does not mutate the user config.
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_iters)
def _non_dist_train(model,
                    dataset,
                    cfg,
                    validate=False,
                    logger=None,
                    timestamp=None,
                    meta=None):
    """Non-Distributed training function.

    Mirrors ``_dist_train`` but wraps the model with ``MMDataParallel``
    and registers the non-distributed ``EvalIterHook`` for validation.

    Args:
        model (nn.Module): The model to be trained.
        dataset (:obj:`Dataset`): Train dataset.
        cfg (dict): The config dict for training.
        validate (bool): Whether to do evaluation. Default: False.
        logger (logging.Logger | None): Logger for training. Default: None.
        timestamp (str | None): Local time for runner. Default: None.
        meta (dict | None): Meta dict to record some important information.
            Default: None.
    """
    # Promote a single dataset to a one-element list so a dataloader is
    # built per dataset uniformly below.
    dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
    # step 1: give default values and override (if exist) from cfg.data
    loader_cfg = {
        **dict(
            seed=cfg.get('seed'),
            drop_last=False,
            dist=False,
            num_gpus=cfg.gpus),
        # Extra defaults only apply under the parrots build of torch.
        **({} if torch.__version__ != 'parrots' else dict(
            prefetch_num=2,
            pin_memory=False,
        )),
        # Whitelisted keys from cfg.data override the defaults above.
        **dict((k, cfg.data[k]) for k in [
            'samples_per_gpu',
            'workers_per_gpu',
            'shuffle',
            'seed',
            'drop_last',
            'prefetch_num',
            'pin_memory',
        ] if k in cfg.data)
    }
    # step 2: cfg.data.train_dataloader has highest priority
    train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
    data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
    # put model on gpus/cpus
    model = MMDataParallel(model, device_ids=range(cfg.gpus))
    # build runner
    optimizer = build_optimizers(model, cfg.optimizers)
    runner = IterBasedRunner(
        model,
        optimizer=optimizer,
        work_dir=cfg.work_dir,
        logger=logger,
        meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
    runner.timestamp = timestamp
    # register hooks
    runner.register_training_hooks(
        cfg.lr_config,
        checkpoint_config=cfg.checkpoint_config,
        log_config=cfg.log_config)
    # visual hook
    if cfg.get('visual_config', None) is not None:
        # Output dir for visualizations is resolved relative to work_dir.
        cfg.visual_config['output_dir'] = os.path.join(
            cfg.work_dir, cfg.visual_config['output_dir'])
        runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
    # evaluation hook
    if validate and cfg.get('evaluation', None) is not None:
        dataset = build_dataset(cfg.data.val)
        if ('val_samples_per_gpu' in cfg.data
                or 'val_workers_per_gpu' in cfg.data):
            warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
                          'been deprecated. Please use '
                          '"val_dataloader=dict(samples_per_gpu=1)" instead. '
                          'Details see '
                          'https://github.com/open-mmlab/mmediting/pull/201')
        val_loader_cfg = {
            **loader_cfg,
            **dict(shuffle=False, drop_last=False),
            # Map the deprecated val_* keys onto the new names.
            **dict((newk, cfg.data[oldk]) for oldk, newk in [
                ('val_samples_per_gpu', 'samples_per_gpu'),
                ('val_workers_per_gpu', 'workers_per_gpu'),
            ] if oldk in cfg.data),
            **cfg.data.get('val_dataloader', {})
        }
        data_loader = build_dataloader(dataset, **val_loader_cfg)
        save_path = osp.join(cfg.work_dir, 'val_visuals')
        runner.register_hook(
            EvalIterHook(data_loader, save_path=save_path, **cfg.evaluation),
            priority='LOW')
    # user-defined hooks
    if cfg.get('custom_hooks', None):
        custom_hooks = cfg.custom_hooks
        assert isinstance(custom_hooks, list), \
            f'custom_hooks expect list type, but got {type(custom_hooks)}'
        for hook_cfg in cfg.custom_hooks:
            assert isinstance(hook_cfg, dict), \
                'Each item in custom_hooks expects dict type, but got ' \
                f'{type(hook_cfg)}'
            # Copy so popping 'priority' does not mutate the user config.
            hook_cfg = hook_cfg.copy()
            priority = hook_cfg.pop('priority', 'NORMAL')
            hook = build_from_cfg(hook_cfg, HOOKS)
            runner.register_hook(hook, priority=priority)
    if cfg.resume_from:
        runner.resume(cfg.resume_from)
    elif cfg.load_from:
        runner.load_checkpoint(cfg.load_from)
    runner.run(data_loaders, cfg.workflow, cfg.total_iters)
| 12,897 | 34.629834 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_video_inference.py | import glob
import os.path as osp
import re
from functools import reduce
import mmcv
import numpy as np
import torch
from mmedit.datasets.pipelines import Compose
VIDEO_EXTENSIONS = ('.mp4', '.mov')
def pad_sequence(data, window_size):
    """Pad a frame sequence along the temporal axis for sliding windows.

    ``window_size // 2`` frames taken from near each end of the sequence
    are flipped and concatenated before and after it, so that a full
    window of ``window_size`` frames exists around every original frame.

    Args:
        data (Tensor): Input sequence with the temporal axis at dim 1,
            e.g. of shape (n, t, ...).
        window_size (int): Size of the sliding window.

    Returns:
        Tensor: Padded sequence of shape (n, t + 2 * (window_size // 2), ...).
    """
    pad = window_size // 2
    head = data[:, 1 + pad:1 + 2 * pad].flip(1)
    tail = data[:, -1 - 2 * pad:-1 - pad].flip(1)
    return torch.cat([head, data, tail], dim=1)
def restoration_video_inference(model,
                                img_dir,
                                window_size,
                                start_idx,
                                filename_tmpl,
                                max_seq_len=None):
    """Inference image with the model.

    Args:
        model (nn.Module): The loaded model.
        img_dir (str): Directory of the input video, or path to a video
            file (extension in ``VIDEO_EXTENSIONS``).
        window_size (int): The window size used in sliding-window framework.
            This value should be set according to the settings of the network.
            A value smaller than 0 means using recurrent framework.
        start_idx (int): The index corresponds to the first frame in the
            sequence.
        filename_tmpl (str): Template for file name.
        max_seq_len (int | None): The maximum sequence length that the model
            processes. If the sequence length is larger than this number,
            the sequence is split into multiple segments. If it is None,
            the entire sequence is processed at once.

    Returns:
        Tensor: The predicted restoration result.
    """
    device = next(model.parameters()).device  # model device
    # build the data pipeline; demo_pipeline > test_pipeline > val_pipeline
    if model.cfg.get('demo_pipeline', None):
        test_pipeline = model.cfg.demo_pipeline
    elif model.cfg.get('test_pipeline', None):
        test_pipeline = model.cfg.test_pipeline
    else:
        test_pipeline = model.cfg.val_pipeline
    # check if the input is a video
    file_extension = osp.splitext(img_dir)[1]
    if file_extension in VIDEO_EXTENSIONS:
        video_reader = mmcv.VideoReader(img_dir)
        # load the images
        data = dict(lq=[], lq_path=None, key=img_dir)
        for frame in video_reader:
            # flipping the channel axis converts BGR frames to RGB
            data['lq'].append(np.flip(frame, axis=2))
        # remove the data loading pipeline steps (frames already in memory)
        tmp_pipeline = []
        for pipeline in test_pipeline:
            if pipeline['type'] not in [
                    'GenerateSegmentIndices', 'LoadImageFromFileList'
            ]:
                tmp_pipeline.append(pipeline)
        test_pipeline = tmp_pipeline
    else:
        # the first element in the pipeline must be 'GenerateSegmentIndices'
        if test_pipeline[0]['type'] != 'GenerateSegmentIndices':
            raise TypeError('The first element in the pipeline must be '
                            f'"GenerateSegmentIndices", but got '
                            f'"{test_pipeline[0]["type"]}".')
        # specify start_idx and filename_tmpl
        test_pipeline[0]['start_idx'] = start_idx
        test_pipeline[0]['filename_tmpl'] = filename_tmpl
        # prepare data: the sequence length is the number of files in img_dir
        sequence_length = len(glob.glob(osp.join(img_dir, '*')))
        # split on either separator so Windows paths also work
        img_dir_split = re.split(r'[\\/]', img_dir)
        key = img_dir_split[-1]
        lq_folder = reduce(osp.join, img_dir_split[:-1])
        data = dict(
            lq_path=lq_folder,
            gt_path='',
            key=key,
            sequence_length=sequence_length)
    # compose the pipeline
    test_pipeline = Compose(test_pipeline)
    data = test_pipeline(data)
    data = data['lq'].unsqueeze(0)  # in cpu
    # forward the model
    with torch.no_grad():
        if window_size > 0:  # sliding window framework
            data = pad_sequence(data, window_size)
            result = []
            # each iteration restores the centre frame of one window
            for i in range(0, data.size(1) - 2 * (window_size // 2)):
                data_i = data[:, i:i + window_size].to(device)
                result.append(model(lq=data_i, test_mode=True)['output'].cpu())
            result = torch.stack(result, dim=1)
        else:  # recurrent framework
            if max_seq_len is None:
                result = model(
                    lq=data.to(device), test_mode=True)['output'].cpu()
            else:
                # split overly long sequences into chunks of max_seq_len
                result = []
                for i in range(0, data.size(1), max_seq_len):
                    result.append(
                        model(
                            lq=data[:, i:i + max_seq_len].to(device),
                            test_mode=True)['output'].cpu())
                result = torch.cat(result, dim=1)
    return result
| 4,669 | 34.923077 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/distributed_wrapper.py | import torch
import torch.nn as nn
from mmcv.parallel import MODULE_WRAPPERS, MMDistributedDataParallel
from mmcv.parallel.scatter_gather import scatter_kwargs
from torch.cuda._utils import _get_device_index
@MODULE_WRAPPERS.register_module()
class DistributedDataParallelWrapper(nn.Module):
    """A DistributedDataParallel wrapper for models in MMediting.

    In MMediting, there is a need to wrap different modules in the models
    with separate DistributedDataParallel. Otherwise, it will cause
    errors for GAN training.

    More specifically, the GAN model usually has two sub-modules:
    generator and discriminator. If we wrap both of them in one
    standard DistributedDataParallel, it will cause errors during training,
    because when we update the parameters of the generator (or discriminator),
    the parameters of the discriminator (or generator) are not updated, which
    is not allowed for DistributedDataParallel.
    So we design this wrapper to separately wrap DistributedDataParallel
    for generator and discriminator.

    In this wrapper, we perform two operations:

    1. Wrap the modules in the models with separate MMDistributedDataParallel.
       Note that only modules with parameters will be wrapped.
    2. Do scatter operation for 'forward', 'train_step' and 'val_step'.

    Note that the arguments of this wrapper are the same as those in
    `torch.nn.parallel.distributed.DistributedDataParallel`.

    Args:
        module (nn.Module): Module that needs to be wrapped.
        device_ids (list[int | `torch.device`]): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
        dim (int, optional): Same as that in the official scatter function in
            pytorch. Defaults to 0.
        broadcast_buffers (bool): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Defaults to False.
        find_unused_parameters (bool, optional): Same as that in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
            Traverse the autograd graph of all tensors contained in returned
            value of the wrapped module's forward function. Defaults to False.
        kwargs (dict): Other arguments used in
            `torch.nn.parallel.distributed.DistributedDataParallel`.
    """

    def __init__(self,
                 module,
                 device_ids,
                 dim=0,
                 broadcast_buffers=False,
                 find_unused_parameters=False,
                 **kwargs):
        super().__init__()
        assert len(device_ids) == 1, (
            'Currently, DistributedDataParallelWrapper only supports one'
            'single CUDA device for each process.'
            f'The length of device_ids must be 1, but got {len(device_ids)}.')
        self.module = module
        self.dim = dim
        self.to_ddp(
            device_ids=device_ids,
            dim=dim,
            broadcast_buffers=broadcast_buffers,
            find_unused_parameters=find_unused_parameters,
            **kwargs)
        # Resolved integer index of the single CUDA device used.
        self.output_device = _get_device_index(device_ids[0], True)

    def to_ddp(self, device_ids, dim, broadcast_buffers,
               find_unused_parameters, **kwargs):
        """Wrap models with separate MMDistributedDataParallel.

        It only wraps the modules with parameters.
        """
        for name, module in self.module._modules.items():
            if next(module.parameters(), None) is None:
                # Parameter-free sub-module: just move it to the GPU.
                module = module.cuda()
            elif all(not p.requires_grad for p in module.parameters()):
                # All parameters frozen: DDP gradient sync is unnecessary.
                module = module.cuda()
            else:
                module = MMDistributedDataParallel(
                    module.cuda(),
                    device_ids=device_ids,
                    dim=dim,
                    broadcast_buffers=broadcast_buffers,
                    find_unused_parameters=find_unused_parameters,
                    **kwargs)
            self.module._modules[name] = module

    def scatter(self, inputs, kwargs, device_ids):
        """Scatter function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
            device_ids (int): Device id.
        """
        return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)

    def forward(self, *inputs, **kwargs):
        """Forward function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        return self.module(*inputs[0], **kwargs[0])

    def train_step(self, *inputs, **kwargs):
        """Train step function.

        Args:
            inputs (Tensor): Input Tensor.
            kwargs (dict): Args for
                ``mmcv.parallel.scatter_gather.scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.train_step(*inputs[0], **kwargs[0])
        return output

    def val_step(self, *inputs, **kwargs):
        """Validation step function.

        Args:
            inputs (tuple): Input data.
            kwargs (dict): Args for ``scatter_kwargs``.
        """
        inputs, kwargs = self.scatter(inputs, kwargs,
                                      [torch.cuda.current_device()])
        output = self.module.val_step(*inputs[0], **kwargs[0])
        return output
| 5,720 | 39.864286 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/misc.py | import math
import numpy as np
import torch
from torchvision.utils import make_grid
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
    """Convert torch Tensors into image numpy arrays.

    Values are clamped to ``min_max`` and rescaled to [0, 1]. Behaviour
    depends on tensor rank (after squeezing leading singleton dims):

    1. 4D (N x 3/1 x H x W): images are stitched with `make_grid` along
       the batch dimension, then converted to a numpy array.
    2. 3D (3/1 x H x W) and 2D (H x W): converted directly.

    Input channels are expected in RGB order; the output follows the cv2
    convention, i.e. (H x W x C) with BGR order.

    Args:
        tensor (Tensor | list[Tensor]): Input tensors.
        out_type (numpy type): Output types. If ``np.uint8``, transform
            outputs to uint8 type with range [0, 255]; otherwise, float
            type with range [0, 1]. Default: ``np.uint8``.
        min_max (tuple): min and max values for clamp.

    Returns:
        (ndarray | list[ndarray]): 3D array of shape (H x W x C) or 2D
        array of shape (H x W); a list when a list was passed in.
    """
    single = torch.is_tensor(tensor)
    if not (single or (isinstance(tensor, list)
                       and all(torch.is_tensor(t) for t in tensor))):
        raise TypeError(
            f'tensor or list of tensors expected, got {type(tensor)}')

    tensors = [tensor] if single else tensor
    images = []
    for t in tensors:
        # Drop up to two leading singleton dims:
        # (1, 1, h, w) -> (h, w); (1, 3, h, w) -> (3, h, w);
        # (n>1, 3/1, h, w) is left unchanged.
        t = t.squeeze(0).squeeze(0)
        t = t.float().detach().cpu().clamp_(*min_max)
        t = (t - min_max[0]) / (min_max[1] - min_max[0])

        rank = t.dim()
        if rank == 4:
            grid = make_grid(
                t, nrow=int(math.sqrt(t.size(0))), normalize=False).numpy()
            # RGB -> BGR, then CHW -> HWC.
            img = np.transpose(grid[[2, 1, 0], :, :], (1, 2, 0))
        elif rank == 3:
            img = np.transpose(t.numpy()[[2, 1, 0], :, :], (1, 2, 0))
        elif rank == 2:
            img = t.numpy()
        else:
            raise ValueError('Only support 4D, 3D or 2D tensor. '
                             f'But received with dimension: {rank}')

        if out_type == np.uint8:
            # Unlike MATLAB, numpy's astype() truncates, so round first.
            img = (img * 255.0).round()
        images.append(img.astype(out_type))

    return images[0] if len(images) == 1 else images
| 2,898 | 37.653333 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/mask.py | import math
import cv2
import mmcv
import numpy as np
from PIL import Image, ImageDraw
def random_bbox(img_shape, max_bbox_shape, max_bbox_delta=40, min_margin=20):
    """Generate a random bbox for the mask on a given image.

    In our implementation, the max value cannot be obtained since we use
    `np.random.randint`. And this may be different with other standard
    scripts in the community.

    Args:
        img_shape (tuple[int]): The size of a image, in the form of (h, w).
        max_bbox_shape (int | tuple[int]): Maximum shape of the mask box,
            in the form of (h, w). If it is an integer, the mask box will be
            square.
        max_bbox_delta (int | tuple[int]): Maximum delta of the mask box,
            in the form of (delta_h, delta_w). If it is an integer, delta_h
            and delta_w will be the same. Mask shape will be randomly sampled
            from the range of `max_bbox_shape - max_bbox_delta` and
            `max_bbox_shape`. Default: (40, 40).
        min_margin (int | tuple[int]): The minimum margin size from the
            edges of mask box to the image boarder, in the form of
            (margin_h, margin_w). If it is an integer, margin_h and margin_w
            will be the same. Default: (20, 20).

    Returns:
        tuple[int]: The generated box, (top, left, h, w).
    """
    # Normalize scalar arguments to (h, w) pairs.
    if not isinstance(max_bbox_shape, tuple):
        max_bbox_shape = (max_bbox_shape, max_bbox_shape)
    if not isinstance(max_bbox_delta, tuple):
        max_bbox_delta = (max_bbox_delta, max_bbox_delta)
    if not isinstance(min_margin, tuple):
        min_margin = (min_margin, min_margin)
    assert mmcv.is_tuple_of(max_bbox_shape, int)
    assert mmcv.is_tuple_of(max_bbox_delta, int)
    assert mmcv.is_tuple_of(min_margin, int)

    img_h, img_w = img_shape[:2]
    max_mask_h, max_mask_w = max_bbox_shape
    max_delta_h, max_delta_w = max_bbox_delta
    margin_h, margin_w = min_margin

    # Validate that a box of the requested size can fit at all.
    if max_mask_h > img_h or max_mask_w > img_w:
        raise ValueError(f'mask shape {max_bbox_shape} should be smaller than '
                         f'image shape {img_shape}')
    if (max_delta_h // 2 * 2 >= max_mask_h
            or max_delta_w // 2 * 2 >= max_mask_w):
        raise ValueError(f'mask delta {max_bbox_delta} should be smaller than'
                         f'mask shape {max_bbox_shape}')
    if img_h - max_mask_h < 2 * margin_h or img_w - max_mask_w < 2 * margin_w:
        raise ValueError(f'Margin {min_margin} cannot be satisfied for img'
                         f'shape {img_shape} and mask shape {max_bbox_shape}')

    # Sample the top-left corner within the admissible range, then shrink
    # the box by a random delta, shifting the corner inward by the same
    # amount.
    top = np.random.randint(margin_h, img_h - margin_h - max_mask_h)
    left = np.random.randint(margin_w, img_w - margin_w - max_mask_w)
    delta_top = np.random.randint(0, max_delta_h // 2 + 1)
    delta_left = np.random.randint(0, max_delta_w // 2 + 1)
    return (top + delta_top, left + delta_left, max_mask_h - delta_top,
            max_mask_w - delta_left)
def bbox2mask(img_shape, bbox, dtype='uint8'):
    """Generate mask in ndarray from bbox.

    The returned mask has the shape of (h, w, 1). '1' indicates the
    hole and '0' indicates the valid regions.

    We prefer to use `uint8` as the data type of masks, which may be
    different from other codes in the community.

    Args:
        img_shape (tuple[int]): The size of the image.
        bbox (tuple[int]): Configuration tuple, (top, left, height, width)
        dtype (str): Indicate the data type of returned masks.
            Default: 'uint8'

    Return:
        numpy.ndarray: Mask in the shape of (h, w, 1).
    """
    img_h, img_w = img_shape[:2]
    top, left, box_h, box_w = bbox
    mask = np.zeros((img_h, img_w, 1), dtype=dtype)
    mask[top:top + box_h, left:left + box_w, :] = 1
    return mask
def brush_stroke_mask(img_shape,
                      num_vertices=(4, 12),
                      mean_angle=2 * math.pi / 5,
                      angle_range=2 * math.pi / 15,
                      brush_width=(12, 40),
                      max_loops=4,
                      dtype='uint8'):
    """Generate free-form mask.

    The method of generating free-form mask is in the following paper:
    Free-Form Image Inpainting with Gated Convolution.

    When you set the config of this type of mask. You may note the usage of
    `np.random.randint` and the range of `np.random.randint` is [left, right).

    We prefer to use `uint8` as the data type of masks, which may be different
    from other codes in the community.

    Bugfixes compared to the previous implementation:

    1. ``PIL.Image.size`` is ``(width, height)``; it was unpacked as
       ``h, w = mask.size``, which swapped the axes and sampled/clipped
       vertices in the wrong ranges for non-square images.
    2. ``Image.transpose`` returns a new image; its result was discarded,
       so the random flips at the end never took effect.

    Args:
        img_shape (tuple[int]): Size of the image.
        num_vertices (int | tuple[int]): Min and max number of vertices. If
            only give an integer, we will fix the number of vertices.
            Default: (4, 12).
        mean_angle (float): Mean value of the angle in each vertex. The angle
            is measured in radians. Default: 2 * math.pi / 5.
        angle_range (float): Range of the random angle.
            Default: 2 * math.pi / 15.
        brush_width (int | tuple[int]): (min_width, max_width). If only give
            an integer, we will fix the width of brush. Default: (12, 40).
        max_loops (int): The max number of for loops of drawing strokes.
        dtype (str): Indicate the data type of returned masks.
            Default: 'uint8'.

    Returns:
        numpy.ndarray: Mask in the shape of (h, w, 1). '1' marks stroked
        (hole) pixels and '0' the valid region.

    Raises:
        TypeError: If `num_vertices` or `brush_width` is neither an int nor
            a tuple of int.
    """
    img_h, img_w = img_shape[:2]
    # Normalize (min, max)-style arguments: a scalar v becomes the
    # degenerate half-open range [v, v + 1) for np.random.randint.
    if isinstance(num_vertices, int):
        min_num_vertices, max_num_vertices = num_vertices, num_vertices + 1
    elif isinstance(num_vertices, tuple):
        min_num_vertices, max_num_vertices = num_vertices
    else:
        raise TypeError('The type of num_vertices should be int '
                        f'or tuple[int], but got type: {num_vertices}')

    if isinstance(brush_width, tuple):
        min_width, max_width = brush_width
    elif isinstance(brush_width, int):
        min_width, max_width = brush_width, brush_width + 1
    else:
        raise TypeError('The type of brush_width should be int '
                        f'or tuple[int], but got type: {brush_width}')

    average_radius = math.sqrt(img_h * img_h + img_w * img_w) / 8
    mask = Image.new('L', (img_w, img_h), 0)

    # Draw a random number of strokes; each stroke is a polyline whose
    # vertices are produced by a random walk with random step lengths
    # and angles.
    loop_num = np.random.randint(1, max_loops)
    num_vertex_list = np.random.randint(
        min_num_vertices, max_num_vertices, size=loop_num)
    angle_min_list = np.random.uniform(0, angle_range, size=loop_num)
    angle_max_list = np.random.uniform(0, angle_range, size=loop_num)

    for loop_n in range(loop_num):
        num_vertex = num_vertex_list[loop_n]
        angle_min = mean_angle - angle_min_list[loop_n]
        angle_max = mean_angle + angle_max_list[loop_n]

        # set random angle on each vertex; reverse the direction of every
        # other step so the stroke meanders instead of spiralling.
        angles = np.random.uniform(angle_min, angle_max, size=num_vertex)
        reverse_mask = (np.arange(num_vertex, dtype=np.float32) % 2) == 0
        angles[reverse_mask] = 2 * math.pi - angles[reverse_mask]

        # set random vertices; PIL's Image.size is (width, height)
        w, h = mask.size
        vertex = [(np.random.randint(0, w), np.random.randint(0, h))]
        r_list = np.random.normal(
            loc=average_radius, scale=average_radius // 2, size=num_vertex)
        for i in range(num_vertex):
            r = np.clip(r_list[i], 0, 2 * average_radius)
            new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
            new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
            vertex.append((int(new_x), int(new_y)))

        # draw brush strokes according to the vertex and angle list; the
        # ellipses round off the joints between the line segments
        draw = ImageDraw.Draw(mask)
        width = np.random.randint(min_width, max_width)
        draw.line(vertex, fill=1, width=width)
        for v in vertex:
            draw.ellipse((v[0] - width // 2, v[1] - width // 2,
                          v[0] + width // 2, v[1] + width // 2),
                         fill=1)
    # randomly flip the mask (Image.transpose returns a new image, so the
    # result must be assigned back)
    if np.random.normal() > 0:
        mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
    if np.random.normal() > 0:
        mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
    mask = np.array(mask).astype(dtype=getattr(np, dtype))
    mask = mask[:, :, None]
    return mask
def random_irregular_mask(img_shape,
                          num_vertices=(4, 8),
                          max_angle=4,
                          length_range=(10, 100),
                          brush_width=(10, 40),
                          dtype='uint8'):
    """Generate random irregular masks.

    This is a modified version of free-form mask implemented in
    'brush_stroke_mask'.

    We prefer to use `uint8` as the data type of masks, which may be different
    from other codes in the community.

    TODO: Rewrite the implementation of this function.

    Args:
        img_shape (tuple[int]): Size of the image.
        num_vertices (int | tuple[int]): Min and max number of vertices. If
            only give an integer, we will fix the number of vertices.
            Default: (4, 8).
        max_angle (float): Max value of angle at each vertex. Default 4.0.
        length_range (int | tuple[int]): (min_length, max_length). If only give
            an integer, we will fix the length of brush. Default: (10, 100).
        brush_width (int | tuple[int]): (min_width, max_width). If only give
            an integer, we will fix the width of brush. Default: (10, 40).
        dtype (str): Indicate the data type of returned masks. Default: 'uint8'

    Returns:
        numpy.ndarray: Mask in the shape of (h, w, 1).
    """
    h, w = img_shape[:2]
    mask = np.zeros((h, w), dtype=dtype)
    # Normalize the (min, max)-style arguments: a scalar v becomes the
    # degenerate half-open range [v, v + 1) for np.random.randint.
    if isinstance(length_range, int):
        min_length, max_length = length_range, length_range + 1
    elif isinstance(length_range, tuple):
        min_length, max_length = length_range
    else:
        raise TypeError('The type of length_range should be int'
                        f'or tuple[int], but got type: {length_range}')
    if isinstance(num_vertices, int):
        min_num_vertices, max_num_vertices = num_vertices, num_vertices + 1
    elif isinstance(num_vertices, tuple):
        min_num_vertices, max_num_vertices = num_vertices
    else:
        raise TypeError('The type of num_vertices should be int'
                        f'or tuple[int], but got type: {num_vertices}')
    if isinstance(brush_width, int):
        min_brush_width, max_brush_width = brush_width, brush_width + 1
    elif isinstance(brush_width, tuple):
        min_brush_width, max_brush_width = brush_width
    else:
        raise TypeError('The type of brush_width should be int'
                        f'or tuple[int], but got type: {brush_width}')
    num_v = np.random.randint(min_num_vertices, max_num_vertices)
    for i in range(num_v):
        start_x = np.random.randint(w)
        start_y = np.random.randint(h)
        # from the start point, randomly select n \in [1, 6) directions.
        direction_num = np.random.randint(1, 6)
        angle_list = np.random.randint(0, max_angle, size=direction_num)
        length_list = np.random.randint(
            min_length, max_length, size=direction_num)
        brush_width_list = np.random.randint(
            min_brush_width, max_brush_width, size=direction_num)
        for direct_n in range(direction_num):
            # small offset avoids an exactly-zero angle
            angle = 0.01 + angle_list[direct_n]
            # alternate the rotation direction between strokes
            if i % 2 == 0:
                angle = 2 * math.pi - angle
            length = length_list[direct_n]
            brush_w = brush_width_list[direct_n]
            # compute end point according to the random angle
            end_x = (start_x + length * np.sin(angle)).astype(np.int32)
            end_y = (start_y + length * np.cos(angle)).astype(np.int32)
            # NOTE(review): cv2.line expects points as (x, y), but the
            # coordinates are passed as (start_y, start_x) while start_x is
            # sampled from the width range -- confirm the intended axis
            # convention, especially for non-square images.
            cv2.line(mask, (start_y, start_x), (end_y, end_x), 1, brush_w)
            # the next segment continues from this segment's end point
            start_x, start_y = end_x, end_y
    mask = np.expand_dims(mask, axis=2)
    return mask
def get_irregular_mask(img_shape, area_ratio_range=(0.15, 0.5), **kwargs):
    """Get irregular mask with the constraints in mask ratio.

    Repeatedly samples from `random_irregular_mask` until the masked-area
    ratio falls strictly inside `area_ratio_range`.

    Args:
        img_shape (tuple[int]): Size of the image.
        area_ratio_range (tuple(float)): Contain the minimum and maximum area
            ratio. Default: (0.15, 0.5).

    Returns:
        numpy.ndarray: Mask in the shape of (h, w, 1).
    """
    lower, upper = area_ratio_range
    num_pixels = img_shape[0] * img_shape[1]
    while True:
        mask = random_irregular_mask(img_shape, **kwargs)
        if lower < np.sum(mask) / num_pixels < upper:
            return mask
| 12,928 | 39.785489 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/scheduler/lr_updater.py | from mmcv.runner import HOOKS, LrUpdaterHook
@HOOKS.register_module()
class LinearLrUpdaterHook(LrUpdaterHook):
    """Linear learning rate scheduler for image generation.

    The learning rate stays at the base value ('base_lr' defined in mmcv)
    until ``start`` (iteration / epoch, depending on ``by_epoch`` in the
    mmcv parent class), then decays linearly towards ``target_lr``,
    stepping once every ``interval`` updates.

    Args:
        target_lr (float): The target learning rate. Default: 0.
        start (int): The start point (iteration / epoch, specified by args
            'by_epoch' in its parent class in mmcv) to update learning rate.
            Default: 0.
        interval (int): The interval to update the learning rate. Default: 1.
    """

    def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
        super().__init__(**kwargs)
        self.target_lr = target_lr
        self.start = start
        self.interval = interval

    def get_lr(self, runner, base_lr):
        """Calculate the current learning rate.

        Args:
            runner (object): The passed runner.
            base_lr (float): Base learning rate.

        Returns:
            float: Current learning rate.
        """
        if self.by_epoch:
            progress, max_progress = runner.epoch, runner.max_epochs
        else:
            progress, max_progress = runner.iter, runner.max_iters
        assert max_progress >= self.start
        if max_progress == self.start:
            return base_lr
        # Fraction of completed update intervals; lr is fixed before
        # 'start' and interpolates linearly to target_lr afterwards.
        done = max(0, progress - self.start) // self.interval
        total = (max_progress - self.start) // self.interval
        return base_lr + (self.target_lr - base_lr) * (done / total)
| 1,880 | 34.490566 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/metric_utils.py | import cv2
import numpy as np
def gaussian(x, sigma):
    """Gaussian (normal) probability density function.

    Args:
        x (array_like): The independent variable.
        sigma (float): Standard deviation of the gaussian function.

    Returns:
        ndarray or scalar: Gaussian value of `x`.
    """
    exponent = -x**2 / (2 * sigma**2)
    return np.exp(exponent) / (sigma * np.sqrt(2 * np.pi))
def dgaussian(x, sigma):
    """First derivative of the gaussian function.

    Args:
        x (array_like): The independent variable.
        sigma (float): Standard deviation of the gaussian function.

    Returns:
        ndarray or scalar: Gradient of gaussian of `x`.
    """
    # Differentiating exp(-x^2 / (2 sigma^2)) brings down -x / sigma^2.
    value = gaussian(x, sigma)
    return -x * value / sigma**2
def gauss_filter(sigma, epsilon=1e-2):
    """Build derivative-of-gaussian filters along the x and y axes.

    The kernel half-size is chosen so that values outside the kernel fall
    below ``epsilon`` relative to the peak.

    Args:
        sigma (float): Standard deviation of the gaussian kernel.
        epsilon (float): Small value used when calculating kernel size.
            Default: 1e-2.

    Returns:
        tuple[ndarray]: Gaussian filter along x and y axis.
    """
    half_size = np.ceil(
        sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
    size = int(2 * half_size + 1)

    # Separable construction: gaussian along rows, gaussian derivative
    # along columns.
    filter_x = np.zeros((size, size))
    for row in range(size):
        for col in range(size):
            filter_x[row, col] = gaussian(row - half_size, sigma) * \
                dgaussian(col - half_size, sigma)

    # L2-normalize the kernel; the y filter is just the transpose.
    norm = np.sqrt((filter_x**2).sum())
    filter_x = filter_x / norm
    return filter_x, np.transpose(filter_x)
def gauss_gradient(img, sigma):
    """Gaussian gradient magnitude of an image.

    From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/
    submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/
    index.html

    Args:
        img (ndarray): Input image.
        sigma (float): Standard deviation of the gaussian kernel.

    Returns:
        ndarray: Gaussian gradient of input `img`.
    """
    kernel_x, kernel_y = gauss_filter(sigma)
    grad_x = cv2.filter2D(
        img, -1, kernel_x, borderType=cv2.BORDER_REPLICATE)
    grad_y = cv2.filter2D(
        img, -1, kernel_y, borderType=cv2.BORDER_REPLICATE)
    # Combine the two directional responses into a single magnitude.
    return np.sqrt(grad_x**2 + grad_y**2)
| 2,273 | 26.731707 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/eval_hooks.py | import os.path as osp
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalIterHook(Hook):
    """Non-Distributed evaluation hook for iteration-based runner.
    This hook will regularly perform evaluation in a given interval when
    performing in non-distributed environment.
    Args:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval. Default: 1.
        eval_kwargs (dict): Other eval kwargs. It contains:
            save_image (bool): Whether to save image.
            save_path (str): The path to save image.
    """
    def __init__(self, dataloader, interval=1, **eval_kwargs):
        if not isinstance(dataloader, DataLoader):
            raise TypeError('dataloader must be a pytorch DataLoader, '
                            f'but got { type(dataloader)}')
        self.dataloader = dataloader
        self.interval = interval
        # 'save_image' and 'save_path' are consumed here; everything left in
        # eval_kwargs is forwarded verbatim to the dataset's ``evaluate()``.
        self.eval_kwargs = eval_kwargs
        self.save_image = self.eval_kwargs.pop('save_image', False)
        self.save_path = self.eval_kwargs.pop('save_path', None)
    def after_train_iter(self, runner):
        """The behavior after each train iteration.
        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
        """
        if not self.every_n_iters(runner, self.interval):
            return
        # Clear stale training values so only evaluation results are logged.
        runner.log_buffer.clear()
        # Imported at call time, not module import time (presumably to avoid
        # a circular import between mmedit.apis and core hooks — TODO confirm).
        from mmedit.apis import single_gpu_test
        results = single_gpu_test(
            runner.model,
            self.dataloader,
            save_image=self.save_image,
            save_path=self.save_path,
            iteration=runner.iter)
        self.evaluate(runner, results)
    def evaluate(self, runner, results):
        """Evaluation function.
        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
            results (dict): Model forward results.
        """
        eval_res = self.dataloader.dataset.evaluate(
            results, logger=runner.logger, **self.eval_kwargs)
        for name, val in eval_res.items():
            runner.log_buffer.output[name] = val
        # Mark the buffer ready so logger hooks flush these values.
        runner.log_buffer.ready = True
class DistEvalIterHook(EvalIterHook):
    """Distributed evaluation hook.
    Args:
        dataloader (DataLoader): A PyTorch dataloader.
        interval (int): Evaluation interval. Default: 1.
        tmpdir (str | None): Temporary directory to save the results of all
            processes. Default: None.
        gpu_collect (bool): Whether to use gpu or cpu to collect results.
            Default: False.
        eval_kwargs (dict): Other eval kwargs. It may contain:
            save_image (bool): Whether save image.
            save_path (str): The path to save image.
    """
    def __init__(self,
                 dataloader,
                 interval=1,
                 gpu_collect=False,
                 **eval_kwargs):
        super().__init__(dataloader, interval, **eval_kwargs)
        self.gpu_collect = gpu_collect
    def after_train_iter(self, runner):
        """The behavior after each train iteration.
        Args:
            runner (``mmcv.runner.BaseRunner``): The runner.
        """
        if not self.every_n_iters(runner, self.interval):
            return
        runner.log_buffer.clear()
        # Imported at call time, not module import time (presumably to avoid
        # a circular import between mmedit.apis and core hooks — TODO confirm).
        from mmedit.apis import multi_gpu_test
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            # Per-process partial results are staged under the work dir.
            tmpdir=osp.join(runner.work_dir, '.eval_hook'),
            gpu_collect=self.gpu_collect,
            save_image=self.save_image,
            save_path=self.save_path,
            iteration=runner.iter)
        # Only the master process evaluates and logs the collected results.
        if runner.rank == 0:
            print('\n')
            self.evaluate(runner, results)
| 3,766 | 33.87963 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/metrics.py | import math
import cv2
import mmcv
import numpy as np
from scipy.ndimage import convolve
from scipy.special import gamma
from mmedit.datasets.pipelines.matlab_like_resize import MATLABLikeResize
from .metric_utils import gauss_gradient
def sad(alpha, trimap, pred_alpha):
    """Sum of Absolute Differences between predicted and GT alpha mattes.

    Both mattes are rescaled from [0, 255] to [0, 1] before the absolute
    differences are accumulated; the total is divided by 1000 to keep the
    reported magnitude small.

    Args:
        alpha (ndarray): Ground-truth alpha matte, 2D, values in [0, 255].
        trimap (ndarray): Trimap, 2D, values in {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte, 2D, values in [0, 255].

    Returns:
        float: SAD divided by 1000.
    """
    if not (alpha.ndim == trimap.ndim == pred_alpha.ndim == 2):
        raise ValueError(
            'input alpha, trimap and pred_alpha should has two dimensions, '
            f'alpha {alpha.shape}, please check their shape: '
            f'trimap {trimap.shape}, pred_alpha {pred_alpha.shape}')
    # Predictions must already be clamped to the trimap's known regions.
    assert (pred_alpha[trimap == 0] == 0).all()
    assert (pred_alpha[trimap == 255] == 255).all()
    gt = alpha.astype(np.float64) / 255
    pred = pred_alpha.astype(np.float64) / 255
    return np.abs(pred - gt).sum() / 1000
def mse(alpha, trimap, pred_alpha):
    """Mean Squared Error over the unknown (trimap == 128) region.

    Both mattes are rescaled from [0, 255] to [0, 1]; the squared error is
    averaged over the number of unknown pixels.

    Args:
        alpha (ndarray): Ground-truth alpha matte, 2D, values in [0, 255].
        trimap (ndarray): Trimap, 2D, values in {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte, 2D, values in [0, 255].

    Returns:
        float: MSE over the unknown region, or 0 if there is none.
    """
    if not (alpha.ndim == trimap.ndim == pred_alpha.ndim == 2):
        raise ValueError(
            'input alpha, trimap and pred_alpha should has two dimensions, '
            f'alpha {alpha.shape}, please check their shape: '
            f'trimap {trimap.shape}, pred_alpha {pred_alpha.shape}')
    # Predictions must already be clamped to the trimap's known regions.
    assert (pred_alpha[trimap == 0] == 0).all()
    assert (pred_alpha[trimap == 255] == 255).all()
    gt = alpha.astype(np.float64) / 255
    pred = pred_alpha.astype(np.float64) / 255
    num_unknown = (trimap == 128).sum()
    if num_unknown == 0:
        return 0
    return ((pred - gt)**2).sum() / num_unknown
def gradient_error(alpha, trimap, pred_alpha, sigma=1.4):
    """Gradient error for evaluating alpha matte prediction.
    Args:
        alpha (ndarray): Ground-truth alpha matte.
        trimap (ndarray): Input trimap with its value in {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte.
        sigma (float): Standard deviation of the gaussian kernel. Default: 1.4.
    Returns:
        float: Squared gradient difference over the unknown region,
            divided by 1000 to match SAD's magnitude.
    """
    if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
        raise ValueError(
            'input alpha, trimap and pred_alpha should has two dimensions, '
            f'alpha {alpha.shape}, please check their shape: '
            f'trimap {trimap.shape}, pred_alpha {pred_alpha.shape}')
    if not ((pred_alpha[trimap == 0] == 0).all() and
            (pred_alpha[trimap == 255] == 255).all()):
        raise ValueError(
            'pred_alpha should be masked by trimap before evaluation')
    alpha = alpha.astype(np.float64)
    pred_alpha = pred_alpha.astype(np.float64)
    # Both mattes are min-max rescaled to [0, 1] before taking gradients.
    # The return values are discarded: the code relies on cv2.normalize
    # writing into the preallocated dst arrays in place.
    alpha_normed = np.zeros_like(alpha)
    pred_alpha_normed = np.zeros_like(pred_alpha)
    cv2.normalize(alpha, alpha_normed, 1., 0., cv2.NORM_MINMAX)
    cv2.normalize(pred_alpha, pred_alpha_normed, 1., 0., cv2.NORM_MINMAX)
    alpha_grad = gauss_gradient(alpha_normed, sigma).astype(np.float32)
    pred_alpha_grad = gauss_gradient(pred_alpha_normed,
                                     sigma).astype(np.float32)
    # Only the unknown (trimap == 128) region contributes to the error.
    grad_loss = ((alpha_grad - pred_alpha_grad)**2 * (trimap == 128)).sum()
    # same as SAD, divide by 1000 to reduce the magnitude of the result
    return grad_loss / 1000
def connectivity(alpha, trimap, pred_alpha, step=0.1):
    """Connectivity error for evaluating alpha matte prediction.
    Args:
        alpha (ndarray): Ground-truth alpha matte with shape (height, width).
            Value range of alpha is [0, 255].
        trimap (ndarray): Input trimap with shape (height, width). Elements
            in trimap are one of {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte with shape (height, width).
            Value range of pred_alpha is [0, 255].
        step (float): Step of threshold when computing intersection between
            `alpha` and `pred_alpha`.
    Returns:
        float: Connectivity error over the unknown region, divided by 1000.
    """
    if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
        raise ValueError(
            'input alpha, trimap and pred_alpha should has two dimensions, '
            f'alpha {alpha.shape}, please check their shape: '
            f'trimap {trimap.shape}, pred_alpha {pred_alpha.shape}')
    if not ((pred_alpha[trimap == 0] == 0).all() and
            (pred_alpha[trimap == 255] == 255).all()):
        raise ValueError(
            'pred_alpha should be masked by trimap before evaluation')
    alpha = alpha.astype(np.float32) / 255
    pred_alpha = pred_alpha.astype(np.float32) / 255
    # Threshold levels 0, step, 2*step, ..., up to 1.
    thresh_steps = np.arange(0, 1 + step, step)
    # round_down_map[i, j] records the last threshold level at which pixel
    # (i, j) still belonged to the largest connected intersection region;
    # -1 marks pixels that have stayed connected at every level seen so far.
    round_down_map = -np.ones_like(alpha)
    for i in range(1, len(thresh_steps)):
        alpha_thresh = alpha >= thresh_steps[i]
        pred_alpha_thresh = pred_alpha >= thresh_steps[i]
        intersection = (alpha_thresh & pred_alpha_thresh).astype(np.uint8)
        # connected components
        _, output, stats, _ = cv2.connectedComponentsWithStats(
            intersection, connectivity=4)
        # start from 1 in dim 0 to exclude background
        size = stats[1:, -1]
        # largest connected component of the intersection
        omega = np.zeros_like(alpha)
        if len(size) != 0:
            max_id = np.argmax(size)
            # plus one to include background
            omega[output == max_id + 1] = 1
        # Pixels that just dropped out of the largest component get the
        # previous threshold recorded.
        mask = (round_down_map == -1) & (omega == 0)
        round_down_map[mask] = thresh_steps[i - 1]
    round_down_map[round_down_map == -1] = 1
    alpha_diff = alpha - round_down_map
    pred_alpha_diff = pred_alpha - round_down_map
    # only calculate difference larger than or equal to 0.15
    alpha_phi = 1 - alpha_diff * (alpha_diff >= 0.15)
    pred_alpha_phi = 1 - pred_alpha_diff * (pred_alpha_diff >= 0.15)
    # Only the unknown (trimap == 128) region contributes to the error.
    connectivity_error = np.sum(
        np.abs(alpha_phi - pred_alpha_phi) * (trimap == 128))
    # same as SAD, divide by 1000 to reduce the magnitude of the result
    return connectivity_error / 1000
def reorder_image(img, input_order='HWC'):
    """Normalize an image array to 'HWC' layout.

    A 2D (h, w) input gains a trailing channel axis; a 'CHW' input is
    transposed; an 'HWC' input is returned unchanged.

    Args:
        img (ndarray): Input image.
        input_order (str): Layout of `img`, 'HWC' or 'CHW'. Ignored for 2D
            inputs. Default: 'HWC'.

    Returns:
        ndarray: Image in (h, w, c) layout.
    """
    if input_order not in ('HWC', 'CHW'):
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    if img.ndim == 2:
        return img[..., None]
    if input_order == 'CHW':
        return img.transpose(1, 2, 0)
    return img
def psnr(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
    """Calculate PSNR (Peak Signal-to-Noise Ratio).
    Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edges of an image. These
            pixels are not involved in the PSNR calculation. Default: 0.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether to convert the images to other color models.
            If None, the images are not altered. When computing for 'Y',
            the images are assumed to be in BGR order. Options are 'Y' and
            None. Default: None.
    Returns:
        float: psnr result (``inf`` for identical images).
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
    if isinstance(convert_to, str) and convert_to.lower() == 'y':
        img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
        img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
    elif convert_to is not None:
        raise ValueError('Wrong color model. Supported values are '
                         '"Y" and None.')
    if crop_border != 0:
        # Fix: the slice previously ended with ``None``, which appended a
        # spurious new axis (an HWC image became 4-D). The mean below was
        # unaffected, but the shape was misleading; ``...`` keeps all
        # remaining axes unchanged.
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    mse_value = np.mean((img1 - img2)**2)
    if mse_value == 0:
        # Identical images: noise power is zero.
        return float('inf')
    return 20. * np.log10(255. / np.sqrt(mse_value))
def _ssim(img1, img2):
    """Compute SSIM for a single-channel image pair.

    Helper of :func:`ssim`. Uses an 11x11 Gaussian window (sigma 1.5) and
    the standard stabilizing constants from the SSIM paper.

    Args:
        img1, img2 (ndarray): Images with range [0, 255] with order 'HWC'.

    Returns:
        float: Mean SSIM over the valid (border-cropped) region.
    """
    c1 = (0.01 * 255)**2
    c2 = (0.03 * 255)**2
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    kernel = cv2.getGaussianKernel(11, 1.5)
    window = np.outer(kernel, kernel.transpose())
    # Local means; the 5-pixel border is dropped to keep only valid support.
    mean1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
    mean2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
    mean1_sq = mean1**2
    mean2_sq = mean2**2
    mean12 = mean1 * mean2
    # Local variances and covariance via E[x^2] - E[x]^2.
    var1 = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mean1_sq
    var2 = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mean2_sq
    covar = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mean12
    numerator = (2 * mean12 + c1) * (2 * covar + c2)
    denominator = (mean1_sq + mean2_sq + c1) * (var1 + var2 + c2)
    return (numerator / denominator).mean()
def ssim(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
    """Calculate SSIM (structural similarity).
    Ref:
    Image quality assessment: From error visibility to structural similarity
    The results are the same as that of the official released MATLAB code in
    https://ece.uwaterloo.ca/~z70wang/research/ssim/.
    For three-channel images, SSIM is calculated for each channel and then
    averaged.
    Args:
        img1 (ndarray): Images with range [0, 255].
        img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edges of an image. These
            pixels are not involved in the SSIM calculation. Default: 0.
        input_order (str): Whether the input order is 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether to convert the images to other color models.
            If None, the images are not altered. When computing for 'Y',
            the images are assumed to be in BGR order. Options are 'Y' and
            None. Default: None.
    Returns:
        float: ssim result.
    """
    assert img1.shape == img2.shape, (
        f'Image shapes are different: {img1.shape}, {img2.shape}.')
    if input_order not in ['HWC', 'CHW']:
        raise ValueError(
            f'Wrong input_order {input_order}. Supported input_orders are '
            '"HWC" and "CHW"')
    img1 = reorder_image(img1, input_order=input_order)
    img2 = reorder_image(img2, input_order=input_order)
    if isinstance(convert_to, str) and convert_to.lower() == 'y':
        img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
        img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
        img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
        img1 = np.expand_dims(img1, axis=2)
        img2 = np.expand_dims(img2, axis=2)
    elif convert_to is not None:
        raise ValueError('Wrong color model. Supported values are '
                         '"Y" and None')
    if crop_border != 0:
        # Bugfix: this slice previously ended with ``None``, which appended a
        # new axis. An HWC input thus became (h, w, 1, c); the channel loop
        # below then saw ``shape[2] == 1`` and evaluated only the first
        # (blue) channel. ``...`` keeps the channel axis in place so all
        # channels are averaged, matching the documented behavior.
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
    # Per-channel SSIM, averaged over channels.
    ssims = []
    for i in range(img1.shape[2]):
        ssims.append(_ssim(img1[..., i], img2[..., i]))
    return np.array(ssims).mean()
class L1Evaluation:
    """L1 evaluation metric.

    Args:
        data_dict (dict): Must contain keys of 'gt_img' and 'fake_res'. If
            'mask' is given, the results will be computed with mask as weight.
    """

    def __call__(self, data_dict):
        # Imported lazily so this metric does not pull in the losses module
        # until it is actually used.
        from mmedit.models.losses.pixelwise_loss import l1_loss
        gt = data_dict['gt_img']
        # Prefer the composited 'fake_img' when present, otherwise fall back
        # to the raw network result 'fake_res'.
        if 'fake_img' in data_dict:
            pred = data_dict.get('fake_img')
        else:
            pred = data_dict.get('fake_res')
        mask = data_dict.get('mask', None)
        return l1_loss(pred, gt, weight=mask, reduction='mean')
def estimate_aggd_param(block):
"""Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters.
Args:
block (ndarray): 2D Image block.
Returns:
tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD
distribution (Estimating the parames in Equation 7 in the paper).
"""
block = block.flatten()
gam = np.arange(0.2, 10.001, 0.001) # len = 9801
gam_reciprocal = np.reciprocal(gam)
r_gam = np.square(gamma(gam_reciprocal * 2)) / (
gamma(gam_reciprocal) * gamma(gam_reciprocal * 3))
left_std = np.sqrt(np.mean(block[block < 0]**2))
right_std = np.sqrt(np.mean(block[block > 0]**2))
gammahat = left_std / right_std
rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2)
rhatnorm = (rhat * (gammahat**3 + 1) *
(gammahat + 1)) / ((gammahat**2 + 1)**2)
array_position = np.argmin((r_gam - rhatnorm)**2)
alpha = gam[array_position]
beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
return (alpha, beta_l, beta_r)
def compute_feature(block):
    """Compute the 18 NIQE features of one image block.

    The first two features come from the AGGD fit of the block itself;
    four more features are added for each of the four neighbor directions
    from the AGGD fit of products of adjacent coefficients.

    Args:
        block (ndarray): 2D Image block.

    Returns:
        list: Features with length of 18.
    """
    alpha, beta_l, beta_r = estimate_aggd_param(block)
    feat = [alpha, (beta_l + beta_r) / 2]
    # Distortions disturb the fairly regular structure of natural images;
    # this is captured by the distribution of products of adjacent
    # coefficients along horizontal, vertical and diagonal orientations.
    for shift in ([0, 1], [1, 0], [1, 1], [1, -1]):
        shifted_block = np.roll(block, shift, axis=(0, 1))
        alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
        mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
        feat.extend([alpha, mean, beta_l, beta_r])
    return feat
def niqe_core(img,
              mu_pris_param,
              cov_pris_param,
              gaussian_window,
              block_size_h=96,
              block_size_w=96):
    """Calculate NIQE (Natural Image Quality Evaluator) metric.
    Ref: Making a "Completely Blind" Image Quality Analyzer.
    This implementation could produce almost the same results as the official
    MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
    Note that we do not include block overlap height and width, since they are
    always 0 in the official implementation.
    For good performance, it is advisable by the official implementation to
    divide the distorted image in to the same size patched as used for the
    construction of multivariate Gaussian model.
    Args:
        img (ndarray): Input image whose quality needs to be computed. The
            image must be a gray or Y (of YCbCr) image with shape (h, w).
            Range [0, 255] with float type.
        mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian
            model calculated on the pristine dataset.
        cov_pris_param (ndarray): Covariance of a pre-defined multivariate
            Gaussian model calculated on the pristine dataset.
        gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the
            image.
        block_size_h (int): Height of the blocks in to which image is divided.
            Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks in to which image is divided.
            Default: 96 (the official recommended value).
    Returns:
        ndarray: 0-dim array holding the NIQE score of `img`.
    """
    # crop image
    h, w = img.shape
    num_block_h = math.floor(h / block_size_h)
    num_block_w = math.floor(w / block_size_w)
    img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]
    distparam = []  # dist param is actually the multiscale features
    for scale in (1, 2):  # perform on two scales (1, 2)
        # Local mean and local std (MSCN normalization inputs).
        mu = convolve(img, gaussian_window, mode='nearest')
        sigma = np.sqrt(
            np.abs(
                convolve(np.square(img), gaussian_window, mode='nearest') -
                np.square(mu)))
        # normalize, as in Eq. 1 in the paper
        img_nomalized = (img - mu) / (sigma + 1)
        feat = []
        for idx_w in range(num_block_w):
            for idx_h in range(num_block_h):
                # process each block; the block coordinates are divided by
                # `scale` because the image is downsampled by 2 below for
                # the second pass.
                block = img_nomalized[idx_h * block_size_h //
                                      scale:(idx_h + 1) * block_size_h //
                                      scale, idx_w * block_size_w //
                                      scale:(idx_w + 1) * block_size_w //
                                      scale]
                feat.append(compute_feature(block))
        distparam.append(np.array(feat))
        # matlab-like bicubic downsample with anti-aliasing
        if scale == 1:
            resize = MATLABLikeResize(keys=None, scale=0.5)
            img = resize._resize(img[:, :, np.newaxis] / 255.)[:, :, 0] * 255.
    # Concatenate per-scale features: one row of 36 values per block.
    distparam = np.concatenate(distparam, axis=1)
    # fit a MVG (multivariate Gaussian) model to distorted patch features
    mu_distparam = np.nanmean(distparam, axis=0)
    # Rows containing NaN (degenerate AGGD fits) are excluded from the
    # covariance estimate.
    distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)]
    cov_distparam = np.cov(distparam_no_nan, rowvar=False)
    # compute niqe quality, Eq. 10 in the paper
    invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2)
    quality = np.matmul(
        np.matmul((mu_pris_param - mu_distparam), invcov_param),
        np.transpose((mu_pris_param - mu_distparam)))
    return np.squeeze(np.sqrt(quality))
def niqe(img, crop_border, input_order='HWC', convert_to='y'):
    """Calculate NIQE (Natural Image Quality Evaluator) metric.
    Ref: Making a "Completely Blind" Image Quality Analyzer.
    This implementation could produce almost the same results as the official
    MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
    We use the official params estimated from the pristine dataset.
    We use the recommended block size (96, 96) without overlaps.
    Args:
        img (ndarray): Input image whose quality needs to be computed.
            The input image must be in range [0, 255] with float/int type.
            The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order)
            If the input order is 'HWC' or 'CHW', it will be converted to gray
            or Y (of YCbCr) image according to the ``convert_to`` argument.
        crop_border (int): Cropped pixels in each edge of an image. These
            pixels are not involved in the metric calculation.
        input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
            Default: 'HWC'.
        convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'.
            Default: 'y'.
    Returns:
        float: NIQE result.
    """
    # we use the official params estimated from the pristine dataset.
    # NOTE: this is a path relative to the current working directory, so the
    # function expects to be invoked from the repository root.
    niqe_pris_params = np.load('mmedit/core/evaluation/niqe_pris_params.npz')
    mu_pris_param = niqe_pris_params['mu_pris_param']
    cov_pris_param = niqe_pris_params['cov_pris_param']
    gaussian_window = niqe_pris_params['gaussian_window']
    img = img.astype(np.float32)
    if input_order != 'HW':
        img = reorder_image(img, input_order=input_order)
        if convert_to == 'y':
            img = mmcv.bgr2ycbcr(img / 255., y_only=True) * 255.
        elif convert_to == 'gray':
            # NOTE(review): the second argument here appears to land on
            # ``mmcv.bgr2gray``'s ``keepdim`` parameter rather than an OpenCV
            # color-conversion code — confirm this call is intended.
            img = mmcv.bgr2gray(img / 255., cv2.COLOR_BGR2GRAY) * 255.
        img = np.squeeze(img)
    if crop_border != 0:
        img = img[crop_border:-crop_border, crop_border:-crop_border]
    # round to follow official implementation
    img = img.round()
    niqe_result = niqe_core(img, mu_pris_param, cov_pris_param,
                            gaussian_window)
    return niqe_result
| 20,691 | 38.413333 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/export/wrappers.py | import os.path as osp
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn
from mmedit.models import BaseMattor, BasicRestorer, build_model
def inference_with_session(sess, io_binding, output_names, input_tensor):
    """Run an ONNX Runtime session on a torch tensor via IO binding.

    The tensor's memory is bound directly to the session input (no copy);
    outputs are copied back to CPU numpy arrays.

    Args:
        sess: ONNX Runtime inference session.
        io_binding: IO binding object obtained from ``sess``.
        output_names (list[str]): Names of the session outputs to fetch.
        input_tensor (torch.Tensor): Input bound under the name 'input'.

    Returns:
        list[ndarray]: One numpy array per bound output.
    """
    device_type = input_tensor.device.type
    device_id = input_tensor.device.index
    # torch reports ``None`` for the default device index.
    if device_id is None:
        device_id = 0
    io_binding.bind_input(
        name='input',
        device_type=device_type,
        device_id=device_id,
        element_type=np.float32,
        shape=input_tensor.shape,
        buffer_ptr=input_tensor.data_ptr())
    for name in output_names:
        io_binding.bind_output(name)
    sess.run_with_iobinding(io_binding)
    return io_binding.copy_outputs_to_cpu()
class ONNXRuntimeMattor(nn.Module):
    """Run a matting model through ONNX Runtime.

    The ONNX session performs the forward pass; shape restoration,
    evaluation and image saving are delegated to ``base_model``.
    """

    def __init__(self, sess, io_binding, output_names, base_model):
        super().__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names
        self.base_model = base_model

    def forward(self,
                merged,
                trimap,
                meta,
                test_mode=False,
                save_image=False,
                save_path=None,
                iteration=None):
        # The network consumes the merged image and trimap stacked along the
        # channel dimension.
        model_input = torch.cat((merged, trimap), 1).contiguous()
        pred_alpha = inference_with_session(self.sess, self.io_binding,
                                            self.output_names, model_input)[0]
        pred_alpha = pred_alpha.squeeze()
        pred_alpha = self.base_model.restore_shape(pred_alpha, meta)
        eval_result = self.base_model.evaluate(pred_alpha, meta)
        if save_image:
            self.base_model.save_image(pred_alpha, meta, save_path, iteration)
        return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
class RestorerGenerator(nn.Module):
    """Generator stand-in that forwards through an ONNX Runtime session.

    Instances replace a restorer's ``generator`` module so the surrounding
    model logic is reused unchanged.
    """

    def __init__(self, sess, io_binding, output_names):
        super().__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names

    def forward(self, x):
        outputs = inference_with_session(self.sess, self.io_binding,
                                         self.output_names, x)
        # The session returns numpy; convert back to torch for the caller.
        return torch.from_numpy(outputs[0])
class ONNXRuntimeRestorer(nn.Module):
    """Run a restoration model through ONNX Runtime.

    The base model's generator is swapped for a :class:`RestorerGenerator`
    so its pre/post-processing runs unchanged while the network forward
    pass is dispatched to ONNX Runtime.
    """

    def __init__(self, sess, io_binding, output_names, base_model):
        super().__init__()
        self.sess = sess
        self.io_binding = io_binding
        self.output_names = output_names
        self.base_model = base_model
        # Transparently reroute the generator through the ONNX session.
        base_model.generator = RestorerGenerator(self.sess, self.io_binding,
                                                 self.output_names)

    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class ONNXRuntimeEditing(nn.Module):
    """Load an ONNX model with ONNX Runtime and wrap it as an editing model.

    The ONNX session replaces the network forward pass, while shape
    restoration / evaluation is delegated to the base model built from
    ``cfg``. Only matting (``BaseMattor``) and restoration
    (``BasicRestorer``) models are supported.

    Args:
        onnx_file (str): Path to the serialized ONNX model.
        cfg: Config used to build the underlying base model.
        device_id (int): GPU device id used when CUDA execution is available.
    """
    def __init__(self, onnx_file, cfg, device_id):
        super(ONNXRuntimeEditing, self).__init__()
        ort_custom_op_path = ''
        try:
            from mmcv.ops import get_onnxruntime_op_path
            ort_custom_op_path = get_onnxruntime_op_path()
        except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, \
                you may have to build mmcv with ONNXRuntime from source.')
        session_options = ort.SessionOptions()
        # register custom op for onnxruntime
        if osp.exists(ort_custom_op_path):
            session_options.register_custom_ops_library(ort_custom_op_path)
        sess = ort.InferenceSession(onnx_file, session_options)
        providers = ['CPUExecutionProvider']
        options = [{}]
        is_cuda_available = ort.get_device() == 'GPU'
        if is_cuda_available:
            # Prefer the CUDA provider when this onnxruntime build offers it.
            providers.insert(0, 'CUDAExecutionProvider')
            options.insert(0, {'device_id': device_id})
        sess.set_providers(providers, options)
        self.sess = sess
        self.device_id = device_id
        self.io_binding = sess.io_binding()
        self.output_names = [_.name for _ in sess.get_outputs()]
        base_model = build_model(
            cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
        if isinstance(base_model, BaseMattor):
            wrapper_cls = ONNXRuntimeMattor
        elif isinstance(base_model, BasicRestorer):
            wrapper_cls = ONNXRuntimeRestorer
        else:
            # Previously an unsupported model type fell through and crashed
            # with an UnboundLocalError; fail with an explicit error instead.
            raise TypeError(
                'Only BaseMattor and BasicRestorer models are supported, '
                f'but got {type(base_model)}')
        self.wrapper = wrapper_cls(self.sess, self.io_binding,
                                   self.output_names, base_model)

    def forward(self, **kwargs):
        return self.wrapper(**kwargs)
| 4,767 | 34.318519 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/hooks/visualization.py | import os.path as osp
import mmcv
import torch
from mmcv.runner import HOOKS, Hook
from mmcv.runner.dist_utils import master_only
from torchvision.utils import save_image
@HOOKS.register_module()
class VisualizationHook(Hook):
    """Visualization hook.
    In this hook, we use the official api `save_image` in torchvision to save
    the visualization results.
    Args:
        output_dir (str): The file path to store visualizations.
        res_name_list (str): The list contains the name of results in outputs
            dict. The results in outputs dict must be a torch.Tensor with shape
            (n, c, h, w).
        interval (int): The interval of calling this hook. If set to -1,
            the visualization hook will not be called. Default: -1.
        filename_tmpl (str): Format string used to save images. The output file
            name will be formatted as this args. Default: 'iter_{}.png'.
        rerange (bool): Whether to rerange the output value from [-1, 1] to
            [0, 1]. We highly recommend users should preprocess the
            visualization results on their own. Here, we just provide a simple
            interface. Default: True.
        bgr2rgb (bool): Whether to reformat the channel dimension from BGR to
            RGB. The final image we will save is following RGB style.
            Default: True.
        nrow (int): The number of samples in a row. Default: 1.
        padding (int): The number of padding pixels between each samples.
            Default: 4.
    """
    def __init__(self,
                 output_dir,
                 res_name_list,
                 interval=-1,
                 filename_tmpl='iter_{}.png',
                 rerange=True,
                 bgr2rgb=True,
                 nrow=1,
                 padding=4):
        assert mmcv.is_list_of(res_name_list, str)
        self.output_dir = output_dir
        self.res_name_list = res_name_list
        self.interval = interval
        self.filename_tmpl = filename_tmpl
        self.bgr2rgb = bgr2rgb
        self.rerange = rerange
        self.nrow = nrow
        self.padding = padding
        # Create the output directory eagerly so later saves cannot fail on a
        # missing path.
        mmcv.mkdir_or_exist(self.output_dir)
    @master_only
    def after_train_iter(self, runner):
        """The behavior after each train iteration.
        Args:
            runner (object): The runner.
        """
        if not self.every_n_iters(runner, self.interval):
            return
        results = runner.outputs['results']
        filename = self.filename_tmpl.format(runner.iter + 1)
        # Keep only the requested result tensors. Note the order follows the
        # iteration order of ``results``, not ``res_name_list``.
        img_list = [x for k, x in results.items() if k in self.res_name_list]
        # Concatenate along width so each sample's results sit side by side.
        img_cat = torch.cat(img_list, dim=3).detach()
        if self.rerange:
            # Map [-1, 1] model output into [0, 1] for image saving.
            img_cat = ((img_cat + 1) / 2)
        if self.bgr2rgb:
            # Reverse the channel index order: BGR -> RGB.
            img_cat = img_cat[:, [2, 1, 0], ...]
        img_cat = img_cat.clamp_(0, 1)
        save_image(
            img_cat,
            osp.join(self.output_dir, filename),
            nrow=self.nrow,
            padding=self.padding)
| 3,050 | 34.894118 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/hooks/ema.py | import warnings
from copy import deepcopy
from functools import partial
import mmcv
import torch
from mmcv.parallel import is_module_wrapper
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class ExponentialMovingAverageHook(Hook):
    """Exponential Moving Average Hook.
    Exponential moving average is a trick that widely used in current GAN
    literature, e.g., PGGAN, StyleGAN, and BigGAN. This general idea of it is
    maintaining a model with the same architecture, but its parameters are
    updated as a moving average of the trained weights in the original model.
    In general, the model with moving averaged weights achieves better
    performance.
    Args:
        module_keys (str | tuple[str]): The name of the ema model. Note that we
            require these keys are followed by '_ema' so that we can easily
            find the original model by discarding the last four characters.
        interp_mode (str, optional): Mode of the interpolation method.
            Defaults to 'lerp'.
        interp_cfg (dict | None, optional): Set arguments of the interpolation
            function. Defaults to None.
        interval (int, optional): Evaluation interval (by iterations).
            Default: -1.
        start_iter (int, optional): Start iteration for ema. If the start
            iteration is not reached, the weights of ema model will maintain
            the same as the original one. Otherwise, its parameters are updated
            as a moving average of the trained weights in the original model.
            Default: 0.
    """
    def __init__(self,
                 module_keys,
                 interp_mode='lerp',
                 interp_cfg=None,
                 interval=-1,
                 start_iter=0):
        super().__init__()
        assert isinstance(module_keys, str) or mmcv.is_tuple_of(
            module_keys, str)
        # Normalize a single key to a one-element tuple.
        self.module_keys = (module_keys, ) if isinstance(module_keys,
                                                         str) else module_keys
        # sanity check for the format of module keys
        for k in self.module_keys:
            assert k.endswith(
                '_ema'), 'You should give keys that end with "_ema".'
        self.interp_mode = interp_mode
        # Deep-copied so later mutation of the caller's dict has no effect.
        self.interp_cfg = dict() if interp_cfg is None else deepcopy(
            interp_cfg)
        self.interval = interval
        self.start_iter = start_iter
        # The interpolation mode must name a method on this class (e.g. lerp).
        assert hasattr(
            self, interp_mode
        ), f'Currently, we do not support {self.interp_mode} for EMA.'
        self.interp_func = partial(
            getattr(self, interp_mode), **self.interp_cfg)
    @staticmethod
    def lerp(a, b, momentum=0.999, momentum_nontrainable=0., trainable=True):
        # Interpolate from ``a`` toward ``b`` with weight ``m``. Called below
        # as (current_param, ema_param), so m close to 1 keeps most of the
        # EMA state; non-trainable entries use momentum_nontrainable instead.
        m = momentum if trainable else momentum_nontrainable
        return a + (b - a) * m
    def every_n_iters(self, runner, n):
        # Before start_iter, report "due" on every iteration so
        # after_train_iter keeps the EMA weights an exact copy.
        if runner.iter < self.start_iter:
            return True
        return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False
    @torch.no_grad()
    def after_train_iter(self, runner):
        if not self.every_n_iters(runner, self.interval):
            return
        model = runner.model.module if is_module_wrapper(
            runner.model) else runner.model
        for key in self.module_keys:
            # get current ema states
            ema_net = getattr(model, key)
            states_ema = ema_net.state_dict(keep_vars=False)
            # get currently original states (keep_vars so requires_grad is
            # visible on each entry)
            net = getattr(model, key[:-4])
            states_orig = net.state_dict(keep_vars=True)
            for k, v in states_orig.items():
                if runner.iter < self.start_iter:
                    # Warm-up phase: mirror the original weights verbatim.
                    states_ema[k].data.copy_(v.data)
                else:
                    states_ema[k] = self.interp_func(
                        v, states_ema[k], trainable=v.requires_grad).detach()
            ema_net.load_state_dict(states_ema, strict=True)
    def before_run(self, runner):
        model = runner.model.module if is_module_wrapper(
            runner.model) else runner.model
        # sanity check for ema model
        for k in self.module_keys:
            if not hasattr(model, k) and not hasattr(model, k[:-4]):
                raise RuntimeError(
                    f'Cannot find both {k[:-4]} and {k} network for EMA hook.')
            if not hasattr(model, k) and hasattr(model, k[:-4]):
                # Fallback: clone the source network as the initial EMA model.
                setattr(model, k, deepcopy(getattr(model, k[:-4])))
                warnings.warn(
                    f'We do not suggest construct and initialize EMA model {k}'
                    ' in hook. You may explicitly define it by yourself.')
| 4,719 | 40.403509 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/utils/dist_utils.py | import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def sync_random_seed(seed=None, device='cuda'):
    """Make sure different ranks share the same seed.

    All workers must call this function, otherwise it will deadlock.
    This method is generally used in `DistributedSampler`, because the seed
    should be identical across all processes in the distributed group.

    Args:
        seed (int, Optional): The seed. Default to None.
        device (str): The device where the seed will be put on.
            Default to 'cuda'.

    Returns:
        int: Seed to be used.
    """
    if seed is None:
        seed = np.random.randint(2**31)
    assert isinstance(seed, int)
    rank, world_size = get_dist_info()
    # Single-process run: nothing to synchronize.
    if world_size == 1:
        return seed
    # Rank 0 broadcasts its seed; every other rank receives it.
    value = seed if rank == 0 else 0
    seed_tensor = torch.tensor(value, dtype=torch.int32, device=device)
    dist.broadcast(seed_tensor, src=0)
    return seed_tensor.item()
| 1,108 | 29.805556 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/optimizer/builder.py | from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
    """Build one or several optimizers from config.

    If every value of ``cfgs`` is itself a dict, one optimizer is built per
    key (each key naming a sub-module of ``model``) and a dict of
    optimizers is returned. Otherwise ``cfgs`` is treated as a single
    optimizer config for the whole model.

    Example of multiple optimizer configs::

        optimizer_cfg = dict(
            model1=dict(type='SGD', lr=lr),
            model2=dict(type='SGD', lr=lr))

    which returns
    ``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``.

    A single config such as ``dict(type='SGD', lr=lr)`` returns a bare
    ``torch.optim.Optimizer``.

    Args:
        model (:obj:`nn.Module`): The model with parameters to be optimized.
        cfgs (dict): The config dict of the optimizer.

    Returns:
        dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
            The initialized optimizers.
    """
    # unwrap DataParallel-style containers
    if hasattr(model, 'module'):
        model = model.module

    # all-dict values -> one optimizer per named sub-module
    if all(isinstance(cfg, dict) for cfg in cfgs.values()):
        return {
            name: build_optimizer(getattr(model, name), cfg.copy())
            for name, cfg in cfgs.items()
        }
    return build_optimizer(model, cfgs)
| 1,679 | 27.474576 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/base.py | from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.nn as nn
class BaseModel(nn.Module, metaclass=ABCMeta):
    """Base model.

    All models should subclass it.

    Subclasses must implement:

    - ``init_weights``: initialize the model weights.
    - ``forward_train``: forward pass used during training.
    - ``forward_test``: forward pass used during testing.
    - ``train_step``: run one optimization step during training.
    """

    @abstractmethod
    def init_weights(self):
        """Abstract method for initializing weight.

        All subclass should overwrite it.
        """

    @abstractmethod
    def forward_train(self, imgs, labels):
        """Abstract method for training forward.

        All subclass should overwrite it.
        """

    @abstractmethod
    def forward_test(self, imgs):
        """Abstract method for testing forward.

        All subclass should overwrite it.
        """

    def forward(self, imgs, labels, test_mode, **kwargs):
        """Dispatch to the train or test forward pass.

        Args:
            imgs (Tensor): Input image(s).
            labels (Tensor): Ground-truth label(s).
            test_mode (bool): Whether in test mode.
            kwargs (dict): Other arguments.

        Returns:
            Tensor: Forward results.
        """
        if not test_mode:
            return self.forward_train(imgs, labels, **kwargs)
        return self.forward_test(imgs, **kwargs)

    @abstractmethod
    def train_step(self, data_batch, optimizer):
        """Abstract method for one training step.

        All subclass should overwrite it.
        """

    def val_step(self, data_batch, **kwargs):
        """Run one validation step by delegating to ``forward_test``.

        Args:
            data_batch (dict): A batch of data.
            kwargs (dict): Other arguments for ``forward_test``.

        Returns:
            The output of ``forward_test``.
        """
        return self.forward_test(**data_batch, **kwargs)

    def parse_losses(self, losses):
        """Reduce a dict of raw losses to a scalar plus a logging dict.

        Every value is mean-reduced; entries whose key contains ``'loss'``
        are summed into the total. The logging dict holds plain floats.

        Args:
            losses (dict): Maps names to loss tensors (or lists of them).

        Returns:
            tuple: ``(loss, log_vars)`` where ``loss`` is the summed total
            (a tensor) and ``log_vars`` maps every entry, plus ``'loss'``,
            to a float.
        """
        log_vars = OrderedDict()
        for name, value in losses.items():
            if isinstance(value, torch.Tensor):
                log_vars[name] = value.mean()
            elif isinstance(value, list):
                log_vars[name] = sum(part.mean() for part in value)
            else:
                raise TypeError(
                    f'{name} is not a tensor or list of tensors')

        # Only entries whose name mentions 'loss' contribute to the total;
        # everything else (e.g. metrics) is logged but not optimized.
        total = sum(value for key, value in log_vars.items()
                    if 'loss' in key)
        log_vars['loss'] = total

        # convert tensors to plain floats for logging
        for key in log_vars:
            log_vars[key] = log_vars[key].item()

        return total, log_vars
| 2,948 | 26.820755 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/registry.py | from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
# Root registry for this project's models; a child of MMCV's global MODELS
# registry so lookups can fall back to / interoperate with mmcv.
MODELS = Registry('model', parent=MMCV_MODELS)
# Backbones, components and losses all share that single registry, so one
# namespace covers every registrable module in this project.
BACKBONES = MODELS
COMPONENTS = MODELS
LOSSES = MODELS
| 226 | 24.222222 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/builder.py | import torch.nn as nn
from mmcv import build_from_cfg
from .registry import BACKBONES, COMPONENTS, LOSSES, MODELS
def build(cfg, registry, default_args=None):
    """Build a module (or a chain of modules) from config.

    Args:
        cfg (dict | list[dict]): A single module config, or a list of
            configs whose built modules are wrapped into ``nn.Sequential``.
        registry (obj): ``registry`` object to look the module type up in.
        default_args (dict, optional): Default arguments forwarded to every
            built module. Defaults to None.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    modules = [build_from_cfg(item, registry, default_args) for item in cfg]
    return nn.Sequential(*modules)
def build_backbone(cfg):
    """Build backbone.

    Args:
        cfg (dict): Configuration for building backbone.

    Returns:
        The backbone module constructed from ``cfg`` via the BACKBONES
        registry.
    """
    return build(cfg, BACKBONES)
def build_component(cfg):
    """Build component.

    Args:
        cfg (dict): Configuration for building component.

    Returns:
        The component module constructed from ``cfg`` via the COMPONENTS
        registry.
    """
    return build(cfg, COMPONENTS)
def build_loss(cfg):
    """Build loss.

    Args:
        cfg (dict): Configuration for building loss.

    Returns:
        The loss module constructed from ``cfg`` via the LOSSES registry.
    """
    return build(cfg, LOSSES)
def build_model(cfg, train_cfg=None, test_cfg=None):
    """Build model.

    Args:
        cfg (dict): Configuration for building model.
        train_cfg (dict): Training configuration, injected into the model
            as a default argument. Default: None.
        test_cfg (dict): Testing configuration, injected likewise.
            Default: None.
    """
    default_args = dict(train_cfg=train_cfg, test_cfg=test_cfg)
    return build(cfg, MODELS, default_args)
| 1,482 | 23.311475 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/restorers/basicvsr.py | import numbers
import os.path as osp
import mmcv
import numpy as np
import torch
from mmedit.core import tensor2img
from ..registry import MODELS
from .basic_restorer import BasicRestorer
@MODELS.register_module()
class BasicVSR(BasicRestorer):
    """BasicVSR model for video super-resolution.

    Note that this model is used for IconVSR.

    Paper:
    BasicVSR: The Search for Essential Components in Video Super-Resolution
    and Beyond, CVPR, 2021

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        ensemble (dict): Config for ensemble. Default: None.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    def __init__(self,
                 generator,
                 pixel_loss,
                 ensemble=None,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__(generator, pixel_loss, train_cfg, test_cfg,
                         pretrained)

        # fix pre-trained networks: number of initial iterations during
        # which 'spynet'/'edvr' sub-modules stay frozen (see train_step)
        self.fix_iter = train_cfg.get('fix_iter', 0) if train_cfg else 0
        self.is_weight_fixed = False

        # count training steps; a registered buffer so the counter is
        # saved/restored together with checkpoints
        self.register_buffer('step_counter', torch.zeros(1))

        # ensemble: optional test-time augmentation wrapper
        self.forward_ensemble = None
        if ensemble is not None:
            if ensemble['type'] == 'SpatialTemporalEnsemble':
                # local import: only needed when this ensemble is requested
                from mmedit.models.common.ensemble import \
                    SpatialTemporalEnsemble
                is_temporal = ensemble.get('is_temporal_ensemble', False)
                self.forward_ensemble = SpatialTemporalEnsemble(is_temporal)
            else:
                raise NotImplementedError(
                    'Currently support only '
                    '"SpatialTemporalEnsemble", but got type '
                    f'[{ensemble["type"]}]')

    def check_if_mirror_extended(self, lrs):
        """Check whether the input is a mirror-extended sequence.

        If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
        (t-1-i)-th frame.

        Args:
            lrs (tensor): Input LR images with shape (n, t, c, h, w)
        """
        is_mirror_extended = False
        if lrs.size(1) % 2 == 0:
            # a mirrored sequence equals its own time reversal, so the
            # first half must match the flipped second half exactly
            lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
            if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
                is_mirror_extended = True
        return is_mirror_extended

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        # fix SPyNet and EDVR at the beginning
        if self.step_counter < self.fix_iter:
            if not self.is_weight_fixed:
                # freeze only once; the flag avoids redoing it every step
                self.is_weight_fixed = True
                for k, v in self.generator.named_parameters():
                    if 'spynet' in k or 'edvr' in k:
                        v.requires_grad_(False)
        elif self.step_counter == self.fix_iter:
            # train all the parameters (unfrozen exactly once at fix_iter)
            self.generator.requires_grad_(True)

        outputs = self(**data_batch, test_mode=False)
        loss, log_vars = self.parse_losses(outputs.pop('losses'))

        # optimize
        optimizer['generator'].zero_grad()
        loss.backward()
        optimizer['generator'].step()

        self.step_counter += 1

        outputs.update({'log_vars': log_vars})
        return outputs

    def evaluate(self, output, gt):
        """Evaluation function.

        If the output contains multiple frames, we compute the metric
        one by one and take an average.

        Args:
            output (Tensor): Model output with shape (n, t, c, h, w).
            gt (Tensor): GT Tensor with shape (n, t, c, h, w).

        Returns:
            dict: Evaluation results.
        """
        crop_border = self.test_cfg.crop_border
        convert_to = self.test_cfg.get('convert_to', None)

        eval_result = dict()
        for metric in self.test_cfg.metrics:
            if output.ndim == 5:  # a sequence: (n, t, c, h, w)
                # per-frame metric, averaged over the temporal axis
                avg = []
                for i in range(0, output.size(1)):
                    output_i = tensor2img(output[:, i, :, :, :])
                    gt_i = tensor2img(gt[:, i, :, :, :])
                    avg.append(self.allowed_metrics[metric](
                        output_i, gt_i, crop_border, convert_to=convert_to))
                eval_result[metric] = np.mean(avg)
            elif output.ndim == 4:  # an image: (n, c, h, w), for Vimeo-90K-T
                output_img = tensor2img(output)
                gt_img = tensor2img(gt)
                value = self.allowed_metrics[metric](
                    output_img, gt_img, crop_border, convert_to=convert_to)
                eval_result[metric] = value
        return eval_result

    def forward_test(self,
                     lq,
                     gt=None,
                     meta=None,
                     save_image=False,
                     save_path=None,
                     iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, t, c, h, w).
            gt (Tensor): GT Tensor with shape (n, t, c, h, w). Default: None.
            meta (list[dict]): Meta information of the input frames;
                ``meta[0]['key']`` determines output file names when saving.
                Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        with torch.no_grad():
            if self.forward_ensemble is not None:
                output = self.forward_ensemble(lq, self.generator)
            else:
                output = self.generator(lq)

        # If the GT is an image (i.e. the center frame), the output sequence is
        # turned to an image.
        if gt is not None and gt.ndim == 4:
            t = output.size(1)
            if self.check_if_mirror_extended(lq):  # with mirror extension
                # the center frame is predicted twice (once per half of the
                # mirrored sequence); average the two predictions
                output = 0.5 * (output[:, t // 4] + output[:, -1 - t // 4])
            else:  # without mirror extension
                output = output[:, t // 2]

        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        # save image
        if save_image:
            if output.ndim == 4:  # an image, key = 000001/0000 (Vimeo-90K)
                img_name = meta[0]['key'].replace('/', '_')
                if isinstance(iteration, numbers.Number):
                    save_path = osp.join(
                        save_path, f'{img_name}-{iteration + 1:06d}.png')
                elif iteration is None:
                    save_path = osp.join(save_path, f'{img_name}.png')
                else:
                    raise ValueError('iteration should be number or None, '
                                     f'but got {type(iteration)}')
                mmcv.imwrite(tensor2img(output), save_path)
            elif output.ndim == 5:  # a sequence, key = 000
                folder_name = meta[0]['key'].split('/')[0]
                for i in range(0, output.size(1)):
                    if isinstance(iteration, numbers.Number):
                        save_path_i = osp.join(
                            save_path, folder_name,
                            f'{i:08d}-{iteration + 1:06d}.png')
                    elif iteration is None:
                        save_path_i = osp.join(save_path, folder_name,
                                               f'{i:08d}.png')
                    else:
                        raise ValueError('iteration should be number or None, '
                                         f'but got {type(iteration)}')
                    mmcv.imwrite(
                        tensor2img(output[:, i, :, :, :]), save_path_i)

        return results
| 8,430 | 36.471111 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/restorers/basic_restorer.py | import numbers
import os.path as osp
import mmcv
from mmcv.runner import auto_fp16
from mmedit.core import psnr, ssim, tensor2img
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
@MODELS.register_module()
class BasicRestorer(BaseModel):
    """Basic model for image restoration.

    It must contain a generator that takes an image as inputs and outputs a
    restored image. It also has a pixel-wise loss for training.

    The subclasses should overwrite the function `forward_train`,
    `forward_test` and `train_step`.

    Args:
        generator (dict): Config for the generator structure.
        pixel_loss (dict): Config for pixel-wise loss.
        train_cfg (dict): Config for training. Default: None.
        test_cfg (dict): Config for testing. Default: None.
        pretrained (str): Path for pretrained model. Default: None.
    """

    # metric name (as listed in test_cfg.metrics) -> metric function
    allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}

    def __init__(self,
                 generator,
                 pixel_loss,
                 train_cfg=None,
                 test_cfg=None,
                 pretrained=None):
        super().__init__()

        self.train_cfg = train_cfg
        self.test_cfg = test_cfg

        # support fp16 (toggled externally; read by the auto_fp16 decorator)
        self.fp16_enabled = False

        # generator
        self.generator = build_backbone(generator)
        self.init_weights(pretrained)

        # loss
        self.pixel_loss = build_loss(pixel_loss)

    def init_weights(self, pretrained=None):
        """Init weights for models.

        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults to None.
        """
        self.generator.init_weights(pretrained)

    @auto_fp16(apply_to=('lq', ))
    def forward(self, lq, gt=None, test_mode=False, **kwargs):
        """Forward function.

        Dispatches to ``forward_test`` or ``forward_train``.

        Args:
            lq (Tensor): Input lq images.
            gt (Tensor): Ground-truth image. Default: None.
            test_mode (bool): Whether in test mode or not. Default: False.
            kwargs (dict): Other arguments.
        """
        if test_mode:
            return self.forward_test(lq, gt, **kwargs)
        return self.forward_train(lq, gt)

    def forward_train(self, lq, gt):
        """Training forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w).

        Returns:
            dict: Output dict containing the losses, the number of samples
                and the (detached, CPU) input/output/gt tensors.
        """
        losses = dict()
        output = self.generator(lq)
        loss_pix = self.pixel_loss(output, gt)
        losses['loss_pix'] = loss_pix
        outputs = dict(
            losses=losses,
            num_samples=len(gt.data),
            results=dict(lq=lq.cpu(), gt=gt.cpu(), output=output.cpu()))
        return outputs

    def evaluate(self, output, gt):
        """Evaluation function.

        Args:
            output (Tensor): Model output with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w).

        Returns:
            dict: Evaluation results.
        """
        crop_border = self.test_cfg.crop_border

        # metrics operate on uint8-style images, not tensors
        output = tensor2img(output)
        gt = tensor2img(gt)

        eval_result = dict()
        for metric in self.test_cfg.metrics:
            eval_result[metric] = self.allowed_metrics[metric](output, gt,
                                                               crop_border)
        return eval_result

    def forward_test(self,
                     lq,
                     gt=None,
                     meta=None,
                     save_image=False,
                     save_path=None,
                     iteration=None):
        """Testing forward function.

        Args:
            lq (Tensor): LQ Tensor with shape (n, c, h, w).
            gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
            meta (list[dict]): Meta information; ``meta[0]['lq_path']``
                determines the output file name when saving. Default: None.
            save_image (bool): Whether to save image. Default: False.
            save_path (str): Path to save image. Default: None.
            iteration (int): Iteration for the saving image name.
                Default: None.

        Returns:
            dict: Output results.
        """
        output = self.generator(lq)
        if self.test_cfg is not None and self.test_cfg.get('metrics', None):
            assert gt is not None, (
                'evaluation with metrics must have gt images.')
            results = dict(eval_result=self.evaluate(output, gt))
        else:
            results = dict(lq=lq.cpu(), output=output.cpu())
            if gt is not None:
                results['gt'] = gt.cpu()

        # save image
        if save_image:
            lq_path = meta[0]['lq_path']
            folder_name = osp.splitext(osp.basename(lq_path))[0]
            if isinstance(iteration, numbers.Number):
                save_path = osp.join(save_path, folder_name,
                                     f'{folder_name}-{iteration + 1:06d}.png')
            elif iteration is None:
                save_path = osp.join(save_path, f'{folder_name}.png')
            else:
                raise ValueError('iteration should be number or None, '
                                 f'but got {type(iteration)}')
            mmcv.imwrite(tensor2img(output), save_path)

        return results

    def forward_dummy(self, img):
        """Used for computing network FLOPs.

        Args:
            img (Tensor): Input image.

        Returns:
            Tensor: Output image.
        """
        out = self.generator(img)
        return out

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        outputs = self(**data_batch, test_mode=False)
        loss, log_vars = self.parse_losses(outputs.pop('losses'))

        # optimize
        optimizer['generator'].zero_grad()
        loss.backward()
        optimizer['generator'].step()

        outputs.update({'log_vars': log_vars})
        return outputs

    def val_step(self, data_batch, **kwargs):
        """Validation step.

        Args:
            data_batch (dict): A batch of data.
            kwargs (dict): Other arguments for ``val_step``.

        Returns:
            dict: Returned output.
        """
        output = self.forward_test(**data_batch, **kwargs)
        return output
| 6,558 | 30.085308 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/flow_warp.py | import torch
import torch.nn.functional as F
def flow_warp(x,
              flow,
              interpolation='bilinear',
              padding_mode='zeros',
              align_corners=True):
    """Backward-warp an image or feature map with an optical flow field.

    Args:
        x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2). The last dimension
            holds (dx, dy) offsets in (unnormalized) pixel units.
        interpolation (str): Interpolation mode: 'nearest' or 'bilinear'.
            Default: 'bilinear'.
        padding_mode (str): Padding mode: 'zeros' or 'border' or 'reflection'.
            Default: 'zeros'.
        align_corners (bool): Whether align corners. Default: True.

    Returns:
        Tensor: Warped image or feature map.

    Raises:
        ValueError: If the spatial sizes of ``x`` and ``flow`` differ.
    """
    if x.size()[-2:] != flow.size()[1:3]:
        raise ValueError(f'The spatial sizes of input ({x.size()[-2:]}) and '
                         f'flow ({flow.size()[1:3]}) are not the same.')
    _, _, h, w = x.size()

    # Base sampling grid of absolute (x, y) pixel coordinates.
    ys, xs = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
    base_grid = torch.stack((xs, ys), 2).type_as(x)  # (h, w, 2)
    base_grid.requires_grad = False

    coords = base_grid + flow
    # Normalize absolute coordinates to the [-1, 1] range grid_sample expects.
    norm_x = 2.0 * coords[:, :, :, 0] / max(w - 1, 1) - 1.0
    norm_y = 2.0 * coords[:, :, :, 1] / max(h - 1, 1) - 1.0
    sample_grid = torch.stack((norm_x, norm_y), dim=3)

    return F.grid_sample(
        x,
        sample_grid,
        mode=interpolation,
        padding_mode=padding_mode,
        align_corners=align_corners)
| 1,781 | 36.125 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/aspp.py | import torch
from mmcv.cnn import ConvModule
from torch import nn
from torch.nn import functional as F
from .separable_conv_module import DepthwiseSeparableConvModule
class ASPPPooling(nn.Sequential):
    """Image-level pooling branch of ASPP.

    Global average pooling followed by a 1x1 conv, with the result
    bilinearly upsampled back to the input's spatial size.
    """

    def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
        super().__init__(
            nn.AdaptiveAvgPool2d(1),
            ConvModule(
                in_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg))

    def forward(self, x):
        out = x
        for layer in self:
            out = layer(out)
        # restore the spatial size collapsed by the global pooling
        return F.interpolate(
            out, size=x.shape[-2:], mode='bilinear', align_corners=False)
class ASPP(nn.Module):
    """Atrous Spatial Pyramid Pooling module from DeepLabV3.

    The code is adopted from
    https://github.com/pytorch/vision/blob/master/torchvision/models/
    segmentation/deeplabv3.py

    For more information about the module:
    `"Rethinking Atrous Convolution for Semantic Image Segmentation"
    <https://arxiv.org/abs/1706.05587>`_.

    Args:
        in_channels (int): Input channels of the module.
        out_channels (int): Output channels of the module. Default: 256.
        mid_channels (int): Output channels of the intermediate ASPP conv
            modules. Default: 256.
        dilations (Sequence[int]): Dilation rates of the three dilated ASPP
            conv modules. Default: (12, 24, 36).
        conv_cfg (dict): Config dict for convolution layer. If "None",
            nn.Conv2d will be applied. Default: None.
        norm_cfg (dict): Config dict for normalization layer.
            Default: dict(type='BN').
        act_cfg (dict): Config dict for activation layer.
            Default: dict(type='ReLU').
        separable_conv (bool): Whether to replace normal convs with depthwise
            separable convs, which are faster. Default: False.
    """

    def __init__(self,
                 in_channels,
                 out_channels=256,
                 mid_channels=256,
                 dilations=(12, 24, 36),
                 conv_cfg=None,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='ReLU'),
                 separable_conv=False):
        super().__init__()
        conv_module = (DepthwiseSeparableConvModule
                       if separable_conv else ConvModule)

        # branch 0: plain 1x1 projection
        branches = [
            ConvModule(
                in_channels,
                mid_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg)
        ]
        # next branches: 3x3 convs with increasing dilation
        branches.extend(
            conv_module(
                in_channels,
                mid_channels,
                3,
                padding=dilation,
                dilation=dilation,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg) for dilation in dilations)
        # last branch: image-level pooling
        branches.append(
            ASPPPooling(in_channels, mid_channels, conv_cfg, norm_cfg,
                        act_cfg))
        self.convs = nn.ModuleList(branches)

        # 5 concatenated branches (1x1 + three dilated + pooling) feed the
        # projection, hence the 5 * mid_channels input width
        self.project = nn.Sequential(
            ConvModule(
                5 * mid_channels,
                out_channels,
                1,
                conv_cfg=conv_cfg,
                norm_cfg=norm_cfg,
                act_cfg=act_cfg), nn.Dropout(0.5))

    def forward(self, x):
        """Forward function for ASPP module.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        pyramid = [conv(x) for conv in self.convs]
        return self.project(torch.cat(pyramid, dim=1))
| 3,861 | 29.650794 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/sr_backbone_utils.py | import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
def default_init_weights(module, scale=1):
    """Initialize network weights.

    Conv2d and Linear layers receive Kaiming initialization whose weights
    are then multiplied by ``scale`` (useful for residual blocks);
    batch-norm layers receive constant initialization.

    Args:
        module (nn.Module): Module to be initialized.
        scale (float): Scale for the initialized conv/linear weights,
            especially for residual blocks.
    """
    for layer in module.modules():
        if isinstance(layer, (nn.Conv2d, nn.Linear)):
            kaiming_init(layer, a=0, mode='fan_in', bias=0)
            layer.weight.data *= scale
        elif isinstance(layer, _BatchNorm):
            constant_init(layer.weight, val=1, bias=0)
def make_layer(block, num_blocks, **kwarg):
    """Stack ``num_blocks`` instances of ``block`` into an ``nn.Sequential``.

    Args:
        block (nn.module): nn.module class for the basic block.
        num_blocks (int): Number of blocks to stack.
        kwarg: Keyword arguments forwarded to each block's constructor.

    Returns:
        nn.Sequential: Stacked blocks in nn.Sequential.
    """
    return nn.Sequential(*[block(**kwarg) for _ in range(num_blocks)])
class ResidualBlockNoBN(nn.Module):
    """Residual block without batch normalization.

    Structure::

        ---Conv-ReLU-Conv-+-
           |______________|

    Args:
        mid_channels (int): Channel number of intermediate features.
            Default: 64.
        res_scale (float): Scale applied to the residual branch before the
            skip addition. Default: 1.0.
    """

    def __init__(self, mid_channels=64, res_scale=1.0):
        super().__init__()
        self.res_scale = res_scale
        self.conv1 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
        self.conv2 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
        self.relu = nn.ReLU(inplace=True)

        # res_scale == 1.0 (MSRResNet style) uses the scaled kaiming init;
        # any other value (e.g. EDSR) keeps PyTorch's default init.
        if res_scale == 1.0:
            self.init_weights()

    def init_weights(self):
        """Initialize both convs with a 0.1-scaled Kaiming init.

        Initialization methods like `kaiming_init` are for VGG-style
        modules. For modules with residual paths, using smaller std is
        better for stability and performance. We empirically use 0.1.
        See more details in "ESRGAN: Enhanced Super-Resolution Generative
        Adversarial Networks".
        """
        for conv in (self.conv1, self.conv2):
            default_init_weights(conv, 0.1)

    def forward(self, x):
        """Apply the residual block.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: ``x + res_scale * conv2(relu(conv1(x)))``.
        """
        residual = self.conv2(self.relu(self.conv1(x))) * self.res_scale
        return x + residual
| 2,919 | 28.795918 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/model_utils.py | import numpy as np
import torch
def set_requires_grad(nets, requires_grad=False):
    """Enable or disable gradients for all parameters of the given networks.

    Args:
        nets (nn.Module | list[nn.Module]): A list of networks or a single
            network; ``None`` entries are skipped.
        requires_grad (bool): Whether the networks require gradients or not.
    """
    if not isinstance(nets, list):
        nets = [nets]
    for net in nets:
        if net is None:
            continue
        for param in net.parameters():
            param.requires_grad = requires_grad
def extract_bbox_patch(bbox, img, channel_first=True):
    """Crop the patch described by ``bbox`` out of ``img``.

    Args:
        bbox (torch.Tensor | numpy.array): Bbox as (top, left, h, w). If
            ``img`` has a batch dimension, the bboxes must be stacked along
            the first dimension, i.e. shape (n, 4); otherwise shape (4,).
        img (torch.Tensor | numpy.array): Image data, either a single sample
            or a batch with the batch dimension first, e.g. (n, c, h, w) or
            (n, h, w, c).
        channel_first (bool): If True, a sample is laid out as (c, h, w);
            otherwise (h, w, c).

    Returns:
        (torch.Tensor | numpy.array): Extracted patch(es), with the same \
            number of dimensions as ``img``.
    """

    def _crop(single_bbox, single_img):
        assert len(single_bbox) == 4
        top, left, height, width = single_bbox
        if channel_first:
            return single_img[..., top:top + height, left:left + width]
        return single_img[top:top + height, left:left + width, ...]

    ndim = len(img.shape)
    assert ndim in (3, 4)
    bbox_size = bbox.shape
    assert bbox_size == (4, ) or (len(bbox_size) == 2
                                  and bbox_size[0] == img.shape[0])

    # single sample: crop directly
    if ndim == 3:
        return _crop(bbox, img)

    # batched input: crop each sample with its own bbox, keep the batch dim
    patches = [_crop(bbox[i], img[i:i + 1, ...]) for i in range(img.shape[0])]
    if isinstance(img, torch.Tensor):
        return torch.cat(patches, dim=0)
    return np.concatenate(patches, axis=0)
def scale_bbox(bbox, target_size):
    """Enlarge bbox(es) to ``target_size`` with the original box centered.

    Args:
        bbox (np.ndarray | torch.Tensor): Bbox(es) as (top, left, h, w),
            in batch (shape (n, 4)) or not (shape (4,)).
        target_size (tuple[int]): Target (h, w) of the resulting bbox.

    Returns:
        (np.ndarray | torch.Tensor): Modified bboxes.

    Raises:
        TypeError: If ``bbox`` is neither a torch.Tensor nor an ndarray.
    """

    def _enlarge(single_bbox, size):
        top_ori, left_ori, h_ori, w_ori = single_bbox
        h, w = size
        assert h >= h_ori and w >= w_ori
        # center the original box inside the enlarged one, clamped at 0
        top = int(max(0, top_ori - (h - h_ori) // 2))
        left = int(max(0, left_ori - (w - w_ori) // 2))
        if isinstance(single_bbox, torch.Tensor):
            return torch.Tensor([top, left, h, w]).type_as(single_bbox)
        return np.asarray([top, left, h, w])

    if isinstance(bbox, torch.Tensor):
        result = torch.zeros_like(bbox)
    elif isinstance(bbox, np.ndarray):
        result = np.zeros_like(bbox)
    else:
        raise TypeError('bbox mush be torch.Tensor or numpy.ndarray'
                        f'but got type {type(bbox)}')

    if len(bbox.shape) == 2:
        for i in range(bbox.shape[0]):
            result[i, :] = _enlarge(bbox[i], target_size)
    else:
        result = _enlarge(bbox, target_size)
    return result
def extract_around_bbox(img, bbox, target_size, channel_first=True):
    """Crop ``target_size`` patches centered around the given bbox(es).

    Args:
        img (torch.Tensor | numpy.array): Image data (single sample or
            batch).
        bbox (np.ndarray | torch.Tensor): Bbox(es) as (top, left, h, w),
            in batch or not.
        target_size (List(int)): Target (h, w) of the enlarged bbox.
        channel_first (bool): Sample layout; see ``extract_bbox_patch``.

    Returns:
        tuple: The extracted patch(es) with the same number of dimensions \
            as ``img``, and the enlarged bbox(es).
    """
    enlarged_bbox = scale_bbox(bbox, target_size)
    patch = extract_bbox_patch(enlarged_bbox, img, channel_first=channel_first)
    return patch, enlarged_bbox
| 4,502 | 31.868613 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/separable_conv_module.py | import torch.nn as nn
from mmcv.cnn import ConvModule
class DepthwiseSeparableConvModule(nn.Module):
    """Depthwise separable convolution module.

    See https://arxiv.org/pdf/1704.04861.pdf for details.

    A drop-in replacement for a ConvModule whose conv is split into two
    stages: a depthwise conv block (depthwise-conv/norm/activation) followed
    by a pointwise conv block (1x1-conv/norm/activation). Norm/activation
    layers appear in each stage only when the corresponding config is given.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.
        dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.
        norm_cfg (dict): Default norm config shared by both stages.
            Default: None.
        act_cfg (dict): Default activation config shared by both stages.
            Default: dict(type='ReLU').
        dw_norm_cfg (dict): Norm config of the depthwise stage; the string
            'default' means "inherit ``norm_cfg``". Default: 'default'.
        dw_act_cfg (dict): Activation config of the depthwise stage;
            'default' inherits ``act_cfg``. Default: 'default'.
        pw_norm_cfg (dict): Norm config of the pointwise stage; 'default'
            inherits ``norm_cfg``. Default: 'default'.
        pw_act_cfg (dict): Activation config of the pointwise stage;
            'default' inherits ``act_cfg``. Default: 'default'.
        kwargs (optional): Other shared arguments for both ConvModules.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 stride=1,
                 padding=0,
                 dilation=1,
                 norm_cfg=None,
                 act_cfg=dict(type='ReLU'),
                 dw_norm_cfg='default',
                 dw_act_cfg='default',
                 pw_norm_cfg='default',
                 pw_act_cfg='default',
                 **kwargs):
        super().__init__()
        assert 'groups' not in kwargs, 'groups should not be specified'

        # 'default' means: inherit the shared norm/act config
        def _pick(cfg, shared):
            return shared if cfg == 'default' else cfg

        self.depthwise_conv = ConvModule(
            in_channels,
            in_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation,
            groups=in_channels,  # one filter group per input channel
            norm_cfg=_pick(dw_norm_cfg, norm_cfg),
            act_cfg=_pick(dw_act_cfg, act_cfg),
            **kwargs)
        self.pointwise_conv = ConvModule(
            in_channels,
            out_channels,
            1,
            norm_cfg=_pick(pw_norm_cfg, norm_cfg),
            act_cfg=_pick(pw_act_cfg, act_cfg),
            **kwargs)

    def forward(self, x):
        """Apply depthwise then pointwise convolution.

        Args:
            x (Tensor): Input tensor with shape (N, C, H, W).

        Returns:
            Tensor: Output tensor.
        """
        return self.pointwise_conv(self.depthwise_conv(x))
| 3,907 | 38.877551 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/linear_module.py | import torch.nn as nn
from mmcv.cnn import build_activation_layer, kaiming_init
class LinearModule(nn.Module):
    """A linear block bundling linear and activation layers.

    For low level vision, spectral norm support is added on the linear
    layer.

    Args:
        in_features (int): Same as nn.Linear.
        out_features (int): Same as nn.Linear.
        bias (bool): Same as nn.Linear.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        inplace (bool): Whether to use inplace mode for activation.
        with_spectral_norm (bool): Whether to wrap the linear layer in
            spectral norm.
        order (tuple[str]): The order of linear/activation layers: a
            two-element permutation of ("linear", "act"), e.g.
            ("linear", "act") or ("act", "linear").
    """

    def __init__(self,
                 in_features,
                 out_features,
                 bias=True,
                 act_cfg=dict(type='ReLU'),
                 inplace=True,
                 with_spectral_norm=False,
                 order=('linear', 'act')):
        super().__init__()
        assert act_cfg is None or isinstance(act_cfg, dict)
        assert isinstance(order, tuple) and len(order) == 2
        assert set(order) == set(['linear', 'act'])

        self.act_cfg = act_cfg
        self.inplace = inplace
        self.with_spectral_norm = with_spectral_norm
        self.order = order
        self.with_activation = act_cfg is not None
        self.with_bias = bias

        # build linear layer
        self.linear = nn.Linear(in_features, out_features, bias=bias)
        # mirror nn.Linear attributes at this level for convenience
        self.in_features = self.linear.in_features
        self.out_features = self.linear.out_features
        if with_spectral_norm:
            self.linear = nn.utils.spectral_norm(self.linear)

        # build activation layer
        if self.with_activation:
            cfg = act_cfg.copy()
            cfg.setdefault('inplace', inplace)
            self.activate = build_activation_layer(cfg)

        # Use msra init by default
        self.init_weights()

    def init_weights(self):
        """Kaiming-initialize the linear layer, matched to the activation."""
        if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
            nonlinearity = 'leaky_relu'
            a = self.act_cfg.get('negative_slope', 0.01)
        else:
            nonlinearity, a = 'relu', 0
        kaiming_init(self.linear, a=a, nonlinearity=nonlinearity)

    def forward(self, x, activate=True):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of :math:`(n, *, c)`.
                Same as ``torch.nn.Linear``.
            activate (bool, optional): Whether to use the activation layer.
                Defaults to True.

        Returns:
            torch.Tensor: Same as ``torch.nn.Linear``.
        """
        out = x
        for layer in self.order:
            if layer == 'linear':
                out = self.linear(out)
            elif layer == 'act' and activate and self.with_activation:
                out = self.activate(out)
        return out
| 3,204 | 34.611111 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/contextual_attention.py | from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContextualAttentionModule(nn.Module):
    """Contextual attention module.

    The details of this module can be found in:
    Generative Image Inpainting with Contextual Attention

    Args:
        unfold_raw_kernel_size (int): Kernel size used in unfolding raw
            feature. Default: 4.
        unfold_raw_stride (int): Stride used in unfolding raw feature. Default:
            2.
        unfold_raw_padding (int): Padding used in unfolding raw feature.
            Default: 1.
        unfold_corr_kernel_size (int): Kernel size used in unfolding
            context for computing correlation maps. Default: 3.
        unfold_corr_stride (int): Stride used in unfolding context for
            computing correlation maps. Default: 1.
        unfold_corr_dilation (int): Dilation used in unfolding context for
            computing correlation maps. Default: 1.
        unfold_corr_padding (int): Padding used in unfolding context for
            computing correlation maps. Default: 1.
        scale (float): The rescale factor used in resize input features.
            Default: 0.5.
        fuse_kernel_size (int): The kernel size used in fusion module.
            Default: 3.
        softmax_scale (float): The scale factor for softmax function.
            Default: 10.
        return_attention_score (bool): If True, the attention score will be
            returned. Default: True.
    """

    def __init__(self,
                 unfold_raw_kernel_size=4,
                 unfold_raw_stride=2,
                 unfold_raw_padding=1,
                 unfold_corr_kernel_size=3,
                 unfold_corr_stride=1,
                 unfold_corr_dilation=1,
                 unfold_corr_padding=1,
                 scale=0.5,
                 fuse_kernel_size=3,
                 softmax_scale=10,
                 return_attention_score=True):
        super().__init__()
        self.unfold_raw_kernel_size = unfold_raw_kernel_size
        self.unfold_raw_stride = unfold_raw_stride
        self.unfold_raw_padding = unfold_raw_padding
        self.unfold_corr_kernel_size = unfold_corr_kernel_size
        self.unfold_corr_stride = unfold_corr_stride
        self.unfold_corr_dilation = unfold_corr_dilation
        self.unfold_corr_padding = unfold_corr_padding
        self.scale = scale
        self.fuse_kernel_size = fuse_kernel_size
        # fusion only makes sense for kernels larger than 1x1
        self.with_fuse_correlation = fuse_kernel_size > 1
        self.softmax_scale = softmax_scale
        self.return_attention_score = return_attention_score
        if self.with_fuse_correlation:
            assert fuse_kernel_size % 2 == 1
            # identity ('eye') kernel used by fuse_correlation_map to spread
            # attention along diagonals of the correlation map
            fuse_kernel = torch.eye(fuse_kernel_size).view(
                1, 1, fuse_kernel_size, fuse_kernel_size)
            self.register_buffer('fuse_kernel', fuse_kernel)
            padding = int((fuse_kernel_size - 1) // 2)
            self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
        # softmax over the h_unfold*w_unfold patch dimension
        self.softmax = nn.Softmax(dim=1)

    def forward(self, x, context, mask=None):
        """Forward Function.

        Args:
            x (torch.Tensor): Tensor with shape (n, c, h, w).
            context (torch.Tensor): Tensor with shape (n, c, h, w).
            mask (torch.Tensor): Tensor with shape (n, 1, h, w). Default: None.

        Returns:
            tuple(torch.Tensor): Features after contextural attention.
        """
        # raw features to be used in copy (deconv)
        raw_context = context
        raw_context_cols = self.im2col(
            raw_context,
            kernel_size=self.unfold_raw_kernel_size,
            stride=self.unfold_raw_stride,
            padding=self.unfold_raw_padding,
            normalize=False,
            return_cols=True)
        # resize the feature to reduce computational cost
        x = F.interpolate(x, scale_factor=self.scale)
        context = F.interpolate(context, scale_factor=self.scale)
        # context patches are L2-normalized so the correlation below measures
        # cosine similarity between x and each context patch
        context_cols = self.im2col(
            context,
            kernel_size=self.unfold_corr_kernel_size,
            stride=self.unfold_corr_stride,
            padding=self.unfold_corr_padding,
            dilation=self.unfold_corr_dilation,
            normalize=True,
            return_cols=True)
        h_unfold, w_unfold = self.calculate_unfold_hw(
            context.size()[-2:],
            kernel_size=self.unfold_corr_kernel_size,
            stride=self.unfold_corr_stride,
            padding=self.unfold_corr_padding,
            dilation=self.unfold_corr_dilation,
        )
        # reshape context_cols to
        # (n*h_unfold*w_unfold, c, unfold_mks, unfold_mks)
        # 'mks' is short for 'mask_kernel_size'
        context_cols = context_cols.reshape(-1, *context_cols.shape[2:])
        # the shape of correlation map should be:
        # (n, h_unfold*w_unfold, h', w')
        correlation_map = self.patch_correlation(x, context_cols)
        # fuse correlation map to enlarge consistent attention region.
        if self.with_fuse_correlation:
            correlation_map = self.fuse_correlation_map(
                correlation_map, h_unfold, w_unfold)
        correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
        attention_score = self.softmax(correlation_map * self.softmax_scale)
        raw_context_filter = raw_context_cols.reshape(
            -1, *raw_context_cols.shape[2:])
        # paste the raw (unnormalized, full-resolution) patches back,
        # weighted by the attention scores
        output = self.patch_copy_deconv(attention_score, raw_context_filter)
        # deconv will cause overlap and we need to remove the effects of that
        overlap_factor = self.calculate_overlap_factor(attention_score)
        output /= overlap_factor
        if self.return_attention_score:
            n, _, h_s, w_s = attention_score.size()
            attention_score = attention_score.view(n, h_unfold, w_unfold, h_s,
                                                   w_s)
            return output, attention_score
        return output

    def patch_correlation(self, x, kernel):
        """Calculate patch correlation.

        Args:
            x (torch.Tensor): Input tensor.
            kernel (torch.Tensor): Kernel tensor.

        Returns:
            torch.Tensor: Tensor with shape of (n, l, h, w).
        """
        n, _, h_in, w_in = x.size()
        # grouped conv: each sample in the batch is correlated only with the
        # patches extracted from its own context
        patch_corr = F.conv2d(
            x.view(1, -1, h_in, w_in),
            kernel,
            stride=self.unfold_corr_stride,
            padding=self.unfold_corr_padding,
            dilation=self.unfold_corr_dilation,
            groups=n)
        h_out, w_out = patch_corr.size()[-2:]
        return patch_corr.view(n, -1, h_out, w_out)

    def patch_copy_deconv(self, attention_score, context_filter):
        """Copy patches using deconv.

        Args:
            attention_score (torch.Tensor): Tensor with shape of (n, l , h, w).
            context_filter (torch.Tensor): Filter kernel.

        Returns:
            torch.Tensor: Tensor with shape of (n, c, h, w).
        """
        n, _, h, w = attention_score.size()
        # grouped transposed conv mirrors patch_correlation: per-sample paste
        attention_score = attention_score.view(1, -1, h, w)
        output = F.conv_transpose2d(
            attention_score,
            context_filter,
            stride=self.unfold_raw_stride,
            padding=self.unfold_raw_padding,
            groups=n)
        h_out, w_out = output.size()[-2:]
        return output.view(n, -1, h_out, w_out)

    def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
        """Fuse correlation map.

        This operation is to fuse correlation map for increasing large
        consistent correlation regions.

        The mechanism behind this op is simple and easy to understand. A
        standard 'Eye' matrix will be applied as a filter on the correlation
        map in horizontal and vertical direction.

        The shape of input correlation map is (n, h_unfold*w_unfold, h, w).
        When adopting fusing, we will apply convolutional filter in the
        reshaped feature map with shape of (n, 1, h_unfold*w_fold, h*w).

        A simple specification for horizontal direction is shown below:

        .. code-block:: python

                   (h, (h, (h, (h,
                    0)  1)  2)  3)  ...
            (h, 0)
            (h, 1)  1
            (h, 2)      1
            (h, 3)          1
            ...
        """
        # horizontal direction
        n, _, h_map, w_map = correlation_map.size()
        map_ = correlation_map.permute(0, 2, 3, 1)
        map_ = map_.reshape(n, h_map * w_map, h_unfold * w_unfold, 1)
        map_ = map_.permute(0, 3, 1, 2).contiguous()
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
        # vertical direction
        map_ = correlation_map.permute(0, 2, 1, 4,
                                       3).reshape(n, 1, h_unfold * w_unfold,
                                                  h_map * w_map)
        map_ = self.fuse_conv(map_, self.fuse_kernel)
        # Note that the dimension should be transposed since the convolution of
        # eye matrix will put the normed scores into the last several dimension
        correlation_map = map_.view(n, w_unfold, h_unfold, w_map,
                                    h_map).permute(0, 4, 3, 2, 1)
        correlation_map = correlation_map.reshape(n, -1, h_unfold, w_unfold)
        return correlation_map

    def calculate_unfold_hw(self,
                            input_size,
                            kernel_size=3,
                            stride=1,
                            dilation=1,
                            padding=0):
        """Calculate (h, w) after unfolding

        The official implementation of `unfold` in pytorch will put the
        dimension (h, w) into `L`. Thus, this function is just to calculate the
        (h, w) according to the equation in:
        https://pytorch.org/docs/stable/nn.html#torch.nn.Unfold
        """
        h_in, w_in = input_size
        h_unfold = int((h_in + 2 * padding - dilation *
                        (kernel_size - 1) - 1) / stride + 1)
        w_unfold = int((w_in + 2 * padding - dilation *
                        (kernel_size - 1) - 1) / stride + 1)
        return h_unfold, w_unfold

    def calculate_overlap_factor(self, attention_score):
        """Calculate the overlap factor after applying deconv.

        Args:
            attention_score (torch.Tensor): The attention score with shape of
                (n, c, h, w).

        Returns:
            torch.Tensor: The overlap factor will be returned.
        """
        h, w = attention_score.shape[-2:]
        kernel_size = self.unfold_raw_kernel_size
        # deconv of an all-ones map with an all-ones kernel counts how many
        # pasted patches cover each output pixel
        ones_input = torch.ones(1, 1, h, w).to(attention_score)
        ones_filter = torch.ones(1, 1, kernel_size,
                                 kernel_size).to(attention_score)
        overlap = F.conv_transpose2d(
            ones_input,
            ones_filter,
            stride=self.unfold_raw_stride,
            padding=self.unfold_raw_padding)
        # avoid division by zero
        overlap[overlap == 0] = 1.
        return overlap

    def mask_correlation_map(self, correlation_map, mask):
        """Add mask weight for correlation map.

        Add a negative infinity number to the masked regions so that softmax
        function will result in 'zero' in those regions.

        Args:
            correlation_map (torch.Tensor): Correlation map with shape of
                (n, h_unfold*w_unfold, h_map, w_map).
            mask (torch.Tensor): Mask tensor with shape of (n, c, h, w). '1'
                in the mask indicates masked region while '0' indicates valid
                region.

        Returns:
            torch.Tensor: Updated correlation map with mask.
        """
        if mask is not None:
            mask = F.interpolate(mask, scale_factor=self.scale)
            # if any pixel is masked in patch, the patch is considered to be
            # masked
            mask_cols = self.im2col(
                mask,
                kernel_size=self.unfold_corr_kernel_size,
                stride=self.unfold_corr_stride,
                padding=self.unfold_corr_padding,
                dilation=self.unfold_corr_dilation)
            mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
            mask_cols = mask_cols.permute(0, 2,
                                          1).reshape(mask.size(0), -1, 1, 1)
            # add negative inf will bring zero in softmax
            mask_cols[mask_cols == 1] = -float('inf')
            correlation_map += mask_cols
        return correlation_map

    def im2col(self,
               img,
               kernel_size,
               stride=1,
               padding=0,
               dilation=1,
               normalize=False,
               return_cols=False):
        """Reshape image-style feature to columns.

        This function is used for unfold feature maps to columns. The
        details of this function can be found in:
        https://pytorch.org/docs/1.1.0/nn.html?highlight=unfold#torch.nn.Unfold

        Args:
            img (torch.Tensor): Features to be unfolded. The shape of this
                feature should be (n, c, h, w).
            kernel_size (int): In this function, we only support square kernel
                with same height and width.
            stride (int): Stride number in unfolding. Default: 1.
            padding (int): Padding number in unfolding. Default: 0.
            dilation (int): Dilation number in unfolding. Default: 1.
            normalize (bool): If True, the unfolded feature will be normalized.
                Default: False.
            return_cols (bool): The official implementation in PyTorch of
                unfolding will return features with shape of
                (n, c*$prod{kernel_size}$, L). If True, the features will be
                reshaped to (n, L, c, kernel_size, kernel_size). Otherwise,
                the results will maintain the shape as the official
                implementation.

        Returns:
            torch.Tensor: Unfolded columns. If `return_cols` is True, the \
                shape of output tensor is \
                `(n, L, c, kernel_size, kernel_size)`. Otherwise, the shape \
                will be `(n, c*$prod{kernel_size}$, L)`.
        """
        # unfold img to columns with shape (n, c*kernel_size**2, num_cols)
        img_unfold = F.unfold(
            img,
            kernel_size,
            stride=stride,
            padding=padding,
            dilation=dilation)
        # normalize the feature map
        if normalize:
            # L2 norm per column, clamped by eps to avoid division by zero
            norm = torch.sqrt((img_unfold**2).sum(dim=1, keepdim=True))
            eps = torch.tensor([1e-4]).to(img)
            img_unfold = img_unfold / torch.max(norm, eps)
        if return_cols:
            img_unfold_ = img_unfold.permute(0, 2, 1)
            n, num_cols = img_unfold_.size()[:2]
            img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size,
                                        kernel_size)
            return img_cols
        return img_unfold
| 15,214 | 39.039474 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/gated_conv_module.py | import copy
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_activation_layer
class SimpleGatedConvModule(nn.Module):
    """Simple Gated Convolutional Module.

    Computes

    .. math::
        y = \\phi(conv1(x)) * \\sigma(conv2(x)),

    where :math:`\\phi` is the feature activation and :math:`\\sigma` is the
    gate activation (sigmoid by default). Both branches are produced by a
    single convolution with doubled output channels, then split.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): The number of channels of the output feature. Note
            that `out_channels` in the conv module is doubled since this module
            contains two convolutions for feature and gate separately.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        feat_act_cfg (dict): Config dict for feature activation layer.
        gate_act_cfg (dict): Config dict for gate activation layer.
        kwargs (keyword arguments): Same as `ConvModule`.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size,
                 feat_act_cfg=dict(type='ELU'),
                 gate_act_cfg=dict(type='Sigmoid'),
                 **kwargs):
        super().__init__()
        # The conv module itself must not apply an activation; the feature
        # and gate activations are applied separately after the split.
        conv_kwargs = copy.deepcopy(kwargs)
        conv_kwargs['act_cfg'] = None
        self.with_feat_act = feat_act_cfg is not None
        self.with_gate_act = gate_act_cfg is not None
        # Doubled output channels: first half is the feature, second the gate.
        self.conv = ConvModule(in_channels, out_channels * 2, kernel_size,
                               **conv_kwargs)
        if self.with_feat_act:
            self.feat_act = build_activation_layer(feat_act_cfg)
        if self.with_gate_act:
            self.gate_act = build_activation_layer(gate_act_cfg)

    def forward(self, x):
        """Forward Function.

        Args:
            x (torch.Tensor): Input tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Output tensor with shape of (n, c, h', w').
        """
        out = self.conv(x)
        feat, gate = torch.split(out, out.size(1) // 2, dim=1)
        if self.with_feat_act:
            feat = self.feat_act(feat)
        if self.with_gate_act:
            gate = self.gate_act(gate)
        return feat * gate
| 2,423 | 32.205479 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/conv.py | from mmcv.cnn import CONV_LAYERS
from torch import nn
CONV_LAYERS.register_module('Deconv', module=nn.ConvTranspose2d)
# TODO: octave conv
| 188 | 26 | 64 | py |
def pixel_unshuffle(x, scale):
    """Down-sample by pixel unshuffle.

    Rearranges each ``scale x scale`` spatial block into the channel
    dimension (the inverse of pixel shuffle).

    Args:
        x (Tensor): Input tensor of shape (b, c, h, w); h and w must be
            divisible by ``scale``.
        scale (int): Scale factor.

    Returns:
        Tensor: Output tensor of shape (b, c * scale**2, h // scale,
            w // scale).
    """
    b, c, h, w = x.shape
    if h % scale != 0 or w % scale != 0:
        raise AssertionError(
            f'Invalid scale ({scale}) of pixel unshuffle for tensor '
            f'with shape: {x.shape}')
    out_h = h // scale
    out_w = w // scale
    # Split each spatial dim into (block index, offset within block), then
    # move the offsets in front of the spatial dims so that the final
    # reshape folds them into the channel dimension.
    blocks = x.view(b, c, out_h, scale, out_w, scale)
    blocks = blocks.permute(0, 1, 3, 5, 2, 4)
    return blocks.reshape(b, -1, out_h, out_w)
| 613 | 25.695652 | 69 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/generation_model_utils.py | import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, kaiming_init, normal_init, xavier_init
from torch.nn import init
def generation_init_weights(module, init_type='normal', init_gain=0.02):
    """Default initialization of network weights for image generation.

    By default, we use normal init, but xavier and kaiming might work
    better for some applications.

    Args:
        module (nn.Module): Module to be initialized.
        init_type (str): The name of an initialization method:
            normal | xavier | kaiming | orthogonal.
        init_gain (float): Scaling factor for normal, xavier and
            orthogonal.

    Raises:
        NotImplementedError: If ``init_type`` is not one of the supported
            methods (raised lazily, when a Conv/Linear layer is visited).
    """

    def init_func(m):
        """Initialization function applied to every submodule.

        Args:
            m (nn.Module): Module to be initialized.
        """
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and (classname.find('Conv') != -1
                                     or classname.find('Linear') != -1):
            if init_type == 'normal':
                normal_init(m, 0.0, init_gain)
            elif init_type == 'xavier':
                xavier_init(m, gain=init_gain, distribution='normal')
            elif init_type == 'kaiming':
                kaiming_init(
                    m,
                    a=0,
                    mode='fan_in',
                    nonlinearity='leaky_relu',
                    distribution='normal')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight, gain=init_gain)
                # Zero the bias only when the layer has one: layers created
                # with bias=False have m.bias == None and would otherwise
                # raise AttributeError on m.bias.data.
                if getattr(m, 'bias', None) is not None:
                    init.constant_(m.bias.data, 0.0)
            else:
                raise NotImplementedError(
                    f"Initialization method '{init_type}' is not implemented")
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm Layer's weight is not a matrix;
            # only normal distribution applies.
            normal_init(m, 1.0, init_gain)

    module.apply(init_func)
class GANImageBuffer:
    """An image buffer that stores previously generated images.

    Updating the discriminator with a history of generated images, rather
    than only the ones produced by the latest generator, helps reduce model
    oscillation.

    Args:
        buffer_size (int): The size of image buffer. If buffer_size = 0,
            no buffer will be created.
        buffer_ratio (float): The chance / possibility to use the images
            previously stored in the buffer.
    """

    def __init__(self, buffer_size, buffer_ratio=0.5):
        self.buffer_size = buffer_size
        # Lazily filled history; only allocated when buffering is enabled.
        if buffer_size > 0:
            self.img_num = 0
            self.image_buffer = []
        self.buffer_ratio = buffer_ratio

    def query(self, images):
        """Query current image batch using a history of generated images.

        Args:
            images (Tensor): Current image batch without history information.
        """
        # A size-0 buffer is a no-op: return the batch untouched.
        if self.buffer_size == 0:
            return images

        returned = []
        for img in images:
            img = torch.unsqueeze(img.data, 0)
            if self.img_num < self.buffer_size:
                # Buffer not yet full: store the image and return it as-is.
                self.img_num = self.img_num + 1
                self.image_buffer.append(img)
                returned.append(img)
            elif np.random.random() < self.buffer_ratio:
                # With probability buffer_ratio, swap a randomly chosen
                # stored image with the current one and return the old one.
                idx = np.random.randint(0, self.buffer_size)
                stored = self.image_buffer[idx].clone()
                self.image_buffer[idx] = img
                returned.append(stored)
            else:
                # Otherwise keep the current image.
                returned.append(img)
        return torch.cat(returned, 0)
class UnetSkipConnectionBlock(nn.Module):
    """Construct a Unet submodule with skip connections, with the following
    structure: downsampling - `submodule` - upsampling.

    Args:
        outer_channels (int): Number of channels at the outer conv layer.
        inner_channels (int): Number of channels at the inner conv layer.
        in_channels (int): Number of channels in input images/features. If is
            None, equals to `outer_channels`. Default: None.
        submodule (UnetSkipConnectionBlock): Previously constructed submodule.
            Default: None.
        is_outermost (bool): Whether this module is the outermost module.
            Default: False.
        is_innermost (bool): Whether this module is the innermost module.
            Default: False.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
        use_dropout (bool): Whether to use dropout layers. Default: False.
    """

    def __init__(self,
                 outer_channels,
                 inner_channels,
                 in_channels=None,
                 submodule=None,
                 is_outermost=False,
                 is_innermost=False,
                 norm_cfg=dict(type='BN'),
                 use_dropout=False):
        super().__init__()
        # cannot be both outermost and innermost
        # (messages previously lacked a separating space between the
        # implicitly concatenated string literals; fixed)
        assert not (is_outermost and is_innermost), (
            "'is_outermost' and 'is_innermost' cannot be True "
            'at the same time.')
        self.is_outermost = is_outermost
        assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but "
                                            f'got {type(norm_cfg)}')
        assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
        # We use norm layers in the unet skip connection block.
        # Only for IN, use bias since it does not have affine parameters.
        use_bias = norm_cfg['type'] == 'IN'

        kernel_size = 4
        stride = 2
        padding = 1
        if in_channels is None:
            in_channels = outer_channels
        down_conv_cfg = dict(type='Conv2d')
        down_norm_cfg = norm_cfg
        down_act_cfg = dict(type='LeakyReLU', negative_slope=0.2)
        up_conv_cfg = dict(type='Deconv')
        up_norm_cfg = norm_cfg
        up_act_cfg = dict(type='ReLU')
        up_in_channels = inner_channels * 2
        up_bias = use_bias
        middle = [submodule]
        upper = []

        if is_outermost:
            # outermost block: no norm/act on the way down, Tanh at the end
            down_act_cfg = None
            down_norm_cfg = None
            up_bias = True
            up_norm_cfg = None
            upper = [nn.Tanh()]
        elif is_innermost:
            # innermost block: no submodule; up conv takes single-width input
            down_norm_cfg = None
            up_in_channels = inner_channels
            middle = []
        else:
            upper = [nn.Dropout(0.5)] if use_dropout else []

        down = [
            ConvModule(
                in_channels=in_channels,
                out_channels=inner_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=use_bias,
                conv_cfg=down_conv_cfg,
                norm_cfg=down_norm_cfg,
                act_cfg=down_act_cfg,
                order=('act', 'conv', 'norm'))
        ]
        up = [
            ConvModule(
                in_channels=up_in_channels,
                out_channels=outer_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
                bias=up_bias,
                conv_cfg=up_conv_cfg,
                norm_cfg=up_norm_cfg,
                act_cfg=up_act_cfg,
                order=('act', 'conv', 'norm'))
        ]

        model = down + middle + up + upper

        self.model = nn.Sequential(*model)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        if self.is_outermost:
            return self.model(x)

        # add skip connections
        return torch.cat([x, self.model(x)], 1)
class ResidualBlockWithDropout(nn.Module):
    """Define a Residual Block with dropout layers.

    Ref:
    Deep Residual Learning for Image Recognition

    A residual block is a conv block with skip connections. A dropout layer is
    added between two common conv modules.

    Args:
        channels (int): Number of channels in the conv layer.
        padding_mode (str): The name of padding layer:
            'reflect' | 'replicate' | 'zeros'.
        norm_cfg (dict): Config dict to build norm layer. Default:
            `dict(type='BN')`.
        use_dropout (bool): Whether to use dropout layers. Default: True.
    """

    # NOTE: the docstring previously claimed a default of dict(type='IN'),
    # contradicting the actual signature default dict(type='BN'); and the
    # assertion message lacked a space between its concatenated literals.
    # Both are fixed here.
    def __init__(self,
                 channels,
                 padding_mode,
                 norm_cfg=dict(type='BN'),
                 use_dropout=True):
        super().__init__()
        assert isinstance(norm_cfg, dict), ("'norm_cfg' should be dict, but "
                                            f'got {type(norm_cfg)}')
        assert 'type' in norm_cfg, "'norm_cfg' must have key 'type'"
        # We use norm layers in the residual block with dropout layers.
        # Only for IN, use bias since it does not have affine parameters.
        use_bias = norm_cfg['type'] == 'IN'

        block = [
            ConvModule(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                padding=1,
                bias=use_bias,
                norm_cfg=norm_cfg,
                padding_mode=padding_mode)
        ]
        if use_dropout:
            block += [nn.Dropout(0.5)]
        # second conv has no activation so the residual sum stays pre-ReLU
        block += [
            ConvModule(
                in_channels=channels,
                out_channels=channels,
                kernel_size=3,
                padding=1,
                bias=use_bias,
                norm_cfg=norm_cfg,
                act_cfg=None,
                padding_mode=padding_mode)
        ]

        self.block = nn.Sequential(*block)

    def forward(self, x):
        """Forward function. Add skip connections without final ReLU.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        out = x + self.block(x)
        return out
| 10,699 | 34.430464 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/ensemble.py | import torch
import torch.nn as nn
class SpatialTemporalEnsemble(nn.Module):
    """Apply spatial and temporal ensemble and compute outputs.

    The input is augmented with the 8 dihedral (flip/rotate) variants, the
    model output for each variant is mapped back, and the results are
    averaged. Optionally the temporally flipped sequence is ensembled too.

    Args:
        is_temporal_ensemble (bool, optional): Whether to apply ensemble
            temporally. If True, the sequence will also be flipped temporally.
            If the input is an image, this argument must be set to False.
            Default: False.
    """

    def __init__(self, is_temporal_ensemble=False):
        super().__init__()
        self.is_temporal_ensemble = is_temporal_ensemble

    def _transform(self, imgs, mode):
        """Apply spatial transform (flip, rotate) to the images.

        Args:
            imgs (torch.Tensor): The images to be transformed/
            mode (str): The mode of transform. Supported values are 'vertical',
                'horizontal', and 'transpose', corresponding to vertical flip,
                horizontal flip, and rotation, respectively.

        Returns:
            torch.Tensor: Output of the model with spatial ensemble applied.
        """
        squeeze_back = False
        if imgs.ndim == 4:
            if self.is_temporal_ensemble:
                raise ValueError('"is_temporal_ensemble" must be False if '
                                 'the input is an image.')
            # promote (n, c, h, w) to (n, 1, c, h, w) so one code path works
            squeeze_back = True
            imgs = imgs.unsqueeze(1)

        if mode == 'vertical':
            imgs = imgs.flip(4).clone()
        elif mode == 'horizontal':
            imgs = imgs.flip(3).clone()
        elif mode == 'transpose':
            imgs = imgs.permute(0, 1, 2, 4, 3).clone()

        return imgs.squeeze(1) if squeeze_back else imgs

    def spatial_ensemble(self, imgs, model):
        """Apply spatial ensemble.

        Args:
            imgs (torch.Tensor): The images to be processed by the model. Its
                size should be either (n, t, c, h, w) or (n, c, h, w).
            model (nn.Module): The model to process the images.

        Returns:
            torch.Tensor: Output of the model with spatial ensemble applied.
        """
        # Build all 8 dihedral variants: each mode doubles the list, giving
        # indices whose bits encode (vertical, horizontal, transpose).
        variants = [imgs.cpu()]
        for mode in ['vertical', 'horizontal', 'transpose']:
            variants += [self._transform(v, mode) for v in variants]

        outputs = [model(v.to(imgs.device)).cpu() for v in variants]

        # Undo the transforms in reverse order of their application.
        for i, out in enumerate(outputs):
            if i > 3:
                out = self._transform(out, 'transpose')
            if i % 4 > 1:
                out = self._transform(out, 'horizontal')
            if i % 4 % 2 == 1:
                out = self._transform(out, 'vertical')
            outputs[i] = out

        stacked = torch.stack(outputs, dim=0)
        stacked = stacked.mean(dim=0, keepdim=False)
        return stacked.to(imgs.device)

    def forward(self, imgs, model):
        """Apply spatial and temporal ensemble.

        Args:
            imgs (torch.Tensor): The images to be processed by the model. Its
                size should be either (n, t, c, h, w) or (n, c, h, w).
            model (nn.Module): The model to process the images.

        Returns:
            torch.Tensor: Output of the model with spatial ensemble applied.
        """
        outputs = self.spatial_ensemble(imgs, model)
        if self.is_temporal_ensemble:
            # average with the ensemble of the temporally reversed sequence
            outputs += self.spatial_ensemble(imgs.flip(1), model).flip(1)
            outputs *= 0.5

        return outputs
| 3,541 | 32.415094 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/upsample.py | import torch.nn as nn
import torch.nn.functional as F
from .sr_backbone_utils import default_init_weights
class PixelShufflePack(nn.Module):
    """Pixel Shuffle upsample layer.

    Expands channels with a convolution, then rearranges them into the
    spatial dimensions via ``F.pixel_shuffle``.

    Args:
        in_channels (int): Number of input channels.
        out_channels (int): Number of output channels.
        scale_factor (int): Upsample ratio.
        upsample_kernel (int): Kernel size of Conv layer to expand channels.

    Returns:
        Upsampled feature map.
    """

    def __init__(self, in_channels, out_channels, scale_factor,
                 upsample_kernel):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.scale_factor = scale_factor
        self.upsample_kernel = upsample_kernel
        # The conv emits scale_factor**2 maps per target channel; the
        # 'same' padding keeps the spatial size before shuffling.
        self.upsample_conv = nn.Conv2d(
            in_channels,
            out_channels * scale_factor * scale_factor,
            upsample_kernel,
            padding=(upsample_kernel - 1) // 2)
        self.init_weights()

    def init_weights(self):
        """Initialize weights for PixelShufflePack."""
        default_init_weights(self, 1)

    def forward(self, x):
        """Forward function for PixelShufflePack.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            Tensor: Forward results.
        """
        expanded = self.upsample_conv(x)
        return F.pixel_shuffle(expanded, self.scale_factor)
| 1,517 | 28.192308 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/img_normalize.py | import torch
import torch.nn as nn
class ImgNormalize(nn.Conv2d):
    """Normalize images with the given mean and std value.

    Implemented as a fixed (non-trainable) 1x1 convolution so it can run
    on GPU alongside the rest of the network.

    Args:
        pixel_range (float): Pixel range of feature.
        img_mean (Tuple[float]): Image mean of each channel.
        img_std (Tuple[float]): Image std of each channel.
        sign (int): Sign of bias. Default -1.
    """

    def __init__(self, pixel_range, img_mean, img_std, sign=-1):
        assert len(img_mean) == len(img_std)
        num_channels = len(img_mean)
        super().__init__(num_channels, num_channels, kernel_size=1)

        std = torch.Tensor(img_std)
        # Identity kernel scaled by 1/std performs per-channel division.
        kernel = torch.eye(num_channels).view(num_channels, num_channels, 1, 1)
        kernel /= std.view(num_channels, 1, 1, 1)
        self.weight.data = kernel
        # Bias shifts by sign * pixel_range * mean, likewise divided by std.
        bias = sign * pixel_range * torch.Tensor(img_mean)
        bias /= std
        self.bias.data = bias

        # Parameters are fixed constants, not learnable.
        self.weight.requires_grad = False
        self.bias.requires_grad = False
| 1,063 | 31.242424 | 68 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/mask_conv_module.py | from mmcv.cnn import ConvModule
class MaskConvModule(ConvModule):
    """Mask convolution module.

    A thin wrapper over mask-aware convolutions like 'partial conv'.
    Convolutions in this module always need a mask as extra input.

    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        conv_cfg (dict): Config dict for convolution layer.
        norm_cfg (dict): Config dict for normalization layer.
        act_cfg (dict): Config dict for activation layer, "relu" by default.
        inplace (bool): Whether to use inplace mode for activation.
        with_spectral_norm (bool): Whether use spectral norm in conv module.
        padding_mode (str): If the `padding_mode` has not been supported by
            current `Conv2d` in Pytorch, we will use our own padding layer
            instead. Currently, we support ['zeros', 'circular'] with official
            implementation and ['reflect'] with our own implementation.
            Default: 'zeros'.
        order (tuple[str]): The order of conv/norm/activation layers. It is a
            sequence of "conv", "norm" and "act". Examples are
            ("conv", "norm", "act") and ("act", "conv", "norm").
    """
    # only mask-aware conv types are legal here
    supported_conv_list = ['PConv']

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        assert self.conv_cfg['type'] in self.supported_conv_list

        self.init_weights()

    def forward(self,
                x,
                mask=None,
                activate=True,
                norm=True,
                return_mask=True):
        """Forward function for partial conv2d.

        Args:
            input (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
                (n, 1, h, w). If mask is not given, the function will
                work as standard conv2d. Default: None.
            activate (bool): Whether use activation layer.
            norm (bool): Whether use norm layer.
            return_mask (bool): If True and mask is not None, the updated
                mask will be returned. Default: True.

        Returns:
            Tensor or tuple: Result Tensor or 2-tuple of

                ``Tensor``: Results after partial conv.

                ``Tensor``: Updated mask will be returned if mask is given \
                    and `return_mask` is True.
        """
        for layer_name in self.order:
            if layer_name == 'conv':
                if self.with_explicit_padding:
                    x = self.padding_layer(x)
                    mask = self.padding_layer(mask)
                result = self.conv(x, mask, return_mask=return_mask)
                if return_mask:
                    x, updated_mask = result
                else:
                    x = result
            elif layer_name == 'norm' and norm and self.with_norm:
                x = self.norm(x)
            elif layer_name == 'act' and activate and self.with_activation:
                x = self.activate(x)

        if return_mask:
            return x, updated_mask

        return x
| 3,649 | 40.011236 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/partial_conv.py | import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import CONV_LAYERS
@CONV_LAYERS.register_module(name='PConv')
class PartialConv2d(nn.Conv2d):
    """Implementation for partial convolution.

    Image Inpainting for Irregular Holes Using Partial Convolutions
    [https://arxiv.org/abs/1804.07723]

    Args:
        multi_channel (bool): If True, the mask is multi-channel. Otherwise,
            the mask is single-channel.
        eps (float): Need to be changed for mixed precision training.
            For mixed precision training, you need change 1e-8 to 1e-6.
    """

    def __init__(self, *args, multi_channel=False, eps=1e-8, **kwargs):
        super().__init__(*args, **kwargs)

        # whether the mask is multi-channel or not
        self.multi_channel = multi_channel
        self.eps = eps

        # All-ones kernel: convolving the mask with it counts the valid
        # pixels under each sliding window.
        if self.multi_channel:
            updater_shape = (self.out_channels, self.in_channels,
                             self.kernel_size[0], self.kernel_size[1])
        else:
            updater_shape = (1, 1, self.kernel_size[0], self.kernel_size[1])
        self.register_buffer('weight_mask_updater', torch.ones(*updater_shape))

        # Total number of elements in one mask window, used to rescale the
        # partial-conv output by valid-pixel coverage.
        self.mask_kernel_numel = int(
            np.prod(self.weight_mask_updater.shape[1:4]))

    def forward(self, input, mask=None, return_mask=True):
        """Forward function for partial conv2d.

        Args:
            input (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor): Tensor with shape of (n, c, h, w) or
                (n, 1, h, w). If mask is not given, the function will
                work as standard conv2d. Default: None.
            return_mask (bool): If True and mask is not None, the updated
                mask will be returned. Default: True.

        Returns:
            torch.Tensor : Results after partial conv.\

            torch.Tensor : Updated mask will be returned if mask is given \
                and ``return_mask`` is True.
        """
        assert input.dim() == 4
        if mask is None:
            # no mask: behave exactly like a standard conv2d
            return super().forward(input)

        assert mask.dim() == 4
        if self.multi_channel:
            assert mask.shape[1] == input.shape[1]
        else:
            assert mask.shape[1] == 1

        # Count valid pixels per window and derive the rescaling ratio;
        # fully masked windows end up zeroed through the clamped mask.
        with torch.no_grad():
            updated_mask = F.conv2d(
                mask,
                self.weight_mask_updater,
                bias=None,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation)
            mask_ratio = self.mask_kernel_numel / (updated_mask + self.eps)
            updated_mask = torch.clamp(updated_mask, 0, 1)
            mask_ratio = mask_ratio * updated_mask

        raw_out = super().forward(input * mask)
        if self.bias is None:
            output = raw_out * mask_ratio
        else:
            # rescale only the convolution part, then re-add the bias
            bias_view = self.bias.view(1, self.out_channels, 1, 1)
            output = (raw_out - bias_view) * mask_ratio + bias_view
        output = output * updated_mask

        if return_mask:
            return output, updated_mask

        return output
| 3,605 | 34.009709 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/pixelwise_loss.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..registry import LOSSES
from .utils import masked_loss
_reduction_modes = ['none', 'mean', 'sum']
@masked_loss
def l1_loss(pred, target):
    """Element-wise L1 (mean absolute error) loss.

    Reduction and masking are handled by the ``masked_loss`` decorator.

    Args:
        pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).

    Returns:
        Tensor: Element-wise absolute differences, same shape as the inputs.
    """
    return (pred - target).abs()
@masked_loss
def mse_loss(pred, target):
    """Element-wise MSE (squared error) loss.

    Reduction and masking are handled by the ``masked_loss`` decorator.

    Args:
        pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).

    Returns:
        Tensor: Element-wise squared differences, same shape as the inputs.
    """
    return (pred - target) ** 2
@masked_loss
def charbonnier_loss(pred, target, eps=1e-12):
    """Element-wise Charbonnier loss, a smooth approximation of L1.

    Reduction and masking are handled by the ``masked_loss`` decorator.

    Args:
        pred (Tensor): Prediction Tensor with shape (n, c, h, w).
        target (Tensor): Target Tensor with shape (n, c, h, w).
        eps (float): Small constant controlling the curvature near zero.
            Default: 1e-12.

    Returns:
        Tensor: Element-wise Charbonnier penalties, same shape as the inputs.
    """
    diff = pred - target
    return torch.sqrt(diff * diff + eps)
@LOSSES.register_module()
class L1Loss(nn.Module):
    """L1 (mean absolute error, MAE) loss.

    Args:
        loss_weight (float): Scale factor applied to the final loss.
            Default: 1.0.
        reduction (str): Reduction applied to the output. One of
            'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to average the loss per sample first and
            then over the batch. Only effective when ``reduction`` is 'mean'
            and ``weight`` is passed to ``forward()``. Default: False.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
        super().__init__()
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')

        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise

    def forward(self, pred, target, weight=None, **kwargs):
        """Compute the weighted L1 loss.

        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        loss = l1_loss(
            pred,
            target,
            weight,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
@LOSSES.register_module()
class MSELoss(nn.Module):
    """MSE (L2) loss.

    Args:
        loss_weight (float): Scale factor applied to the final loss.
            Default: 1.0.
        reduction (str): Reduction applied to the output. One of
            'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to average the loss per sample first and
            then over the batch. Only effective when ``reduction`` is 'mean'
            and ``weight`` is passed to ``forward()``. Default: False.
    """

    def __init__(self, loss_weight=1.0, reduction='mean', sample_wise=False):
        super().__init__()
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')

        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise

    def forward(self, pred, target, weight=None, **kwargs):
        """Compute the weighted MSE loss.

        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        loss = mse_loss(
            pred,
            target,
            weight,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
@LOSSES.register_module()
class CharbonnierLoss(nn.Module):
    """Charbonnier loss (one variant of Robust L1Loss, a differentiable
    variant of L1Loss).

    Described in "Deep Laplacian Pyramid Networks for Fast and Accurate
    Super-Resolution".

    Args:
        loss_weight (float): Scale factor applied to the final loss.
            Default: 1.0.
        reduction (str): Reduction applied to the output. One of
            'none' | 'mean' | 'sum'. Default: 'mean'.
        sample_wise (bool): Whether to average the loss per sample first and
            then over the batch. Only effective when ``reduction`` is 'mean'
            and ``weight`` is passed to ``forward()``. Default: False.
        eps (float): A value used to control the curvature near zero.
            Default: 1e-12.
    """

    def __init__(self,
                 loss_weight=1.0,
                 reduction='mean',
                 sample_wise=False,
                 eps=1e-12):
        super().__init__()
        if reduction not in _reduction_modes:
            raise ValueError(f'Unsupported reduction mode: {reduction}. '
                             f'Supported ones are: {_reduction_modes}')

        self.loss_weight = loss_weight
        self.reduction = reduction
        self.sample_wise = sample_wise
        self.eps = eps

    def forward(self, pred, target, weight=None, **kwargs):
        """Compute the weighted Charbonnier loss.

        Args:
            pred (Tensor): of shape (N, C, H, W). Predicted tensor.
            target (Tensor): of shape (N, C, H, W). Ground truth tensor.
            weight (Tensor, optional): of shape (N, C, H, W). Element-wise
                weights. Default: None.
        """
        loss = charbonnier_loss(
            pred,
            target,
            weight,
            eps=self.eps,
            reduction=self.reduction,
            sample_wise=self.sample_wise)
        return self.loss_weight * loss
@LOSSES.register_module()
class MaskedTVLoss(L1Loss):
    """Masked TV (total variation) loss.

    Penalizes differences between horizontally and vertically adjacent
    pixels, optionally restricted to a masked region.

    Args:
        loss_weight (float, optional): Loss weight. Defaults to 1.0.
    """

    def __init__(self, loss_weight=1.0):
        super().__init__(loss_weight=loss_weight)

    def forward(self, pred, mask=None):
        """Forward function.

        Args:
            pred (torch.Tensor): Tensor with shape of (n, c, h, w).
            mask (torch.Tensor, optional): Tensor with shape of (n, 1, h, w).
                If None, the unmasked TV loss is computed. Defaults to None.

        Returns:
            torch.Tensor: Scalar TV loss.
        """
        # Bugfix: the previous implementation sliced ``mask`` unconditionally
        # and therefore raised TypeError when ``mask`` was None, even though
        # None is the documented default. Slice only when a mask is given.
        y_weight = None if mask is None else mask[:, :, :-1, :]
        x_weight = None if mask is None else mask[:, :, :, :-1]

        # vertical (y) and horizontal (x) neighbour differences
        y_diff = super().forward(
            pred[:, :, :-1, :], pred[:, :, 1:, :], weight=y_weight)
        x_diff = super().forward(
            pred[:, :, :, :-1], pred[:, :, :, 1:], weight=x_weight)

        loss = x_diff + y_diff

        return loss
| 7,356 | 32.13964 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/utils.py | import functools
import torch.nn.functional as F
def reduce_loss(loss, reduction):
    """Reduce an element-wise loss tensor as specified.

    Args:
        loss (Tensor): Elementwise loss tensor.
        reduction (str): Options are "none", "mean" and "sum".

    Returns:
        Tensor: Reduced loss tensor.
    """
    # Map the string onto PyTorch's internal enum:
    # none -> 0, elementwise_mean -> 1, sum -> 2.
    mode = F._Reduction.get_enum(reduction)
    if mode == 1:
        return loss.mean()
    if mode == 2:
        return loss.sum()
    return loss
def mask_reduce_loss(loss, weight=None, reduction='mean', sample_wise=False):
    """Apply an element-wise weight to a loss and reduce it.

    Args:
        loss (Tensor): Element-wise loss.
        weight (Tensor): Element-wise weights. Default: None.
        reduction (str): Same as built-in losses of PyTorch. Options are
            "none", "mean" and "sum". Default: 'mean'.
        sample_wise (bool): Whether calculate the loss sample-wise. This
            argument only takes effect when `reduction` is 'mean' and `weight`
            is not None. It first reduces the loss with 'mean' per-sample,
            then averages over all samples. Default: False.

    Returns:
        Tensor: Processed loss values.
    """
    if weight is None:
        # no weighting requested: plain reduction
        return reduce_loss(loss, reduction)

    # apply the element-wise weight (single-channel weights broadcast)
    assert weight.dim() == loss.dim()
    assert weight.size(1) == 1 or weight.size(1) == loss.size(1)
    loss = loss * weight

    if reduction == 'sum':
        return reduce_loss(loss, reduction)

    if reduction == 'mean':
        # for the mean, normalise by the weight mass rather than numel,
        # i.e. compute the mean over the weighted (masked) region only
        if weight.size(1) == 1:
            weight = weight.expand_as(loss)
        eps = 1e-12  # guard against division by zero

        if sample_wise:
            # per-sample mean first: collapse weight NCHW -> N111
            weight = weight.sum(dim=[1, 2, 3], keepdim=True)
            return (loss / (weight + eps)).sum() / weight.size(0)

        # pixel-wise mean over the whole batch
        return loss.sum() / (weight.sum() + eps)

    # reduction == 'none': return the weighted element-wise loss
    return loss
def masked_loss(loss_func):
    """Create a masked version of a given loss function.

    The wrapped ``loss_func`` must have the signature
    ``loss_func(pred, target, **kwargs)`` and return an element-wise loss
    without any reduction. The decorator adds ``weight``, ``reduction`` and
    ``sample_wise`` arguments, so the decorated function has the signature
    ``loss_func(pred, target, weight=None, reduction='mean',
    sample_wise=False, **kwargs)``.

    Note: when ``weight`` is given it must match the dimensionality of the
    element-wise loss (see ``mask_reduce_loss``), e.g. 4-D (N, C, H, W)
    tensors for image losses.

    :Example:

    >>> import torch
    >>> @masked_loss
    >>> def l1_loss(pred, target):
    >>>     return (pred - target).abs()

    >>> pred = torch.Tensor([0, 2, 3])
    >>> target = torch.Tensor([1, 1, 1])

    >>> l1_loss(pred, target)
    tensor(1.3333)
    >>> l1_loss(pred, target, reduction='none')
    tensor([1., 1., 2.])
    """

    @functools.wraps(loss_func)
    def wrapper(pred,
                target,
                weight=None,
                reduction='mean',
                sample_wise=False,
                **kwargs):
        # compute the raw element-wise loss, then weight and reduce it
        elementwise = loss_func(pred, target, **kwargs)
        return mask_reduce_loss(elementwise, weight, reduction, sample_wise)

    return wrapper
| 3,743 | 31.275862 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/gan_loss.py | import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.functional import conv2d
from ..registry import LOSSES
@LOSSES.register_module()
class GANLoss(nn.Module):
    """Define GAN loss.

    Supports the standard adversarial losses ('vanilla', 'lsgan', 'wgan',
    'hinge') plus 'smgan', which blurs the inpainting mask into a soft
    target for the discriminator.

    Args:
        gan_type (str): Support 'vanilla', 'lsgan', 'wgan', 'hinge', 'smgan'.
        real_label_val (float): The value for real label. Default: 1.0.
        fake_label_val (float): The value for fake label. Default: 0.0.
        loss_weight (float): Loss weight. Default: 1.0.
            Note that loss_weight is only for generators; and it is always 1.0
            for discriminators.
    """

    def __init__(self,
                 gan_type,
                 real_label_val=1.0,
                 fake_label_val=0.0,
                 loss_weight=1.0):
        super().__init__()
        self.gan_type = gan_type
        self.real_label_val = real_label_val
        self.fake_label_val = fake_label_val
        self.loss_weight = loss_weight
        # smgan additionally blurs the inpainting mask into a soft label
        if self.gan_type == 'smgan':
            self.gaussian_blur = GaussianBlur()
        # choose the underlying criterion for each GAN flavour
        if self.gan_type == 'vanilla':
            self.loss = nn.BCEWithLogitsLoss()
        elif self.gan_type == 'lsgan' or self.gan_type == 'smgan':
            self.loss = nn.MSELoss()
        elif self.gan_type == 'wgan':
            self.loss = self._wgan_loss
        elif self.gan_type == 'hinge':
            self.loss = nn.ReLU()
        else:
            raise NotImplementedError(
                f'GAN type {self.gan_type} is not implemented.')

    def _wgan_loss(self, input, target):
        """wgan loss.

        Args:
            input (Tensor): Input tensor.
            target (bool): Target label (True for real, False for fake).

        Returns:
            Tensor: wgan loss (negated mean for real targets).
        """
        return -input.mean() if target else input.mean()

    def get_target_label(self, input, target_is_real):
        """Get target label.

        Args:
            input (Tensor): Input tensor.
            target_is_real (bool): Whether the target is real or fake.

        Returns:
            (bool | Tensor): Target tensor. Return bool for wgan, otherwise,
                return Tensor filled with real_label_val / fake_label_val.
        """
        if self.gan_type == 'wgan':
            return target_is_real
        target_val = (
            self.real_label_val if target_is_real else self.fake_label_val)
        return input.new_ones(input.size()) * target_val

    def forward(self, input, target_is_real, is_disc=False, mask=None):
        """
        Args:
            input (Tensor): The input for the loss module, i.e., the network
                prediction.
            target_is_real (bool): Whether the target is real or fake.
            is_disc (bool): Whether the loss for discriminators or not.
                Default: False.
            mask (Tensor, optional): Inpainting mask; required when
                ``gan_type`` is 'smgan' (unused otherwise).

        Returns:
            Tensor: GAN loss value.
        """
        target_label = self.get_target_label(input, target_is_real)
        if self.gan_type == 'hinge':
            if is_disc:  # for discriminators in hinge-gan
                input = -input if target_is_real else input
                loss = self.loss(1 + input).mean()
            else:  # for generators in hinge-gan
                loss = -input.mean()
        elif self.gan_type == 'smgan':

            input_height, input_width = input.shape[2:]
            mask_height, mask_width = mask.shape[2:]

            # Handle inconsistent size between outputs and masks:
            # resize the prediction to the mask and rebuild the label.
            if input_height != mask_height or input_width != mask_width:
                input = F.interpolate(
                    input,
                    size=(mask_height, mask_width),
                    mode='bilinear',
                    align_corners=True)

                target_label = self.get_target_label(input, target_is_real)

            if is_disc:
                if target_is_real:
                    target_label = target_label
                else:
                    # fake samples get a blurred mask as a soft target;
                    # keep the label on the same device as the mask
                    target_label = self.gaussian_blur(mask).detach().cuda(
                    ) if mask.is_cuda else self.gaussian_blur(
                        mask).detach().cpu()
                    # target_label = self.gaussian_blur(mask).detach().cpu()
                loss = self.loss(input, target_label)
            else:
                # generator: emphasise the masked (hole) region by the
                # inverse of the mask's mean coverage
                loss = self.loss(input, target_label) * mask / mask.mean()
                loss = loss.mean()
        else:  # other gan types
            loss = self.loss(input, target_label)

        # loss_weight is always 1.0 for discriminators
        return loss if is_disc else loss * self.loss_weight
@LOSSES.register_module()
class GaussianBlur(nn.Module):
    """Blur a batched tensor with a two-dimensional Gaussian kernel.

    The kernel is built once at construction time and applied per channel
    through a grouped convolution, so batch operation is supported.

    This function is modified from kornia.filters.gaussian:
    `<https://kornia.readthedocs.io/en/latest/_modules/kornia/filters/gaussian.html>`.

    Args:
        kernel_size (tuple[int]): The size of the kernel. Default: (71, 71).
        sigma (tuple[float]): The standard deviation of the kernel.
            Default (10.0, 10.0)

    Returns:
        Tensor: The Gaussian-blurred tensor.

    Shape:
        - input: Tensor with shape of (n, c, h, w)
        - output: Tensor with shape of (n, c, h, w)
    """

    def __init__(self, kernel_size=(71, 71), sigma=(10.0, 10.0)):
        super().__init__()
        self.kernel_size = kernel_size
        self.sigma = sigma
        self.padding = self.compute_zero_padding(kernel_size)
        self.kernel = self.get_2d_gaussian_kernel(kernel_size, sigma)

    @staticmethod
    def compute_zero_padding(kernel_size):
        """Return the (pad_h, pad_w) that keeps the output size unchanged."""
        pad_h, pad_w = [(ks - 1) // 2 for ks in kernel_size]
        return pad_h, pad_w

    def get_2d_gaussian_kernel(self, kernel_size, sigma):
        """Get the two-dimensional Gaussian filter matrix coefficients.

        Args:
            kernel_size (tuple[int]): Kernel filter size in the x and y
                direction. The kernel sizes should be odd and positive.
            sigma (tuple[int]): Gaussian standard deviation in
                the x and y direction.

        Returns:
            kernel_2d (Tensor): A 2D torch tensor with gaussian filter
                matrix coefficients.
        """
        if not isinstance(kernel_size, tuple) or len(kernel_size) != 2:
            raise TypeError(
                'kernel_size must be a tuple of length two. Got {}'.format(
                    kernel_size))
        if not isinstance(sigma, tuple) or len(sigma) != 2:
            raise TypeError(
                'sigma must be a tuple of length two. Got {}'.format(sigma))

        ksize_x, ksize_y = kernel_size
        sigma_x, sigma_y = sigma

        kernel_x = self.get_1d_gaussian_kernel(ksize_x, sigma_x)
        kernel_y = self.get_1d_gaussian_kernel(ksize_y, sigma_y)

        # The separable 2D kernel is the outer product of the 1D kernels.
        kernel_2d = torch.matmul(
            kernel_x.unsqueeze(-1),
            kernel_y.unsqueeze(-1).t())
        return kernel_2d

    def get_1d_gaussian_kernel(self, kernel_size, sigma):
        """Get the Gaussian filter coefficients in one dimension (x or y direction).

        Args:
            kernel_size (int): Kernel filter size in x or y direction.
                Should be odd and positive.
            sigma (float): Gaussian standard deviation in x or y direction.

        Returns:
            kernel_1d (Tensor): A 1D torch tensor with gaussian filter
                coefficients in x or y direction.
        """
        if not isinstance(kernel_size,
                          int) or kernel_size % 2 == 0 or kernel_size <= 0:
            raise TypeError(
                'kernel_size must be an odd positive integer. Got {}'.format(
                    kernel_size))

        return self.gaussian(kernel_size, sigma)

    def gaussian(self, kernel_size, sigma):
        """Return normalised 1D Gaussian coefficients of length kernel_size."""
        center = kernel_size // 2
        denom = float(2 * sigma**2)

        gauss = torch.stack([
            torch.exp(torch.tensor(-(i - center)**2 / denom))
            for i in range(kernel_size)
        ])
        # normalise so the coefficients sum to one
        return gauss / gauss.sum()

    def forward(self, x):
        if not torch.is_tensor(x):
            raise TypeError(
                'Input x type is not a torch.Tensor. Got {}'.format(type(x)))
        if not len(x.shape) == 4:
            raise ValueError(
                'Invalid input shape, we expect BxCxHxW. Got: {}'.format(
                    x.shape))

        _, c, _, _ = x.shape
        # one kernel per channel; groups=c applies each independently
        kernel = self.kernel.to(x.device).to(x.dtype).repeat(c, 1, 1, 1)
        return conv2d(x, kernel, padding=self.padding, stride=1, groups=c)
def gradient_penalty_loss(discriminator, real_data, fake_data, mask=None):
    """Calculate gradient penalty for wgan-gp.

    Args:
        discriminator (nn.Module): Network for the discriminator.
        real_data (Tensor): Real input data.
        fake_data (Tensor): Fake input data.
        mask (Tensor): Masks for inpainting. Default: None.

    Returns:
        Tensor: A tensor for gradient penalty.
    """
    batch_size = real_data.size(0)
    alpha = torch.rand(batch_size, 1, 1, 1).to(real_data)

    # random point on the segment between each real/fake pair
    interpolates = alpha * real_data + (1. - alpha) * fake_data
    interpolates = autograd.Variable(interpolates, requires_grad=True)

    disc_out = discriminator(interpolates)

    # gradient of the critic output w.r.t. the interpolated inputs
    gradients = autograd.grad(
        outputs=disc_out,
        inputs=interpolates,
        grad_outputs=torch.ones_like(disc_out),
        create_graph=True,
        retain_graph=True,
        only_inputs=True)[0]

    if mask is not None:
        gradients = gradients * mask

    # penalise deviation of the per-pixel channel-norm from 1
    penalty = ((gradients.norm(2, dim=1) - 1)**2).mean()
    if mask is not None:
        penalty /= torch.mean(mask)

    return penalty
@LOSSES.register_module()
class GradientPenaltyLoss(nn.Module):
    """Gradient penalty loss for wgan-gp.

    Thin module wrapper around :func:`gradient_penalty_loss` that applies a
    configurable weight.

    Args:
        loss_weight (float): Loss weight. Default: 1.0.
    """

    def __init__(self, loss_weight=1.):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, discriminator, real_data, fake_data, mask=None):
        """Forward function.

        Args:
            discriminator (nn.Module): Network for the discriminator.
            real_data (Tensor): Real input data.
            fake_data (Tensor): Fake input data.
            mask (Tensor): Masks for inpainting. Default: None.

        Returns:
            Tensor: Weighted gradient-penalty loss.
        """
        penalty = gradient_penalty_loss(
            discriminator, real_data, fake_data, mask=mask)
        return self.loss_weight * penalty
@LOSSES.register_module()
class DiscShiftLoss(nn.Module):
    """Disc shift loss: penalises the squared magnitude of the
    discriminator output to keep it close to zero.

    Args:
        loss_weight (float, optional): Loss weight. Defaults to 1.0.
    """

    def __init__(self, loss_weight=0.1):
        super().__init__()
        self.loss_weight = loss_weight

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Tensor with shape (n, c, h, w)

        Returns:
            Tensor: Weighted mean of the squared input.
        """
        return self.loss_weight * x.pow(2).mean()
| 11,506 | 32.353623 | 86 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/losses/perceptual_loss.py | import torch
import torch.nn as nn
import torchvision.models.vgg as vgg
from mmcv.runner import load_checkpoint
from torch.nn import functional as F
from mmedit.utils import get_root_logger
from ..registry import LOSSES
class PerceptualVGG(nn.Module):
    """VGG network used in calculating perceptual loss.

    In this implementation, we allow users to choose whether use normalization
    in the input feature and the type of vgg network. Note that the pretrained
    path must fit the vgg type.

    Args:
        layer_name_list (list[str]): According to the name in this list,
            forward function will return the corresponding features. This
            list contains the name each layer in `vgg.feature`. An example
            of this list is ['4', '10'].
        vgg_type (str): Set the type of vgg network. Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image.
            Importantly, the input feature must in the range [0, 1].
            Default: True.
        pretrained (str): Path for pretrained weights. Default:
            'torchvision://vgg19'
    """

    def __init__(self,
                 layer_name_list,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 pretrained='torchvision://vgg19'):
        super().__init__()
        # sanity check: a torchvision checkpoint must match the vgg variant
        if pretrained.startswith('torchvision://'):
            assert vgg_type in pretrained
        self.layer_name_list = layer_name_list
        self.use_input_norm = use_input_norm

        # get vgg model and load pretrained vgg weight
        # remove _vgg from attributes to avoid `find_unused_parameters` bug
        _vgg = getattr(vgg, vgg_type)()
        self.init_weights(_vgg, pretrained)
        # deepest requested layer index determines how much of the net to keep
        num_layers = max(map(int, layer_name_list)) + 1
        assert len(_vgg.features) >= num_layers
        # only borrow layers that will be used from _vgg to avoid unused params
        self.vgg_layers = _vgg.features[:num_layers]

        if self.use_input_norm:
            # ImageNet channel statistics for inputs in range [0, 1]
            self.register_buffer(
                'mean',
                torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
            # ImageNet channel std (registered as a buffer so it moves with
            # the module's device/dtype)
            self.register_buffer(
                'std',
                torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

        # the extractor is frozen; gradients only flow to its inputs
        for v in self.vgg_layers.parameters():
            v.requires_grad = False

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).

        Returns:
            dict[str, Tensor]: Features keyed by the layer names in
                ``layer_name_list``.
        """
        if self.use_input_norm:
            x = (x - self.mean) / self.std
        output = {}

        # run sequentially, capturing the requested intermediate activations
        for name, module in self.vgg_layers.named_children():
            x = module(x)
            if name in self.layer_name_list:
                output[name] = x.clone()
        return output

    def init_weights(self, model, pretrained):
        """Init weights.

        Args:
            model (nn.Module): Models to be inited.
            pretrained (str): Path for pretrained weights.
        """
        logger = get_root_logger()
        load_checkpoint(model, pretrained, logger=logger)
@LOSSES.register_module()
class PerceptualLoss(nn.Module):
    """Perceptual loss with commonly used style loss.

    Args:
        layer_weights (dict): The weight for each layer of vgg feature for
            perceptual loss. Here is an example: {'4': 1., '9': 1., '18': 1.},
            which means the 5th, 10th and 18th feature layer will be
            extracted with weight 1.0 in calculating losses.
        layer_weights_style (dict): The weight for each layer of vgg feature
            for style loss. If set to 'None', the weights are set equal to
            the weights for perceptual loss. Default: None.
        vgg_type (str): The type of vgg network used as feature extractor.
            Default: 'vgg19'.
        use_input_norm (bool): If True, normalize the input image in vgg.
            Default: True.
        perceptual_weight (float): If `perceptual_weight > 0`, the perceptual
            loss will be calculated and the loss will multiplied by the
            weight. Default: 1.0.
        style_weight (float): If `style_weight > 0`, the style loss will be
            calculated and the loss will multiplied by the weight.
            Default: 1.0.
        norm_img (bool): If True, the image will be normed to [0, 1]. Note that
            this is different from the `use_input_norm` which norm the input in
            in forward function of vgg according to the statistics of dataset.
            Importantly, the input image must be in range [-1, 1].
        pretrained (str): Path for pretrained weights. Default:
            'torchvision://vgg19'.
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'l1'.
    """

    def __init__(self,
                 layer_weights,
                 layer_weights_style=None,
                 vgg_type='vgg19',
                 use_input_norm=True,
                 perceptual_weight=1.0,
                 style_weight=1.0,
                 norm_img=True,
                 pretrained='torchvision://vgg19',
                 criterion='l1'):
        super().__init__()
        self.norm_img = norm_img
        self.perceptual_weight = perceptual_weight
        self.style_weight = style_weight
        self.layer_weights = layer_weights
        self.layer_weights_style = layer_weights_style

        self.vgg = PerceptualVGG(
            layer_name_list=list(self.layer_weights.keys()),
            vgg_type=vgg_type,
            use_input_norm=use_input_norm,
            pretrained=pretrained)

        # Build a second extractor only when the style layers differ from the
        # perceptual layers; otherwise reuse the same features for both.
        if self.layer_weights_style is not None and \
                self.layer_weights_style != self.layer_weights:
            self.vgg_style = PerceptualVGG(
                layer_name_list=list(self.layer_weights_style.keys()),
                vgg_type=vgg_type,
                use_input_norm=use_input_norm,
                pretrained=pretrained)
        else:
            self.layer_weights_style = self.layer_weights
            self.vgg_style = None

        criterion = criterion.lower()
        if criterion == 'l1':
            self.criterion = torch.nn.L1Loss()
        elif criterion == 'mse':
            self.criterion = torch.nn.MSELoss()
        else:
            raise NotImplementedError(
                f'{criterion} criterion has not been supported in'
                ' this version.')

    def forward(self, x, gt):
        """Forward function.

        Args:
            x (Tensor): Input tensor with shape (n, c, h, w).
            gt (Tensor): Ground-truth tensor with shape (n, c, h, w).

        Returns:
            tuple(Tensor | None, Tensor | None): Perceptual loss and style
                loss; each is None when its weight is <= 0.
        """
        if self.norm_img:
            # map [-1, 1] inputs to the [0, 1] range VGG expects
            x = (x + 1.) * 0.5
            gt = (gt + 1.) * 0.5
        # extract vgg features (gt detached: no gradients to the target)
        x_features = self.vgg(x)
        gt_features = self.vgg(gt.detach())

        # calculate perceptual loss: weighted per-layer feature distances
        if self.perceptual_weight > 0:
            percep_loss = 0
            for k in x_features.keys():
                percep_loss += self.criterion(
                    x_features[k], gt_features[k]) * self.layer_weights[k]
            percep_loss *= self.perceptual_weight
        else:
            percep_loss = None

        # calculate style loss: distances between Gram matrices
        if self.style_weight > 0:
            if self.vgg_style is not None:
                x_features = self.vgg_style(x)
                gt_features = self.vgg_style(gt.detach())

            style_loss = 0
            for k in x_features.keys():
                style_loss += self.criterion(
                    self._gram_mat(x_features[k]),
                    self._gram_mat(
                        gt_features[k])) * self.layer_weights_style[k]
            style_loss *= self.style_weight
        else:
            style_loss = None

        return percep_loss, style_loss

    def _gram_mat(self, x):
        """Calculate Gram matrix.

        Args:
            x (torch.Tensor): Tensor with shape of (n, c, h, w).

        Returns:
            torch.Tensor: Gram matrix of shape (n, c, c), normalised by
                the feature size.
        """
        (n, c, h, w) = x.size()
        features = x.view(n, c, w * h)
        features_t = features.transpose(1, 2)
        gram = features.bmm(features_t) / (c * h * w)
        return gram
@LOSSES.register_module()
class TransferalPerceptualLoss(nn.Module):
    """Transferal perceptual loss.

    Compares multi-level feature maps against reference textures, optionally
    weighting each level by a (progressively upsampled) soft-attention map.

    Args:
        loss_weight (float): Loss weight. Default: 1.0.
        use_attention (bool): If True, use soft-attention tensor. Default: True
        criterion (str): Criterion type. Options are 'l1' and 'mse'.
            Default: 'mse'.
    """

    def __init__(self, loss_weight=1.0, use_attention=True, criterion='mse'):
        super().__init__()
        self.use_attention = use_attention
        self.loss_weight = loss_weight
        criterion = criterion.lower()
        if criterion == 'l1':
            self.loss_function = torch.nn.L1Loss()
        elif criterion == 'mse':
            self.loss_function = torch.nn.MSELoss()
        else:
            raise ValueError(
                f"criterion should be 'l1' or 'mse', but got {criterion}")

    def forward(self, maps, soft_attention, textures):
        """Forward function.

        Args:
            maps (Tuple[Tensor]): Input tensors, one per level; level i is
                assumed to be 2**i times the base resolution.
            soft_attention (Tensor): Soft-attention tensor.
            textures (Tuple[Tensor]): Ground-truth tensors.

        Returns:
            Tensor: Weighted sum of per-level losses.
        """
        if self.use_attention:
            h, w = soft_attention.shape[-2:]
            # level 0 uses the raw (sigmoid) attention; deeper levels get the
            # attention map upsampled to that level's resolution
            softs = [torch.sigmoid(soft_attention)]
            for i in range(1, len(maps)):
                softs.append(
                    F.interpolate(
                        soft_attention,
                        size=(h * pow(2, i), w * pow(2, i)),
                        mode='bicubic',
                        align_corners=False))
        else:
            # Bugfix: a weight is needed for every level of `maps`; the
            # previous hard-coded [1., 1., 1.] silently dropped any level
            # beyond the third because zip() truncates to the shortest input.
            softs = [1.0] * len(maps)

        loss_texture = 0
        for level_map, soft, texture in zip(maps, softs, textures):
            loss_texture += self.loss_function(level_map * soft,
                                               texture * soft)

        return loss_texture * self.loss_weight
| 10,350 | 34.940972 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_pp.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmcv.ops import ModulatedDeformConv2d, modulated_deform_conv2d
from mmcv.runner import load_checkpoint
from mmedit.models.backbones.sr_backbones.basicvsr_net import (
ResidualBlocksWithInputConv, SPyNet)
from mmedit.models.common import PixelShufflePack, flow_warp
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRPlusPlus(nn.Module):
"""BasicVSR++ network structure.
Support either x4 upsampling or same size output.
Paper:
BasicVSR++: Improving Video Super-Resolution with Enhanced Propagation
and Alignment
Args:
mid_channels (int, optional): Channel number of the intermediate
features. Default: 64.
num_blocks (int, optional): The number of residual blocks in each
propagation branch. Default: 7.
max_residue_magnitude (int): The maximum magnitude of the offset
residue (Eq. 6 in paper). Default: 10.
is_low_res_input (bool, optional): Whether the input is low-resolution
or not. If False, the output resolution is equal to the input
resolution. Default: True.
spynet_pretrained (str, optional): Pre-trained model path of SPyNet.
Default: None.
cpu_cache_length (int, optional): When the length of sequence is larger
than this value, the intermediate features are sent to CPU. This
saves GPU memory, but slows down the inference speed. You can
increase this number if you have a GPU with large memory.
Default: 100.
"""
    def __init__(self,
                 mid_channels=64,
                 num_blocks=7,
                 max_residue_magnitude=10,
                 is_low_res_input=True,
                 spynet_pretrained=None,
                 cpu_cache_length=100):
        super().__init__()
        self.mid_channels = mid_channels
        self.is_low_res_input = is_low_res_input
        self.cpu_cache_length = cpu_cache_length

        # optical flow estimator used to guide feature alignment
        self.spynet = SPyNet(pretrained=spynet_pretrained)

        # feature extraction module; for same-resolution input, downsample
        # by 4x with strided convolutions first so propagation runs at LR
        if is_low_res_input:
            self.feat_extract = ResidualBlocksWithInputConv(3, mid_channels, 5)
        else:
            self.feat_extract = nn.Sequential(
                nn.Conv2d(3, mid_channels, 3, 2, 1),
                nn.LeakyReLU(negative_slope=0.1, inplace=True),
                nn.Conv2d(mid_channels, mid_channels, 3, 2, 1),
                nn.LeakyReLU(negative_slope=0.1, inplace=True),
                ResidualBlocksWithInputConv(mid_channels, mid_channels, 5))

        # propagation branches: two backward and two forward passes, each
        # with flow-guided deformable alignment and its own residual trunk.
        # The i-th branch consumes features of all earlier branches, hence
        # the growing (2 + i) * mid_channels input width.
        self.deform_align = nn.ModuleDict()
        self.backbone = nn.ModuleDict()
        modules = ['backward_1', 'forward_1', 'backward_2', 'forward_2']
        for i, module in enumerate(modules):
            self.deform_align[module] = SecondOrderDeformableAlignment(
                2 * mid_channels,
                mid_channels,
                3,
                padding=1,
                deform_groups=16,
                max_residue_magnitude=max_residue_magnitude)
            self.backbone[module] = ResidualBlocksWithInputConv(
                (2 + i) * mid_channels, mid_channels, num_blocks)

        # upsampling module (two 2x pixel-shuffle steps -> overall 4x)
        self.reconstruction = ResidualBlocksWithInputConv(
            5 * mid_channels, mid_channels, 5)
        self.upsample1 = PixelShufflePack(
            mid_channels, mid_channels, 2, upsample_kernel=3)
        self.upsample2 = PixelShufflePack(
            mid_channels, 64, 2, upsample_kernel=3)
        self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
        # bilinear upsampling of the input for the global residual connection
        self.img_upsample = nn.Upsample(
            scale_factor=4, mode='bilinear', align_corners=False)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

        # check if the sequence is augmented by flipping
        self.is_mirror_extended = False
def check_if_mirror_extended(self, lqs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
"""
if lqs.size(1) % 2 == 0:
lqs_1, lqs_2 = torch.chunk(lqs, 2, dim=1)
if torch.norm(lqs_1 - lqs_2.flip(1)) == 0:
self.is_mirror_extended = True
def compute_flow(self, lqs):
"""Compute optical flow using SPyNet for feature alignment.
Note that if the input is an mirror-extended sequence, 'flows_forward'
is not needed, since it is equal to 'flows_backward.flip(1)'.
Args:
lqs (tensor): Input low quality (LQ) sequence with
shape (n, t, c, h, w).
Return:
tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
flows used for forward-time propagation (current to previous).
'flows_backward' corresponds to the flows used for
backward-time propagation (current to next).
"""
n, t, c, h, w = lqs.size()
lqs_1 = lqs[:, :-1, :, :, :].reshape(-1, c, h, w)
lqs_2 = lqs[:, 1:, :, :, :].reshape(-1, c, h, w)
flows_backward = self.spynet(lqs_1, lqs_2).view(n, t - 1, 2, h, w)
if self.is_mirror_extended: # flows_forward = flows_backward.flip(1)
flows_forward = None
else:
flows_forward = self.spynet(lqs_2, lqs_1).view(n, t - 1, 2, h, w)
if self.cpu_cache:
flows_backward = flows_backward.cpu()
flows_forward = flows_forward.cpu()
return flows_forward, flows_backward
    def propagate(self, feats, flows, module_name):
        """Propagate the latent features throughout the sequence.
        Args:
            feats dict(list[tensor]): Features from previous branches. Each
                component is a list of tensors with shape (n, c, h, w).
            flows (tensor): Optical flows with shape (n, t - 1, 2, h, w).
            module_name (str): The name of the propagation branches. Can either
                be 'backward_1', 'forward_1', 'backward_2', 'forward_2'.
        Return:
            dict(list[tensor]): A dictionary containing all the propagated
                features. Each key in the dictionary corresponds to a
                propagation branch, which is represented by a list of tensors.
        """
        n, t, _, h, w = flows.size()

        # Visit order over t + 1 frames; 'mapping_idx' folds an index into
        # the spatial-feature list and mirrors it, so indexing stays valid
        # for mirror-extended sequences.
        frame_idx = range(0, t + 1)
        flow_idx = range(-1, t)
        mapping_idx = list(range(0, len(feats['spatial'])))
        mapping_idx += mapping_idx[::-1]

        if 'backward' in module_name:
            # Backward branches traverse the sequence in reverse.
            frame_idx = frame_idx[::-1]
            flow_idx = frame_idx

        feat_prop = flows.new_zeros(n, self.mid_channels, h, w)
        for i, idx in enumerate(frame_idx):
            feat_current = feats['spatial'][mapping_idx[idx]]
            if self.cpu_cache:
                feat_current = feat_current.cuda()
                feat_prop = feat_prop.cuda()
            # second-order deformable alignment
            if i > 0:
                # First-order neighbour: flow to the previously visited frame.
                flow_n1 = flows[:, flow_idx[i], :, :, :]
                if self.cpu_cache:
                    flow_n1 = flow_n1.cuda()
                cond_n1 = flow_warp(feat_prop, flow_n1.permute(0, 2, 3, 1))

                # initialize second-order features
                feat_n2 = torch.zeros_like(feat_prop)
                flow_n2 = torch.zeros_like(flow_n1)
                cond_n2 = torch.zeros_like(cond_n1)

                if i > 1:  # second-order features
                    feat_n2 = feats[module_name][-2]
                    if self.cpu_cache:
                        feat_n2 = feat_n2.cuda()

                    flow_n2 = flows[:, flow_idx[i - 1], :, :, :]
                    if self.cpu_cache:
                        flow_n2 = flow_n2.cuda()

                    # Compose the two single-step flows to reach the frame
                    # two steps away.
                    flow_n2 = flow_n1 + flow_warp(flow_n2,
                                                  flow_n1.permute(0, 2, 3, 1))
                    cond_n2 = flow_warp(feat_n2, flow_n2.permute(0, 2, 3, 1))

                # flow-guided deformable convolution
                cond = torch.cat([cond_n1, feat_current, cond_n2], dim=1)
                feat_prop = torch.cat([feat_prop, feat_n2], dim=1)
                feat_prop = self.deform_align[module_name](feat_prop, cond,
                                                           flow_n1, flow_n2)

            # concatenate and residual blocks: fuse the current spatial
            # feature, features from all earlier branches, and the
            # propagated feature.
            feat = [feat_current] + [
                feats[k][idx]
                for k in feats if k not in ['spatial', module_name]
            ] + [feat_prop]
            if self.cpu_cache:
                feat = [f.cuda() for f in feat]

            feat = torch.cat(feat, dim=1)
            feat_prop = feat_prop + self.backbone[module_name](feat)
            feats[module_name].append(feat_prop)

            if self.cpu_cache:
                feats[module_name][-1] = feats[module_name][-1].cpu()
                torch.cuda.empty_cache()

        if 'backward' in module_name:
            # Restore chronological order for downstream consumers.
            feats[module_name] = feats[module_name][::-1]

        return feats
    def upsample(self, lqs, feats):
        """Compute the output image given the features.

        Note: this consumes the propagation-branch lists in ``feats`` via
        ``pop(0)``; ``feats['spatial']`` is left intact.

        Args:
            lqs (tensor): Input low quality (LQ) sequence with
                shape (n, t, c, h, w).
            feats (dict): The features from the propagation branches.
        Returns:
            Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
        """
        outputs = []
        num_outputs = len(feats['spatial'])

        # Mirror the index list so indexing works for mirror-extended inputs.
        mapping_idx = list(range(0, num_outputs))
        mapping_idx += mapping_idx[::-1]

        for i in range(0, lqs.size(1)):
            # Pop the i-th feature of every propagation branch (destructive).
            hr = [feats[k].pop(0) for k in feats if k != 'spatial']
            hr.insert(0, feats['spatial'][mapping_idx[i]])
            hr = torch.cat(hr, dim=1)
            if self.cpu_cache:
                hr = hr.cuda()

            hr = self.reconstruction(hr)
            hr = self.lrelu(self.upsample1(hr))
            hr = self.lrelu(self.upsample2(hr))
            hr = self.lrelu(self.conv_hr(hr))
            hr = self.conv_last(hr)
            # Residual learning: add the (upsampled) input frame.
            if self.is_low_res_input:
                hr += self.img_upsample(lqs[:, i, :, :, :])
            else:
                hr += lqs[:, i, :, :, :]
            if self.cpu_cache:
                hr = hr.cpu()
                torch.cuda.empty_cache()

            outputs.append(hr)

        return torch.stack(outputs, dim=1)
    def forward(self, lqs):
        """Forward function for BasicVSR++.
        Args:
            lqs (tensor): Input low quality (LQ) sequence with
                shape (n, t, c, h, w).
        Returns:
            Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
        """
        n, t, c, h, w = lqs.size()

        # whether to cache the features in CPU (no effect if using CPU)
        if t > self.cpu_cache_length and lqs.is_cuda:
            self.cpu_cache = True
        else:
            self.cpu_cache = False

        if self.is_low_res_input:
            lqs_downsample = lqs.clone()
        else:
            # Optical flow is always estimated at 1/4 of the input size.
            lqs_downsample = F.interpolate(
                lqs.view(-1, c, h, w), scale_factor=0.25,
                mode='bicubic').view(n, t, c, h // 4, w // 4)

        # check whether the input is an extended sequence
        self.check_if_mirror_extended(lqs)

        feats = {}
        # compute spatial features
        if self.cpu_cache:
            feats['spatial'] = []
            for i in range(0, t):
                feat = self.feat_extract(lqs[:, i, :, :, :]).cpu()
                feats['spatial'].append(feat)
                torch.cuda.empty_cache()
        else:
            feats_ = self.feat_extract(lqs.view(-1, c, h, w))
            h, w = feats_.shape[2:]
            feats_ = feats_.view(n, t, -1, h, w)
            feats['spatial'] = [feats_[:, i, :, :, :] for i in range(0, t)]

        # compute optical flow using the low-res inputs
        # NOTE(review): in the else-branch above, h and w are reassigned to
        # the feature-map size, so the message below may report the wrong
        # numbers; the check itself uses the correct downsampled sizes.
        assert lqs_downsample.size(3) >= 64 and lqs_downsample.size(4) >= 64, (
            'The height and width of low-res inputs must be at least 64, '
            f'but got {h} and {w}.')
        flows_forward, flows_backward = self.compute_flow(lqs_downsample)

        # feature propagation: two backward/forward sweeps (grid propagation)
        for iter_ in [1, 2]:
            for direction in ['backward', 'forward']:
                module = f'{direction}_{iter_}'

                feats[module] = []

                if direction == 'backward':
                    flows = flows_backward
                elif flows_forward is not None:
                    flows = flows_forward
                else:
                    # Mirror-extended input: forward flows equal the
                    # time-reversed backward flows (see compute_flow).
                    flows = flows_backward.flip(1)

                feats = self.propagate(feats, flows, module)
                if self.cpu_cache:
                    del flows
                    torch.cuda.empty_cache()

        return self.upsample(lqs, feats)
def init_weights(self, pretrained=None, strict=True):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Default: None.
strict (bool, optional): Whether strictly load the pretrained
model. Default: True.
"""
if isinstance(pretrained, str):
logger = get_root_logger()
load_checkpoint(self, pretrained, strict=strict, logger=logger)
elif pretrained is not None:
raise TypeError(f'"pretrained" must be a str or None. '
f'But received {type(pretrained)}.')
class SecondOrderDeformableAlignment(ModulatedDeformConv2d):
    """Second-order deformable alignment module.
    Args:
        in_channels (int): Same as nn.Conv2d.
        out_channels (int): Same as nn.Conv2d.
        kernel_size (int or tuple[int]): Same as nn.Conv2d.
        stride (int or tuple[int]): Same as nn.Conv2d.
        padding (int or tuple[int]): Same as nn.Conv2d.
        dilation (int or tuple[int]): Same as nn.Conv2d.
        groups (int): Same as nn.Conv2d.
        bias (bool or str): If specified as `auto`, it will be decided by the
            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
            False.
        max_residue_magnitude (int): The maximum magnitude of the offset
            residue (Eq. 6 in paper). Default: 10.
    """

    def __init__(self, *args, **kwargs):
        # Pop the extra kwarg before the base-class constructor sees it.
        self.max_residue_magnitude = kwargs.pop('max_residue_magnitude', 10)

        super(SecondOrderDeformableAlignment, self).__init__(*args, **kwargs)

        # Offset/mask predictor. Input channels: three conditioning features
        # (3 * C) plus two 2-channel optical flows. Output: 27 *
        # deform_groups channels, chunked in forward() into two offset
        # groups and a modulation mask.
        self.conv_offset = nn.Sequential(
            nn.Conv2d(3 * self.out_channels + 4, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            nn.Conv2d(self.out_channels, 27 * self.deform_groups, 3, 1, 1),
        )

        self.init_offset()

    def init_offset(self):
        # Zero-init the last layer so training starts with zero learned
        # offset residue and a neutral mask.
        constant_init(self.conv_offset[-1], val=0, bias=0)

    def forward(self, x, extra_feat, flow_1, flow_2):
        extra_feat = torch.cat([extra_feat, flow_1, flow_2], dim=1)
        out = self.conv_offset(extra_feat)
        o1, o2, mask = torch.chunk(out, 3, dim=1)

        # offset
        # tanh bounds the learned residue to [-max_residue_magnitude,
        # +max_residue_magnitude]; it is then added on top of the optical
        # flow (flow channels flipped, presumably to match the deform-conv
        # coordinate order — TODO confirm against mmcv's convention).
        offset = self.max_residue_magnitude * torch.tanh(
            torch.cat((o1, o2), dim=1))
        offset_1, offset_2 = torch.chunk(offset, 2, dim=1)
        offset_1 = offset_1 + flow_1.flip(1).repeat(1,
                                                    offset_1.size(1) // 2, 1,
                                                    1)
        offset_2 = offset_2 + flow_2.flip(1).repeat(1,
                                                    offset_2.size(1) // 2, 1,
                                                    1)
        offset = torch.cat([offset_1, offset_2], dim=1)

        # mask
        mask = torch.sigmoid(mask)

        return modulated_deform_conv2d(x, offset, mask, self.weight, self.bias,
                                       self.stride, self.padding,
                                       self.dilation, self.groups,
                                       self.deform_groups)
| 16,773 | 37.56092 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/backbones/sr_backbones/basicvsr_net.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import load_checkpoint
from mmedit.models.common import (PixelShufflePack, ResidualBlockNoBN,
flow_warp, make_layer)
from mmedit.models.registry import BACKBONES
from mmedit.utils import get_root_logger
@BACKBONES.register_module()
class BasicVSRNet(nn.Module):
    """BasicVSR network structure for video super-resolution.
    Support only x4 upsampling.
    Paper:
        BasicVSR: The Search for Essential Components in Video Super-Resolution
        and Beyond, CVPR, 2021
    Args:
        mid_channels (int): Channel number of the intermediate features.
            Default: 64.
        num_blocks (int): Number of residual blocks in each propagation branch.
            Default: 30.
        spynet_pretrained (str): Pre-trained model path of SPyNet.
            Default: None.
    """

    def __init__(self, mid_channels=64, num_blocks=30, spynet_pretrained=None):
        super().__init__()

        self.mid_channels = mid_channels

        # optical flow network for feature alignment
        self.spynet = SPyNet(pretrained=spynet_pretrained)

        # propagation branches (input = image channels + propagated feature)
        self.backward_resblocks = ResidualBlocksWithInputConv(
            mid_channels + 3, mid_channels, num_blocks)
        self.forward_resblocks = ResidualBlocksWithInputConv(
            mid_channels + 3, mid_channels, num_blocks)

        # upsample: fuse the two branches, then 2x + 2x pixel shuffle (=4x)
        self.fusion = nn.Conv2d(
            mid_channels * 2, mid_channels, 1, 1, 0, bias=True)
        self.upsample1 = PixelShufflePack(
            mid_channels, mid_channels, 2, upsample_kernel=3)
        self.upsample2 = PixelShufflePack(
            mid_channels, 64, 2, upsample_kernel=3)
        self.conv_hr = nn.Conv2d(64, 64, 3, 1, 1)
        self.conv_last = nn.Conv2d(64, 3, 3, 1, 1)
        self.img_upsample = nn.Upsample(
            scale_factor=4, mode='bilinear', align_corners=False)

        # activation function
        self.lrelu = nn.LeakyReLU(negative_slope=0.1, inplace=True)

    def check_if_mirror_extended(self, lrs):
        """Check whether the input is a mirror-extended sequence.
        If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
        (t-1-i)-th frame.
        Args:
            lrs (tensor): Input LR images with shape (n, t, c, h, w)
        """
        self.is_mirror_extended = False
        if lrs.size(1) % 2 == 0:
            # Compare the first half with the time-reversed second half.
            lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
            if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
                self.is_mirror_extended = True

    def compute_flow(self, lrs):
        """Compute optical flow using SPyNet for feature warping.
        Note that if the input is an mirror-extended sequence, 'flows_forward'
        is not needed, since it is equal to 'flows_backward.flip(1)'.
        Args:
            lrs (tensor): Input LR images with shape (n, t, c, h, w)
        Return:
            tuple(Tensor): Optical flow. 'flows_forward' corresponds to the
                flows used for forward-time propagation (current to previous).
                'flows_backward' corresponds to the flows used for
                backward-time propagation (current to next).
        """
        n, t, c, h, w = lrs.size()
        # Pair up neighbouring frames: (frame_i, frame_{i+1}), i in [0, t-2].
        lrs_1 = lrs[:, :-1, :, :, :].reshape(-1, c, h, w)
        lrs_2 = lrs[:, 1:, :, :, :].reshape(-1, c, h, w)

        flows_backward = self.spynet(lrs_1, lrs_2).view(n, t - 1, 2, h, w)

        if self.is_mirror_extended:  # flows_forward = flows_backward.flip(1)
            flows_forward = None
        else:
            flows_forward = self.spynet(lrs_2, lrs_1).view(n, t - 1, 2, h, w)

        return flows_forward, flows_backward

    def forward(self, lrs):
        """Forward function for BasicVSR.
        Args:
            lrs (Tensor): Input LR sequence with shape (n, t, c, h, w).
        Returns:
            Tensor: Output HR sequence with shape (n, t, c, 4h, 4w).
        """
        n, t, c, h, w = lrs.size()
        assert h >= 64 and w >= 64, (
            'The height and width of inputs should be at least 64, '
            f'but got {h} and {w}.')

        # check whether the input is an extended sequence
        self.check_if_mirror_extended(lrs)

        # compute optical flow
        flows_forward, flows_backward = self.compute_flow(lrs)

        # backward-time propagation
        outputs = []
        feat_prop = lrs.new_zeros(n, self.mid_channels, h, w)
        for i in range(t - 1, -1, -1):
            if i < t - 1:  # no warping required for the last timestep
                flow = flows_backward[:, i, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
            feat_prop = torch.cat([lrs[:, i, :, :, :], feat_prop], dim=1)
            feat_prop = self.backward_resblocks(feat_prop)
            outputs.append(feat_prop)
        # Restore chronological order (the loop ran back-to-front).
        outputs = outputs[::-1]

        # forward-time propagation and upsampling
        feat_prop = torch.zeros_like(feat_prop)
        for i in range(0, t):
            lr_curr = lrs[:, i, :, :, :]
            if i > 0:  # no warping required for the first timestep
                if flows_forward is not None:
                    flow = flows_forward[:, i - 1, :, :, :]
                else:
                    # Mirror-extended input (see compute_flow): reuse the
                    # time-reversed backward flow.
                    flow = flows_backward[:, -i, :, :, :]
                feat_prop = flow_warp(feat_prop, flow.permute(0, 2, 3, 1))
            feat_prop = torch.cat([lr_curr, feat_prop], dim=1)
            feat_prop = self.forward_resblocks(feat_prop)

            # upsampling given the backward and forward features
            out = torch.cat([outputs[i], feat_prop], dim=1)
            out = self.lrelu(self.fusion(out))
            out = self.lrelu(self.upsample1(out))
            out = self.lrelu(self.upsample2(out))
            out = self.lrelu(self.conv_hr(out))
            out = self.conv_last(out)
            # Residual learning on top of the bilinearly upsampled frame.
            base = self.img_upsample(lr_curr)
            out += base
            outputs[i] = out

        return torch.stack(outputs, dim=1)

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.
        Args:
            pretrained (str, optional): Path for pretrained weights. If given
                None, pretrained weights will not be loaded. Defaults: None.
            strict (bool, optional): Whether strictly load the pretrained
                model. Defaults to True.
        """
        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=strict, logger=logger)
        elif pretrained is not None:
            raise TypeError(f'"pretrained" must be a str or None. '
                            f'But received {type(pretrained)}.')
class ResidualBlocksWithInputConv(nn.Module):
    """Residual blocks preceded by a channel-matching convolution.

    Args:
        in_channels (int): Number of input channels of the first conv.
        out_channels (int): Number of channels of the residual blocks.
            Default: 64.
        num_blocks (int): Number of residual blocks. Default: 30.
    """

    def __init__(self, in_channels, out_channels=64, num_blocks=30):
        super().__init__()
        layers = [
            # match the channel count expected by the residual blocks
            nn.Conv2d(in_channels, out_channels, 3, 1, 1, bias=True),
            nn.LeakyReLU(negative_slope=0.1, inplace=True),
            # the residual-block trunk
            make_layer(
                ResidualBlockNoBN, num_blocks, mid_channels=out_channels),
        ]
        self.main = nn.Sequential(*layers)

    def forward(self, feat):
        """Run the input through the conv + residual trunk.

        Args:
            feat (Tensor): Input feature with shape (n, in_channels, h, w)

        Returns:
            Tensor: Output feature with shape (n, out_channels, h, w)
        """
        return self.main(feat)
class SPyNet(nn.Module):
    """SPyNet network structure.
    The difference to the SPyNet in [tof.py] is that
        1. more SPyNetBasicModule is used in this version, and
        2. no batch normalization is used in this version.
    Paper:
        Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
    Args:
        pretrained (str): path for pre-trained SPyNet. Default: None.
    """

    def __init__(self, pretrained):
        super().__init__()

        # One refinement module per pyramid level (6 levels).
        self.basic_module = nn.ModuleList(
            [SPyNetBasicModule() for _ in range(6)])

        if isinstance(pretrained, str):
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=True, logger=logger)
        elif pretrained is not None:
            raise TypeError('[pretrained] should be str or None, '
                            f'but got {type(pretrained)}.')

        # Per-channel mean/std used to normalise both input images.
        self.register_buffer(
            'mean',
            torch.Tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1))
        self.register_buffer(
            'std',
            torch.Tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1))

    def compute_flow(self, ref, supp):
        """Compute flow from ref to supp.
        Note that in this function, the images are already resized to a
        multiple of 32.
        Args:
            ref (Tensor): Reference image with shape of (n, 3, h, w).
            supp (Tensor): Supporting image with shape of (n, 3, h, w).
        Returns:
            Tensor: Estimated optical flow: (n, 2, h, w).
        """
        n, _, h, w = ref.size()

        # normalize the input images
        ref = [(ref - self.mean) / self.std]
        supp = [(supp - self.mean) / self.std]

        # generate downsampled frames (a 6-level pyramid, halving each time)
        for level in range(5):
            ref.append(
                F.avg_pool2d(
                    input=ref[-1],
                    kernel_size=2,
                    stride=2,
                    count_include_pad=False))
            supp.append(
                F.avg_pool2d(
                    input=supp[-1],
                    kernel_size=2,
                    stride=2,
                    count_include_pad=False))
        # Coarsest level first: estimation proceeds coarse-to-fine.
        ref = ref[::-1]
        supp = supp[::-1]

        # flow computation
        flow = ref[0].new_zeros(n, 2, h // 32, w // 32)
        for level in range(len(ref)):
            if level == 0:
                flow_up = flow
            else:
                # Upsampled flow is multiplied by 2 because pixel
                # displacements double when resolution doubles.
                flow_up = F.interpolate(
                    input=flow,
                    scale_factor=2,
                    mode='bilinear',
                    align_corners=True) * 2.0

            # add the residue to the upsampled flow
            flow = flow_up + self.basic_module[level](
                torch.cat([
                    ref[level],
                    flow_warp(
                        supp[level],
                        flow_up.permute(0, 2, 3, 1),
                        padding_mode='border'), flow_up
                ], 1))

        return flow

    def forward(self, ref, supp):
        """Forward function of SPyNet.
        This function computes the optical flow from ref to supp.
        Args:
            ref (Tensor): Reference image with shape of (n, 3, h, w).
            supp (Tensor): Supporting image with shape of (n, 3, h, w).
        Returns:
            Tensor: Estimated optical flow: (n, 2, h, w).
        """

        # upsize to a multiple of 32 (required by the 5x downsampling above)
        h, w = ref.shape[2:4]
        w_up = w if (w % 32) == 0 else 32 * (w // 32 + 1)
        h_up = h if (h % 32) == 0 else 32 * (h // 32 + 1)
        ref = F.interpolate(
            input=ref, size=(h_up, w_up), mode='bilinear', align_corners=False)
        supp = F.interpolate(
            input=supp,
            size=(h_up, w_up),
            mode='bilinear',
            align_corners=False)

        # compute flow, and resize back to the original resolution
        flow = F.interpolate(
            input=self.compute_flow(ref, supp),
            size=(h, w),
            mode='bilinear',
            align_corners=False)

        # adjust the flow values: resizing changes the pixel units, so
        # displacements are rescaled by the original/padded size ratio.
        flow[:, 0, :, :] *= float(w) / float(w_up)
        flow[:, 1, :, :] *= float(h) / float(h_up)

        return flow
class SPyNetBasicModule(nn.Module):
    """Basic per-level refinement module for SPyNet.

    Paper:
        Optical Flow Estimation using a Spatial Pyramid Network, CVPR, 2017
    """

    def __init__(self):
        super().__init__()

        # 7x7 conv stack narrowing from the 8-channel input (ref image 3 +
        # warped neighbour 3 + initial flow 2) down to a 2-channel flow
        # residue; ReLU after every layer except the last.
        channel_specs = [(8, 32), (32, 64), (64, 32), (32, 16), (16, 2)]
        layers = []
        last = len(channel_specs) - 1
        for i, (c_in, c_out) in enumerate(channel_specs):
            layers.append(
                ConvModule(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=7,
                    stride=1,
                    padding=3,
                    norm_cfg=None,
                    act_cfg=None if i == last else dict(type='ReLU')))
        self.basic_module = nn.Sequential(*layers)

    def forward(self, tensor_input):
        """Refine the flow at one pyramid level.

        Args:
            tensor_input (Tensor): Input tensor with shape (b, 8, h, w).
                8 channels contain:
                [reference image (3), neighbor image (3), initial flow (2)].

        Returns:
            Tensor: Refined flow with shape (b, 2, h, w)
        """
        return self.basic_module(tensor_input)
| 14,148 | 32.608076 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_vimeo90k_multiple_gt_dataset.py | import os
import os.path as osp
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRVimeo90KMultipleGTDataset(BaseSRDataset):
    """Vimeo90K dataset for video super resolution for recurrent networks.

    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames, applies the configured transforms, and returns a dict containing
    the paired data and other information.

    It reads Vimeo90K keys from the txt file. Each line contains:

        1. video frame folder
        2. image shape

    Examples:
    ::
        00001/0266 (256,448,3)
        00001/0268 (256,448,3)

    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        ann_file (str | :obj:`Path`): Path to the annotation file.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        num_input_frames (int): Number of frames in each training sequence.
            Default: 7.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 ann_file,
                 pipeline,
                 scale,
                 num_input_frames=7,
                 test_mode=False):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = str(ann_file)
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for the Vimeo-90K dataset.

        Returns:
            list[dict]: One dict per clip with LQ paths, GT paths and key.
        """
        # The key is the first whitespace-separated token of each line.
        with open(self.ann_file, 'r') as fin:
            keys = [line.strip().split(' ')[0] for line in fin]

        # Frames inside each clip folder are named im1.png ... imN.png.
        frame_names = [
            f'im{i}.png' for i in range(1, self.num_input_frames + 1)
        ]

        data_infos = []
        for key in keys:
            key = key.replace('/', os.sep)
            data_infos.append(
                dict(
                    lq_path=[
                        osp.join(self.lq_folder, key, name)
                        for name in frame_names
                    ],
                    gt_path=[
                        osp.join(self.gt_folder, key, name)
                        for name in frame_names
                    ],
                    key=key))
        return data_infos
| 2,608 | 30.059524 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_folder_multiple_gt_dataset.py | import glob
import os
import os.path as osp
import mmcv
from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRFolderMultipleGTDataset(BaseSRDataset):
    """General dataset for video super resolution, used for recurrent networks.
    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.
    This dataset takes an annotation file specifying the sequences used in
    training or test. If no annotation file is provided, it assumes all video
    sequences under the root directory is used for training or test.
    In the annotation file (.txt), each line contains:
        1. folder name;
        2. number of frames in this sequence (in the same folder)
    Examples:
    ::
        calendar 41
        city 34
        foliage 49
        walk 47
    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        ann_file (str): The path to the annotation file. If None, we assume
            that all sequences in the folder is used. Default: None
        num_input_frames (None | int): The number of frames per iteration.
            If None, the whole clip is extracted. If it is a positive integer,
            a sequence of 'num_input_frames' frames is extracted from the clip.
            Note that non-positive integers are not accepted. Default: None.
        test_mode (bool): Store `True` when building test dataset.
            Default: `True`.
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 pipeline,
                 scale,
                 ann_file=None,
                 num_input_frames=None,
                 test_mode=True):
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.ann_file = ann_file
        if num_input_frames is not None and num_input_frames <= 0:
            raise ValueError('"num_input_frames" must be None or positive, '
                             f'but got {num_input_frames}.')
        self.num_input_frames = num_input_frames
        self.data_infos = self.load_annotations()

    def _load_annotations_from_file(self):
        """Build annotations from the entries of ``self.ann_file``.

        Returns:
            list[dict]: One dict per sequence listed in the annotation file.
        """
        data_infos = []

        ann_list = mmcv.list_from_file(self.ann_file)
        for ann in ann_list:
            # Robustness fix: split on any whitespace run (tabs, multiple
            # spaces) instead of exactly one space character; well-formed
            # files behave identically.
            key, sequence_length = ann.strip().split()
            if self.num_input_frames is None:
                num_input_frames = sequence_length
            else:
                num_input_frames = self.num_input_frames
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=key,
                    num_input_frames=int(num_input_frames),
                    sequence_length=int(sequence_length)))

        return data_infos

    def load_annotations(self):
        """Load annotations for the dataset.

        Returns:
            list[dict]: Returned list of dicts for paired paths of LQ and GT.
        """
        if self.ann_file:
            return self._load_annotations_from_file()

        # No annotation file: treat every sub-folder of lq_folder as one
        # sequence whose length is the number of PNG frames it contains.
        sequences = sorted(glob.glob(osp.join(self.lq_folder, '*')))

        data_infos = []
        for sequence in sequences:
            sequence_length = len(glob.glob(osp.join(sequence, '*.png')))
            if self.num_input_frames is None:
                num_input_frames = sequence_length
            else:
                num_input_frames = self.num_input_frames
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=sequence.replace(f'{self.lq_folder}{os.sep}', ''),
                    num_input_frames=num_input_frames,
                    sequence_length=sequence_length))

        return data_infos
| 4,220 | 33.884298 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/registry.py | from mmcv.utils import Registry
# Registry of dataset classes; populated via @DATASETS.register_module().
DATASETS = Registry('dataset')
# Registry of data-pipeline transform classes.
PIPELINES = Registry('pipeline')
| 145 | 23.333333 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/base_dataset.py | import copy
from abc import ABCMeta, abstractmethod
from torch.utils.data import Dataset
from .pipelines import Compose
class BaseDataset(Dataset, metaclass=ABCMeta):
    """Base class for datasets.

    All datasets should subclass it. Subclasses must override
    ``load_annotations``, which loads information and generates image lists.

    Args:
        pipeline (list[dict | callable]): A sequence of data transforms.
        test_mode (bool): If True, the dataset will work in test mode.
            Otherwise, in train mode.
    """

    def __init__(self, pipeline, test_mode=False):
        super().__init__()
        self.test_mode = test_mode
        self.pipeline = Compose(pipeline)

    @abstractmethod
    def load_annotations(self):
        """Abstract function for loading annotation.
        All subclasses should overwrite this function
        """

    def _apply_pipeline(self, idx):
        # Deep-copy so pipeline transforms can mutate the dict freely
        # without corrupting the cached annotations.
        results = copy.deepcopy(self.data_infos[idx])
        return self.pipeline(results)

    def prepare_train_data(self, idx):
        """Prepare training data.

        Args:
            idx (int): Index of the training batch data.

        Returns:
            dict: Returned training batch.
        """
        return self._apply_pipeline(idx)

    def prepare_test_data(self, idx):
        """Prepare testing data.

        Args:
            idx (int): Index for getting each testing batch.

        Returns:
            Tensor: Returned testing batch.
        """
        return self._apply_pipeline(idx)

    def __len__(self):
        """Length of the dataset.

        Returns:
            int: Length of the dataset.
        """
        return len(self.data_infos)

    def __getitem__(self, idx):
        """Get item at each call.

        Args:
            idx (int): Index for getting each item.
        """
        return (self.prepare_test_data(idx)
                if self.test_mode else self.prepare_train_data(idx))
| 2,006 | 24.405063 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/dataset_wrappers.py | from .registry import DATASETS
@DATASETS.register_module()
class RepeatDataset:
    """A wrapper of repeated dataset.

    The length of repeated dataset will be `times` larger than the original
    dataset. This is useful when the data loading time is long but the dataset
    is small. Using RepeatDataset can reduce the data loading time between
    epochs.

    Args:
        dataset (:obj:`Dataset`): The dataset to be repeated.
        times (int): Repeat times.
    """

    def __init__(self, dataset, times):
        self.dataset = dataset
        self.times = times
        # Cache the wrapped length; used to wrap indices in __getitem__.
        self._ori_len = len(self.dataset)

    def __getitem__(self, idx):
        """Fetch the item for ``idx`` modulo the wrapped dataset's length.

        Args:
            idx (int): Index for getting each item.
        """
        wrapped_idx = idx % self._ori_len
        return self.dataset[wrapped_idx]

    def __len__(self):
        """Length of the repeated dataset.

        Returns:
            int: Length of the dataset.
        """
        return self._ori_len * self.times
| 1,034 | 24.875 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/sr_reds_multiple_gt_dataset.py | from .base_sr_dataset import BaseSRDataset
from .registry import DATASETS
@DATASETS.register_module()
class SRREDSMultipleGTDataset(BaseSRDataset):
    """REDS dataset for video super resolution for recurrent networks.
    The dataset loads several LQ (Low-Quality) frames and GT (Ground-Truth)
    frames. Then it applies specified transforms and finally returns a dict
    containing paired data and other information.
    Args:
        lq_folder (str | :obj:`Path`): Path to a lq folder.
        gt_folder (str | :obj:`Path`): Path to a gt folder.
        num_input_frames (int): Number of input frames.
        pipeline (list[dict | callable]): A sequence of data transformations.
        scale (int): Upsampling scale ratio.
        val_partition (str): Validation partition mode. Choices ['official' or
            'REDS4']. Default: 'official'.
        repeat (int): Number of replication of the validation set. This is used
            to allow training REDS4 with more than 4 GPUs. For example, if
            8 GPUs are used, this number can be set to 2. Default: 1.
        test_mode (bool): Store `True` when building test dataset.
            Default: `False`.
    Raises:
        TypeError: If ``repeat`` is not an integer.
        ValueError: If ``val_partition`` is not 'official' or 'REDS4'
            (raised from ``load_annotations``).
    """

    def __init__(self,
                 lq_folder,
                 gt_folder,
                 num_input_frames,
                 pipeline,
                 scale,
                 val_partition='official',
                 repeat=1,
                 test_mode=False):
        # Fail fast: validate `repeat` before any state is initialised
        # (the original assigned self.repeat before checking the type).
        if not isinstance(repeat, int):
            raise TypeError('"repeat" must be an integer, but got '
                            f'{type(repeat)}.')
        self.repeat = repeat
        super().__init__(pipeline, scale, test_mode)
        self.lq_folder = str(lq_folder)
        self.gt_folder = str(gt_folder)
        self.num_input_frames = num_input_frames
        self.val_partition = val_partition
        self.data_infos = self.load_annotations()

    def load_annotations(self):
        """Load annotations for REDS dataset.
        Returns:
            list[dict]: A list of dicts for paired paths and other information.
        """
        # generate keys: clips are named '000' ... '269'
        keys = [f'{i:03d}' for i in range(0, 270)]

        if self.val_partition == 'REDS4':
            val_partition = ['000', '011', '015', '020']
        elif self.val_partition == 'official':
            val_partition = [f'{i:03d}' for i in range(240, 270)]
        else:
            raise ValueError(
                f'Wrong validation partition {self.val_partition}.'
                f'Supported ones are ["official", "REDS4"]')

        if self.test_mode:
            keys = [v for v in keys if v in val_partition]
            # Replicate the (small) validation set so it can be sharded
            # across more GPUs than it has clips.
            keys *= self.repeat
        else:
            keys = [v for v in keys if v not in val_partition]

        data_infos = []
        for key in keys:
            data_infos.append(
                dict(
                    lq_path=self.lq_folder,
                    gt_path=self.gt_folder,
                    key=key,
                    sequence_length=100,  # REDS has 100 frames for each clip
                    num_input_frames=self.num_input_frames))

        return data_infos
| 3,194 | 36.151163 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/base_sr_dataset.py | import copy
import os.path as osp
from collections import defaultdict
from pathlib import Path
from mmcv import scandir
from .base_dataset import BaseDataset
# Image-file suffixes (both cases) accepted by BaseSRDataset.scan_folder.
IMG_EXTENSIONS = ('.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm',
                  '.PPM', '.bmp', '.BMP', '.tif', '.TIF', '.tiff', '.TIFF')
class BaseSRDataset(BaseDataset):
    """Base class for super resolution datasets.
    """

    def __init__(self, pipeline, scale, test_mode=False):
        super().__init__(pipeline, test_mode)
        self.scale = scale

    @staticmethod
    def scan_folder(path):
        """Obtain image path list (including sub-folders) from a given folder.

        Args:
            path (str | :obj:`Path`): Folder path.

        Returns:
            list[str]: image list obtained form given folder.
        """
        if not isinstance(path, (str, Path)):
            raise TypeError("'path' must be a str or a Path object, "
                            f'but received {type(path)}.')

        folder = str(path)
        images = [
            osp.join(folder, v)
            for v in scandir(folder, suffix=IMG_EXTENSIONS, recursive=True)
        ]
        assert images, f'{folder} has no valid image file.'
        return images

    def __getitem__(self, idx):
        """Get item at each call.

        Args:
            idx (int): Index for getting each item.
        """
        results = copy.deepcopy(self.data_infos[idx])
        # Expose the upsampling scale to the pipeline transforms.
        results['scale'] = self.scale
        return self.pipeline(results)

    def evaluate(self, results, logger=None):
        """Evaluate with different metrics.

        Args:
            results (list[tuple]): The output of forward_test() of the model.

        Return:
            dict: Evaluation results dict.
        """
        if not isinstance(results, list):
            raise TypeError(f'results must be a list, but got {type(results)}')
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: '
            f'{len(results)} != {len(self)}')

        # Gather the per-sample values of each metric.
        per_metric = defaultdict(list)
        for res in results:
            for metric, val in res['eval_result'].items():
                per_metric[metric].append(val)

        for metric, val_list in per_metric.items():
            assert len(val_list) == len(self), (
                f'Length of evaluation result of {metric} is {len(val_list)}, '
                f'should be {len(self)}')

        # average the results
        return {
            metric: sum(val_list) / len(self)
            for metric, val_list in per_metric.items()
        }
| 2,779 | 30.590909 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/builder.py | import copy
import platform
import random
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import build_from_cfg
from packaging import version
from torch.utils.data import ConcatDataset, DataLoader
from .dataset_wrappers import RepeatDataset
from .registry import DATASETS
from .samplers import DistributedSampler
if platform.system() != 'Windows':
    import resource

    # Raise the soft file-descriptor limit to at least 4096 (capped at the
    # hard limit) — presumably to avoid 'too many open files' errors from
    # many dataloader workers; TODO confirm against deployment environments.
    rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
    base_soft_limit = rlimit[0]
    hard_limit = rlimit[1]
    soft_limit = min(max(4096, base_soft_limit), hard_limit)
    resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
def _concat_dataset(cfg, default_args=None):
    """Build one concatenated dataset from a config with multiple ann files.

    Every entry of ``cfg['ann_file']`` spawns a dataset of the same type;
    the resulting datasets are chained into a single ``ConcatDataset``.

    Args:
        cfg (dict): Dataset config whose ``ann_file`` is a list/tuple.
        default_args (dict, optional): Default initialization arguments
            forwarded to each sub-dataset. Default: None.

    Returns:
        Dataset: The concatenated dataset.
    """
    datasets = []
    for ann_file in cfg['ann_file']:
        # Deep-copy so each sub-dataset gets its own, independent config.
        sub_cfg = copy.deepcopy(cfg)
        sub_cfg['ann_file'] = ann_file
        datasets.append(build_dataset(sub_cfg, default_args))
    return ConcatDataset(datasets)
def build_dataset(cfg, default_args=None):
    """Build a dataset from a config dict.

    Supported config shapes:

    - list/tuple of configs: each entry is built recursively and the results
      are wrapped in a ``ConcatDataset``;
    - ``type == 'RepeatDataset'``: builds ``cfg['dataset']`` and repeats it
      ``cfg['times']`` times;
    - ``ann_file`` given as a list/tuple: builds one dataset per annotation
      file (same type) and concatenates them;
    - anything else: built directly from the ``DATASETS`` registry.

    Args:
        cfg (dict | list | tuple): Config; a dict must contain the key "type".
        default_args (dict, optional): Default initialization arguments.
            Default: None.

    Returns:
        Dataset: The constructed dataset.
    """
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(c, default_args) for c in cfg])
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(
            build_dataset(cfg['dataset'], default_args), cfg['times'])
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     persistent_workers=True,
                     **kwargs):
    """Build a PyTorch ``DataLoader``.

    In distributed training every GPU/process gets its own dataloader fed by
    a ``DistributedSampler``; in non-distributed training a single dataloader
    serves all GPUs.

    Args:
        dataset (:obj:`Dataset`): A PyTorch dataset.
        samples_per_gpu (int): Batch size of each GPU.
        workers_per_gpu (int): Subprocesses used for data loading per GPU.
        num_gpus (int): Number of GPUs; only used when ``dist`` is False.
            Default: 1.
        dist (bool): Whether training/testing is distributed. Default: True.
        shuffle (bool): Shuffle the data every epoch. Default: True.
        seed (int | None): Base random seed for workers. Default: None.
        drop_last (bool): Drop the last incomplete batch. Default: False.
        pin_memory (bool): Use pinned memory in the DataLoader. Default: True.
        persistent_workers (bool): Keep worker processes (and their Dataset
            instances) alive between epochs; only effective on
            PyTorch >= 1.7.0. Default: True.
        kwargs (dict, optional): Extra keyword arguments for ``DataLoader``.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        # The sampler already shards and (optionally) shuffles per epoch, so
        # the DataLoader itself must not shuffle again.
        sampler = DistributedSampler(
            dataset,
            world_size,
            rank,
            shuffle=shuffle,
            samples_per_gpu=samples_per_gpu,
            seed=seed)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    # Deterministic per-worker seeding only when an explicit seed was given.
    if seed is not None:
        init_fn = partial(
            worker_init_fn, num_workers=num_workers, rank=rank, seed=seed)
    else:
        init_fn = None

    # `persistent_workers` only exists on PyTorch >= 1.7.0.
    if version.parse(torch.__version__) >= version.parse('1.7.0'):
        kwargs['persistent_workers'] = persistent_workers

    return DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)
def worker_init_fn(worker_id, num_workers, rank, seed):
    """Seed the Python, NumPy and PyTorch RNGs of one dataloader worker.

    The effective seed is ``num_workers * rank + worker_id + seed``, so that
    every worker of every rank draws from a distinct random stream while
    remaining reproducible for a fixed user seed.

    Args:
        worker_id (int): Index of the worker within its dataloader.
        num_workers (int): Number of workers per dataloader.
        rank (int): Rank of the current process in distributed training.
        seed (int): User-provided base seed.
    """
    worker_seed = seed + worker_id + rank * num_workers
    random.seed(worker_seed)
    np.random.seed(worker_seed)
    torch.manual_seed(worker_seed)
| 6,177 | 32.945055 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/datasets/samplers/distributed_sampler.py | from __future__ import division
import math
import torch
from torch.utils.data import DistributedSampler as _DistributedSampler
from mmedit.core.utils import sync_random_seed
class DistributedSampler(_DistributedSampler):
    """Distributed sampler with ``shuffle`` support and synced seeding.

    Extends ``torch.utils.data.DistributedSampler`` so that (a) shuffling can
    be toggled even on old PyTorch versions lacking the ``shuffle`` argument,
    (b) the per-replica sample count is a multiple of ``samples_per_gpu``,
    and (c) all ranks agree on one random seed so their index slices never
    overlap.
    """

    def __init__(self,
                 dataset,
                 num_replicas=None,
                 rank=None,
                 shuffle=True,
                 samples_per_gpu=1,
                 seed=0):
        super().__init__(dataset, num_replicas=num_replicas, rank=rank)
        self.shuffle = shuffle
        self.samples_per_gpu = samples_per_gpu

        # Round the per-replica count up to whole batches; this overrides the
        # parent's num_samples/total_size, fixing its padding behaviour for
        # samples_per_gpu > 1.
        self.num_samples_per_replica = int(
            math.ceil(
                len(self.dataset) * 1.0 / self.num_replicas / samples_per_gpu))
        self.num_samples = self.num_samples_per_replica * self.samples_per_gpu
        self.total_size = self.num_samples * self.num_replicas

        # Every rank must shuffle with the same seed so that each one can
        # take a disjoint slice of the same permutation.
        self.seed = sync_random_seed(seed)

        # Padding cannot cover a dataset smaller than one global batch.
        if len(dataset) < self.num_replicas * samples_per_gpu:
            raise ValueError(
                'You may use too small dataset and our distributed '
                'sampler cannot pad your dataset correctly. We highly '
                'recommend you to use fewer GPUs to finish your work')

    def __iter__(self):
        if self.shuffle:
            # Fold the epoch into the seed: each epoch yields a fresh
            # permutation, yet all replicas still agree on it.
            generator = torch.Generator()
            generator.manual_seed(self.epoch + self.seed)
            indices = torch.randperm(
                len(self.dataset), generator=generator).tolist()
        else:
            indices = list(range(len(self.dataset)))

        # Pad with leading indices so the total divides evenly across ranks.
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size

        # Each rank takes every num_replicas-th index, offset by its rank.
        indices = indices[self.rank:self.total_size:self.num_replicas]
        assert len(indices) == self.num_samples
        return iter(indices)
| 2,892 | 39.180556 | 80 | py |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.