code stringlengths 17 6.64M |
|---|
def write_infinite_segment_header(f):
    """Write a Matroska Segment element (ID 0x18538067) with unknown (-1) size."""
    segment_header = ebml_element(408125543, '', -1)
    f.write(segment_header)
|
def random_uid():
    """Return a random UID assembled from four random 32-bit big-endian integers."""
    def rint():
        # one uniformly random 32-bit value
        return int(random.random() * (256 ** 4))
    parts = [ben(rint()) for _ in range(4)]
    return ''.join(parts)
|
def example():
    """Demo: mux img/NNN.jpg frames (V_MJPEG) and q.mp3 audio into a Matroska
    stream written to stdout."""
    write_ebml_header(sys.stdout, 'matroska', 2, 2)
    write_infinite_segment_header(sys.stdout)
    # Segment Info (0x1549A966): SegmentUID, title, muxing/writing app
    sys.stdout.write(ebml_element(357149030, (((('' + ebml_element(29604, random_uid())) + ebml_element(31657, 'mkvgen.py test')) + ebml_element(19840, 'mkvgen.py')) + ebml_element(22337, 'mkvgen.py'))))
    # Tracks (0x1654AE6B): track 1 = V_MJPEG 640x480, track 2 = A_MPEG/L3
    sys.stdout.write(ebml_element(374648427, (('' + ebml_element(174, (((((('' + ebml_element(215, ben(1))) + ebml_element(29637, ben(119))) + ebml_element(131, ben(1))) + ebml_element(21358, 'mjpeg data')) + ebml_element(134, 'V_MJPEG')) + ebml_element(224, (('' + ebml_element(176, ben(640))) + ebml_element(186, ben(480))))))) + ebml_element(174, ((((('' + ebml_element(215, ben(2))) + ebml_element(29637, ben(120))) + ebml_element(131, ben(2))) + ebml_element(21358, 'content of mp3 file')) + ebml_element(134, 'A_MPEG/L3'))))))
    mp3file = open('q.mp3', 'rb')
    # skip the first 500000 bytes (presumably ID3/junk at file start -- TODO confirm)
    mp3file.read(500000)

    def mp3framesgenerator(f):
        """Yield chunks of the mp3 stream split on frame-sync boundaries
        (0xFF 0xFn) found beyond offset 700 of the working buffer."""
        debt = ''
        while True:
            for i in xrange(0, (len(debt) + 1)):
                if (i >= (len(debt) - 1)):
                    # buffer exhausted without finding a sync point: read more
                    debt = (debt + f.read(8192))
                    break
                # mp3 frame sync: 11 set bits; the i > 700 floor keeps chunks large
                if ((ord(debt[i]) == 255) and ((ord(debt[(i + 1)]) & 240) == 240) and (i > 700)):
                    if (i > 0):
                        (yield debt[0:i])
                    debt = debt[i:]
                    break

    mp3 = mp3framesgenerator(mp3file)
    mp3.next()  # prime the generator / discard the partial first chunk
    for i in xrange(0, 530):
        framefile = open((('img/' + str(i)) + '.jpg'), 'rb')
        framedata = framefile.read()
        framefile.close()
        # NOTE(review): random.random() < 1 is always true -- presumably a
        # leftover frame-dropping experiment
        if (random.random() < 1):
            # Cluster (0x1F43B675) with Timecode (0xE7) and one SimpleBlock (0xA3)
            # for track 1; timestamps advance 4*26 ms per video frame
            sys.stdout.write(ebml_element(524531317, (('' + ebml_element(231, ben(int(((i * 26) * 4))))) + ebml_element(163, ((((('' + ebml_encode_number(1)) + chr(0)) + chr(0)) + chr(0)) + framedata)))))
        # interleave 4 audio blocks (track 2) per video frame
        for u in xrange(0, 4):
            mp3f = mp3.next()
            if (random.random() < 1):
                sys.stdout.write(ebml_element(524531317, (('' + ebml_element(231, ben((((i * 26) * 4) + (u * 26))))) + ebml_element(163, ((((('' + ebml_encode_number(2)) + chr(0)) + chr(0)) + chr(0)) + mp3f)))))
|
class MatroskaIndex(mkvparse.MatroskaHandler):
    """Collects the CodecPrivate blob of track 1 and a (pos, length, keyframe)
    index of every frame seen by the parser."""

    def __init__(self):
        # list of (byte offset in file, frame length, is-keyframe) tuples
        self.frameindex = []

    def tracks_available(self):
        # assumes track 1 carries the CodecPrivate of interest -- TODO confirm
        (_, self.config_record) = self.tracks[1]['CodecPrivate']

    def frame(self, track_id, timestamp, pos, length, more_laced_frames, duration, keyframe, invisible, discardable):
        self.frameindex.append((pos, length, keyframe))
|
def mkvindex(f):
    """Parse mkv stream f and return (track-1 CodecPrivate blob,
    list of (pos, length, keyframe) tuples for all frames)."""
    indexer = MatroskaIndex()
    mkvparse.mkvparse(f, indexer)
    return indexer.config_record, indexer.frameindex
|
def simple_gen(of, config_record, w, h, framedata):
    """Emit a minimal one-track (V_MS/VFW/FOURCC) Matroska stream to `of`:
    header, infinite segment, a single w x h video track carrying
    `config_record` as CodecPrivate, and one cluster with all `framedata`
    entries as keyframe SimpleBlocks at timecode 0."""
    mkvgen.write_ebml_header(of, 'matroska', 2, 2)
    mkvgen.write_infinite_segment_header(of)
    video_settings = ebml_element(176, ben(w)) + ebml_element(186, ben(h))
    track_entry = (ebml_element(215, ben(1))
                   + ebml_element(29637, ben(1))
                   + ebml_element(131, ben(1))
                   + ebml_element(134, 'V_MS/VFW/FOURCC')
                   + ebml_element(224, video_settings)
                   + ebml_element(25506, config_record))
    of.write(ebml_element(374648427, ebml_element(174, track_entry)))
    blocks = []
    for fd in framedata:
        # track 1, relative timecode 0, keyframe flag (0x80)
        payload = ebml_encode_number(1) + chr(0) + chr(0) + chr(128) + fd
        blocks.append(ebml_element(163, payload))
    of.write(ebml_element(524531317, ebml_element(231, ben(0)) + ''.join(blocks)))
|
def get_major_bit_number(n):
    """
    Take a uint8; return (index of the most significant set bit counted down
    from bit 7, the value with that bit cleared).

    Examples:
        0b10010101 -> (0, 0b00010101)
        0b00010101 -> (3, 0b00000101)
        0b01111111 -> (1, 0b00111111)
    """
    if not n:
        raise Exception('Bad number')
    position = 0
    mask = 128
    while n & mask == 0:
        position += 1
        mask >>= 1
    return position, n & ~mask
|
def read_matroska_number(f, unmodified=False, signed=False):
    """
    Read an EBML variable-length number from file-like object f.
    `unmodified` means don't clear the length-marker bit (as in Element IDs).
    Returns (number, length in bytes).

    Raises StopIteration on EOF.  The reserved "all value bits set" encoding
    (unknown size) is returned as -1.
    See examples in the "parse_matroska_number" function.
    """
    if unmodified and signed:
        raise Exception('Contradictary arguments')
    first_byte = f.read(1)
    if not first_byte:  # EOF check that works for both str and bytes streams
        raise StopIteration
    r = ord(first_byte)
    (n, r2) = get_major_bit_number(r)
    if not unmodified:
        r = r2
    # accumulate the n continuation bytes, big-endian
    i = n
    while i:
        r = (r * 256) + ord(f.read(1))
        i -= 1
    if signed:
        # Bias by half the representable range: 2**(7n+6)-1, matching
        # parse_matroska_number and the EBML signed-vint definition.
        # (Was 2**(7n+7)-1 here: off by one bit.)
        r -= (2 ** ((7 * n) + 6)) - 1
    elif r == (2 ** ((7 * n) + 7)) - 1:
        # all value bits set: reserved "unknown size" marker
        return (-1, n + 1)
    return (r, n + 1)
|
def parse_matroska_number(data, pos, unmodified=False, signed=False):
    """
    Parse an EBML variable-length number from data[pos:], just like
    read_matroska_number.  `unmodified` means don't clear the length-marker
    bit (as in Element IDs).  Returns (number, new position).

    Examples:
        "\\x81" -> (1, pos+1)
        "\\x40\\x01" -> (1, pos+2)
        "\\x20\\x00\\x01" -> (1, pos+3)
        "\\x3F\\xFF\\xFF" -> (0x1FFFFF, pos+3)
        "\\x20\\x00\\x01" unmodified -> (0x200001, pos+3)
        "\\xBF" signed -> (0, pos+1)
        "\\xBE" signed -> (-1, pos+1)
        "\\xC0" signed -> (1, pos+1)
        "\\x5F\\xEF" signed -> (-16, pos+2)
    """
    if unmodified and signed:
        raise Exception('Contradictary arguments')
    value = ord(data[pos])
    pos += 1
    extra_bytes, cleared = get_major_bit_number(value)
    if not unmodified:
        value = cleared
    for _ in range(extra_bytes):
        value = value * 256 + ord(data[pos])
        pos += 1
    if signed:
        # bias by half the representable range
        value -= (2 ** ((7 * extra_bytes) + 6)) - 1
    elif value == (2 ** ((7 * extra_bytes) + 7)) - 1:
        # all value bits set: reserved "unknown size" marker
        return (-1, pos)
    return (value, pos)
|
def parse_xiph_number(data, pos):
    """
    Parse a Xiph lacing number from data[pos:].  Each 0xFF byte adds 255 and
    continues; the first non-0xFF byte terminates the number.
    Returns (number, new position).

    Examples:
        "\\x01" -> (1, pos+1)
        "\\x55" -> (0x55, pos+1)
        "\\xFF\\x04" -> (0x103, pos+2)
        "\\xFF\\xFF\\x04" -> (0x202, pos+3)
        "\\xFF\\xFF\\x00" -> (0x1FE, pos+3)
    """
    total = 0
    byte = ord(data[pos])
    pos += 1
    while byte == 255:
        total += byte
        byte = ord(data[pos])
        pos += 1
    return (total + byte, pos)
|
def parse_fixedlength_number(data, pos, length, signed=False):
    """
    Parse a big-endian integer from data[pos:pos+length].
    Returns (number, new position).

    Examples:
        "\\x01" -> (0x1, pos+1)
        "\\x55" -> (0x55, pos+1)
        "\\x55" signed -> (0x55, pos+1)
        "\\xFF\\x04" -> (0xFF04, pos+2)
        "\\xFF\\x04" signed -> (-0x00FC, pos+2)
    """
    value = 0
    for byte in data[pos:pos + length]:
        value = value * 256 + ord(byte)
    # two's complement: the top bit of the first byte carries the sign
    if signed and (ord(data[pos]) & 128):
        value -= 2 ** (8 * length)
    return (value, pos + length)
|
def read_fixedlength_number(f, length, signed=False):
    """Read `length` bytes from f and parse them as a big-endian number
    (see parse_fixedlength_number).  Returns only the number."""
    buf = f.read(length)
    number, _ = parse_fixedlength_number(buf, 0, length, signed)
    return number
|
def read_ebml_element_header(f):
    """
    Read an EBML element header (Element ID followed by declared size).
    Returns (id, element size, total header size in bytes).
    """
    element_id, id_length = read_matroska_number(f, unmodified=True)
    size, size_length = read_matroska_number(f)
    return (element_id, size, id_length + size_length)
|
class EbmlElementType():
    # How an EBML element's payload should be decoded (see read_simple_element).
    VOID = 0         # padding / payload carries no meaning
    MASTER = 1       # container holding child elements
    UNSIGNED = 2     # big-endian unsigned integer
    SIGNED = 3       # big-endian signed integer
    TEXTA = 4        # ASCII string
    TEXTU = 5        # UTF-8 string
    BINARY = 6       # raw bytes
    FLOAT = 7        # 32- or 64-bit IEEE big-endian float
    DATE = 8         # signed integer timestamp (converted in read_simple_element)
    JUST_GO_ON = 10  # read the header only, then keep parsing inside the element
|
def read_simple_element(f, type_, size):
    """
    Read one element payload of `size` bytes from f and decode it according
    to `type_` (an EET constant).

    Returns the decoded value: int for UNSIGNED/SIGNED, str for text types,
    a child-element tree for MASTER, a unix timestamp (float) for DATE,
    float for FLOAT (None for unsupported float sizes), raw bytes otherwise,
    and '' when size == 0.
    """
    # (removed unused `date = None` local -- apparent typo for `data`)
    if size == 0:
        return ''
    if type_ == EET.UNSIGNED:
        data = read_fixedlength_number(f, size, False)
    elif type_ == EET.SIGNED:
        data = read_fixedlength_number(f, size, True)
    elif type_ == EET.TEXTA:
        data = f.read(size)
        data = data.replace(b'\x00', b'')  # drop padding NULs before decoding
        data = data.decode('ascii')
    elif type_ == EET.TEXTU:
        data = f.read(size)
        data = data.replace(b'\x00', b'')
        data = data.decode('UTF-8')
    elif type_ == EET.MASTER:
        data = read_ebml_element_tree(f, size)
    elif type_ == EET.DATE:
        data = read_fixedlength_number(f, size, True)
        data *= 1e-09  # stored in nanoseconds
        # EBML dates count from 2001-01-01; shift to the unix epoch
        data += (datetime.datetime(2001, 1, 1) - datetime.datetime(1970, 1, 1)).total_seconds()
    elif type_ == EET.FLOAT:
        if size == 4:
            data = unpack('>f', f.read(4))[0]
        elif size == 8:
            data = unpack('>d', f.read(8))[0]
        else:
            # consume the payload anyway so the stream stays in sync
            data = read_fixedlength_number(f, size, False)
            sys.stderr.write('mkvparse: Floating point of size %d is not supported\n' % size)
            data = None
    else:
        data = f.read(size)
    return data
|
def read_ebml_element_tree(f, total_size):
    """
    Build a tree of elements, reading f until total_size is consumed.
    Don't use for the whole segment, it's not Haskell.

    Returns a list of (element_name, (type, element_value)) pairs;
    element_value can itself be such a list for MASTER elements.
    """
    childs = []
    while total_size > 0:
        (id_, size, hsize) = read_ebml_element_header(f)
        if size == -1:
            # Unknown-size element inside a sized parent: unrecoverable here.
            # (Fixed: the format string previously got 3 args for 2 slots,
            # raising TypeError instead of printing the warning.)
            sys.stderr.write('mkvparse: Element %x without size? Damaged data? Skipping %d bytes\n' % (id_, total_size))
            f.read(total_size)
            break
        if size > total_size:
            sys.stderr.write('mkvparse: Element %x with size %d? Damaged data? Skipping %d bytes\n' % (id_, size, total_size))
            f.read(total_size)
            break
        # default for elements we don't know about
        type_ = EET.BINARY
        name = 'unknown_%x' % id_
        if id_ in element_types_names:
            (type_, name) = element_types_names[id_]
        data = read_simple_element(f, type_, size)
        total_size -= (size + hsize)
        childs.append((name, (type_, data)))
    return childs
|
class MatroskaHandler():
    """Base class for mkvparse users: override the callbacks you care about."""

    def tracks_available(self):
        """Called once self.tracks has been populated."""
        pass

    def segment_info_available(self):
        """Called once self.segment_info has been populated."""
        pass

    def frame(self, track_id, timestamp, data, more_laced_frames, duration, keyframe, invisible, discardable):
        """Called for every decoded frame (including each frame of a lace)."""
        pass

    def ebml_top_element(self, id_, name_, type_, data_):
        """Called after each top-level EBML element has been handled."""
        pass

    def before_handling_an_element(self):
        """Called just before the parser starts reading a new element."""
        pass

    def begin_handling_ebml_element(self, id_, name, type_, headersize, datasize):
        # The returned type controls how the element body is read;
        # the default keeps the declared type unchanged.
        return type_

    def element_data_available(self, id_, name, type_, headersize, data):
        pass
|
def handle_block(buffer, buffer_pos, handler, cluster_timecode, timecode_scale=1000000, duration=None, header_removal_headers_for_tracks={}):
    '''
    Decode a block, handling all lacings, send it to handler with appropriate timestamp, track number
    '''
    pos = 0
    # block header: track number (vint), 2-byte signed relative timecode, flags
    (tracknum, pos) = parse_matroska_number(buffer, pos, signed=False)
    (tcode, pos) = parse_fixedlength_number(buffer, pos, 2, signed=True)
    flags = ord(buffer[pos])
    pos += 1
    f_keyframe = ((flags & 128) == 128)
    f_invisible = ((flags & 8) == 8)
    f_discardable = ((flags & 1) == 1)
    # bits 1-2: lacing mode (0 = none, 2 = Xiph, 4 = fixed-size, 6 = EBML)
    laceflags = (flags & 6)
    # absolute timestamp in seconds
    block_timecode = ((cluster_timecode + tcode) * (timecode_scale * 1e-09))
    header_removal_prefix = b''
    if (tracknum in header_removal_headers_for_tracks):
        # header-removal compression is detected upstream but not implemented here
        raise NotImplementedError
    if (laceflags == 0):
        # no lacing: the rest of the buffer is a single frame
        handler.frame(tracknum, block_timecode, (buffer_pos + pos), (len(buffer) - pos), 0, duration, f_keyframe, f_invisible, f_discardable)
        return
    # laced block: first byte is (frame count - 1)
    numframes = ord(buffer[pos])
    pos += 1
    numframes += 1
    lengths = []
    if (laceflags == 2):
        # Xiph lacing: explicit sizes for all but the last frame
        accumlength = 0
        for i in range((numframes - 1)):
            (l, pos) = parse_xiph_number(buffer, pos)
            lengths.append(l)
            accumlength += l
        # last frame takes whatever remains
        lengths.append(((len(buffer) - pos) - accumlength))
    elif (laceflags == 6):
        # EBML lacing: first size absolute, the rest as signed deltas
        accumlength = 0
        if numframes:
            (flength, pos) = parse_matroska_number(buffer, pos, signed=False)
            lengths.append(flength)
            accumlength += flength
        for i in range((numframes - 2)):
            (l, pos) = parse_matroska_number(buffer, pos, signed=True)
            flength += l
            lengths.append(flength)
            accumlength += flength
        lengths.append(((len(buffer) - pos) - accumlength))
    elif (laceflags == 4):
        # fixed-size lacing: remaining bytes split evenly
        fl = int(((len(buffer) - pos) / numframes))
        for i in range(numframes):
            lengths.append(fl)
    # deliver each laced frame; all share the block's timestamp and flags
    more_laced_frames = (numframes - 1)
    for i in lengths:
        handler.frame(tracknum, block_timecode, (buffer_pos + pos), i, more_laced_frames, duration, f_keyframe, f_invisible, f_discardable)
        pos += i
        more_laced_frames -= 1
|
def resync(f):
    """
    Scan f byte-by-byte for the next Cluster, Segment or Tracks element after
    damaged data.

    Returns (element id, element size, size-field+id length) for the element
    found, or (None, None, None) at EOF.  (Fixed: the EOF case used to return
    a 2-tuple, which made every caller's 3-way unpack raise ValueError.)
    """
    sys.stderr.write('mvkparse: Resyncing\n')
    while True:
        b = f.read(1)
        if b == b'':
            return (None, None, None)
        if b == b'\x1f':
            b2 = f.read(3)
            if b2 == b'C\xb6u':  # 0x1F43B675: Cluster
                (seglen, x) = read_matroska_number(f)
                return (524531317, seglen, x + 4)
        if b == b'\x18':
            b2 = f.read(3)
            if b2 == b'S\x80g':  # 0x18538067: Segment
                (seglen, x) = read_matroska_number(f)
                return (408125543, seglen, x + 4)
        if b == b'\x16':
            b2 = f.read(3)
            if b2 == b'T\xaek':  # 0x1654AE6B: Tracks
                (seglen, x) = read_matroska_number(f)
                return (374648427, seglen, x + 4)
|
def mkvparse(f, handler):
    """
    Read mkv file f and call handler methods when track or segment information
    is ready or when a frame is read.  Handles lacing and timecodes (except
    per-track scaling).  On parse errors it resyncs to the next Cluster /
    Segment / Tracks element instead of aborting.
    """
    timecode_scale = 1000000   # nanoseconds per timecode unit, until Info says otherwise
    current_cluster_timecode = 0
    # element header carried over from a successful resync()
    resync_element_id = None
    resync_element_size = None
    resync_element_headersize = None
    header_removal_headers_for_tracks = {}
    while f:
        (id_, size, hsize) = (None, None, None)
        tree = None
        data = None
        (type_, name) = (None, None)
        try:
            if not resync_element_id:
                try:
                    handler.before_handling_an_element()
                    (id_, size, hsize) = read_ebml_element_header(f)
                except StopIteration:
                    break
                if not (id_ in element_types_names):
                    sys.stderr.write('mkvparse: Unknown element with id %x and size %d\n' % (id_, size))
                    (resync_element_id, resync_element_size, resync_element_headersize) = resync(f)
                    if resync_element_id:
                        continue
                    else:
                        break
            else:
                # consume the header found by the previous resync()
                id_ = resync_element_id
                size = resync_element_size
                hsize = resync_element_headersize
                resync_element_id = None
                resync_element_size = None
                resync_element_headersize = None
            # (fixed: this lookup was duplicated on two consecutive lines)
            (type_, name) = element_types_names[id_]
            # the handler may override how the element body is read
            type_ = handler.begin_handling_ebml_element(id_, name, type_, hsize, size)
            if type_ == EET.MASTER:
                tree = read_ebml_element_tree(f, size)
                data = tree
        except Exception:
            traceback.print_exc()
            handler.before_handling_an_element()
            (resync_element_id, resync_element_size, resync_element_headersize) = resync(f)
            if resync_element_id:
                continue
            else:
                break
        if (name == 'EBML') and (type(data) == list):
            # sanity-check the EBML header versions and doctype
            d = dict(tree)
            if 'EBMLReadVersion' in d:
                if d['EBMLReadVersion'][1] > 1:
                    sys.stderr.write('mkvparse: Warning: EBMLReadVersion too big\n')
            if 'DocTypeReadVersion' in d:
                if d['DocTypeReadVersion'][1] > 2:
                    sys.stderr.write('mkvparse: Warning: DocTypeReadVersion too big\n')
            dt = d['DocType'][1]
            if (dt != 'matroska') and (dt != 'webm'):
                sys.stderr.write('mkvparse: Warning: EBML DocType is not "matroska" or "webm"')
        elif (name == 'Info') and (type(data) == list):
            handler.segment_info = tree
            handler.segment_info_available()
            d = dict(tree)
            if 'TimecodeScale' in d:
                timecode_scale = d['TimecodeScale'][1]
        elif (name == 'Tracks') and (type(data) == list):
            handler.tracks = {}
            for (ten, (_t, track)) in tree:
                if ten != 'TrackEntry':
                    continue
                d = dict(track)
                n = d['TrackNumber'][1]
                handler.tracks[n] = d
                # translate the numeric TrackType into a friendly name
                tt = d['TrackType'][1]
                if tt == 1:
                    d['type'] = 'video'
                elif tt == 2:
                    d['type'] = 'audio'
                elif tt == 3:
                    d['type'] = 'complex'
                elif tt == 16:
                    d['type'] = 'logo'
                elif tt == 17:
                    d['type'] = 'subtitle'
                elif tt == 18:
                    d['type'] = 'button'
                elif tt == 32:
                    d['type'] = 'control'
                if 'TrackTimecodeScale' in d:
                    sys.stderr.write('mkvparse: Warning: TrackTimecodeScale is not supported\n')
                if 'ContentEncodings' in d:
                    # only "header removal" compression (algo 3) is recognized
                    try:
                        compr = dict(d['ContentEncodings'][1][0][1][1][0][1][1])
                        if compr['ContentCompAlgo'][1] == 3:
                            header_removal_headers_for_tracks[n] = compr['ContentCompSettings'][1]
                        else:
                            sys.stderr.write('mkvparse: Warning: compression other than header removal is not supported\n')
                    except:
                        sys.stderr.write('mkvparse: Warning: unsuccessfully tried to handle header removal compression\n')
            handler.tracks_available()
        elif (name == 'Timecode') and (type_ == EET.UNSIGNED):
            data = read_fixedlength_number(f, size, False)
            current_cluster_timecode = data
        elif (name == 'SimpleBlock') and (type_ == EET.BINARY):
            pos = f.tell()
            data = f.read(size)
            handle_block(data, pos, handler, current_cluster_timecode, timecode_scale, None, header_removal_headers_for_tracks)
        elif (name == 'BlockGroup') and (type_ == EET.MASTER):
            d2 = dict(tree)
            duration = None
            raise NotImplementedError
        elif (type_ != EET.JUST_GO_ON) and (type_ != EET.MASTER):
            data = read_simple_element(f, type_, size)
        handler.ebml_top_element(id_, name, type_, data)
|
class PollableQueue(object):
    """A Queue that you can poll().
    Only works with a single producer.

    A pipe carries one token byte per queued item, so the read end becomes
    readable exactly when the deque is non-empty: consumers can
    select()/epoll() on get_fd().  Linux-only (/proc, epoll, F_SETPIPE_SZ).
    """

    def __init__(self, maxlen=None):
        # the pipe capacity bounds the queue length; clamp to the system max
        with open('/proc/sys/fs/pipe-max-size') as f:
            max_maxlen = int(f.read().rstrip())
        if maxlen is None:
            maxlen = max_maxlen
        else:
            maxlen = min(maxlen, max_maxlen)
        self._maxlen = maxlen
        self._q = deque()
        (self._get_fd, self._put_fd) = os.pipe()
        fcntl.fcntl(self._get_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        fcntl.fcntl(self._put_fd, fcntl.F_SETFL, os.O_NONBLOCK)
        # F_SETLEASE + 7 == F_SETPIPE_SZ (constant missing from older fcntl modules)
        fcntl.fcntl(self._get_fd, (fcntl.F_SETLEASE + 7), self._maxlen)
        fcntl.fcntl(self._put_fd, (fcntl.F_SETLEASE + 7), self._maxlen)
        get_poller = select.epoll()
        put_poller = select.epoll()
        get_poller.register(self._get_fd, select.EPOLLIN)
        put_poller.register(self._put_fd, select.EPOLLOUT)
        self._get_poll = get_poller.poll
        self._put_poll = put_poller.poll

    def get_fd(self):
        """fd that becomes readable when the queue is non-empty."""
        return self._get_fd

    def put_fd(self):
        """fd that becomes writable when the queue has free space."""
        return self._put_fd

    def put(self, item, block=True, timeout=None):
        if block:
            while self._put_poll(timeout if timeout is not None else -1):
                try:
                    return self.put_nowait(item)
                except OSError as e:
                    if e.errno != 11:  # EAGAIN: lost a race, poll again
                        raise
            raise Full()
        else:
            return self.put_nowait(item)

    def put_nowait(self, item):
        self._q.appendleft(item)
        try:
            os.write(self._put_fd, b'\x00')
        except OSError:
            # Fixed: if the pipe is full the token write fails; undo the
            # append so a retrying put() cannot enqueue the item twice.
            self._q.popleft()
            raise

    def get(self, block=True, timeout=None):
        if block:
            while self._get_poll(timeout if timeout is not None else -1):
                try:
                    return self.get_nowait()
                except OSError as e:
                    if e.errno != 11:
                        raise
            raise Empty()
        else:
            return self.get_nowait()

    def get_nowait(self):
        os.read(self._get_fd, 1)
        return self._q.pop()

    def get_multiple(self, block=True, timeout=None):
        if block:
            if self._get_poll(timeout if timeout is not None else -1):
                return self.get_multiple_nowait()
            else:
                raise Empty()
        else:
            return self.get_multiple_nowait()

    def get_multiple_nowait(self, max_messages=None):
        # one token byte per item: bytes read == number of items to pop
        num_read = len(os.read(self._get_fd, max_messages or self._maxlen))
        return [self._q.pop() for _ in range(num_read)]

    def empty(self):
        return len(self._q) == 0

    def full(self):
        return len(self._q) >= self._maxlen

    def close(self):
        os.close(self._get_fd)
        os.close(self._put_fd)

    def __len__(self):
        return len(self._q)
|
class Route(object):
    """Discovers a route's segments (log + camera files) under a data directory."""

    def __init__(self, route_name, data_dir):
        # canonical route names use '|' where filenames use '_'
        self.route_name = route_name.replace('_', '|')
        self._segments = self._get_segments(data_dir)

    @property
    def segments(self):
        return self._segments

    def log_paths(self):
        """Log path per segment number (index == segment number), None for gaps."""
        max_seg_number = self._segments[(- 1)].canonical_name.segment_num
        log_path_by_seg_num = {s.canonical_name.segment_num: s.log_path for s in self._segments}
        return [log_path_by_seg_num.get(i, None) for i in xrange((max_seg_number + 1))]

    def camera_paths(self):
        """Camera file path per segment number, None for gaps."""
        max_seg_number = self._segments[(- 1)].canonical_name.segment_num
        camera_path_by_seg_num = {s.canonical_name.segment_num: s.camera_path for s in self._segments}
        return [camera_path_by_seg_num.get(i, None) for i in xrange((max_seg_number + 1))]

    def _get_segments(self, data_dir):
        """Scan data_dir for segment files in any of three layouts (explorer
        flat files, op segment directories, or a route directory with numeric
        subdirs).  Returns RouteSegments sorted by segment number; raises
        ValueError if none are found."""
        files = os.listdir(data_dir)
        segment_files = defaultdict(list)
        for f in files:
            fullpath = os.path.join(data_dir, f)
            explorer_match = re.match(EXPLORER_FILE_RE, f)
            op_match = re.match(OP_SEGMENT_DIR_RE, f)
            if explorer_match:
                # flat layout: one file per segment, name embeds the segment
                (segment_name, fn) = explorer_match.groups()
                if segment_name.replace('_', '|').startswith(self.route_name):
                    segment_files[segment_name].append((fullpath, fn))
            elif (op_match and os.path.isdir(fullpath)):
                # one directory per segment
                (segment_name,) = op_match.groups()
                if segment_name.startswith(self.route_name):
                    for seg_f in os.listdir(fullpath):
                        segment_files[segment_name].append((os.path.join(fullpath, seg_f), seg_f))
            elif (f == self.route_name):
                # route directory containing numeric segment subdirectories
                for seg_num in os.listdir(fullpath):
                    if (not seg_num.isdigit()):
                        continue
                    segment_name = '{}--{}'.format(self.route_name, seg_num)
                    for seg_f in os.listdir(os.path.join(fullpath, seg_num)):
                        segment_files[segment_name].append((os.path.join(fullpath, seg_num, seg_f), seg_f))
        segments = []
        for (segment, files) in segment_files.iteritems():
            # pick the first recognized log / camera file for each segment
            try:
                log_path = next((path for (path, filename) in files if (filename in LOG_FILENAMES)))
            except StopIteration:
                log_path = None
            try:
                camera_path = next((path for (path, filename) in files if (filename in CAMERA_FILENAMES)))
            except StopIteration:
                camera_path = None
            segments.append(RouteSegment(segment, log_path, camera_path))
        if (len(segments) == 0):
            raise ValueError('Could not find segments for route {} in data directory {}'.format(self.route_name, data_dir))
        return sorted(segments, key=(lambda seg: seg.canonical_name.segment_num))
|
class RouteSegment(object):
    """One segment of a route: its parsed name plus log and camera file paths."""

    def __init__(self, name, log_path, camera_path):
        self._name = RouteSegmentName(name)
        self.log_path = log_path
        self.camera_path = camera_path

    @property
    def name(self):
        """Segment name as a plain string."""
        return str(self._name)

    @property
    def canonical_name(self):
        """Segment name as a RouteSegmentName object."""
        return self._name
|
class RouteSegmentName(object):
    """Wraps a segment name string of the form '<route>--<number>'."""

    def __init__(self, name_str):
        self._segment_name_str = name_str
        # split on the last '--' so route names may themselves contain '--'
        route_part, seg_num_str = name_str.rsplit('--', 1)
        self._route_name_str = route_part
        self._num = int(seg_num_str)

    @property
    def segment_num(self):
        """Numeric suffix of the segment name."""
        return self._num

    def __str__(self):
        return self._segment_name_str
|
class _FrameReaderDict(dict):
    """Maps segment index -> FrameReader, constructing readers lazily on first
    access and caching them in the dict itself."""

    def __init__(self, camera_paths, cache_paths, framereader_kwargs, *args, **kwargs):
        super(_FrameReaderDict, self).__init__(*args, **kwargs)
        if cache_paths is None:
            cache_paths = {}
        if not isinstance(cache_paths, dict):
            # allow a plain sequence of cache paths, indexed like camera_paths
            cache_paths = dict(enumerate(cache_paths))
        self._camera_paths = camera_paths
        self._cache_paths = cache_paths
        self._framereader_kwargs = framereader_kwargs

    def __missing__(self, key):
        if key >= len(self._camera_paths) or self._camera_paths[key] is None:
            raise KeyError('Segment index out of bounds: {}'.format(key))
        reader = FrameReader(self._camera_paths[key], self._cache_paths.get(key), **self._framereader_kwargs)
        self[key] = reader
        return reader
|
class RouteFrameReader(object):
    """Reads frames across routes and route segments by frameId."""

    def __init__(self, camera_paths, cache_paths, frame_id_lookup, **kwargs):
        """Create a route framereader.

        camera_paths: camera file path per segment index (None for gaps).
        cache_paths: dict or sequence of cache paths per segment index.
        frame_id_lookup: frameId -> (segment_num, segment_id).
        kwargs: forwarded to the FrameReader function.  If cache_prefix is
        included, that path will also be used for frame position indices.
        """
        # first segment that actually has a camera file (used for w/h)
        self._first_camera_idx = next((i for i in xrange(len(camera_paths)) if (camera_paths[i] is not None)))
        self._frame_readers = _FrameReaderDict(camera_paths, cache_paths, kwargs)
        self._frame_id_lookup = frame_id_lookup

    @property
    def w(self):
        """Width of each frame in pixels."""
        return self._frame_readers[self._first_camera_idx].w

    @property
    def h(self):
        """Height of each frame in pixels."""
        return self._frame_readers[self._first_camera_idx].h

    def get(self, frame_id, **kwargs):
        """Get a frame for a route based on frameId, or None when the id is
        unknown or marked invalid (-1).  kwargs forwarded to
        BaseFrameReader.get; "count" is not implemented."""
        (segment_num, segment_id) = self._frame_id_lookup.get(frame_id, (None, None))
        if (segment_num is None) or (segment_num == -1) or (segment_id == -1):
            return None
        else:
            return self.get_from_segment(segment_num, segment_id)

    def get_from_segment(self, segment_num, segment_id, **kwargs):
        """Get frame `segment_id` within segment `segment_num`.
        kwargs forwarded to BaseFrameReader.get; "count" is not implemented."""
        if 'count' in kwargs:
            raise NotImplementedError('count')
        return self._frame_readers[segment_num].get(segment_id, **kwargs)[0]

    def close(self):
        # Fixed: snapshot the readers (values, not keys) BEFORE clearing --
        # the old code cleared the dict and then iterated the same, now
        # empty, object, so no reader was ever closed.
        frs = list(self._frame_readers.values())
        self._frame_readers.clear()
        for fr in frs:
            fr.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        self.close()
|
def get_arg_parser():
    """Build the argument parser for the unlog-and-save-to-file tool."""
    parser = argparse.ArgumentParser(
        description='Unlogging and save to file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('data_dir', nargs='?',
                        help='Path to directory in which log and camera files are located.')
    # '#' is accepted on the command line in place of '|'
    parser.add_argument('route_name', type=(lambda x: x.replace('#', '|')), nargs='?',
                        help='The route whose messages will be published.')
    parser.add_argument('--out_path', nargs='?', default='/data/ubloxRaw.stream',
                        help='Output pickle file path')
    return parser
|
def main(argv):
    """Extract all ubloxRaw messages from a route's logs into one binary file."""
    args = get_arg_parser().parse_args(sys.argv[1:])
    if not args.data_dir:
        print('Data directory invalid.')
        return
    if not args.route_name:
        # infer the route name from the data directory's final path component
        args.route_name = os.path.basename(args.data_dir)
        args.data_dir = os.path.dirname(args.data_dir)
    route = Route(args.route_name, args.data_dir)
    lr = MultiLogIterator(route.log_paths(), wraparound=False)
    with open(args.out_path, 'wb') as f:
        try:
            done = False
            i = 0
            while not done:
                msg = next(lr)
                if not msg:
                    break
                smsg = msg.as_builder()
                typ = smsg.which()
                if typ == 'ubloxRaw':
                    f.write(smsg.to_bytes())
                    i += 1
        except StopIteration:
            print('All done')
        # fixed message grammar ('Writed' -> 'Wrote')
        print('Wrote {} msgs'.format(i))
|
def can_list_to_can_capnp(can_msgs, msgtype='can'):
    """Pack (address, busTime, data, src) tuples into a new capnp event of
    type 'can' or 'sendcan' and return the builder."""
    dat = messaging.new_message()
    dat.init(msgtype, len(can_msgs))
    msg_list = dat.sendcan if msgtype == 'sendcan' else dat.can
    for i, can_msg in enumerate(can_msgs):
        cc = msg_list[i]
        cc.address = can_msg[0]
        cc.busTime = can_msg[1]
        cc.dat = str(can_msg[2])
        cc.src = can_msg[3]
    return dat
|
def can_capnp_to_can_list(can, src_filter=None):
    """Flatten capnp CAN messages into (address, busTime, dat, src) tuples,
    keeping only messages whose src is in src_filter (None keeps all)."""
    return [(msg.address, msg.busTime, msg.dat, msg.src)
            for msg in can
            if src_filter is None or msg.src in src_filter]
|
def can_health():
    """Read the device's health record over USB control transfer (request 210),
    retrying on transient USB errors.  Returns voltage/current/started."""
    while True:
        try:
            dat = handle.controlRead(usb1.TYPE_VENDOR | usb1.RECIPIENT_DEVICE, 210, 0, 0, 16)
            break
        except (USBErrorIO, USBErrorOverflow):
            cloudlog.exception('CAN: BAD HEALTH, RETRYING')
    # two uint32s followed by a bool byte
    v, i, started = struct.unpack('IIB', dat[0:9])
    return {'voltage': v, 'current': i, 'started': bool(started)}
|
def __parse_can_buffer(dat):
    """Split a raw USB bulk buffer into (address, busTime, data, src) tuples.
    Each message occupies a 16-byte slot: two little-endian uint32 header
    words (address in the top 11 bits of word 1; timestamp, src and data
    length packed into word 2) followed by up to 8 data bytes."""
    msgs = []
    for off in range(0, len(dat), 16):
        slot = dat[off:off + 16]
        word1, word2 = struct.unpack('II', slot[0:8])
        address = word1 >> 21
        bus_time = word2 >> 16
        src = (word2 >> 4) & 15
        length = word2 & 15
        msgs.append((address, bus_time, slot[8:8 + length], src))
    return msgs
|
def can_send_many(arr):
    """Pack (addr, _, data, bus) messages into 16-byte slots and bulk-write
    them to the device, retrying on transient USB errors.  Addresses that do
    not fit in 11 bits (>= 2048) are silently skipped."""
    packed = []
    for addr, _, dat, alt in arr:
        if addr >= 2048:
            continue
        header = struct.pack('II', (addr << 21) | 1, len(dat) | (alt << 4))
        packed.append((header + dat).ljust(16, '\x00'))
    while True:
        try:
            handle.bulkWrite(3, ''.join(packed))
            break
        except (USBErrorIO, USBErrorOverflow):
            cloudlog.exception('CAN: BAD SEND MANY, RETRYING')
|
def can_recv():
    """Bulk-read one batch of raw CAN data (up to 256 slots) from the device,
    retrying on transient USB errors, and return the parsed message list."""
    dat = ''
    while True:
        try:
            dat = handle.bulkRead(1, 16 * 256)
            break
        except (USBErrorIO, USBErrorOverflow):
            cloudlog.exception('CAN: BAD RECV, RETRYING')
    return __parse_can_buffer(dat)
|
def can_init():
    """Find the USB device (vid 0xbbaa, pid 0xddcc), claim interface 0 and
    enable all-output safety mode.  Sets the module-level handle/context;
    exits the process if no device is found."""
    global handle, context
    handle = None
    cloudlog.info('attempting can init')
    context = usb1.USBContext()
    for device in context.getDeviceList(skip_on_error=True):
        # 48042 == 0xbbaa, 56780 == 0xddcc
        if device.getVendorID() == 48042 and device.getProductID() == 56780:
            handle = device.open()
            handle.claimInterface(0)
            handle.controlWrite(64, 220, SAFETY_ALLOUTPUT, 0, b'')
    if handle is None:
        cloudlog.warn('CAN NOT FOUND')
        exit(-1)
    cloudlog.info('got handle')
    cloudlog.info('can init done')
|
def boardd_mock_loop():
    """Bridge mode: forward 'can' zmq messages to the device and publish the
    device's replies on 'sendcan'."""
    context = zmq.Context()
    can_init()
    handle.controlWrite(64, 220, SAFETY_ALLOUTPUT, 0, b'')
    logcan = messaging.sub_sock(context, service_list['can'].port)
    sendcan = messaging.pub_sock(context, service_list['sendcan'].port)
    while True:
        tsc = messaging.drain_sock(logcan, wait_for_one=True)
        snd = []
        for evt in tsc:
            snd += can_capnp_to_can_list(evt.can)
        # only forward buses 0 and 1
        snd = [m for m in snd if m[-1] <= 1]
        can_send_many(snd)
        can_msgs = can_recv()
        print('sent %d got %d' % (len(snd), len(can_msgs)))
        m = can_list_to_can_capnp(can_msgs)
        sendcan.send(m.to_bytes())
|
def boardd_test_loop():
    """Exercise the device: send counter-stamped test frames forever and print
    how many messages come back each cycle."""
    can_init()
    cnt = 0
    while True:
        can_send_many([[187, 0, '\xaa\xaa\xaa\xaa', 0],
                       [170, 0, '\xaa\xaa\xaa\xaa' + struct.pack('!I', cnt), 1]])
        can_msgs = can_recv()
        print('got %d' % len(can_msgs))
        time.sleep(0.01)
        cnt += 1
|
def boardd_loop(rate=200):
    """Main loop at `rate` Hz: publish received CAN on 'can', publish health
    once per second, and forward 'sendcan' messages to the device."""
    rk = Ratekeeper(rate)
    context = zmq.Context()
    can_init()
    logcan = messaging.pub_sock(context, service_list['can'].port)
    health_sock = messaging.pub_sock(context, service_list['health'].port)
    sendcan = messaging.sub_sock(context, service_list['sendcan'].port)
    while True:
        # once per second, publish a health message
        if rk.frame % rate == 0:
            health = can_health()
            msg = messaging.new_message()
            msg.init('health')
            msg.health.voltage = health['voltage']
            msg.health.current = health['current']
            msg.health.started = health['started']
            health_sock.send(msg.to_bytes())
        can_msgs = can_recv()
        if len(can_msgs) > 0:
            dat = can_list_to_can_capnp(can_msgs)
            logcan.send(dat.to_bytes())
        tsc = messaging.recv_sock(sendcan)
        if tsc is not None:
            can_send_many(can_capnp_to_can_list(tsc.sendcan))
        rk.keep_time()
|
def boardd_proxy_loop(rate=200, address='192.168.2.251'):
    """Proxy mode at `rate` Hz: publish the local device's CAN as 'sendcan'
    and forward 'can' messages from a remote host to the local device."""
    rk = Ratekeeper(rate)
    context = zmq.Context()
    can_init()
    logcan = messaging.sub_sock(context, service_list['can'].port, addr=address)
    sendcan = messaging.pub_sock(context, service_list['sendcan'].port)
    while True:
        can_msgs = can_recv()
        if len(can_msgs) > 0:
            dat = can_list_to_can_capnp(can_msgs, 'sendcan')
            sendcan.send(dat.to_bytes())
        tsc = messaging.recv_sock(logcan)
        if tsc is not None:
            cl = can_capnp_to_can_list(tsc.can)
            can_send_many(cl)
        rk.keep_time()
|
def main(gctx=None):
    """Entry point: pick the boardd loop variant via MOCK/PROXY/BOARDTEST
    environment variables (default: the real boardd loop)."""
    env = os.environ
    if env.get('MOCK') is not None:
        boardd_mock_loop()
    elif env.get('PROXY') is not None:
        boardd_proxy_loop()
    elif env.get('BOARDTEST') is not None:
        boardd_test_loop()
    else:
        boardd_loop()
|
def pygame_modules_have_loaded():
    """True once both the pygame display and font modules are initialized."""
    display_ready = pygame.display.get_init()
    font_ready = pygame.font.get_init()
    return display_ready and font_ready
|
def ui_thread(addr, frame_address):
    """Debug UI loop: subscribe to the 'frame' zmq socket and render each
    received camera frame (YUV I420 -> RGB, warped and scaled) with pygame."""
    context = zmq.Context()
    pygame.init()
    pygame.font.init()
    assert pygame_modules_have_loaded()
    size = ((_FULL_FRAME_SIZE[0] * SCALE), (_FULL_FRAME_SIZE[1] * SCALE))
    pygame.display.set_caption('comma one debug UI')
    screen = pygame.display.set_mode(size, pygame.DOUBLEBUF)
    camera_surface = pygame.surface.Surface(((_FULL_FRAME_SIZE[0] * SCALE), (_FULL_FRAME_SIZE[1] * SCALE)), 0, 24).convert()
    frame = context.socket(zmq.SUB)
    frame.connect((frame_address or ('tcp://%s:%d' % (addr, service_list['frame'].port))))
    frame.setsockopt(zmq.SUBSCRIBE, '')
    # reusable RGB buffers: `img` is the warped output, `imgff` the full frame
    img = np.zeros((_FULL_FRAME_SIZE[1], _FULL_FRAME_SIZE[0], 3), dtype='uint8')
    imgff = np.zeros((_FULL_FRAME_SIZE[1], _FULL_FRAME_SIZE[0], 3), dtype=np.uint8)
    while 1:
        # drain the event queue so the window stays responsive
        list(pygame.event.get())
        screen.fill((64, 64, 64))
        fpkt = recv_one(frame)
        yuv_img = fpkt.frame.image
        if fpkt.frame.transform:
            yuv_transform = np.array(fpkt.frame.transform).reshape(3, 3)
        else:
            # fallback transform: flip both axes (rotate 180) -- presumably
            # matches the sensor orientation; TODO confirm
            yuv_transform = np.array([[(- 1.0), 0.0, (_FULL_FRAME_SIZE[0] - 1)], [0.0, (- 1.0), (_FULL_FRAME_SIZE[1] - 1)], [0.0, 0.0, 1.0]])
        # only render when the payload is exactly one I420 frame (w*h*3/2 bytes)
        if (yuv_img and (len(yuv_img) == (((_FULL_FRAME_SIZE[0] * _FULL_FRAME_SIZE[1]) * 3) // 2))):
            yuv_np = np.frombuffer(yuv_img, dtype=np.uint8).reshape(((_FULL_FRAME_SIZE[1] * 3) // 2), (- 1))
            cv2.cvtColor(yuv_np, cv2.COLOR_YUV2RGB_I420, dst=imgff)
            cv2.warpAffine(imgff, np.dot(yuv_transform, _BB_TO_FULL_FRAME)[:2], (img.shape[1], img.shape[0]), dst=img, flags=cv2.WARP_INVERSE_MAP)
        else:
            # missing or wrong-sized frame: show black
            img.fill(0)
        (height, width) = img.shape[:2]
        img_resized = cv2.resize(img, ((SCALE * width), (SCALE * height)), interpolation=cv2.INTER_CUBIC)
        # pygame surfaces are (x, y) ordered, numpy images are (row, col)
        pygame.surfarray.blit_array(camera_surface, img_resized.swapaxes(0, 1))
        screen.blit(camera_surface, (0, 0))
        pygame.display.flip()
|
def get_arg_parser():
    """Build the argument parser for the replay-UI tool."""
    parser = argparse.ArgumentParser(
        description='Show replay data in a UI.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('ip_address', nargs='?', default='127.0.0.1',
                        help='The ip address on which to receive zmq messages.')
    parser.add_argument('--frame-address', default=None,
                        help='The ip address on which to receive zmq messages.')
    return parser
|
def asymmetric_l2_loss(u, tau):
    """Expectile (asymmetric L2) loss: u**2 weighted by tau where u >= 0 and
    by (1 - tau) where u < 0, averaged over all elements."""
    weight = torch.abs(tau - (u < 0).float())
    return torch.mean(weight * u ** 2)
|
class IQL(nn.Module):
    """Implicit Q-Learning: expectile value regression plus
    advantage-weighted behavioral cloning for the policy."""

    def __init__(self, qf, vf, policy, max_steps, tau, alpha, value_lr=0.0001, policy_lr=0.0001, discount=0.99, beta=0.005):
        super().__init__()
        self.qf = qf.to(DEFAULT_DEVICE)
        # frozen target Q network, tracked by exponential moving average
        self.q_target = copy.deepcopy(qf).requires_grad_(False).to(DEFAULT_DEVICE)
        self.vf = vf.to(DEFAULT_DEVICE)
        self.policy = policy.to(DEFAULT_DEVICE)
        self.v_optimizer = torch.optim.Adam(self.vf.parameters(), lr=value_lr)
        self.q_optimizer = torch.optim.Adam(self.qf.parameters(), lr=value_lr)
        self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=policy_lr)
        # NOTE(review): created but .step() is never called in this class
        self.policy_lr_schedule = CosineAnnealingLR(self.policy_optimizer, max_steps)
        self.tau = tau            # expectile for the value loss
        self.alpha = alpha        # inverse temperature for advantage weighting
        self.discount = discount  # gamma
        self.beta = beta          # EMA rate for the target Q network
        self.step = 0
        self.pretrain_step = 0

    def iql_update(self, observations, actions, next_observations, rewards, terminals):
        """One IQL gradient step on V, Q and the policy from a transition batch."""
        with torch.no_grad():
            target_q = self.q_target(observations, actions)
            next_v = self.vf(next_observations)
        # V update: expectile regression of V(s) toward target Q(s, a)
        v = self.vf(observations)
        adv = (target_q - v)
        v_loss = asymmetric_l2_loss(adv, self.tau)
        self.v_optimizer.zero_grad(set_to_none=True)
        v_loss.backward()
        self.v_optimizer.step()
        # Q update: TD regression toward r + gamma * (1 - done) * V(s')
        targets = (rewards + (((1.0 - terminals.float()) * self.discount) * next_v))
        qs = self.qf.both(observations, actions)
        q_loss = (sum((F.mse_loss(q, targets) for q in qs)) / len(qs))
        self.q_optimizer.zero_grad(set_to_none=True)
        q_loss.backward()
        self.q_optimizer.step()
        update_exponential_moving_average(self.q_target, self.qf, self.beta)
        # policy update: behavioral cloning weighted by clamped exp(alpha * adv)
        weight = torch.exp((self.alpha * adv))
        weight = torch.clamp_max(weight, EXP_ADV_MAX).detach()
        policy_out = self.policy(observations)
        bc_losses = (- policy_out.log_prob(actions))
        policy_loss = torch.mean((weight * bc_losses))
        self.policy_optimizer.zero_grad(set_to_none=True)
        policy_loss.backward()
        self.policy_optimizer.step()
        wandb.log({'p_loss': policy_loss}, step=self.step)
        # log the value-side metrics only every 10000 steps
        if (((self.step + 1) % 10000) == 0):
            wandb.log({'v_loss': v_loss, 'v_value': v.mean(), 'q_loss': q_loss, 'q_value': qs[0].mean()}, step=self.step)
        self.step += 1

    def save(self, filename):
        """Persist only the policy network's weights to <filename>-policy_network."""
        torch.save(self.policy.state_dict(), (filename + '-policy_network'))
        print(f'***save models to (unknown)***')

    def load(self, filename):
        """Load policy weights saved by save(), mapped onto the CPU."""
        self.policy.load_state_dict(torch.load((filename + '-policy_network'), map_location=torch.device('cpu')))
        print(f'***load the RvS policy model from (unknown)***')
|
def get_env_and_dataset(env_name, max_episode_steps, normalize):
    """Create the gym environment and its D4RL q-learning dataset.

    Rescales rewards for locomotion tasks (or shifts them for antmaze),
    optionally normalizes observations, and converts every array to tensors.
    Returns (env, dataset, mean, std).
    """
    env = gym.make(env_name)
    dataset = d4rl.qlearning_dataset(env)
    locomotion_tasks = ('halfcheetah', 'hopper', 'walker2d')
    if any(task in env_name for task in locomotion_tasks):
        # Normalize rewards by the dataset's return range, per IQL convention.
        min_ret, max_ret = return_range(dataset, max_episode_steps)
        print(f'Dataset returns have range [{min_ret}, {max_ret}]')
        dataset['rewards'] /= (max_ret - min_ret)
        dataset['rewards'] *= max_episode_steps
    elif 'antmaze' in env_name:
        dataset['rewards'] -= 1.0
    print('***********************************************************************')
    print(f'Normalize for the state: {normalize}')
    print('***********************************************************************')
    if normalize:
        mean = dataset['observations'].mean(0)
        std = dataset['observations'].std(0) + 0.001
        dataset['observations'] = (dataset['observations'] - mean) / std
        dataset['next_observations'] = (dataset['next_observations'] - mean) / std
    else:
        # Identity normalization so callers can apply (obs - mean) / std blindly.
        obs_dim = dataset['observations'].shape[1]
        mean, std = np.zeros(obs_dim), np.ones(obs_dim)
    for key, value in dataset.items():
        dataset[key] = torchify(value)
    return env, dataset, mean, std
|
def main(args):
    """Train IQL on a D4RL dataset with periodic evaluation.

    Logs metrics to wandb, appends per-evaluation normalized returns to a
    seed-specific text file, and saves the final policy checkpoint.

    Fix: the evaluation log file was previously opened without a context
    manager, leaking the handle if training raised; it is now closed
    deterministically via `with`.
    """
    wandb.init(project='project_name', entity='your_wandb_id', name=f'{args.env_name}', config={'env_name': args.env_name, 'normalize': args.normalize, 'tau': args.tau, 'alpha': args.alpha, 'seed': args.seed, 'type': args.type, 'value_lr': args.value_lr, 'policy_lr': args.policy_lr})
    torch.set_num_threads(1)
    (env, dataset, mean, std) = get_env_and_dataset(args.env_name, args.max_episode_steps, args.normalize)
    obs_dim = dataset['observations'].shape[1]
    act_dim = dataset['actions'].shape[1]
    set_seed(args.seed, env=env)
    policy = GaussianPolicy(obs_dim, act_dim, hidden_dim=1024, n_hidden=2)
    iql = IQL(qf=TwinQ(obs_dim, act_dim, hidden_dim=args.hidden_dim, n_hidden=args.n_hidden), vf=ValueFunction(obs_dim, hidden_dim=args.hidden_dim, n_hidden=args.n_hidden), policy=policy, max_steps=args.train_steps, tau=args.tau, alpha=args.alpha, discount=args.discount, value_lr=args.value_lr, policy_lr=args.policy_lr)

    def eval_iql(step):
        # Mean normalized D4RL score over n_eval_episodes rollouts.
        eval_returns = np.array([evaluate_iql(env, policy, mean, std) for _ in range(args.n_eval_episodes)])
        normalized_returns = (d4rl.get_normalized_score(args.env_name, eval_returns) * 100.0)
        wandb.log({'return mean': eval_returns.mean(), 'normalized return mean': normalized_returns.mean()}, step=step)
        return normalized_returns.mean()

    algo_name = f'{args.type}_tau-{args.tau}_alpha-{args.alpha}_normalize-{args.normalize}'
    os.makedirs(f'{args.log_dir}/{args.env_name}/{algo_name}', exist_ok=True)
    # Context manager guarantees the log file is closed even if training raises.
    with open(f'{args.log_dir}/{args.env_name}/{algo_name}/seed-{args.seed}.txt', 'w') as eval_log:
        for step in trange(args.train_steps):
            if (args.type == 'iql'):
                iql.iql_update(**sample_batch(dataset, args.batch_size))
            if (((step + 1) % args.eval_period) == 0):
                average_returns = eval_iql(step)
                eval_log.write(f'{(step + 1)} {average_returns}\n')
                eval_log.flush()
    os.makedirs(f'{args.model_dir}/{args.env_name}', exist_ok=True)
    iql.save(f'{args.model_dir}/{args.env_name}/{algo_name}')
|
def get_env_and_dataset(env_name, max_episode_steps, normalize):
    """Create the gym env and its D4RL dataset, with reward/observation
    preprocessing; returns (env, dataset, mean, std).

    NOTE(review): this is a byte-identical duplicate of an earlier
    get_env_and_dataset in this file; the later definition silently
    shadows the earlier one. Consider deduplicating.
    """
    env = gym.make(env_name)
    dataset = d4rl.qlearning_dataset(env)
    if any(((s in env_name) for s in ('halfcheetah', 'hopper', 'walker2d'))):
        # Locomotion: rescale rewards by the dataset return range.
        (min_ret, max_ret) = return_range(dataset, max_episode_steps)
        print(f'Dataset returns have range [{min_ret}, {max_ret}]')
        dataset['rewards'] /= (max_ret - min_ret)
        dataset['rewards'] *= max_episode_steps
    elif ('antmaze' in env_name):
        # Antmaze: shift sparse 0/1 rewards to -1/0.
        dataset['rewards'] -= 1.0
    print('***********************************************************************')
    print(f'Normalize for the state: {normalize}')
    print('***********************************************************************')
    if normalize:
        mean = dataset['observations'].mean(0)
        std = (dataset['observations'].std(0) + 0.001)
        dataset['observations'] = ((dataset['observations'] - mean) / std)
        dataset['next_observations'] = ((dataset['next_observations'] - mean) / std)
    else:
        # Identity statistics so callers can always apply (obs - mean) / std.
        obs_dim = dataset['observations'].shape[1]
        (mean, std) = (np.zeros(obs_dim), np.ones(obs_dim))
    for (k, v) in dataset.items():
        dataset[k] = torchify(v)
    return (env, dataset, mean, std)
|
def main(args):
    """Train POR on a D4RL dataset, optionally pretraining a behavior goal
    policy, with periodic evaluation and checkpointing.

    NOTE(review): a different main() for IQL appears earlier in this file;
    this later definition shadows it.
    """
    wandb.init(project='project_name', entity='your_wandb_id', name=f'{args.env_name}', config={'env_name': args.env_name, 'normalize': args.normalize, 'tau': args.tau, 'alpha': args.alpha, 'seed': args.seed, 'type': args.type, 'value_lr': args.value_lr, 'policy_lr': args.policy_lr, 'pretrain': args.pretrain})
    torch.set_num_threads(1)
    (env, dataset, mean, std) = get_env_and_dataset(args.env_name, args.max_episode_steps, args.normalize)
    obs_dim = dataset['observations'].shape[1]
    act_dim = dataset['actions'].shape[1]
    set_seed(args.seed, env=env)
    # The action policy is conditioned on (state, goal-state): 2 * obs_dim inputs.
    policy = GaussianPolicy((obs_dim + obs_dim), act_dim, hidden_dim=1024, n_hidden=2)
    # The goal policy predicts a next state from the current state.
    goal_policy = GaussianPolicy(obs_dim, obs_dim, hidden_dim=args.hidden_dim, n_hidden=args.n_hidden)
    por = POR(vf=TwinV(obs_dim, layer_norm=args.layer_norm, hidden_dim=args.hidden_dim, n_hidden=args.n_hidden), policy=policy, goal_policy=goal_policy, max_steps=args.train_steps, tau=args.tau, alpha=args.alpha, discount=args.discount, value_lr=args.value_lr, policy_lr=args.policy_lr)
    def eval_por(step):
        # Mean normalized D4RL score over n_eval_episodes rollouts.
        eval_returns = np.array([evaluate_por(env, policy, goal_policy, mean, std) for _ in range(args.n_eval_episodes)])
        normalized_returns = (d4rl.get_normalized_score(args.env_name, eval_returns) * 100.0)
        wandb.log({'return mean': eval_returns.mean(), 'normalized return mean': normalized_returns.mean()}, step=step)
        return normalized_returns.mean()
    # por_q on locomotion tasks needs a (pretrained) behavior goal policy.
    if (any(((s in args.env_name) for s in ('halfcheetah', 'hopper', 'walker2d'))) and (args.type == 'por_q')):
        b_goal_policy = GaussianPolicy(obs_dim, obs_dim, hidden_dim=args.hidden_dim, n_hidden=args.n_hidden)
        por.pretrain_init(b_goal_policy)
        if args.pretrain:
            for _ in trange(args.pretrain_steps):
                por.pretrain(**sample_batch(dataset, args.batch_size))
            algo_name = f'pretrain_step-{args.pretrain_steps}_normalize-{args.normalize}'
            os.makedirs(f'{args.model_dir}/{args.env_name}', exist_ok=True)
            por.save_pretrain(f'{args.model_dir}/{args.env_name}/{algo_name}')
        else:
            algo_name = f'pretrain_step-{args.pretrain_steps}_normalize-{args.normalize}'
            por.load_pretrain(f'{args.model_dir}/{args.env_name}/{algo_name}')
    # Main training loop runs only when not in pretrain-only mode.
    if (not args.pretrain):
        algo_name = f'{args.type}_tau-{args.tau}_alpha-{args.alpha}_normalize-{args.normalize}'
        os.makedirs(f'{args.log_dir}/{args.env_name}/{algo_name}', exist_ok=True)
        eval_log = open(f'{args.log_dir}/{args.env_name}/{algo_name}/seed-{args.seed}.txt', 'w')
        for step in trange(args.train_steps):
            if (args.type == 'por_r'):
                por.por_residual_update(**sample_batch(dataset, args.batch_size))
            elif (args.type == 'por_q'):
                por.por_qlearning_update(**sample_batch(dataset, args.batch_size))
            if (((step + 1) % args.eval_period) == 0):
                average_returns = eval_por(step)
                eval_log.write(f'''{(step + 1)} {average_returns}
''')
                eval_log.flush()
        eval_log.close()
        os.makedirs(f'{args.model_dir}/{args.env_name}', exist_ok=True)
        por.save(f'{args.model_dir}/{args.env_name}/{algo_name}')
|
class GaussianPolicy(nn.Module):
    """Gaussian policy with a state-dependent mean and a single learned,
    state-independent log standard deviation per action dimension.
    """

    def __init__(self, obs_dim, act_dim, hidden_dim=256, n_hidden=2):
        super().__init__()
        layer_sizes = [obs_dim] + [hidden_dim] * n_hidden + [act_dim]
        self.net = mlp(layer_sizes)
        self.log_std = nn.Parameter(torch.zeros(act_dim, dtype=torch.float32))

    def forward(self, obs):
        """Return a MultivariateNormal over actions for the given observations."""
        mean = self.net(obs)
        clamped_log_std = self.log_std.clamp(LOG_STD_MIN, LOG_STD_MAX)
        return MultivariateNormal(mean, scale_tril=torch.diag(torch.exp(clamped_log_std)))

    def act(self, obs, deterministic=False, enable_grad=False):
        """Sample an action (or return the mean when deterministic=True)."""
        with torch.set_grad_enabled(enable_grad):
            dist = self(obs)
            if deterministic:
                return dist.mean
            return dist.sample()
|
class DeterministicPolicy(nn.Module):
    """Deterministic tanh-squashed policy network."""

    def __init__(self, obs_dim, act_dim, hidden_dim=256, n_hidden=2):
        super().__init__()
        sizes = [obs_dim] + [hidden_dim] * n_hidden + [act_dim]
        self.net = mlp(sizes, output_activation=nn.Tanh)

    def forward(self, obs):
        return self.net(obs)

    def act(self, obs, deterministic=False, enable_grad=False):
        """Return the network output; flags kept for interface parity with
        stochastic policies (output is deterministic by construction)."""
        with torch.set_grad_enabled(enable_grad):
            return self(obs)
|
def asymmetric_l2_loss(u, tau):
    """Expectile regression loss: squared error weighted by tau on the
    positive side of the residual and (1 - tau) on the negative side.
    """
    side_weight = torch.abs(tau - (u < 0).float())
    return torch.mean(side_weight * torch.square(u))
|
class POR(nn.Module):
    """Policy-guided Offline RL: learns a twin state-value function, a goal
    (next-state) policy, and an action policy conditioned on (state, goal).
    """

    def __init__(self, vf, policy, goal_policy, max_steps, tau, alpha, value_lr=0.0001, policy_lr=0.0001, discount=0.99, beta=0.005):
        super().__init__()
        self.vf = vf.to(DEFAULT_DEVICE)
        # Frozen target value network, updated only by EMA.
        self.v_target = copy.deepcopy(vf).requires_grad_(False).to(DEFAULT_DEVICE)
        self.policy = policy.to(DEFAULT_DEVICE)
        self.goal_policy = goal_policy.to(DEFAULT_DEVICE)
        self.v_optimizer = torch.optim.Adam(self.vf.parameters(), lr=value_lr)
        self.policy_optimizer = torch.optim.Adam(self.policy.parameters(), lr=policy_lr)
        self.policy_lr_schedule = CosineAnnealingLR(self.policy_optimizer, max_steps)
        self.goal_policy_optimizer = torch.optim.Adam(self.goal_policy.parameters(), lr=policy_lr)
        self.goal_lr_schedule = CosineAnnealingLR(self.goal_policy_optimizer, max_steps)
        self.tau = tau  # expectile for the asymmetric value loss
        self.alpha = alpha  # advantage temperature / value-vs-BC trade-off
        self.discount = discount
        self.beta = beta  # EMA rate for the target value network
        self.step = 0
        self.pretrain_step = 0

    def por_residual_update(self, observations, actions, next_observations, rewards, terminals):
        """POR-R update: expectile V learning, advantage-weighted goal policy,
        and plain behavior cloning for the (state, goal)-conditioned policy."""
        with torch.no_grad():
            next_v = self.v_target(next_observations)
        # --- Value step: expectile regression of both twin values. ---
        target_v = (rewards + (((1.0 - terminals.float()) * self.discount) * next_v))
        vs = self.vf.both(observations)
        v_loss = (sum((asymmetric_l2_loss((target_v - v), self.tau) for v in vs)) / len(vs))
        self.v_optimizer.zero_grad(set_to_none=True)
        v_loss.backward()
        self.v_optimizer.step()
        update_exponential_moving_average(self.v_target, self.vf, self.beta)
        # --- Goal-policy step: advantage-weighted regression toward s'. ---
        v = self.vf(observations)
        adv = (target_v - v)
        weight = torch.exp((self.alpha * adv))
        weight = torch.clamp_max(weight, EXP_ADV_MAX).detach()
        goal_out = self.goal_policy(observations)
        g_loss = (- goal_out.log_prob(next_observations))
        g_loss = torch.mean((weight * g_loss))
        self.goal_policy_optimizer.zero_grad(set_to_none=True)
        g_loss.backward()
        self.goal_policy_optimizer.step()
        self.goal_lr_schedule.step()
        # --- Action-policy step: BC on actions given (s, s') pairs. ---
        policy_out = self.policy(torch.concat([observations, next_observations], dim=1))
        bc_losses = (- policy_out.log_prob(actions))
        policy_loss = torch.mean(bc_losses)
        self.policy_optimizer.zero_grad(set_to_none=True)
        policy_loss.backward()
        self.policy_optimizer.step()
        self.policy_lr_schedule.step()
        if (((self.step + 1) % 100000) == 0):
            wandb.log({'v_loss': v_loss, 'v_value': v.mean()}, step=self.step)
        self.step += 1

    def pretrain_init(self, b_goal_policy):
        """Attach a behavior goal policy (and its optimizer) for pretraining."""
        self.b_goal_policy = b_goal_policy.to(DEFAULT_DEVICE)
        self.b_goal_policy_optimizer = torch.optim.Adam(self.b_goal_policy.parameters(), lr=0.0001)

    def pretrain(self, observations, actions, next_observations, rewards, terminals):
        """Behavior-clone the goal distribution: maximize log p(s' | s)."""
        b_goal_out = self.b_goal_policy(observations)
        b_g_loss = (- b_goal_out.log_prob(next_observations).mean())
        b_g_loss = torch.mean(b_g_loss)
        self.b_goal_policy_optimizer.zero_grad(set_to_none=True)
        b_g_loss.backward()
        self.b_goal_policy_optimizer.step()
        if (((self.pretrain_step + 1) % 10000) == 0):
            wandb.log({'b_g_loss': b_g_loss}, step=self.pretrain_step)
        self.pretrain_step += 1

    def por_qlearning_update(self, observations, actions, next_observations, rewards, terminals):
        """POR-Q update: goal policy maximizes V of its sampled goal while
        staying close to the pretrained behavior goal policy (requires
        pretrain_init/pretrain to have been run)."""
        with torch.no_grad():
            next_v = self.v_target(next_observations)
        # --- Value step: identical to the residual variant. ---
        target_v = (rewards + (((1.0 - terminals.float()) * self.discount) * next_v))
        vs = self.vf.both(observations)
        v_loss = (sum((asymmetric_l2_loss((target_v - v), self.tau) for v in vs)) / len(vs))
        self.v_optimizer.zero_grad(set_to_none=True)
        v_loss.backward()
        self.v_optimizer.step()
        update_exponential_moving_average(self.v_target, self.vf, self.beta)
        v = self.vf(observations)  # used for logging only below
        # --- Goal-policy step: value maximization + behavior regularization. ---
        goal_out = self.goal_policy(observations)
        b_goal_out = self.b_goal_policy(observations)
        g_sample = goal_out.rsample()  # reparameterized so V's gradient flows
        g_loss1 = (- self.vf(g_sample))
        g_loss2 = (- b_goal_out.log_prob(g_sample).mean())
        # Normalize the value term's scale (TD3+BC-style lambda).
        lmbda = (self.alpha / g_loss1.abs().mean().detach())
        g_loss = torch.mean(((lmbda * g_loss1) + g_loss2))
        self.goal_policy_optimizer.zero_grad(set_to_none=True)
        g_loss.backward()
        self.goal_policy_optimizer.step()
        self.goal_lr_schedule.step()
        # --- Action-policy step: BC on actions given (s, s') pairs. ---
        policy_out = self.policy(torch.concat([observations, next_observations], dim=1))
        bc_losses = (- policy_out.log_prob(actions))
        policy_loss = torch.mean(bc_losses)
        self.policy_optimizer.zero_grad(set_to_none=True)
        policy_loss.backward()
        self.policy_optimizer.step()
        self.policy_lr_schedule.step()
        if (((self.step + 1) % 100000) == 0):
            wandb.log({'v_loss': v_loss, 'v_value': v.mean(), 'g_loss1': g_loss1.mean(), 'g_loss2': g_loss2.mean()}, step=self.step)
        self.step += 1

    def save_pretrain(self, filename):
        """Save the behavior goal policy to `<filename>-behavior_goal_network`."""
        torch.save(self.b_goal_policy.state_dict(), (filename + '-behavior_goal_network'))
        # NOTE(review): f-string has no placeholders; path not reported.
        print(f'***save models to (unknown)***')

    def load_pretrain(self, filename):
        """Load the behavior goal policy from `<filename>-behavior_goal_network`."""
        self.b_goal_policy.load_state_dict(torch.load((filename + '-behavior_goal_network'), map_location=DEFAULT_DEVICE))
        print(f'***load models from (unknown)***')

    def save(self, filename):
        """Save action and goal policy weights under the given prefix."""
        torch.save(self.policy.state_dict(), (filename + '-policy_network'))
        torch.save(self.goal_policy.state_dict(), (filename + '-goal_network'))
        print(f'***save models to (unknown)***')

    def load(self, filename):
        """Load action policy weights (CPU map) from the given prefix.

        NOTE(review): the goal policy saved by save() is not reloaded here —
        confirm whether that is intentional."""
        self.policy.load_state_dict(torch.load((filename + '-policy_network'), map_location=torch.device('cpu')))
        print(f'***load the RvS policy model from (unknown)***')
|
class TwinQ(nn.Module):
    """Clipped double Q: two independent critics over (state, action) pairs."""

    def __init__(self, state_dim, action_dim, hidden_dim=256, n_hidden=2):
        super().__init__()
        layer_dims = [state_dim + action_dim] + [hidden_dim] * n_hidden + [1]
        self.q1 = mlp(layer_dims, squeeze_output=True)
        self.q2 = mlp(layer_dims, squeeze_output=True)

    def both(self, state, action):
        """Return both critics' estimates for the concatenated input."""
        pair = torch.cat([state, action], 1)
        return self.q1(pair), self.q2(pair)

    def forward(self, state, action):
        """Pessimistic (elementwise minimum) Q estimate."""
        q1, q2 = self.both(state, action)
        return torch.min(q1, q2)
|
class ValueFunction(nn.Module):
    """State-value network V(s) with a squeezed scalar output."""

    def __init__(self, state_dim, hidden_dim=256, n_hidden=2):
        super().__init__()
        layer_dims = [state_dim] + [hidden_dim] * n_hidden + [1]
        self.v = mlp(layer_dims, squeeze_output=True)

    def forward(self, state):
        return self.v(state)
|
class TwinV(nn.Module):
    """Twin state-value networks; forward() returns their elementwise minimum."""

    def __init__(self, state_dim, layer_norm=False, hidden_dim=256, n_hidden=2):
        super().__init__()
        layer_dims = [state_dim] + [hidden_dim] * n_hidden + [1]
        self.v1 = mlp(layer_dims, layer_norm=layer_norm, squeeze_output=True)
        self.v2 = mlp(layer_dims, layer_norm=layer_norm, squeeze_output=True)

    def both(self, state):
        """Return both value estimates as a pair."""
        return self.v1(state), self.v2(state)

    def forward(self, state):
        """Pessimistic (elementwise minimum) value estimate."""
        v1, v2 = self.both(state)
        return torch.min(v1, v2)
|
def transformer(batch, chan, flow, U, out_size, name='SpatialTransformer', **kwargs):
    """Spatial-transformer-style warping layer (TF1 graph mode).

    Bilinearly samples the image batch ``U`` at pixel positions displaced by
    the dense flow field ``flow``.

    Args:
        batch: static batch size used to reshape the output.
        chan: static channel count used to reshape the output.
        flow: flow tensor; split along the last axis into (dx, dy) components.
        U: images to sample from, laid out (N, H, W, C).
        out_size: (out_height, out_width) of the sampling grid.
        name: variable scope name.
        **kwargs: unused; kept for interface compatibility.

    Returns:
        Warped images reshaped to (batch, out_height, out_width, chan).
    """

    def _repeat(x, n_repeats):
        # Repeat each element n_repeats times: [a, b] -> [a..a, b..b].
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, ((- 1), 1)), rep)
            return tf.reshape(x, [(- 1)])

    def _repeat2(x, n_repeats):
        # Tile the whole sequence n_repeats times: [a, b] -> [a, b, a, b, ...].
        with tf.variable_scope('_repeat'):
            rep = tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1)
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(rep, tf.reshape(x, (1, (- 1))))
            return tf.reshape(x, [(- 1)])

    def _interpolate(im, x, y, out_size):
        # Bilinear interpolation of im at flow-displaced pixel coordinates.
        with tf.variable_scope('_interpolate'):
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')  # unused; kept as-is
            width_f = tf.cast(width, 'float32')  # unused; kept as-is
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast((tf.shape(im)[1] - 1), 'int32')
            max_x = tf.cast((tf.shape(im)[2] - 1), 'int32')
            # Absolute sample positions: regular pixel grid plus the flow,
            # scaled by the module-level WIDTH/HEIGHT constants.
            x = (tf.cast(_repeat2(tf.range(0, width), (height * num_batch)), 'float32') + (x * WIDTH))
            y = (tf.cast(_repeat2(_repeat(tf.range(0, height), width), num_batch), 'float32') + (y * HEIGHT))
            # Integer corner coordinates around each fractional sample point.
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = (x0 + 1)
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = (y0 + 1)
            # Clamp to the image border (border-replicate behavior).
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = (width * height)
            # Flat gather indices of the four neighbours per output pixel.
            base = _repeat((tf.range(num_batch) * dim1), (out_height * out_width))
            base_y0 = (base + (y0 * dim2))
            base_y1 = (base + (y1 * dim2))
            idx_a = (base_y0 + x0)
            idx_b = (base_y1 + x0)
            idx_c = (base_y0 + x1)
            idx_d = (base_y1 + x1)
            im_flat = tf.reshape(im, tf.stack([(- 1), channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # Bilinear weights derived from the fractional offsets.
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
            wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
            wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
            output = tf.add_n([(wa * Ia), (wb * Ib), (wc * Ic), (wd * Id)])
            return output

    def _meshgrid(height, width):
        # NOTE(review): defined but never called within this function.
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
            x_t_flat = tf.reshape(x_t, (1, (- 1)))
            y_t_flat = tf.reshape(y_t, (1, (- 1)))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid

    def _transform(x_s, y_s, input_dim, out_size):
        # Flatten both flow components and run bilinear sampling over input_dim.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]
            height_f = tf.cast(height, 'float32')  # unused; kept as-is
            width_f = tf.cast(width, 'float32')  # unused; kept as-is
            out_height = out_size[0]
            out_width = out_size[1]
            x_s_flat = tf.reshape(x_s, [(- 1)])
            y_s_flat = tf.reshape(y_s, [(- 1)])
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            output = tf.reshape(input_transformed, tf.stack([batch, out_height, out_width, chan]))
            return output

    with tf.variable_scope(name):
        (dx, dy) = tf.split(flow, 2, 3)
        output = _transform(dx, dy, U, out_size)
        return output
|
def warp_img(batch_size, imga, imgb, reuse, scope='easyflow'):
    """Three-stage coarse-to-fine flow estimation that warps imgb toward imga.

    Each stage predicts a flow at reduced resolution, pixel-shuffles it back
    to full size, warps imgb with it via transformer(), and feeds the result
    to the next refinement stage.

    Fix: the 'a5' and 'uv2' summary histograms previously logged the `uv`
    tensor (copy-paste error); they now log `a5_hr` and `uv2` respectively.
    Graph computation is unchanged.

    Args:
        batch_size: batch size tensor/value forwarded to transformer().
        imga: target frame, (n, h, w, c).
        imgb: frame to be warped toward imga, (n, h, w, c).
        reuse: variable-scope reuse flag (share weights across calls).
        scope: variable scope name.

    Returns:
        imgb warped by the final refined flow.
    """
    (n, h, w, c) = imga.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            inputs = tf.concat([imga, imgb], 3, name='flow_inp')
            # --- Stage 1: coarse flow at quarter resolution. ---
            c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
            c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
            c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
            c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
            c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
            # Pixel-shuffle 32 channels at h/4 x w/4 into a full-res 2-channel flow.
            c5_hr = tf.reshape(c5, [n, int((h / 4)), int((w / 4)), 2, 4, 4])
            c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
            c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
            img_warp1 = transformer(batch_size, c, c5_hr, imgb, [h, w])
            # --- Stage 2: refinement at half resolution. ---
            c5_pack = tf.concat([inputs, c5_hr, img_warp1], 3, name='cat')
            s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
            s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
            s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
            s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
            s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
            s5_hr = tf.reshape(s5, [n, int((h / 2)), int((w / 2)), 2, 2, 2])
            s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
            s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
            uv = (c5_hr + s5_hr)
            img_warp2 = transformer(batch_size, c, uv, imgb, [h, w])
            # --- Stage 3: final full-resolution residual refinement. ---
            s5_pack = tf.concat([inputs, uv, img_warp2], 3, name='cat2')
            a1 = slim.conv2d(s5_pack, 24, [3, 3], scope='a1')
            a2 = slim.conv2d(a1, 24, [3, 3], scope='a2')
            a3 = slim.conv2d(a2, 24, [3, 3], scope='a3')
            a4 = slim.conv2d(a3, 24, [3, 3], scope='a4')
            a5 = slim.conv2d(a4, 2, [3, 3], activation_fn=tf.nn.tanh, scope='a5')
            a5_hr = tf.reshape(a5, [n, h, w, 2, 1, 1])
            a5_hr = tf.transpose(a5_hr, [0, 1, 4, 2, 5, 3])
            a5_hr = tf.reshape(a5_hr, [n, h, w, 2])
            uv2 = (a5_hr + uv)
            img_warp3 = transformer(batch_size, c, uv2, imgb, [h, w])
            tf.summary.histogram('c5_hr', c5_hr)
            tf.summary.histogram('s5_hr', s5_hr)
            tf.summary.histogram('uv', uv)
            # Bug fix: these two summaries logged `uv` instead of the tensors
            # named in their tags.
            tf.summary.histogram('a5', a5_hr)
            tf.summary.histogram('uv2', uv2)
            return img_warp3
|
def load_stack(type_process, ite_stack):
    """Load one stack of HDF5 arrays from dir_stack.

    type_process: "tra" or "val".
    ite_stack: start from 0.

    Returns (pre_list, cmp_list, sub_list, raw_list) as in-memory arrays.

    Fix: the four h5py.File handles were previously opened and never closed
    (a resource leak); each file is now closed via a context manager. The
    `[:]` slice copies the dataset into memory, so closing is safe.
    """
    arrays = []
    for kind in ('pre', 'cmp', 'sub', 'raw'):
        stack_name = 'stack_' + type_process + '_' + kind + '_' + str(ite_stack) + '.hdf5'
        with h5py.File(os.path.join(dir_stack, stack_name), 'r') as stack_file:
            arrays.append(stack_file['stack_' + kind][:])
        print(kind + ' loaded.')
    return tuple(arrays)
|
def cal_MSE(img1, img2):
    """Calculate MSE of two images.

    img: [0,1].
    """
    diff = tf.subtract(img1, img2)
    return tf.reduce_mean(tf.pow(diff, 2.0))
|
def cal_PSNR(img1, img2):
    """Calculate PSNR of two images.

    img: [0,1], so the peak value is 1.0 and PSNR = 10 * log10(1 / MSE).
    """
    mse = cal_MSE(img1, img2)
    return (10.0 * tf.log(1.0 / mse)) / tf.log(10.0)
|
def main_train():
    """Fine tune a model from step2 and continue training.

    Builds the TF1 graph (motion-compensation subnet + quality-enhancement
    subnet), restores a previous checkpoint, then alternates training epochs
    with validation passes, logging to TensorBoard and file_object.
    """
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU
    config = tf.ConfigProto(allow_soft_placement=True)
    sess = tf.Session(config=config)
    # Inputs: previous (x1), compressed current (x2), subsequent (x3) frames,
    # and the raw ground-truth current frame (x5).
    x1 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x2 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x3 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x5 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    is_training = tf.placeholder_with_default(False, shape=())
    # Baseline PSNR of the compressed frame before enhancement.
    PSNR_0 = cal_PSNR(x2, x5)
    # Motion-compensate the neighbouring frames toward x2 (weights shared via reuse).
    x1to2 = warp_img(tf.shape(x2)[0], x2, x1, False)
    x3to2 = warp_img(tf.shape(x2)[0], x2, x3, True)
    FlowLoss_1 = cal_MSE(x1to2, x2)
    FlowLoss_2 = cal_MSE(x3to2, x2)
    flow_loss = (FlowLoss_1 + FlowLoss_2)
    # Quality-enhancement subnet fuses the three aligned frames.
    x2_enhanced = net_MFCNN.network(x1to2, x2, x3to2, is_training)
    MSE = cal_MSE(x2_enhanced, x5)
    PSNR = cal_PSNR(x2_enhanced, x5)
    delta_PSNR = (PSNR - PSNR_0)
    # Two weighted objectives: step1 emphasizes MC, step2 emphasizes QE.
    OptimizeLoss_1 = (flow_loss + (ratio_small * MSE))
    OptimizeLoss_2 = ((ratio_small * flow_loss) + MSE)
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        Training_step1 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_1)
        Training_step2 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_2)
    tf.summary.scalar('PSNR improvement', delta_PSNR)
    tf.summary.scalar('PSNR before enhancement', PSNR_0)
    tf.summary.scalar('PSNR after enhancement', PSNR)
    tf.summary.scalar('MSE loss of motion compensation', flow_loss)
    tf.summary.scalar('MSE loss of final quality enhancement', MSE)
    tf.summary.scalar('MSE loss for training step1 (mainly MC-subnet)', OptimizeLoss_1)
    tf.summary.scalar('MSE loss for training step2 (mainly QE-subnet)', OptimizeLoss_2)
    tf.summary.image('cmp', x2)
    tf.summary.image('enhanced', x2_enhanced)
    tf.summary.image('raw', x5)
    tf.summary.image('x1to2', x1to2)
    tf.summary.image('x3to2', x3to2)
    summary_writer = tf.summary.FileWriter(dir_model, sess.graph)
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=None)
    # Count trainable parameters for the log.
    num_params = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        num_params += reduce(mul, [dim.value for dim in shape], 1)
    print(('# num of parameters: %d #' % num_params))
    file_object.write(('# num of parameters: %d #\n' % num_params))
    file_object.flush()
    # Discover how many training/validation stacks exist on disk.
    stack_name = os.path.join(dir_stack, 'stack_tra_pre_*')
    num_TrainingStack = len(glob.glob(stack_name))
    stack_name = os.path.join(dir_stack, 'stack_val_pre_*')
    num_ValidationStack = len(glob.glob(stack_name))
    # Restore the checkpoint produced by the previous run.
    saver_res = tf.train.Saver()
    saver_res.restore(sess, model_res_path)
    print(('successfully restore model %d!' % (int(res_index) + 1)))
    file_object.write(('successfully restore model %d!\n' % (int(res_index) + 1)))
    file_object.flush()
    print('##### Start running! #####')
    num_TrainingBatch_count = 0
    for ite_epoch in range(epoch_step2):
        for ite_stack in range(num_TrainingStack):
            # NOTE(review): the training stack is loaded only on the very first
            # epoch/stack iteration; later iterations (and later stacks) reuse
            # whatever pre_list/cmp_list/... currently hold — after the first
            # validation pass that is actually the LAST VALIDATION stack, and
            # num_batch is also clobbered there. Confirm this is intended.
            if ((ite_epoch == 0) and (ite_stack == 0)):
                (pre_list, cmp_list, sub_list, raw_list) = load_stack('tra', ite_stack)
                num_batch = int((len(pre_list) / BATCH_SIZE))
            for ite_batch in range(num_batch):
                print(('\rstep 2 - epoch %2d/%2d - training stack %2d/%2d - batch %3d/%3d' % ((ite_epoch + 1), epoch_step2, (ite_stack + 1), num_TrainingStack, (ite_batch + 1), num_batch)), end='')
                start_index = (ite_batch * BATCH_SIZE)
                next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                Training_step2.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                num_TrainingBatch_count += 1
                # Log summaries twice per stack: mid-point and end.
                if (((ite_batch + 1) == int((num_batch / 2))) or ((ite_batch + 1) == num_batch)):
                    (summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch) = sess.run([summary_op, delta_PSNR, PSNR_0, flow_loss, MSE], feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                    summary_writer.add_summary(summary, (num_TrainingBatch_count + ((int(res_index) + 21) * num_batch)))
                    print(('\rstep 2 - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f' % (((ite_epoch + int(res_index)) + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                    file_object.write(('step 2 - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f\n' % (((ite_epoch + int(res_index)) + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                    file_object.flush()
        # Checkpoint after every epoch, numbered relative to the restored run.
        CheckPoint_path = os.path.join(dir_model, 'model_step2.ckpt')
        saver.save(sess, CheckPoint_path, global_step=((ite_epoch + int(res_index)) + 1))
        # Validation pass: average PSNR improvement over all validation stacks.
        sum_improved_PSNR = 0
        num_patch_count = 0
        for ite_stack in range(num_ValidationStack):
            # Drop references to the previous stack before loading the next one.
            (pre_list, cmp_list, sub_list, raw_list) = ([], [], [], [])
            gc.collect()
            (pre_list, cmp_list, sub_list, raw_list) = load_stack('val', ite_stack)
            gc.collect()
            num_batch = int((len(pre_list) / BATCH_SIZE))
            for ite_batch in range(num_batch):
                print(('\rstep 2 - epoch %2d/%2d - validation stack %2d/%2d ' % ((((ite_epoch + 1) + int(res_index)) + 1), ((epoch_step2 + int(res_index)) + 1), (ite_stack + 1), num_ValidationStack)), end='')
                start_index = (ite_batch * BATCH_SIZE)
                next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                sum_improved_PSNR += (delta_PSNR_batch * BATCH_SIZE)
                num_patch_count += BATCH_SIZE
        if (num_patch_count != 0):
            # NOTE(review): the epoch index printed here differs from the one
            # written to file_object ((ite_epoch+1)+res_index+1 vs ite_epoch+1).
            print(('\n### imp PSNR by model after step 2 - epoch %2d/%2d: %.3f ###\n' % ((((ite_epoch + 1) + int(res_index)) + 1), ((epoch_step2 + int(res_index)) + 1), (sum_improved_PSNR / num_patch_count))))
            file_object.write(('### imp PSNR by model after step 2 - epoch %2d/%2d: %.3f ###\n' % ((ite_epoch + 1), ((epoch_step2 + int(res_index)) + 1), (sum_improved_PSNR / num_patch_count))))
            file_object.flush()
|
def transformer(batch, chan, flow, U, out_size, name='SpatialTransformer', **kwargs):
    """Spatial-transformer-style warping layer (TF1 graph mode).

    NOTE(review): byte-identical duplicate of the transformer() defined
    earlier in this file; the later definition shadows the earlier one.
    Consider deduplicating.

    Bilinearly samples the image batch ``U`` at pixel positions displaced by
    the dense flow field ``flow``; returns images reshaped to
    (batch, out_height, out_width, chan).
    """

    def _repeat(x, n_repeats):
        # Repeat each element n_repeats times: [a, b] -> [a..a, b..b].
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, ((- 1), 1)), rep)
            return tf.reshape(x, [(- 1)])

    def _repeat2(x, n_repeats):
        # Tile the whole sequence n_repeats times: [a, b] -> [a, b, a, b, ...].
        with tf.variable_scope('_repeat'):
            rep = tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1)
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(rep, tf.reshape(x, (1, (- 1))))
            return tf.reshape(x, [(- 1)])

    def _interpolate(im, x, y, out_size):
        # Bilinear interpolation of im at flow-displaced pixel coordinates.
        with tf.variable_scope('_interpolate'):
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')  # unused; kept as-is
            width_f = tf.cast(width, 'float32')  # unused; kept as-is
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast((tf.shape(im)[1] - 1), 'int32')
            max_x = tf.cast((tf.shape(im)[2] - 1), 'int32')
            # Absolute sample positions: pixel grid plus WIDTH/HEIGHT-scaled flow.
            x = (tf.cast(_repeat2(tf.range(0, width), (height * num_batch)), 'float32') + (x * WIDTH))
            y = (tf.cast(_repeat2(_repeat(tf.range(0, height), width), num_batch), 'float32') + (y * HEIGHT))
            # Integer corners around each fractional sample point, border-clamped.
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = (x0 + 1)
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = (y0 + 1)
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            dim2 = width
            dim1 = (width * height)
            # Flat gather indices of the four neighbours per output pixel.
            base = _repeat((tf.range(num_batch) * dim1), (out_height * out_width))
            base_y0 = (base + (y0 * dim2))
            base_y1 = (base + (y1 * dim2))
            idx_a = (base_y0 + x0)
            idx_b = (base_y1 + x0)
            idx_c = (base_y0 + x1)
            idx_d = (base_y1 + x1)
            im_flat = tf.reshape(im, tf.stack([(- 1), channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # Bilinear weights derived from the fractional offsets.
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
            wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
            wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
            output = tf.add_n([(wa * Ia), (wb * Ib), (wc * Ic), (wd * Id)])
            return output

    def _meshgrid(height, width):
        # NOTE(review): defined but never called within this function.
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
            x_t_flat = tf.reshape(x_t, (1, (- 1)))
            y_t_flat = tf.reshape(y_t, (1, (- 1)))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid

    def _transform(x_s, y_s, input_dim, out_size):
        # Flatten both flow components and run bilinear sampling over input_dim.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]
            height_f = tf.cast(height, 'float32')  # unused; kept as-is
            width_f = tf.cast(width, 'float32')  # unused; kept as-is
            out_height = out_size[0]
            out_width = out_size[1]
            x_s_flat = tf.reshape(x_s, [(- 1)])
            y_s_flat = tf.reshape(y_s, [(- 1)])
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            output = tf.reshape(input_transformed, tf.stack([batch, out_height, out_width, chan]))
            return output

    with tf.variable_scope(name):
        (dx, dy) = tf.split(flow, 2, 3)
        output = _transform(dx, dy, U, out_size)
        return output
|
def warp_img(batch_size, imga, imgb, reuse, scope='easyflow'):
    """Coarse-to-fine flow estimation ('easyflow') and motion compensation.

    Estimates optical flow at three scales (1/4, 1/2, full resolution) and
    warps `imgb` toward `imga` with the accumulated flow via `transformer`.

    Args:
        batch_size: batch dimension forwarded to `transformer`.
        imga: target frames; static shape [n, h, w, c] must be fully defined.
        imgb: source frames to be warped, same shape as `imga`.
        reuse: reuse flag for the variable scope.
        scope: variable scope name (default 'easyflow').

    Returns:
        The warped source frames produced with the finest accumulated flow.
    """
    (n, h, w, c) = imga.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            inputs = tf.concat([imga, imgb], 3, name='flow_inp')
            # Coarse flow at 1/4 resolution; 32 channels = 2 flow components
            # * 4 * 4 sub-pixel positions for the pixel-shuffle upsampling.
            c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
            c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
            c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
            c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
            c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
            # Pixel-shuffle: [n, h/4, w/4, 2, 4, 4] -> [n, h, w, 2].
            c5_hr = tf.reshape(c5, [n, int(h / 4), int(w / 4), 2, 4, 4])
            c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
            c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
            img_warp1 = transformer(batch_size, c, c5_hr, imgb, [h, w])
            c5_pack = tf.concat([inputs, c5_hr, img_warp1], 3, name='cat')
            # Refinement flow at 1/2 resolution; 8 channels = 2 * 2 * 2.
            s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
            s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
            s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
            s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
            s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
            s5_hr = tf.reshape(s5, [n, int(h / 2), int(w / 2), 2, 2, 2])
            s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
            s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
            uv = (c5_hr + s5_hr)  # accumulated flow after the second scale
            img_warp2 = transformer(batch_size, c, uv, imgb, [h, w])
            s5_pack = tf.concat([inputs, uv, img_warp2], 3, name='cat2')
            # Full-resolution residual flow.
            a1 = slim.conv2d(s5_pack, 24, [3, 3], scope='a1')
            a2 = slim.conv2d(a1, 24, [3, 3], scope='a2')
            a3 = slim.conv2d(a2, 24, [3, 3], scope='a3')
            a4 = slim.conv2d(a3, 24, [3, 3], scope='a4')
            a5 = slim.conv2d(a4, 2, [3, 3], activation_fn=tf.nn.tanh, scope='a5')
            # 1x1 pixel-shuffle: a no-op reshape kept for symmetry with the
            # coarser scales.
            a5_hr = tf.reshape(a5, [n, h, w, 2, 1, 1])
            a5_hr = tf.transpose(a5_hr, [0, 1, 4, 2, 5, 3])
            a5_hr = tf.reshape(a5_hr, [n, h, w, 2])
            uv2 = (a5_hr + uv)  # final accumulated flow
            img_warp3 = transformer(batch_size, c, uv2, imgb, [h, w])
            tf.summary.histogram('c5_hr', c5_hr)
            tf.summary.histogram('s5_hr', s5_hr)
            tf.summary.histogram('uv', uv)
            # Bug fix: the 'a5' and 'uv2' histograms previously logged `uv`
            # again; log the tensors they are named after.
            tf.summary.histogram('a5', a5_hr)
            tf.summary.histogram('uv2', uv2)
            return img_warp3
|
def load_stack(type_process, ite_stack):
    """Load one preprocessed data stack from HDF5 files.

    Args:
        type_process: 'tra' (training) or 'val' (validation).
        ite_stack: stack index, starting from 0.

    Returns:
        Tuple (pre_list, cmp_list, sub_list, raw_list) of arrays read from
        dir_stack/stack_<type_process>_<kind>_<ite_stack>.hdf5.
    """
    def _load(kind):
        # Read dataset 'stack_<kind>' fully into memory; the context manager
        # closes the file (previously the h5py handles were never closed,
        # leaking file descriptors across repeated loads).
        stack_name = 'stack_%s_%s_%s.hdf5' % (type_process, kind, str(ite_stack))
        stack_path = os.path.join(dir_stack, stack_name)
        with h5py.File(stack_path, 'r') as h5f:
            data = h5f['stack_' + kind][:]
        print(kind + ' loaded.')
        return data
    return (_load('pre'), _load('cmp'), _load('sub'), _load('raw'))
|
def cal_MSE(img1, img2):
    """Return the mean squared error between two images.

    Both images are expected to lie in [0, 1].
    """
    diff = tf.subtract(img1, img2)
    return tf.reduce_mean(tf.pow(diff, 2.0))
|
def cal_PSNR(img1, img2):
    """Return the PSNR (in dB) between two images in [0, 1]."""
    mse = cal_MSE(img1, img2)
    # PSNR = 10 * log10(MAX^2 / MSE) with MAX = 1; tf.log is the natural
    # logarithm, hence the division by log(10).
    return (10.0 * tf.log(1.0 / mse)) / tf.log(10.0)
|
def main_train():
    """Train and evaluate the model (MC-subnet + QE-subnet).

    Output: model_QPxx checkpoints and record_train_QPxx log entries.
    Relies on module-level configuration: config, BATCH_SIZE, WIDTH, HEIGHT,
    CHANNEL, QP, net1_list, ratio_small, lr_ori, epoch_step1, epoch_step2,
    dir_model, dir_stack, model_res_path, args, file_object.
    """
    sess = tf.Session(config=config)
    # x1/x3: neighboring reference frames, x2: compressed target frame,
    # x5: raw (ground-truth) frame.
    # NOTE(review): shape order here is [batch, WIDTH, HEIGHT, channel],
    # while the other main_train variant uses [batch, HEIGHT, WIDTH,
    # channel] — confirm which order matches the stored stacks.
    x1 = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL])
    x2 = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL])
    x3 = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL])
    x5 = tf.placeholder(tf.float32, [BATCH_SIZE, WIDTH, HEIGHT, CHANNEL])
    # Batch-norm training switch; only needed for the BN-based network used
    # when QP is in net1_list.
    if (QP in net1_list):
        is_training = tf.placeholder_with_default(False, shape=())
    PSNR_0 = cal_PSNR(x2, x5)  # PSNR before enhancement
    # Motion-compensate both neighbors toward the target frame x2.
    x1to2 = warp_img(tf.shape(x2)[0], x2, x1, False)
    x3to2 = warp_img(tf.shape(x2)[0], x2, x3, True)
    FlowLoss_1 = cal_MSE(x1to2, x2)
    FlowLoss_2 = cal_MSE(x3to2, x2)
    flow_loss = (FlowLoss_1 + FlowLoss_2)
    # QE-subnet: BN variant for QPs in net1_list, plain variant otherwise.
    if (QP in net1_list):
        x2_enhanced = net_MFCNN.network(x1to2, x2, x3to2, is_training)
    else:
        x2_enhanced = net_MFCNN.network2(x1to2, x2, x3to2)
    MSE = cal_MSE(x2_enhanced, x5)
    PSNR = cal_PSNR(x2_enhanced, x5)
    delta_PSNR = (PSNR - PSNR_0)
    # Step 1 emphasizes the MC-subnet; step 2 emphasizes the QE-subnet.
    OptimizeLoss_1 = (flow_loss + (ratio_small * MSE))
    OptimizeLoss_2 = ((ratio_small * flow_loss) + MSE)
    # Run BN moving-average updates together with each optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        Training_step1 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_1)
        Training_step2 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_2)
    saver = tf.train.Saver(max_to_keep=None)
    sess.run(tf.global_variables_initializer())
    # Warm-start all variables from a previously trained model.
    saver_res = tf.train.Saver()
    saver_res.restore(sess, model_res_path)
    print(('successfully restore model %d!' % (int(args.res_index) + 1)))
    file_object.write(('successfully restore model %d!\n' % (int(args.res_index) + 1)))
    file_object.flush()
    tf.summary.scalar('PSNR improvement', delta_PSNR)
    tf.summary.scalar('PSNR before enhancement', PSNR_0)
    tf.summary.scalar('PSNR after enhancement', PSNR)
    tf.summary.scalar('MSE loss of motion compensation', flow_loss)
    tf.summary.scalar('MSE loss of final quality enhancement', MSE)
    tf.summary.scalar('MSE loss for training step1 (mainly MC-subnet)', OptimizeLoss_1)
    tf.summary.scalar('MSE loss for training step2 (mainly QE-subnet)', OptimizeLoss_2)
    tf.summary.image('cmp', x2)
    tf.summary.image('enhanced', x2_enhanced)
    tf.summary.image('raw', x5)
    tf.summary.image('x1to2', x1to2)
    tf.summary.image('x3to2', x3to2)
    summary_writer = tf.summary.FileWriter(dir_model, sess.graph)
    summary_op = tf.summary.merge_all()
    # Count trainable parameters for the log.
    num_params = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        num_params += reduce(mul, [dim.value for dim in shape], 1)
    print(('# num of parameters: %d #' % num_params))
    file_object.write(('# num of parameters: %d #\n' % num_params))
    file_object.flush()
    # Discover how many training/validation stacks exist on disk.
    stack_name = os.path.join(dir_stack, 'stack_tra_pre_*')
    num_TrainingStack = len(glob.glob(stack_name))
    stack_name = os.path.join(dir_stack, 'stack_val_pre_*')
    num_ValidationStack = len(glob.glob(stack_name))
    print('##### Start running! #####')
    num_TrainingBatch_count = 0
    for ite_step in [1, 2]:
        if (ite_step == 1):
            num_epoch = epoch_step1
        else:
            num_epoch = epoch_step2
        for ite_epoch in range(num_epoch):
            for ite_stack in range(num_TrainingStack):
                # NOTE(review): data is loaded only once (epoch 0, stack 0);
                # if num_TrainingStack > 1, later stacks are never loaded and
                # the first stack keeps being reused. The other main_train
                # variant reloads every stack — confirm whether this caching
                # is intentional.
                if ((ite_epoch == 0) and (ite_stack == 0)):
                    (pre_list, cmp_list, sub_list, raw_list) = load_stack('tra', ite_stack)
                num_batch = int((len(pre_list) / BATCH_SIZE))
                for ite_batch in range(num_batch):
                    print(('\rstep %1d - epoch %2d/%2d - training stack %2d/%2d - batch %3d/%3d' % (ite_step, (ite_epoch + 1), num_epoch, (ite_stack + 1), num_TrainingStack, (ite_batch + 1), num_batch)), end='')
                    start_index = (ite_batch * BATCH_SIZE)
                    next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                    # Feed is_training only when the BN network is in use.
                    if (ite_step == 1):
                        if (QP in net1_list):
                            Training_step1.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                        else:
                            Training_step1.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index]})
                    elif (QP in net1_list):
                        Training_step2.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                    else:
                        Training_step2.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index]})
                    num_TrainingBatch_count += 1
                    # Log summaries twice per stack: midpoint and end.
                    if (((ite_batch + 1) == int((num_batch / 2))) or ((ite_batch + 1) == num_batch)):
                        if (QP in net1_list):
                            (summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch) = sess.run([summary_op, delta_PSNR, PSNR_0, flow_loss, MSE], feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                        else:
                            (summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch) = sess.run([summary_op, delta_PSNR, PSNR_0, flow_loss, MSE], feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index]})
                        summary_writer.add_summary(summary, num_TrainingBatch_count)
                        print(('\rstep %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f' % (ite_step, (ite_epoch + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                        file_object.write(('step %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f\n' % (ite_step, (ite_epoch + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                        file_object.flush()
            # Checkpoint after every epoch of the current training step.
            if (ite_step == 1):
                CheckPoint_path = os.path.join(dir_model, 'model_step1.ckpt')
            else:
                CheckPoint_path = os.path.join(dir_model, 'model_step2.ckpt')
            saver.save(sess, CheckPoint_path, global_step=ite_epoch)
            # Validation: average PSNR improvement over all validation stacks.
            sum_improved_PSNR = 0
            num_patch_count = 0
            for ite_stack in range(num_ValidationStack):
                # Drop references to the previous stack before loading the
                # next one to limit peak memory usage.
                (pre_list, cmp_list, sub_list, raw_list) = ([], [], [], [])
                gc.collect()
                (pre_list, cmp_list, sub_list, raw_list) = load_stack('val', ite_stack)
                gc.collect()
                num_batch = int((len(pre_list) / BATCH_SIZE))
                for ite_batch in range(num_batch):
                    print(('\rstep %1d - epoch %2d/%2d - validation stack %2d/%2d ' % (ite_step, (ite_epoch + 1), num_epoch, (ite_stack + 1), num_ValidationStack)), end='')
                    start_index = (ite_batch * BATCH_SIZE)
                    next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                    if (QP in net1_list):
                        delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                    else:
                        delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index]})
                    sum_improved_PSNR += (delta_PSNR_batch * BATCH_SIZE)
                    num_patch_count += BATCH_SIZE
            if (num_patch_count != 0):
                print(('\n### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###\n' % (ite_step, (ite_epoch + 1), num_epoch, (sum_improved_PSNR / num_patch_count))))
                file_object.write(('### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###\n' % (ite_step, (ite_epoch + 1), num_epoch, (sum_improved_PSNR / num_patch_count))))
                file_object.flush()
|
def transformer(batch, chan, flow, U, out_size, name='SpatialTransformer', **kwargs):
    """Backward-warp images `U` with a dense two-channel flow field.

    Bilinear sampling in the style of the Spatial Transformer Network,
    specialized to take a per-pixel flow (dx, dy) instead of an affine
    transform.

    Args:
        batch: batch size used to reshape the sampled output.
        chan: channel count used to reshape the sampled output.
        flow: flow field with two channels (dx, dy) in the last dimension;
            values are scaled by the module-level WIDTH/HEIGHT constants
            inside _interpolate (presumably normalized units — TODO confirm).
        U: images to sample from, [batch, height, width, chan].
        out_size: (out_height, out_width) of the produced output.
        name: variable scope name for the whole op.

    Returns:
        The warped images, shape [batch, out_height, out_width, chan].
    """
    def _repeat(x, n_repeats):
        # Repeat each element n_repeats times: [a, b] -> [a, ..., a, b, ..., b].
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, ((- 1), 1)), rep)
            return tf.reshape(x, [(- 1)])
    def _repeat2(x, n_repeats):
        # Tile the whole sequence n_repeats times: [a, b] -> [a, b, a, b, ...].
        with tf.variable_scope('_repeat'):
            rep = tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1)
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(rep, tf.reshape(x, (1, (- 1))))
            return tf.reshape(x, [(- 1)])
    def _interpolate(im, x, y, out_size):
        # Bilinear sampling of `im` at flow-displaced pixel coordinates.
        with tf.variable_scope('_interpolate'):
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')  # NOTE(review): unused
            width_f = tf.cast(width, 'float32')  # NOTE(review): unused
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast((tf.shape(im)[1] - 1), 'int32')
            max_x = tf.cast((tf.shape(im)[2] - 1), 'int32')
            # Absolute sampling coordinates = base pixel grid + flow; the
            # flow components are scaled by the module-level WIDTH/HEIGHT
            # constants (assumes normalized flow — TODO confirm).
            x = (tf.cast(_repeat2(tf.range(0, width), (height * num_batch)), 'float32') + (x * WIDTH))
            y = (tf.cast(_repeat2(_repeat(tf.range(0, height), width), num_batch), 'float32') + (y * HEIGHT))
            # The four neighboring pixels around each sample, clamped to the
            # image bounds.
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = (x0 + 1)
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = (y0 + 1)
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            # Flat indices into the [batch*height*width, channels] image.
            dim2 = width
            dim1 = (width * height)
            base = _repeat((tf.range(num_batch) * dim1), (out_height * out_width))
            base_y0 = (base + (y0 * dim2))
            base_y1 = (base + (y1 * dim2))
            idx_a = (base_y0 + x0)
            idx_b = (base_y1 + x0)
            idx_c = (base_y0 + x1)
            idx_d = (base_y1 + x1)
            im_flat = tf.reshape(im, tf.stack([(- 1), channels]))
            im_flat = tf.cast(im_flat, 'float32')
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # Standard bilinear interpolation weights from the fractional
            # distance to each corner.
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
            wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
            wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
            output = tf.add_n([(wa * Ia), (wb * Ib), (wc * Ic), (wd * Id)])
            return output
    def _meshgrid(height, width):
        # Normalized [-1, 1] sampling grid in homogeneous coordinates.
        # NOTE(review): defined but never called in this flow-based variant;
        # appears vestigial from the original STN code.
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
            x_t_flat = tf.reshape(x_t, (1, (- 1)))
            y_t_flat = tf.reshape(y_t, (1, (- 1)))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid
    def _transform(x_s, y_s, input_dim, out_size):
        # Flatten the flow components, sample, and reshape back to an image
        # using the outer `batch`/`chan` arguments.
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]  # NOTE(review): unused
            height_f = tf.cast(height, 'float32')  # NOTE(review): unused
            width_f = tf.cast(width, 'float32')  # NOTE(review): unused
            out_height = out_size[0]
            out_width = out_size[1]
            x_s_flat = tf.reshape(x_s, [(- 1)])
            y_s_flat = tf.reshape(y_s, [(- 1)])
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            output = tf.reshape(input_transformed, tf.stack([batch, out_height, out_width, chan]))
            return output
    with tf.variable_scope(name):
        # Split the two flow channels into horizontal (dx) and vertical (dy).
        (dx, dy) = tf.split(flow, 2, 3)
        output = _transform(dx, dy, U, out_size)
        return output
|
def warp_img(batch_size, imga, imgb, reuse, scope='easyflow'):
    """Coarse-to-fine flow estimation ('easyflow') and motion compensation.

    Estimates optical flow at three scales (1/4, 1/2, full resolution) and
    warps `imgb` toward `imga` with the accumulated flow via `transformer`.

    Args:
        batch_size: batch dimension forwarded to `transformer`.
        imga: target frames; static shape [n, h, w, c] must be fully defined.
        imgb: source frames to be warped, same shape as `imga`.
        reuse: reuse flag for the variable scope.
        scope: variable scope name (default 'easyflow').

    Returns:
        The warped source frames produced with the finest accumulated flow.
    """
    (n, h, w, c) = imga.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            inputs = tf.concat([imga, imgb], 3, name='flow_inp')
            # Coarse flow at 1/4 resolution; 32 channels = 2 flow components
            # * 4 * 4 sub-pixel positions for the pixel-shuffle upsampling.
            c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
            c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
            c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
            c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
            c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
            # Pixel-shuffle: [n, h/4, w/4, 2, 4, 4] -> [n, h, w, 2].
            c5_hr = tf.reshape(c5, [n, int(h / 4), int(w / 4), 2, 4, 4])
            c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
            c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
            img_warp1 = transformer(batch_size, c, c5_hr, imgb, [h, w])
            c5_pack = tf.concat([inputs, c5_hr, img_warp1], 3, name='cat')
            # Refinement flow at 1/2 resolution; 8 channels = 2 * 2 * 2.
            s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
            s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
            s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
            s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
            s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
            s5_hr = tf.reshape(s5, [n, int(h / 2), int(w / 2), 2, 2, 2])
            s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
            s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
            uv = (c5_hr + s5_hr)  # accumulated flow after the second scale
            img_warp2 = transformer(batch_size, c, uv, imgb, [h, w])
            s5_pack = tf.concat([inputs, uv, img_warp2], 3, name='cat2')
            # Full-resolution residual flow.
            a1 = slim.conv2d(s5_pack, 24, [3, 3], scope='a1')
            a2 = slim.conv2d(a1, 24, [3, 3], scope='a2')
            a3 = slim.conv2d(a2, 24, [3, 3], scope='a3')
            a4 = slim.conv2d(a3, 24, [3, 3], scope='a4')
            a5 = slim.conv2d(a4, 2, [3, 3], activation_fn=tf.nn.tanh, scope='a5')
            # 1x1 pixel-shuffle: a no-op reshape kept for symmetry with the
            # coarser scales.
            a5_hr = tf.reshape(a5, [n, h, w, 2, 1, 1])
            a5_hr = tf.transpose(a5_hr, [0, 1, 4, 2, 5, 3])
            a5_hr = tf.reshape(a5_hr, [n, h, w, 2])
            uv2 = (a5_hr + uv)  # final accumulated flow
            img_warp3 = transformer(batch_size, c, uv2, imgb, [h, w])
            tf.summary.histogram('c5_hr', c5_hr)
            tf.summary.histogram('s5_hr', s5_hr)
            tf.summary.histogram('uv', uv)
            # Bug fix: the 'a5' and 'uv2' histograms previously logged `uv`
            # again; log the tensors they are named after.
            tf.summary.histogram('a5', a5_hr)
            tf.summary.histogram('uv2', uv2)
            return img_warp3
|
def load_stack(type_process, ite_stack):
    """Load one preprocessed data stack from HDF5 files.

    Args:
        type_process: 'tra' (training) or 'val' (validation).
        ite_stack: stack index, starting from 0.

    Returns:
        Tuple (pre_list, cmp_list, sub_list, raw_list) of arrays read from
        dir_stack/stack_<type_process>_<kind>_<ite_stack>.hdf5.
    """
    def _load(kind):
        # Read dataset 'stack_<kind>' fully into memory; the context manager
        # closes the file (previously the h5py handles were never closed,
        # leaking file descriptors across repeated loads).
        stack_name = 'stack_%s_%s_%s.hdf5' % (type_process, kind, str(ite_stack))
        stack_path = os.path.join(dir_stack, stack_name)
        with h5py.File(stack_path, 'r') as h5f:
            data = h5f['stack_' + kind][:]
        print(kind + ' loaded.')
        return data
    return (_load('pre'), _load('cmp'), _load('sub'), _load('raw'))
|
def cal_MSE(img1, img2):
    """Return the mean squared error between two images.

    Both images are expected to lie in [0, 1].
    """
    diff = tf.subtract(img1, img2)
    return tf.reduce_mean(tf.pow(diff, 2.0))
|
def cal_PSNR(img1, img2):
    """Return the PSNR (in dB) between two images in [0, 1]."""
    mse = cal_MSE(img1, img2)
    # PSNR = 10 * log10(MAX^2 / MSE) with MAX = 1; tf.log is the natural
    # logarithm, hence the division by log(10).
    return (10.0 * tf.log(1.0 / mse)) / tf.log(10.0)
|
def main_train():
    """Train and evaluate the model (MC-subnet + QE-subnet, BN variant).

    Output: model_QPxx checkpoints and record_train_QPxx log entries.
    Relies on module-level configuration: config, BATCH_SIZE, HEIGHT, WIDTH,
    CHANNEL, ratio_small, lr_ori, epoch_step1, epoch_step2, dir_model,
    dir_stack, file_object.
    """
    sess = tf.Session(config=config)
    # x1/x3: neighboring reference frames, x2: compressed target frame,
    # x5: raw (ground-truth) frame.
    x1 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x2 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x3 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    x5 = tf.placeholder(tf.float32, [BATCH_SIZE, HEIGHT, WIDTH, CHANNEL])
    # Batch-norm training switch for the QE-subnet.
    is_training = tf.placeholder_with_default(False, shape=())
    PSNR_0 = cal_PSNR(x2, x5)  # PSNR before enhancement
    # Motion-compensate both neighbors toward the target frame x2.
    x1to2 = warp_img(tf.shape(x2)[0], x2, x1, False)
    x3to2 = warp_img(tf.shape(x2)[0], x2, x3, True)
    FlowLoss_1 = cal_MSE(x1to2, x2)
    FlowLoss_2 = cal_MSE(x3to2, x2)
    flow_loss = (FlowLoss_1 + FlowLoss_2)
    x2_enhanced = net_MFCNN.network(x1to2, x2, x3to2, is_training)
    MSE = cal_MSE(x2_enhanced, x5)
    PSNR = cal_PSNR(x2_enhanced, x5)
    delta_PSNR = (PSNR - PSNR_0)
    # Step 1 emphasizes the MC-subnet; step 2 emphasizes the QE-subnet.
    OptimizeLoss_1 = (flow_loss + (ratio_small * MSE))
    OptimizeLoss_2 = ((ratio_small * flow_loss) + MSE)
    # Run BN moving-average updates together with each optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        Training_step1 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_1)
        Training_step2 = tf.train.AdamOptimizer(lr_ori).minimize(OptimizeLoss_2)
    tf.summary.scalar('MSE loss of motion compensation', flow_loss)
    tf.summary.scalar('MSE loss of final quality enhancement', MSE)
    tf.summary.scalar('MSE loss for training step1 (mainly MC-subnet)', OptimizeLoss_1)
    tf.summary.scalar('MSE loss for training step2 (mainly QE-subnet)', OptimizeLoss_2)
    tf.summary.scalar('PSNR before enhancement', PSNR_0)
    tf.summary.scalar('PSNR after enhancement', PSNR)
    tf.summary.scalar('PSNR improvement', delta_PSNR)
    tf.summary.image('cmp', x2)
    tf.summary.image('x1to2', x1to2)
    tf.summary.image('x3to2', x3to2)
    tf.summary.image('enhanced', x2_enhanced)
    tf.summary.image('raw', x5)
    summary_writer = tf.summary.FileWriter(dir_model, sess.graph)
    summary_op = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=None)
    sess.run(tf.global_variables_initializer())
    # Count trainable parameters for the log.
    num_params = 0
    for variable in tf.trainable_variables():
        shape = variable.get_shape()
        num_params += reduce(mul, [dim.value for dim in shape], 1)
    print(('# num of parameters: %d #' % num_params))
    file_object.write(('# num of parameters: %d #\n' % num_params))
    file_object.flush()
    # Discover how many training/validation stacks exist on disk.
    stack_name = os.path.join(dir_stack, 'stack_tra_pre_*')
    num_TrainingStack = len(glob.glob(stack_name))
    stack_name = os.path.join(dir_stack, 'stack_val_pre_*')
    num_ValidationStack = len(glob.glob(stack_name))
    print('##### Start running! #####')
    num_TrainingBatch_count = 0
    for ite_step in [1, 2]:
        if (ite_step == 1):
            num_epoch = epoch_step1
        else:
            num_epoch = epoch_step2
        for ite_epoch in range(num_epoch):
            for ite_stack in range(num_TrainingStack):
                # Drop references to the previous stack before loading the
                # next one to limit peak memory usage.
                (pre_list, cmp_list, sub_list, raw_list) = ([], [], [], [])
                gc.collect()
                (pre_list, cmp_list, sub_list, raw_list) = load_stack('tra', ite_stack)
                gc.collect()
                num_batch = int((len(pre_list) / BATCH_SIZE))
                for ite_batch in range(num_batch):
                    print(('\rstep %1d - epoch %2d/%2d - training stack %2d/%2d - batch %3d/%3d' % (ite_step, (ite_epoch + 1), num_epoch, (ite_stack + 1), num_TrainingStack, (ite_batch + 1), num_batch)), end='')
                    start_index = (ite_batch * BATCH_SIZE)
                    next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                    if (ite_step == 1):
                        Training_step1.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                    else:
                        Training_step2.run(session=sess, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: True})
                    num_TrainingBatch_count += 1
                    # Log summaries twice per stack: midpoint and end.
                    if (((ite_batch + 1) == int((num_batch / 2))) or ((ite_batch + 1) == num_batch)):
                        (summary, delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch) = sess.run([summary_op, delta_PSNR, PSNR_0, flow_loss, MSE], feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                        summary_writer.add_summary(summary, num_TrainingBatch_count)
                        print(('\rstep %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f' % (ite_step, (ite_epoch + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                        file_object.write(('step %1d - epoch %2d - imp PSNR: %.3f - ori PSNR: %.3f - MSE loss of MC: %.5f - MSE loss of QE: %.8f\n' % (ite_step, (ite_epoch + 1), delta_PSNR_batch, PSNR_0_batch, FlowLoss_batch, MSE_batch)))
                        file_object.flush()
            # Checkpoint after every epoch of the current training step.
            if (ite_step == 1):
                CheckPoint_path = os.path.join(dir_model, 'model_step1.ckpt')
            else:
                CheckPoint_path = os.path.join(dir_model, 'model_step2.ckpt')
            saver.save(sess, CheckPoint_path, global_step=ite_epoch)
            # Validation: average PSNR improvement over all validation stacks.
            sum_improved_PSNR = 0
            num_patch_count = 0
            for ite_stack in range(num_ValidationStack):
                (pre_list, cmp_list, sub_list, raw_list) = ([], [], [], [])
                gc.collect()
                (pre_list, cmp_list, sub_list, raw_list) = load_stack('val', ite_stack)
                gc.collect()
                num_batch = int((len(pre_list) / BATCH_SIZE))
                for ite_batch in range(num_batch):
                    print(('step %1d - epoch %2d/%2d - validation stack %2d/%2d ' % (ite_step, (ite_epoch + 1), num_epoch, (ite_stack + 1), num_ValidationStack)))
                    start_index = (ite_batch * BATCH_SIZE)
                    next_start_index = ((ite_batch + 1) * BATCH_SIZE)
                    delta_PSNR_batch = sess.run(delta_PSNR, feed_dict={x1: pre_list[start_index:next_start_index], x2: cmp_list[start_index:next_start_index], x3: sub_list[start_index:next_start_index], x5: raw_list[start_index:next_start_index], is_training: False})
                    sum_improved_PSNR += (delta_PSNR_batch * BATCH_SIZE)
                    num_patch_count += BATCH_SIZE
            if (num_patch_count != 0):
                print(('### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###' % (ite_step, (ite_epoch + 1), num_epoch, (sum_improved_PSNR / num_patch_count))))
                file_object.write(('### imp PSNR by model after step %1d - epoch %2d/%2d: %.3f ###\n' % (ite_step, (ite_epoch + 1), num_epoch, (sum_improved_PSNR / num_patch_count))))
                file_object.flush()
|
def network(frame1, frame2, frame3, is_training, reuse=False, scope='netflow'):
    """Quality-enhancement subnet with batch normalization.

    Extracts multi-scale (3x3 / 5x5 / 7x7) features from the three aligned
    single-channel frames, fuses them, refines them through densely connected
    3x3 convolutions (each followed by batch norm and PReLU) and adds the
    predicted residual back to the central frame.

    Args:
        frame1: preceding motion-compensated frame, [batch, h, w, 1].
        frame2: compressed target frame to enhance, [batch, h, w, 1].
        frame3: following motion-compensated frame, [batch, h, w, 1].
        is_training: batch-norm training switch.
        reuse: variable-scope reuse flag.
        scope: variable scope name.

    Returns:
        The enhanced frame: frame2 + predicted residual.
    """
    with tf.variable_scope(scope, reuse=reuse):
        # Multi-scale feature-extraction weights: one 3x3/5x5/7x7 triplet per
        # input frame, 32 filters each (suffix _1/_2/_3 = frame index).
        c3_1_w = tf.get_variable('c3_1_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_1_b = tf.get_variable('c3_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_2_w = tf.get_variable('c3_2_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_2_b = tf.get_variable('c3_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_3_w = tf.get_variable('c3_3_w', shape=[3, 3, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_3_b = tf.get_variable('c3_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_1_w = tf.get_variable('c5_1_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_1_b = tf.get_variable('c5_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_2_w = tf.get_variable('c5_2_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_2_b = tf.get_variable('c5_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_3_w = tf.get_variable('c5_3_w', shape=[5, 5, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_3_b = tf.get_variable('c5_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_1_w = tf.get_variable('c7_1_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_1_b = tf.get_variable('c7_1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_2_w = tf.get_variable('c7_2_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_2_b = tf.get_variable('c7_2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c7_3_w = tf.get_variable('c7_3_w', shape=[7, 7, 1, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c7_3_b = tf.get_variable('c7_3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        # Fusion/refinement weights; the input depths grow with the dense
        # concatenations below (c1 sees 3 frames * 3 scales * 32 channels).
        c1_w = tf.get_variable('c1_w', shape=[3, 3, ((32 * 3) * 3), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c1_b = tf.get_variable('c1_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c2_w = tf.get_variable('c2_w', shape=[3, 3, 32, 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c2_b = tf.get_variable('c2_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c3_w = tf.get_variable('c3_w', shape=[3, 3, (32 * 2), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c3_b = tf.get_variable('c3_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c4_w = tf.get_variable('c4_w', shape=[3, 3, (32 * 3), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c4_b = tf.get_variable('c4_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c5_w = tf.get_variable('c5_w', shape=[3, 3, (32 * 4), 32], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c5_b = tf.get_variable('c5_b', shape=[32], initializer=tf.constant_initializer(0.0))
        c6_w = tf.get_variable('c6_w', shape=[3, 3, 32, 1], initializer=tf.contrib.layers.xavier_initializer(uniform=True))
        c6_b = tf.get_variable('c6_b', shape=[1], initializer=tf.constant_initializer(0.0))
        # Frame 1: multi-scale conv + bias + PReLU, then concatenate scales.
        c3_1 = tf.nn.conv2d(frame1, c3_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_1 = tf.nn.bias_add(c3_1, c3_1_b)
        c3_1 = tflearn.activations.prelu(c3_1)
        c5_1 = tf.nn.conv2d(frame1, c5_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_1 = tf.nn.bias_add(c5_1, c5_1_b)
        c5_1 = tflearn.activations.prelu(c5_1)
        c7_1 = tf.nn.conv2d(frame1, c7_1_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_1 = tf.nn.bias_add(c7_1, c7_1_b)
        c7_1 = tflearn.activations.prelu(c7_1)
        cc_1 = tf.concat([c3_1, c5_1, c7_1], 3)
        # Frame 2: same multi-scale feature extraction.
        c3_2 = tf.nn.conv2d(frame2, c3_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_2 = tf.nn.bias_add(c3_2, c3_2_b)
        c3_2 = tflearn.activations.prelu(c3_2)
        c5_2 = tf.nn.conv2d(frame2, c5_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_2 = tf.nn.bias_add(c5_2, c5_2_b)
        c5_2 = tflearn.activations.prelu(c5_2)
        c7_2 = tf.nn.conv2d(frame2, c7_2_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_2 = tf.nn.bias_add(c7_2, c7_2_b)
        c7_2 = tflearn.activations.prelu(c7_2)
        cc_2 = tf.concat([c3_2, c5_2, c7_2], 3)
        # Frame 3: same multi-scale feature extraction.
        c3_3 = tf.nn.conv2d(frame3, c3_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c3_3 = tf.nn.bias_add(c3_3, c3_3_b)
        c3_3 = tflearn.activations.prelu(c3_3)
        c5_3 = tf.nn.conv2d(frame3, c5_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c5_3 = tf.nn.bias_add(c5_3, c5_3_b)
        c5_3 = tflearn.activations.prelu(c5_3)
        c7_3 = tf.nn.conv2d(frame3, c7_3_w, strides=[1, 1, 1, 1], padding='SAME')
        c7_3 = tf.nn.bias_add(c7_3, c7_3_b)
        c7_3 = tflearn.activations.prelu(c7_3)
        cc_3 = tf.concat([c3_3, c5_3, c7_3], 3)
        # Fuse all three frames' features.
        c_concat = tf.concat([cc_1, cc_2, cc_3], 3)
        # Densely connected refinement: each conv sees all previous outputs;
        # every layer is conv -> bias -> batch norm -> PReLU.
        c1 = tf.nn.conv2d(c_concat, c1_w, strides=[1, 1, 1, 1], padding='SAME')
        c1 = tf.nn.bias_add(c1, c1_b)
        c1 = tf.layers.batch_normalization(c1, training=is_training)
        c1 = tflearn.activations.prelu(c1)
        c2 = tf.nn.conv2d(c1, c2_w, strides=[1, 1, 1, 1], padding='SAME')
        c2 = tf.nn.bias_add(c2, c2_b)
        c2 = tf.layers.batch_normalization(c2, training=is_training)
        c2 = tflearn.activations.prelu(c2)
        cc2 = tf.concat([c1, c2], 3)
        c3 = tf.nn.conv2d(cc2, c3_w, strides=[1, 1, 1, 1], padding='SAME')
        c3 = tf.nn.bias_add(c3, c3_b)
        c3 = tf.layers.batch_normalization(c3, training=is_training)
        c3 = tflearn.activations.prelu(c3)
        cc3 = tf.concat([c1, c2, c3], 3)
        c4 = tf.nn.conv2d(cc3, c4_w, strides=[1, 1, 1, 1], padding='SAME')
        c4 = tf.nn.bias_add(c4, c4_b)
        c4 = tf.layers.batch_normalization(c4, training=is_training)
        c4 = tflearn.activations.prelu(c4)
        cc4 = tf.concat([c1, c2, c3, c4], 3)
        c5 = tf.nn.conv2d(cc4, c5_w, strides=[1, 1, 1, 1], padding='SAME')
        c5 = tf.nn.bias_add(c5, c5_b)
        c5 = tf.layers.batch_normalization(c5, training=is_training)
        c5 = tflearn.activations.prelu(c5)
        # Final 1-channel projection.
        c6 = tf.nn.conv2d(c5, c6_w, strides=[1, 1, 1, 1], padding='SAME')
        c6 = tf.nn.bias_add(c6, c6_b)
        c6 = tf.layers.batch_normalization(c6, training=is_training)
        c6 = tflearn.activations.prelu(c6)
        # Residual learning: add the predicted residual to the target frame.
        output = tf.add(c6, frame2)
        return output
|
def network2(frame1, frame2, frame3, reuse=False, scope='netflow'):
    """Multi-scale residual enhancement network (slim variant).

    Each input frame passes through parallel 3x3/5x5/7x7 conv branches;
    the branch outputs are concatenated across frames, refined by a stack
    of 3x3 convs, and the final single-channel result is added to frame2
    as a residual.
    """
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            # Per-frame multi-kernel feature extraction; scope names match
            # the original layer-by-layer construction exactly.
            per_frame_feats = []
            for frame_idx, frame in enumerate((frame1, frame2, frame3), start=1):
                branches = [slim.conv2d(frame, 32, [ksize, ksize], scope='conv%d_%d' % (ksize, frame_idx)) for ksize in (3, 5, 7)]
                per_frame_feats.append(tf.concat(branches, 3, name='concat_%d' % frame_idx))
            feat = tf.concat(per_frame_feats, 3, name='c_concat')
            # Trunk: seven 32-channel 3x3 convs, then one 16-channel conv.
            for conv_idx in range(1, 8):
                feat = slim.conv2d(feat, 32, [3, 3], scope='cconv%d' % conv_idx)
            feat = slim.conv2d(feat, 16, [3, 3], scope='cconv8')
            # Linear (no activation) output conv produces the residual.
            residual = slim.conv2d(feat, 1, [3, 3], activation_fn=None, scope='cout')
            output = tf.add(residual, frame2)
    return output
|
def transformer(batch, chan, flow, U, out_size, name='SpatialTransformer', **kwargs):
    """Warp tensor U with a dense flow field using bilinear sampling.

    The flow is split along the last axis into x/y displacement maps, which
    are added to a per-pixel base grid; U is then sampled at the displaced
    coordinates with bilinear interpolation.

    Args:
        batch: Static batch size, used only to reshape the final output.
        chan: Static channel count, used only to reshape the final output.
        flow: Flow tensor with 2 channels in its last axis (dx, dy).
        U: Tensor of shape (num_batch, height, width, channels) to sample.
        out_size: (out_height, out_width) of the output.
        name: Variable-scope name. Default: 'SpatialTransformer'.
        **kwargs: Unused; kept for call-site compatibility.

    Returns:
        Tensor of shape (batch, out_height, out_width, chan).
    """
    def _repeat(x, n_repeats):
        # Block-wise repeat: [a, b] -> [a, a, ..., b, b, ...] with each
        # element repeated n_repeats times (via an outer product of ones).
        with tf.variable_scope('_repeat'):
            rep = tf.transpose(tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1), [1, 0])
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(tf.reshape(x, ((- 1), 1)), rep)
            return tf.reshape(x, [(- 1)])
    def _repeat2(x, n_repeats):
        # Whole-sequence tile: [a, b] -> [a, b, a, b, ...] repeated
        # n_repeats times (note: transposed matmul versus _repeat).
        with tf.variable_scope('_repeat'):
            rep = tf.expand_dims(tf.ones(shape=tf.stack([n_repeats])), 1)
            rep = tf.cast(rep, 'int32')
            x = tf.matmul(rep, tf.reshape(x, (1, (- 1))))
            return tf.reshape(x, [(- 1)])
    def _interpolate(im, x, y, out_size):
        # Bilinearly sample `im` at flow-displaced coordinates.
        # x and y are flattened per-pixel displacements (one per output pixel).
        with tf.variable_scope('_interpolate'):
            num_batch = tf.shape(im)[0]
            height = tf.shape(im)[1]
            width = tf.shape(im)[2]
            channels = tf.shape(im)[3]
            x = tf.cast(x, 'float32')
            y = tf.cast(y, 'float32')
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            zero = tf.zeros([], dtype='int32')
            max_y = tf.cast((tf.shape(im)[1] - 1), 'int32')
            max_x = tf.cast((tf.shape(im)[2] - 1), 'int32')
            # Absolute sampling coordinates = base pixel grid + displacement.
            # The column grid is tiled across rows/batch (_repeat2) and the
            # row grid is element-repeated across columns (_repeat) so both
            # line up with the row-major flattening used below.
            # NOTE(review): displacements are multiplied by 64, i.e. the flow
            # appears to be expressed in units of 1/64 pixel — confirm
            # against the flow producer (e.g. the tanh-scaled outputs in
            # warp_img).
            x = (tf.cast(_repeat2(tf.range(0, width), (height * num_batch)), 'float32') + (x * 64))
            y = (tf.cast(_repeat2(_repeat(tf.range(0, height), width), num_batch), 'float32') + (y * 64))
            # Integer corner coordinates around each sampling point,
            # clamped to the valid image range (border replication).
            x0 = tf.cast(tf.floor(x), 'int32')
            x1 = (x0 + 1)
            y0 = tf.cast(tf.floor(y), 'int32')
            y1 = (y0 + 1)
            x0 = tf.clip_by_value(x0, zero, max_x)
            x1 = tf.clip_by_value(x1, zero, max_x)
            y0 = tf.clip_by_value(y0, zero, max_y)
            y1 = tf.clip_by_value(y1, zero, max_y)
            # Flat indices into the (batch*height*width, channels) image.
            dim2 = width
            dim1 = (width * height)
            base = _repeat((tf.range(num_batch) * dim1), (out_height * out_width))
            base_y0 = (base + (y0 * dim2))
            base_y1 = (base + (y1 * dim2))
            idx_a = (base_y0 + x0)
            idx_b = (base_y1 + x0)
            idx_c = (base_y0 + x1)
            idx_d = (base_y1 + x1)
            im_flat = tf.reshape(im, tf.stack([(- 1), channels]))
            im_flat = tf.cast(im_flat, 'float32')
            # Gather the four corner pixels for every sampling point.
            Ia = tf.gather(im_flat, idx_a)
            Ib = tf.gather(im_flat, idx_b)
            Ic = tf.gather(im_flat, idx_c)
            Id = tf.gather(im_flat, idx_d)
            # Standard bilinear weights from the fractional position.
            x0_f = tf.cast(x0, 'float32')
            x1_f = tf.cast(x1, 'float32')
            y0_f = tf.cast(y0, 'float32')
            y1_f = tf.cast(y1, 'float32')
            wa = tf.expand_dims(((x1_f - x) * (y1_f - y)), 1)
            wb = tf.expand_dims(((x1_f - x) * (y - y0_f)), 1)
            wc = tf.expand_dims(((x - x0_f) * (y1_f - y)), 1)
            wd = tf.expand_dims(((x - x0_f) * (y - y0_f)), 1)
            output = tf.add_n([(wa * Ia), (wb * Ib), (wc * Ic), (wd * Id)])
            return output
    def _meshgrid(height, width):
        # Build a normalized [-1, 1] sampling grid (x, y, ones rows).
        # NOTE(review): defined but never used by _transform below.
        with tf.variable_scope('_meshgrid'):
            x_t = tf.matmul(tf.ones(shape=tf.stack([height, 1])), tf.transpose(tf.expand_dims(tf.linspace((- 1.0), 1.0, width), 1), [1, 0]))
            y_t = tf.matmul(tf.expand_dims(tf.linspace((- 1.0), 1.0, height), 1), tf.ones(shape=tf.stack([1, width])))
            x_t_flat = tf.reshape(x_t, (1, (- 1)))
            y_t_flat = tf.reshape(y_t, (1, (- 1)))
            ones = tf.ones_like(x_t_flat)
            grid = tf.concat(axis=0, values=[x_t_flat, y_t_flat, ones])
            return grid
    def _transform(x_s, y_s, input_dim, out_size):
        # Flatten the displacement maps, sample input_dim, and reshape back
        # to (batch, out_height, out_width, chan) using the static
        # batch/chan arguments of the enclosing transformer().
        with tf.variable_scope('_transform'):
            num_batch = tf.shape(input_dim)[0]
            height = tf.shape(input_dim)[1]
            width = tf.shape(input_dim)[2]
            num_channels = tf.shape(input_dim)[3]
            height_f = tf.cast(height, 'float32')
            width_f = tf.cast(width, 'float32')
            out_height = out_size[0]
            out_width = out_size[1]
            x_s_flat = tf.reshape(x_s, [(- 1)])
            y_s_flat = tf.reshape(y_s, [(- 1)])
            input_transformed = _interpolate(input_dim, x_s_flat, y_s_flat, out_size)
            output = tf.reshape(input_transformed, tf.stack([batch, out_height, out_width, chan]))
            return output
    with tf.variable_scope(name):
        # Flow channels: [..., 0] is the x displacement, [..., 1] the y.
        (dx, dy) = tf.split(flow, 2, 3)
        output = _transform(dx, dy, U, out_size)
        return output
|
def warp_img(batch_size, imga, imgb, reuse, scope='easyflow'):
    """Coarse-to-fine flow estimation; warp imgb towards imga.

    Three cascaded stages each predict a flow refinement at reduced
    resolution, expand it to full resolution via pixel-shuffle-style
    reshaping, and warp imgb with the accumulated flow.

    Args:
        batch_size: Static batch size forwarded to transformer().
        imga: Reference frame, shape (n, h, w, c) known statically.
        imgb: Frame to be warped towards imga, same shape.
        reuse: Variable-scope reuse flag.
        scope: Variable-scope name. Default: 'easyflow'.

    Returns:
        Tensor: imgb warped by the final accumulated flow uv2.
    """
    (n, h, w, c) = imga.get_shape().as_list()
    with tf.variable_scope(scope, reuse=reuse):
        with slim.arg_scope([slim.conv2d], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)), slim.arg_scope([slim.conv2d_transpose], activation_fn=tflearn.activations.prelu, weights_initializer=tf.contrib.layers.xavier_initializer(uniform=True), biases_initializer=tf.constant_initializer(0.0)):
            inputs = tf.concat([imga, imgb], 3, name='flow_inp')
            # Stage 1: coarse flow at 1/4 resolution (two stride-2 convs),
            # 32 = 2 flow channels * 4 * 4 upsampling factors.
            c1 = slim.conv2d(inputs, 24, [5, 5], stride=2, scope='c1')
            c2 = slim.conv2d(c1, 24, [3, 3], scope='c2')
            c3 = slim.conv2d(c2, 24, [5, 5], stride=2, scope='c3')
            c4 = slim.conv2d(c3, 24, [3, 3], scope='c4')
            c5 = slim.conv2d(c4, 32, [3, 3], activation_fn=tf.nn.tanh, scope='c5')
            # Depth-to-space reshape: (n, h/4, w/4, 2*4*4) -> (n, h, w, 2).
            c5_hr = tf.reshape(c5, [n, int(h / 4), int(w / 4), 2, 4, 4])
            c5_hr = tf.transpose(c5_hr, [0, 1, 4, 2, 5, 3])
            c5_hr = tf.reshape(c5_hr, [n, h, w, 2])
            img_warp1 = transformer(batch_size, c, c5_hr, imgb, [h, w])
            # Stage 2: refine at 1/2 resolution; 8 = 2 * 2 * 2.
            c5_pack = tf.concat([inputs, c5_hr, img_warp1], 3, name='cat')
            s1 = slim.conv2d(c5_pack, 24, [5, 5], stride=2, scope='s1')
            s2 = slim.conv2d(s1, 24, [3, 3], scope='s2')
            s3 = slim.conv2d(s2, 24, [3, 3], scope='s3')
            s4 = slim.conv2d(s3, 24, [3, 3], scope='s4')
            s5 = slim.conv2d(s4, 8, [3, 3], activation_fn=tf.nn.tanh, scope='s5')
            s5_hr = tf.reshape(s5, [n, int(h / 2), int(w / 2), 2, 2, 2])
            s5_hr = tf.transpose(s5_hr, [0, 1, 4, 2, 5, 3])
            s5_hr = tf.reshape(s5_hr, [n, h, w, 2])
            uv = (c5_hr + s5_hr)
            img_warp2 = transformer(batch_size, c, uv, imgb, [h, w])
            # Stage 3: full-resolution refinement of the accumulated flow.
            s5_pack = tf.concat([inputs, uv, img_warp2], 3, name='cat2')
            a1 = slim.conv2d(s5_pack, 24, [3, 3], scope='a1')
            a2 = slim.conv2d(a1, 24, [3, 3], scope='a2')
            a3 = slim.conv2d(a2, 24, [3, 3], scope='a3')
            a4 = slim.conv2d(a3, 24, [3, 3], scope='a4')
            a5 = slim.conv2d(a4, 2, [3, 3], activation_fn=tf.nn.tanh, scope='a5')
            # Identity reshape kept for symmetry with the other stages.
            a5_hr = tf.reshape(a5, [n, h, w, 2, 1, 1])
            a5_hr = tf.transpose(a5_hr, [0, 1, 4, 2, 5, 3])
            a5_hr = tf.reshape(a5_hr, [n, h, w, 2])
            uv2 = (a5_hr + uv)
            img_warp3 = transformer(batch_size, c, uv2, imgb, [h, w])
            tf.summary.histogram('c5_hr', c5_hr)
            tf.summary.histogram('s5_hr', s5_hr)
            tf.summary.histogram('uv', uv)
            # Bug fix: the 'a5' and 'uv2' summaries previously logged `uv`
            # again (copy-paste error), so the stage-3 refinement and final
            # flow were never visible in TensorBoard.
            tf.summary.histogram('a5', a5_hr)
            tf.summary.histogram('uv2', uv2)
            return img_warp3
|
def build_dataset(cfg, default_args=None):
    """Build dataset.

    Difference to that in MMEditing: Use the DATASETS in PowerQE.
    """
    # A list/tuple of configs builds a concatenation of datasets.
    if isinstance(cfg, (list, tuple)):
        return ConcatDataset([build_dataset(sub_cfg, default_args) for sub_cfg in cfg])
    # RepeatDataset wraps an inner dataset built recursively.
    if cfg['type'] == 'RepeatDataset':
        return RepeatDataset(build_dataset(cfg['dataset'], default_args), cfg['times'])
    # Multiple annotation files imply one dataset per file, concatenated.
    if isinstance(cfg.get('ann_file'), (list, tuple)):
        return _concat_dataset(cfg, default_args)
    return build_from_cfg(cfg, DATASETS, default_args)
|
@DATASETS.register_module()
class PairedVideoDataset(SRAnnotationDataset):
    """Paired video dataset with an annotation file.

    Differences to SRAnnotationDataset:
        Support versatile video loading. See arguments.

    Suppose the video sequences are stored as::

        powerqe
        `-- {gt,lq}_folder
            `-- {001,002,...,100}
                `-- im{1,2,...,7}.png

    Then the annotation file should be::

        001
        002
        ...
        100

    Suppose the video sequences are stored as::

        powerqe
        `-- {gt,lq}_folder
            `-- {001,002,...,100}
                `-- {0001,0002,...,1000}
                    `-- im{1,2,...,7}.png

    Then the annotation file should be::

        001/0001
        001/0002
        ...
        001/1000
        002/0001
        ...
        100/1000

    Args:
        lq_folder (str | :obj:`Path`): LQ folder.
        gt_folder (str | :obj:`Path`): GT folder.
        pipeline (List[dict | callable]): A list of data transformations.
        ann_file (str | :obj:`Path`): Path to the annotation file.
            Each line records a sequence path relative to the GT/LQ folder.
            If empty, collect all sequences in a folder. Default: ''.
        scale (int): Upsampling scale ratio. Default: 1.
        test_mode (bool): Store True when building test dataset.
            Default: False.
        samp_len (int): Sample length.
            The default value -1 corresponds to the sequence length.
            Default: -1.
        stride (int): Sample stride. Default: 1.
        padding (bool): Set True to obtain more samples.
            Value False is recommended for training and True for testing.
            Default: False.
        center_gt (bool): If True, only the center frame is recorded in GT.
            The samp_len is required to be odd.
            Note that gt_path is always a list. Default: False.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, ann_file='', scale=1, test_mode=False, samp_len=-1, stride=1, padding=False, center_gt=False):
        # Record sampling options before super().__init__, which triggers
        # load_annotations and therefore reads them.
        self.samp_len = samp_len
        self.stride = stride
        self.padding = padding
        self.center_gt = center_gt
        super().__init__(lq_folder=lq_folder, gt_folder=gt_folder, ann_file=ann_file, pipeline=pipeline, scale=scale, test_mode=test_mode)

    def find_neighboring_frames(self, center_idx, seq_len, nfrms_left, nfrms_right):
        """Return the indices of the temporal window around center_idx,
        clamped to [0, seq_len - 1] (edge frames are replicated)."""
        idxs = list(range(center_idx - nfrms_left, center_idx + nfrms_right + 1))
        idxs = [max(min(x, seq_len - 1), 0) for x in idxs]
        return idxs

    def load_annotations(self):
        """Load sequences according to the annotation file.

        The GT sequence includes all frames by default.

        Returned keys (sequence ID; also used for image saving):

        (1) center_gt is True: one key per sample, naming the sample's
            center frame, e.g. ``001/0001/im4.png``.

        (2) center_gt is False: one key per sample listing all frame
            names, e.g. ``001/0001/im1.png,im2.png,...,im7.png``.

        See the image saving function in BasicVQERestorer for reasons.

        Returns:
            list[dict]: Each dict records the information for a
                sub-sequence to serve as a sample in training or testing.
        """
        if self.ann_file:
            with open(self.ann_file, 'r') as f:
                keys = f.read().split('\n')
            # Bug fix: the old filter "(k.strip() is not None) and (k != '')"
            # let whitespace-only lines through (str.strip() never returns
            # None), producing empty keys. Keep non-blank lines only.
            keys = [k.strip() for k in keys if k.strip()]
            keys = [key.replace('/', os.sep) for key in keys]
        else:
            # No annotation file: collect every sub-directory of gt_folder.
            sub_dirs = glob(osp.join(self.gt_folder, '*/'))
            keys = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]
        data_infos = []
        for key in keys:
            gt_seq = osp.join(self.gt_folder, key)
            lq_seq = osp.join(self.lq_folder, key)
            gt_paths = self.scan_folder(gt_seq)
            assert len(gt_paths) > 0, f'No images were found in "{gt_seq}".'
            lq_paths = self.scan_folder(lq_seq)
            assert len(gt_paths) == len(lq_paths), f'The GT and LQ sequences for key "{key}" should have the same number of images; GT has {len(gt_paths)} images while LQ has {len(lq_paths)} images.'
            # Sort frame names numerically (so im2 < im10) and check that LQ
            # has a counterpart for every GT frame.
            gt_names = [osp.basename(gt_path) for gt_path in gt_paths]
            gt_names = sorted(gt_names, key=lambda x: int(''.join(filter(str.isdigit, x))))
            gt_paths = [osp.join(gt_seq, gt_name) for gt_name in gt_names]
            for gt_name in gt_names:
                lq_path = osp.join(lq_seq, gt_name)
                assert lq_path in lq_paths, f'Cannot find "{lq_path}" in "{lq_seq}".'
            samp_len = len(gt_paths) if self.samp_len == -1 else self.samp_len
            assert samp_len <= len(gt_paths), f'The sample length ({samp_len}) should not be larger than the sequence length ({len(gt_paths)}).'
            if self.center_gt and (samp_len % 2 == 0):
                raise ValueError(f'The sample length ({samp_len}) should be odd when requiring center GT.')
            seq_len = len(gt_paths)
            nfrms_left = samp_len // 2
            nfrms_right = 0 if samp_len == 1 else (samp_len - nfrms_left - 1)
            # Without padding, center frames are restricted so the whole
            # sample window stays inside the sequence.
            samp_start = 0 if self.padding else nfrms_left
            samp_end = seq_len if self.padding else (seq_len - nfrms_right)
            center_idxs = list(range(samp_start, samp_end, self.stride))
            # In test mode, always cover the tail of the sequence even when
            # the stride would otherwise skip it.
            if self.test_mode and ((center_idxs[-1] + nfrms_right + 1) < seq_len):
                center_idxs.append(seq_len - nfrms_right - 1)
            for center_idx in center_idxs:
                lq_idxs = self.find_neighboring_frames(center_idx=center_idx, seq_len=seq_len, nfrms_left=nfrms_left, nfrms_right=nfrms_right)
                if self.center_gt:
                    gt_idxs = [center_idx]
                else:
                    gt_idxs = lq_idxs
                samp_gt_paths = [gt_paths[idx] for idx in gt_idxs]
                samp_lq_paths = [osp.join(lq_seq, gt_names[idx]) for idx in lq_idxs]
                record_key = key + os.sep + ','.join([gt_names[idx] for idx in gt_idxs])
                data_infos.append(dict(gt_path=samp_gt_paths, lq_path=samp_lq_paths, key=record_key))
        return data_infos
|
@DATASETS.register_module()
class PairedVideoKeyFramesDataset(PairedVideoDataset):
    """Paired video dataset with an annotation file. Return the paths of
    neighboring key frames.

    Differences to PairedVideoDataset:
        Use high-quality key frames instead of immediate neighboring
        frames. See ``find_neighboring_frames``.

    Storage layout and annotation-file format are the same as for
    PairedVideoDataset.

    Args:
        lq_folder (str | :obj:`Path`): LQ folder.
        gt_folder (str | :obj:`Path`): GT folder.
        pipeline (List[dict | callable]): A list of data transformations.
        ann_file (str | :obj:`Path`): Path to the annotation file.
            Each line records a sequence path relative to the GT/LQ folder.
            If empty, collect all sequences in a folder. Default: ''.
        scale (int): Upsampling scale ratio. Default: 1.
        test_mode (bool): Store True when building test dataset.
            Default: False.
        samp_len (int): Sample length.
            The default value -1 corresponds to the sequence length.
            Default: -1.
        stride (int): Sample stride. Default: 1.
        padding (bool): Set True to obtain more samples.
            Value False is recommended for training and True for testing.
            Default: False.
        center_gt (bool): If True, only the center frame is recorded in GT.
            The samp_len is required to be odd.
            Note that gt_path is always a list. Default: False.
        key_frames (list): Key-frame annotation for a sequence.
            "1" denotes key frames; "0" denotes non-key frames.
            Can be longer than the sequence.
            See the document for more details.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, ann_file='', scale=1, test_mode=False, samp_len=-1, stride=1, padding=False, center_gt=False, key_frames=None):
        # Default annotation: every other frame (im1/im3/im5/im7) is a key
        # frame for 7-frame sequences.
        if key_frames is None:
            key_frames = [1, 0, 1, 0, 1, 0, 1]
        self.key_frames = key_frames
        super().__init__(lq_folder=lq_folder, gt_folder=gt_folder, ann_file=ann_file, pipeline=pipeline, scale=scale, test_mode=test_mode, samp_len=samp_len, stride=stride, padding=padding, center_gt=center_gt)

    def find_neighboring_frames(self, seq_len, center_idx, nfrms_left, nfrms_right):
        """Pick nfrms_left key frames before and nfrms_right key frames
        after center_idx, replicating the nearest available index when not
        enough key frames exist on a side."""
        # Bug fix: the old assertion message called len() on the int
        # seq_len (raising TypeError instead of AssertionError when
        # triggered) and interpolated the whole key_frames list instead of
        # its length.
        assert len(self.key_frames) >= seq_len, f'The length of the key-frame annotation ({len(self.key_frames)}) should be no less than that of the sequence ({seq_len}).'
        key_frames = self.key_frames[:seq_len]
        key_idxs = [idx for idx in range(len(key_frames)) if key_frames[idx]]
        # Key frames strictly before the center frame.
        key_idxs_left = [idx for idx in key_idxs if idx < center_idx]
        if len(key_idxs_left) == 0:
            # No key frame on the left: fall back to the previous frame.
            key_idxs_left = [center_idx - 1] * nfrms_left
        elif len(key_idxs_left) < nfrms_left:
            # Pad by replicating the earliest available key frame.
            key_idxs_left = [key_idxs_left[0]] * (nfrms_left - len(key_idxs_left)) + key_idxs_left
        else:
            key_idxs_left = key_idxs_left[-nfrms_left:]
        # Key frames strictly after the center frame (mirrored logic).
        key_idxs_right = [idx for idx in key_idxs if idx > center_idx]
        if len(key_idxs_right) == 0:
            key_idxs_right = [center_idx + 1] * nfrms_right
        elif len(key_idxs_right) < nfrms_right:
            key_idxs_right = key_idxs_right + [key_idxs_right[-1]] * (nfrms_right - len(key_idxs_right))
        else:
            key_idxs_right = key_idxs_right[:nfrms_right]
        idxs = key_idxs_left + [center_idx] + key_idxs_right
        # Clamp to the valid frame-index range (handles the fallbacks above
        # at the sequence borders).
        idxs = [max(min(x, seq_len - 1), 0) for x in idxs]
        return idxs
|
@DATASETS.register_module()
class PairedVideoKeyFramesAnnotationDataset(PairedVideoDataset):
    """Paired video dataset with an annotation file. Return the annotation
    of key frames in each sample.

    Differences to PairedVideoDataset:
        Return key-frame annotation per sample. See ``load_annotations``.

    Storage layout and annotation-file format are the same as for
    PairedVideoDataset.

    Args:
        lq_folder (str | :obj:`Path`): LQ folder.
        gt_folder (str | :obj:`Path`): GT folder.
        pipeline (List[dict | callable]): A list of data transformations.
        ann_file (str | :obj:`Path`): Path to the annotation file.
            Each line records a sequence path relative to the GT/LQ folder.
            If empty, collect all sequences in a folder. Default: ''.
        scale (int): Upsampling scale ratio. Default: 1.
        test_mode (bool): Store True when building test dataset.
            Default: False.
        samp_len (int): Sample length.
            The default value -1 corresponds to the sequence length.
            Default: -1.
        stride (int): Sample stride. Default: 1.
        padding (bool): Set True to obtain more samples.
            Value False is recommended for training and True for testing.
            Default: False.
        center_gt (bool): If True, only the center frame is recorded in GT.
            The samp_len is required to be odd.
            Note that gt_path is always a list. Default: False.
        key_frames (list): Key-frame annotation for a sequence.
            "1" denotes key frames; "0" denotes non-key frames.
            Can be longer than the sequence.
            See the document for more details.
    """

    def __init__(self, lq_folder, gt_folder, pipeline, ann_file='', scale=1, test_mode=False, samp_len=(- 1), stride=1, padding=False, center_gt=False, key_frames=None):
        # Default annotation: every other frame is a key frame.
        if key_frames is None:
            key_frames = [1, 0, 1, 0, 1, 0, 1]
        self.key_frames = key_frames
        super().__init__(lq_folder=lq_folder, gt_folder=gt_folder, ann_file=ann_file, pipeline=pipeline, scale=scale, test_mode=test_mode, samp_len=samp_len, stride=stride, padding=padding, center_gt=center_gt)

    def load_annotations(self):
        """Load sequences according to the annotation file.

        Same as PairedVideoDataset.load_annotations, except that each
        returned record additionally carries ``key_frms``: the key-frame
        flags (from self.key_frames) for the sample's LQ frame indices.

        Returns:
            list[dict]: Each dict records the information for a
                sub-sequence to serve as a sample in training or testing.
        """
        if self.ann_file:
            with open(self.ann_file, 'r') as f:
                keys = f.read().split('\n')
            # Bug fix: the old filter "(k.strip() is not None) and (k != '')"
            # let whitespace-only lines through (str.strip() never returns
            # None), producing empty keys. Keep non-blank lines only.
            keys = [k.strip() for k in keys if k.strip()]
            keys = [key.replace('/', os.sep) for key in keys]
        else:
            # No annotation file: collect every sub-directory of gt_folder.
            sub_dirs = glob(osp.join(self.gt_folder, '*/'))
            keys = [sub_dir.split('/')[-2] for sub_dir in sub_dirs]
        data_infos = []
        for key in keys:
            gt_seq = osp.join(self.gt_folder, key)
            lq_seq = osp.join(self.lq_folder, key)
            gt_paths = self.scan_folder(gt_seq)
            assert len(gt_paths) > 0, f'No images were found in "{gt_seq}".'
            lq_paths = self.scan_folder(lq_seq)
            assert len(gt_paths) == len(lq_paths), f'The GT and LQ sequences for key "{key}" should have the same number of images; GT has {len(gt_paths)} images while LQ has {len(lq_paths)} images.'
            # Sort frame names numerically and check LQ completeness.
            gt_names = [osp.basename(gt_path) for gt_path in gt_paths]
            gt_names = sorted(gt_names, key=lambda x: int(''.join(filter(str.isdigit, x))))
            gt_paths = [osp.join(gt_seq, gt_name) for gt_name in gt_names]
            for gt_name in gt_names:
                lq_path = osp.join(lq_seq, gt_name)
                assert lq_path in lq_paths, f'Cannot find "{lq_path}" in "{lq_seq}".'
            samp_len = len(gt_paths) if self.samp_len == -1 else self.samp_len
            assert samp_len <= len(gt_paths), f'The sample length ({samp_len}) should not be larger than the sequence length ({len(gt_paths)}).'
            if self.center_gt and (samp_len % 2 == 0):
                raise ValueError(f'The sample length ({samp_len}) should be odd when requiring center GT.')
            seq_len = len(gt_paths)
            nfrms_left = samp_len // 2
            nfrms_right = 0 if samp_len == 1 else (samp_len - nfrms_left - 1)
            samp_start = 0 if self.padding else nfrms_left
            samp_end = seq_len if self.padding else (seq_len - nfrms_right)
            center_idxs = list(range(samp_start, samp_end, self.stride))
            # In test mode, always cover the tail of the sequence.
            if self.test_mode and ((center_idxs[-1] + nfrms_right + 1) < seq_len):
                center_idxs.append(seq_len - nfrms_right - 1)
            for center_idx in center_idxs:
                lq_idxs = self.find_neighboring_frames(center_idx=center_idx, seq_len=seq_len, nfrms_left=nfrms_left, nfrms_right=nfrms_right)
                if self.center_gt:
                    gt_idxs = [center_idx]
                else:
                    gt_idxs = lq_idxs
                samp_gt_paths = [gt_paths[idx] for idx in gt_idxs]
                samp_lq_paths = [osp.join(lq_seq, gt_names[idx]) for idx in lq_idxs]
                record_key = key + os.sep + ','.join([gt_names[idx] for idx in gt_idxs])
                # NOTE(review): indexing self.key_frames with lq_idxs assumes
                # len(self.key_frames) >= seq_len — confirm for long
                # sequences.
                key_frms = [self.key_frames[idx] for idx in lq_idxs]
                data_infos.append(dict(gt_path=samp_gt_paths, lq_path=samp_lq_paths, key=record_key, key_frms=key_frms))
        return data_infos
|
@BACKBONES.register_module()
class ARCNN(BaseNet):
    """AR-CNN network structure.

    Four convolutions with interleaved ReLUs (no activation after the last
    conv), plus a global residual connection.

    Args:
        io_channels (int): Number of I/O channels.
        mid_channels_1 (int): Channel number of the first intermediate
            features.
        mid_channels_2 (int): Channel number of the second intermediate
            features.
        mid_channels_3 (int): Channel number of the third intermediate
            features.
        in_kernel_size (int): Kernel size of the first convolution.
        mid_kernel_size_1 (int): Kernel size of the first intermediate
            convolution.
        mid_kernel_size_2 (int): Kernel size of the second intermediate
            convolution.
        out_kernel_size (int): Kernel size of the last convolution.
    """

    def __init__(self, io_channels=3, mid_channels_1=64, mid_channels_2=32, mid_channels_3=16, in_kernel_size=9, mid_kernel_size_1=7, mid_kernel_size_2=1, out_kernel_size=5):
        super().__init__()
        channels = [io_channels, mid_channels_1, mid_channels_2, mid_channels_3, io_channels]
        kernels = [in_kernel_size, mid_kernel_size_1, mid_kernel_size_2, out_kernel_size]
        modules = []
        for c_in, c_out, ksize in zip(channels[:-1], channels[1:], kernels):
            modules.append(nn.Conv2d(c_in, c_out, ksize, padding=ksize // 2))
            modules.append(nn.ReLU(inplace=False))
        modules.pop()  # no activation after the final conv
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with the shape of (N, C, H, W).

        Returns:
            Tensor
        """
        return self.layers(x) + x
|
class BaseNet(nn.Module):
    """Base network with the function init_weights."""

    def __init__(self) -> None:
        super().__init__()

    def init_weights(self, pretrained=None, strict=True):
        """Init weights for models.

        Args:
            pretrained (str): Path for pretrained weights.
                If given None, pretrained weights will not be loaded.
                Default: None.
            strict (bool): Whether strictly load the pretrained model.
                Default: True.
        """
        # None means "no pretrained weights": nothing to do.
        if pretrained is None:
            return
        if not isinstance(pretrained, str):
            raise TypeError(f'"pretrained" must be a string or None; received "{type(pretrained)}".')
        load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger())
|
@BACKBONES.register_module()
class CBDNet(BaseNet):
    """CBDNet network structure.

    A noise-estimation subnetwork predicts a noise map, which is
    concatenated with the input and fed to a UNet denoiser; the UNet
    output is added back to the input as a residual.

    Args:
        io_channels (int): Number of I/O channels.
        estimate_channels (int): Channel number of the features in the
            estimation module.
        nlevel_denoise (int): Level number of UNet for denoising.
        nf_base_denoise (int): Base channel number of the features in the
            denoising module.
        nf_gr_denoise (int): Growth rate of the channel number in the
            denoising module.
        nl_base_denoise (int): Base convolution layer number in the
            denoising module.
        nl_gr_denoise (int): Growth rate of the convolution layer number in
            the denoising module.
        down_denoise (str): Downsampling method in the denoising module.
        up_denoise (str): Upsampling method in the denoising module.
        reduce_denoise (str): Reduction method for the guidance/feature maps
            in the denoising module.
    """

    def __init__(self, io_channels=3, estimate_channels=32, nlevel_denoise=3, nf_base_denoise=64, nf_gr_denoise=2, nl_base_denoise=1, nl_gr_denoise=2, down_denoise='avepool2d', up_denoise='transpose2d', reduce_denoise='add'):
        super().__init__()
        # Noise-estimation subnetwork: five 3x3 conv + ReLU stages.
        est_layers = [nn.Conv2d(in_channels=io_channels, out_channels=estimate_channels, kernel_size=3, padding=3 // 2), nn.ReLU(inplace=True)]
        for _ in range(3):
            est_layers.extend([nn.Conv2d(in_channels=estimate_channels, out_channels=estimate_channels, kernel_size=3, padding=3 // 2), nn.ReLU(inplace=True)])
        est_layers.extend([nn.Conv2d(estimate_channels, io_channels, 3, padding=3 // 2), nn.ReLU(inplace=True)])
        self.estimate = nn.Sequential(*est_layers)
        # Denoiser sees the input stacked with the estimated noise map.
        self.denoise = UNet(nf_in=io_channels * 2, nf_out=io_channels, nlevel=nlevel_denoise, nf_base=nf_base_denoise, nf_gr=nf_gr_denoise, nl_base=nl_base_denoise, nl_gr=nl_gr_denoise, down=down_denoise, up=up_denoise, reduce=reduce_denoise, residual=False)

    def forward(self, x):
        """Forward.

        Args:
            x (Tensor): Input tensor with the shape of (N, C, H, W).

        Returns:
            Tensor
        """
        noise_map = self.estimate(x)
        denoised = self.denoise(torch.cat([x, noise_map], dim=1))
        return denoised + x
|
@BACKBONES.register_module()
class DCAD(BaseNet):
    """DCAD network structure.

    A plain stack of 3x3 convolutions with ReLU in between, plus a global
    residual connection.

    Args:
        io_channels (int): Number of I/O channels.
        mid_channels (int): Channel number of intermediate features.
        num_blocks (int): Block number in the trunk network.
    """

    def __init__(self, io_channels=3, mid_channels=64, num_blocks=8):
        super().__init__()
        modules = [nn.Conv2d(io_channels, mid_channels, 3, padding=1)]
        for _ in range(num_blocks):
            modules.append(nn.ReLU(inplace=False))
            modules.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
        # Output projection back to the I/O channel count.
        modules.append(nn.ReLU(inplace=False))
        modules.append(nn.Conv2d(mid_channels, io_channels, 3, padding=1))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with the shape of (N, C, H, W).

        Returns:
            Tensor
        """
        return self.layers(x) + x
|
@BACKBONES.register_module()
class DnCNN(BaseNet):
    """DnCNN network structure.

    Momentum for nn.BatchNorm2d is 0.9 in
    "https://github.com/cszn/KAIR/blob
    /7e51c16c6f55ff94b59c218c2af8e6b49fe0668b/models/basicblock.py#L69",
    but is 0.1 default in PyTorch.

    Args:
        io_channels (int): Number of I/O channels.
        mid_channels (int): Channel number of intermediate features.
        num_blocks (int): Block number in the trunk network.
        if_bn (bool): If use BN layer. Default: False.
    """

    def __init__(self, io_channels=3, mid_channels=64, num_blocks=15, if_bn=False):
        super().__init__()
        modules = [nn.Conv2d(io_channels, mid_channels, 3, padding=1)]
        for _ in range(num_blocks):
            modules.append(nn.ReLU(inplace=True))
            if if_bn:
                # Conv carries no bias when followed by BatchNorm.
                modules.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1, bias=False))
                modules.append(nn.BatchNorm2d(num_features=mid_channels, momentum=0.9, eps=0.0001, affine=True))
            else:
                modules.append(nn.Conv2d(mid_channels, mid_channels, 3, padding=1))
        modules.append(nn.ReLU(inplace=True))
        modules.append(nn.Conv2d(mid_channels, io_channels, 3, padding=1))
        self.layers = nn.Sequential(*modules)

    def forward(self, x):
        """Forward function.

        Args:
            x (Tensor): Input tensor with the shape of (N, C, H, W).

        Returns:
            Tensor
        """
        return self.layers(x) + x
|
class Interpolate(nn.Module):
    """Module wrapper around nn.functional.interpolate with a fixed
    scale factor and mode.

    align_corners is fixed to False, matching the sampling convention of
    modes such as 'bilinear' and 'bicubic'.
    """

    def __init__(self, scale_factor, mode):
        super().__init__()
        self.scale_factor = scale_factor
        self.mode = mode
        self.interp = nn.functional.interpolate

    def forward(self, x):
        return self.interp(x, scale_factor=self.scale_factor, mode=self.mode, align_corners=False)
|
@BACKBONES.register_module()
class RDNQE(BaseNet):
    """RDN for quality enhancement.

    Differences to the RDN in MMEditing:
        Support rescaling before/after enhancement.

    Args:
        rescale (int): Rescaling factor. Must be a power of 2.
        io_channels (int): Number of I/O channels.
        mid_channels (int): Channel number of intermediate features.
        num_blocks (int): Block number in the trunk network.
        num_layers (int): Layer number in the Residual Dense Block.
        channel_growth (int): Channels growth in each layer of RDB.
    """

    def __init__(self, rescale, io_channels, mid_channels=64, num_blocks=8, num_layers=8, channel_growth=64):
        super().__init__()
        self.rescale = rescale
        self.mid_channels = mid_channels
        self.channel_growth = channel_growth
        self.num_blocks = num_blocks
        self.num_layers = num_layers
        if not math.log2(rescale).is_integer():
            raise ValueError(f'Rescale factor ({rescale}) should be a power of 2.')
        # Downscale before enhancement; identity when rescale == 1.
        if rescale == 1:
            self.downscale = nn.Identity()
        else:
            self.downscale = Interpolate(scale_factor=1.0 / rescale, mode='bicubic')
        # Shallow feature extraction.
        self.sfe1 = nn.Conv2d(io_channels, mid_channels, kernel_size=3, padding=3 // 2)
        self.sfe2 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3, padding=3 // 2)
        # Residual dense blocks.
        self.rdbs = nn.ModuleList()
        for _ in range(self.num_blocks):
            self.rdbs.append(RDB(self.mid_channels, self.channel_growth, self.num_layers))
        # Global feature fusion over the concatenated RDB outputs.
        self.gff = nn.Sequential(nn.Conv2d(self.mid_channels * self.num_blocks, self.mid_channels, kernel_size=1), nn.Conv2d(self.mid_channels, self.mid_channels, kernel_size=3, padding=3 // 2))
        if rescale == 1:
            self.upscale = nn.Identity()
        else:
            # Bug fix: each PixelShuffle(2) stage upsamples by x2, so a
            # rescale of 2**k needs k stages, i.e. log2(rescale). The old
            # "range(rescale // 2)" only coincided with this for rescale in
            # {2, 4}; for rescale == 8 it built four stages (x16) instead of
            # three (x8).
            self.upscale = []
            for _ in range(int(math.log2(rescale))):
                self.upscale.extend([nn.Conv2d(self.mid_channels, self.mid_channels * (2 ** 2), kernel_size=3, padding=3 // 2), nn.PixelShuffle(2)])
            self.upscale = nn.Sequential(*self.upscale)
        self.output = nn.Conv2d(self.mid_channels, io_channels, kernel_size=3, padding=3 // 2)

    def forward(self, x):
        """Forward.

        Args:
            x (Tensor): Input tensor with the shape of (N, C, H, W).

        Returns:
            Tensor
        """
        x = self.downscale(x)
        sfe1 = self.sfe1(x)
        sfe2 = self.sfe2(sfe1)
        x = sfe2
        local_features = []
        for i in range(self.num_blocks):
            x = self.rdbs[i](x)
            local_features.append(x)
        # Global residual over the first shallow features.
        x = self.gff(torch.cat(local_features, 1)) + sfe1
        x = self.upscale(x)
        x = self.output(x)
        return x
|
@BACKBONES.register_module()
class RRDBNetQE(RRDBNet):
    """Networks consisting of Residual in Residual Dense Block, which is
    used in ESRGAN and Real-ESRGAN.

    ESRGAN: Enhanced Super-Resolution Generative Adversarial Networks.
    Currently, it supports [x1/x2/x4] upsampling scale factor.

    Args:
        io_channels (int): I/O channel number.
        mid_channels (int): Channel number of intermediate features.
        num_blocks (int): Block number in the trunk network.
        growth_channels (int): Channels for each growth.
        upscale_factor (int): Upsampling factor. Support x1, x2 and x4.
    """

    def __init__(self, io_channels, mid_channels=64, num_blocks=23, growth_channels=32, upscale_factor=4):
        super().__init__(in_channels=io_channels, out_channels=io_channels, mid_channels=mid_channels, num_blocks=num_blocks, growth_channels=growth_channels, upscale_factor=upscale_factor)

    def init_weights(self, pretrained=None, strict=True, revise_keys=None):
        """Init weights for models.

        Accept revise_keys for restorer ESRGANRestorer.
        Default value is equal to that of load_checkpoint.

        Args:
            pretrained (str, optional): Path for pretrained weights.
                If given None, pretrained weights will not be loaded.
            strict (bool, optional): Whether strictly load the pretrained
                model.
            revise_keys (list): A list of customized keywords to modify the
                state_dict in checkpoint. Each item is a (pattern,
                replacement) pair of the regular expression operations.
                Default: strip the prefix 'module.' by
                [(r'^module\\.', '')].
        """
        if revise_keys is None:
            revise_keys = [('^module\\.', '')]
        if pretrained is None:
            # No checkpoint: apply the default scaled init to the main convs.
            for module in (self.conv_first, self.conv_body, self.conv_up1, self.conv_up2, self.conv_hr, self.conv_last):
                default_init_weights(module, 0.1)
        elif isinstance(pretrained, str):
            load_checkpoint(self, pretrained, strict=strict, logger=get_root_logger(), revise_keys=revise_keys)
        else:
            raise TypeError(f'"pretrained" must be a string or None; received "{type(pretrained)}".')
|
def build(cfg, registry, default_args=None):
    """Build one module (or a sequential chain of modules) from config.

    Args:
        cfg (dict | list[dict]): Configuration for building modules. A list
            of configs is built into an ``nn.Sequential``.
        registry (obj): Registry object.
        default_args (dict, optional): Default arguments. Default: None.
    """
    if not isinstance(cfg, list):
        return build_from_cfg(cfg, registry, default_args)
    built = [build_from_cfg(item_cfg, registry, default_args) for item_cfg in cfg]
    return nn.Sequential(*built)
|
def build_backbone(cfg):
    """Build a backbone module from the BACKBONES registry.

    Args:
        cfg (dict): Configuration for building backbone.
    """
    return build(cfg, BACKBONES)
|
def build_component(cfg):
    """Build a component module from the COMPONENTS registry.

    Args:
        cfg (dict): Configuration for building component.
    """
    return build(cfg, COMPONENTS)
|
def build_loss(cfg):
    """Build a loss module from the LOSSES registry.

    Args:
        cfg (dict): Configuration for building loss.
    """
    return build(cfg, LOSSES)
|
def build_model(cfg, train_cfg=None, test_cfg=None):
    """Build a model from the MODELS registry.

    Args:
        cfg (dict): Configuration for building model.
        train_cfg (dict): Training configuration. Default: None.
        test_cfg (dict): Testing configuration. Default: None.
    """
    return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
|
@LOSSES.register_module()
class PerceptualLossGray(PerceptualLoss):
    """Perceptual loss for gray-scale images.

    Differences to PerceptualLoss: Input x is a gray-scale image.
    """

    def forward(self, x, gt):
        # VGG expects a 3-channel input: replicate the gray channel of x.
        # NOTE(review): gt is NOT repeated -- presumably it is already
        # 3-channel; confirm against the caller.
        x = x.repeat(1, 3, 1, 1)
        if self.norm_img:
            # presumably maps inputs from [-1, 1] to [0, 1] -- TODO confirm
            x = ((x + 1.0) * 0.5)
            gt = ((gt + 1.0) * 0.5)
        # gt is detached so the loss only back-propagates through x.
        x_features = self.vgg(x)
        gt_features = self.vgg(gt.detach())
        if (self.perceptual_weight > 0):
            percep_loss = 0
            # Weighted sum of per-layer feature distances.
            for k in x_features.keys():
                percep_loss += (self.criterion(x_features[k], gt_features[k]) * self.layer_weights[k])
            percep_loss *= self.perceptual_weight
        else:
            percep_loss = None
        if (self.style_weight > 0):
            if (self.vgg_style is not None):
                # A separate extractor (vgg_style) is used for the style term.
                x_features = self.vgg_style(x)
                gt_features = self.vgg_style(gt.detach())
                style_loss = 0
                for k in x_features.keys():
                    style_loss += (self.criterion(self._gram_mat(x_features[k]), self._gram_mat(gt_features[k])) * self.layer_weights_style[k])
                style_loss *= self.style_weight
        else:
            # NOTE(review): when style_weight > 0 but vgg_style is None,
            # style_loss is never assigned and the return below raises
            # NameError -- verify that configs never hit this combination.
            style_loss = None
        return (percep_loss, style_loss)

    def _gram_mat(self, x):
        """Calculate Gram matrix.

        Args:
            x (Tensor): Tensor with the shape of (N, C, H, W).

        Returns:
            Tensor: Gram matrix with the shape of (N, C, C).
        """
        (n, c, h, w) = x.size()
        features = x.view(n, c, (w * h))
        features_t = features.transpose(1, 2)
        # Normalized by the total number of elements per sample (C*H*W).
        gram = (features.bmm(features_t) / ((c * h) * w))
        return gram
|
@MODELS.register_module()
class ESRGANRestorer(BasicQERestorer):
    """ESRGAN restorer for quality enhancement.

    Args:
        generator (dict): Config for the generator.
        discriminator (dict): Config for the discriminator. Default: None.
        gan_loss (dict): Config for the GAN loss.
            Note that the loss weight in GAN loss is only for the generator.
        pixel_loss (dict): Config for the pixel loss. Default: None.
        perceptual_loss (dict): Config for the perceptual loss. Default: None.
        train_cfg (dict): Config for training. Default: None.
            You may change the training of GAN by setting:
            disc_steps: how many discriminator updates after one generate
            update;
            disc_init_steps: how many discriminator updates at the start of
            the training.

            These two keys are useful when training with WGAN.
        test_cfg (dict): Config for testing.
            Default: None.
        pretrained (str): Path for pretrained model.
            Default: None.
    """

    def __init__(self, generator, discriminator=None, gan_loss=None, pixel_loss=None, perceptual_loss=None, train_cfg=None, test_cfg=None, pretrained=None):
        super().__init__(generator=generator, pixel_loss=pixel_loss, train_cfg=train_cfg, test_cfg=test_cfg, pretrained=pretrained)
        # Adversarial components are optional; each stays None when not
        # configured.
        self.discriminator = (build_component(discriminator) if discriminator else None)
        self.gan_loss = (build_loss(gan_loss) if gan_loss else None)
        self.perceptual_loss = (build_loss(perceptual_loss) if perceptual_loss else None)
        # Generator update schedule: once every `disc_steps` iterations, and
        # only after `disc_init_steps` warm-up iterations (useful for WGAN).
        self.disc_steps = (1 if (self.train_cfg is None) else self.train_cfg.get('disc_steps', 1))
        self.disc_init_steps = (0 if (self.train_cfg is None) else self.train_cfg.get('disc_init_steps', 0))
        # Counts completed train steps; drives the schedule above.
        self.step_counter = 0

    def init_weights(self, pretrained=None):
        """Init the generator weights using the generator's method.

        Therefore, "r'^generator.'" must be removed.

        Args:
            pretrained (str, optional): Path for pretrained weights.
                If given None, pretrained weights will not be loaded.
        """
        self.generator.init_weights(pretrained=pretrained, revise_keys=[('^generator\\.', ''), ('^module\\.', '')])

    def train_step(self, data_batch, optimizer):
        """Train step.

        Args:
            data_batch (dict): A batch of data.
            optimizer (obj): Optimizer.

        Returns:
            dict: Returned output.
        """
        lq = data_batch['lq']
        gt = data_batch['gt']
        fake_g_output = self.generator(lq)
        losses = {}
        log_vars = {}
        # ---- Generator step ----
        # Freeze the discriminator so the generator's backward pass does not
        # accumulate gradients into it.
        set_requires_grad(self.discriminator, False)
        if (((self.step_counter % self.disc_steps) == 0) and (self.step_counter >= self.disc_init_steps)):
            if self.pixel_loss:
                losses['loss_pix'] = self.pixel_loss(fake_g_output, gt)
            if self.perceptual_loss:
                (loss_percep, loss_style) = self.perceptual_loss(fake_g_output, gt)
                if (loss_percep is not None):
                    losses['loss_perceptual'] = loss_percep
                if (loss_style is not None):
                    losses['loss_style'] = loss_style
            # Relativistic-style GAN loss: each prediction is compared against
            # the mean prediction of the opposite class. `real_d_pred` is
            # detached so the generator loss does not back-propagate through
            # the discriminator's real-image branch.
            real_d_pred = self.discriminator(gt).detach()
            fake_g_pred = self.discriminator(fake_g_output)
            loss_gan_fake = self.gan_loss((fake_g_pred - torch.mean(real_d_pred)), target_is_real=True, is_disc=False)
            loss_gan_real = self.gan_loss((real_d_pred - torch.mean(fake_g_pred)), target_is_real=False, is_disc=False)
            losses['loss_gan'] = ((loss_gan_fake + loss_gan_real) / 2)
            (loss_g, log_vars_g) = self.parse_losses(losses)
            log_vars.update(log_vars_g)
            optimizer['generator'].zero_grad()
            loss_g.backward()
            optimizer['generator'].step()
        # ---- Discriminator step (runs every iteration) ----
        set_requires_grad(self.discriminator, True)
        # Real branch: fake predictions are detached from the graph.
        real_d_pred = self.discriminator(gt)
        fake_d_pred = self.discriminator(fake_g_output).detach()
        loss_d_real = (self.gan_loss((real_d_pred - torch.mean(fake_d_pred)), target_is_real=True, is_disc=True) * 0.5)
        # Fake branch: recomputed on the detached generator output so no
        # gradient flows back into the generator.
        fake_d_pred = self.discriminator(fake_g_output.detach())
        loss_d_fake = (self.gan_loss((fake_d_pred - torch.mean(real_d_pred.detach())), target_is_real=False, is_disc=True) * 0.5)
        (loss_d, log_vars_d) = self.parse_losses({'loss_d_real': loss_d_real, 'loss_d_fake': loss_d_fake})
        optimizer['discriminator'].zero_grad()
        loss_d.backward()
        optimizer['discriminator'].step()
        # Rename the aggregated 'loss' key so it does not clash with the
        # generator's aggregated loss already stored in `log_vars`.
        log_vars_d['loss_discriminator'] = log_vars_d['loss']
        log_vars_d.pop('loss')
        log_vars.update(log_vars_d)
        self.step_counter += 1
        outputs = {'log_vars': log_vars, 'num_samples': len(gt.data), 'results': {'lq': lq.cpu(), 'gt': gt.cpu(), 'output': fake_g_output.cpu()}}
        return outputs
|
def read_json(json_path, losses, metrics):
    """Accumulate train losses and val metrics from a JSON-lines log file.

    Each line of the file is one JSON object. Lines with ``"mode": "train"``
    contribute ``{iter: loss}`` entries to ``losses``; lines with
    ``"mode": "val"`` contribute ``{iter: value}`` entries to each metric in
    ``metrics`` that appears in the record. Header lines without a ``mode``
    key (e.g. ``{"exp_name": ..., "seed": ...}``) are ignored.

    Example records::

        {"mode": "train", "epoch": 1, "iter": 100, "loss": 0.01336, ...}
        {"mode": "val", "epoch": 1, "iter": 50000, "PSNR": 34.24, "SSIM": 0.957}

    Args:
        json_path (str): Path to the JSON-lines log file.
        losses (dict): Accumulator mapping iteration -> loss (mutated).
        metrics (dict): Mapping metric name -> {iteration: value} (mutated).

    Returns:
        tuple: The (losses, metrics) accumulators.
    """
    with open(json_path, 'r') as log_file:
        for line in log_file:
            record = json.loads(line)
            mode = record.get('mode')
            if mode == 'train':
                losses[record['iter']] = record['loss']
            if mode == 'val':
                # A val record may carry any subset of the tracked metrics.
                for metric_name in metrics.keys():
                    if metric_name in record:
                        metrics[metric_name][record['iter']] = record[metric_name]
    return (losses, metrics)
|
def plot_curve(data, ylabel, smooth=False, save_path=''):
    """Plot an iteration-vs-value curve, optionally smoothed and saved.

    Args:
        data (dict): Mapping from iteration to value.
        ylabel (str): Label for the y-axis.
        smooth (bool): If True, apply a 9-point moving average.
        save_path (str): If non-empty, save the figure to this path.
    """
    xs = list(data.keys())
    ys = list(data.values())
    if smooth:
        window_size = 9
        assert (window_size % 2) == 1
        half = window_size // 2
        # 'valid' convolution shortens the series by window_size - 1, so trim
        # half a window from each end of the x-axis to keep lengths aligned.
        xs = xs[half:(-half)]
        ys = np.convolve(ys, np.ones(window_size) / window_size, mode='valid')
    plt.plot(xs, ys)
    plt.xlabel('Iters')
    plt.ylabel(ylabel)
    plt.grid(True)
    if save_path:
        plt.savefig(save_path)
        print(f'Saved to {save_path}')
    plt.show()
    plt.close()
|
def main():
    """Entry point: plot loss/metric curves from JSON training logs."""
    parser = argparse.ArgumentParser(description='Parse JSON file')
    parser.add_argument('json_path', type=str, help='Path to the JSON file')
    parser.add_argument('--save-dir', type=str, default=None, help='Path to save the PNG')
    args = parser.parse_args()
    if osp.isdir(args.json_path):
        # A directory was given: gather every JSON log, oldest first.
        sorted_json_files = sorted(glob(osp.join(args.json_path, '*.json')), key=osp.getmtime)
        print(f'{len(sorted_json_files)} json files are found.')
    else:
        sorted_json_files = [args.json_path]
    if not args.save_dir:
        # Default the output directory to where the logs live.
        args.save_dir = osp.dirname(sorted_json_files[0])
    losses = dict()
    metrics = dict(PSNR=dict(), SSIM=dict())
    # Accumulate across all logs so resumed runs merge into one curve.
    for json_file in sorted_json_files:
        (losses, metrics) = read_json(json_file, losses=losses, metrics=metrics)
    plot_curve(data=losses, ylabel='Loss', smooth=True,
               save_path=osp.join(args.save_dir, 'losses.png'))
    for (metric_name, results) in metrics.items():
        if results:
            plot_curve(data=results, ylabel=metric_name,
                       save_path=osp.join(args.save_dir, f'{metric_name}.png'))
|
# NOTE: the following lines are non-code residue from a web page
# (dataset-viewer footer text); commented out so the file stays valid Python.
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.