code stringlengths 17 6.64M |
|---|
def set_host_only(host_only: bool):
    """Select host (CPU) only mode.

    When True, the device (GPU) is left unused and only the host (CPU)
    is used.

    Args:
        host_only (bool): pass True to run on the host only
    """
    core.Manager.set_host_only(host_only)
|
def get_cuda_driver_version():
    """Get the CUDA driver version.

    Returns:
        driver_version (int): CUDA driver version
    """
    return core.get_cuda_driver_version()
|
def get_cuda_driver_version_string():
    """Get the CUDA driver version as a 'major.minor' string.

    Returns:
        driver_version (str): CUDA driver version string
    """
    version = get_cuda_driver_version()
    # CUDA encodes the version as major*1000 + minor*10.
    major, remainder = divmod(version, 1000)
    minor = remainder // 10
    return '{}.{}'.format(major, minor)
|
def get_device_count():
    """Number of available devices (GPUs).

    Returns:
        device_count (int): the number of available devices (GPUs)
    """
    return core.get_device_count()
|
def set_device(device_id):
    """Switch the device (GPU) in use.

    Args:
        device_id (int): device number to use
    """
    core.set_device(device_id)
|
def get_device_properties_string(device_id):
    """Get a string describing the specified device (GPU).

    Args:
        device_id (int): device number to query

    Returns:
        device_properties_string (str): device (GPU) information string
    """
    return core.get_device_properties_string(device_id)
|
def get_device_properties(device_id):
    """Get the properties object of the specified device (GPU)."""
    return core.get_device_properties(device_id)
|
def get_device_name(device_id):
    """Get the name of the specified device (GPU)."""
    return core.get_device_name(device_id)
|
def get_device_allocated_memory_size():
    """Get the amount of device memory currently allocated."""
    return core.get_device_allocated_memory_size()
|
def garbage_collect_device_memory():
    """Trigger garbage collection of device memory."""
    return core.garbage_collect_device_memory()
|
class Tensor(bb.Object):
    """Tensor class.

    Multi-dimensional data container backed by a core tensor object.

    Args:
        shape (list[int]): Shape of created array
        dtype (int): Data type
        host_only (bool): flag of host only
    """

    # Suffix of the core accessor methods (numpy_*, set_numpy_*) per data type.
    # Replaces the long duplicated if/elif dispatch chains.
    _DTYPE_SUFFIX = {
        bb.DType.FP32: 'fp32',
        bb.DType.FP64: 'fp64',
        bb.DType.INT8: 'int8',
        bb.DType.INT16: 'int16',
        bb.DType.INT32: 'int32',
        bb.DType.INT64: 'int64',
        bb.DType.UINT8: 'uint8',
        bb.DType.UINT16: 'uint16',
        bb.DType.UINT32: 'uint32',
        bb.DType.UINT64: 'uint64',
    }

    # (numpy dtype, accessor suffix) pairs used by from_numpy().
    _NUMPY_DTYPES = (
        (np.float32, 'fp32'),
        (np.float64, 'fp64'),
        (np.int8, 'int8'),
        (np.int16, 'int16'),
        (np.int32, 'int32'),
        (np.int64, 'int64'),
        (np.uint8, 'uint8'),
        (np.uint16, 'uint16'),
        (np.uint32, 'uint32'),
        (np.uint64, 'uint64'),
    )

    def __init__(self, shape: List[int] = None, *, dtype=bb.DType.FP32, host_only=False, core_tensor=None):
        if core_tensor is None and shape is not None:
            core_tensor = core.Tensor(shape, dtype.value, host_only)
        super(Tensor, self).__init__(core_object=core_tensor)

    @staticmethod
    def from_core(core_tensor):
        """Wrap an existing core tensor object in a Tensor."""
        return Tensor(shape=None, core_tensor=core_tensor)

    def is_host_only(self) -> bool:
        """Return True if this tensor lives on the host (CPU) only."""
        return self.get_core().is_host_only()

    def get_type(self) -> int:
        """Get data type.

        Returns:
            data type.
        """
        return bb.DType(self.get_core().get_type())

    def get_shape(self) -> List[int]:
        """Get shape.

        Returns:
            shape
        """
        return self.get_core().get_shape()

    def _dtype_suffix(self):
        # Map the core dtype to an accessor suffix; raise for unknown types
        # instead of the previous silent None fall-through in numpy().
        dtype = bb.DType(self.get_core().get_type())
        try:
            return self._DTYPE_SUFFIX[dtype]
        except KeyError:
            raise TypeError('unsupported')

    def numpy(self) -> np.ndarray:
        """Convert to a NumPy ndarray.

        Returns:
            ndarray (array)

        Raises:
            TypeError: for an unsupported core data type (the original
                implementation silently returned None in this case).
        """
        return getattr(self.get_core(), 'numpy_' + self._dtype_suffix())()

    def set_numpy(self, ndarray: np.ndarray):
        """Copy data in from a NumPy ndarray of matching dtype and shape."""
        dtype = bb.DType(self.get_core().get_type())
        assert (bb.dtype_numpy_to_bb(ndarray.dtype) == dtype)
        assert (ndarray.shape == tuple(self.get_shape()))
        return getattr(self.get_core(), 'set_numpy_' + self._dtype_suffix())(ndarray)

    @staticmethod
    def from_numpy(ndarray: np.ndarray, host_only=False):
        """Create a Tensor from a NumPy array.

        Args:
            ndarray (ndarray): array of NumPy
            host_only (bool): flag of host only

        Raises:
            TypeError: if the array's dtype is not supported
        """
        if not ndarray.flags['C_CONTIGUOUS']:
            # The core importer requires C-contiguous memory.
            ndarray = ndarray.copy(order='C')
        for np_dtype, suffix in Tensor._NUMPY_DTYPES:
            if ndarray.dtype == np_dtype:
                factory = getattr(bb.core.Tensor, 'from_numpy_' + suffix)
                return Tensor(core_tensor=factory(ndarray, host_only))
        raise TypeError('unsupported')

    def fill_zero(self):
        """Fill all elements with zero."""
        self.get_core().fill_zero()

    def fill(self, x):
        """Fill all elements with the value x."""
        self.get_core().fill(x)

    def isnan(self):
        return Tensor(core_tensor=self.get_core().isnan())

    def min(self):
        return Tensor(core_tensor=self.get_core().min())

    def max(self):
        return Tensor(core_tensor=self.get_core().max())

    def quantize(self, bits, scale=0.0, offset=0):
        return Tensor(core_tensor=self.get_core().quantize(bits, scale, offset))

    def clamp_inplace(self, a, b):
        self.get_core().clamp_inplace(a, b)

    def sqrt_inplace(self):
        self.get_core().sqrt_inplace()

    def exp_inplace(self):
        self.get_core().exp_inplace()

    def sum(self):
        return Tensor(core_tensor=self.get_core().sum())

    def mean(self):
        return Tensor(core_tensor=self.get_core().mean())

    def var(self):
        return Tensor(core_tensor=self.get_core().var())

    def std(self):
        return Tensor(core_tensor=self.get_core().std())

    def _coerce(self, x):
        # Arithmetic operands are either another Tensor (use its core object)
        # or a scalar (coerced to float).  isinstance also accepts subclasses,
        # which the previous `type(x) == Tensor` check rejected.
        return x.get_core() if isinstance(x, Tensor) else float(x)

    def __add__(self, x):
        return Tensor(core_tensor=self.get_core() + self._coerce(x))

    def __sub__(self, x):
        return Tensor(core_tensor=self.get_core() - self._coerce(x))

    def __mul__(self, x):
        return Tensor(core_tensor=self.get_core() * self._coerce(x))

    def __truediv__(self, x):
        return Tensor(core_tensor=self.get_core() / self._coerce(x))

    def __radd__(self, x):
        return Tensor(core_tensor=self._coerce(x) + self.get_core())

    def __rsub__(self, x):
        return Tensor(core_tensor=self._coerce(x) - self.get_core())

    def __rmul__(self, x):
        return Tensor(core_tensor=self._coerce(x) * self.get_core())

    def __rtruediv__(self, x):
        return Tensor(core_tensor=self._coerce(x) / self.get_core())

    def __iadd__(self, x):
        # Relies on the core object's augmented operator mutating it in place.
        core_tensor = self.get_core()
        core_tensor += self._coerce(x)
        return self

    def __isub__(self, x):
        core_tensor = self.get_core()
        core_tensor -= self._coerce(x)
        return self

    def __imul__(self, x):
        core_tensor = self.get_core()
        core_tensor *= self._coerce(x)
        return self

    def __itruediv__(self, x):
        core_tensor = self.get_core()
        core_tensor /= self._coerce(x)
        return self
|
class Variables():
    """Variables class.

    Bridges the Optimizer used for training and the variables that are the
    actual training targets.  Internally it holds the Tensors containing each
    model's weights and gradients.
    """

    def __init__(self):
        self.variables = core.Variables()

    @staticmethod
    def from_core(variables):
        """Wrap an existing core Variables object."""
        wrapped = Variables()
        wrapped.variables = variables
        return wrapped

    def get_core(self):
        """Return the underlying core object."""
        return self.variables

    def append(self, variables):
        """Append variables.

        Args:
            variables (Variables): variables to append
        """
        self.variables.push_back(variables.get_core())

    def get_size(self):
        return self.variables.get_size()

    def at(self, item):
        return self.variables.at(item)

    def __len__(self):
        return self.get_size()

    def __getitem__(self, item):
        return self.at(item)
|
def make_verilog_lut_layers(module_name: str, net, device=''):
    """Make verilog source of LUT layers.

    Models that cannot be converted are ignored as layers with no effect,
    so take care.

    Args:
        module_name (str): module name
        net (Model): network to convert

    Returns:
        Verilog source code (str)
    """
    core_layers = [layer.get_core() for layer in bb.get_model_list(net, flatten=True)]
    return core.make_verilog_lut_layers(module_name, core_layers, device)
|
def dump_verilog_lut_layers(f, module_name: str, net, device=''):
    """Dump verilog source of LUT layers.

    Models that cannot be converted are ignored as layers with no effect,
    so take care.

    Args:
        f (StreamIO): output stream
        module_name (str): module name
        net (Model): network to convert
    """
    source = make_verilog_lut_layers(module_name=module_name, net=net, device=device)
    f.write(source)
|
def export_verilog_lut_layers(file_name: str, module_name: str, net, device=''):
    """Write the Verilog source of the net's LUT layers to a file.

    Args:
        file_name (str): output file name
        module_name (str): module name
        net (Model): network to convert
        device (str): device specifier forwarded to the converter; added
            (with a backward-compatible default) for consistency with
            export_verilog_lut_cnv_layers, which already accepts it
    """
    with open(file_name, 'w') as f:
        dump_verilog_lut_layers(f, module_name, net, device)
|
def make_verilog_lut_cnv_layers(module_name: str, net, device=''):
    """Make Verilog source for convolutional LUT layers.

    Args:
        module_name (str): module name
        net (Model): network to convert

    Returns:
        Verilog source code (str)
    """
    core_layers = [layer.get_core() for layer in bb.get_model_list_for_rtl(net)]
    return core.make_verilog_lut_cnv_layers(module_name, core_layers, device)
|
def dump_verilog_lut_cnv_layers(f, module_name: str, net, device=''):
    """Dump verilog source of Convolutional LUT layers.

    Emits a net containing convolution layers as AXI4 Stream Video style
    Verilog source code.  Only one size-reducing MaxPooling layer is
    permitted, and only as the last layer.

    Args:
        f (StreamIO): output stream
        module_name (str): module name
        net (Model): network to convert
    """
    source = make_verilog_lut_cnv_layers(module_name, net, device)
    f.write(source)
|
def export_verilog_lut_cnv_layers(file_name: str, module_name: str, net, device=''):
    """Write the convolutional LUT-layer Verilog source to `file_name`."""
    with open(file_name, 'w') as f:
        dump_verilog_lut_cnv_layers(f, module_name, net, device)
|
def __dump_bin_digit(f, v):
    # Write a single '1' or '0' character depending on the truthiness of v.
    f.write('1' if v else '0')
|
def __dump_bin_int(f, v, digits):
    # Emit `digits` bits of v, most significant bit first.
    for shift in range(digits - 1, -1, -1):
        __dump_bin_digit(f, (v >> shift) & 1)
|
def __dump_bin_img(f, img):
    # Flatten, reverse the pixel order, and binarize with a 0.5 threshold.
    for value in np.array(img).flatten()[::-1]:
        __dump_bin_digit(f, value > 0.5)
|
def dump_verilog_readmemb_image_classification(f, loader, *, class_digits=8):
    """Data dump for Verilog.

    Creates data readable by Verilog's $readmemb().  Each record is emitted
    as: class ID + '_' + image data.

    Args:
        f (StreamIO): output destination
        loader (Loader): data loader
        class_digits (int): number of bits for the class ID
    """
    for images, labels in loader:
        for x, t in zip(images, labels):
            __dump_bin_int(f, t, class_digits)
            f.write('_')
            __dump_bin_img(f, x)
            f.write('\n')
|
def make_image_tile(rows, cols, img_gen):
    """Tile images into one large image.

    Concatenates training images in (c, h, w) order into a rows x cols tile.

    Args:
        rows (int): number of images joined vertically
        cols (int): number of images joined horizontally
        img_gen (iterator): iterator yielding image ndarrays

    Returns:
        img (ndarray): the composed image
    """
    def _make_row(cols, img_gen):
        # Join `cols` images along the last (width) axis.
        # Uses next() instead of the non-idiomatic img_gen.__next__().
        img = next(img_gen)
        for _ in range(1, cols):
            img = np.concatenate((img, next(img_gen)), axis=img.ndim - 1)
        return img

    img = _make_row(cols, img_gen)
    for _ in range(1, rows):
        # Stack each new row along the height axis.
        img = np.concatenate((img, _make_row(cols, img_gen)), axis=img.ndim - 2)
    return img
|
def write_ppm(fname, img):
    """Write a ppm (ASCII P3) file.

    Saves a training image in (c, h, w) order as a ppm file.  Grayscale
    inputs (2-D, or 3-D with a single channel) are replicated to RGB.

    Args:
        fname (str): output file name
        img (ndarray): image data
    """
    if img.ndim == 3 and img.shape[0] == 1:
        img = np.tile(img, (3, 1, 1))
    elif img.ndim == 2:
        img = np.stack((img, img, img))
    assert img.ndim == 3 and img.shape[0] >= 3
    height, width = img.shape[1], img.shape[2]
    # Use only the first three channels.  Previously, inputs with more than
    # three channels passed the assert but reshape(-1, 3) then interleaved
    # values from different pixels, scrambling the output.
    pixels = img[:3].transpose(1, 2, 0).reshape(-1, 3)
    with open(fname, 'w') as f:
        f.write('P3\n')
        f.write('%d %d\n' % (width, height))
        f.write('255\n')
        for v in pixels:
            f.write('%d %d %d\n' % (v[0], v[1], v[2]))
|
def find_in_path(name, path):
for dir in path.split(os.pathsep):
binpath = pjoin(dir, name)
if os.path.exists(binpath):
return os.path.abspath(binpath)
return None
|
def search_cuda():
    """Locate a CUDA toolkit installation.

    Checks CUDA_PATH (Windows), then CUDAHOME, and finally falls back to
    locating nvcc on PATH.

    Returns:
        dict or None: {'home', 'nvcc', 'include', 'lib'} paths, or None when
        no toolkit can be found
    """
    if sys.platform.startswith('win32') and ('CUDA_PATH' in os.environ):
        cuda_home = os.environ['CUDA_PATH']
        cuda_bin = pjoin(cuda_home, 'bin')
        cuda_include = pjoin(cuda_home, 'include')
        # Windows keeps 64-bit import libraries under lib/x64.
        cuda_lib = pjoin(cuda_home, 'lib', 'x64')
        cuda_nvcc = pjoin(cuda_bin, 'nvcc')
    elif 'CUDAHOME' in os.environ:
        cuda_home = os.environ['CUDAHOME']
        cuda_bin = pjoin(cuda_home, 'bin')
        cuda_include = pjoin(cuda_home, 'include')
        cuda_lib = pjoin(cuda_home, 'lib64')
        cuda_nvcc = pjoin(cuda_bin, 'nvcc')
    else:
        # Use .get(): os.environ['PATH'] raised KeyError in stripped
        # environments where PATH is unset.
        cuda_nvcc = find_in_path('nvcc', os.environ.get('PATH', ''))
        if cuda_nvcc is None:
            return None
        # Derive the toolkit root from <home>/bin/nvcc.
        cuda_home = os.path.dirname(os.path.dirname(cuda_nvcc))
        cuda_bin = pjoin(cuda_home, 'bin')
        cuda_include = pjoin(cuda_home, 'include')
        cuda_lib = pjoin(cuda_home, 'lib64')
    return {'home': cuda_home, 'nvcc': cuda_nvcc, 'include': cuda_include, 'lib': cuda_lib}
|
class get_pybind_include(object):
    """Helper class to determine the pybind11 include path.

    Importing pybind11 is deferred until the include path is actually
    requested, so that ``get_include()`` can be invoked once pybind11 has
    been installed.
    """

    def __init__(self, user=False):
        self.user = user

    def __str__(self):
        import pybind11
        return pybind11.get_include(self.user)
|
def hook_compiler(self):
    """Monkey-patch a distutils compiler instance so that '.cu' sources are
    accepted and, when CUDA is available, both the compile and link steps are
    driven through nvcc.

    Args:
        self: the distutils/setuptools compiler object to patch (this is a
            free function, not a method; it is called as
            ``hook_compiler(self.compiler)`` from BuildExt).

    NOTE(review): relies on module-level ``VERBOSE`` and ``CUDA`` globals
    defined elsewhere in this file — confirm both exist before reuse.
    """
    self.src_extensions.append('.cu')
    # Keep references to the original implementations for the fallback paths.
    super_compile_ = self._compile
    super_compile = self.compile
    super_link = self.link

    def _compile(obj, src, ext, cc_args, extra_postargs, pp_opts):
        # Per-file hook (used on the unix compiler path): select the extra
        # arguments by source extension, then delegate to the original
        # _compile.  extra_postargs is expected to be a dict
        # {'cc': [...], 'cu': [...]} set up by BuildExt.build_extensions.
        if VERBOSE:
            print('---------------------')
            print('[_compile]')
            print('obj =', obj)
            print('src =', src)
            print('ext =', ext)
            print('cc_args =', cc_args)
            print('extra_postargs =', extra_postargs)
            print('pp_opts =', pp_opts)
            print('---------------------')
        if (os.path.splitext(src)[1] == '.cu'):
            postargs = extra_postargs['cu']
        elif (os.path.splitext(src)[1] == '.cpp'):
            postargs = extra_postargs['cc']
        else:
            postargs = []
        super_compile_(obj, src, ext, cc_args, postargs, pp_opts)

    def compile(sources, output_dir=None, macros=None, include_dirs=None, debug=0, extra_preargs=None, extra_postargs=None, depends=None):
        # Whole-batch hook (used on non-unix paths): invokes nvcc directly,
        # one subprocess call per source file.  Bound as an instance
        # attribute below, so it deliberately has no `self` parameter.
        if VERBOSE:
            print('---------------------')
            print('[compile]')
            print('sources =', sources)
            print('output_dir =', output_dir)
            print('macros =', macros)
            print('include_dirs =', include_dirs)
            print('debug =', debug)
            print('extra_preargs =', extra_preargs)
            print('extra_postargs =', extra_postargs)
            print('---------------------')
        if (self.compiler_type == 'unix'):
            # On unix the per-file _compile hook handles everything.
            return super_compile(sources, output_dir, macros, include_dirs, debug, extra_preargs, extra_postargs, depends)
        if (CUDA is not None):
            (macros, objects, extra_postargs, _, _) = self._setup_compile(output_dir, macros, include_dirs, sources, depends, extra_postargs)
            # Build -D macro definition arguments.
            macs = []
            for mac in macros:
                if (len(mac) >= 2):
                    macs.append(((('-D' + mac[0]) + '=') + mac[1]))
                else:
                    macs.append(('-D' + mac[0]))
            # Build -I include arguments (quoted for msvc).
            incs = []
            if (self.compiler_type == 'msvc'):
                incs += [(('-I"' + str(inc)) + '"') for inc in include_dirs]
            else:
                incs += [('-I' + str(inc)) for inc in include_dirs]
            # Compile each source with nvcc; collect the object file paths.
            objects = []
            for src in sources:
                postargs = []
                if (os.path.splitext(src)[1] == '.cu'):
                    postargs = extra_postargs['cu']
                elif (os.path.splitext(src)[1] == '.cpp'):
                    postargs = extra_postargs['cc']
                (fname, _) = os.path.splitext(os.path.basename(src))
                obj = os.path.join(output_dir, (fname + self.obj_extension))
                objects.append(obj)
                args = (((([CUDA['nvcc'], '-c', '-o', obj] + incs) + macs) + [src]) + postargs)
                print(' '.join(args))
                subprocess.call(args)
            return objects
        else:
            # No CUDA: fall back to the original compiler with the C++ args.
            return super_compile(sources, output_dir, macros, include_dirs, debug, extra_preargs, extra_postargs['cc'], depends)

    def link(target_desc, objects, output_filename, output_dir=None, libraries=None, library_dirs=None, runtime_library_dirs=None, export_symbols=None, debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, target_lang=None):
        # Link hook: with CUDA present, produce the shared object via nvcc;
        # otherwise delegate to the original link implementation.
        if VERBOSE:
            print('---------------------')
            print('[link]')
            print('target_desc =', target_desc)
            print('objects =', objects)
            print('libraries =', libraries)
            print('library_dirs =', library_dirs)
            print('runtime_library_dirs =', runtime_library_dirs)
            print('export_symbols =', export_symbols)
            print('debug =', debug)
            print('extra_preargs =', extra_preargs)
            print('extra_postargs =', extra_postargs)
            print('build_temp =', build_temp)
            print('target_lang =', target_lang)
            print('---------------------')
        if (CUDA is not None):
            (libraries, library_dirs, runtime_library_dirs) = self._fix_lib_args(libraries, library_dirs, runtime_library_dirs)
            # Build -L library-directory arguments (quoted for msvc).
            lib_dirs = []
            if (self.compiler_type == 'msvc'):
                lib_dirs += [(('-L"' + str(libdir)) + '"') for libdir in library_dirs]
            else:
                lib_dirs += [('-L' + str(libdir)) for libdir in library_dirs]
            args = ((([CUDA['nvcc'], '-shared', '-o', output_filename] + objects) + lib_dirs) + extra_postargs)
            print(' '.join(args))
            subprocess.call(args)
        else:
            super_link(target_desc, objects, output_filename, output_dir, libraries, library_dirs, runtime_library_dirs, export_symbols, debug, extra_preargs, extra_postargs, build_temp, target_lang)

    # Install the hooks.  The per-file _compile hook is only used by the
    # unix compiler; compile/link are replaced on every platform.
    if (self.compiler_type == 'unix'):
        self._compile = _compile
    self.compile = compile
    self.link = link
|
class BuildExt(build_ext):
    'A custom build extension for adding compiler-specific options.'
    # Per-compiler-type argument tables, selected at class-definition time
    # from the module-level CUDA global (the result of search_cuda()):
    #   cc_args - extra args for C++ sources
    #   cu_args - extra args for CUDA (.cu) sources
    #   ar_args - extra args for the link step
    cc_args = {'unix': [], 'msvc': []}
    cu_args = {'unix': [], 'msvc': []}
    ar_args = {'unix': [], 'msvc': []}
    if (CUDA is None):
        # Host-only build (no nvcc found): plain host-compiler flags.
        cc_args['unix'] += ['-mavx2', '-mfma', '-fopenmp', '-std=c++14']
        ar_args['unix'] += ['-fopenmp', '-lstdc++', '-lm']
        cc_args['msvc'] += ['/EHsc', '/Oi', '/MT', '/arch:AVX2', '/openmp', '/std:c++14', '/wd"4819"']
        ar_args['msvc'] += []
    else:
        # CUDA build: everything goes through nvcc, so host-compiler flags
        # are forwarded with -Xcompiler, and a wide range of SM architectures
        # is targeted via -gencode pairs.
        cc_args['unix'] += ['-gencode=arch=compute_35,code=sm_35', '-gencode=arch=compute_37,code=sm_37', '-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52', '-gencode=arch=compute_53,code=sm_53', '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61', '-gencode=arch=compute_62,code=sm_62', '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72', '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_80,code=sm_80', '-gencode=arch=compute_86,code=sm_86', '-gencode=arch=compute_87,code=sm_87', '-gencode=arch=compute_89,code=sm_89', '-gencode=arch=compute_90,code=sm_90', '-Xcompiler', '-pthread', '-Xcompiler', '-mavx2', '-Xcompiler', '-mfma', '-Xcompiler', '-fopenmp', '-Xcompiler', '-std=c++14', '-Xcompiler', '-fPIC']
        cu_args['unix'] += ['-gencode=arch=compute_35,code=sm_35', '-gencode=arch=compute_37,code=sm_37', '-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52', '-gencode=arch=compute_53,code=sm_53', '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61', '-gencode=arch=compute_62,code=sm_62', '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72', '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_80,code=sm_80', '-gencode=arch=compute_86,code=sm_86', '-gencode=arch=compute_87,code=sm_87', '-gencode=arch=compute_89,code=sm_89', '-gencode=arch=compute_90,code=sm_90', '-std=c++11', '-Xcompiler', '-fPIC']
        ar_args['unix'] += ['-Xcompiler', '-pthread', '-Xcompiler', '-fopenmp', '-lstdc++', '-lm', '-lcublas']
        cc_args['msvc'] += ['-O3', '-Xcompiler', '/bigobj', '-Xcompiler', '/EHsc', '-Xcompiler', '/O2', '-Xcompiler', '/Oi', '-Xcompiler', '/FS', '-Xcompiler', '/Zi', '-Xcompiler', '/MT', '-Xcompiler', '/arch:AVX2', '-Xcompiler', '/openmp', '-Xcompiler', '/std:c++14', '-Xcompiler', '/wd"4819"']
        cu_args['msvc'] += ['-O3', '-std=c++17', '-gencode=arch=compute_35,code=sm_35', '-gencode=arch=compute_37,code=sm_37', '-gencode=arch=compute_50,code=sm_50', '-gencode=arch=compute_52,code=sm_52', '-gencode=arch=compute_53,code=sm_53', '-gencode=arch=compute_60,code=sm_60', '-gencode=arch=compute_61,code=sm_61', '-gencode=arch=compute_62,code=sm_62', '-gencode=arch=compute_70,code=sm_70', '-gencode=arch=compute_72,code=sm_72', '-gencode=arch=compute_75,code=sm_75', '-gencode=arch=compute_80,code=sm_80', '-gencode=arch=compute_86,code=sm_86', '-gencode=arch=compute_87,code=sm_87', '-gencode=arch=compute_89,code=sm_89', '-gencode=arch=compute_90,code=sm_90', '-Xcompiler', '/bigobj', '-Xcompiler', '/EHsc', '-Xcompiler', '/O2', '-Xcompiler', '/Oi', '-Xcompiler', '/FS', '-Xcompiler', '/Zi', '-Xcompiler', '/MT', '-Xcompiler', '/wd"4819"']
        ar_args['msvc'] += ['-lcublas']
    if (sys.platform == 'darwin'):
        # macOS needs libc++ and a minimum deployment target.
        darwin_args = ['-stdlib=libc++', '-mmacosx-version-min=10.7']
        cc_args['unix'] += darwin_args
        ar_args['unix'] += darwin_args

    def build_extensions(self):
        # With CUDA, make distutils invoke nvcc as the compiler executables,
        # then install the compile/link hooks (see hook_compiler), which
        # expect extra_compile_args to be the {'cc': ..., 'cu': ...} dict
        # assigned below.
        if (CUDA is not None):
            self.compiler.set_executable('compiler_so', CUDA['nvcc'])
            self.compiler.set_executable('compiler_cxx', CUDA['nvcc'])
        hook_compiler(self.compiler)
        ct = self.compiler.compiler_type
        for ext in self.extensions:
            ext.extra_compile_args = {'cc': self.cc_args[ct], 'cu': self.cu_args[ct]}
            ext.extra_link_args = self.ar_args[ct]
        build_ext.build_extensions(self)
|
def make_conv_layer(output_shape, filter_size, bin_dtype):
    """Build a Convolution2d block: DenseAffine -> BatchNormalization -> ReLU."""
    sub_net = bb.Sequential([
        bb.DenseAffine(output_shape),
        bb.BatchNormalization(),
        bb.ReLU(bin_dtype=bin_dtype),
    ])
    return bb.Convolution2d(sub_net, filter_size=filter_size, fw_dtype=bin_dtype)
|
def learning(net_name, frame_modulation_size, depth_modulation_size=1, epochs=8, bin_mode=True):
    """Train the convolutional network and record per-epoch loss/accuracy.

    Args:
        net_name (str): network name, used as the save sub-directory
        frame_modulation_size (int): frame modulation width for binary mode
        depth_modulation_size (int): depth modulation width for binary mode
        epochs (int): number of training epochs
        bin_mode (bool): if True, run the net in binary (BIT) mode

    Returns:
        tuple: (train_loss, train_acc, test_loss, test_acc) lists, one
        entry per epoch

    NOTE(review): reads module-level globals ``data_path``, ``loader_train``
    and ``loader_test`` that are not defined inside this function — confirm
    they exist at call time.
    """
    save_path = os.path.join(data_path, net_name)
    # Binary (BIT) frames when bin_mode, otherwise plain FP32.
    bin_dtype = (bb.DType.BIT if bin_mode else bb.DType.FP32)
    net = bb.Sequential([make_conv_layer([32], filter_size=(3, 3), bin_dtype=bin_dtype), make_conv_layer([64], filter_size=(3, 3), bin_dtype=bin_dtype), bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype), make_conv_layer([64], filter_size=(3, 3), bin_dtype=bin_dtype), make_conv_layer([128], filter_size=(3, 3), bin_dtype=bin_dtype), bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype), make_conv_layer([512], filter_size=(5, 5), bin_dtype=bin_dtype), make_conv_layer([10], filter_size=(1, 1), bin_dtype=bin_dtype)])
    if bin_mode:
        # Modulate real-valued inputs to binary frames on the way in and
        # integrate back to real values on the way out.
        net = bb.Sequential([bb.RealToBinary(frame_modulation_size=frame_modulation_size, depth_modulation_size=depth_modulation_size, bin_dtype=bin_dtype), net, bb.BinaryToReal(frame_integration_size=frame_modulation_size, bin_dtype=bin_dtype)])
    net.set_input_shape([3, 32, 32])
    if bin_mode:
        net.send_command('binary true')
    loss = bb.LossSoftmaxCrossEntropy()
    metrics = bb.MetricsCategoricalAccuracy()
    optimizer = bb.OptimizerAdam()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    with tqdm(range(epochs)) as t:
        for epoch in t:
            # --- training pass ---
            loss.clear()
            metrics.clear()
            for (images, labels) in loader_train:
                x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
                # one-hot encode the labels
                t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
                y_buf = net.forward(x_buf, train=True)
                dy_buf = loss.calculate(y_buf, t_buf)
                metrics.calculate(y_buf, t_buf)
                net.backward(dy_buf)
                optimizer.update()
            train_loss.append(loss.get())
            train_acc.append(metrics.get())
            # --- evaluation pass ---
            loss.clear()
            metrics.clear()
            for (images, labels) in loader_test:
                x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
                t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
                y_buf = net.forward(x_buf, train=False)
                loss.calculate(y_buf, t_buf)
                metrics.calculate(y_buf, t_buf)
            test_loss.append(loss.get())
            test_acc.append(metrics.get())
            # Checkpoint after every epoch.
            bb.save_networks(save_path, net)
            t.set_postfix(loss=loss.get(), acc=metrics.get())
    return (train_loss, train_acc, test_loss, test_acc)
|
class DifferentiableLutBlock(bb.Sequential):
    """Cascade of `depth` differentiable LUT layers built back to front.

    Layers are inserted from the output side: the layer producing the final
    output gets `output_shape`, and each preceding layer is 6x wider (one
    6-input LUT per output).  The first inserted layer (output side) may be
    an AverageLut when `average` is True.

    Args:
        output_shape (list[int]): shape of the block's final output; not
            modified (the internal widening works on a copy)
        depth (int): number of LUT layers
        name (str): optional base name; layer i is named '<name>_<i>'
        batch_norm (bool): passed to DifferentiableLut
        binarize (bool): passed to each LUT layer
        average (bool): use AverageLut for the output-side layer
        bin_dtype: binary data type for the LUT layers
    """
    def __init__(self, output_shape, depth, name=None, batch_norm=True, binarize=True, average=True, bin_dtype=bb.DType.FP32):
        # Work on a copy: the original implementation multiplied
        # output_shape[0] in place, mutating the caller's list.
        output_shape = list(output_shape)
        self.layers = []
        for i in range(depth):
            if (name is None):
                layer_name = None
            else:
                layer_name = ((name + '_') + str(i))
            # All but the input-side layer use 'serial' connection.
            connection = ('serial' if (i < (depth - 1)) else 'random')
            if ((i == 0) and average):
                self.layers.insert(0, bb.AverageLut(output_shape, connection=connection, binarize=binarize, name=layer_name, bin_dtype=bin_dtype))
            else:
                self.layers.insert(0, bb.DifferentiableLut(output_shape, connection=connection, batch_norm=batch_norm, binarize=binarize, name=layer_name, bin_dtype=bin_dtype))
            # Each earlier (wider) layer has 6x as many outputs.
            output_shape[0] *= 6
        super(DifferentiableLutBlock, self).__init__(self.layers, name=name)
|
class DifferentiableLutConvolution2d(bb.Convolution2d):
    """Convolution2d whose inner network is a DifferentiableLutBlock."""
    def __init__(self, output_ch, depth, filter_size=(3, 3), padding='valid', batch_norm=True, binarize=True, name=None, fw_dtype=bb.DType.FP32):
        lut_block = DifferentiableLutBlock(
            [output_ch, 1, 1], depth,
            batch_norm=batch_norm, binarize=binarize,
            name=name, bin_dtype=fw_dtype)
        super(DifferentiableLutConvolution2d, self).__init__(
            lut_block, filter_size=filter_size, padding=padding,
            name=name, fw_dtype=fw_dtype)
|
def make_conv_layer(output_ch, hidden_ch, padding='valid', bin_dtype=bb.DType.BIT):
    """Depthwise-separable style LUT convolution block.

    Structure: 1x1 pointwise expansion -> 3x3 depthwise -> 1x1 pointwise
    projection, each built from DifferentiableLut layers.
    """
    pointwise_in = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([(hidden_ch * 6), 1, 1], bin_dtype=bin_dtype),
            bb.DifferentiableLut([hidden_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
        ]),
        filter_size=(1, 1), fw_dtype=bin_dtype)
    depthwise = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([hidden_ch, 1, 1], connection='depthwise', bin_dtype=bin_dtype),
        ]),
        filter_size=(3, 3), padding=padding, fw_dtype=bin_dtype)
    pointwise_out = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([(output_ch * 6), 1, 1], connection='serial', bin_dtype=bin_dtype),
            bb.DifferentiableLut([output_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
        ]),
        filter_size=(1, 1), fw_dtype=bin_dtype)
    return bb.Sequential([pointwise_in, depthwise, pointwise_out])
|
def main():
    """Train an MNIST differentiable-LUT network, then export Verilog RTL
    and $readmemb() test data for simulation.

    Depends on module-level imports (torch, torchvision, transforms, shutil,
    tqdm, np, bb, os) that are outside this block.
    """
    data_path = './data/'
    net_name = 'MnistDifferentiableLutSimple'
    # NOTE(review): the assignment above is immediately overwritten here.
    data_path = os.path.join('./data/', net_name)
    rtl_sim_path = '../../verilog/mnist/tb_mnist_lut_simple'
    rtl_module_name = 'MnistLutSimple'
    # ('velilog' spelling kept — it is the original variable name.)
    output_velilog_file = os.path.join(data_path, (net_name + '.v'))
    sim_velilog_file = os.path.join(rtl_sim_path, (rtl_module_name + '.v'))
    epochs = 4
    mini_batch_size = 64
    frame_modulation_size = 15
    # MNIST datasets and loaders.
    dataset_path = './data/'
    dataset_train = torchvision.datasets.MNIST(root=dataset_path, train=True, transform=transforms.ToTensor(), download=True)
    dataset_test = torchvision.datasets.MNIST(root=dataset_path, train=False, transform=transforms.ToTensor(), download=True)
    loader_train = torch.utils.data.DataLoader(dataset=dataset_train, batch_size=mini_batch_size, shuffle=True, num_workers=2)
    loader_test = torch.utils.data.DataLoader(dataset=dataset_test, batch_size=mini_batch_size, shuffle=False, num_workers=2)
    # Two 3-stage LUT cascades (64-wide then 10-wide); each stage narrows
    # its predecessor by a factor of 6 (6-input LUTs).
    net = bb.Sequential([bb.RealToBinary(frame_modulation_size=frame_modulation_size), bb.DifferentiableLut([((6 * 6) * 64)]), bb.DifferentiableLut([(6 * 64)]), bb.DifferentiableLut([64]), bb.DifferentiableLut([((6 * 6) * 10)]), bb.DifferentiableLut([(6 * 10)]), bb.DifferentiableLut([10]), bb.BinaryToReal(frame_integration_size=frame_modulation_size)])
    net.set_input_shape([1, 28, 28])
    net.send_command('binary true')
    # Resume from a previous checkpoint if one exists.
    bb.load_networks(data_path, net)
    loss = bb.LossSoftmaxCrossEntropy()
    metrics = bb.MetricsCategoricalAccuracy()
    optimizer = bb.OptimizerAdam()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())
    for epoch in range(epochs):
        # --- training pass ---
        loss.clear()
        with tqdm(loader_train) as t:
            for (images, labels) in t:
                x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
                # one-hot encode the labels
                t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
                y_buf = net.forward(x_buf, train=True)
                dy_buf = loss.calculate(y_buf, t_buf)
                net.backward(dy_buf)
                optimizer.update()
                # NOTE(review): metrics.calculate() is never called in this
                # training loop, so the 'acc' shown here is stale (left over
                # from the previous test pass) — confirm whether intended.
                t.set_postfix(loss=loss.get(), acc=metrics.get())
        # --- evaluation pass ---
        loss.clear()
        metrics.clear()
        for (images, labels) in loader_test:
            x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
            t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
            y_buf = net.forward(x_buf, train=False)
            loss.calculate(y_buf, t_buf)
            metrics.calculate(y_buf, t_buf)
        print(('epoch[%d] : loss=%f accuracy=%f' % (epoch, loss.get(), metrics.get())))
        # Checkpoint after every epoch.
        bb.save_networks(data_path, net)
    # Export RTL for the trained net.
    print(('write : %s' % output_velilog_file))
    with open(output_velilog_file, 'w') as f:
        f.write('`timescale 1ns / 1ps\n\n')
        bb.dump_verilog_lut_layers(f, module_name=rtl_module_name, net=net)
    print(('copy : %s -> %s' % (output_velilog_file, sim_velilog_file)))
    shutil.copyfile(output_velilog_file, sim_velilog_file)
    # Emit $readmemb() test data for the RTL simulation.
    print(('write : %s' % os.path.join(rtl_sim_path, 'mnist_test.txt')))
    with open(os.path.join(rtl_sim_path, 'mnist_test.txt'), 'w') as f:
        bb.dump_verilog_readmemb_image_classification(f, loader_test)
|
def make_conv_layer(output_shape, filter_size, bin_dtype):
    """Convolution2d block wrapping DenseAffine + BatchNormalization + ReLU."""
    inner = bb.Sequential([
        bb.DenseAffine(output_shape),
        bb.BatchNormalization(),
        bb.ReLU(bin_dtype=bin_dtype),
    ])
    return bb.Convolution2d(inner, filter_size=filter_size, fw_dtype=bin_dtype)
|
def learning(net_name, frame_modulation_size, depth_modulation_size=1, epochs=8, bin_mode=True):
    """Train the convolutional network and record per-epoch loss/accuracy.

    Args:
        net_name (str): network name, used as the save sub-directory
        frame_modulation_size (int): frame modulation width for binary mode
        depth_modulation_size (int): depth modulation width for binary mode
        epochs (int): number of training epochs
        bin_mode (bool): if True, run the net in binary (BIT) mode

    Returns:
        tuple: (train_loss, train_acc, test_loss, test_acc) lists, one
        entry per epoch

    NOTE(review): reads module-level globals ``data_path``, ``loader_train``
    and ``loader_test`` that are not defined inside this function — confirm
    they exist at call time.
    """
    save_path = os.path.join(data_path, net_name)
    # Binary (BIT) frames when bin_mode, otherwise plain FP32.
    bin_dtype = (bb.DType.BIT if bin_mode else bb.DType.FP32)
    net = bb.Sequential([make_conv_layer([32], filter_size=(3, 3), bin_dtype=bin_dtype), make_conv_layer([64], filter_size=(3, 3), bin_dtype=bin_dtype), bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype), make_conv_layer([64], filter_size=(3, 3), bin_dtype=bin_dtype), make_conv_layer([128], filter_size=(3, 3), bin_dtype=bin_dtype), bb.MaxPooling(filter_size=(2, 2), fw_dtype=bin_dtype), make_conv_layer([512], filter_size=(5, 5), bin_dtype=bin_dtype), make_conv_layer([10], filter_size=(1, 1), bin_dtype=bin_dtype)])
    if bin_mode:
        # NOTE(review): here BinaryToReal is given frame_modulation_size=...,
        # while the otherwise-identical twin of this function elsewhere in
        # this file passes frame_integration_size=... — confirm which keyword
        # the installed bb.BinaryToReal actually accepts.
        net = bb.Sequential([bb.RealToBinary(frame_modulation_size=frame_modulation_size, depth_modulation_size=depth_modulation_size, bin_dtype=bin_dtype), net, bb.BinaryToReal(frame_modulation_size=frame_modulation_size, bin_dtype=bin_dtype)])
    net.set_input_shape([3, 32, 32])
    if bin_mode:
        net.send_command('binary true')
    loss = bb.LossSoftmaxCrossEntropy()
    metrics = bb.MetricsCategoricalAccuracy()
    optimizer = bb.OptimizerAdam()
    optimizer.set_variables(net.get_parameters(), net.get_gradients())
    train_loss = []
    train_acc = []
    test_loss = []
    test_acc = []
    with tqdm(range(epochs)) as t:
        for epoch in t:
            # --- training pass ---
            loss.clear()
            metrics.clear()
            for (images, labels) in loader_train:
                x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
                # one-hot encode the labels
                t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
                y_buf = net.forward(x_buf, train=True)
                dy_buf = loss.calculate(y_buf, t_buf)
                metrics.calculate(y_buf, t_buf)
                net.backward(dy_buf)
                optimizer.update()
            train_loss.append(loss.get())
            train_acc.append(metrics.get())
            # --- evaluation pass ---
            loss.clear()
            metrics.clear()
            for (images, labels) in loader_test:
                x_buf = bb.FrameBuffer.from_numpy(np.array(images).astype(np.float32))
                t_buf = bb.FrameBuffer.from_numpy(np.identity(10)[np.array(labels)].astype(np.float32))
                y_buf = net.forward(x_buf, train=False)
                loss.calculate(y_buf, t_buf)
                metrics.calculate(y_buf, t_buf)
            test_loss.append(loss.get())
            test_acc.append(metrics.get())
            # Checkpoint after every epoch.
            bb.save_networks(save_path, net)
            t.set_postfix(loss=loss.get(), acc=metrics.get())
    return (train_loss, train_acc, test_loss, test_acc)
|
class DifferentiableLutBlock(bb.Sequential):
    """Cascade of `depth` differentiable LUT layers built back to front.

    Layers are inserted from the output side: the layer producing the final
    output gets `output_shape`, and each preceding layer is 6x wider (one
    6-input LUT per output).  The first inserted layer (output side) may be
    an AverageLut when `average` is True.

    Args:
        output_shape (list[int]): shape of the block's final output; not
            modified (the internal widening works on a copy)
        depth (int): number of LUT layers
        name (str): optional base name; layer i is named '<name>_<i>'
        batch_norm (bool): passed to DifferentiableLut
        binarize (bool): passed to each LUT layer
        average (bool): use AverageLut for the output-side layer
        bin_dtype: binary data type for the LUT layers
    """
    def __init__(self, output_shape, depth, name=None, batch_norm=True, binarize=True, average=True, bin_dtype=bb.DType.FP32):
        # Work on a copy: the original implementation multiplied
        # output_shape[0] in place, mutating the caller's list.
        output_shape = list(output_shape)
        self.layers = []
        for i in range(depth):
            if (name is None):
                layer_name = None
            else:
                layer_name = ((name + '_') + str(i))
            # All but the input-side layer use 'serial' connection.
            connection = ('serial' if (i < (depth - 1)) else 'random')
            if ((i == 0) and average):
                self.layers.insert(0, bb.AverageLut(output_shape, connection=connection, binarize=binarize, name=layer_name, bin_dtype=bin_dtype))
            else:
                self.layers.insert(0, bb.DifferentiableLut(output_shape, connection=connection, batch_norm=batch_norm, binarize=binarize, name=layer_name, bin_dtype=bin_dtype))
            # Each earlier (wider) layer has 6x as many outputs.
            output_shape[0] *= 6
        super(DifferentiableLutBlock, self).__init__(self.layers, name=name)
|
class DifferentiableLutConvolution2d(bb.Convolution2d):
    """Convolution2d whose per-window sub-network is a DifferentiableLutBlock.

    The inner LUT block starts from shape ``[output_ch, 1, 1]``; all actual
    convolution mechanics are delegated to ``bb.Convolution2d``.
    """

    def __init__(self, output_ch, depth, filter_size=(3, 3), padding='valid', batch_norm=True, binarize=True, name=None, fw_dtype=bb.DType.FP32):
        lut_block = DifferentiableLutBlock(
            [output_ch, 1, 1], depth,
            batch_norm=batch_norm, binarize=binarize,
            name=name, bin_dtype=fw_dtype)
        super(DifferentiableLutConvolution2d, self).__init__(
            lut_block, filter_size=filter_size, padding=padding,
            name=name, fw_dtype=fw_dtype)
|
def make_conv_layer(hidden_ch, output_ch, padding='same', bin_dtype=bb.DType.BIT):
    """Build a 1x1 -> 3x3(depthwise) -> 1x1 LUT convolution stack.

    Args:
        hidden_ch: channel count of the intermediate stages.
        output_ch: channel count produced by the final 1x1 stage.
        padding: padding mode of the 3x3 stage.
        bin_dtype: binary data type used throughout the stack.
    """
    # 1x1 stage expanding into hidden_ch via a serial LUT pair.
    pointwise_in = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([(hidden_ch * 6), 1, 1], bin_dtype=bin_dtype),
            bb.DifferentiableLut([hidden_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
        ]),
        filter_size=(1, 1), fw_dtype=bin_dtype)
    # 3x3 stage with depthwise-connected LUTs.
    depthwise = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([hidden_ch, 1, 6], connection='depthwise', bin_dtype=bin_dtype),
            bb.DifferentiableLut([hidden_ch, 1, 1], connection='depthwise', bin_dtype=bin_dtype),
        ]),
        filter_size=(3, 3), padding=padding, fw_dtype=bin_dtype)
    # 1x1 stage projecting to output_ch via a serial LUT pair.
    pointwise_out = bb.Convolution2d(
        bb.Sequential([
            bb.DifferentiableLut([(output_ch * 6), 1, 1], bin_dtype=bin_dtype),
            bb.DifferentiableLut([output_ch, 1, 1], connection='serial', bin_dtype=bin_dtype),
        ]),
        filter_size=(1, 1), fw_dtype=bin_dtype)
    return bb.Sequential([pointwise_in, depthwise, pointwise_out])
|
def make_lut_func_name(name, node):
    """Return the per-node HLS function name ``<name>_<node>``."""
    return '{0}_{1}'.format(name, node)
|
def dump_hls_lut_node5(f, name, lut, node):
    """Write one node's LUT table as a ``Q(<func_name>,0x...LL)`` macro line.

    Args:
        f: writable text stream receiving the generated HLS source.
        name: base name of the LUT layer (function-name prefix).
        lut: exported LUT model providing get_lut_table_size()/get_lut_table().
        node: index of the output node to dump.
    """
    # Removed the unused `n = lut.get_node_connection_size(node)` local:
    # the connection count is never referenced in this emitter.
    s = lut.get_lut_table_size(node)
    # Pack the node's truth table into an integer: bit i <- table entry i.
    tbl = 0
    for i in range(s):
        if lut.get_lut_table(node, i):
            tbl += (1 << i)
    f.write(('Q(%s,0x%016xLL)\n' % (make_lut_func_name(name, node), tbl)))
|
def dump_hls_lut_node4(f, name, lut, node):
    """Emit a standalone HLS function returning one LUT output bit.

    The node's truth table is packed into an integer constant and the
    result is computed by shifting that constant by the input index.
    """
    f.write(('\nap_uint<1> %s(\n' % make_lut_func_name(name, node)))
    num_inputs = lut.get_node_connection_size(node)
    table_size = lut.get_lut_table_size(node)
    # Pack the truth table (bit i <- table entry i).
    packed = 0
    for bit in range(table_size):
        if lut.get_lut_table(node, bit):
            packed |= (1 << bit)
    # Parameter list: one ap_uint<1> per input, comma-separated.
    last = num_inputs - 1
    for idx in range(num_inputs):
        f.write((' ap_uint<1> in_data%d' % idx))
        f.write(',\n' if idx < last else ')\n')
    f.write('{\n')
    f.write((' ap_uint<%d> index;\n' % num_inputs))
    for idx in range(num_inputs):
        f.write((' index[%d] = in_data%d;\n' % (idx, idx)))
    f.write((' return ((0x%016xLL >> index) & 1);\n' % packed))
    f.write('}\n\n')
|
def dump_hls_lut_node3(f, name, lut, node):
    """Emit an inline HLS function reading one LUT bit via a static Lut6Model."""
    f.write(('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node)))
    num_inputs = lut.get_node_connection_size(node)
    table_size = lut.get_lut_table_size(node)
    # Pack the truth table (bit i <- table entry i).
    packed = 0
    for bit in range(table_size):
        if lut.get_lut_table(node, bit):
            packed |= (1 << bit)
    for idx in range(num_inputs):
        f.write((' ap_uint<1> in_data%d' % idx))
        f.write(',\n' if idx < (num_inputs - 1) else ')\n')
    f.write('{\n')
    f.write((' ap_uint<%d> index;\n' % num_inputs))
    for idx in range(num_inputs):
        f.write((' index[%d] = in_data%d;\n' % (idx, idx)))
    f.write((' static Lut6Model table(0x%016xLL);\n' % packed))
    f.write(' return table.Get(index);\n')
    f.write('}\n\n')
|
def dump_hls_lut_node2(f, name, lut, node):
    """Emit an inline HLS function backed by a const ROM table bound to LUTRAM."""
    f.write(('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node)))
    num_inputs = lut.get_node_connection_size(node)
    table_size = lut.get_lut_table_size(node)
    for idx in range(num_inputs):
        f.write((' ap_uint<1> in_data%d' % idx))
        f.write(',\n' if idx < (num_inputs - 1) else ')\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n\n')
    f.write((' ap_uint<%d> index;\n' % num_inputs))
    for idx in range(num_inputs):
        f.write((' index[%d] = in_data%d;\n' % (idx, idx)))
    f.write(' \n')
    # Table entries are written one per element, trailing comma included.
    f.write((' const ap_uint<1> table[%d] = {' % table_size))
    for entry in range(table_size):
        f.write(('%d,' % lut.get_lut_table(node, entry)))
    f.write('};\n')
    f.write(' #pragma HLS bind_storage variable=table type=ROM_1P impl=LUTRAM\n')
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut_node1(f, name, lut, node):
    """Emit an inline HLS function indexing a packed const scalar table."""
    f.write(('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node)))
    num_inputs = lut.get_node_connection_size(node)
    table_size = lut.get_lut_table_size(node)
    # Pack the truth table (bit i <- table entry i).
    packed = 0
    for bit in range(table_size):
        if lut.get_lut_table(node, bit):
            packed |= (1 << bit)
    for idx in range(num_inputs):
        f.write((' ap_uint<1> in_data%d' % idx))
        f.write(',\n' if idx < (num_inputs - 1) else ')\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n')
    f.write((' ap_uint<%d> index;\n' % num_inputs))
    for idx in range(num_inputs):
        f.write((' index[%d] = in_data%d;\n' % (idx, idx)))
    f.write((' const ap_uint<%d> table= 0x%016xLL;\n' % (table_size, packed)))
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut(f, name, lut):
    """Emit Q() macro lines for every node plus a wrapper wiring inputs to outputs."""
    in_size = lut.get_input_node_size()
    out_size = lut.get_output_node_size()
    for node in range(out_size):
        dump_hls_lut_node5(f, name, lut, node)
    f.write('\n')
    f.write(('inline ap_uint<%d> %s(ap_uint<%d> i)\n' % (out_size, name, in_size)))
    f.write('{\n')
    f.write(('ap_uint<%d> o;\n' % out_size))
    for node in range(out_size):
        # One call per output bit, arguments routed by connection index.
        f.write(('o[%d]=%s(' % (node, make_lut_func_name(name, node))))
        fan_in = lut.get_node_connection_size(node)
        for idx in range(fan_in):
            f.write(('i[%d]' % lut.get_node_connection_index(node, idx)))
            f.write(',' if idx < (fan_in - 1) else ');\n')
    f.write('return o;\n')
    f.write('}\n\n')
|
def make_lut_func_name(name, node):
    """Return the per-node HLS function name ``<name>_<node>``."""
    return f'{name}_{node}'
|
def dump_hls_lut_node5(f, name, lut, node):
    """Write one node's packed LUT table as a ``Q(<func_name>,0x...LL)`` macro line.

    Args:
        f: writable text stream receiving the generated HLS source.
        name: base name of the LUT layer (function-name prefix).
        lut: exported LUT model providing get_lut_table_size()/get_lut_table().
        node: index of the output node to dump.
    """
    # Removed the unused `n = lut.get_node_connection_size(node)` local:
    # the connection count is never referenced in this emitter.
    s = lut.get_lut_table_size(node)
    # Pack the node's truth table into an integer: bit i <- table entry i.
    tbl = 0
    for i in range(s):
        if lut.get_lut_table(node, i):
            tbl += (1 << i)
    f.write(('Q(%s,0x%016xLL)\n' % (make_lut_func_name(name, node), tbl)))
|
def dump_hls_lut_node4(f, name, lut, node):
    """Emit an HLS function computing one LUT bit via shift-and-mask of a packed table."""
    func = make_lut_func_name(name, node)
    f.write('\nap_uint<1> %s(\n' % func)
    conn_n = lut.get_node_connection_size(node)
    tbl_n = lut.get_lut_table_size(node)
    # Pack the truth table (bit b <- table entry b).
    word = 0
    for b in range(tbl_n):
        if lut.get_lut_table(node, b):
            word += (1 << b)
    for j in range(conn_n):
        f.write(' ap_uint<1> in_data%d' % j)
        if (j + 1) < conn_n:
            f.write(',\n')
        else:
            f.write(')\n')
    f.write('{\n')
    f.write(' ap_uint<%d> index;\n' % conn_n)
    for j in range(conn_n):
        f.write(' index[%d] = in_data%d;\n' % (j, j))
    f.write(' return ((0x%016xLL >> index) & 1);\n' % word)
    f.write('}\n\n')
|
def dump_hls_lut_node3(f, name, lut, node):
    """Emit an inline HLS function whose table lives in a static Lut6Model."""
    f.write('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node))
    fan_in = lut.get_node_connection_size(node)
    n_entries = lut.get_lut_table_size(node)
    # Pack the truth table (bit pos <- table entry pos).
    acc = 0
    for pos in range(n_entries):
        if lut.get_lut_table(node, pos):
            acc += (1 << pos)
    for pos in range(fan_in):
        f.write(' ap_uint<1> in_data%d' % pos)
        if pos == fan_in - 1:
            f.write(')\n')
        else:
            f.write(',\n')
    f.write('{\n')
    f.write(' ap_uint<%d> index;\n' % fan_in)
    for pos in range(fan_in):
        f.write(' index[%d] = in_data%d;\n' % (pos, pos))
    f.write(' static Lut6Model table(0x%016xLL);\n' % acc)
    f.write(' return table.Get(index);\n')
    f.write('}\n\n')
|
def dump_hls_lut_node2(f, name, lut, node):
    """Emit an inline HLS function backed by a const ROM array bound to LUTRAM."""
    f.write('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node))
    fan_in = lut.get_node_connection_size(node)
    n_entries = lut.get_lut_table_size(node)
    for pos in range(fan_in):
        f.write(' ap_uint<1> in_data%d' % pos)
        if pos == fan_in - 1:
            f.write(')\n')
        else:
            f.write(',\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n\n')
    f.write(' ap_uint<%d> index;\n' % fan_in)
    for pos in range(fan_in):
        f.write(' index[%d] = in_data%d;\n' % (pos, pos))
    f.write(' \n')
    # Table entries are emitted one per element, trailing comma included.
    f.write(' const ap_uint<1> table[%d] = {' % n_entries)
    for pos in range(n_entries):
        f.write('%d,' % lut.get_lut_table(node, pos))
    f.write('};\n')
    f.write(' #pragma HLS bind_storage variable=table type=ROM_1P impl=LUTRAM\n')
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut_node1(f, name, lut, node):
    """Emit an inline HLS function indexing a packed const scalar table."""
    f.write('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node))
    fan_in = lut.get_node_connection_size(node)
    n_entries = lut.get_lut_table_size(node)
    # Pack the truth table (bit pos <- table entry pos).
    acc = 0
    for pos in range(n_entries):
        if lut.get_lut_table(node, pos):
            acc += (1 << pos)
    for pos in range(fan_in):
        f.write(' ap_uint<1> in_data%d' % pos)
        if pos == fan_in - 1:
            f.write(')\n')
        else:
            f.write(',\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n')
    f.write(' ap_uint<%d> index;\n' % fan_in)
    for pos in range(fan_in):
        f.write(' index[%d] = in_data%d;\n' % (pos, pos))
    f.write(' const ap_uint<%d> table= 0x%016xLL;\n' % (n_entries, acc))
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut(f, name, lut):
    """Emit per-node Q() macros and a wrapper routing input bits to each node."""
    n_in = lut.get_input_node_size()
    n_out = lut.get_output_node_size()
    for out_idx in range(n_out):
        dump_hls_lut_node5(f, name, lut, out_idx)
    f.write('\n')
    f.write('inline ap_uint<%d> %s(ap_uint<%d> i)\n' % (n_out, name, n_in))
    f.write('{\n')
    f.write('ap_uint<%d> o;\n' % n_out)
    for out_idx in range(n_out):
        # One call per output bit, arguments routed by connection index.
        f.write('o[%d]=%s(' % (out_idx, make_lut_func_name(name, out_idx)))
        fan_in = lut.get_node_connection_size(out_idx)
        for k in range(fan_in):
            f.write('i[%d]' % lut.get_node_connection_index(out_idx, k))
            if k == fan_in - 1:
                f.write(');\n')
            else:
                f.write(',')
    f.write('return o;\n')
    f.write('}\n\n')
|
def make_lut_func_name(name, node):
    """Return the per-node HLS function name ``<name>_lut_<node>``."""
    return f'{name}_lut_{node}'
|
def dump_hls_lut_node2(f, name, lut, node):
    """Emit an inline HLS function reading the node's bit from a LUTRAM table."""
    f.write('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node))
    width = lut.get_node_connection_size(node)
    entries = lut.get_lut_table_size(node)
    for k in range(width):
        f.write(' ap_uint<1> in_data%d' % k)
        f.write(',\n' if k < width - 1 else ')\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n\n')
    f.write(' ap_uint<%d> index;\n' % width)
    for k in range(width):
        f.write(' index[%d] = in_data%d;\n' % (k, k))
    f.write(' \n')
    # Table entries are emitted one per element, trailing comma included.
    f.write(' ap_uint<1> table[%d] = {' % entries)
    for k in range(entries):
        f.write('%d,' % lut.get_lut_table(node, k))
    f.write('};\n')
    f.write(' #pragma HLS bind_storage variable=table type=ROM_1P impl=LUTRAM\n')
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut_node(f, name, lut, node):
    """Emit an inline HLS function indexing a packed-integer LUT constant."""
    f.write('\ninline ap_uint<1> %s(\n' % make_lut_func_name(name, node))
    width = lut.get_node_connection_size(node)
    entries = lut.get_lut_table_size(node)
    # Pack the truth table (bit k <- table entry k).
    mask = 0
    for k in range(entries):
        if lut.get_lut_table(node, k):
            mask |= (1 << k)
    for k in range(width):
        f.write(' ap_uint<1> in_data%d' % k)
        f.write(',\n' if k < width - 1 else ')\n')
    f.write('{\n')
    f.write('#pragma HLS inline\n')
    f.write(' ap_uint<%d> index;\n' % width)
    for k in range(width):
        f.write(' index[%d] = in_data%d;\n' % (k, k))
    f.write(' ap_uint<%d> table= 0x%016xLL;\n' % (entries, mask))
    f.write(' return table[index];\n')
    f.write('}\n\n')
|
def dump_hls_lut(f, name, lut):
    """Emit one ROM-table function per output node plus a wiring wrapper."""
    input_bits = lut.get_input_node_size()
    output_bits = lut.get_output_node_size()
    for node in range(output_bits):
        dump_hls_lut_node2(f, name, lut, node)
    f.write('\n')
    f.write('inline ap_uint<%d> %s(ap_uint<%d> in_data)\n' % (output_bits, name, input_bits))
    f.write('{\n')
    f.write(' ap_uint<%d> out_data;\n' % output_bits)
    for node in range(output_bits):
        # One call per output bit, arguments routed by connection index.
        f.write(' out_data[%d] = %s(' % (node, make_lut_func_name(name, node)))
        width = lut.get_node_connection_size(node)
        for k in range(width):
            f.write('in_data[%d]' % lut.get_node_connection_index(node, k))
            f.write(',' if k < width - 1 else ');\n')
    f.write(' return out_data;\n')
    f.write('}\n\n')
|
def plot_image(img):
    """Show a flat CHW (3, 32, 32) image by transposing it to HWC first."""
    chw = img.reshape(3, 32, 32)
    plt.imshow(chw.transpose(1, 2, 0))
|
def create_conv_layer(shape, w, h, batch_norm=False, act=True, padding='valid'):
    """Build a DenseAffine (+ optional BatchNorm / ReLU) inside a lowering convolution.

    Args:
        shape: output shape passed to DenseAffine.
        w, h: filter width and height of the lowering convolution.
        batch_norm: append a BatchNormalization layer when True.
        act: append a ReLU activation when True.
        padding: padding mode of the lowering convolution.
    """
    inner = bb.Sequential.create()
    inner.add(bb.DenseAffine.create(shape))
    if batch_norm:
        inner.add(bb.BatchNormalization.create())
    if act:
        inner.add(bb.ReLU.create())
    return bb.LoweringConvolution.create(inner, w, h, 1, 1, padding=padding)
|
def plot_image(img):
    """Display a flattened 3x32x32 (CHW) image as HWC with matplotlib."""
    plt.imshow(img.reshape(3, 32, 32).transpose(1, 2, 0))
|
def create_conv_layer(sub_layers, w, h, padding='valid'):
    """Wrap the given layers in a Sequential inside a bit-wise lowering convolution."""
    seq = bb.Sequential.create()
    for item in sub_layers:
        seq.add(item)
    return bb.LoweringConvolutionBit.create(seq, w, h, 1, 1, padding=padding)
|
def loadTags(filename):
    """Load tag names from a CSV file (first column per row).

    Returns:
        (names, lookup): the ordered tag names and a dict mapping each
        tag name to its row index.
    """
    with open(filename) as f:
        rows = [row for row in csv.reader(f)]
    names = []
    for row in rows:
        names.append(row[0])
    lookup = {tag: idx for (idx, tag) in enumerate(names)}
    return (names, lookup)
|
def getTagScore(scores, tags, tag2IDs):
    """Softmax the raw scores and pair each requested tag with its probability.

    Args:
        scores: raw (pre-softmax) score vector.
        tags: tag names to report.
        tag2IDs: mapping from tag name to index into `scores`.

    Returns:
        List of (tag, probability) tuples in the order of `tags`.
    """
    probs = np.exp(scores)
    probs = probs / probs.sum()
    return [(tag, probs[tag2IDs[tag]]) for tag in tags]
|
def showAttMap(img, attMaps, tagName, overlap=True, blur=False):
    """Plot an image and one attention map per tag in a two-column grid.

    Args:
        img: HxW(x3) image array; blending assumes values in [0, 1] -- confirm.
        attMaps: sequence of per-tag 2-D attention maps.
        tagName: tag labels; one subplot per tag plus one for the image.
        overlap: if True, alpha-blend the colored heatmap over the image.
        blur: if True, gaussian-blur and renormalize each map first.
    """
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    # BUGFIX: use integer division -- under Python 3, `/` yields a float,
    # which plt.subplots rejects as a row count.
    (f, ax) = plt.subplots(((len(tagName) // 2) + 1), 2)
    if (len(ax.shape) == 1):
        ax[0].imshow(img)
    else:
        ax[(0, 0)].imshow(img)
    for i in range(len(tagName)):
        attMap = attMaps[i].copy()
        # Rescale to [0, 1] (skip the divide when the map is constant).
        attMap -= attMap.min()
        if (attMap.max() > 0):
            attMap /= attMap.max()
        attMap = transform.resize(attMap, img.shape[:2], order=3, mode='nearest')
        if blur:
            attMap = filters.gaussian_filter(attMap, (0.02 * max(img.shape[:2])))
            attMap -= attMap.min()
            attMap /= attMap.max()
        # Colorize with the jet colormap, then drop the alpha channel.
        cmap = plt.get_cmap('jet')
        attMapV = cmap(attMap)
        attMapV = np.delete(attMapV, 3, 2)
        if overlap:
            # Alpha-blend: attention^0.8 weights the heatmap over the image.
            attMap = (((1 * (1 - (attMap ** 0.8)).reshape((attMap.shape + (1,)))) * img) + ((attMap ** 0.8).reshape((attMap.shape + (1,))) * attMapV))
        # BUGFIX: integer division again -- float subplot indices raise in Python 3.
        if (len(ax.shape) == 1):
            ax[(i + 1)].imshow(attMap, interpolation='bicubic')
            ax[(i + 1)].set_title(tagName[i])
        else:
            ax[(((i + 1) // 2), ((i + 1) % 2))].imshow(attMap, interpolation='bicubic')
            ax[(((i + 1) // 2), ((i + 1) % 2))].set_title(tagName[i])
|
def Normalize(a):
    """Linearly rescale array `a` to the [0, 1] range (min -> 0, max -> 1)."""
    lo = a.min()
    span = a.max() - lo
    return (a - lo) / span
|
def doGradCAM(net, img, tagID, top=topLayerName, bottom=outputLayerName):
    """Compute a Grad-CAM heatmap for class `tagID` on an already-loaded net.

    Forward-propagates to layer `top`, backprops a one-hot gradient for
    `tagID` down to layer `bottom`, weights the forward feature maps by
    their spatially-summed gradients, applies ReLU + min/max normalization,
    and resizes the map to 224x224.
    """
    caffe.set_mode_gpu()
    out = net.forward(end=top)
    # Seed the backward pass with a one-hot gradient at the top blob.
    net.blobs[top].diff[0][...] = 0
    net.blobs[top].diff[0][tagID] = 1
    fprop_maps = net.blobs[bottom].data[0]
    out = net.backward(start=top, end=bottom)
    # Per-channel weights: gradients summed over both spatial dimensions.
    map_weights = net.blobs[bottom].diff[0].sum(1).sum(1)
    # Broadcast each channel weight across that channel's spatial grid.
    map_weights = map_weights.repeat((fprop_maps.shape[1] * fprop_maps.shape[2])).reshape(map_weights.shape[0], fprop_maps.shape[1], fprop_maps.shape[2])
    gradCAM_beforeReLU = np.multiply(fprop_maps, map_weights).sum(0)
    # Keep only positively-contributing locations, then rescale to [0, 1].
    gradCAM = Normalize(np.maximum(gradCAM_beforeReLU, 0))
    # NOTE(review): 224x224 presumably matches the network input size -- confirm.
    gradCAM = transform.resize(gradCAM, (224, 224))
    return gradCAM
|
def repro_fig_3(gpu=None, interp='nearest'):
    """Plot excitation-backprop heatmaps of VGG16 at successive pooling layers.

    Shows the input image followed by one heatmap per layer in
    pool5 ... pool1 order.
    """
    vgg = caffe.Net('/home/ruthfong/packages/caffe/models/vgg16/VGG_ILSVRC_16_layers_deploy_force_backward.prototxt', '/home/ruthfong/packages/caffe/models/vgg16/VGG_ILSVRC_16_layers.caffemodel', caffe.TEST)
    transformer = get_ILSVRC_net_transformer(vgg)
    topName = 'fc8'
    bottomNames = ['pool5', 'pool4', 'pool3', 'pool2', 'pool1']
    tabby_i = 281  # ILSVRC class index; presumably "tabby cat" -- confirm against synsets
    img_path = '/home/ruthfong/neural_coding/images/tabby_cat_cropped.jpg'
    img = caffe.io.load_image(img_path)
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    (f, ax) = plt.subplots(1, (len(bottomNames) + 1))
    ax[0].imshow(img)
    for (idx, bottom) in enumerate(bottomNames):
        heatmap = compute_heatmap(net=vgg, transformer=transformer, paths=img_path, labels=tabby_i, heatmap_type='excitation_backprop', topBlobName=topName, topLayerName=topName, outputBlobName=bottom, outputLayerName=bottom, gpu=gpu)
        ax[(idx + 1)].imshow(overlay_map(img, heatmap, overlay=False, interp=interp), interpolation=interp)
|
def repro_fig_4(gpu=None, interp='bicubic'):
    """Plot contrastive excitation-backprop heatmaps (zebra vs. elephant) on one image."""
    googlenet = caffe.Net('/home/ruthfong/packages/caffe/models/bvlc_googlenet/deploy_force_backward.prototxt', '/home/ruthfong/packages/caffe/models/bvlc_googlenet/bvlc_googlenet.caffemodel', caffe.TEST)
    topName = 'loss3/classifier'
    bottomName = 'pool2/3x3_s2'
    zebra_i = 340     # ILSVRC class indices; presumably zebra / elephant -- confirm
    elephant_i = 386
    transformer = get_ILSVRC_net_transformer(googlenet)
    img_path = '/home/ruthfong/neural_coding/fnn_images/zeb-ele1.jpg'
    # One heatmap per target class, computed with identical settings.
    heatmaps = []
    for label in (zebra_i, elephant_i):
        heatmaps.append(compute_heatmap(net=googlenet, transformer=transformer, paths=img_path, labels=label, heatmap_type='excitation_backprop', topBlobName=topName, topLayerName=topName, outputBlobName=bottomName, outputLayerName=bottomName, gpu=gpu))
    img = caffe.io.load_image(img_path)
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    (f, ax) = plt.subplots(1, 3)
    ax[0].imshow(img)
    ax[1].imshow(overlay_map(img, heatmaps[0], overlay=False, interp=interp), interpolation=interp)
    ax[2].imshow(overlay_map(img, heatmaps[1], overlay=False, interp=interp), interpolation=interp)
|
def main():
    """Sanity-check saved perturbation masks on 100 held-out ImageNet images.

    Loads GoogLeNet, reads the annotated held-out imdb, pairs each image with
    a precomputed mask file, and writes one comparison figure per image.
    """
    gpu = 0
    net_type = 'googlenet'
    caffe.set_device(gpu)
    caffe.set_mode_gpu()
    net = get_net(net_type)
    # ILSVRC metadata (class descriptions and synset ids).
    labels_desc = np.loadtxt('/home/ruthfong/packages/caffe/data/ilsvrc12/synset_words.txt', str, delimiter='\t')
    synsets = np.loadtxt('/home/ruthfong/packages/caffe/data/ilsvrc12/synsets.txt', str, delimiter='\t')
    (paths, labels) = read_imdb('/home/ruthfong/packages/caffe/data/ilsvrc12/annotated_train_heldout_imdb.txt')
    paths = np.array(paths)
    labels = np.array(labels)
    res_dir = '/data/ruthfong/neural_coding/pycaffe_results'
    mask_rel_dir = 'googlenet_train_heldout_given_grad_1_norm_0/min_top0_prob_blur/lr_-1.00_l1_lambda_-4.00_tv_lambda_-inf_l1_lambda_2_-2.00_beta_3.00_mask_scale_8_blur_mask_5_jitter_4_noise_-inf_num_iters_300_tv2_mask_init'
    # NOTE(review): assumes os.listdir ordering lines up masks with `paths` -- confirm.
    mask_paths = [os.path.join(res_dir, mask_rel_dir, x) for x in os.listdir(os.path.join(res_dir, mask_rel_dir))]
    num_top = 0  # 0 => target the ground-truth label instead of the top-k predictions
    transformer = get_ILSVRC_net_transformer(net)
    pylab.rcParams['figure.figsize'] = (12.0, 12.0)
    for i in range(100):
        img = transformer.preprocess('data', caffe.io.load_image(paths[i]))
        scores = forward_pass(net, img)
        sorted_idx = np.argsort(scores)
        # Build the one-hot (or k-hot) target vector for this image.
        target = np.zeros(scores.shape)
        if (num_top == 0):
            target[labels[i]] = 1
        else:
            target[sorted_idx[:(- (num_top + 1)):(- 1)]] = 1
        fig_path = os.path.join('/data/ruthfong/neural_coding/sanity_checks', mask_rel_dir, ('%d.png' % i))
        check_mask_generalizability(net, paths[i], target, mask_paths[i], last_layer='prob', fig_path=fig_path)
        # Close each figure so 100 iterations don't accumulate open figures.
        plt.close()
|
def load_valid_paths():
    """Read ./valid_paths.txt and return its non-empty lines, stripped."""
    with open('./valid_paths.txt', 'r') as fp:
        stripped = (line.strip() for line in fp)
        return [item for item in stripped if item != '']
|
def get_third_party():
    """Collect package names (first space-separated token per non-empty line)
    from every *.txt file under ./requirements (recursively)."""
    packages = []
    for req_file in Path('./requirements').rglob('*.txt'):
        with open(req_file, 'r') as fp:
            for raw in fp:
                entry = raw.strip()
                if entry:
                    packages.append(entry.split(' ')[0])
    return packages
|
def run_command(command: str):
    """Run `command` (split on spaces, no shell); print its output and re-raise on failure."""
    argv = command.split(' ')
    try:
        check_output(argv)
    except CalledProcessError as err:
        # Surface the failing tool's output before propagating the error.
        print(err.output.decode('utf-8'))
        raise
|
def main():
    """CLI entry point: run flake8, black and isort over the given files.

    With --check the formatters only verify; otherwise they rewrite in place.
    Falls back to ./valid_paths.txt when no files are passed.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('files', type=str, nargs='*', default=[], help='If no file is given, use the files under ./valid_paths.txt')
    parser.add_argument('--check', action='store_true', help='Only checks the files')
    args = parser.parse_args()
    if not args.files:
        args.files = load_valid_paths()
    print(f'Formatting files: {args.files}')
    args.files = ' '.join(args.files)
    print('Run flake8')
    # First pass: hard errors only; second pass: style stats, never failing.
    run_command(f'flake8 {args.files} --count --select=E9,F63,F7,F82 --show-source --statistics')
    run_command(f'flake8 {args.files} --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics')
    print('Run black')
    black_cmd = f'black --check {args.files}' if args.check else f'black {args.files}'
    run_command(black_cmd)
    print('Run isort')
    third_party = ','.join(get_third_party())
    isort_base = f'isort --profile black --thirdparty {third_party}'
    run_command(f'{isort_base} --check {args.files}' if args.check else f'{isort_base} {args.files}')
    if args.check:
        print('Successfully passed the format check!')
|
def linkcode_resolve(domain, info):
    """Sphinx linkcode hook: map a documented Python object to its GitHub URL.

    Returns None for non-Python domains, missing modules, and properties;
    falls back to a module-path URL when the source cannot be located.
    """
    def find_source():
        # Walk from the module object down the dotted attribute path.
        obj = sys.modules[info['module']]
        for attr in info['fullname'].split('.'):
            obj = getattr(obj, attr)
        if isinstance(obj, property):
            return None
        # Keep path components up to and including the 's3prl' package root.
        parts = Path(inspect.getsourcefile(obj)).parts
        kept = []
        for part in reversed(parts):
            kept.append(part)
            if part == 's3prl':
                break
        fn = '/'.join(reversed(kept))
        (source, lineno) = inspect.getsourcelines(obj)
        return (fn, lineno, ((lineno + len(source)) - 1))
    if (domain != 'py') or (not info['module']):
        return None
    tag = 'master' if ('dev' in release) else ('v' + release)
    try:
        filename = ('%s#L%d-L%d' % find_source())
    except Exception:
        # Covers properties (find_source returns None) and lookup failures.
        filename = (info['module'].replace('.', '/') + '.py')
    return ('https://github.com/s3prl/s3prl/blob/%s/%s' % (tag, filename))
|
class LowResourceLinearSuperbASR(SuperbASR):
    """SuperbASR variant with a 100-row training set and a single linear downstream."""

    def prepare_data(self, prepare_data: dict, target_dir: str, cache_dir: str, get_path_only=False):
        """Prepare data via the parent, then keep only the first 100 training rows."""
        (train_path, valid_path, test_paths) = super().prepare_data(prepare_data, target_dir, cache_dir, get_path_only)
        truncated = pd.read_csv(train_path).iloc[:100]
        truncated.to_csv(train_path, index=False)
        return (train_path, valid_path, test_paths)

    def build_downstream(self, build_downstream: dict, downstream_input_size: int, downstream_output_size: int, downstream_input_stride: int):
        """Return a minimal downstream: one Linear layer, lengths passed through unchanged."""
        import torch

        class Model(torch.nn.Module):
            def __init__(self, input_size, output_size) -> None:
                super().__init__()
                self.linear = torch.nn.Linear(input_size, output_size)

            def forward(self, x, x_len):
                return (self.linear(x), x_len)

        return Model(downstream_input_size, downstream_output_size)
|
def parse_args():
    """Parse pretrain CLI arguments; return (problem class, merged config Container)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('problem', help='The problem module. E.g. `s3prl.problem.ssl.tera.Tera`')
    parser.add_argument('dataset_root', help='The dataset root for pretrain.')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    parser.add_argument('--n_jobs', type=int, default=8)
    parser.add_argument('--override', default=None, help='Override the default_config of the problem module. E.g. --override ValidSampler.batch_size=4,,TestSampler.batch_size=4')
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--dryrun', action='store_true')
    parser.add_argument('--seed', type=int, default=1337)
    args = parser.parse_args()
    fix_random_seeds(args.seed)
    problem = qualname_to_cls(args.problem)
    config = Container(deepcopy(problem.default_config))
    # Copy every CLI option except the raw override string into the config.
    for (key, value) in vars(args).items():
        if key != 'override':
            config[key] = value
    if args.dryrun:
        config.override(DRYRUN_CONFIG)
    if isinstance(args.override, str) and args.override:
        config.override(parse_override(args.override))
    return (problem, config)
|
def main():
    """Pretrain a problem's Body/Head with train/valid/test loops and checkpointing.

    Builds the corpus, datasets and dataloaders from the problem's config,
    optionally resumes from the newest ``*.ckpts`` directory, then trains for
    ``config.Trainer.total_steps`` steps with gradient accumulation, periodic
    logging, validation-based best-checkpoint saving, and a final test pass.
    """
    logging.basicConfig(level=logging.INFO)
    (problem, config) = parse_args()
    save_to = Path(config.save_to)
    save_to.mkdir(exist_ok=True, parents=True)
    body = problem.Body(**config.Body)
    head = problem.Head(**config.Head)
    loss = problem.Loss(**config.Loss)
    stats = Container()
    logger.info('Preparing corpus')
    corpus = problem.Corpus(config.dataset_root, **config.Corpus)
    (train_data, valid_data, test_data, corpus_stats) = corpus().split(3)
    stats.add(corpus_stats)
    logger.info('Preparing train data')
    train_dataset = AugmentedDynamicItemDataset(train_data, tools=stats)
    train_dataset = problem.TrainData(**config.TrainData)(train_dataset)
    assert (train_dataset.get_tool('feat_dim') == problem.input_size)
    train_sampler = DistributedBatchSamplerWrapper(problem.TrainSampler(train_dataset, **config.TrainSampler), num_replicas=1, rank=0)
    train_dataloader = DataLoader(train_dataset, train_sampler, num_workers=config.n_jobs)
    stats.add(train_dataset.all_tools())
    logger.info('Preparing valid data')
    valid_dataset = AugmentedDynamicItemDataset(valid_data, tools=stats)
    valid_dataset = problem.ValidData(**config.ValidData)(valid_dataset)
    valid_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(valid_dataset, **config.ValidSampler), num_replicas=1, rank=0)
    valid_dataloader = DataLoader(valid_dataset, valid_sampler, num_workers=12)
    logger.info('Preparing test data')
    test_dataset = AugmentedDynamicItemDataset(test_data, tools=stats)
    test_dataset = problem.TestData(**config.TestData)(test_dataset)
    test_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(test_dataset, **config.TestSampler), num_replicas=1, rank=0)
    test_dataloader = DataLoader(test_dataset, test_sampler, num_workers=12)
    # Checkpoint directories sorted oldest -> newest by modification time.
    sorted_ckpt_dirs = sorted([file for file in save_to.iterdir() if (file.is_dir() and str(file).endswith('.ckpts'))], key=os.path.getmtime)
    if (config.resume and (len(sorted_ckpt_dirs) > 0)):
        logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
        # BUGFIX: resume the task from the NEWEST checkpoint ([-1]); the original
        # indexed [1] (second-oldest) while the optimizer below loads from [-1],
        # producing a mismatched task/optimizer pair.
        task = Object.load_checkpoint((sorted_ckpt_dirs[(- 1)] / 'task.ckpt')).to(device)
    else:
        logger.info('Create a new model')
        task = problem.Task(body, head, loss, **stats)
        task = task.to(device)
    (opt_cls_qualname, opt_cfgs) = config.Optimizer.split(1)
    optimizer = qualname_to_cls(opt_cls_qualname)(task.parameters(), **opt_cfgs)
    if (config.resume and (len(sorted_ckpt_dirs) > 0)):
        optimizer.load_state_dict(torch.load((sorted_ckpt_dirs[(- 1)] / 'optimizer.ckpt')))
    if config.Trainer.use_valid:
        if (config.resume and (len(sorted_ckpt_dirs) > 0)):
            valid_best_score = torch.load((sorted_ckpt_dirs[(- 1)] / 'valid_best_score.ckpt'))[config.Trainer.valid_metric]
        else:
            valid_best_score = ((- 100000) if config.Trainer.valid_higher_better else 100000)

    def save_checkpoint(name):
        # Persist task, optimizer state and the current best validation score.
        ckpt_dir: Path = (save_to / f'{name}.ckpts')
        ckpt_dir.mkdir(parents=True, exist_ok=True)
        logger.info(f'Save checkpoint to: {ckpt_dir}')
        if hasattr(problem, 'save_checkpoint'):
            logger.info(f'Save upstream checkpoint to: {ckpt_dir}')
            problem.save_checkpoint(config, body, head, (ckpt_dir / 'upstream.ckpt'))
        task.save_checkpoint((ckpt_dir / 'task.ckpt'))
        torch.save(optimizer.state_dict(), (ckpt_dir / 'optimizer.ckpt'))
        torch.save({config.Trainer.valid_metric: valid_best_score}, (ckpt_dir / 'valid_best_score.ckpt'))
    pbar = tqdm(total=config.Trainer.total_steps, desc='Total')
    train_completed = False
    accum_grad_steps = 0
    while (not train_completed):
        batch_results = []
        for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
            pbar.update(1)
            global_step = pbar.n
            assert isinstance(batch, Output)
            batch = batch.to(device)
            task.train()
            result = task.train_step(**batch)
            assert isinstance(result, Output)
            # Scale the loss so accumulated gradients average over the steps.
            result.loss /= config.Trainer.gradient_accumulate_steps
            result.loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=config.Trainer.gradient_clipping)
            if math.isnan(grad_norm):
                # Drop the poisoned accumulated gradients entirely.
                logger.warning(f'Grad norm is NaN at step {global_step}')
                optimizer.zero_grad()
                accum_grad_steps = 0
            else:
                accum_grad_steps += 1
                if (accum_grad_steps == config.Trainer.gradient_accumulate_steps):
                    optimizer.step()
                    optimizer.zero_grad()
                    accum_grad_steps = 0
            batch_results.append(result.cacheable())
            if ((global_step % config.Trainer.log_step) == 0):
                logs: Logs = task.train_reduction(batch_results).logs
                logger.info(f'[Train] step {global_step}')
                for (name, value) in logs.Scalar.items():
                    if (name == 'loss'):
                        # Undo the accumulation scaling for reporting.
                        value *= config.Trainer.gradient_accumulate_steps
                    logger.info(f'{name}: {value}')
                batch_results = []
            if ((global_step % config.Trainer.valid_step) == 0):
                with torch.no_grad():
                    if config.Trainer.use_valid:
                        valid_results = []
                        for (batch_idx, batch) in enumerate(tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader))):
                            # Optional cap on evaluated batches (eval_batch=-1 means all).
                            if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
                                break
                            batch = batch.to(device)
                            task.eval()
                            result = task.valid_step(**batch)
                            valid_results.append(result.cacheable())
                        logs: Logs = task.valid_reduction(valid_results).slice(1)
                        logger.info(f'[Valid] step {global_step}')
                        for (name, value) in logs.Scalar.items():
                            logger.info(f'{name}: {value}')
                            if (name == config.Trainer.valid_metric):
                                # Save a "valid_best" checkpoint when the metric improves.
                                cond1 = (config.Trainer.valid_higher_better and (value > valid_best_score))
                                cond2 = ((not config.Trainer.valid_higher_better) and (value < valid_best_score))
                                if (cond1 or cond2):
                                    valid_best_score = value
                                    save_checkpoint('valid_best')
            if (((global_step % config.Trainer.save_step) == 0) or (global_step == config.Trainer.total_steps)):
                save_checkpoint(f'global_step_{global_step}')
            if (global_step == config.Trainer.total_steps):
                train_completed = True
                break
    # Final test pass after training completes.
    test_results = []
    for (batch_idx, batch) in enumerate(tqdm(test_dataloader, desc='Test', total=len(test_dataloader))):
        if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
            break
        batch = batch.to(device)
        result = task.test_step(**batch)
        test_results.append(result.cacheable())
    logs: Logs = task.test_reduction(test_results).slice(1)
    logger.info(f'[Test] step {global_step}')
    for (name, value) in logs.Scalar.items():
        logger.info(f'{name}: {value}')
|
def parse_args():
    """Parse downstream-evaluation CLI arguments; return (problem class, merged config)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('upstream', help='The upstream name. E.g. wav2vec2')
    parser.add_argument('problem', help='The problem module. E.g. s3prl.problem.SuperbSID')
    parser.add_argument('dataset_root', help='The dataset root of your problem.')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    parser.add_argument('--feature_selection', default='hidden_states')
    parser.add_argument('--n_jobs', type=int, default=6)
    parser.add_argument('--override', default=None, help='Override the default_config of the problem module. E.g. --override ValidSampler.batch_size=4,,TestSampler.batch_size=4')
    parser.add_argument('--resume', action='store_true')
    parser.add_argument('--dryrun', action='store_true')
    parser.add_argument('--seed', type=int, default=1337)
    args = parser.parse_args()
    fix_random_seeds(args.seed)
    problem = qualname_to_cls(args.problem)
    config = Container(deepcopy(problem.default_config))
    for (cli_key, cli_value) in vars(args).items():
        # The raw 'override' string is consumed separately below.
        if cli_key != 'override':
            config[cli_key] = cli_value
    if args.dryrun:
        config.override(DRYRUN_CONFIG)
    if isinstance(args.override, str) and len(args.override) > 0:
        config.override(parse_override(args.override))
    return (problem, config)
|
def main():
logging.basicConfig(level=logging.INFO)
(problem, config) = parse_args()
save_to = Path(config.save_to)
save_to.mkdir(exist_ok=True, parents=True)
upstream = S3PRLUpstream(config.upstream, config.feature_selection)
stats = Container(upstream_rate=upstream.downsample_rate)
logger.info('Preparing corpus')
corpus = problem.Corpus(config.dataset_root, **config.Corpus)
(train_data, valid_data, test_data, corpus_stats) = corpus().split(3)
stats.add(corpus_stats)
logger.info('Preparing train data')
train_dataset = AugmentedDynamicItemDataset(train_data, tools=stats)
train_dataset = problem.TrainData(**config.TrainData)(train_dataset)
train_sampler = DistributedBatchSamplerWrapper(problem.TrainSampler(train_dataset, **config.TrainSampler), num_replicas=1, rank=0)
train_dataloader = DataLoader(train_dataset, train_sampler, num_workers=config.n_jobs)
stats.add(train_dataset.all_tools())
logger.info('Preparing valid data')
valid_dataset = AugmentedDynamicItemDataset(valid_data, tools=stats)
valid_dataset = problem.ValidData(**config.ValidData)(valid_dataset)
valid_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(valid_dataset, **config.ValidSampler), num_replicas=1, rank=0)
valid_dataloader = DataLoader(valid_dataset, valid_sampler, num_workers=12)
logger.info('Preparing test data')
test_dataset = AugmentedDynamicItemDataset(test_data, tools=stats)
test_dataset = problem.TestData(**config.TestData)(test_dataset)
test_sampler = DistributedBatchSamplerWrapper(problem.ValidSampler(test_dataset, **config.TestSampler), num_replicas=1, rank=0)
test_dataloader = DataLoader(test_dataset, test_sampler, num_workers=12)
sorted_ckpt_dirs = sorted([file for file in save_to.iterdir() if (file.is_dir() and str(file).endswith('.ckpts'))], key=os.path.getmtime)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
task = Object.load_checkpoint((sorted_ckpt_dirs[1] / 'task.ckpt')).to(device)
else:
logger.info('Create a new model')
downstream = problem.Downstream(upstream.output_size, **stats)
model = UpstreamDownstreamModel(upstream, downstream)
task = problem.Task(model, **stats, **config.Task)
task = task.to(device)
(opt_cls_qualname, opt_cfgs) = config.Optimizer.split(1)
optimizer = qualname_to_cls(opt_cls_qualname)(task.parameters(), **opt_cfgs)
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
optimizer.load_state_dict(torch.load((sorted_ckpt_dirs[(- 1)] / 'optimizer.ckpt')))
if config.Trainer.use_valid:
if (config.resume and (len(sorted_ckpt_dirs) > 0)):
valid_best_score = torch.load((sorted_ckpt_dirs[(- 1)] / 'valid_best_score.ckpt'))[config.Trainer.valid_metric]
else:
valid_best_score = ((- 100000) if config.Trainer.valid_higher_better else 100000)
def save_checkpoint(name):
ckpt_dir: Path = (save_to / f'{name}.ckpts')
ckpt_dir.mkdir(parents=True, exist_ok=True)
logger.info(f'Save checkpoint to: {ckpt_dir}')
task.save_checkpoint((ckpt_dir / 'task.ckpt'))
torch.save(optimizer.state_dict(), (ckpt_dir / 'optimizer.ckpt'))
torch.save({config.Trainer.valid_metric: valid_best_score}, (ckpt_dir / 'valid_best_score.ckpt'))
pbar = tqdm(total=config.Trainer.total_steps, desc='Total')
train_completed = False
accum_grad_steps = 0
while (not train_completed):
batch_results = []
for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
pbar.update(1)
global_step = pbar.n
assert isinstance(batch, Output)
batch = batch.to(device)
task.train()
result = task.train_step(**batch)
assert isinstance(result, Output)
result.loss /= config.Trainer.gradient_accumulate_steps
result.loss.backward()
grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=config.Trainer.gradient_clipping)
if math.isnan(grad_norm):
logger.warning(f'Grad norm is NaN at step {global_step}')
optimizer.zero_grad()
accum_grad_steps = 0
else:
accum_grad_steps += 1
if (accum_grad_steps == config.Trainer.gradient_accumulate_steps):
optimizer.step()
optimizer.zero_grad()
accum_grad_steps = 0
batch_results.append(result.cacheable())
if ((global_step % config.Trainer.log_step) == 0):
logs: Logs = task.train_reduction(batch_results).logs
logger.info(f'[Train] step {global_step}')
for (name, value) in logs.Scalar.items():
if (name == 'loss'):
value *= config.Trainer.gradient_accumulate_steps
logger.info(f'{name}: {value}')
batch_results = []
if ((global_step % config.Trainer.valid_step) == 0):
with torch.no_grad():
if config.Trainer.use_valid:
valid_results = []
for (batch_idx, batch) in enumerate(tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
task.eval()
result = task.valid_step(**batch)
valid_results.append(result.cacheable())
logs: Logs = task.valid_reduction(valid_results).slice(1)
logger.info(f'[Valid] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
if (name == config.Trainer.valid_metric):
cond1 = (config.Trainer.valid_higher_better and (value > valid_best_score))
cond2 = ((not config.Trainer.valid_higher_better) and (value < valid_best_score))
if (cond1 or cond2):
valid_best_score = value
save_checkpoint('valid_best')
if (((global_step % config.Trainer.save_step) == 0) or (global_step == config.Trainer.total_steps)):
save_checkpoint(f'global_step_{global_step}')
if (global_step == config.Trainer.total_steps):
train_completed = True
break
test_results = []
for (batch_idx, batch) in enumerate(tqdm(test_dataloader, desc='Test', total=len(test_dataloader))):
if (batch_idx == config.Trainer.get('eval_batch', (- 1))):
break
batch = batch.to(device)
result = task.test_step(**batch)
test_results.append(result.cacheable())
logs: Logs = task.test_reduction(test_results).slice(1)
logger.info(f'[Test] step {global_step}')
for (name, value) in logs.Scalar.items():
logger.info(f'{name}: {value}')
|
def parse_args():
    """Read the checkpoint directory path from the command line."""
    parser = argparse.ArgumentParser()
    parser.add_argument('load_from', help='The directory containing all the checkpoints')
    return parser.parse_args()
|
def main():
    """Run inference with a saved task over the saved test set and print predictions.

    Loads ``task.ckpt`` and ``test_dataset.ckpt`` from the directory given on the
    command line, then prints one (name, prediction) pair per utterance.
    """
    args = parse_args()
    ckpt_dir = Path(args.load_from)
    # Restore the trained task and switch it to evaluation mode on the active device.
    task: Task = Object.load_checkpoint(ckpt_dir / 'task.ckpt').to(device)
    task.eval()
    test_dataset: Dataset = Object.load_checkpoint(ckpt_dir / 'test_dataset.ckpt')
    loader = test_dataset.to_dataloader(batch_size=1, num_workers=6)
    with torch.no_grad():
        for batch in loader:
            batch: Output = batch.to(device)
            result = task(**batch.subset('x', 'x_len', as_type='dict'))
            for utterance, prediction in zip(batch.name, result.prediction):
                print(utterance, prediction)
|
def parse_args():
    """Command-line options for the LibriSpeech training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('librispeech', help='The root directory of LibriSpeech')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    # All schedule knobs are plain integer flags and share the same declaration shape.
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 5000),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    return parser.parse_args()
|
def main():
    """Train, validate, and finally test the LibriSpeech recipe with a plain PyTorch loop.

    Resumes task/optimizer state from ``save_to`` when checkpoints exist, trains
    until ``--total_steps`` optimizer steps have been taken, and then runs the
    test loop exactly once.
    """
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    args = parse_args()
    librispeech = Path(args.librispeech)
    assert librispeech.is_dir()
    save_to = Path(args.save_to)
    save_to.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(librispeech, splits=['train-clean-100', 'dev-clean', 'test-clean'])

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True)
    train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
    train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=4, collate_fn=train_dataset.collate_fn)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataset.save_checkpoint(save_to / 'valid_dataset.ckpt')
    valid_sampler = problem.ValidSampler(valid_dataset, 8)
    valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
    valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=4, collate_fn=valid_dataset.collate_fn)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataset.save_checkpoint(save_to / 'test_dataset.ckpt')
    test_sampler = problem.TestSampler(test_dataset, 8)
    test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
    test_dataloader = DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=4, collate_fn=test_dataset.collate_fn)

    latest_task = save_to / 'task.ckpt'
    if latest_task.is_file():
        logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
        task = Object.load_checkpoint(latest_task).to(device)
    else:
        logger.info('No last checkpoint found. Create new model')
        upstream = S3PRLUpstream('apc')
        downstream = problem.DownstreamModel(upstream.output_size, preprocessor.statistics().output_size, hidden_size=[512], dropout=[0.2])
        model = UpstreamDownstreamModel(upstream, downstream)
        task = problem.Task(model, preprocessor.statistics().label_loader)
        task = task.to(device)

    # Cleanup: the original built a second identical Adam in an else-branch;
    # one construction followed by an optional state_dict load is equivalent.
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    latest_optimizer = save_to / 'optimizer.ckpt'
    if latest_optimizer.is_file():
        optimizer.load_state_dict(torch.load(save_to / 'optimizer.ckpt'))

    pbar = tqdm(total=args.total_steps, desc='Total')
    train_completed = False
    # Bug fix: this was `while True` with no exit condition, so training never
    # terminated and the test loop below was unreachable dead code.
    while not train_completed:
        batch_results = []
        for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
            pbar.update(1)
            global_step = pbar.n
            assert isinstance(batch, Output)
            optimizer.zero_grad()
            batch = batch.to(device)
            task.train()
            result = task.train_step(**batch)
            assert isinstance(result, Output)
            result.loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
            if math.isnan(grad_norm):
                # Skip the update on a NaN gradient instead of corrupting the weights.
                logger.warning(f'Grad norm is NaN at step {global_step}')
            else:
                optimizer.step()
            batch_results.append(result.cacheable())
            if ((global_step + 1) % args.log_step) == 0:
                logs: Logs = task.train_reduction(batch_results).logs
                logger.info(f'[Train] step {global_step}')
                for log in logs.values():
                    logger.info(f'{log.name}: {log.data}')
                batch_results = []
            if ((global_step + 1) % args.eval_step) == 0:
                with torch.no_grad():
                    task.eval()
                    valid_results = []
                    for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
                        batch = batch.to(device)
                        result = task.valid_step(**batch)
                        valid_results.append(result.cacheable())
                    logs: Logs = task.valid_reduction(valid_results).logs
                    logger.info(f'[Valid] step {global_step}')
                    for log in logs.values():
                        logger.info(f'{log.name}: {log.data}')
            if ((global_step + 1) % args.save_step) == 0:
                task.save_checkpoint(save_to / 'task.ckpt')
                torch.save(optimizer.state_dict(), save_to / 'optimizer.ckpt')
            if global_step >= args.total_steps:
                train_completed = True
                break

    # With training finished, evaluate on the full test set once
    # (previously unreachable because the loop above never exited).
    with torch.no_grad():
        task.eval()  # robustness: ensure dropout/normalization layers run in eval mode
        test_results = []
        for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
            batch = batch.to(device)
            result = task.test_step(**batch)
            test_results.append(result.cacheable())
        logs: Logs = task.test_reduction(test_results).logs
        logger.info('[Test] step results')
        for log in logs.values():
            logger.info(f'{log.name}: {log.data}')
|
def parse_args():
    """Command-line options for the Lightning LibriSpeech training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('librispeech', help='The root directory of LibriSpeech')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    # Integer schedule knobs share the same declaration shape.
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 5000),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
    # Batch limits default to None, meaning "no limit" downstream.
    parser.add_argument('--limit_train_batches', type=int)
    parser.add_argument('--limit_val_batches', type=int)
    parser.add_argument('--fast_dev_run', action='store_true')
    return parser.parse_args()
|
def main():
    """Train the LibriSpeech recipe with PyTorch-Lightning, resuming from last.ckpt when present."""
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    corpus_root = Path(args.librispeech)
    ckpt_dir = Path(args.save_to)
    ckpt_dir.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(corpus_root)

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_dataloader = train_dataset.to_dataloader(batch_size=8, num_workers=6, shuffle=True)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataloader = valid_dataset.to_dataloader(batch_size=8, num_workers=6)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataloader = test_dataset.to_dataloader(batch_size=8, num_workers=6)

    # Persist eval datasets so standalone inference scripts can reload them later.
    valid_dataset.save_checkpoint(ckpt_dir / 'valid_dataset.ckpt')
    test_dataset.save_checkpoint(ckpt_dir / 'test_dataset.ckpt')

    upstream = S3PRLUpstream('apc')
    downstream = problem.DownstreamModel(upstream.output_size, preprocessor.statistics().output_size)
    model = UpstreamDownstreamModel(upstream, downstream)
    task = problem.Task(model, preprocessor.statistics().label_loader)
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    lightning_task = LightningModuleSimpleWrapper(task, optimizer)

    checkpoint_callback = ModelCheckpoint(
        dirpath=str(ckpt_dir),
        filename='superb-asr-{step:02d}-{valid_0_wer:.2f}',
        monitor='valid_0_wer',
        save_last=True,
        save_top_k=3,
        mode='min',
        every_n_train_steps=args.save_step,
    )
    trainer = Trainer(
        callbacks=[checkpoint_callback],
        accelerator='gpu',
        gpus=1,
        max_steps=args.total_steps,
        log_every_n_steps=args.log_step,
        val_check_interval=args.eval_step,
        limit_val_batches=args.limit_val_batches or 1.0,
        limit_train_batches=args.limit_train_batches or 1.0,
        fast_dev_run=args.fast_dev_run,
    )

    last_ckpt = ckpt_dir / 'last.ckpt'
    if args.not_resume or not last_ckpt.is_file():
        last_ckpt = None
    trainer.fit(lightning_task, train_dataloader, val_dataloaders=[valid_dataloader, test_dataloader], ckpt_path=last_ckpt)
|
def parse_args():
    """Parse the single positional checkpoint-directory argument."""
    parser = argparse.ArgumentParser()
    parser.add_argument('load_from', help='The directory containing all the checkpoints')
    return parser.parse_args()
|
def main():
    """Load a trained task plus its saved test set, then print per-utterance predictions."""
    args = parse_args()
    root = Path(args.load_from)
    task: Task = Object.load_checkpoint(root / 'task.ckpt').to(device)
    task.eval()
    test_dataset: Dataset = Object.load_checkpoint(root / 'test_dataset.ckpt')
    dataloader = test_dataset.to_dataloader(batch_size=1, num_workers=6)
    # Inference only — no gradients needed.
    with torch.no_grad():
        for batch in dataloader:
            batch: Output = batch.to(device)
            result = task(**batch.subset('x', 'x_len', as_type='dict'))
            for sample_name, pred in zip(batch.name, result.prediction):
                print(sample_name, pred)
|
def parse_args():
    """Command-line options for the VoxCeleb1 training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('voxceleb1', help='The root directory of VoxCeleb1')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 5000),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--resume', action='store_true')
    return parser.parse_args()
|
def main():
    """Train a VoxCeleb1 task with a plain PyTorch loop, evaluating and testing periodically.

    Resumes from ``save_to`` when ``--resume`` is given and checkpoints exist,
    then trains until ``--total_steps`` optimizer steps have been taken.
    """
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    args = parse_args()
    voxceleb1 = Path(args.voxceleb1)
    assert voxceleb1.is_dir()
    save_to = Path(args.save_to)
    save_to.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(voxceleb1)

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 200), shuffle=True)
    train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
    train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=12, collate_fn=train_dataset.collate_fn)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataset.save_checkpoint(save_to / 'valid_dataset.ckpt')
    valid_sampler = problem.ValidSampler(valid_dataset, 8)
    valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
    valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=12, collate_fn=valid_dataset.collate_fn)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataset.save_checkpoint(save_to / 'test_dataset.ckpt')
    test_sampler = problem.TestSampler(test_dataset, 8)
    test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
    # Bug fix: test_sampler was constructed but never used — the loader silently
    # fell back to plain batch_size=8 batching. Wire it in like train/valid.
    test_dataloader = DataLoader(test_dataset, batch_sampler=test_sampler, num_workers=12, collate_fn=test_dataset.collate_fn)

    latest_task = save_to / 'task.ckpt'
    if args.resume and latest_task.is_file():
        logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
        task = Object.load_checkpoint(latest_task).to(device)
    else:
        logger.info('No last checkpoint found. Create new model')
        upstream = S3PRLUpstream('wav2vec2')
        downstream = problem.DownstreamModel(upstream.output_size, len(preprocessor.statistics().category))
        model = UpstreamDownstreamModel(upstream, downstream)
        task = problem.Task(model, preprocessor.statistics().category)
        task = task.to(device)

    # Cleanup: a single Adam construction plus optional state load replaces the
    # original redundant else-branch that built an identical optimizer.
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    latest_optimizer = save_to / 'optimizer.ckpt'
    if args.resume and latest_optimizer.is_file():
        optimizer.load_state_dict(torch.load(save_to / 'optimizer.ckpt'))

    pbar = tqdm(total=args.total_steps, desc='Total')
    train_completed = False
    # Bug fix: was `while True` with no exit condition, so the script never terminated.
    while not train_completed:
        batch_results = []
        for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
            pbar.update(1)
            global_step = pbar.n
            assert isinstance(batch, Output)
            optimizer.zero_grad()
            batch = batch.to(device)
            task.train()
            result = task.train_step(**batch)
            assert isinstance(result, Output)
            result.loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
            if math.isnan(grad_norm):
                # Skip the update on a NaN gradient instead of corrupting the weights.
                logger.warning(f'Grad norm is NaN at step {global_step}')
            else:
                optimizer.step()
            batch_results.append(result.cacheable())
            if ((global_step + 1) % args.log_step) == 0:
                logs: Logs = task.train_reduction(batch_results).logs
                logger.info(f'[Train] step {global_step}')
                for log in logs.values():
                    logger.info(f'{log.name}: {log.data}')
                batch_results = []
            if ((global_step + 1) % args.eval_step) == 0:
                with torch.no_grad():
                    task.eval()
                    valid_results = []
                    for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
                        batch = batch.to(device)
                        result = task.valid_step(**batch)
                        valid_results.append(result.cacheable())
                    logs: Logs = task.valid_reduction(valid_results).logs
                    logger.info(f'[Valid] step {global_step}')
                    for log in logs.values():
                        logger.info(f'{log.name}: {log.data}')
                    test_results = []
                    for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
                        batch = batch.to(device)
                        result = task.test_step(**batch)
                        test_results.append(result.cacheable())
                    logs: Logs = task.test_reduction(test_results).logs
                    logger.info(f'[Test] step {global_step}')
                    for log in logs.values():
                        logger.info(f'{log.name}: {log.data}')
            if ((global_step + 1) % args.save_step) == 0:
                task.save_checkpoint(save_to / 'task.ckpt')
                torch.save(optimizer.state_dict(), save_to / 'optimizer.ckpt')
            if global_step >= args.total_steps:
                train_completed = True
                break
|
def parse_args():
    """Command-line options for the Lightning VoxCeleb1 training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('voxceleb1', help='The root directory of VoxCeleb1')
    parser.add_argument('save_to', help='The directory to save checkpoint')
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 5000),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
    # Batch limits default to None, meaning "no limit" downstream.
    parser.add_argument('--limit_train_batches', type=int)
    parser.add_argument('--limit_val_batches', type=int)
    parser.add_argument('--fast_dev_run', action='store_true')
    return parser.parse_args()
|
def main():
    """Train the speaker-identification recipe with PyTorch-Lightning, resuming from last.ckpt when present."""
    logging.basicConfig(level=logging.INFO)
    args = parse_args()
    corpus_root = Path(args.voxceleb1)
    ckpt_dir = Path(args.save_to)
    ckpt_dir.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(corpus_root)

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_dataloader = train_dataset.to_dataloader(batch_size=8, num_workers=6, shuffle=True)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataloader = valid_dataset.to_dataloader(batch_size=8, num_workers=6)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataloader = test_dataset.to_dataloader(batch_size=8, num_workers=6)

    # Persist eval datasets so standalone inference scripts can reload them later.
    valid_dataset.save_checkpoint(ckpt_dir / 'valid_dataset.ckpt')
    test_dataset.save_checkpoint(ckpt_dir / 'test_dataset.ckpt')

    upstream = S3PRLUpstream('apc')
    downstream = problem.DownstreamModel(upstream.output_size, len(preprocessor.statistics().category))
    model = UpstreamDownstreamModel(upstream, downstream)
    task = problem.Task(model, preprocessor.statistics().category)
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    lightning_task = LightningModuleSimpleWrapper(task, optimizer)

    checkpoint_callback = ModelCheckpoint(
        dirpath=str(ckpt_dir),
        filename='superb-sid-{step:02d}-{valid_0_accuracy:.2f}',
        monitor='valid_0_accuracy',
        save_last=True,
        save_top_k=3,
        mode='max',
        every_n_train_steps=args.save_step,
    )
    trainer = Trainer(
        callbacks=[checkpoint_callback],
        accelerator='gpu',
        gpus=1,
        max_steps=args.total_steps,
        log_every_n_steps=args.log_step,
        val_check_interval=args.eval_step,
        limit_val_batches=args.limit_val_batches or 1.0,
        limit_train_batches=args.limit_train_batches or 1.0,
        fast_dev_run=args.fast_dev_run,
    )

    last_ckpt = ckpt_dir / 'last.ckpt'
    if args.not_resume or not last_ckpt.is_file():
        last_ckpt = None
    trainer.fit(lightning_task, train_dataloader, val_dataloaders=[valid_dataloader, test_dataloader], ckpt_path=last_ckpt)
|
def parse_args():
    """Parse the (optional) checkpoint-directory argument for SV inference."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--load_from', type=str, default='result/sv', help='The directory containing all the checkpoints')
    return parser.parse_args()
|
def main():
    """Load a trained SV task and print hidden-state shapes over the saved test set."""
    args = parse_args()
    ckpt_dir = Path(args.load_from)
    task: Task = Object.load_checkpoint(ckpt_dir / 'task.ckpt').to(device)
    task.eval()
    test_dataset: Dataset = Object.load_checkpoint(ckpt_dir / 'test_dataset.ckpt')
    loader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)
    # Inference only — no gradients needed.
    with torch.no_grad():
        for batch in loader:
            batch: Output = batch.to(device)
            result = task(**batch.subset('x', 'x_len', as_type='dict'))
            print(result.hidden_states.shape)
|
def parse_args():
    """Command-line options for the speaker-verification training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--voxceleb1', type=str, default='/work/jason410/PublicData/Voxceleb1', help='The root directory of VoxCeleb1')
    parser.add_argument('--save_to', type=str, default='result/sv', help='The directory to save checkpoint')
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 200),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    # Model / loss hyper-parameters.
    parser.add_argument('--backbone', type=str, default='XVector')
    parser.add_argument('--pooling_type', type=str, default='TAP')
    parser.add_argument('--loss_type', type=str, default='softmax')
    parser.add_argument('--spk_embd_dim', type=int, default=1500)
    return parser.parse_args()
|
def main():
    """Train a speaker-verification task with a plain PyTorch loop.

    Resumes task/optimizer state from ``save_to`` when checkpoints exist, trains
    until ``--total_steps`` optimizer steps have been taken, running validation
    and the trial test every ``--eval_step`` steps.
    """
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    args = parse_args()
    voxceleb1 = Path(args.voxceleb1)
    assert voxceleb1.is_dir()
    save_to = Path(args.save_to)
    save_to.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(voxceleb1)

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_sampler = problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True)
    train_sampler = DistributedBatchSamplerWrapper(train_sampler, num_replicas=1, rank=0)
    train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=6, collate_fn=train_dataset.collate_fn)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataset.save_checkpoint(save_to / 'valid_dataset.ckpt')
    # NOTE(review): validation reuses TrainSampler (shuffled, duration-bucketed)
    # rather than a dedicated ValidSampler — presumably intentional; confirm.
    valid_sampler = problem.TrainSampler(valid_dataset, max_timestamp=(16000 * 1000), shuffle=True)
    valid_sampler = DistributedBatchSamplerWrapper(valid_sampler, num_replicas=1, rank=0)
    valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=6, collate_fn=valid_dataset.collate_fn)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataset.save_checkpoint(save_to / 'test_dataset.ckpt')
    test_sampler = problem.TestSampler(test_dataset, 8)
    test_sampler = DistributedBatchSamplerWrapper(test_sampler, num_replicas=1, rank=0)
    # NOTE(review): test_sampler is built but unused; the loader iterates one
    # trial at a time (batch_size=1). Left as-is — confirm which was intended.
    test_dataloader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)

    latest_task = save_to / 'task.ckpt'
    if latest_task.is_file():
        logger.info('Last checkpoint found. Load model and optimizer from checkpoint')
        task = Object.load_checkpoint(latest_task).to(device)
    else:
        logger.info('No last checkpoint found. Create new model')
        upstream = S3PRLUpstream('apc')
        downstream = problem.speaker_embedding_extractor(backbone=args.backbone, pooling_type=args.pooling_type, input_size=upstream.output_size, output_size=args.spk_embd_dim)
        model = UpstreamDownstreamModel(upstream, downstream)
        task = problem.Task(model=model, categories=preprocessor.statistics().category, loss_type=args.loss_type, trials=test_dataset.statistics().label)
        task = task.to(device)

    # Cleanup: a single Adam construction plus optional state load replaces the
    # original redundant else-branch that built an identical optimizer.
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    latest_optimizer = save_to / 'optimizer.ckpt'
    if latest_optimizer.is_file():
        optimizer.load_state_dict(torch.load(save_to / 'optimizer.ckpt'))

    pbar = tqdm(total=args.total_steps, desc='Total')
    train_completed = False
    # Bug fix: was `while True` with no exit condition, so the script never terminated.
    while not train_completed:
        batch_results = []
        for batch in tqdm(train_dataloader, desc='Train', total=len(train_dataloader)):
            pbar.update(1)
            global_step = pbar.n
            assert isinstance(batch, Output)
            optimizer.zero_grad()
            batch = batch.to(device)
            task.train()
            result = task.train_step(**batch)
            assert isinstance(result, Output)
            result.loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(task.parameters(), max_norm=1.0)
            if math.isnan(grad_norm):
                # Skip the update on a NaN gradient instead of corrupting the weights.
                logger.warning(f'Grad norm is NaN at step {global_step}')
            else:
                optimizer.step()
            batch_results.append(result.cacheable())
            if ((global_step + 1) % args.log_step) == 0:
                logs: Logs = task.train_reduction(batch_results).logs
                logger.info(f'[Train] step {global_step}')
                for log in logs.values():
                    logger.info(f'{log.name}: {log.data}')
                batch_results = []
            if ((global_step + 1) % args.eval_step) == 0:
                with torch.no_grad():
                    task.eval()
                    valid_results = []
                    for batch in tqdm(valid_dataloader, desc='Valid', total=len(valid_dataloader)):
                        batch = batch.to(device)
                        result = task.valid_step(**batch)
                        valid_results.append(result.cacheable())
                    logs: Logs = task.valid_reduction(valid_results).logs
                    logger.info(f'[Valid] step {global_step}')
                    for log in logs.values():
                        logger.info(f'{log.name}: {log.data}')
                    test_results = []
                    for batch in tqdm(test_dataloader, desc='Test', total=len(test_dataloader)):
                        batch = batch.to(device)
                        result = task.test_step(**batch)
                        # Bug fix: raw step results were appended, keeping every
                        # step's live tensors alive; cache them like the other loops.
                        test_results.append(result.cacheable())
                    logs: Logs = task.test_reduction(batch_results=test_results).logs
                    logger.info(f'[Test] step {global_step}')
                    for log in logs.values():
                        logger.info(f'{log.name}: {log.data}')
            if ((global_step + 1) % args.save_step) == 0:
                task.save_checkpoint(save_to / 'task.ckpt')
                torch.save(optimizer.state_dict(), save_to / 'optimizer.ckpt')
            if global_step >= args.total_steps:
                train_completed = True
                break
|
def parse_args():
    """Command-line options for the Lightning speaker-verification training script."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--voxceleb1', type=str, default='/work/jason410/PublicData/Voxceleb1', help='The root directory of VoxCeleb1')
    parser.add_argument('--save_to', type=str, default='lightning_result/sv', help='The directory to save checkpoint')
    for flag, default in (
        ('--total_steps', 200000),
        ('--log_step', 100),
        ('--eval_step', 1000),
        ('--save_step', 100),
    ):
        parser.add_argument(flag, type=int, default=default)
    parser.add_argument('--not_resume', action='store_true', help="Don't resume from the last checkpoint")
    # Batch limits default to None, meaning "no limit" downstream.
    parser.add_argument('--limit_train_batches', type=int)
    parser.add_argument('--limit_val_batches', type=int)
    parser.add_argument('--fast_dev_run', action='store_true')
    # Model / loss hyper-parameters.
    parser.add_argument('--backbone', type=str, default='XVector')
    parser.add_argument('--pooling_type', type=str, default='TAP')
    parser.add_argument('--loss_type', type=str, default='softmax')
    parser.add_argument('--spk_embd_dim', type=int, default=1500)
    return parser.parse_args()
|
def main():
    """Train the speaker-verification recipe with PyTorch-Lightning, then run the trial test."""
    logging.basicConfig()
    logger.setLevel(logging.INFO)
    args = parse_args()
    corpus_root = Path(args.voxceleb1)
    assert corpus_root.is_dir()
    ckpt_dir = Path(args.save_to)
    ckpt_dir.mkdir(exist_ok=True, parents=True)

    logger.info('Preparing preprocessor')
    preprocessor = problem.Preprocessor(corpus_root)

    logger.info('Preparing train dataloader')
    train_dataset = problem.TrainDataset(**preprocessor.train_data())
    train_sampler = DistributedBatchSamplerWrapper(
        problem.TrainSampler(train_dataset, max_timestamp=(16000 * 1000), shuffle=True),
        num_replicas=1, rank=0)
    train_dataloader = DataLoader(train_dataset, batch_sampler=train_sampler, num_workers=6, collate_fn=train_dataset.collate_fn)

    logger.info('Preparing valid dataloader')
    valid_dataset = problem.ValidDataset(**preprocessor.valid_data(), **train_dataset.statistics())
    valid_dataset.save_checkpoint(ckpt_dir / 'valid_dataset.ckpt')
    valid_sampler = DistributedBatchSamplerWrapper(
        problem.TrainSampler(valid_dataset, max_timestamp=(16000 * 1000), shuffle=True),
        num_replicas=1, rank=0)
    valid_dataloader = DataLoader(valid_dataset, batch_sampler=valid_sampler, num_workers=6, collate_fn=valid_dataset.collate_fn)

    logger.info('Preparing test dataloader')
    test_dataset = problem.TestDataset(**preprocessor.test_data(), **train_dataset.statistics())
    test_dataset.save_checkpoint(ckpt_dir / 'test_dataset.ckpt')
    test_sampler = DistributedBatchSamplerWrapper(problem.TestSampler(test_dataset, 8), num_replicas=1, rank=0)
    # NOTE(review): test_sampler is unused here; the test loader iterates one
    # trial at a time (batch_size=1) — confirm which batching was intended.
    test_dataloader = DataLoader(test_dataset, batch_size=1, num_workers=6, collate_fn=test_dataset.collate_fn)

    upstream = S3PRLUpstream('apc')
    downstream = problem.speaker_embedding_extractor(backbone=args.backbone, pooling_type=args.pooling_type, input_size=upstream.output_size, output_size=args.spk_embd_dim)
    model = UpstreamDownstreamModel(upstream, downstream)
    task = problem.Task(model=model, categories=preprocessor.statistics().category, loss_type=args.loss_type, trials=test_dataset.statistics().label)
    optimizer = optim.Adam(task.parameters(), lr=0.001)
    lightning_task = LightningModuleSimpleWrapper(task, optimizer)

    checkpoint_callback = ModelCheckpoint(
        dirpath=str(ckpt_dir),
        filename='superb-sv-{step:02d}-{valid_0_accuracy:.2f}',
        monitor='valid_0_accuracy',
        save_last=True,
        save_top_k=3,
        mode='max',
        every_n_train_steps=args.save_step,
    )
    trainer = Trainer(
        callbacks=[checkpoint_callback],
        accelerator='gpu',
        gpus=1,
        max_steps=args.total_steps,
        log_every_n_steps=args.log_step,
        val_check_interval=args.eval_step,
        limit_val_batches=args.limit_val_batches or 1.0,
        limit_train_batches=args.limit_train_batches or 1.0,
        fast_dev_run=args.fast_dev_run,
    )

    last_ckpt = ckpt_dir / 'last.ckpt'
    if args.not_resume or not last_ckpt.is_file():
        last_ckpt = None
    trainer.fit(lightning_task, train_dataloader, val_dataloaders=valid_dataloader, ckpt_path=last_ckpt)
    trainer.test(lightning_task, dataloaders=test_dataloader, ckpt_path=last_ckpt)
|
def default_collate_fn(samples, padding_value: int = 0):
    """Collate a batch of per-utterance dicts into batched arrays.

    Each item in **DynamicItemDataset** is a dict. For every key, the values
    across the batch are merged as follows:

    - ``int``   -> ``torch.LongTensor`` of shape (batch,)
    - ``float`` -> ``torch.FloatTensor`` of shape (batch,)
    - ``np.ndarray`` -> converted to float tensors, then padded batch-first
    - ``torch.Tensor`` -> padded batch-first
    - anything else (e.g. strings) -> ``np.ndarray`` with dtype object

    Args:
        samples (List[dict]): one dict per utterance, all sharing the same keys
        padding_value (int): fill value used when padding variable-length data

    Return:
        dict: same keys, batched values
    """
    assert isinstance(samples[0], dict)
    collated = dict()
    for key in samples[0].keys():
        column = [sample[key] for sample in samples]
        first = column[0]
        if isinstance(first, int):
            batched = torch.LongTensor(column)
        elif isinstance(first, float):
            batched = torch.FloatTensor(column)
        elif isinstance(first, np.ndarray):
            tensors = [torch.from_numpy(item).float() for item in column]
            batched = pad_sequence(tensors, batch_first=True, padding_value=padding_value)
        elif isinstance(first, torch.Tensor):
            batched = pad_sequence(column, batch_first=True, padding_value=padding_value)
        else:
            batched = np.array(column, dtype='object')
        collated[key] = batched
    return collated
|
class Corpus():
    """Abstract corpus with a canonical train/valid/test split.

    Subclasses provide ``all_data`` and ``data_split_ids``; ``data_split``
    materializes the three splits from them.

    NOTE(review): the class does not inherit ``abc.ABC``, so the
    ``@abc.abstractmethod`` decorators are not enforced at instantiation time —
    left unchanged to avoid breaking existing direct instantiations.
    """

    @property
    @abc.abstractmethod
    def all_data(self) -> dict:
        """Mapping from unique data-point id to its data-point dict."""
        raise NotImplementedError

    @property
    @abc.abstractmethod
    def data_split_ids(self):
        """Tuple of (train_ids, valid_ids, test_ids) iterables."""
        raise NotImplementedError

    @property
    def data_split(self):
        """Materialize (train_data, valid_data, test_data) dicts keyed by id."""
        (train_ids, valid_ids, test_ids) = self.data_split_ids
        all_data = self.all_data
        train_data = {idx: all_data[idx] for idx in train_ids}
        valid_data = {idx: all_data[idx] for idx in valid_ids}
        test_data = {idx: all_data[idx] for idx in test_ids}
        return (train_data, valid_data, test_data)

    @staticmethod
    def dataframe_to_datapoints(df: pd.DataFrame, unique_name_fn: callable):
        """Convert each DataFrame row into a data-point dict keyed by a unique name.

        Args:
            df: one row per data point, one column per field.
            unique_name_fn: maps a row dict to its unique key.

        Returns:
            dict: unique name -> row dict.

        Raises:
            AssertionError: if ``unique_name_fn`` produced colliding names.
        """
        data_points = {}
        for (_, row) in df.iterrows():
            # Bug fix: Series.iteritems() was removed in pandas 2.0; .items() is
            # the drop-in replacement and exists in all supported versions.
            data_point = dict(row.items())
            unique_name = unique_name_fn(data_point)
            data_points[unique_name] = data_point
        # Bug fix: the message previously reported len(data_point) — the size of a
        # single row — instead of the number of collected data points.
        assert (len(data_points) == len(df)), f'{len(data_points)} != {len(df)}'
        return data_points
|
# NOTE: trailing non-code residue from a web/dataset-page extraction
# ("Subsets and Splits / No community queries yet / The top public SQL queries
# from the community will appear here once available.") converted to a comment
# so the module remains parseable.