code stringlengths 17 6.64M |
|---|
def compute_conv2d_ds(in_h, in_w, in_ch, out_ch, k_w, k_h):
    """Complexity for Depthwise Separable convolution.

    $$ O_{ds} = O_pw + O_dw $$

    Sum of the pointwise (1x1) and depthwise contributions.
    """
    pointwise = compute_conv2d_pw(in_h, in_w, in_ch, out_ch)
    depthwise = compute_conv2d_dw(in_h, in_w, in_ch, k_w, k_h)
    return pointwise + depthwise
|
def is_training_scope(scope):
    """Return True when a TF scope name belongs to training-only ops.

    Training-only scopes (dropout, weight regularizers, batch-norm moving
    average updates, random init) should be excluded from inference cost.
    """
    patterns = ('/random_uniform', '/weight_regularizer', '/dropout_', '/dropout/', 'AssignMovingAvg')
    # any() short-circuits on first match; the original kept scanning all
    # patterns after a hit
    return any(p in scope for p in patterns)
|
def analyze_model(build_func, input_shapes, n_classes):
    """Profile a Keras model: per-scope FLOPS and parameter counts.

    Builds the model inside a fresh TF1-style Graph/Session, then runs
    tf.profiler twice over the graph (trainable parameters, float ops).

    build_func: zero-argument callable returning a Keras model/callable.
    input_shapes: list of per-input shapes, without the batch dimension.
    n_classes: not used in this function body -- TODO confirm why it is kept.

    Returns (flops, params): two dicts keyed by top-level scope name.
    """
    # NOTE(review): these imports look unused here; they may be kept for
    # side effects -- confirm before removing
    from tensorflow.python.framework import graph_util
    import tensorflow.python.framework.ops as ops
    from tensorflow.compat.v1.graph_util import remove_training_nodes
    from tensorflow.python.tools import optimize_for_inference_lib
    g = tf.Graph()
    run_meta = tf.RunMetadata()
    with tf.Session(graph=g) as sess:
        keras.backend.set_session(sess)
        base = build_func()
        inputs = []
        for shape in input_shapes:
            input_shape = ([1] + list(shape))  # fixed batch size of 1
            inp = tf.placeholder(tf.float32, input_shape)
            inputs.append(inp)
        # Calling the model materializes its ops in graph g; the returned
        # value itself is not used further
        model = base(inputs)
    # Parameter counts per top-level scope
    opts = tf.profiler.ProfileOptionBuilder().trainable_variables_parameter()
    opts['output'] = 'none'  # silence profiler stdout
    params_stats = tf.profiler.profile(g, run_meta=run_meta, cmd='scope', options=opts)
    params = {}
    for scope in params_stats.children:
        params[scope.name] = scope.total_parameters
    # Float operations per top-level scope
    flops = {}
    opts = tf.profiler.ProfileOptionBuilder().float_operation()
    opts['output'] = 'none'
    flops_stats = tf.profiler.profile(g, run_meta=run_meta, cmd='scope', options=opts)
    for scope in flops_stats.children:
        flops[scope.name] = scope.total_float_ops
    return (flops, params)
|
def layer_info(model):
    """Summarize a Keras model's layers: name, type and I/O shapes/sizes."""
    rows = {
        'name': [layer.name for layer in model.layers],
        'type': [layer.__class__.__name__ for layer in model.layers],
        'shape_in': [layer.get_input_shape_at(0)[1:] for layer in model.layers],
        'shape_out': [layer.get_output_shape_at(0)[1:] for layer in model.layers],
    }
    df = pandas.DataFrame(rows)
    # Element counts, batch dimension excluded by the [1:] slices above
    df['size_in'] = df.shape_in.apply(numpy.prod)
    df['size_out'] = df.shape_out.apply(numpy.prod)
    return df
|
def stm32layer_sizes(stats):
    """Aggregate per-array sizes from stm32convert stats into per-layer totals.

    stats['arrays'] maps generated C array names (e.g. 'conv1_weights_array')
    to sizes. Returns a DataFrame indexed by layer name with 'activations'
    and 'weights' columns.

    Raises AssertionError for an array name with an unknown suffix.
    """
    activation_types = set(['_output_array', '_output_in_array', '_output_out_array'])
    weight_types = set(['_weights_array', '_bias_array', '_scale_array'])
    array_types = activation_types.union(weight_types)
    def lazy_add(d, key, value):
        # Accumulate, creating the entry on first use
        if (d.get(key, None) is None):
            d[key] = 0
        d[key] += value
    activations = {}
    weights = {}
    for (name, size) in stats['arrays'].items():
        known = False
        for suffix in array_types:
            if name.endswith(suffix):
                # BUGFIX: was name.rstrip(suffix), which strips any trailing
                # *characters* that appear in the suffix and mangles layer
                # names (e.g. 'layer_weights_array' -> 'l'); slice instead
                layer_name = name[:-len(suffix)]
                out = (activations if (suffix in activation_types) else weights)
                lazy_add(out, layer_name, size)
                known = True
        assert known, 'Unknown array {}'.format(name)
    layers = set(activations.keys()).union(set(weights.keys()))
    df = pandas.DataFrame({'activations': [activations.get(n, math.nan) for n in layers], 'weights': [weights.get(n, math.nan) for n in layers]}, dtype='int', index=list(layers))
    return df
|
def model_info(model):
    """Generate STM32 code for a model and collect its size statistics.

    model: a Keras model instance, or a path (str) to a saved model file.

    Returns (stats, combined): stats is the stm32convert stats dict minus
    the raw per-array sizes; combined is a DataFrame joining Keras layer
    info with per-layer activation/weight sizes.
    """
    with tempfile.TemporaryDirectory(prefix='microesc') as tempdir:
        out_dir = tempdir
        if (type(model) == str):
            # Given a path: load it so layer_info can introspect the model
            model_path = model
            model = keras.models.load_model(model_path)
        else:
            # Given a model: save it so the converter can read it from disk
            model_path = os.path.join(out_dir, 'model.hd5f')
            model.save(model_path)
        out_path = os.path.join(out_dir, 'gen')
        stats = stm32convert.generatecode(model_path, out_path, name='network', model_type='keras', compression=None)
        layers = layer_info(model)
        sizes = stm32layer_sizes(stats)
        # Inner join: keep only layers known to both Keras and the converter
        combined = layers.join(sizes, on='name', how='inner')
        # Raw array sizes are folded into `combined`; drop from stats
        del stats['arrays']
        return (stats, combined)
|
def check_model_constraints(model, max_ram=64000.0, max_maccs=(4500000.0 * 0.72), max_flash=512000.0):
    """Verify that a model fits the device budgets for FLASH, RAM and CPU.

    Raises AssertionError when a limit is exceeded; otherwise returns the
    (stats, combined) tuple from model_info.
    """
    stats, combined = model_info(model)
    budgets = [
        ('flash_usage', max_flash, 'FLASH use too high: {} > {}'),
        ('ram_usage_max', max_ram, 'RAM use too high: {} > {}'),
        ('maccs_frame', max_maccs, 'CPU use too high: {} > {}'),
    ]
    for key, limit, message in budgets:
        value = stats[key]
        assert value <= limit, message.format(value, limit)
    return (stats, combined)
|
def main():
    """Profile candidate models and print FLOPS/parameter summaries."""
    # NOTE(review): sample_rate / window_stride_ms and build_speech_tiny are
    # unused below; build_speech_tiny also references `frames`/`bands` which
    # are not defined in this scope and would fail if called -- confirm
    # whether this leftover can be removed
    sample_rate = 44100
    window_stride_ms = 10
    def build_speech_tiny():
        return speech.build_tiny_conv(input_frames=frames, input_bins=bands, n_classes=10)
    # name -> (builder function, list of input shapes)
    models = {'SB-CNN': (sbcnn.build_model, [(128, 128, 1)])}
    model_params = {}
    model_flops = {}
    model_stats = {name: analyze_model(build, shape, n_classes=10) for (name, (build, shape)) in models.items()}
    for (name, stats) in model_stats.items():
        (flops, params) = stats
        # Exclude training-only scopes (dropout, regularizers ...) from FLOPS
        inference_flops = {name: v for (name, v) in flops.items() if (not is_training_scope(name))}
        total_flops = sum(inference_flops.values())
        total_params = sum(params.values())
        model_params[name] = total_params
        model_flops[name] = total_flops
        print(name)
        print('Total: {:.2f}M FLOPS, {:.2f}K params'.format((total_flops / 1000000.0), (total_params / 1000.0)))
        # NOTE: the comprehensions above and below reuse/shadow `name`
        print('\n'.join(['\t{}: {} flops'.format(name, v) for (name, v) in inference_flops.items()]))
        print('')
        print('\n'.join(['\t{}: {} params'.format(name, v) for (name, v) in params.items()]))
        print('\n')
    print('p', model_params)
    print('f', model_flops)
|
def generate_config(model_path, out_path, name='network', model_type='keras', compression=None):
    """Build the JSON configuration string consumed by the X-CUBE-AI tool."""
    data = {
        'name': name,
        'toolbox': model_options[model_type],
        'models': {
            '1': [model_path, ''],
            '2': [model_path, ''],
            '3': [model_path, ''],
            '4': [model_path],
        },
        'compression': compression,
        'pinnr_path': out_path,
        'src_path': out_path,
        'inc_path': out_path,
        'plot_file': os.path.join(out_path, 'network.png'),
    }
    return json.dumps(data)
|
def parse_with_unit(s):
    """Parse a '<number> <unit>' string (e.g. '2 KBytes') into bytes.

    Raises KeyError for an unknown unit.
    """
    multipliers = {'KBytes': 1000.0, 'MBytes': 1000000.0}
    number, unit = s.split()
    return float(number) * multipliers[unit]
|
def extract_stats(output):
    """Parse the 'key: value' report lines printed by the X-CUBE-AI tool.

    output: raw stdout bytes from the generatecode subprocess.

    Returns a dict with maccs_frame, ram_usage_max/ram_usage_min and
    flash_usage; keys are absent when the tool did not report them.
    """
    regex = ' ([^:]*):(.*)'
    out = {}
    matches = re.finditer(regex, output.decode('utf-8'), re.MULTILINE)
    # (cleanup: dropped the unused enumerate index and redundant `pass`)
    for match in matches:
        (key, value) = match.groups()
        key = key.strip()
        value = value.strip()
        if (key == 'MACC / frame'):
            out['maccs_frame'] = int(value)
        elif (key == 'RAM size'):
            # Value looks like '<max> (Minimum: <min>)'
            # (renamed the second part: it previously shadowed builtin `min`)
            (ram_max, ram_min) = value.split('(Minimum:')
            out['ram_usage_max'] = parse_with_unit(ram_max)
            out['ram_usage_min'] = parse_with_unit(ram_min.rstrip(')'))
        elif (key == 'ROM size'):
            out['flash_usage'] = parse_with_unit(value)
    return out
|
def test_ram_use():
    """extract_ram_use should parse array names and sizes from generated C."""
    source = '\n AI_ARRAY_OBJ_DECLARE(\n input_1_output_array, AI_DATA_FORMAT_FLOAT, \n NULL, NULL, 1860,\n AI_STATIC)\n AI_ARRAY_OBJ_DECLARE(\n conv2d_1_output_array, AI_DATA_FORMAT_FLOAT, \n NULL, NULL, 29760,\n AI_STATIC)\n '
    expected = {'input_1_output_array': 1860, 'conv2d_1_output_array': 29760}
    parsed = extract_ram_use(source)
    assert parsed == expected, parsed
|
def extract_ram_use(text):
    """Parse AI_ARRAY_OBJ_DECLARE(...) sizes out of generated network.c.

    Returns a dict mapping array name -> declared element count.
    (Parameter renamed from `str`, which shadowed the builtin; all visible
    callers pass it positionally.)
    """
    regex = r'AI_ARRAY_OBJ_DECLARE\(([^)]*)\)'
    out = {}
    for match in re.finditer(regex, text, re.MULTILINE):
        (items,) = match.groups()
        fields = [f.strip() for f in items.split(',')]
        # Declaration layout: name, format, <ptr>, <ptr>, size, modifiers
        (name, _format, _, _, size, _modifiers) = fields
        out[name] = int(size)
    return out
|
def generatecode(model_path, out_path, name, model_type, compression):
    """Run the X-CUBE-AI `generatecode` tool on a model and parse its stats.

    Writes the generated C sources to out_path. Returns a dict with
    maccs_frame / ram_usage_* / flash_usage and per-array RAM sizes
    under 'arrays'.

    Raises subprocess.CalledProcessError if the tool fails, and
    AssertionError if no stats could be parsed from its output.
    """
    home = str(pathlib.Path.home())
    # Tool version and location can be overridden via environment variables
    version = os.environ.get('XCUBEAI_VERSION', '3.4.0')
    platform_name = platform.system().lower()
    if (platform_name == 'darwin'):
        platform_name = 'mac'  # ST names the macOS tools directory 'mac'
    p = 'STM32Cube/Repository/Packs/STMicroelectronics/X-CUBE-AI/{version}/Utilities/{os}/generatecode'.format(version=version, os=platform_name)
    default_path = os.path.join(home, p)
    cmd_path = os.environ.get('XCUBEAI_GENERATECODE', default_path)
    if (not os.path.exists(out_path)):
        os.makedirs(out_path)
    config = generate_config(model_path, out_path, name=name, model_type=model_type, compression=compression)
    config_path = os.path.join(out_path, 'config.ai')
    with open(config_path, 'w') as f:
        f.write(config)
    args = [cmd_path, '--auto', '-c', config_path]
    # The tool reports stats on stdout/stderr; capture both
    stdout = subprocess.check_output(args, stderr=subprocess.STDOUT)
    stats = extract_stats(stdout)
    assert len(stats.keys()), 'No model output. Stdout: {}'.format(stdout)
    # Parse per-array RAM use out of the generated C source
    with open(os.path.join(out_path, 'network.c'), 'r') as f:
        network_c = f.read()
    ram = extract_ram_use(network_c)
    stats['arrays'] = ram
    return stats
|
def parse():
    """Parse command-line arguments for the model conversion tool."""
    parser = argparse.ArgumentParser(description='Process some integers.')
    add = parser.add_argument
    supported_types = '|'.join(model_options.keys())
    add('model', metavar='PATH', type=str, help='The model to convert')
    add('out', metavar='DIR', type=str, help='Where to write generated output')
    add('--type', default='keras', help='Type of model. {}'.format(supported_types))
    add('--name', default='network', help='Name of the generated network')
    add('--compression', default=None, type=int, help='Compression setting to use. Valid: 4|8')
    return parser.parse_args()
|
def main():
    """Convert a model to STM32 code and print the resulting stats."""
    args = parse()
    # Quick self-check of the RAM parser before doing real work
    test_ram_use()
    stats = generatecode(args.model, args.out, name=args.name, model_type=args.type, compression=args.compression)
    print('Wrote model to', args.out)
    print('Model status: ', json.dumps(stats))
|
def load_model_info(jobs_dir, job_dir):
    """Load the training history of one job directory into a DataFrame.

    job_dir must be named '<experiment>-<date>-<time>-<rnd>-foldN'.
    Adds epoch (1-indexed), fold, experiment, run and a per-epoch
    model_path column.

    Raises ValueError on a malformed directory name, FileNotFoundError
    when train.csv is missing, AssertionError when checkpoints are not a
    contiguous e01..eNN sequence.
    """
    (experiment, date, time, rnd, fold) = job_dir.split('-')
    hist_path = os.path.join(jobs_dir, job_dir, 'train.csv')
    df = pandas.read_csv(hist_path)
    df['epoch'] = (df.epoch + 1)  # stored 0-indexed; present 1-indexed
    df['fold'] = int(fold.lstrip('fold'))
    df['experiment'] = experiment
    df['run'] = '-'.join([date, time, rnd])
    # Collect the per-epoch model checkpoints
    models = []
    for fname in os.listdir(os.path.join(jobs_dir, job_dir)):
        if fname.endswith('model.hdf5'):
            models.append(fname)
    def get_epoch(s):
        # Checkpoint filenames look like 'eNN-...model.hdf5'
        e = s.split('-')[0].lstrip('e')
        e = int(e)
        return e
    models = sorted(models, key=get_epoch)
    # Sanity check: one checkpoint per epoch, starting at epoch 1
    assert models[0].startswith('e01-')
    last_model = models[(len(models) - 1)]
    expected_last = 'e{:02d}-'.format(len(models))
    assert last_model.startswith(expected_last), (last_model, expected_last)
    # Relies on history rows being in epoch order, one row per checkpoint
    df['model_path'] = [os.path.join(jobs_dir, job_dir, m) for m in models]
    return df
|
def load_train_history(jobs_dir, limit=None):
    """Load training histories for all jobs under jobs_dir into one DataFrame.

    When limit is given, only job directories whose name contains that
    substring are loaded. Jobs that fail to load are reported and skipped.
    """
    jobs = os.listdir(jobs_dir)
    matching = [d for d in jobs if limit in d] if limit else jobs
    frames = []
    for job_dir in matching:
        try:
            frames.append(load_model_info(jobs_dir, job_dir))
        except (FileNotFoundError, ValueError) as e:
            print('Failed to load job {}: {}'.format(job_dir, str(e)))
    return pandas.concat(frames)
|
def test_load_history():
    """Load the history of one known job run and check it is non-empty."""
    jobs_dir = '../../jobs'
    job_id = 'sbcnn44k128aug-20190227-0220-48ba'
    # BUGFIX: previously called the undefined `load_history()` and ignored
    # both prepared variables; use the loader defined in this module,
    # restricted to the job of interest
    df = load_train_history(jobs_dir, limit=job_id)
    assert len(df)
|
def pick_best(history, n_best=1):
    """Return the n_best rows per (experiment, fold), ranked by voted_val_acc."""
    def top_rows(group):
        ranked = group.sort_values('voted_val_acc', ascending=False)
        return ranked.head(n_best)
    return history.groupby(['experiment', 'fold']).apply(top_rows)
|
def evaluate_model(predictor, model_path, val_data, test_data):
    """Evaluate one saved model on val/test data, split by salience class.

    predictor(model, data) must return per-sample class probabilities.

    Returns a dict of confusion matrices keyed by
    '<set>_foreground'/'<set>_background' plus their sums under
    'val' and 'test'.
    """
    def score(model, data):
        # Accuracy + confusion matrix against the classID ground truth
        y_true = data.classID
        p = predictor(model, data)
        y_pred = numpy.argmax(p, axis=1)
        acc = sklearn.metrics.accuracy_score(y_true, y_pred)
        labels = list(range(len(urbansound8k.classnames)))
        confusion = sklearn.metrics.confusion_matrix(y_true, y_pred, labels=labels)
        return (acc, confusion)
    model = keras.models.load_model(model_path)
    # UrbanSound8K salience codes: 1 = foreground, 2 = background
    salience_info = {'foreground': 1, 'background': 2}
    test_info = {'val': val_data, 'test': test_data}
    out = {}
    for (setname, data) in test_info.items():
        for (variant, salience) in salience_info.items():
            key = '{}_{}'.format(setname, variant)
            (acc, confusion) = score(model, data[(data.salience == salience)])
            print('acc for ', key, acc)
            out[key] = confusion
    # Overall confusion per set = foreground + background counts
    out['val'] = (out['val_foreground'] + out['val_background'])
    out['test'] = (out['test_foreground'] + out['test_background'])
    return out
|
def evaluate(models, folds_data, predictor, out_dir, dry_run=False):
    """Evaluate each experiment's best models on their folds' val/test data.

    models: DataFrame indexed by (experiment, fold) with fold/model_path
    columns (as produced by pick_best).
    folds_data: list of (train, val, test) DataFrames; index 0 is fold 1.
    Writes one '<experiment>.confusion.npz' per experiment into out_dir
    and returns the per-experiment result paths.
    """
    def eval_experiment(df):
        results = {}
        by_fold = df.sort_index(level='fold', ascending=True)
        for (idx, row) in by_fold.iterrows():
            fold = row['fold']
            assert (fold > 0), 'fold number should be 1 indexed'
            print('Testing model {} fold={}'.format(row['experiment'], fold))
            model_path = row['model_path']
            # folds_data is 0-indexed: (train, val, test) per fold
            val = folds_data[(fold - 1)][1]
            test = folds_data[(fold - 1)][2]
            # Sanity: test data comes from exactly this fold ...
            test_folds = test.fold.unique()
            assert (len(test_folds) == 1)
            assert (test_folds[0] == fold)
            # ... and validation data from a single, different fold
            val_folds = val.fold.unique()
            assert (len(val_folds) == 1)
            assert (val_folds[0] != fold)
            # Guard against train/test data leakage
            train_data = folds_data[(fold - 1)][0]
            train_files = set(train_data.slice_file_name.unique())
            assert (len(train_files) > 6500), len(train_files)
            test_files = set(test.slice_file_name.unique())
            assert (len(test_files) > 700)
            common_files = train_files.intersection(test_files)
            assert (len(common_files) == 0), common_files
            if dry_run:
                # Tiny subset, just to exercise the pipeline
                val = test[0:20]
                test = test[0:20]
            result = evaluate_model(predictor, model_path, val, test)
            # Accumulate confusion matrices per key across folds
            for (k, v) in result.items():
                if (results.get(k) is None):
                    results[k] = []
                results[k].append(v)
        exname = df['experiment'].unique()[0]
        results_path = os.path.join(out_dir, '{}.confusion.npz'.format(exname))
        numpy.savez(results_path, **results)
        print('Wrote', results_path)
        return results_path
    out = models.groupby(level='experiment').apply(eval_experiment)
    return out
|
def parse(args):
    """Parse command-line arguments for model evaluation.

    args: argument list (typically sys.argv[1:]).
    """
    import argparse
    parser = argparse.ArgumentParser(description='Test trained models')
    a = parser.add_argument
    common.add_arguments(parser)
    a('--run', dest='run', default='', help='%(default)s')
    # BUGFIX: the store_true flags had default='' -- harmless only because
    # '' is falsy; the proper unset value for a boolean flag is False
    a('--check', action='store_true', default=False, help='Run a check pass, not actually evaluating')
    a('--skip-stats', action='store_true', default=False, help='Do not compute on-device stats')
    a('--out', dest='results_dir', default='./data/results', help='%(default)s')
    parsed = parser.parse_args(args)
    return parsed
|
def main():
    """Evaluate the best trained models of a run; store results and stats."""
    args = parse(sys.argv[1:])
    out_dir = os.path.join(args.results_dir, args.run)
    common.ensure_directories(out_dir)
    # Load dataset and the per-fold train/val/test splits
    urbansound8k.maybe_download_dataset(args.datasets_dir)
    data = urbansound8k.load_dataset()
    folds = urbansound8k.folds(data)
    exsettings = common.load_settings_path(args.settings_path)
    frames = exsettings['frames']
    voting = exsettings['voting']
    overlap = exsettings['voting_overlap']
    settings = features.settings(exsettings)
    def load_sample(sample):
        # Feature loader bound to this run's settings
        return features.load_sample(sample, settings, start_time=sample.start, window_frames=frames, feature_dir=args.features_dir, normalize=exsettings['normalize'])
    def predict(model, data):
        # Window-level predictions combined by the configured voting scheme
        return features.predict_voted(exsettings, model, data, loader=load_sample, method=voting, overlap=overlap)
    history = load_train_history(args.models_dir, args.run)
    n_folds = len(history.fold.unique())
    n_experiments = len(history.experiment.unique())
    # NOTE(review): .format() is never applied here, and the argument order
    # (folds first) does not match the text -- prints the raw template
    print('Found {} experiments across {} folds', n_folds, n_experiments)
    best = pick_best(history)
    print('Best models\n', best[['epoch', 'fold', 'voted_val_acc']])
    print('Computing model info')
    def get_stats(row):
        # On-device (STM32) cost stats for an experiment's best model;
        # also writes the per-layer breakdown as CSV
        ex = row.iloc[0]
        model = ex['model_path']
        (model_stats, layer_info) = stats.model_info(model)
        layer_info_path = os.path.join(out_dir, '{}.layers.csv'.format(ex['experiment']))
        layer_info.to_csv(layer_info_path)
        return pandas.Series(model_stats)
    if (not args.skip_stats):
        model_stats = best.groupby(level='experiment').apply(get_stats)
        print('Model stats\n', model_stats)
        model_stats.to_csv(os.path.join(out_dir, 'stm32stats.csv'))
    print('Testing models...')
    results = evaluate(best, folds, predictor=predict, out_dir=out_dir, dry_run=args.check)
|
def maybe_download_dataset(workdir):
    """Download and extract UrbanSound8K into workdir, if not already there.

    Uses the module-level download_urls list. Returns the path to the
    extracted 'UrbanSound8K' directory.
    """
    if (not os.path.exists(workdir)):
        os.makedirs(workdir)
    dir_path = os.path.join(workdir, 'UrbanSound8K')
    archive_path = (dir_path + '.tar.gz')
    last_progress = None
    def download_progress(count, blocksize, totalsize):
        # urlretrieve reporthook: print the percentage only when it changes
        nonlocal last_progress
        p = int((((count * blocksize) * 100) / totalsize))
        if (p != last_progress):
            print('\r{}%'.format(p), end='\r')
            last_progress = p
    if (not os.path.exists(dir_path)):
        print('Could not find', dir_path)
        if (not os.path.exists(archive_path)):
            u = download_urls[0]
            print('Downloading...', u)
            urllib.request.urlretrieve(u, archive_path, reporthook=download_progress)
        print('Extracting...', archive_path)
        # NOTE(review): extractall trusts member paths in the archive; fine
        # for this known dataset URL, unsafe for untrusted archives
        with tarfile.open(archive_path, 'r:gz') as archive:
            archive.extractall(workdir)
    return dir_path
|
def load_dataset():
    """Load the UrbanSound8K metadata table shipped with this package."""
    metadata_path = os.path.join(here, 'datasets/UrbanSound8K.csv')
    return pandas.read_csv(metadata_path)
|
def sample_path(sample, dataset_path=None):
    """Path to a sample's audio file within the UrbanSound8K layout.

    Falls back to the module-level default_path when dataset_path is falsy.
    """
    base = dataset_path if dataset_path else default_path
    fold_dir = 'fold' + str(sample.fold)
    return os.path.join(base, 'audio', fold_dir, sample.slice_file_name)
|
def folds(data):
    """Split the dataset into 10 (train, val, test) DataFrame triples.

    Uses the precomputed 0-based fold indices and the dataset's 1-based
    'fold' column. Sanity-checks that the three splits are disjoint.
    """
    fold_idxs = folds_idx(n_folds=10)
    assert (len(fold_idxs) == 10)
    splits = []
    for (train_idx, val_idx, test_idx) in fold_idxs:
        # Convert 0-based indices to the dataset's 1-based fold numbers
        train_nums = numpy.array(train_idx) + 1
        val_nums = numpy.array(val_idx) + 1
        test_nums = numpy.array(test_idx) + 1
        fold_train = data[data.fold.isin(train_nums)]
        fold_val = data[data.fold.isin(val_nums)]
        fold_test = data[data.fold.isin(test_nums)]
        train_folds = set(fold_train.fold.unique())
        val_folds = set(fold_val.fold.unique())
        test_folds = set(fold_test.fold.unique())
        # Train spans 8 folds and never overlaps val or test
        assert (len(train_folds) == 8), len(train_folds)
        assert (train_folds.intersection(val_folds) == set())
        assert (train_folds.intersection(test_folds) == set())
        assert (val_folds.intersection(test_folds) == set())
        splits.append((fold_train, fold_val, fold_test))
    return splits
|
def ensure_valid_fold(fold, n_folds=10):
    """Validate a (train, val, test) tuple of 0-based fold index lists.

    Checks sizes, ranges, disjointness, and that together they cover
    exactly range(n_folds). Returns True; raises AssertionError otherwise.
    """
    train, val, test = fold
    assert len(train) == n_folds - 2, len(train)
    assert 0 <= train[0] < n_folds, train[0]
    assert len(val) == 1, len(val)
    assert 0 <= val[0] < n_folds, val[0]
    assert len(test) == 1, len(test)
    assert 0 <= test[0] < n_folds, test[0]
    assert test[0] != val[0]
    test_overlap = set(train).intersection(test)
    val_overlap = set(train).intersection(val)
    assert test_overlap == set(), test_overlap
    assert val_overlap == set(), val_overlap
    # Every fold index appears exactly once across the three splits
    assert sorted(train + val + test) == list(range(0, n_folds))
    return True
|
def folds_idx(n_folds):
    """Generate fold indices for cross-validation.

    Each fold has 1 validation, 1 test set and the remaining train;
    the validation set is the fold just before the test fold (wrapping).
    """
    all_folds = list(range(0, n_folds))
    out = []
    for idx in range(0, n_folds):
        test = [all_folds[idx]]
        val = [all_folds[idx - 1]]  # wraps to the last fold when idx == 0
        train = list(set(all_folds).difference(set(test + val)))
        candidate = (train, val, test)
        ensure_valid_fold(candidate)
        out.append(candidate)
    assert len(out) == n_folds, len(out)
    return out
|
def sbcnn_generator(n_iter=400, random_state=1):
    """Yield (params, settings) pairs for randomized SB-CNN hyperparameter search.

    params: the raw sampled hyperparameters.
    settings: full model settings, with tuple-valued 'pool'/'kernel'
    derived from the sampled (f, t) scalars.
    """
    from sklearn.model_selection import ParameterSampler
    params = dict(kernel_t=range(3, 10, 2), kernel_f=range(3, 10, 2), pool_t=range(2, 5), pool_f=range(2, 5), kernels_start=range(16, 64), fully_connected=range(16, 128))
    sampler = ParameterSampler(params, n_iter=n_iter, random_state=random_state)
    # (removed the unused out_models / out_total_params accumulators)
    for p in sampler:
        s = {'model': 'sbcnn', 'frames': 31, 'n_mels': 60, 'samplerate': 22050}
        pool = (p['pool_f'], p['pool_t'])
        kernel = (p['kernel_f'], p['kernel_t'])
        for (k, v) in p.items():
            s[k] = v
        s['pool'] = pool
        s['kernel'] = kernel
        yield (p, s)
|
def generate_models():
    """Sample model hyperparameters; build and convert each to STM32 code.

    Parameter combinations whose model fails to build (ValueError) are
    skipped. Returns a DataFrame with one row per generated model:
    hyperparameters, file paths, id and converter stats.
    """
    gen = sbcnn_generator()
    data = {'model_path': [], 'gen_path': [], 'id': []}
    for out in iter(gen):
        model = None
        try:
            (params, settings) = out
            model = models.build(settings.copy())
        except ValueError as e:
            # Invalid hyperparameter combination: report and skip
            print('Error:', e)
            continue
        # Lazily create one column per hyperparameter
        for (k, v) in params.items():
            if (data.get(k) is None):
                data[k] = []
            data[k].append(v)
        model_id = str(uuid.uuid4())
        out_dir = os.path.join('scan', model_id)
        os.makedirs(out_dir)
        model_path = os.path.join(out_dir, 'model.orig.hdf5')
        out_path = os.path.join(out_dir, 'gen')
        model.save(model_path)
        stats = stm32convert.generatecode(model_path, out_path, name='network', model_type='keras', compression=None)
        data['model_path'].append(model_path)
        data['gen_path'].append(out_path)
        data['id'].append(model_id)
        # Converter stats (flash/ram/maccs ...) become columns too
        for (k, v) in stats.items():
            if (data.get(k) is None):
                data[k] = []
            data[k].append(v)
    df = pandas.DataFrame(data)
    return df
|
def main():
    """Run the hyperparameter scan and write the summary to scan.csv."""
    generate_models().to_csv('scan.csv')
|
def main():
    """Build the ldcnn20k60 model, save it, and print its inference FLOPS."""
    settings = common.load_experiment('experiments', 'ldcnn20k60')
    def build():
        return train.sb_cnn(settings)
    model = build()
    model.summary()
    model.save('model.wip.hdf5')
    input_shape = (settings['n_mels'], settings['frames'], 1)
    flops, params = stats.analyze_model(build, [input_shape], n_classes=10)
    # Only report scopes that run at inference time
    inference_flops = {scope: count for scope, count in flops.items() if not stats.is_training_scope(scope)}
    for scope, count in inference_flops.items():
        print(scope, count)
|
def check_missing(df, field, name='name'):
    """Print a warning listing the rows where `field` is NaN, keyed by `name`."""
    missing_rows = df[df[field].isna()]
    if len(missing_rows) > 0:
        print('WARN. Missing "{}" for {}'.format(field, list(missing_rows[name])))
|
def logmel_models(data_path):
    """Load the model summary CSV and derive metrics for the logmel models.

    Adds absolute parameter count, analysis window length (s), time step (s),
    frequency resolution (Hz) and MACC per second of audio. Indexed by name.
    """
    models = pandas.read_csv(data_path)
    models = models[models['features'].str.contains('logmel')]
    models.index = models['name']
    models['params'] = models['kparams'] * 1000.0
    models['window'] = (models.frames * models.hop) / models.samplerate
    models['t_step'] = models.hop / models.samplerate
    models['f_res'] = models.samplerate / models.bands
    models['macc_s'] = (models['mmacc'] * 1000000.0) / models.window
    return models
|
def model_table(data_path):
    """Render the model comparison table as a LaTeX tabular string."""
    models = logmel_models(data_path)
    table = pandas.DataFrame()
    table['Accuracy (%)'] = models.accuracy * 100
    table['MACC / second'] = ['{} M'.format(int(v / 1000000.0)) for v in models.macc_s]
    table['Model parameters'] = ['{} k'.format(int(v / 1000.0)) for v in models.params]
    ranked = table.sort_values('Accuracy (%)', ascending=False)
    return ranked.to_latex(column_format='lrrr')
|
def plot_models(data_path, figsize=(12, 4), max_params=128000.0, max_maccs=4500000.0):
    """Scatter-plot models by parameter count vs MACC/s (log-log axes).

    Shades the feasible region below max_params/max_maccs and labels each
    point with model name and accuracy. Returns the matplotlib Figure.
    """
    df = logmel_models(data_path)
    (fig, ax) = plt.subplots(1, figsize=figsize)
    # Warn (not fail) about models missing the plotted metrics
    check_missing(df, 'accuracy')
    check_missing(df, 'kparams')
    check_missing(df, 'mmacc')
    df.plot.scatter(x='params', y='macc_s', logx=True, logy=True, ax=ax)
    ax.set_xlabel('Model parameters')
    ax.set_ylabel('MACC / second')
    # Feasible region: rectangle from the origin to the resource limits
    feasible_x = max_params
    feasible_y = max_maccs
    x = [0, feasible_x, feasible_x, 0]
    y = [0, 0, feasible_y, feasible_y]
    ax.fill(x, y, color='green', alpha=0.5)
    linestyle = dict(color='black', linewidth=0.5)
    ax.axvline(feasible_x, **linestyle)
    ax.axhline(feasible_y, **linestyle)
    def add_labels(row):
        # Label every point with '<name> <accuracy>%'
        xy = (row.params, row.macc_s)
        label = '{} {:.1f}%'.format(row['name'], (100 * row.accuracy))
        ax.annotate(label, xy, xytext=(5, 40), textcoords='offset points', size=12, rotation=25, color='darkslategrey')
    df.apply(add_labels, axis=1)
    fig.tight_layout()
    return fig
|
def flatten(nested):
    """Flatten one level of nesting: [[a, b], [c]] -> [a, b, c].

    (Parameter renamed from `list`, which shadowed the builtin; all visible
    callers pass it positionally.)
    """
    return [item for sub in nested for item in sub]
|
def plot_spectrogram(f, ax=None, cmap=None):
    """Plot a log-frequency dB spectrogram of audio file f.

    Draws into ax when given; otherwise creates a new 16x4 figure.
    Returns the new Figure, or None when ax was supplied.
    """
    (y, sr) = librosa.load(f, sr=44100)
    fig = None
    if (not ax):
        (fig, ax) = plt.subplots(1, figsize=(16, 4))
    S = numpy.abs(librosa.stft(y))
    S = librosa.amplitude_to_db(S, ref=numpy.max)  # dB relative to peak
    kwargs = dict(ax=ax, y_axis='log', x_axis='time', sr=sr)
    if (cmap is not None):
        kwargs['cmap'] = cmap
    librosa.display.specshow(S, **kwargs)
    return fig
|
def plot_spectrograms(files, titles, out=None):
    """Plot spectrograms for several audio files in a two-row grid.

    Axis labels/ticks are kept only on the left column and bottom row.
    Optionally saves the figure to `out`. Returns the Figure.
    """
    assert (len(files) == len(titles))
    (fig, axs) = plt.subplots(2, (len(files) // 2), sharex=True, figsize=(16, 6))
    axs = flatten(axs)
    for (i, (path, title, ax)) in enumerate(zip(files, titles, axs)):
        plot_spectrogram(path, ax=ax)
        ax.set_title(title)
        # Only the first plot of each row keeps its y axis
        if ((i != 0) and (i != (len(files) / 2))):
            ax.set_ylabel('')
            ax.set_yticks([])
        # Only the bottom row keeps its x label
        if (i < (len(files) / 2)):
            ax.set_xlabel('')
    if out:
        fig.savefig(out, bbox_inches='tight', pad_inches=0)
    return fig
|
def plot_examples(examples=None):
    """Plot spectrograms of the example sounds.

    examples: mapping of title -> (relative_audio_path, ...); defaults to
    the module-level urbansound8k_examples. BUGFIX: the original
    unconditionally overwrote the argument with the global, which also made
    the (required) parameter impossible to omit for plot_func() callers.
    """
    if examples is None:
        examples = urbansound8k_examples
    here = os.path.dirname(__file__)
    base = os.path.join(here, '../microesc/../data/datasets/UrbanSound8K/audio/')
    paths = [os.path.join(base, e[0]) for e in examples.values()]
    fig = plot_spectrograms(paths, examples.keys())
    return fig
|
def main():
    """Render the plot named on the command line into plots/<name>."""
    plotname = os.path.basename(sys.argv[1])
    here = os.path.dirname(__file__)
    plot_func = plots.get(plotname, None)
    if not plot_func:
        sys.stderr.write('Plot {} not found. Supported: \n{}'.format(plotname, plots.keys()))
        return 1
    out = plot_func()
    out_path = os.path.join(here, 'plots', plotname)
    out_dir = os.path.dirname(out_path)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    ext = os.path.splitext(plotname)[1]
    if ext == '.png':
        # Figures know how to save themselves
        out.savefig(out_path, bbox_inches='tight')
    elif ext == '.tex':
        # LaTeX output is a plain string
        with open(out_path, 'w') as f:
            f.write(out)
    else:
        raise ValueError('Unknown extension {}'.format(ext))
|
def strformat(fmt, series):
    """Apply the format string fmt to every element of series."""
    return [fmt.format(value) for value in series]
|
def downsample_from_name(name):
    """Map a model name to its downsampling method label."""
    return 'stride' if name.startswith('Stride') else 'maxpool'
|
def accuracies(df, col):
    """Format the '<col>_mean'/'<col>_std' columns as percent strings."""
    means = df[col + '_mean'] * 100
    stds = df[col + '_std'] * 100
    return ['{:.1f}% +-{:.1f}'.format(m, s) for (m, s) in zip(means, stds)]
|
def cpu_use(df):
    """Per-classification CPU time in whole milliseconds, as padded strings."""
    millis = ((df.utilization * 1000) * 1 / df.classifications_per_second).astype(int)
    return ['{:d} ms'.format(v).ljust(3) for v in millis]
|
def plot_augmentations(y, sr, time_shift=3000, pitch_shift=12, time_stretch=1.3):
    """Plot spectrograms of an audio clip and its augmented variants.

    Shows time-shift, time-stretch and pitch-shift in both directions
    alongside the original. Returns the Figure.
    """
    augmentations = {'Original': y, 'Timeshift left': y[time_shift:], 'Timeshift right': numpy.concatenate([numpy.zeros(time_shift), y[:(- time_shift)]]), 'Timestretch faster': librosa.effects.time_stretch(y, time_stretch), 'Timestretch slower': librosa.effects.time_stretch(y, (1 / time_stretch)), 'Pitchshift up': librosa.effects.pitch_shift(y, sr, pitch_shift), 'Pitchshift down': librosa.effects.pitch_shift(y, sr, (- pitch_shift))}
    # Grid layout of panel titles; 'Original' fills the top row
    layout = [['Original', 'Original', 'Original'], ['Timeshift right', 'Timestretch faster', 'Pitchshift up'], ['Timeshift left', 'Timestretch slower', 'Pitchshift down']]
    shape = numpy.array(layout).shape
    (fig, axs) = plt.subplots(shape[0], shape[1], figsize=(16, 6), sharex=True)
    for row in range(shape[0]):
        for col in range(shape[1]):
            description = layout[row][col]
            ax = axs[row][col]
            data = augmentations[description]
            S = numpy.abs(librosa.stft(data))
            # Slight blur makes the structure easier to see at this size
            S = scipy.ndimage.filters.gaussian_filter(S, 0.7)
            S = librosa.amplitude_to_db(S, ref=numpy.max)
            S -= S.mean()
            librosa.display.specshow(S, ax=ax, sr=sr, y_axis='hz')
            ax.set_ylim(0, 5000)
            ax.set_title(description)
    return fig
|
def main():
    """Plot augmentations of a sample dog-bark clip and save it as a PNG."""
    path = '163459__littlebigsounds__lbs-fx-dog-small-alert-bark001.wav'
    (y, sr) = librosa.load(path, offset=0.1, duration=1.2)
    fig = plot_augmentations(y, sr)
    out_path = __file__.replace('.py', '.png')
    fig.savefig(out_path, bbox_inches='tight')
|
def bandpass_filter(lowcut, highcut, fs, order, output='sos'):
    """Design a Butterworth bandpass filter.

    lowcut/highcut: band edges in Hz; the upper edge is clipped just
    below the Nyquist frequency.
    order: total filter order; must be even (a bandpass of order N is
    built from an order N/2 lowpass prototype).
    output: scipy.signal.butter output format ('sos', 'ba', 'zpk').
    """
    assert (order % 2) == 0, 'order must be multiple of 2'
    assert (highcut * 0.95) < (fs / 2.0), 'highcut {} above Nyquist for fs={}'.format(highcut, fs)
    assert lowcut > 0.0, 'lowcut must be above 0'
    nyq = 0.5 * fs
    low = lowcut / nyq
    high = min(highcut / nyq, 0.99)  # keep the upper edge strictly below Nyquist
    # BUGFIX: order / 2 is a float in Python 3; scipy.signal.butter expects
    # an integer filter order -- use integer division
    return scipy.signal.butter(order // 2, [low, high], btype='band', output=output)
|
def filterbank(center, fraction, fs, order):
    """Build a fractional-octave Butterworth bandpass filterbank.

    center: candidate center frequencies in Hz; bands at or above fs/2
    are dropped.
    fraction: bands per octave (3 => 1/3 octave).

    Returns (nominal_center_frequencies, filters) where filters come from
    bandpass_filter (sos format).
    """
    reference = acoustics.octave.REFERENCE
    # Keep only bands whose center lies below the Nyquist frequency
    center = [f for f in center if (f < (fs / 2.0))]
    center = numpy.asarray(center)
    # Map the requested centers to standard band indices, then recompute
    # the exact centers and band edges from those indices
    indices = acoustics.octave.index_of_frequency(center, fraction=fraction, ref=reference)
    center = acoustics.octave.exact_center_frequency(None, fraction=fraction, n=indices, ref=reference)
    lower = acoustics.octave.lower_frequency(center, fraction=fraction)
    upper = acoustics.octave.upper_frequency(center, fraction=fraction)
    nominal = acoustics.octave.nominal_center_frequency(None, fraction, indices)
    def f(low, high):
        return bandpass_filter(low, high, fs=fs, order=order)
    filterbank = [f(low, high) for (low, high) in zip(lower, upper)]
    return (nominal, filterbank)
|
def third_octave_filterbank(fs, order=8):
    """1/3-octave Butterworth filterbank at the IEC 61672-1 nominal centers."""
    from acoustics.standards import iec_61672_1_2013 as iec_61672
    nominal_centers = iec_61672.NOMINAL_THIRD_OCTAVE_CENTER_FREQUENCIES
    return filterbank(nominal_centers, fraction=3, fs=fs, order=order)
|
def plot_filterbank_oct(ax, fs=44100):
    """Plot the magnitude response of a 1/3-octave filterbank onto ax."""
    # NOTE(review): the filterbank is designed for samplerate fs/2 while the
    # responses below are evaluated at fs -- confirm this is intentional
    filterbank = third_octave_filterbank((fs / 2))
    for (center, sos) in zip(filterbank[0], filterbank[1]):
        (w, h) = scipy.signal.sosfreqz(sos, worN=4096, fs=fs)
        # Magnitude in dB; epsilon avoids log10(0)
        db = (20 * numpy.log10((numpy.abs(h) + 1e-09)))
        ax.plot(w, db)
    ax.set_title('1/3 octave')
    ax.set_ylabel('Attenuation (dB)')
    ax.set_ylim((- 60), 5)
    ax.set_xlim(20.0, 20000.0)
|
def plot_filterbank_gammatone(ax, fs=44100):
    """Plot the magnitude response of a gammatone filterbank onto ax.

    Returns the GammatoneFilterbank instance.
    """
    np = numpy
    from pyfilterbank import gammatone
    # NOTE(review): samplerate is hardcoded to 44100 here; the fs argument
    # only scales the plotted frequency axis -- confirm intent
    gfb = gammatone.GammatoneFilterbank(samplerate=44100, startband=(- 6), endband=26, density=1.5)
    def plotfun(x, y):
        # freqz yields normalized frequencies; scale to Hz, magnitude in dB
        xx = (x * fs)
        ax.plot(xx, (20 * np.log10((np.abs(y) + 1e-09))))
    gfb.freqz(nfft=(2 * 4096), plotfun=plotfun)
    ax.set_title('Gammatone')
    ax.set_ylim([(- 80), 1])
    ax.set_xlim([10, 20000.0])
    return gfb
|
def plot_filterbank_mel(ax, n_mels=32, n_fft=4097, fmin=10, fmax=22050, fs=44100):
    """Plot the magnitude response of a mel filterbank onto ax.

    n_fft: number of FFT bands passed to melbank.
    """
    from pyfilterbank import melbank
    # BUGFIX: num_fft_bands was hardcoded to 4097, silently ignoring the
    # n_fft parameter (behavior is identical for the default value)
    (melmat, (melfreq, fftfreq)) = melbank.compute_melmat(n_mels, fmin, fmax, num_fft_bands=n_fft, sample_rate=fs)
    # Magnitude in dB; epsilon avoids log10(0)
    ax.plot(fftfreq, (20 * numpy.log10((melmat.T + 1e-09))))
    ax.set_title('Mel-scale')
    ax.set_xlabel('Frequency (Hz)')
|
def main():
    """Render the three filterbank response plots and save them as one PNG."""
    (fig, (gt_ax, oct_ax, mel_ax)) = plt.subplots(3, sharex=True, sharey=True, figsize=(12, 5))
    # NOTE(review): gca() returns a single (current) axes; the limits set on
    # it propagate to all subplots only because sharex/sharey are enabled
    axes = fig.gca()
    plot_filterbank_gammatone(gt_ax)
    plot_filterbank_mel(mel_ax)
    plot_filterbank_oct(oct_ax)
    axes.set_ylim([(- 40), 3])
    axes.set_xlim([100, 20000])
    fig.tight_layout()
    out = __file__.replace('.py', '.png')
    fig.savefig(out, bbox_inches='tight')
|
def plot_logloss(figsize=(6, 3)):
    """Plot binary log-loss as a function of the predicted probability.

    One curve per true label (0 and 1). Returns the Figure.
    """
    fig, ax = plt.subplots(1, figsize=figsize)
    predicted = numpy.linspace(0.0, 1.0, 300)
    curve_true0 = [log_loss([0], [p], labels=[0, 1]) for p in predicted]
    curve_true1 = [log_loss([1], [p], labels=[0, 1]) for p in predicted]
    ax.plot(predicted, curve_true0, label='true=0')
    ax.plot(predicted, curve_true1, label='true=1')
    ax.legend()
    ax.set_ylim(0, 8)
    ax.set_xlim(0, 1)
    return fig
|
def main():
    """Render the log-loss plot next to this script as a PNG."""
    fig = plot_logloss()
    fig.tight_layout()
    fig.savefig(__file__.replace('.py', '.png'), bbox_inches='tight')
|
def arglist(options):
    """Turn an options dict into a list of '--key=value' arguments."""
    return ['--{}={}'.format(key, value) for (key, value) in options.items()]
|
def command_for_job(options):
    """Full command line for launching one training job."""
    return ['python3', 'train.py'] + arglist(options)
|
def generate_train_jobs(experiments, settings_path, folds, overrides):
    """Build train.py command lines for the given experiments.

    experiments: DataFrame of per-experiment settings (iterated via iterrows).
    overrides take precedence over per-experiment settings.
    """
    # Timestamp + short random id make job names unique per invocation
    timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M')
    unique = str(uuid.uuid4())[0:4]
    def name(experiment, fold):
        name = '-'.join([experiment, timestamp, unique])
        return (name + '-fold{}'.format(fold))
    def job(exname, experiment):
        # NOTE(review): the `return` inside the loop means only the FIRST
        # fold yields a command per experiment -- looks like it was meant to
        # collect (or yield) one command per fold. Confirm intent before
        # relying on this.
        for fold in folds:
            n = name(exname, fold)
            options = {'name': n, 'fold': fold, 'settings': settings_path}
            for (k, v) in experiment.items():
                options[k] = v
            for (k, v) in overrides.items():
                options[k] = v
            cmd = command_for_job(options)
            return cmd
    jobs = [job(str(idx), ex) for (idx, ex) in experiments.iterrows()]
    return jobs
|
def parse(args):
    """Parse command-line arguments for the job generator."""
    import argparse
    parser = argparse.ArgumentParser(description='Generate jobs')
    add = parser.add_argument
    add('--models', default='models.csv', help='%(default)s')
    add('--settings', default='experiments/ldcnn20k60.yaml', help='%(default)s')
    add('--jobs', dest='jobs_dir', default='./data/jobs', help='%(default)s')
    add('--check', action='store_true', help='Only run a pre-flight check')
    return parser.parse_args(args)
|
def main():
    """Print the training job command lines, one per line."""
    args = parse(sys.argv[1:])
    models = pandas.read_csv(args.models)
    settings = common.load_settings_path(args.settings)
    overrides = {}
    folds = list(range(0, 9))
    if args.check:
        # Pre-flight: one fold only, with a single batch of train/val data
        folds = (1,)
        overrides['train_samples'] = settings['batch'] * 1
        overrides['val_samples'] = settings['batch'] * 1
    cmds = generate_train_jobs(models, args.settings, folds, overrides)
    print('\n'.join(' '.join(cmd) for cmd in cmds))
|
@pytest.mark.skip('fails right now')
@pytest.mark.parametrize('family', FAMILIES)
def test_models_basic(family):
    """Smoke-test: each model family should build into a Keras model."""
    s = settings.load_settings({'model': family, 'frames': 31, 'n_mels': 60, 'samplerate': 22050})
    # Family-specific required settings
    if (family == 'sbcnn'):
        s['downsample_size'] = (3, 2)
        s['conv_size'] = (3, 3)
    if (family == 'strided'):
        s['downsample_size'] = (3, 3)
        s['conv_size'] = (3, 3)
        s['conv_block'] = 'conv'
        s['filters'] = 12
    m = models.build(s)
    assert isinstance(m, keras.Model)
|
@pytest.mark.parametrize('conv_type', CONV_TYPES)
def test_strided_variations(conv_type):
    """The strided model should build with every supported conv block type."""
    s = settings.load_settings({'model': 'strided', 'frames': 31, 'n_mels': 60, 'samplerate': 22050, 'conv_block': conv_type, 'filters': 20})
    s['conv_size'] = (3, 3)
    s['downsample_size'] = (2, 2)
    m = models.build(s)
    assert isinstance(m, keras.Model)
|
def test_conv_ds():
    """5x5 depthwise-separable conv should be ~an order of magnitude cheaper."""
    kernel = (5, 5)
    input_shape = (60, 31, 16)
    out_channels = 16
    full = stats.compute_conv2d(*input_shape, out_channels, *kernel)
    separable = stats.compute_conv2d_ds(*input_shape, out_channels, *kernel)
    assert full / separable > 9.0
|
def test_conv_ds3x3():
    """3x3 depthwise-separable conv should still give a large cost reduction."""
    kernel = (3, 3)
    input_shape = (60, 31, 64)
    out_channels = 64
    full = stats.compute_conv2d(*input_shape, out_channels, *kernel)
    separable = stats.compute_conv2d_ds(*input_shape, out_channels, *kernel)
    assert full / separable > 7.5
|
@pytest.mark.skip('fails')
def test_generator_fake_loader():
    """dataframe_generator should yield (X, y) batches with correct shapes."""
    dataset_path = 'data/UrbanSound8K/'
    urbansound8k.default_path = dataset_path
    data = urbansound8k.load_dataset()
    # NOTE(review): elsewhere in this codebase urbansound8k.folds returns a
    # list of (train, val, test) tuples, not a (folds, test) pair --
    # presumably why this test is marked as failing
    (folds, test) = urbansound8k.folds(data)
    data_length = 16
    batch_size = 8
    frames = 72
    bands = 32
    n_classes = 10
    def zero_loader(s):
        # Stand-in loader: fixed-size all-zero feature window
        return numpy.zeros((bands, frames, 1))
    fold = folds[0][0]
    X = fold[0:data_length]
    Y = fold.classID[0:data_length]
    g = train.dataframe_generator(X, Y, loader=zero_loader, batchsize=batch_size, n_classes=n_classes)
    n_batches = 3
    batches = list(itertools.islice(g, n_batches))
    assert (len(batches) == n_batches)
    assert (len(batches[0]) == 2)
    assert (batches[0][0].shape == (batch_size, bands, frames, 1))
    assert (batches[0][1].shape == (batch_size, n_classes))
|
def test_windows_shorter_than_window():
    """A clip shorter than one window should still yield exactly one window."""
    frame_samples = 256
    window_frames = 64
    sample_rate = 16000
    n_samples = 0.4 * sample_rate
    windows = list(features.sample_windows(int(n_samples), frame_samples, window_frames))
    assert len(windows) == 1, len(windows)
    # The (single) window must end exactly at the clip length
    assert windows[-1][1] == n_samples
|
def test_window_typical():
    """A typical 4-second clip should split into the expected window count."""
    frame_samples = 256
    window_frames = 64
    sample_rate = 16000
    n_samples = 4.0 * sample_rate
    windows = list(features.sample_windows(int(n_samples), frame_samples, window_frames))
    assert len(windows) == 8, len(windows)
    # The last window must end exactly at the clip length
    assert windows[-1][1] == n_samples
|
def _test_predict_windowed():
    # Disabled (leading underscore): depends on module-level `test` and `model`
    # globals that are not defined in this file — TODO wire these up before
    # renaming back to test_predict_windowed.
    t = test[0:10]
    sbcnn16k32_settings = dict(feature='mels', samplerate=16000, n_mels=32, fmin=0, fmax=8000, n_fft=512, hop_length=256, augmentations=5)

    def load_sample32(sample):
        # Wrapper fixing the feature settings, window length and feature dir
        # for features.load_sample
        return features.load_sample(sample, sbcnn16k32_settings, window_frames=72, feature_dir='../../scratch/aug')

    mean_m = features.predict_voted(sbcnn16k32_settings, model, t, loader=load_sample32, method='mean')
|
def test_precompute():
    """precompute should write feature files at the paths feature_path predicts."""
    # Local names avoid shadowing the `settings` module and the `dir` builtin
    feature_settings = dict(feature='mels', samplerate=16000, n_mels=32, fmin=0, fmax=8000, n_fft=512, hop_length=256, augmentations=12)
    out_root = './pre2'
    if os.path.exists(out_root):
        shutil.rmtree(out_root)
    workdir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../data/'))
    data = urbansound8k.load_dataset()
    urbansound8k.maybe_download_dataset(workdir)
    out_dir = os.path.join(out_root, features.settings_id(feature_settings))
    expect_path = features.feature_path(data.iloc[0], out_dir)
    assert not os.path.exists(expect_path), expect_path
    preprocess.precompute(data[0:4], feature_settings, out_dir=out_dir, verbose=0, force=True, n_jobs=2)
    assert os.path.exists(expect_path), expect_path
|
def test_grouped_confusion():
    """Grouping a confusion matrix must preserve totals and group order."""
    cm = numpy.array([
        [82, 0, 3, 0, 0, 10, 0, 4, 1, 0],
        [3, 29, 0, 0, 0, 0, 1, 0, 0, 0],
        [4, 3, 37, 14, 4, 4, 0, 0, 2, 32],
        [5, 2, 5, 78, 4, 0, 0, 0, 0, 6],
        [23, 2, 4, 1, 55, 4, 2, 6, 3, 0],
        [9, 0, 0, 4, 3, 70, 0, 5, 1, 1],
        [0, 0, 0, 5, 0, 0, 27, 0, 0, 0],
        [0, 0, 2, 0, 1, 1, 1, 91, 0, 0],
        [9, 11, 9, 4, 0, 1, 0, 0, 46, 3],
        [1, 7, 7, 0, 7, 0, 0, 0, 3, 75],
    ])
    (gcm, gnames) = report.grouped_confusion(cm, report.groups)
    # No samples may be lost or duplicated by the grouping
    assert numpy.sum(cm) == numpy.sum(gcm)
    assert gnames[0] == 'social_activity'
    assert gnames[3] == 'domestic_machines'
    # Correct 'social_activity' = all cells among its member classes
    expected_social = (37 + 78 + 75) + (14 + 32) + (5 + 6) + (7 + 0)
    assert gcm[0][0] == expected_social, (gcm[0][0], expected_social)
    assert gcm[3][3] == 82
|
@pytest.mark.parametrize('example', CORRECT_FOLDS.keys())
def test_ensure_valid_fold_passes_correct(example):
    """Known-good folds must pass validation without raising."""
    folds.ensure_valid_fold(CORRECT_FOLDS[example])
|
@pytest.mark.parametrize('example', WRONG_FOLDS.keys())
def test_ensure_valid_fold_detects_wrong(example):
    """Known-bad folds must trigger an AssertionError."""
    with pytest.raises(AssertionError):
        folds.ensure_valid_fold(WRONG_FOLDS[example])
|
def test_folds_idx():
    """Cross-validation folds should rotate the test fold from 0 through 9."""
    all_folds = folds.folds_idx(10)
    print('\n' + '\n'.join(str(fold) for fold in all_folds))
    assert all_folds[0][2][0] == 0, 'first test fold should be 0'
    assert all_folds[-1][2][0] == 9, 'last test fold should be 9'
|
def test_folds():
    """UrbanSound8K should split into exactly 10 folds."""
    dataset = urbansound8k.load_dataset()
    fold_list = urbansound8k.folds(dataset)
    assert len(fold_list) == 10
|
@dataclass
class DataTrainingArguments():
    '\n    Arguments pertaining to what data we are going to input our model for training and eval.\n    '
    # Each field's metadata['help'] doubles as its CLI help text — presumably
    # parsed via transformers.HfArgumentParser; confirm against the caller.
    task_name: Optional[str] = field(default='ner', metadata={'help': 'The name of the task (ner, pos...).'})
    dataset_name: Optional[str] = field(default=None, metadata={'help': 'The name of the dataset to use (via the datasets library).'})
    dataset_config_name: Optional[str] = field(default=None, metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'})
    train_file: Optional[str] = field(default=None, metadata={'help': 'The input training data file (a csv or JSON file).'})
    validation_file: Optional[str] = field(default=None, metadata={'help': 'An optional input evaluation data file to evaluate on (a csv or JSON file).'})
    test_file: Optional[str] = field(default=None, metadata={'help': 'An optional input test data file to predict on (a csv or JSON file).'})
    overwrite_cache: bool = field(default=False, metadata={'help': 'Overwrite the cached training and evaluation sets'})
    preprocessing_num_workers: Optional[int] = field(default=None, metadata={'help': 'The number of processes to use for the preprocessing.'})
    pad_to_max_length: bool = field(default=True, metadata={'help': 'Whether to pad all samples to model maximum sentence length. If False, will pad the samples dynamically when batching to the maximum length in the batch. More efficient on GPU but very bad for TPU.'})
    # The three max_*_samples fields truncate splits for debugging/fast runs
    max_train_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of training examples to this value if set.'})
    max_val_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of validation examples to this value if set.'})
    max_test_samples: Optional[int] = field(default=None, metadata={'help': 'For debugging purposes or quicker training, truncate the number of test examples to this value if set.'})
    label_all_tokens: bool = field(default=False, metadata={'help': 'Whether to put the label for one word on all tokens of generated by that word or just on the one (in which case the other tokens will have a padding index).'})
    return_entity_level_metrics: bool = field(default=False, metadata={'help': 'Whether to return all the entity levels during evaluation or just the overall ones.'})
|
@dataclass
class XFUNDataTrainingArguments(DataTrainingArguments):
    """Data arguments extended with XFUN-specific language selection."""
    # Primary language code for the dataset split (e.g. 'en')
    lang: Optional[str] = field(default='en')
    # '+'-separated extra training languages, or 'all' for every other language
    additional_langs: Optional[str] = field(default=None)
|
@dataclass
class DataCollatorForKeyValueExtraction():
    "\n    Data collator that will dynamically pad the inputs received, as well as the labels.\n\n    Args:\n        tokenizer (:class:`~transformers.PreTrainedTokenizer` or :class:`~transformers.PreTrainedTokenizerFast`):\n            The tokenizer used for encoding the data.\n        padding (:obj:`bool`, :obj:`str` or :class:`~transformers.file_utils.PaddingStrategy`, `optional`, defaults to :obj:`True`):\n            Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n            among:\n\n            * :obj:`True` or :obj:`'longest'`: Pad to the longest sequence in the batch (or no padding if only a single\n              sequence if provided).\n            * :obj:`'max_length'`: Pad to a maximum length specified with the argument :obj:`max_length` or to the\n              maximum acceptable input length for the model if that argument is not provided.\n            * :obj:`False` or :obj:`'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of\n            different lengths).\n        max_length (:obj:`int`, `optional`):\n            Maximum length of the returned list and optionally padding length (see above).\n        pad_to_multiple_of (:obj:`int`, `optional`):\n            If set will pad the sequence to a multiple of the provided value.\n\n            This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n            7.5 (Volta).\n        label_pad_token_id (:obj:`int`, `optional`, defaults to -100):\n            The id to use when padding the labels (-100 will be automatically ignore by PyTorch loss functions).\n    "
    tokenizer: PreTrainedTokenizerBase
    padding: Union[bool, str, PaddingStrategy] = True
    max_length: Optional[int] = None
    pad_to_multiple_of: Optional[int] = None
    label_pad_token_id: int = (- 100)

    def __call__(self, features):
        # Accept either 'label' or 'labels' as the target key
        label_name = ('label' if ('label' in features[0].keys()) else 'labels')
        labels = ([feature[label_name] for feature in features] if (label_name in features[0].keys()) else None)
        has_image_input = ('image' in features[0])
        has_bbox_input = ('bbox' in features[0])
        if has_image_input:
            # Batch the page images into an ImageList (size divisibility 32)
            # and remove them so tokenizer.pad does not see non-text features
            image = ImageList.from_tensors([torch.tensor(feature['image']) for feature in features], 32)
            for feature in features:
                del feature['image']
        batch = self.tokenizer.pad(features, padding=self.padding, max_length=self.max_length, pad_to_multiple_of=self.pad_to_multiple_of, return_tensors=('pt' if (labels is None) else None))
        if (labels is None):
            return batch
        sequence_length = torch.tensor(batch['input_ids']).shape[1]
        padding_side = self.tokenizer.padding_side
        if (padding_side == 'right'):
            # Pad labels (and per-token boxes) on the right to the padded length;
            # padded boxes are all-zero rectangles
            batch['labels'] = [(label + ([self.label_pad_token_id] * (sequence_length - len(label)))) for label in labels]
            if has_bbox_input:
                batch['bbox'] = [(bbox + ([[0, 0, 0, 0]] * (sequence_length - len(bbox)))) for bbox in batch['bbox']]
        else:
            # Left-padding tokenizers get labels/boxes padded on the left instead
            batch['labels'] = [(([self.label_pad_token_id] * (sequence_length - len(label))) + label) for label in labels]
            if has_bbox_input:
                batch['bbox'] = [(([[0, 0, 0, 0]] * (sequence_length - len(bbox))) + bbox) for bbox in batch['bbox']]
        # Convert list-valued entries to int64 tensors; tensors pass through
        batch = {k: (torch.tensor(v, dtype=torch.int64) if isinstance(v[0], list) else v) for (k, v) in batch.items()}
        if has_image_input:
            batch['image'] = image
        return batch
|
class FunsdConfig(datasets.BuilderConfig):
    """BuilderConfig for the FUNSD dataset."""

    def __init__(self, **kwargs):
        """Create a FUNSD config; all keyword arguments go to the base class."""
        super().__init__(**kwargs)
|
class Funsd(datasets.GeneratorBasedBuilder):
    'Conll2003 dataset.'
    # NOTE(review): docstring says Conll2003 but this builder loads FUNSD —
    # looks like a copy-paste leftover; confirm before changing.
    BUILDER_CONFIGS = [FunsdConfig(name='funsd', version=datasets.Version('1.0.0'), description='FUNSD dataset')]

    def _info(self):
        # Token-level schema: words, normalized boxes, BIO NER tags and a
        # 3x224x224 uint8 page image per document
        return datasets.DatasetInfo(description=_DESCRIPTION, features=datasets.Features({'id': datasets.Value('string'), 'tokens': datasets.Sequence(datasets.Value('string')), 'bboxes': datasets.Sequence(datasets.Sequence(datasets.Value('int64'))), 'ner_tags': datasets.Sequence(datasets.features.ClassLabel(names=['O', 'B-HEADER', 'I-HEADER', 'B-QUESTION', 'I-QUESTION', 'B-ANSWER', 'I-ANSWER'])), 'image': datasets.Array3D(shape=(3, 224, 224), dtype='uint8')}), supervised_keys=None, homepage='https://guillaumejaume.github.io/FUNSD/', citation=_CITATION)

    def _split_generators(self, dl_manager):
        'Returns SplitGenerators.'
        # Download the official FUNSD archive; each split reads its own subdir
        downloaded_file = dl_manager.download_and_extract('https://guillaumejaume.github.io/FUNSD/dataset.zip')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepath': f'{downloaded_file}/dataset/training_data/'}), datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={'filepath': f'{downloaded_file}/dataset/testing_data/'})]

    def _generate_examples(self, filepath):
        # Yields one example per annotation file; tokens/boxes/tags are built
        # from the 'form' entries of each JSON annotation.
        logger.info('⏳ Generating examples from = %s', filepath)
        ann_dir = os.path.join(filepath, 'annotations')
        img_dir = os.path.join(filepath, 'images')
        # sorted() keeps example order (and guids) deterministic across runs
        for (guid, file) in enumerate(sorted(os.listdir(ann_dir))):
            tokens = []
            bboxes = []
            ner_tags = []
            file_path = os.path.join(ann_dir, file)
            with open(file_path, 'r', encoding='utf8') as f:
                data = json.load(f)
            # The matching image shares the annotation's basename (.json -> .png)
            image_path = os.path.join(img_dir, file)
            image_path = image_path.replace('json', 'png')
            (image, size) = load_image(image_path)
            for item in data['form']:
                (words, label) = (item['words'], item['label'])
                # Drop whitespace-only words; skip entries left empty
                words = [w for w in words if (w['text'].strip() != '')]
                if (len(words) == 0):
                    continue
                if (label == 'other'):
                    # 'other' segments are outside any entity: tag every word O
                    for w in words:
                        tokens.append(w['text'])
                        ner_tags.append('O')
                        bboxes.append(normalize_bbox(item['box'], size))
                else:
                    # BIO scheme: first word gets B-, remaining words get I-.
                    # All words share the segment-level box item['box'].
                    tokens.append(words[0]['text'])
                    ner_tags.append(('B-' + label.upper()))
                    bboxes.append(normalize_bbox(item['box'], size))
                    for w in words[1:]:
                        tokens.append(w['text'])
                        ner_tags.append(('I-' + label.upper()))
                        bboxes.append(normalize_bbox(item['box'], size))
            (yield (guid, {'id': str(guid), 'tokens': tokens, 'bboxes': bboxes, 'ner_tags': ner_tags, 'image': image}))
|
class XFUNConfig(datasets.BuilderConfig):
    """BuilderConfig for XFUN."""

    def __init__(self, lang, additional_langs=None, **kwargs):
        """
        Args:
            lang: string, language for the input text
            additional_langs: optional '+'-separated list of extra languages
            **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.lang = lang
        self.additional_langs = additional_langs
|
class XFUN(datasets.GeneratorBasedBuilder):
    'XFUN dataset.'
    # One builder configuration per supported language code in _LANG
    BUILDER_CONFIGS = [XFUNConfig(name=f'xfun.{lang}', lang=lang) for lang in _LANG]
    # Class-level tokenizer used to align OCR words with subword tokens
    tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-base')

    def _info(self):
        # Schema: pre-tokenized input ids with per-token boxes and BIO labels,
        # the page image, entity spans and key-value relation links
        return datasets.DatasetInfo(features=datasets.Features({'id': datasets.Value('string'), 'input_ids': datasets.Sequence(datasets.Value('int64')), 'bbox': datasets.Sequence(datasets.Sequence(datasets.Value('int64'))), 'labels': datasets.Sequence(datasets.ClassLabel(names=['O', 'B-QUESTION', 'B-ANSWER', 'B-HEADER', 'I-ANSWER', 'I-QUESTION', 'I-HEADER'])), 'image': datasets.Array3D(shape=(3, 224, 224), dtype='uint8'), 'entities': datasets.Sequence({'start': datasets.Value('int64'), 'end': datasets.Value('int64'), 'label': datasets.ClassLabel(names=['HEADER', 'QUESTION', 'ANSWER'])}), 'relations': datasets.Sequence({'head': datasets.Value('int64'), 'tail': datasets.Value('int64'), 'start_index': datasets.Value('int64'), 'end_index': datasets.Value('int64')})}), supervised_keys=None)

    def _split_generators(self, dl_manager):
        'Returns SplitGenerators.'
        # Each entry is [annotation json, image directory] for one language;
        # additional_langs ('+'-separated, or 'all') only extends TRAIN.
        file_dir = 'xfund&funsd/'
        train_files_for_many_langs = [[(file_dir + f'{self.config.lang}.train.json'), (file_dir + f'{self.config.lang}')]]
        val_files_for_many_langs = [[(file_dir + f'{self.config.lang}.val.json'), (file_dir + f'{self.config.lang}')]]
        if self.config.additional_langs:
            additional_langs = self.config.additional_langs.split('+')
            if ('all' in additional_langs):
                additional_langs = [lang for lang in _LANG if (lang != self.config.lang)]
            for lang in additional_langs:
                train_files_for_many_langs.append([(file_dir + f'{lang}.train.json'), (file_dir + f'{lang}')])
        logger.info(f'Training on {self.config.lang} with additional langs({self.config.additional_langs})')
        logger.info(f'Evaluating on {self.config.lang}')
        logger.info(f'Testing on {self.config.lang}')
        return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'filepaths': train_files_for_many_langs}), datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={'filepaths': val_files_for_many_langs})]

    def _generate_examples(self, filepaths):
        # Tokenizes each document line, aligns OCR word boxes with subword
        # tokens by accumulating character lengths, collects entity spans and
        # question->answer relations, then yields 512-token chunks.
        for filepath in filepaths:
            logger.info('Generating examples from = %s', filepath)
            with open(filepath[0], 'r') as f:
                data = json.load(f)
            for doc in data['documents']:
                doc['img']['fpath'] = os.path.join(filepath[1], doc['img']['fname'])
                (image, size) = load_image(doc['img']['fpath'])
                document = doc['document']
                tokenized_doc = {'input_ids': [], 'bbox': [], 'labels': []}
                entities = []
                relations = []
                id2label = {}
                entity_id_to_index_map = {}
                empty_entity = set()
                for line in document:
                    # Lines with no text cannot anchor a relation endpoint
                    if (len(line['text']) == 0):
                        empty_entity.add(line['id'])
                        continue
                    id2label[line['id']] = line['label']
                    relations.extend([tuple(sorted(l)) for l in line['linking']])
                    if ('/en' in filepath[0]):
                        # English: tokenize space-joined OCR words (stripping
                        # U+F703 private-use chars); others tokenize raw text
                        tokenized_inputs = self.tokenizer(' '.join([q['text'].replace(u'\uf703', '') for q in line['words']]), add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                    else:
                        tokenized_inputs = self.tokenizer(line['text'], add_special_tokens=False, return_offsets_mapping=True, return_attention_mask=False)
                    text_length = 0
                    ocr_length = 0
                    bbox = []
                    last_box = None
                    for (token_id, offset) in zip(tokenized_inputs['input_ids'], tokenized_inputs['offset_mapping']):
                        # Token id 6 gets no box here — presumably the
                        # tokenizer's whitespace token; confirm against vocab
                        if (token_id == 6):
                            bbox.append(None)
                            continue
                        text_length += (offset[1] - offset[0])
                        tmp_box = []
                        # Consume OCR words until their normalized character
                        # count catches up with the tokenized text length
                        while (ocr_length < text_length):
                            ocr_word = line['words'].pop(0)
                            ocr_length += len(self.tokenizer._tokenizer.normalizer.normalize_str(ocr_word['text'].strip()))
                            tmp_box.append(simplify_bbox(line['box']))
                        if (len(tmp_box) == 0):
                            # No new OCR word consumed: reuse the previous box
                            tmp_box = last_box
                        bbox.append(normalize_bbox(merge_bbox(tmp_box), size))
                        last_box = tmp_box
                    # Fill the None placeholders with a degenerate box at the
                    # next token's top-left corner
                    bbox = [([bbox[(i + 1)][0], bbox[(i + 1)][1], bbox[(i + 1)][0], bbox[(i + 1)][1]] if (b is None) else b) for (i, b) in enumerate(bbox)]
                    if (line['label'] == 'other'):
                        label = (['O'] * len(bbox))
                    else:
                        # BIO: first token B-, remaining tokens I-
                        label = ([f"I-{line['label'].upper()}"] * len(bbox))
                        label[0] = f"B-{line['label'].upper()}"
                    tokenized_inputs.update({'bbox': bbox, 'labels': label})
                    if (label[0] != 'O'):
                        # Record the entity span in document token coordinates
                        entity_id_to_index_map[line['id']] = len(entities)
                        entities.append({'start': len(tokenized_doc['input_ids']), 'end': (len(tokenized_doc['input_ids']) + len(tokenized_inputs['input_ids'])), 'label': line['label'].upper()})
                    for i in tokenized_doc:
                        tokenized_doc[i] = (tokenized_doc[i] + tokenized_inputs[i])
                # Deduplicate links and drop any touching an empty entity
                relations = list(set(relations))
                relations = [rel for rel in relations if ((rel[0] not in empty_entity) and (rel[1] not in empty_entity))]
                kvrelations = []
                for rel in relations:
                    # Keep only question<->answer pairs, oriented question->answer
                    pair = [id2label[rel[0]], id2label[rel[1]]]
                    if (pair == ['question', 'answer']):
                        kvrelations.append({'head': entity_id_to_index_map[rel[0]], 'tail': entity_id_to_index_map[rel[1]]})
                    elif (pair == ['answer', 'question']):
                        kvrelations.append({'head': entity_id_to_index_map[rel[1]], 'tail': entity_id_to_index_map[rel[0]]})
                    else:
                        continue

                def get_relation_span(rel):
                    # Token span covering both endpoints of the relation
                    bound = []
                    for entity_index in [rel['head'], rel['tail']]:
                        bound.append(entities[entity_index]['start'])
                        bound.append(entities[entity_index]['end'])
                    return (min(bound), max(bound))
                relations = sorted([{'head': rel['head'], 'tail': rel['tail'], 'start_index': get_relation_span(rel)[0], 'end_index': get_relation_span(rel)[1]} for rel in kvrelations], key=(lambda x: x['head']))
                # Emit fixed-size chunks; entities/relations fully inside a
                # chunk are re-indexed to chunk-local coordinates
                chunk_size = 512
                for (chunk_id, index) in enumerate(range(0, len(tokenized_doc['input_ids']), chunk_size)):
                    item = {}
                    for k in tokenized_doc:
                        item[k] = tokenized_doc[k][index:(index + chunk_size)]
                    entities_in_this_span = []
                    global_to_local_map = {}
                    for (entity_id, entity) in enumerate(entities):
                        if ((index <= entity['start'] < (index + chunk_size)) and (index <= entity['end'] < (index + chunk_size))):
                            # NOTE(review): mutates the shared entity dict in
                            # place; later chunks see shifted offsets — confirm
                            entity['start'] = (entity['start'] - index)
                            entity['end'] = (entity['end'] - index)
                            global_to_local_map[entity_id] = len(entities_in_this_span)
                            entities_in_this_span.append(entity)
                    relations_in_this_span = []
                    for relation in relations:
                        if ((index <= relation['start_index'] < (index + chunk_size)) and (index <= relation['end_index'] < (index + chunk_size))):
                            relations_in_this_span.append({'head': global_to_local_map[relation['head']], 'tail': global_to_local_map[relation['tail']], 'start_index': (relation['start_index'] - index), 'end_index': (relation['end_index'] - index)})
                    item.update({'id': f"{doc['id']}_{chunk_id}", 'image': image, 'entities': entities_in_this_span, 'relations': relations_in_this_span})
                    (yield (f"{doc['id']}_{chunk_id}", item))
|
def normalize_bbox(bbox, size):
    """Scale an absolute (x0, y0, x1, y1) box into the 0-1000 coordinate grid.

    size is the page (width, height); results are truncated to int.
    """
    width, height = size
    return [
        int(1000 * bbox[0] / width),
        int(1000 * bbox[1] / height),
        int(1000 * bbox[2] / width),
        int(1000 * bbox[3] / height),
    ]
|
def simplify_bbox(bbox):
    """Collapse a flat polygon coordinate list into an (x0, y0, x1, y1) box.

    Mins run over every x/y coordinate; maxes start from the second point,
    mirroring the original slice layout exactly.
    """
    x_min = min(bbox[0::2])
    y_min = min(bbox[1::2])
    x_max = max(bbox[2::2])
    y_max = max(bbox[3::2])
    return [x_min, y_min, x_max, y_max]
|
def merge_bbox(bbox_list):
    """Return the smallest (x0, y0, x1, y1) box covering every box in the list."""
    lefts, tops, rights, bottoms = zip(*bbox_list)
    return [min(lefts), min(tops), max(rights), max(bottoms)]
|
def load_image(image_path):
    """Read an image, resize it to 224x224 and return (CHW tensor, (w, h)).

    The returned size is the ORIGINAL (width, height), used later for
    bbox normalization.
    """
    raw = read_image(image_path, format='BGR')
    orig_h, orig_w = raw.shape[0], raw.shape[1]
    resize = TransformList([ResizeTransform(h=orig_h, w=orig_w, new_h=224, new_w=224)])
    tensor = torch.tensor(resize.apply_image(raw).copy()).permute(2, 0, 1)
    return (tensor, (orig_w, orig_h))
|
def get_last_checkpoint(folder):
    """Return the path of the highest-numbered checkpoint directory in
    `folder`, or None when no checkpoint directory exists.

    A checkpoint is any subdirectory whose name matches the module-level
    `_re_checkpoint` pattern; ordering uses its first captured number.
    """
    candidates = []
    for entry in os.listdir(folder):
        if _re_checkpoint.search(entry) is None:
            continue
        if os.path.isdir(os.path.join(folder, entry)):
            candidates.append(entry)
    if not candidates:
        return
    latest = max(candidates, key=lambda name: int(_re_checkpoint.search(name).groups()[0]))
    return os.path.join(folder, latest)
|
def re_score(pred_relations, gt_relations, mode='strict'):
    'Evaluate RE predictions\n\n    Args:\n        pred_relations (list) :  list of list of predicted relations (several relations in each sentence)\n        gt_relations (list) :    list of list of ground truth relations\n\n            rel = { "head": (start_idx (inclusive), end_idx (exclusive)),\n                    "tail": (start_idx (inclusive), end_idx (exclusive)),\n                    "head_type": ent_type,\n                    "tail_type": ent_type,\n                    "type": rel_type}\n\n        vocab (Vocab) :          dataset vocabulary\n        mode (str) :             in \'strict\' or \'boundaries\''
    assert (mode in ['strict', 'boundaries'])
    # Label 0 is "no relation" and is filtered out, leaving [1] as the only
    # scored relation type
    relation_types = [v for v in [0, 1] if (not (v == 0))]
    scores = {rel: {'tp': 0, 'fp': 0, 'fn': 0} for rel in (relation_types + ['ALL'])}
    n_sents = len(gt_relations)
    # Totals are only used in the summary log line below
    n_rels = sum([len([rel for rel in sent]) for sent in gt_relations])
    n_found = sum([len([rel for rel in sent]) for sent in pred_relations])
    for (pred_sent, gt_sent) in zip(pred_relations, gt_relations):
        for rel_type in relation_types:
            if (mode == 'strict'):
                # Strict: head/tail spans AND entity types must all match
                pred_rels = {(rel['head'], rel['head_type'], rel['tail'], rel['tail_type']) for rel in pred_sent if (rel['type'] == rel_type)}
                gt_rels = {(rel['head'], rel['head_type'], rel['tail'], rel['tail_type']) for rel in gt_sent if (rel['type'] == rel_type)}
            elif (mode == 'boundaries'):
                # Boundaries: only the head/tail spans must match
                pred_rels = {(rel['head'], rel['tail']) for rel in pred_sent if (rel['type'] == rel_type)}
                gt_rels = {(rel['head'], rel['tail']) for rel in gt_sent if (rel['type'] == rel_type)}
            scores[rel_type]['tp'] += len((pred_rels & gt_rels))
            scores[rel_type]['fp'] += len((pred_rels - gt_rels))
            scores[rel_type]['fn'] += len((gt_rels - pred_rels))
    # Per-type precision / recall / F1 (0 when there are no true positives)
    for rel_type in scores.keys():
        if scores[rel_type]['tp']:
            scores[rel_type]['p'] = (scores[rel_type]['tp'] / (scores[rel_type]['fp'] + scores[rel_type]['tp']))
            scores[rel_type]['r'] = (scores[rel_type]['tp'] / (scores[rel_type]['fn'] + scores[rel_type]['tp']))
        else:
            (scores[rel_type]['p'], scores[rel_type]['r']) = (0, 0)
        if (not ((scores[rel_type]['p'] + scores[rel_type]['r']) == 0)):
            scores[rel_type]['f1'] = (((2 * scores[rel_type]['p']) * scores[rel_type]['r']) / (scores[rel_type]['p'] + scores[rel_type]['r']))
        else:
            scores[rel_type]['f1'] = 0
    # Micro-averaged metrics over the pooled counts of all relation types
    tp = sum([scores[rel_type]['tp'] for rel_type in relation_types])
    fp = sum([scores[rel_type]['fp'] for rel_type in relation_types])
    fn = sum([scores[rel_type]['fn'] for rel_type in relation_types])
    if tp:
        precision = (tp / (tp + fp))
        recall = (tp / (tp + fn))
        f1 = (((2 * precision) * recall) / (precision + recall))
    else:
        (precision, recall, f1) = (0, 0, 0)
    scores['ALL']['p'] = precision
    scores['ALL']['r'] = recall
    scores['ALL']['f1'] = f1
    scores['ALL']['tp'] = tp
    scores['ALL']['fp'] = fp
    scores['ALL']['fn'] = fn
    # Macro-averaged metrics: unweighted mean of the per-type scores
    scores['ALL']['Macro_f1'] = np.mean([scores[ent_type]['f1'] for ent_type in relation_types])
    scores['ALL']['Macro_p'] = np.mean([scores[ent_type]['p'] for ent_type in relation_types])
    scores['ALL']['Macro_r'] = np.mean([scores[ent_type]['r'] for ent_type in relation_types])
    logger.info(f'RE Evaluation in *** {mode.upper()} *** mode')
    logger.info('processed {} sentences with {} relations; found: {} relations; correct: {}.'.format(n_sents, n_rels, n_found, tp))
    logger.info('\tALL\t TP: {};\tFP: {};\tFN: {}'.format(scores['ALL']['tp'], scores['ALL']['fp'], scores['ALL']['fn']))
    logger.info('\t\t(m avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (micro)'.format(precision, recall, f1))
    logger.info('\t\t(M avg): precision: {:.2f};\trecall: {:.2f};\tf1: {:.2f} (Macro)\n'.format(scores['ALL']['Macro_p'], scores['ALL']['Macro_r'], scores['ALL']['Macro_f1']))
    for rel_type in relation_types:
        logger.info('\t{}: \tTP: {};\tFP: {};\tFN: {};\tprecision: {:.2f};\trecall: {:.2f};\tf1: {:.2f};\t{}'.format(rel_type, scores[rel_type]['tp'], scores[rel_type]['fp'], scores[rel_type]['fn'], scores[rel_type]['p'], scores[rel_type]['r'], scores[rel_type]['f1'], (scores[rel_type]['tp'] + scores[rel_type]['fp'])))
    return scores
|
@dataclass
class ModelArguments():
    '\n    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.\n    '
    # metadata['help'] entries double as CLI help text — presumably parsed by
    # transformers.HfArgumentParser; confirm against the caller.
    model_name_or_path: str = field(metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'})
    config_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'})
    tokenizer_name: Optional[str] = field(default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'})
    cache_dir: Optional[str] = field(default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'})
    model_revision: str = field(default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'})
    use_auth_token: bool = field(default=False, metadata={'help': 'Will use the token generated when running `transformers-cli login` (necessary to use this script with private models).'})
|
class BiaffineAttention(torch.nn.Module):
    """Biaffine attention operator for binary relation classification.

    Combines a bias-free bilinear term with a linear term over the
    concatenated inputs, following "End-to-end neural relation extraction
    using deep biaffine attention" (https://arxiv.org/abs/1812.11275).

    Args:
        in_features (int): feature dimension of each of the two inputs.
        out_features (int): feature dimension of the output.

    Shape:
        - x_1, x_2: `(N, *, in_features)`
        - Output: `(N, *, out_features)`
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.bilinear = torch.nn.Bilinear(in_features, in_features, out_features, bias=False)
        self.linear = torch.nn.Linear(2 * in_features, out_features, bias=True)
        self.reset_parameters()

    def forward(self, x_1, x_2):
        # bilinear(x1, x2) + W.[x1; x2] + b
        combined = torch.cat((x_1, x_2), dim=-1)
        return self.bilinear(x_1, x_2) + self.linear(combined)

    def reset_parameters(self):
        """Re-initialize both sub-layers."""
        self.bilinear.reset_parameters()
        self.linear.reset_parameters()
|
class REDecoder(nn.Module):
    # Relation-extraction head: classifies (head, tail) entity pairs as
    # linked / not linked using per-pair FFNNs and biaffine attention.
    def __init__(self, config, input_size):
        super().__init__()
        # Embeds the entity label (3 classes) alongside the token hidden state
        self.entity_emb = nn.Embedding(3, input_size, scale_grad_by_freq=True)
        projection = nn.Sequential(nn.Linear((input_size * 2), config.hidden_size), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob), nn.Linear(config.hidden_size, (config.hidden_size // 2)), nn.ReLU(), nn.Dropout(config.hidden_dropout_prob))
        # Separate (deep-copied) projections for head and tail entities
        self.ffnn_head = copy.deepcopy(projection)
        self.ffnn_tail = copy.deepcopy(projection)
        self.rel_classifier = BiaffineAttention((config.hidden_size // 2), 2)
        self.loss_fct = CrossEntropyLoss()

    def build_relation(self, relations, entities):
        # Builds the training pair set per document: gold (positive) pairs
        # plus all label-1 -> label-2 candidate pairs as negatives.
        batch_size = len(relations)
        new_relations = []
        for b in range(batch_size):
            if (len(entities[b]['start']) <= 2):
                # NOTE(review): documents with <= 2 entities are replaced by a
                # dummy entity pair wholesale — confirm this fallback is intended
                entities[b] = {'end': [1, 1], 'label': [0, 0], 'start': [0, 0]}
            # Candidates: every (i, j) where entity i has label 1 and j label 2
            all_possible_relations = set([(i, j) for i in range(len(entities[b]['label'])) for j in range(len(entities[b]['label'])) if ((entities[b]['label'][i] == 1) and (entities[b]['label'][j] == 2))])
            if (len(all_possible_relations) == 0):
                # Guarantee at least one candidate so the classifier has input
                all_possible_relations = set([(0, 1)])
            positive_relations = set(list(zip(relations[b]['head'], relations[b]['tail'])))
            negative_relations = (all_possible_relations - positive_relations)
            # Keep only gold pairs that are also valid candidates
            positive_relations = set([i for i in positive_relations if (i in all_possible_relations)])
            reordered_relations = (list(positive_relations) + list(negative_relations))
            relation_per_doc = {'head': [], 'tail': [], 'label': []}
            relation_per_doc['head'] = [i[0] for i in reordered_relations]
            relation_per_doc['tail'] = [i[1] for i in reordered_relations]
            # Positives come first, so labels are 1s followed by 0s
            relation_per_doc['label'] = (([1] * len(positive_relations)) + ([0] * (len(reordered_relations) - len(positive_relations))))
            assert (len(relation_per_doc['head']) != 0)
            new_relations.append(relation_per_doc)
        return (new_relations, entities)

    def get_predicted_relations(self, logits, relations, entities):
        # Converts argmax==1 pair logits into relation dicts with entity
        # spans and types, for downstream re_score-style evaluation.
        pred_relations = []
        for (i, pred_label) in enumerate(logits.argmax((- 1))):
            if (pred_label != 1):
                continue
            rel = {}
            rel['head_id'] = relations['head'][i]
            rel['head'] = (entities['start'][rel['head_id']], entities['end'][rel['head_id']])
            rel['head_type'] = entities['label'][rel['head_id']]
            rel['tail_id'] = relations['tail'][i]
            rel['tail'] = (entities['start'][rel['tail_id']], entities['end'][rel['tail_id']])
            rel['tail_type'] = entities['label'][rel['tail_id']]
            rel['type'] = 1
            pred_relations.append(rel)
        return pred_relations

    def forward(self, hidden_states, entities, relations):
        # Scores all candidate pairs per document; returns (loss, predictions).
        (batch_size, max_n_words, context_dim) = hidden_states.size()
        device = hidden_states.device
        (relations, entities) = self.build_relation(relations, entities)
        loss = 0
        all_pred_relations = []
        all_logits = []
        all_labels = []
        for b in range(batch_size):
            head_entities = torch.tensor(relations[b]['head'], device=device)
            tail_entities = torch.tensor(relations[b]['tail'], device=device)
            relation_labels = torch.tensor(relations[b]['label'], device=device)
            entities_start_index = torch.tensor(entities[b]['start'], device=device)
            entities_labels = torch.tensor(entities[b]['label'], device=device)
            head_index = entities_start_index[head_entities]
            head_label = entities_labels[head_entities]
            head_label_repr = self.entity_emb(head_label)
            tail_index = entities_start_index[tail_entities]
            tail_label = entities_labels[tail_entities]
            tail_label_repr = self.entity_emb(tail_label)
            # Pair representation: first-token hidden state + label embedding
            head_repr = torch.cat((hidden_states[b][head_index], head_label_repr), dim=(- 1))
            tail_repr = torch.cat((hidden_states[b][tail_index], tail_label_repr), dim=(- 1))
            heads = self.ffnn_head(head_repr)
            tails = self.ffnn_tail(tail_repr)
            logits = self.rel_classifier(heads, tails)
            pred_relations = self.get_predicted_relations(logits, relations[b], entities[b])
            all_pred_relations.append(pred_relations)
            all_logits.append(logits)
            all_labels.append(relation_labels)
        # Single cross-entropy over all pairs of all documents in the batch
        all_logits = torch.cat(all_logits, 0)
        all_labels = torch.cat(all_labels, 0)
        loss = self.loss_fct(all_logits, all_labels)
        return (loss, all_pred_relations)
|
class FunsdTrainer(Trainer):

    def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]:
        """
        Prepare :obj:`inputs` before feeding them to the model: move anything
        device-aware onto the trainer's device and attach cached `mems` state
        when the model uses a past index.
        """
        for key, value in inputs.items():
            # Duck-typed device move: covers tensors and tensor-like wrappers
            if hasattr(value, 'to') and hasattr(value, 'device'):
                inputs[key] = value.to(self.args.device)
        if self.args.past_index >= 0 and self._past is not None:
            inputs['mems'] = self._past
        return inputs
|
class XfunSerTrainer(FunsdTrainer):
    """Semantic-entity-recognition trainer for XFUN; inherits FunsdTrainer unchanged."""
    pass
|
class XfunReTrainer(FunsdTrainer):
    def __init__(self, **kwargs):
        # Track 'relations' as an extra label column so prediction_step
        # returns the ground-truth relations alongside the standard labels.
        super().__init__(**kwargs)
        self.label_names.append('relations')
    def prediction_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool, ignore_keys: Optional[List[str]] = None) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        # Runs one no-grad forward pass (under autocast when AMP is enabled)
        # and returns the raw model outputs plus the label columns named in
        # self.label_names. Note: prediction_loss_only and ignore_keys are
        # accepted for interface compatibility but not used here.
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            if self.use_amp:
                with autocast():
                    outputs = model(**inputs)
            else:
                outputs = model(**inputs)
        labels = tuple((inputs.get(name) for name in self.label_names))
        return (outputs, labels)
def prediction_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> PredictionOutput:
'\n Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`.\n\n Works both with or without labels.\n '
if (not isinstance(dataloader.dataset, collections.abc.Sized)):
raise ValueError('dataset must implement __len__')
prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.args.prediction_loss_only)
if (self.args.deepspeed and (not self.args.do_train)):
logger.info('Detected the deepspeed argument but it will not be used for evaluation')
model = self._wrap_model(self.model, training=False)
if ((not self.is_in_train) and self.args.fp16_full_eval):
model = model.half().to(self.args.device)
batch_size = dataloader.batch_size
num_examples = self.num_examples(dataloader)
logger.info('***** Running %s *****', description)
logger.info(' Num examples = %d', num_examples)
logger.info(' Batch size = %d', batch_size)
model.eval()
self.callback_handler.eval_dataloader = dataloader
re_labels = None
pred_relations = None
entities = None
for (step, inputs) in enumerate(dataloader):
(outputs, labels) = self.prediction_step(model, inputs, prediction_loss_only, ignore_keys=ignore_keys)
re_labels = (labels[1] if (re_labels is None) else (re_labels + labels[1]))
pred_relations = (outputs.pred_relations if (pred_relations is None) else (pred_relations + outputs.pred_relations))
entities = (outputs.entities if (entities is None) else (entities + outputs.entities))
self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control)
gt_relations = []
for b in range(len(re_labels)):
rel_sent = []
for (head, tail) in zip(re_labels[b]['head'], re_labels[b]['tail']):
rel = {}
rel['head_id'] = head
rel['head'] = (entities[b]['start'][rel['head_id']], entities[b]['end'][rel['head_id']])
rel['head_type'] = entities[b]['label'][rel['head_id']]
rel['tail_id'] = tail
rel['tail'] = (entities[b]['start'][rel['tail_id']], entities[b]['end'][rel['tail_id']])
rel['tail_type'] = entities[b]['label'][rel['tail_id']]
rel['type'] = 1
rel_sent.append(rel)
gt_relations.append(rel_sent)
re_metrics = self.compute_metrics(EvalPrediction(predictions=pred_relations, label_ids=gt_relations))
re_metrics = {'precision': re_metrics['ALL']['p'], 'recall': re_metrics['ALL']['r'], 'f1': re_metrics['ALL']['f1']}
re_metrics[f'{metric_key_prefix}_loss'] = outputs.loss.mean().item()
metrics = {}
for key in list(re_metrics.keys()):
if (not key.startswith(f'{metric_key_prefix}_')):
metrics[f'{metric_key_prefix}_{key}'] = re_metrics.pop(key)
else:
metrics[f'{key}'] = re_metrics.pop(key)
return metrics
def evaluate(self, eval_dataset: Optional[Dataset]=None, ignore_keys: Optional[List[str]]=None, metric_key_prefix: str='eval') -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n\n The calling script will be responsible for providing a method to compute metrics, as they are task-dependent\n (pass it to the init :obj:`compute_metrics` argument).\n\n You can also subclass and override this method to inject custom behavior.\n\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`,\n columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the\n :obj:`__len__` method.\n ignore_keys (:obj:`Lst[str]`, `optional`):\n A list of keys in the output of your model (if it is a dictionary) that should be ignored when\n gathering predictions.\n metric_key_prefix (:obj:`str`, `optional`, defaults to :obj:`"eval"`):\n An optional prefix to be used as the metrics key prefix. For example the metrics "bleu" will be named\n "eval_bleu" if the prefix is "eval" (default)\n\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The\n dictionary also contains the epoch number which comes from the training state.\n '
if ((eval_dataset is not None) and (not isinstance(eval_dataset, collections.abc.Sized))):
raise ValueError('eval_dataset must implement __len__')
self.args.local_rank = (- 1)
eval_dataloader = self.get_eval_dataloader(eval_dataset)
self.args.local_rank = torch.distributed.get_rank()
start_time = time.time()
metrics = self.prediction_loop(eval_dataloader, description='Evaluation', prediction_loss_only=(True if (self.compute_metrics is None) else None), ignore_keys=ignore_keys, metric_key_prefix=metric_key_prefix)
n_samples = len((eval_dataset if (eval_dataset is not None) else self.eval_dataset))
metrics.update(speed_metrics(metric_key_prefix, start_time, n_samples))
self.log(metrics)
self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, metrics)
return metrics
def create_optimizer(self, speedup_r=4.0):
if (self.optimizer is None):
decay_parameters = get_parameter_names(self.model, [torch.nn.LayerNorm])
decay_parameters = [name for name in decay_parameters if ('bias' not in name)]
speedup_parameters = [name for name in get_parameter_names(self.model, []) if (('extractor' in name) and ('rel_classifier' not in name))]
optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n in speedup_parameters))], 'weight_decay': 0.0, 'lr': (self.args.learning_rate * speedup_r)}, {'params': [p for (n, p) in self.model.named_parameters() if ((n in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': self.args.weight_decay, 'lr': self.args.learning_rate}, {'params': [p for (n, p) in self.model.named_parameters() if ((n not in decay_parameters) and (n not in speedup_parameters))], 'weight_decay': 0.0, 'lr': self.args.learning_rate}]
optimizer_cls = (Adafactor if self.args.adafactor else AdamW)
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {'scale_parameter': False, 'relative_step': False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {'betas': (self.args.adam_beta1, self.args.adam_beta2), 'eps': self.args.adam_epsilon}
if (self.sharded_ddp == ShardedDDPOption.SIMPLE):
self.optimizer = OSS(params=optimizer_grouped_parameters, optim=optimizer_cls, **optimizer_kwargs)
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)
if is_sagemaker_mp_enabled():
import smdistributed.modelparallel.torch as smp
self.optimizer = smp.DistributedOptimizer(self.optimizer)
|
@dataclass
class ReOutput(ModelOutput):
    """Output container for relation-extraction models (field order is part of the interface)."""
    # Relation-classification loss (None at inference).
    loss: Optional[torch.FloatTensor] = None
    # Raw relation logits.
    logits: torch.FloatTensor = None
    # Per-layer hidden states, when the model is asked to return them.
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    # Per-layer attention weights, when the model is asked to return them.
    attentions: Optional[Tuple[torch.FloatTensor]] = None
    # Entity annotations carried through from the inputs (start/end/label).
    entities: Optional[Dict] = None
    # Candidate relations carried through from the inputs.
    relations: Optional[Dict] = None
    # Relations predicted by the model, in the same dict format.
    pred_relations: Optional[Dict] = None
|
def main():
    """Fine-tune / evaluate / predict a token-classification model on FUNSD.

    Parses (ModelArguments, DataTrainingArguments, TrainingArguments) from a
    single JSON file or the command line, tokenizes the dataset with word-level
    label/bbox alignment, then runs train / eval / predict per the arguments.
    """
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if (len(sys.argv) == 2) and sys.argv[1].endswith('.json'):
        # A single .json argument carries all hyper-parameters.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect a resumable checkpoint in the output directory.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if (last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None:
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    # Fixed: the two f-strings were concatenated without a separator, producing
    # e.g. "n_gpu: 1distributed training: ...".
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    datasets = load_dataset(os.path.abspath(LiLTfinetune.data.datasets.funsd.__file__))
    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['validation'].column_names
        features = datasets['validation'].features
    text_column_name = 'tokens' if 'tokens' in column_names else column_names[0]
    label_column_name = (
        f'{data_args.task_name}_tags' if f'{data_args.task_name}_tags' in column_names else column_names[1]
    )
    remove_columns = column_names

    def get_label_list(labels):
        # Collect the sorted set of unique labels across all examples.
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list

    if isinstance(features[label_column_name].feature, ClassLabel):
        label_list = features[label_column_name].feature.names
        # ClassLabel columns already store label ids, so the mapping is the identity.
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None), add_prefix_space=True)
    model = AutoModelForTokenClassification.from_pretrained(model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this requirement')
    padding = 'max_length' if data_args.pad_to_max_length else False

    def tokenize_and_align_labels(examples):
        """Tokenize pre-split words and propagate word-level labels/bboxes to sub-tokens."""
        tokenized_inputs = tokenizer(examples[text_column_name], padding=padding, truncation=True, return_overflowing_tokens=True, is_split_into_words=True)
        labels = []
        bboxes = []
        images = []
        for batch_index in range(len(tokenized_inputs['input_ids'])):
            word_ids = tokenized_inputs.word_ids(batch_index=batch_index)
            # Overflowing windows map back to their originating example.
            org_batch_index = tokenized_inputs['overflow_to_sample_mapping'][batch_index]
            label = examples[label_column_name][org_batch_index]
            bbox = examples['bboxes'][org_batch_index]
            image = examples['image'][org_batch_index]
            previous_word_idx = None
            label_ids = []
            bbox_inputs = []
            for word_idx in word_ids:
                if word_idx is None:
                    # Special tokens: ignored by the loss (-100) and given a null box.
                    label_ids.append(-100)
                    bbox_inputs.append([0, 0, 0, 0])
                elif word_idx != previous_word_idx:
                    # First sub-token of a word carries the word's label.
                    label_ids.append(label_to_id[label[word_idx]])
                    bbox_inputs.append(bbox[word_idx])
                else:
                    # Later sub-tokens: labeled only when label_all_tokens is set.
                    label_ids.append(label_to_id[label[word_idx]] if data_args.label_all_tokens else -100)
                    bbox_inputs.append(bbox[word_idx])
                previous_word_idx = word_idx
            labels.append(label_ids)
            bboxes.append(bbox_inputs)
            images.append(image)
        tokenized_inputs['labels'] = labels
        tokenized_inputs['bbox'] = bboxes
        tokenized_inputs['image'] = images
        return tokenized_inputs

    if training_args.do_train:
        if 'train' not in datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
        train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    if training_args.do_eval:
        if 'validation' not in datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets['validation']
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
        eval_dataset = eval_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    if training_args.do_predict:
        if 'test' not in datasets:
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets['test']
        if data_args.max_test_samples is not None:
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
        test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True, remove_columns=remove_columns, num_proc=data_args.preprocessing_num_workers, load_from_cache_file=(not data_args.overwrite_cache))
    # Pad to a multiple of 8 under fp16 for tensor-core efficiency.
    data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)
    metric = load_metric('seqeval')

    def compute_metrics(p):
        """Compute seqeval metrics over non-ignored (-100) positions."""
        (predictions, labels) = p
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if l != -100] for (prediction, label) in zip(predictions, labels)]
        true_labels = [[label_list[l] for (p, l) in zip(prediction, label) if l != -100] for (prediction, label) in zip(predictions, labels)]
        results = metric.compute(predictions=true_predictions, references=true_labels)
        if data_args.return_entity_level_metrics:
            # Flatten nested per-entity dicts into top-level "{entity}_{metric}" keys.
            final_results = {}
            for (key, value) in results.items():
                if isinstance(value, dict):
                    for (n, v) in value.items():
                        final_results[f'{key}_{n}'] = v
                else:
                    final_results[key] = value
            return final_results
        else:
            return {'precision': results['overall_precision'], 'recall': results['overall_recall'], 'f1': results['overall_f1'], 'accuracy': results['overall_accuracy']}

    trainer = Trainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    if training_args.do_train:
        checkpoint = last_checkpoint if last_checkpoint else None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        max_train_samples = data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
    if training_args.do_predict:
        logger.info('*** Predict ***')
        (predictions, labels, metrics) = trainer.predict(test_dataset)
        predictions = np.argmax(predictions, axis=2)
        true_predictions = [[label_list[p] for (p, l) in zip(prediction, label) if l != -100] for (prediction, label) in zip(predictions, labels)]
        trainer.log_metrics('test', metrics)
        trainer.save_metrics('test', metrics)
        # Write space-separated label predictions, one document per line.
        output_test_predictions_file = os.path.join(training_args.output_dir, 'test_predictions.txt')
        if trainer.is_world_process_zero():
            with open(output_test_predictions_file, 'w') as writer:
                for prediction in true_predictions:
                    writer.write(' '.join(prediction) + '\n')
|
def _mp_fn(index):
    """Entry point for TPU spawning (e.g. ``xla_spawn``); ``index`` is the process ordinal and is unused."""
    main()
|
def main():
    """Fine-tune / evaluate a relation-extraction model on XFUN.

    Parses (ModelArguments, XFUNDataTrainingArguments, TrainingArguments) from
    a single JSON file or the command line, then runs train / eval with the
    ``XfunReTrainer``.

    NOTE(review): this shadows an earlier ``main`` defined in the same module
    (the file appears to concatenate two scripts) — confirm only one is intended
    per module.
    """
    parser = HfArgumentParser((ModelArguments, XFUNDataTrainingArguments, TrainingArguments))
    if (len(sys.argv) == 2) and sys.argv[1].endswith('.json'):
        # A single .json argument carries all hyper-parameters.
        (model_args, data_args, training_args) = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        (model_args, data_args, training_args) = parser.parse_args_into_dataclasses()
    # Detect a resumable checkpoint in the output directory.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and (not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if (last_checkpoint is None) and (len(os.listdir(training_args.output_dir)) > 0):
            raise ValueError(f'Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome.')
        elif last_checkpoint is not None:
            logger.info(f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch.')
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', handlers=[logging.StreamHandler(sys.stdout)])
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank) else logging.WARN)
    # Fixed: the two f-strings were concatenated without a separator, producing
    # e.g. "n_gpu: 1distributed training: ...".
    logger.warning(
        f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, '
        + f'distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}'
    )
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f'Training/evaluation parameters {training_args}')
    set_seed(training_args.seed)
    datasets = load_dataset(os.path.abspath(LiLTfinetune.data.datasets.xfun.__file__), f'xfun.{data_args.lang}', additional_langs=data_args.additional_langs, keep_in_memory=True)
    if training_args.do_train:
        column_names = datasets['train'].column_names
        features = datasets['train'].features
    else:
        column_names = datasets['validation'].column_names
        features = datasets['validation'].features
    text_column_name = 'input_ids'
    label_column_name = 'labels'
    remove_columns = column_names

    def get_label_list(labels):
        # Collect the sorted set of unique labels across all examples.
        unique_labels = set()
        for label in labels:
            unique_labels = unique_labels | set(label)
        label_list = list(unique_labels)
        label_list.sort()
        return label_list

    if isinstance(features[label_column_name].feature, ClassLabel):
        label_list = features[label_column_name].feature.names
        # ClassLabel columns already store label ids, so the mapping is the identity.
        label_to_id = {i: i for i in range(len(label_list))}
    else:
        label_list = get_label_list(datasets['train'][label_column_name])
        label_to_id = {l: i for (i, l) in enumerate(label_list)}
    num_labels = len(label_list)
    config = AutoConfig.from_pretrained((model_args.config_name if model_args.config_name else model_args.model_name_or_path), num_labels=num_labels, finetuning_task=data_args.task_name, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    tokenizer = AutoTokenizer.from_pretrained((model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path), cache_dir=model_args.cache_dir, use_fast=True, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    model = AutoModelForRelationExtraction.from_pretrained(model_args.model_name_or_path, from_tf=bool('.ckpt' in model_args.model_name_or_path), config=config, cache_dir=model_args.cache_dir, revision=model_args.model_revision, use_auth_token=(True if model_args.use_auth_token else None))
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError('This example script only works for models that have a fast tokenizer. Checkout the big table of models at https://huggingface.co/transformers/index.html#bigtable to find the model types that meet this requirement')
    padding = 'max_length' if data_args.pad_to_max_length else False
    if training_args.do_train:
        if 'train' not in datasets:
            raise ValueError('--do_train requires a train dataset')
        train_dataset = datasets['train']
        if data_args.max_train_samples is not None:
            train_dataset = train_dataset.select(range(data_args.max_train_samples))
    if training_args.do_eval:
        if 'validation' not in datasets:
            raise ValueError('--do_eval requires a validation dataset')
        eval_dataset = datasets['validation']
        if data_args.max_val_samples is not None:
            eval_dataset = eval_dataset.select(range(data_args.max_val_samples))
    if training_args.do_predict:
        # NOTE(review): test_dataset is selected but never passed to the trainer
        # below — confirm whether prediction was intended for the RE task.
        if 'test' not in datasets:
            raise ValueError('--do_predict requires a test dataset')
        test_dataset = datasets['test']
        if data_args.max_test_samples is not None:
            test_dataset = test_dataset.select(range(data_args.max_test_samples))
    # Pad to a multiple of 8 under fp16 for tensor-core efficiency.
    data_collator = DataCollatorForKeyValueExtraction(tokenizer, pad_to_multiple_of=(8 if training_args.fp16 else None), padding=padding, max_length=512)

    def compute_metrics(p):
        """Score predicted relations against ground truth with boundary matching."""
        (pred_relations, gt_relations) = p
        score = re_score(pred_relations, gt_relations, mode='boundaries')
        return score

    trainer = XfunReTrainer(model=model, args=training_args, train_dataset=(train_dataset if training_args.do_train else None), eval_dataset=(eval_dataset if training_args.do_eval else None), tokenizer=tokenizer, data_collator=data_collator, compute_metrics=compute_metrics)
    if training_args.do_train:
        checkpoint = last_checkpoint if last_checkpoint else None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        trainer.save_model()
        max_train_samples = data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        metrics['train_samples'] = min(max_train_samples, len(train_dataset))
        trainer.log_metrics('train', metrics)
        trainer.save_metrics('train', metrics)
        trainer.save_state()
    if training_args.do_eval:
        logger.info('*** Evaluate ***')
        metrics = trainer.evaluate()
        max_val_samples = data_args.max_val_samples if data_args.max_val_samples is not None else len(eval_dataset)
        metrics['eval_samples'] = min(max_val_samples, len(eval_dataset))
        trainer.log_metrics('eval', metrics)
        trainer.save_metrics('eval', metrics)
|
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.