code stringlengths 17 6.64M |
|---|
def store_args(method):
    """Decorator for ``__init__``-like methods: records every argument the
    wrapped method receives (positional, keyword, and defaults) as an
    attribute on ``self`` before invoking the method.
    """
    argspec = inspect.getfullargspec(method)
    defaults = {}
    if argspec.defaults is not None:
        n_defaults = len(argspec.defaults)
        defaults = dict(zip(argspec.args[-n_defaults:], argspec.defaults))
    if argspec.kwonlydefaults is not None:
        defaults.update(argspec.kwonlydefaults)
    arg_names = argspec.args[1:]  # drop `self`

    @functools.wraps(method)
    def wrapper(*positional_args, **keyword_args):
        self = positional_args[0]
        # Defaults first, then the explicitly-passed positionals/keywords
        # override them.
        bound = dict(defaults)
        bound.update(zip(arg_names, positional_args[1:]))
        bound.update(keyword_args)
        self.__dict__.update(bound)
        return method(*positional_args, **keyword_args)
    return wrapper
|
def import_function(spec):
    """Import and return the object named by a string of the form
    ``"pkg.module:fn_name"``.
    """
    (mod_name, fn_name) = spec.split(':')
    target_module = importlib.import_module(mod_name)
    return getattr(target_module, fn_name)
|
def flatten_grads(var_list, grads):
    """Flatten the gradients `grads` of the variables in `var_list` into a
    single 1-D tensor (concatenated in variable order).
    """
    # U.numel(v) — project helper, presumably the total element count of
    # variable v (TODO confirm); each gradient is reshaped to that length.
    return tf.concat([tf.reshape(grad, [U.numel(v)]) for (v, grad) in zip(var_list, grads)], 0)
|
def nn(input, layers_sizes, reuse=None, flatten=False, name=''):
    """Build a simple fully-connected network over `input`.

    ReLU is applied after every layer except the last. With flatten=True
    (only valid when the final layer has a single unit) the output is
    reshaped to a 1-D tensor.
    """
    last = len(layers_sizes) - 1
    for i, size in enumerate(layers_sizes):
        layer_name = '{}_{}'.format(name, i)
        input = tf.layers.dense(inputs=input, units=size,
                                kernel_initializer=tf.contrib.layers.xavier_initializer(),
                                reuse=reuse, name=layer_name)
        if i < last:
            input = tf.nn.relu(input)
    if flatten:
        assert (layers_sizes[(- 1)] == 1)
        input = tf.reshape(input, [(- 1)])
    return input
|
def install_mpi_excepthook():
    """Install a sys.excepthook that aborts the whole MPI job when any rank
    dies with an uncaught exception (otherwise the surviving ranks would
    hang waiting for the dead one).
    """
    import sys
    from mpi4py import MPI
    old_hook = sys.excepthook
    def new_hook(a, b, c):
        # Report through the previous hook first and flush so the
        # traceback is visible before the hard abort below.
        old_hook(a, b, c)
        sys.stdout.flush()
        sys.stderr.flush()
        MPI.COMM_WORLD.Abort()
    sys.excepthook = new_hook
|
def mpi_fork(n, binding='core'):
    """Re-launch the current script under mpirun with `n` workers.

    Returns "parent" for the original launching process and "child" for
    the MPI workers (also when n <= 1, where no re-launch happens).
    """
    if (n <= 1):
        return 'child'
    if (os.getenv('IN_MPI') is None):
        env = os.environ.copy()
        # IN_MPI marks the children so they take the else-branch below;
        # single-threaded BLAS so ranks don't oversubscribe cores.
        env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
        if (platform.system() == 'Darwin'):
            # macOS mpirun: no binding / run-as-root flags.
            args = ['mpirun', '-np', str(n), sys.executable]
        else:
            args = ['mpirun', '-np', str(n), '-bind-to', binding, '-allow-run-as-root', sys.executable]
        args += sys.argv
        subprocess.check_call(args, env=env)
        return 'parent'
    else:
        install_mpi_excepthook()
        return 'child'
|
def convert_episode_to_batch_major(episode):
    """Convert an episode dict so the batch dimension comes first.

    Each value is copied into a numpy array and its first two axes
    (time, batch) are swapped to (batch, time).
    """
    return {key: np.array(value).copy().swapaxes(0, 1)
            for key, value in episode.items()}
|
def transitions_in_episode_batch(episode_batch):
    """Total number of transitions in a batch-major episode batch
    (number of rollouts times rollout length, read off the 'u' entry).
    """
    n_rollouts, horizon = episode_batch['u'].shape[:2]
    return n_rollouts * horizon
|
def reshape_for_broadcasting(source, target):
    """Reshape `source` to shape [1, ..., 1, -1] with the same rank as
    `target` and cast it to `target`'s dtype, so it broadcasts against
    `target` (used before broadcasting it with MPI).
    """
    dim = len(target.get_shape())
    shape = (([1] * (dim - 1)) + [(- 1)])
    return tf.reshape(tf.cast(source, target.dtype), shape)
|
def split_observation_np(env_name, obs):
    """Split a flat Fetch observation into (non-goal part, achieved goal).

    For the three supported Fetch envs the 25-dim observation is sliced
    into its named components; copies of the gripper position (first 3
    dims) and the object position (dims 3:6) are returned. For any other
    env two empty arrays are returned.
    """
    obs_excludes_goal = np.zeros(0)
    obs_achieved_goal = np.zeros(0)
    if env_name in ['FetchPush-v1', 'FetchSlide-v1', 'FetchPickAndPlace-v1']:
        assert obs.shape[-1] == 25, 'Observation dimension changed.'
        # Components in order: grip_pos, object_pos, object_rel_pos,
        # gripper_state, object_rot, object_velp, object_velr,
        # grip_velp, gripper_vel.
        parts = np.split(obs, [3, 6, 9, 11, 14, 17, 20, 23], axis=-1)
        grip_pos, object_pos = parts[0], parts[1]
        obs_achieved_goal = object_pos.copy()
        obs_excludes_goal = grip_pos.copy()
    return (obs_excludes_goal, obs_achieved_goal)
|
def split_observation_tf(env_name, o):
    """TensorFlow analogue of split_observation_np: split a 25-dim Fetch
    observation tensor and return (gripper position, object position).

    NOTE(review): unlike the numpy version, there is no fallback return
    for unsupported env names — the function returns None implicitly.
    """
    dimo = o.get_shape().as_list()[(- 1)]
    if (env_name in ['FetchPush-v1', 'FetchSlide-v1', 'FetchPickAndPlace-v1']):
        assert (dimo == 25), 'Observation dimension changed.'
        (grip_pos, object_pos, object_rel_pos, gripper_state, object_rot, object_velp, object_velr, grip_velp, gripper_vel) = tf.split(o, [3, 3, 3, 2, 3, 3, 3, 3, 2], axis=(- 1))
        obs_achieved_goal = object_pos
        obs_excludes_goal = grip_pos
        return (obs_excludes_goal, obs_achieved_goal)
|
def make_dir(filename):
    """Create the directory that will contain `filename`, if needed.

    Fixes two defects of the original: it raised FileNotFoundError when
    `filename` had no directory component (``os.makedirs('')``), and its
    exists-then-create sequence was racy when several processes (e.g.
    MPI ranks) created the folder concurrently. ``exist_ok=True`` makes
    the call idempotent and race-free.
    """
    folder = os.path.dirname(filename)
    if folder:
        os.makedirs(folder, exist_ok=True)
|
def save_video(ims, filename, lib='cv2'):
    """Save a stack of RGB frames `ims` (array-like, shape (T, H, W, 3))
    to `filename` at 30 fps.

    lib: 'cv2' writes an MJPG-encoded video (frames converted RGB->BGR),
    'imageio' uses imageio's writer. Any other value raises ValueError
    (the original fell through and crashed with a NameError on the
    undefined `writer`).
    """
    if lib not in ('cv2', 'imageio'):
        raise ValueError('Unsupported video lib: %s' % (lib,))
    # Make sure the output directory exists (no-op for bare filenames).
    folder = os.path.dirname(filename)
    if folder:
        os.makedirs(folder, exist_ok=True)
    fps = 30.0
    (height, width, _) = ims[0].shape
    if lib == 'cv2':
        import cv2
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        writer = cv2.VideoWriter(filename, fourcc, fps, (width, height))
        for i in range(ims.shape[0]):
            # OpenCV expects BGR uint8 frames.
            writer.write(cv2.cvtColor(np.uint8(ims[i]), cv2.COLOR_RGB2BGR))
        writer.release()
    else:
        import imageio
        writer = imageio.get_writer(filename, fps=fps)
        for i in range(ims.shape[0]):
            writer.append_data(ims[i])
        writer.close()
|
def dumpJson(dirname, episodes, epoch, rank):
    """Dump the 'o' (observation) field of every episode to
    ``<dirname>/rollout_<epoch>_<rank>.txt`` as JSON.

    Fix: the accumulator list was named ``os``, shadowing the os module.
    Note: each episode's 'o' entry is converted from a numpy array to a
    plain list *in place* (preserved from the original behavior).
    """
    observations = []
    for episode in episodes:
        episode['o'] = episode['o'].tolist()
        observations.append(episode['o'])
    with open((dirname + '/rollout_{0}_{1}.txt'.format(epoch, rank)), 'w') as file:
        file.write(json.dumps(observations))
|
def loadJson(dirname, epoch, rank):
    """Load the observations written by dumpJson for the given epoch/rank
    from ``<dirname>/rollout_<epoch>_<rank>.txt``.
    """
    path = dirname + '/rollout_{0}_{1}.txt'.format(epoch, rank)
    with open(path, 'r') as file:
        contents = file.read()
    return json.loads(contents)
|
def save_weight(sess, collection=tf.GraphKeys.GLOBAL_VARIABLES):
    """Snapshot every variable of `collection` under the 'ddpg/' scope as
    a dict mapping variable name -> evaluated value.

    Fix: the original ignored the `collection` argument and always read
    tf.GraphKeys.GLOBAL_VARIABLES; it is now honored (same default, so
    existing callers are unaffected).
    """
    return {v.name: sess.run(v) for v in tf.get_collection(collection, scope='ddpg/')}
|
def load_weight(sess, data, include=()):
    """Assign saved values from `data` (variable name -> value) to every
    global variable whose name contains one of the `include` scopes and
    whose shape matches the stored value.

    Fix: the mutable default argument ``include=[]`` is replaced with an
    immutable empty tuple (backward-compatible: iteration behavior is
    identical and the default still loads nothing).
    """
    for scope in include:
        for v in tf.global_variables():
            # Only restore variables we have data for, within the scope.
            if (v.name in data) and (scope in v.name):
                if (v.shape == data[v.name].shape):
                    sess.run(v.assign(data[v.name]))
                    print('load weight: ', v.name)
|
class KVWriter(object):
    """Abstract interface for writers that output key/value diagnostics."""
    def writekvs(self, kvs):
        # kvs: dict of diagnostic name -> value for one iteration.
        raise NotImplementedError
|
class SeqWriter(object):
    """Abstract interface for writers that output a sequence of strings."""
    def writeseq(self, seq):
        # seq: iterable of strings to write as one log line.
        raise NotImplementedError
|
class HumanOutputFormat(KVWriter, SeqWriter):
    """Human-readable writer: key/value dumps become an ASCII table,
    sequences are written verbatim on one line.
    """
    def __init__(self, filename_or_file):
        # Accept either a path (file is owned and closed by us) or an
        # already-open file-like object (left open on close()).
        if isinstance(filename_or_file, str):
            self.file = open(filename_or_file, 'wt')
            self.own_file = True
        else:
            assert hasattr(filename_or_file, 'read'), ('expected file or str, got %s' % filename_or_file)
            self.file = filename_or_file
            self.own_file = False
    def writekvs(self, kvs):
        # Render values (floats in compact %g form), truncating long
        # keys/values, then print an aligned, dash-bordered table.
        key2str = {}
        for (key, val) in sorted(kvs.items()):
            if isinstance(val, float):
                valstr = ('%-8.3g' % (val,))
            else:
                valstr = str(val)
            key2str[self._truncate(key)] = self._truncate(valstr)
        if (len(key2str) == 0):
            print('WARNING: tried to write empty key-value dict')
            return
        else:
            keywidth = max(map(len, key2str.keys()))
            valwidth = max(map(len, key2str.values()))
        dashes = ('-' * ((keywidth + valwidth) + 7))
        lines = [dashes]
        for (key, val) in sorted(key2str.items()):
            lines.append(('| %s%s | %s%s |' % (key, (' ' * (keywidth - len(key))), val, (' ' * (valwidth - len(val))))))
        lines.append(dashes)
        self.file.write(('\n'.join(lines) + '\n'))
        self.file.flush()
    def _truncate(self, s):
        # Cap cell width at 23 chars; longer strings become "<20>...".
        return ((s[:20] + '...') if (len(s) > 23) else s)
    def writeseq(self, seq):
        for arg in seq:
            self.file.write(arg)
        self.file.write('\n')
        self.file.flush()
    def close(self):
        if self.own_file:
            self.file.close()
|
class JSONOutputFormat(KVWriter):
    """Writes each dump as one JSON object per line (JSON-lines file)."""
    def __init__(self, filename):
        self.file = open(filename, 'wt')
    def writekvs(self, kvs):
        # Fix: operate on a copy instead of mutating the caller's dict
        # (the original wrote the float-coerced values back into `kvs`).
        row = dict(kvs)
        for (k, v) in row.items():
            if hasattr(v, 'dtype'):
                # numpy scalar -> native Python float so json can encode it.
                row[k] = float(v.tolist())
        self.file.write((json.dumps(row) + '\n'))
        self.file.flush()
    def close(self):
        self.file.close()
|
class CSVOutputFormat(KVWriter):
    """Appends one row per dump to a CSV file, rewriting the whole file
    whenever a previously-unseen key appears so the header can grow.
    """
    def __init__(self, filename):
        # 'w+t': read/write text mode — readback is needed to rewrite the
        # file when new columns show up.
        self.file = open(filename, 'w+t')
        self.keys = []
        self.sep = ','
    def writekvs(self, kvs):
        # NOTE(review): extra_keys is a set, so the order of newly-added
        # columns is not deterministic across runs.
        extra_keys = (kvs.keys() - self.keys)
        if extra_keys:
            # New columns: extend the header, then rewrite the existing
            # body with empty cells padded in for the new columns.
            self.keys.extend(extra_keys)
            self.file.seek(0)
            lines = self.file.readlines()
            self.file.seek(0)
            for (i, k) in enumerate(self.keys):
                if (i > 0):
                    self.file.write(',')
                self.file.write(k)
            self.file.write('\n')
            for line in lines[1:]:
                # Strip the old newline, pad one separator per new column,
                # re-terminate the row.
                self.file.write(line[:(- 1)])
                self.file.write((self.sep * len(extra_keys)))
                self.file.write('\n')
        for (i, k) in enumerate(self.keys):
            if (i > 0):
                self.file.write(',')
            v = kvs.get(k)
            if (v is not None):
                self.file.write(str(v))
        self.file.write('\n')
        self.file.flush()
    def close(self):
        self.file.close()
|
class TensorBoardOutputFormat(KVWriter):
    """Dumps key/value pairs into TensorBoard's numeric event format."""
    def __init__(self, dir):
        os.makedirs(dir, exist_ok=True)
        self.dir = dir
        self.step = 1  # TB step counter, incremented once per dump
        prefix = 'events'
        path = osp.join(osp.abspath(dir), prefix)
        # Lazy, local TF imports so the module works without TF unless
        # this format is actually used; handles kept for writekvs.
        import tensorflow as tf
        from tensorflow.python import pywrap_tensorflow
        from tensorflow.core.util import event_pb2
        from tensorflow.python.util import compat
        self.tf = tf
        self.event_pb2 = event_pb2
        self.pywrap_tensorflow = pywrap_tensorflow
        self.writer = pywrap_tensorflow.EventsWriter(compat.as_bytes(path))
    def writekvs(self, kvs):
        # Every value must be float-coercible (simple_value summaries).
        def summary_val(k, v):
            kwargs = {'tag': k, 'simple_value': float(v)}
            return self.tf.Summary.Value(**kwargs)
        summary = self.tf.Summary(value=[summary_val(k, v) for (k, v) in kvs.items()])
        event = self.event_pb2.Event(wall_time=time.time(), summary=summary)
        event.step = self.step
        self.writer.WriteEvent(event)
        self.writer.Flush()
        self.step += 1
    def close(self):
        if self.writer:
            self.writer.Close()
            self.writer = None
|
def make_output_format(format, ev_dir, log_suffix=''):
    """Instantiate the writer for one output-format name ('stdout', 'log',
    'json', 'csv' or 'tensorboard'), creating `ev_dir` if necessary.

    Raises ValueError for an unknown format string.
    """
    os.makedirs(ev_dir, exist_ok=True)
    if format == 'stdout':
        return HumanOutputFormat(sys.stdout)
    if format == 'log':
        return HumanOutputFormat(osp.join(ev_dir, 'log%s.txt' % log_suffix))
    if format == 'json':
        return JSONOutputFormat(osp.join(ev_dir, 'progress%s.json' % log_suffix))
    if format == 'csv':
        return CSVOutputFormat(osp.join(ev_dir, 'progress%s.csv' % log_suffix))
    if format == 'tensorboard':
        return TensorBoardOutputFormat(osp.join(ev_dir, 'tb%s' % log_suffix))
    raise ValueError(('Unknown format specified: %s' % (format,)))
|
def logkv(key, val):
    """
    Log a value of some diagnostic
    Call this once for each diagnostic quantity, each iteration
    If called many times, last value will be used.
    """
    # Delegates to the module-global current logger.
    Logger.CURRENT.logkv(key, val)
|
def logkv_mean(key, val):
    """
    The same as logkv(), but if called many times, values averaged.
    """
    Logger.CURRENT.logkv_mean(key, val)
|
def logkvs(d):
    """
    Log a dictionary of key-value pairs
    """
    for (k, v) in d.items():
        logkv(k, v)
|
def dumpkvs():
    """Write all diagnostics accumulated since the last dump to every
    configured output format, then clear them.
    """
    Logger.CURRENT.dumpkvs()
|
def getkvs():
    """Return the current logger's accumulated key -> value dict."""
    return Logger.CURRENT.name2val
|
def log(*args, level=INFO):
    """
    Write the sequence of args, with no separators, to the console and output files (if you've configured an output file).
    """
    Logger.CURRENT.log(*args, level=level)
|
def debug(*args):
    """Log `args` at DEBUG level."""
    log(*args, level=DEBUG)
|
def info(*args):
    """Log `args` at INFO level."""
    log(*args, level=INFO)
|
def warn(*args):
    """Log `args` at WARN level."""
    log(*args, level=WARN)
|
def error(*args):
    """Log `args` at ERROR level."""
    log(*args, level=ERROR)
|
def set_level(level):
    """
    Set logging threshold on current logger.
    """
    Logger.CURRENT.set_level(level)
|
def get_dir():
    """
    Get directory that log files are being written to.
    will be None if there is no output directory (i.e., if you didn't call start)
    """
    return Logger.CURRENT.get_dir()
|
class ProfileKV():
    """
    Usage:
    with logger.ProfileKV("interesting_scope"):
        code

    Accumulates the elapsed wall time of the block under the key
    "wait_<name>" on the current logger (name2val is a defaultdict(float),
    so += starts from 0.0).
    """
    def __init__(self, n):
        self.n = ('wait_' + n)
    def __enter__(self):
        self.t1 = time.time()
    def __exit__(self, type, value, traceback):
        Logger.CURRENT.name2val[self.n] += (time.time() - self.t1)
|
def profile(n):
    """
    Usage:
    @profile("my_func")
    def my_func(): code

    Times each call of the decorated function under logger key
    "wait_<n>" via ProfileKV.

    Fix: the wrapper now carries functools.wraps so the decorated
    function keeps its __name__/__doc__ (the original hid them behind
    'func_wrapper').
    """
    def decorator_with_name(func):
        @functools.wraps(func)
        def func_wrapper(*args, **kwargs):
            with ProfileKV(n):
                return func(*args, **kwargs)
        return func_wrapper
    return decorator_with_name
|
class Logger(object):
    """Collects key/value diagnostics and text messages and fans them out
    to a list of output-format writers. Logger.CURRENT is the module-wide
    active instance.
    """
    DEFAULT = None
    CURRENT = None
    def __init__(self, dir, output_formats):
        # name2val holds the latest value per key; name2cnt the running
        # count used by logkv_mean's incremental averaging.
        self.name2val = defaultdict(float)
        self.name2cnt = defaultdict(int)
        self.level = INFO
        self.dir = dir
        self.output_formats = output_formats
    def logkv(self, key, val):
        self.name2val[key] = val
    def logkv_mean(self, key, val):
        # None marks the key as unavailable and resets its running mean.
        if (val is None):
            self.name2val[key] = None
            return
        (oldval, cnt) = (self.name2val[key], self.name2cnt[key])
        # Incremental mean: new = old*cnt/(cnt+1) + val/(cnt+1).
        self.name2val[key] = (((oldval * cnt) / (cnt + 1)) + (val / (cnt + 1)))
        self.name2cnt[key] = (cnt + 1)
    def dumpkvs(self):
        if (self.level == DISABLED):
            return
        for fmt in self.output_formats:
            if isinstance(fmt, KVWriter):
                fmt.writekvs(self.name2val)
        self.name2val.clear()
        self.name2cnt.clear()
    def log(self, *args, level=INFO):
        # Only emit when the message level is at or above the threshold.
        if (self.level <= level):
            self._do_log(args)
    def set_level(self, level):
        self.level = level
    def get_dir(self):
        return self.dir
    def close(self):
        for fmt in self.output_formats:
            fmt.close()
    def _do_log(self, args):
        for fmt in self.output_formats:
            if isinstance(fmt, SeqWriter):
                fmt.writeseq(map(str, args))
|
def configure(dir=None, format_strs=None):
    """Install a new Logger.CURRENT writing to `dir`.

    dir defaults to $OPENAI_LOGDIR, else a fresh timestamped tempdir.
    format_strs defaults to $OPENAI_LOG_FORMAT (or _MPI for non-zero
    ranks), else the module-level LOG_OUTPUT_FORMATS(_MPI) lists.
    Non-zero MPI ranks get a '-rankNNN' suffix on their output files.
    """
    if (dir is None):
        dir = os.getenv('OPENAI_LOGDIR')
    if (dir is None):
        dir = osp.join(tempfile.gettempdir(), datetime.datetime.now().strftime('openai-%Y-%m-%d-%H-%M-%S-%f'))
    assert isinstance(dir, str)
    os.makedirs(dir, exist_ok=True)
    log_suffix = ''
    from mpi4py import MPI
    rank = MPI.COMM_WORLD.Get_rank()
    if (rank > 0):
        log_suffix = ('-rank%03i' % rank)
    if (format_strs is None):
        (strs, strs_mpi) = (os.getenv('OPENAI_LOG_FORMAT'), os.getenv('OPENAI_LOG_FORMAT_MPI'))
        format_strs = (strs_mpi if (rank > 0) else strs)
        if (format_strs is not None):
            format_strs = format_strs.split(',')
        else:
            format_strs = (LOG_OUTPUT_FORMATS_MPI if (rank > 0) else LOG_OUTPUT_FORMATS)
    output_formats = [make_output_format(f, dir, log_suffix) for f in format_strs]
    Logger.CURRENT = Logger(dir=dir, output_formats=output_formats)
    log(('Logging to %s' % dir))
|
def reset():
    """Close the current logger (if it isn't the default one) and restore
    Logger.DEFAULT as the active logger.
    """
    if (Logger.CURRENT is not Logger.DEFAULT):
        Logger.CURRENT.close()
        Logger.CURRENT = Logger.DEFAULT
        log('Reset logger')
|
class scoped_configure(object):
    """Context manager: temporarily configure() a logger on entry and
    restore the previous Logger.CURRENT (closing the temporary one) on
    exit.
    """
    def __init__(self, dir=None, format_strs=None):
        self.dir = dir
        self.format_strs = format_strs
        self.prevlogger = None
    def __enter__(self):
        self.prevlogger = Logger.CURRENT
        configure(dir=self.dir, format_strs=self.format_strs)
    def __exit__(self, *args):
        Logger.CURRENT.close()
        Logger.CURRENT = self.prevlogger
|
def _demo():
    """Manual smoke test for the logging API; writes to /tmp/testlogging
    and prints expected values for eyeball verification.
    """
    info('hi')
    debug("shouldn't appear")
    set_level(DEBUG)
    debug('should appear')
    dir = '/tmp/testlogging'
    if os.path.exists(dir):
        shutil.rmtree(dir)
    configure(dir=dir)
    logkv('a', 3)
    logkv('b', 2.5)
    dumpkvs()
    logkv('b', (- 2.5))
    logkv('a', 5.5)
    dumpkvs()
    info('^^^ should see a = 5.5')
    # logkv_mean averages repeated values within one dump interval.
    logkv_mean('b', (- 22.5))
    logkv_mean('b', (- 44.4))
    logkv('a', 5.5)
    dumpkvs()
    info('^^^ should see b = 33.3')
    logkv('b', (- 2.5))
    dumpkvs()
    # Exercises HumanOutputFormat's value truncation.
    logkv('a', 'longasslongasslongasslongasslongasslongassvalue')
    dumpkvs()
|
def read_json(fname):
    """Read a newline-delimited JSON file (one object per line) into a
    pandas DataFrame with one row per line.
    """
    import pandas
    with open(fname, 'rt') as fh:
        records = [json.loads(line) for line in fh]
    return pandas.DataFrame(records)
|
def read_csv(fname):
    """Read a CSV file into a pandas DataFrame, ignoring '#' comment
    lines and adding no index column.
    """
    import pandas
    frame = pandas.read_csv(fname, index_col=None, comment='#')
    return frame
|
def read_tb(path):
    """
    path : a tensorboard file OR a directory, where we will find all TB files
    of the form events.*

    Returns a pandas DataFrame with one column per tag and one row per
    step (rows are indexed step-1); cells with no recorded value are NaN.
    """
    import pandas
    import numpy as np
    from glob import glob
    from collections import defaultdict
    import tensorflow as tf
    if osp.isdir(path):
        fnames = glob(osp.join(path, 'events.*'))
    elif osp.basename(path).startswith('events.'):
        fnames = [path]
    else:
        raise NotImplementedError(('Expected tensorboard file or directory containing them. Got %s' % path))
    # Collect (step, value) pairs per tag across all event files.
    tag2pairs = defaultdict(list)
    maxstep = 0
    for fname in fnames:
        for summary in tf.train.summary_iterator(fname):
            if (summary.step > 0):
                for v in summary.summary.value:
                    pair = (summary.step, v.simple_value)
                    tag2pairs[v.tag].append(pair)
                maxstep = max(summary.step, maxstep)
    data = np.empty((maxstep, len(tag2pairs)))
    data[:] = np.nan
    tags = sorted(tag2pairs.keys())
    for (colidx, tag) in enumerate(tags):
        pairs = tag2pairs[tag]
        for (step, value) in pairs:
            # Steps are 1-based in TB; rows are 0-based.
            data[((step - 1), colidx)] = value
    return pandas.DataFrame(data, columns=tags)
|
def rolling_window(a, window):
    """Return a zero-copy view of `a` with a trailing sliding-window axis:
    output shape is a.shape[:-1] + (n - window + 1, window) where n is the
    size of the last axis.
    """
    n_windows = a.shape[-1] - window + 1
    out_shape = a.shape[:-1] + (n_windows, window)
    # Repeat the last stride so consecutive windows overlap by one step.
    out_strides = a.strides + (a.strides[-1],)
    return np.lib.stride_tricks.as_strided(a, shape=out_shape, strides=out_strides)
|
def window_func(x, y, window, func):
    """Apply `func` (e.g. np.mean) over a sliding window of `y` and trim
    `x` so both returned arrays align (first window-1 points dropped).
    """
    windows = rolling_window(y, window)
    smoothed = func(windows, axis=(- 1))
    return (x[(window - 1):], smoothed)
|
def ts2xy(ts, xaxis):
    """Turn a monitor DataFrame `ts` (columns: l=episode length,
    r=episode reward, t=wall time) into (x, y) arrays for plotting.

    xaxis selects the x quantity: X_TIMESTEPS (cumulative env steps),
    X_EPISODES (episode index) or X_WALLTIME (hours); y is always the
    per-episode reward.
    """
    if (xaxis == X_TIMESTEPS):
        x = np.cumsum(ts.l.values)
        y = ts.r.values
    elif (xaxis == X_EPISODES):
        x = np.arange(len(ts))
        y = ts.r.values
    elif (xaxis == X_WALLTIME):
        # t is in seconds; plot in hours.
        x = (ts.t.values / 3600.0)
        y = ts.r.values
    else:
        raise NotImplementedError
    return (x, y)
|
def plot_curves(xy_list, xaxis, title):
    """Scatter the raw episode rewards and overlay a windowed-mean curve
    for each (x, y) series in `xy_list` on one matplotlib figure.
    """
    plt.figure(figsize=(8, 2))
    maxx = max((xy[0][(- 1)] for xy in xy_list))
    minx = 0
    for (i, (x, y)) in enumerate(xy_list):
        color = COLORS[i]
        plt.scatter(x, y, s=2)
        # Smooth over EPISODES_WINDOW episodes; window_func trims x to
        # match the shortened smoothed series.
        (x, y_mean) = window_func(x, y, EPISODES_WINDOW, np.mean)
        plt.plot(x, y_mean, color=color)
    plt.xlim(minx, maxx)
    plt.title(title)
    plt.xlabel(xaxis)
    plt.ylabel('Episode Rewards')
    plt.tight_layout()
|
def plot_results(dirs, num_timesteps, xaxis, task_name):
    """Load monitor results from each directory in `dirs`, truncate each
    run to at most `num_timesteps` environment steps, and plot them all
    on one figure via plot_curves.
    """
    tslist = []
    for dir in dirs:
        ts = load_results(dir)
        # Keep only episodes up to the cumulative timestep budget.
        ts = ts[(ts.l.cumsum() <= num_timesteps)]
        tslist.append(ts)
    xy_list = [ts2xy(ts, xaxis) for ts in tslist]
    plot_curves(xy_list, xaxis, task_name)
|
def main():
    """Command-line entry point: plot training curves for one or more
    log directories and show the figure.
    """
    import argparse
    import os
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--dirs', help='List of log directories', nargs='*', default=['./log'])
    parser.add_argument('--num_timesteps', type=int, default=int(10000000.0))
    parser.add_argument('--xaxis', help='Varible on X-axis', default=X_TIMESTEPS)
    parser.add_argument('--task_name', help='Title of plot', default='Breakout')
    args = parser.parse_args()
    args.dirs = [os.path.abspath(dir) for dir in args.dirs]
    plot_results(args.dirs, args.num_timesteps, args.xaxis, args.task_name)
    plt.show()
|
class Attention(Layer):
    """Keras layer implementing content (aspect-based) attention over a
    sequence. Supports masking. Called on [x, aspect] where x is the
    sequence tensor and aspect a per-sample context vector; returns the
    normalized attention weights over the timesteps.
    """
    def __init__(self, W_regularizer=None, b_regularizer=None, W_constraint=None, b_constraint=None, bias=True, **kwargs):
        """
        Keras Layer that implements an Content Attention mechanism.
        Supports Masking.
        """
        self.supports_masking = True
        self.init = initializers.get('glorot_uniform')
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.bias = bias
        super(Attention, self).__init__(**kwargs)
    def build(self, input_shape):
        # Expects a list of two shapes: [x_shape, aspect_shape].
        assert (type(input_shape) == list)
        self.steps = input_shape[0][1]
        # W maps the aspect vector into the sequence's feature space.
        self.W = self.add_weight((input_shape[0][(- 1)], input_shape[1][(- 1)]), initializer=self.init, name='{}_W'.format(self.name), regularizer=self.W_regularizer, constraint=self.W_constraint)
        if self.bias:
            self.b = self.add_weight((1,), initializer='zeros', name='{}_b'.format(self.name), regularizer=self.b_regularizer, constraint=self.b_constraint)
        else:
            self.b = None
        self.built = True
    def compute_mask(self, input_tensor, mask=None):
        assert (type(input_tensor) == list)
        assert (type(mask) == list)
        # Attention weights themselves are not masked downstream.
        return None
    def call(self, input_tensor, mask=None):
        x = input_tensor[0]
        aspect = input_tensor[1]
        mask = mask[0]
        # Project the aspect and broadcast it across all timesteps.
        aspect = K.transpose(K.dot(self.W, K.transpose(aspect)))
        aspect = K.expand_dims(aspect, axis=(- 2))
        aspect = K.repeat_elements(aspect, self.steps, axis=1)
        # Unnormalized score per timestep: dot(x_t, projected aspect).
        eij = K.sum((x * aspect), axis=(- 1))
        if self.bias:
            b = K.repeat_elements(self.b, self.steps, axis=0)
            eij += b
        eij = K.tanh(eij)
        # Masked softmax over the time axis (epsilon guards empty masks).
        a = K.exp(eij)
        if (mask is not None):
            a *= K.cast(mask, K.floatx())
        a /= K.cast((K.sum(a, axis=1, keepdims=True) + K.epsilon()), K.floatx())
        return a
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][1])
|
class WeightedSum(Layer):
    """Keras layer: weighted sum over the time axis. Called on [x, a]
    where a holds per-timestep weights (e.g. Attention's output); returns
    sum_t a_t * x_t.
    """
    def __init__(self, **kwargs):
        self.supports_masking = True
        super(WeightedSum, self).__init__(**kwargs)
    def call(self, input_tensor, mask=None):
        assert (type(input_tensor) == list)
        assert (type(mask) == list)
        x = input_tensor[0]
        a = input_tensor[1]
        # (batch, steps) -> (batch, steps, 1) so it broadcasts over features.
        a = K.expand_dims(a)
        weighted_input = (x * a)
        return K.sum(weighted_input, axis=1)
    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], input_shape[0][(- 1)])
    def compute_mask(self, x, mask=None):
        # The time axis is reduced away, so no mask is propagated.
        return None
|
class Average(Layer):
    """Keras layer: mean over the time axis (axis 1). With mask_zero=True
    masked-out timesteps are excluded from the mean (epsilon guards
    all-masked rows); otherwise a plain mean is taken.
    """
    def __init__(self, mask_zero=True, **kwargs):
        self.mask_zero = mask_zero
        self.supports_masking = True
        super(Average, self).__init__(**kwargs)
    def call(self, x, mask=None):
        if self.mask_zero:
            mask = K.cast(mask, K.floatx())
            mask = K.expand_dims(mask)
            x = (x * mask)
            return (K.sum(x, axis=1) / (K.sum(mask, axis=1) + K.epsilon()))
        else:
            return K.mean(x, axis=1)
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[(- 1)])
    def compute_mask(self, x, mask):
        # Time axis is reduced away; nothing left to mask.
        return None
|
def get_optimizer(args):
    """Build a Keras optimizer selected by args.algorithm.

    Supported: 'rmsprop', 'sgd', 'adagrad', 'adadelta', 'adam', 'adamax'.
    All use clipnorm=10; clipvalue=0 presumably disables value clipping
    in this Keras version (TODO confirm against keras.optimizers).

    Raises:
        ValueError: for an unknown algorithm name (the original fell
        through and crashed with UnboundLocalError on `optimizer`).
    """
    clipvalue = 0
    clipnorm = 10
    if (args.algorithm == 'rmsprop'):
        optimizer = opt.RMSprop(lr=0.001, rho=0.9, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif (args.algorithm == 'sgd'):
        optimizer = opt.SGD(lr=0.01, momentum=0.0, decay=0.0, nesterov=False, clipnorm=clipnorm, clipvalue=clipvalue)
    elif (args.algorithm == 'adagrad'):
        optimizer = opt.Adagrad(lr=0.01, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif (args.algorithm == 'adadelta'):
        optimizer = opt.Adadelta(lr=1.0, rho=0.95, epsilon=1e-06, clipnorm=clipnorm, clipvalue=clipvalue)
    elif (args.algorithm == 'adam'):
        optimizer = opt.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    elif (args.algorithm == 'adamax'):
        optimizer = opt.Adamax(lr=0.002, beta_1=0.9, beta_2=0.999, epsilon=1e-08, clipnorm=clipnorm, clipvalue=clipvalue)
    else:
        raise ValueError('Unknown optimization algorithm: %s' % (args.algorithm,))
    return optimizer
|
class W2VEmbReader():
    """Reads word embeddings from a text file (one word followed by its
    vector components per line), optionally overlaying domain-specific
    pre-trained weights, and can copy them into an embedding matrix.
    """
    def __init__(self, args, emb_path):
        logger.info(('Loading embeddings from: ' + emb_path))
        self.embeddings = {}
        emb_file = codecs.open(emb_path, 'r', encoding='utf8')
        self.vocab_size = 0
        self.emb_dim = (- 1)
        for line in emb_file:
            tokens = line.split()
            if (len(tokens) == 0):
                continue
            if (self.emb_dim == (- 1)):
                # Infer the dimensionality from the first non-empty line.
                self.emb_dim = (len(tokens) - 1)
                assert (self.emb_dim == args.emb_dim)
            word = tokens[0]
            vec = tokens[1:]
            self.embeddings[word] = vec
            self.vocab_size += 1
        emb_file.close()
        if args.is_pretrain:
            # Overlay pre-trained weights for the chosen domain
            # ('lt' vs restaurant); note 'percetage' [sic] is the
            # attribute name used elsewhere in the project.
            if (args.domain == 'lt'):
                f = open(('../pretrained_weights/word_emb_lt%.1f.pkl' % args.percetage), 'rb')
            else:
                f = open(('../pretrained_weights/word_emb_res%.1f.pkl' % args.percetage), 'rb')
            emb_dict = pickle.load(f)
            for word in emb_dict:
                self.embeddings[word] = emb_dict[word]
        logger.info((' #vectors: %i, #dimensions: %i' % (len(self.embeddings), self.emb_dim)))
    def get_emb_matrix_given_vocab(self, vocab, emb_matrix):
        """Fill emb_matrix[0][index] with the stored vector for each vocab
        word that has an embedding; log the hit rate and return the matrix.
        """
        counter = 0.0
        # Fix: dict.iteritems() is Python 2 only — the rest of this file
        # is Python 3, so this method always crashed. Use items().
        for (word, index) in vocab.items():
            try:
                emb_matrix[0][index] = self.embeddings[word]
                counter += 1
            except KeyError:
                pass
        logger.info(('%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), ((100 * counter) / len(vocab)))))
        return emb_matrix
|
class W2VEmbReader():
    """Reads word embeddings from a text file (one word followed by its
    vector components per line) and can copy them into an embedding
    matrix. Variant taking an explicit emb_dim instead of an args object.
    """
    def __init__(self, emb_path, emb_dim=None):
        logger.info(('Loading embeddings from: ' + emb_path))
        self.embeddings = {}
        emb_file = codecs.open(emb_path, 'r', encoding='utf8')
        self.vocab_size = 0
        self.emb_dim = (- 1)
        for line in emb_file:
            tokens = line.split()
            if (len(tokens) == 0):
                continue
            if (self.emb_dim == (- 1)):
                # Infer the dimensionality from the first non-empty line.
                self.emb_dim = (len(tokens) - 1)
                assert (self.emb_dim == emb_dim)
            word = tokens[0]
            vec = tokens[1:]
            self.embeddings[word] = vec
            self.vocab_size += 1
        emb_file.close()
        logger.info((' #vectors: %i, #dimensions: %i' % (self.vocab_size, self.emb_dim)))
    def get_emb_matrix_given_vocab(self, vocab, emb_matrix):
        """Fill emb_matrix[0][index] with the stored vector for each vocab
        word that has an embedding; log the hit rate and return the matrix.
        """
        counter = 0.0
        # Fix: dict.iteritems() is Python 2 only — the rest of this file
        # is Python 3, so this method always crashed. Use items().
        for (word, index) in vocab.items():
            try:
                emb_matrix[0][index] = self.embeddings[word]
                counter += 1
            except KeyError:
                pass
        logger.info(('%i/%i word vectors initialized (hit rate: %.2f%%)' % (counter, len(vocab), ((100 * counter) / len(vocab)))))
        return emb_matrix
|
class BatchIter():
    """Sequential mini-batch iterator over `dataset`, padding each batch
    with pad_sequence.

    NOTE(review): __getitem__ ignores its `index` argument and advances
    internal state `self.id` instead, signalling exhaustion by raising
    StopIteration. This works with a plain `for` loop via the sequence
    protocol (IndexError would be the conventional signal) but breaks
    random access and concurrent iteration — handle with care.
    """
    def __init__(self, dataset, batch_size, batch_first=False):
        self.dataset = dataset
        self.batch_size = batch_size
        self.id = 0  # cursor: index of the next batch's first element
        self.batch_first = batch_first
    def __len__(self):
        # Number of batches, counting a final partial batch.
        return math.ceil((len(self.dataset) / self.batch_size))
    def __getitem__(self, index):
        if (self.id >= len(self.dataset)):
            # Reset so the iterator can be re-used, then end iteration.
            self.id = 0
            raise StopIteration()
        sens = self.dataset[self.id:(self.id + self.batch_size)]
        # Pad variable-length sequences in the batch to a common length.
        sens = pad_sequence(sens, padding_value=0, batch_first=self.batch_first)
        self.id += self.batch_size
        return sens
    def totext(self, sen):
        return self.dataset.totext(sen)
|
class Corpus(Dataset):
    """Text dataset: tokenizes a file of sentences, builds (or reuses) a
    word->id dictionary, and stores each sentence as a LongTensor of ids
    wrapped in <sos>/<eos> markers. Sentences are sorted by length and
    capped at 2,000,000.
    """
    def __init__(self, path, word_dic=None, min_word_count=4):
        print('start to load Corpus data')
        with open(path, 'r') as f:
            corpus = f.readlines()
        corpus = [self._split(i.strip()) for i in corpus]
        # Sort sentences by token count (helps batching with less padding).
        corpus = [(t, len(t)) for t in corpus]
        corpus.sort(key=operator.itemgetter(1))
        self.texts = [x for (x, _) in corpus][:2000000]
        print('start to build dictionary')
        if (word_dic is not None):
            self.word_id = word_dic
        else:
            doc = ' '.join([' '.join(i) for i in self.texts])
            self.word_id = self._make_dic(doc, min_word_count)
        self.id_word = self._make_inv_dic(self.word_id)
        self.voca_size = len(self.word_id)
        # NOTE(review): _make_dic assigns <sos>=2 / <eos>=3 (and _txt_vecs
        # uses 2/3), but these tokens are 1 and 2 — looks inconsistent;
        # confirm against their usage before relying on them.
        self.sos_token = torch.tensor([1])
        self.eos_token = torch.tensor([2])
        print('start to make one-hot vectors')
        self.textcodes = [self._txt_vecs(l) for l in self.texts]
        self.doc_size = len(self.texts)
        self.max_length = max((len(i) for i in self.texts))
    def _split(self, sen):
        # Lowercase, collapse dot runs, detach punctuation/'s, then
        # split on whitespace.
        sen = sen.lower()
        sen = re.sub('[.]+', '.', sen)
        sen = re.sub("([.?!,]|'s)", ' \\1', sen)
        return re.split('\\s+', sen)
    def _split2(self, labels):
        # Split a comma-separated label string (spaces become dashes).
        labels = re.sub('[ ]', '-', labels)
        return re.split(',-*', labels)
    def _make_dic(self, doc, min_word_count, addflag=True):
        # Count words, drop rare ones, and assign ids by descending
        # frequency; ids 0-3 are reserved for the special tokens.
        flag_count = (4 if addflag else 0)
        doc_ = re.split('\\s', ''.join(doc))
        words = sorted(doc_)
        word_count = [(w, sum((1 for _ in c))) for (w, c) in groupby(words)]
        word_count = [(w, c) for (w, c) in word_count if (c >= min_word_count)]
        word_count.sort(key=operator.itemgetter(1), reverse=True)
        word_id = dict([(w, (i + flag_count)) for (i, (w, _)) in enumerate(word_count)])
        if addflag:
            word_id['<pad>'] = 0
            word_id['<unk0>'] = 1
            word_id['<sos>'] = 2
            word_id['<eos>'] = 3
            self.pad_id = 0
        return word_id
    def _make_inv_dic(self, word_id_dic):
        id_word = dict([(i, w) for (w, i) in word_id_dic.items()])
        return id_word
    def _word_onehot(self, word):
        # One-hot vector for a single word; unknown words map to <unk0>.
        v = torch.zeros([self.voca_size], dtype=torch.long)
        v[self.word_id.get(word, 1)] = 1
        return v
    def _txt_vecs(self, txt):
        # Sentence -> id tensor, wrapped with <sos> (2) and <eos> (3).
        v = [self.word_id.get(w, 1) for w in txt]
        v = (([2] + v) + [3])
        v = torch.tensor(v)
        return v
    def __getitem__(self, index):
        return self.textcodes[index]
    def __len__(self):
        return self.doc_size
    def totext(self, sen):
        # Id sequence -> space-joined words.
        text = [self.id_word[i] for i in sen]
        return ' '.join(text)
|
class Discriminator_MLP(nn.Module):
    """Discriminator scoring context vectors `c` against embeddings `h`:
    projects c with a linear layer, dots it with h per sample, and
    applies ReLU.
    """
    def __init__(self, h_d, c_d, device):
        super(Discriminator_MLP, self).__init__()
        self.f = nn.Linear(c_d, h_d)
        self.act = nn.ReLU()
        self.device = device
        for module in self.modules():
            self.weights_init(module)
    def weights_init(self, m):
        # Xavier init only targets Bilinear submodules (none here, so
        # the Linear layer keeps its default initialization).
        if not isinstance(m, nn.Bilinear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
    def forward(self, c, h):
        """Score each (c, h) row pair; returns a 1-D tensor of length batch."""
        projected = self.f(c)
        scores = (projected * h).sum(dim=1)
        return self.act(scores)
|
class Discriminator_Bilinear(nn.Module):
    """Bilinear discriminator: scores each (c, h) row pair with a single
    Xavier-initialized nn.Bilinear form and returns the squeezed logits.
    """
    def __init__(self, n_h, n_c, device):
        super(Discriminator_Bilinear, self).__init__()
        self.f_k = nn.Bilinear(n_h, n_c, 1)
        self.device = device
        for module in self.modules():
            self.weights_init(module)
    def weights_init(self, m):
        # Xavier-uniform for the bilinear weight, zero bias.
        if not isinstance(m, nn.Bilinear):
            return
        torch.nn.init.xavier_uniform_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0.0)
    def forward(self, c, h):
        """Return per-row bilinear logits (shape: batch)."""
        logits = torch.squeeze(self.f_k(c, h))
        return logits
|
class HGNN(nn.Module):
    """Hypergraph layer: builds edge embeddings by averaging member-node
    embeddings (via scatter — torch_scatter helper, presumably mean over
    the batch index; TODO confirm), transforms them with an MLP, then
    redistributes edge embeddings back to nodes and pools per graph.
    """
    def __init__(self, in_ch, out_ch, device):
        super(HGNN, self).__init__()
        self.device = device
        # NOTE(review): out_ch is unused; the MLP maps in_ch -> in_ch.
        self.f = nn.Sequential(nn.Linear(in_ch, in_ch), nn.ReLU())
    def forward(self, x, H, batch):
        """
        x: the embedding of the vectors (V, d)
        H: the edge matrix (V, E) batch-wise diagnal matrix
        edge_num: the number of edges of each data samples (batch_size)
        """
        # Mask node embeddings per edge: (V, E, d).
        expand_x = x.unsqueeze(1).repeat([1, H.shape[(- 1)], 1])
        expand_H = H.unsqueeze(2)
        edge_emb_tensor = (expand_x * expand_H)
        # Average member nodes into per-graph edge embeddings, run the MLP
        # on the flattened edges, then restore the batched shape.
        edge_dim = scatter(edge_emb_tensor, batch, dim=0, reduce='mean')
        edge_dim_flatten = edge_dim.view((- 1), edge_dim.shape[2])
        edge_dim_flatten = self.f(edge_dim_flatten)
        edge_dim = edge_dim_flatten.view(edge_dim.shape)
        # Broadcast each graph's edge embeddings back to its nodes.
        rep_index = scatter(torch.ones_like(batch), batch)
        repeat_edge_dim = torch.repeat_interleave(edge_dim, rep_index, dim=0)
        node_emb_tensor = (repeat_edge_dim * expand_H)
        node_emb = node_emb_tensor.mean(dim=1)
        h_e = edge_dim.view((- 1), edge_dim.shape[(- 1)])
        # Graph-level summary via mean pooling over nodes.
        c = global_mean_pool(node_emb, batch)
        return (c, h_e)
|
class INFOMAX(nn.Module):
    """Deep-infomax style objective: a bilinear discriminator is trained
    to tell whether a (summary c, edge embedding h) pair comes from the
    same polarity (positive/negative graph set) or a mixed pairing.
    """
    def __init__(self, n_h, n_neg, device):
        super(INFOMAX, self).__init__()
        '\n n_h: the dimension of the embedding\n n_neg: the number of negative samples for each positive samples\n '
        self.sigm = nn.Sigmoid()
        self.disc_hc = Discriminator_Bilinear(n_h, n_h, device)
        # NOTE(review): disc_cc / disc_hh are constructed but never used
        # in this class — possibly leftovers; confirm before removing.
        self.disc_cc = Discriminator_Bilinear(n_h, n_h, device)
        self.disc_hh = Discriminator_Bilinear(n_h, n_h, device)
        self.device = device
        self.n_h = n_h
        self.n_neg = n_neg
        self.mask = nn.Dropout(0.1)
        self.b_xent = nn.BCELoss()
    def random_gen(self, base, num):
        """
        base: the embeddings come from
        num: the number of randoms to be generated
        """
        # Sample num * n_neg rows from `base` with replacement.
        idx = torch.randint(0, base.shape[0], [(num * self.n_neg)])
        shuf = base[idx].squeeze()
        return shuf
    def h_c(self, c_p, h_p, edge_batch_p, c_n, h_n, edge_batch_n):
        # Pair each edge embedding with random summaries: same-polarity
        # pairs (pp, nn) are labelled 1, cross-polarity (pn, np) 0.
        c_all_pp = self.random_gen(c_p, h_p.shape[0])
        c_all_nn = self.random_gen(c_n, h_n.shape[0])
        c_all_pn = self.random_gen(c_p, h_n.shape[0])
        c_all_np = self.random_gen(c_n, h_p.shape[0])
        h_p = h_p.repeat([self.n_neg, 1])
        h_n = h_n.repeat([self.n_neg, 1])
        c = torch.cat((c_all_pp, c_all_nn, c_all_pn, c_all_np), dim=0)
        h = torch.cat((h_p, h_n, h_n, h_p), dim=0)
        ret = self.disc_hc(c, h)
        ret = self.sigm(ret)
        lbl_pp = torch.ones(c_all_pp.shape[0])
        lbl_nn = torch.ones(c_all_nn.shape[0])
        lbl_pn = torch.zeros(c_all_pn.shape[0])
        lbl_np = torch.zeros(c_all_np.shape[0])
        lbl = torch.cat((lbl_pp, lbl_nn, lbl_pn, lbl_np))
        lbl = lbl.to(self.device)
        return self.b_xent(ret, lbl)
    def forward(self, c_p, h_p, edge_batch_p, c_n, h_n, edge_batch_n):
        loss_hc = self.h_c(c_p, h_p, edge_batch_p, c_n, h_n, edge_batch_n)
        return loss_hc
|
class Disc_INFOMIN(nn.Module):
    """Infomin-style discriminator loss over edge embeddings: dropout
    makes two views of h, positive pairs are the aligned rows, negatives
    are rows paired with random edges from the same graph.

    NOTE(review): elsewhere in this file an `INFOMIN(...)` constructor is
    called — presumably an alias of (or sibling to) this class defined in
    another part of the project; confirm.
    """
    def __init__(self, n_h, n_neg, device):
        super(Disc_INFOMIN, self).__init__()
        self.sigm = nn.Sigmoid()
        self.disc = Discriminator_Bilinear(n_h, n_h, device)
        self.n_neg = n_neg
        self.device = device
        self.drop = nn.Dropout(0.1)
        self.b_xnet = nn.BCELoss()
    def forward(self, h, edge_batch):
        # Edges per graph, taken from graph 0 (assumes all graphs in the
        # batch have the same edge count — TODO confirm).
        n_edges = edge_batch[(edge_batch == 0)].shape[0]
        rand_tail1 = torch.randint(0, n_edges, [(edge_batch.shape[0] * self.n_neg)], device=self.device)
        # Index of a random edge within the same graph as each edge.
        neg_index1 = ((edge_batch.repeat_interleave(self.n_neg, 0) * n_edges) + rand_tail1)
        h = h.repeat_interleave(self.n_neg, 0)
        h1 = self.drop(h)
        h2 = self.drop(h)
        ret_pos = self.disc(h2, h1)
        ret_neg2 = self.disc(h2, h1[neg_index1])
        ret = torch.cat([ret_pos, ret_neg2], 0)
        ret = self.sigm(ret)
        lbl_p = torch.ones(ret_pos.shape[0])
        lbl_n = torch.zeros(ret_neg2.shape[0])
        lbl = torch.cat([lbl_p, lbl_n], 0).to(self.device)
        loss = self.b_xnet(ret, lbl)
        return loss
|
class MLPReadout(nn.Module):
    """Single-linear-layer prediction head.

    out_dim: the final prediction dim, usually 1.
    act: the final activation — None-like identity for rating tasks,
    nn.Sigmoid() for CTR.
    (Note: self.act is defined but unused; kept for interface parity.)
    """
    def __init__(self, in_dim, out_dim, act):
        super(MLPReadout, self).__init__()
        self.layer1 = nn.Linear(in_dim, out_dim)
        self.act = nn.ReLU()
        self.out_act = act
    def forward(self, x):
        hidden = self.layer1(x)
        out = self.out_act(hidden)
        return out
|
def evaluation(model, data_loader, device, is_test=False, epoch=(- 1)):
    """Run `model` over `data_loader` in eval mode, collect predictions,
    labels and user ids per batch, and score them with
    process.eval_metrics (project helper).

    When `epoch` != -1 the first batch only is run with the extra
    recording flags to model.run_pred — presumably for logging or
    visualization; confirm against run_pred's signature.
    """
    model.eval()
    predictions = []
    labels = []
    user_ids = []
    record = True
    for data in data_loader:
        data = data.to(device)
        node_index = torch.squeeze(data.x)
        edge_index = data.edge_index
        batch = data.batch
        # First node index of each graph in the batch — used to pick the
        # user-id node of every graph.
        (_, user_id_index) = np.unique(batch.detach().cpu().numpy(), return_index=True)
        user_id = data.x.detach().cpu().numpy()[user_id_index]
        y = data.y
        if ((epoch != (- 1)) and record):
            (pred, _) = model.run_pred((node_index, edge_index, batch), False, True, epoch)
            record = False
        else:
            (pred, _) = model.run_pred((node_index, edge_index, batch), False)
        pred = pred.squeeze().detach().cpu().numpy().astype('float64')
        y = y.detach().cpu().numpy()
        predictions.append(pred)
        labels.append(y)
        user_ids.append(user_id)
    return process.eval_metrics(predictions, labels, user_ids, is_test)
|
class HyperInfomax(nn.Module):
    """Hypergraph recommender combining edge prediction, HGNN encoding and
    InfoMax/InfoMin contrastive objectives.

    Pipeline (see ``l0_hirs``): embed node features, predict a sparse
    hyperedge incidence matrix with an L0-regularised module, encode with an
    HGNN into a graph-level summary ``c`` and per-edge embeddings ``h``, then
    score ``c`` through an MLP readout.
    """
    def __init__(self, args, n_features, device, writer):
        # NOTE: `writer` is accepted but never stored/used in this class.
        super(HyperInfomax, self).__init__()
        self.feature_emb = nn.Embedding(n_features, args.dim)
        # Separate embedding table used only for edge prediction.
        self.feature_emb_edge = nn.Embedding(n_features, args.dim)
        self.hgnn = HGNN(args.dim, args.hid_units, device)
        # SECURITY: eval() on a config string; fine for trusted configs, but
        # do not feed untrusted input through args.l0_para.
        self.edgePred = RestCross(args.dim, args.edge_num, eval(args.l0_para))
        self.infomax = INFOMAX(args.hid_units, args.n_neg_max, device)
        self.infomin = INFOMIN(args.hid_units, args.n_neg_min, device)
        self.readout = MLPReadout(args.hid_units, 1, nn.Sigmoid())
        self.args = args
        self.device = device
    def edge_stat(self, adj, batch):
        """Histogram edge-membership statistics of the predicted incidence
        matrix `adj`, per possible count value 0..num_features."""
        # Per-graph sums of rounded and of strictly-positive entries.
        round_mat = scatter(torch.round(adj), batch, dim=0)
        grezero_mat = scatter((adj > 0).float(), batch, dim=0)
        ones = torch.ones_like(round_mat)  # NOTE: unused
        round_stat_list = []
        grezero_stat_list = []
        for i in range((self.args.num_features + 1)):
            round_stat = torch.sum((round_mat == i).float()).unsqueeze(0)
            grezero_stat = torch.sum((grezero_mat == i).float()).unsqueeze(0)
            round_stat_list.append(round_stat)
            grezero_stat_list.append(grezero_stat)
        round_stat_list = torch.cat(round_stat_list).unsqueeze(1)
        grezero_stat_list = torch.cat(grezero_stat_list).unsqueeze(1)
        return (round_stat_list, grezero_stat_list)
    def edge_pred(self, features, batch, is_training, record=False, epoch=(- 1)):
        """Predict hyperedges from edge-embedding features.

        Returns the incidence matrix, the L0 penalty, a graph-id vector for
        each predicted edge, and placeholder (None, None) edge statistics.
        """
        (adj, l0_penaty, n_edge) = self.edgePred(features, batch, is_training)
        n_edge = n_edge.to(self.device)
        # Graph id repeated once per predicted edge of that graph.
        edge_batch = torch.LongTensor(range((batch.max() + 1))).to(self.device).repeat_interleave(n_edge)
        # edge_stat() is available but intentionally not computed here.
        (round_stat, grezero_stat) = (None, None)
        return (adj, l0_penaty, edge_batch, (round_stat, grezero_stat))
    def pred(self, pred_logits):
        """Map graph summaries to squeezed sigmoid predictions."""
        predictions = self.readout(pred_logits)
        return torch.squeeze(predictions)
    def l0_hirs(self, info_data, train, record=False, epoch=(- 1)):
        """Full encode pass: embeddings -> predicted edges -> HGNN encoding.

        Returns (summary c, edge embeddings h, edge_batch, l0 penalty,
        number of nonzero incidence entries, edge stats placeholder).
        """
        (node_index, edge_index, batch) = info_data
        nb_nodes = node_index.shape[0]
        features = self.feature_emb(node_index)
        features_edge = self.feature_emb_edge(node_index)
        (adj, l0_penaty, edge_batch, edge_stats) = self.edge_pred(features_edge, batch, train, record, epoch)
        (c, h) = self.hgnn(features, adj, batch)
        # Count of strictly-positive incidence entries.
        edge_num = torch.sum(torch.ones_like(adj[(adj > 0)]))
        return (c, h, edge_batch, l0_penaty, edge_num, edge_stats)
    def run_pred(self, data, train, record=False, epoch=(- 1)):
        """Encode and score one batch.

        In eval mode returns (predictions, (l0_penalty, edge_count)); in
        train mode returns the full tuple of intermediate tensors as well.
        """
        (c, h, edge_batch, l0_penaty, edges, edge_stats) = self.l0_hirs(data, train, record=record, epoch=epoch)
        pred = self.pred(c)
        if (not train):
            return (pred, (l0_penaty, edges))
        else:
            return (c, h, edge_batch, l0_penaty, edges, pred, edge_stats)
    def cal_similarity(self, c_p, c_n):
        """Cosine similarity between mean positive and mean negative embeddings."""
        cos = nn.CosineSimilarity(dim=0, eps=1e-06).to(self.device)
        p_mean = torch.mean(c_p, dim=0)
        n_mean = torch.mean(c_n, dim=0)
        return cos(p_mean, n_mean)
    def forward(self, pos_data, neg_data, train, record=False, epoch=(- 1)):
        """Joint forward over a positive and a negative batch.

        Returns predictions for both batches plus the InfoMax loss, averaged
        InfoMin loss, pos/neg similarity diagnostics, combined L0 penalty and
        edge counts, and the (placeholder) edge statistics.
        """
        (c_p, h_p, edge_batch_p, l0_penalty_p, edges_p, pred_p, edge_stats_p) = self.run_pred(pos_data, train, record=record, epoch=epoch)
        (c_n, h_n, edge_batch_n, l0_penalty_n, edges_n, pred_n, edge_stats_n) = self.run_pred(neg_data, train, record=record, epoch=epoch)
        hc_loss = self.infomax(c_p, h_p, edge_batch_p, c_n, h_n, edge_batch_n)
        infomax_loss = hc_loss
        infomin_loss_p = self.infomin(h_p, edge_batch_p)
        infomin_loss_n = self.infomin(h_n, edge_batch_n)
        infomin_loss = ((infomin_loss_p + infomin_loss_n) / 2)
        distance_c = self.cal_similarity(c_p, c_n)
        distance_h = self.cal_similarity(h_p, h_n)
        l0_penalty = (l0_penalty_p + l0_penalty_n)
        n_edges = (edges_p + edges_n)
        return (pred_p, pred_n, infomax_loss, infomin_loss, (distance_c, distance_h), (l0_penalty, n_edges), (edge_stats_p, edge_stats_n))
|
class Dataset(InMemoryDataset):
    """PyG InMemoryDataset that converts a user/item rating file into graphs.

    Each rating row becomes one ``Data`` graph whose node list is
    ``[user_id, item_id] + user attributes + item attributes`` and whose
    label ``y`` is the rating. Data is split train/valid/test (70/15/15,
    stratified on user and rating) and cached in processed files.

    Fixes applied: ``error_num`` was an unbound local inside
    ``data_2_graphs`` (NameError on the first unknown user/item); the
    progress divisor could be 0 for small splits (ZeroDivisionError);
    pickle files were never closed; ``os.mkdir`` failed for nested paths.
    """

    def __init__(self, root, dataset, rating_file, sep=',', sufix='', transform=None, pre_transform=None):
        self.path = root
        self.dataset = dataset
        self.rating_file = rating_file
        self.sep = sep
        self.sufix = sufix
        self.store_backup = True
        # Count of rows skipped because their user/item is unknown; must be
        # set before super().__init__(), which may trigger process().
        self.error_num = 0
        super(Dataset, self).__init__(root, transform, pre_transform)
        (self.data, self.slices) = torch.load(self.processed_paths[0])
        self.stat_info = torch.load(self.processed_paths[1])
        self.data_num = self.stat_info['data_num']
        self.feature_num = self.stat_info['feature_num']

    @property
    def raw_file_names(self):
        return ['{}{}/user_dict.pkl'.format(self.path, self.dataset), '{}{}/item_dict.pkl'.format(self.path, self.dataset), '{}{}/feature_dict.pkl'.format(self.path, self.dataset), '{}{}/{}'.format(self.path, self.dataset, self.rating_file)]

    @property
    def processed_file_names(self):
        return ['{}/{}.dataset'.format((self.dataset + self.sufix), self.dataset), '{}/{}.statinfo'.format((self.dataset + self.sufix), self.dataset)]

    def download(self):
        pass

    def data_2_graphs(self, ratings_df, dataset='train'):
        """Convert rating rows to graphs.

        For the train split, returns (positive graphs, negative graphs)
        separated by rating; otherwise returns a single graph list. Rows
        whose user or item is unknown are counted in ``self.error_num``.
        """
        graphs = []
        graphs_pos = []
        graphs_neg = []
        processed_graphs = 0
        num_graphs = ratings_df.shape[0]
        # max(1, ...): a split smaller than 1000 rows previously caused
        # ZeroDivisionError in the progress modulo below.
        one_per = max(1, int(num_graphs / 1000))
        percent = 0.0
        for i in range(len(ratings_df)):
            if ((processed_graphs % one_per) == 0):
                print(f'Processing [{dataset}]: {(percent / 10.0)}%, {processed_graphs}/{num_graphs}', end='\r')
                percent += 1
            processed_graphs += 1
            line = ratings_df.iloc[i]
            user_index = self.user_key_type(line[0])
            item_index = self.item_key_type(line[1])
            rating = int(line[2])
            if ((item_index not in self.item_dict) or (user_index not in self.user_dict)):
                # Was `error_num += 1` on an unbound local -> crashed here.
                self.error_num += 1
                continue
            user_id = self.user_dict[user_index]['name']
            item_id = self.item_dict[item_index]['title']
            user_attr_list = self.user_dict[user_index]['attribute']
            item_attr_list = self.item_dict[item_index]['attribute']
            feature_list = (([user_id, item_id] + user_attr_list) + item_attr_list)
            graph = self.construct_graph(feature_list, rating)
            if (dataset == 'train'):
                if (rating > 0):
                    graphs_pos.append(graph)
                else:
                    graphs_neg.append(graph)
            else:
                graphs.append(graph)
        print()
        if (dataset == 'train'):
            return (graphs_pos, graphs_neg)
        else:
            return graphs

    def read_data(self):
        """Load dictionaries and ratings, split, and build all graphs.

        Returns (all graphs in [train_pos, train_neg, valid, test] order,
        stat-info dict including the split boundary indices).
        """
        with open(self.userfile, 'rb') as f:
            self.user_dict = pickle.load(f)
        with open(self.itemfile, 'rb') as f:
            self.item_dict = pickle.load(f)
        # Raw CSV keys may be str or int; coerce lookups to the pickled key type.
        self.user_key_type = type(list(self.user_dict.keys())[0])
        self.item_key_type = type(list(self.item_dict.keys())[0])
        with open(self.featurefile, 'rb') as f:
            feature_dict = pickle.load(f)
        ratings_df = pd.read_csv(self.ratingfile, sep=self.sep, header=None)
        # 70/15/15 split, stratified jointly on user id and rating.
        (train_df, test_df) = train_test_split(ratings_df, test_size=0.15, random_state=2019, stratify=ratings_df[[0, 2]])
        (train_df, valid_df) = train_test_split(train_df, test_size=(15 / 85), random_state=2019, stratify=train_df[[0, 2]])
        if self.store_backup:
            backup_path = f'{self.path}{self.dataset}/split_data_backup/'
            os.makedirs(backup_path, exist_ok=True)
            train_df.to_csv(f'{backup_path}train_data.csv', index=False)
            valid_df.to_csv(f'{backup_path}valid_data.csv', index=False)
            test_df.to_csv(f'{backup_path}test_data.csv', index=False)
        (train_graphs_p, train_graphs_n) = self.data_2_graphs(train_df, dataset='train')
        valid_graphs = self.data_2_graphs(valid_df, dataset='valid')
        test_graphs = self.data_2_graphs(test_df, dataset='test')
        graphs = (((train_graphs_p + train_graphs_n) + valid_graphs) + test_graphs)
        stat_info = {}
        stat_info['data_num'] = len(graphs)
        stat_info['feature_num'] = len(feature_dict)
        len_p = len(train_graphs_p)
        len_n = len(train_graphs_n)
        len_valid = len(valid_graphs)
        stat_info['train_test_split_index'] = [len_p, (len_p + len_n), ((len_p + len_n) + len_valid)]
        print('error number of data:', self.error_num)
        return (graphs, stat_info)

    def construct_graph(self, node_list, rating):
        """Wrap one feature list and its rating as a PyG ``Data`` object."""
        x = torch.LongTensor(node_list).unsqueeze(1)
        rating = torch.FloatTensor([rating])
        return Data(x=x, y=rating)

    def process(self):
        self.userfile = self.raw_file_names[0]
        self.itemfile = self.raw_file_names[1]
        self.featurefile = self.raw_file_names[2]
        self.ratingfile = self.raw_file_names[3]
        (graphs, stat_info) = self.read_data()
        os.makedirs(f'{self.path}processed/{(self.dataset + self.sufix)}', exist_ok=True)
        (data, slices) = self.collate(graphs)
        torch.save((data, slices), self.processed_paths[0])
        torch.save(stat_info, self.processed_paths[1])

    def node_M(self):
        """Number of distinct features (node vocabulary size)."""
        return self.feature_num

    def data_N(self):
        """Total number of graphs in the dataset."""
        return self.data_num
|
def cal_ndcg(predicts, labels, user_ids, k_list):
    """Mean NDCG@k per cutoff in `k_list`, averaged over users.

    Users with fewer than two predictions are skipped (NDCG is undefined
    for a single item). Returns an array aligned with `k_list`.
    """
    frame = pd.DataFrame({'user': np.squeeze(user_ids),
                          'predict': np.squeeze(predicts),
                          'label': np.squeeze(labels)})
    per_k_scores = [[] for _ in k_list]
    # sort=False keeps first-appearance user order, matching unique().
    for (_, rows) in frame.groupby('user', sort=False):
        scores = rows['predict'].tolist()
        if len(scores) < 2:
            continue
        truths = rows['label'].tolist()
        for (slot, k) in enumerate(k_list):
            per_k_scores[slot].append(ndcg_score([truths], [scores], k=k))
    return np.mean(np.array(per_k_scores), axis=1)
|
def cal_recall(predicts, labels, user_ids, k):
    """Mean recall@k over users.

    For each user with at least two scored items, sorts by predicted score
    (descending) and computes (relevant items in top-k) / min(total relevant, k).
    Users with no relevant items are skipped (previously a bare
    ``except: continue`` silently swallowed the resulting ZeroDivisionError
    along with any other error).

    Returns NaN (with a NumPy warning) if no user qualifies — unchanged
    from the original behavior.
    """
    d = {'user': np.squeeze(user_ids), 'predict': np.squeeze(predicts), 'label': np.squeeze(labels)}
    df = pd.DataFrame(d)
    recall = []
    for user_id in df.user.unique():
        user_sdf = df[(df['user'] == user_id)]
        if (user_sdf.shape[0] < 2):
            continue
        user_sdf = user_sdf.sort_values(by=['predict'], ascending=False)
        total_rel = min(user_sdf['label'].sum(), k)
        if total_rel <= 0:
            # No relevant items for this user: recall undefined, skip.
            continue
        # Explicit positional slice of the top-k rows after sorting.
        intersect_at_k = user_sdf['label'].iloc[0:k].sum()
        recall.append(float(intersect_at_k) / float(total_rel))
    return np.mean(np.array(recall))
|
def eval_metrics(predictions, labels, user_ids, test=False):
    """Aggregate batched predictions and compute ranking metrics.

    Concatenates the per-batch arrays, then computes NDCG@{5,10,20}; when
    `test` is True, additionally returns recall@{5,10,20}.
    """
    preds = np.concatenate(predictions, 0)
    ys = np.concatenate(labels, 0).astype(int)
    uids = np.concatenate(user_ids, 0)
    ndcg_list = cal_ndcg(preds, ys, uids, [5, 10, 20])
    if not test:
        return ndcg_list
    recalls = tuple(cal_recall(preds, ys, uids, k) for k in (5, 10, 20))
    return (ndcg_list, recalls)
|
class BigFile():
    """Random-access reader for a packed float32 feature matrix on disk.

    Layout on `datadir`: ``shape.txt`` ("<n_items> <n_dims>"), ``id.txt``
    (item names, newline- or space-separated), and a binary file of
    row-major float32 vectors, one per item.
    """
    def __init__(self, datadir, bin_file='feature.bin'):
        (self.nr_of_images, self.ndims) = list(map(int, open(os.path.join(datadir, 'shape.txt')).readline().split()))
        id_file = os.path.join(datadir, 'id.txt')
        self.names = open(id_file, 'r').read().strip().split('\n')
        # Fall back to space-separated ids if the newline split doesn't match.
        if (len(self.names) != self.nr_of_images):
            self.names = open(id_file, 'r').read().strip().split(' ')
        assert (len(self.names) == self.nr_of_images)
        self.name2index = dict(list(zip(self.names, list(range(self.nr_of_images)))))
        self.binary_file = os.path.join(datadir, bin_file)
        print(('[%s] %dx%d instances loaded from %s' % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir)))
        # Lazily-built in-RAM half-precision copy of the full matrix.
        self.torch_array = None
    def read_all_and_store(self):
        """Load the entire feature matrix into ``self.torch_array`` (float16)."""
        # Local helper; intentionally shadows the readall() method below
        # within this scope only.
        def readall(self, ndims):
            # ndims here is self.shape() == [n_items, n_dims].
            torch_array = torch.zeros(ndims, dtype=torch.half)
            index_name_array = [(self.name2index[x], x) for x in set(self.names) if (x in self.name2index)]
            index_name_array.sort(key=(lambda v: v[0]))
            sorted_index = [x[0] for x in index_name_array]
            nr_of_images = len(index_name_array)
            offset = (np.float32(1).nbytes * self.ndims)
            res1 = array.array('f')
            fr = open(self.binary_file, 'rb')
            fr.seek((index_name_array[0][0] * offset))
            res1.fromfile(fr, self.ndims)
            previous = index_name_array[0][0]
            torch_array[previous] = torch.tensor(res1)
            # `next` shadows the builtin; kept for byte-compatibility.
            for next in sorted_index[1:]:
                res1 = array.array('f')
                # After the last fromfile the cursor sits at (previous+1)*offset,
                # so relative seek of ((next-1)-previous)*offset lands at next*offset.
                move = (((next - 1) - previous) * offset)
                fr.seek(move, 1)
                res1.fromfile(fr, self.ndims)
                previous = next
                torch_array[previous] = torch.tensor(res1)
            return torch_array
        self.torch_array = readall(self, self.shape())
    def readall(self, isname=True):
        """Read every vector sequentially; returns (names, list-of-vectors).

        NOTE(review): the file handle is never closed here, and `res` keeps
        growing across iterations by design (sliced apart in the return).
        """
        index_name_array = [(self.name2index[x], x) for x in set(self.names) if (x in self.name2index)]
        index_name_array.sort(key=(lambda v: v[0]))
        sorted_index = [x[0] for x in index_name_array]
        nr_of_images = len(index_name_array)
        vecs = ([None] * nr_of_images)  # NOTE: unused
        offset = (np.float32(1).nbytes * self.ndims)
        res = array.array('f')
        fr = open(self.binary_file, 'rb')
        fr.seek((index_name_array[0][0] * offset))
        res.fromfile(fr, self.ndims)
        previous = index_name_array[0][0]
        for next in sorted_index[1:]:
            move = (((next - 1) - previous) * offset)
            fr.seek(move, 1)
            res.fromfile(fr, self.ndims)
            previous = next
        return ([x[1] for x in index_name_array], [res[(i * self.ndims):((i + 1) * self.ndims)].tolist() for i in range(nr_of_images)])
    def _read_from_ram(self, requested, isname=True):
        """Read directly from the in-RAM copy (loading it on first use).

        :param requested: names (or indices if isname is False) to fetch.
        :param isname: whether `requested` holds names or indices.
        :return: ([name], [vector]) — NOTE(review): only the FIRST matched
            item is returned, even if several were requested; confirm this
            is intentional at the call sites.
        """
        requested = set(requested)
        if isname:
            index_name_array = [(self.name2index[x], x) for x in requested if (x in self.name2index)]
        else:
            assert (min(requested) >= 0)
            assert (max(requested) < len(self.names))
            index_name_array = [(x, self.names[x]) for x in requested]
        if (len(index_name_array) == 0):
            return ([], [])
        if (self.torch_array is None):
            self.read_all_and_store()
        res = self.torch_array[index_name_array[0][0]]
        return ([index_name_array[0][1]], [res.tolist()])
    def _read_one_(self, requested, isname=True):
        """Read one item's vector via a pool of shared file handles.

        NOTE(review): relies on `self.fr_list`, `self.segmentation` and
        `self.mp_signal` (a multiprocessing lock/flag array), none of which
        are set in __init__ — presumably attached externally for
        multi-process reading; verify before calling.
        Returns ([name], [vector]) for the first matched item only.
        """
        if (self.fr_list is None):
            return self.read(requested, isname)
        requested = set(requested)
        if isname:
            index_name_array = [(self.name2index[x], x) for x in requested if (x in self.name2index)]
        else:
            assert (min(requested) >= 0)
            assert (max(requested) < len(self.names))
            index_name_array = [(x, self.names[x]) for x in requested]
        if (len(index_name_array) == 0):
            return ([], [])
        offset = (np.float32(1).nbytes * self.ndims)
        res = array.array('f')
        try:
            # Pick the handle pool segment covering this index.
            index = int((index_name_array[0][0] / self.segmentation))
            if (index >= len(self.fr_list)):
                index = (len(self.fr_list) - 1)
            signal = True
            # Spin until a free handle in the segment is claimed.
            while signal:
                with self.mp_signal.get_lock():
                    for signal_index in range(len(self.fr_list[index])):
                        if (self.mp_signal[((index * len(self.fr_list[0])) + signal_index)] == 1):
                            self.mp_signal[((index * len(self.fr_list[0])) + signal_index)] = 0
                            signal = False
                            break
                if signal:
                    time.sleep(0.0001)
            fr = self.fr_list[index][signal_index]['fr']
            # Seek relative to the handle's current position, read, then
            # restore the position before releasing the handle.
            move = ((index_name_array[0][0] * offset) - fr.tell())
            fr.seek(move, 1)
            res.fromfile(fr, self.ndims)
            fr.seek(((- move) - offset), 1)
            self.mp_signal[((index * len(self.fr_list[0])) + signal_index)] = 1
        except Exception as e:
            print(e)
        return ([index_name_array[0][1]], [res.tolist()])
    def read(self, requested, isname=True):
        """Read the requested items' vectors straight from the binary file.

        :param requested: iterable of names (or indices if isname is False).
        :param isname: whether `requested` holds names or indices.
        :return: ([names], [vectors]) in ascending index order.
        """
        requested = set(requested)
        if isname:
            index_name_array = [(self.name2index[x], x) for x in requested if (x in self.name2index)]
        else:
            assert (min(requested) >= 0)
            assert (max(requested) < len(self.names))
            index_name_array = [(x, self.names[x]) for x in requested]
        if (len(index_name_array) == 0):
            return ([], [])
        index_name_array.sort(key=(lambda v: v[0]))
        sorted_index = [x[0] for x in index_name_array]
        nr_of_images = len(index_name_array)
        vecs = ([None] * nr_of_images)  # NOTE: unused
        offset = (np.float32(1).nbytes * self.ndims)
        res = array.array('f')
        fr = open(self.binary_file, 'rb')
        fr.seek((index_name_array[0][0] * offset))
        res.fromfile(fr, self.ndims)
        previous = index_name_array[0][0]
        for next in sorted_index[1:]:
            move = (((next - 1) - previous) * offset)
            fr.seek(move, 1)
            res.fromfile(fr, self.ndims)
            previous = next
        fr.close()
        return ([x[1] for x in index_name_array], [res[(i * self.ndims):((i + 1) * self.ndims)].tolist() for i in range(nr_of_images)])
    def read_one(self, name):
        """Return the feature vector of a single named item."""
        (renamed, vectors) = self.read([name])
        return vectors[0]
    def shape(self):
        """Return [n_items, n_dims]."""
        return [self.nr_of_images, self.ndims]
    def cal_time(self):
        # Placeholder; no timing implemented.
        pass
|
class StreamFile():
    """Sequential iterator over a packed float32 feature file.

    Expects `datadir` to contain ``shape.txt`` ("<n_items> <n_dims>"),
    ``id.txt`` (item names) and ``feature.bin`` (row-major float32 vectors).
    Call :meth:`open` before iterating; iteration yields ``(name, vector)``
    and closes the file when exhausted.
    """

    def __init__(self, datadir):
        self.feat_dir = datadir
        shape_line = open(os.path.join(datadir, 'shape.txt')).readline()
        (self.nr_of_images, self.ndims) = [int(tok) for tok in shape_line.split()]
        id_file = os.path.join(datadir, 'id.txt')
        self.names = open(id_file, 'r').read().strip().split('\n')
        # Fall back to space-separated ids if newline splitting mismatches.
        if len(self.names) != self.nr_of_images:
            self.names = open(id_file, 'r').read().strip().split(' ')
        assert len(self.names) == self.nr_of_images
        self.name2index = {name: idx for (idx, name) in enumerate(self.names)}
        self.binary_file = os.path.join(datadir, 'feature.bin')
        print(('[%s] %dx%d instances loaded from %s' % (self.__class__.__name__, self.nr_of_images, self.ndims, datadir)))
        self.fr = None
        self.current = 0

    def open(self):
        """Open the binary file and rewind the cursor."""
        self.fr = open(os.path.join(self.feat_dir, 'feature.bin'), 'rb')
        self.current = 0

    def close(self):
        """Close the binary file if open."""
        if self.fr:
            self.fr.close()
        self.fr = None

    def __iter__(self):
        return self

    def __next__(self):
        if self.current >= self.nr_of_images:
            self.close()
            raise StopIteration
        vec = array.array('f')
        vec.fromfile(self.fr, self.ndims)
        name = self.names[self.current]
        self.current += 1
        return (name, vec.tolist())
|
def read_from_txt_file(cap_file):
    """Read captions from an "<id> <caption>" formatted text file.

    Lines with no caption yield an empty string, keeping the output aligned
    with the input lines. Relevance-score markers like ``#1.0`` are removed.

    :param cap_file: path to the caption file.
    :return: list of stripped caption strings.
    """
    captions = []
    with open(cap_file, 'r') as reader:
        for raw in reader:
            if len(raw.strip().split(' ', 1)) < 2:
                # Line holds only an id: keep an empty caption placeholder.
                text = ''
            else:
                (_, text) = raw.split(' ', 1)
            text = re.sub('#\\d\\.\\d+', '', text)
            captions.append(text.strip())
    return captions
|
def build_vocab(cap_file, encoding, threshold, lang):
    """Build a vocabulary from a caption file.

    Tokenizes every caption, keeps words occurring at least `threshold`
    times, and fills a ``Vocabulary`` in descending-frequency order. GRU
    encodings additionally reserve the four special tokens first.

    :param cap_file: path to the "<id> <caption>" file.
    :param encoding: word encoding method (bow, w2v, gru, ...); a ``_nsw``
        suffix enables stopword removal.
    :param threshold: minimum word count to keep.
    :param lang: tokenizer language.
    :return: (vocab, word_counts) where word_counts is [(word, count), ...].
    """
    remove_sw = ('_nsw' in encoding)
    logger.info('Build a simple vocabulary wrapper from %s', cap_file)
    counter = Counter()
    for caption in read_from_txt_file(cap_file):
        counter.update(TextTool.tokenize(caption, language=lang, remove_stopword=remove_sw))
    kept = [(word, cnt) for (word, cnt) in counter.items() if cnt >= threshold]
    word_counts = sorted(kept, key=(lambda wc: wc[1]), reverse=True)
    vocab = Vocabulary(encoding)
    if 'gru' in encoding:
        for token in ('<pad>', '<start>', '<end>', '<unk>'):
            vocab.add(token)
    for (word, _) in word_counts:
        vocab.add(word)
    return (vocab, word_counts)
|
def process(options, collection):
    """Build and persist the vocabulary for one collection.

    Writes the pickled vocabulary and a plain-text word-count file under
    ``<rootpath>/<collection>/TextData/<folder_name>/``.

    :param options: parsed command-line options (see ``main``).
    :param collection: collection name.
    :return: 0 on success or when the target file already exists
        (previously returned None on success, inconsistently with the
        skip path).
    """
    overwrite = options.overwrite
    rootpath = options.rootpath
    threshold = options.threshold
    encoding = options.encoding
    language = options.language
    folder_name = options.folder_name
    caption_name = options.caption_name
    vocab_file = os.path.join(rootpath, collection, 'TextData', folder_name, ('%s_%d.pkl' % (encoding, threshold)))
    count_file = os.path.join(os.path.dirname(vocab_file), ('%s_%d.txt' % (encoding, threshold)))
    if checkToSkip(vocab_file, overwrite):
        return 0
    cap_file = os.path.join(rootpath, collection, 'TextData', caption_name)
    (vocab, word_counts) = build_vocab(cap_file, encoding, threshold=threshold, lang=language)
    makedirsforfile(vocab_file)
    with open(vocab_file, 'wb') as fw:
        pickle.dump(vocab, fw, pickle.HIGHEST_PROTOCOL)
    logger.info('Saved vocabulary of %d words to %s', len(vocab), vocab_file)
    with open(count_file, 'w') as fw:
        fw.write('\n'.join([('%s %d' % x) for x in word_counts]))
    logger.info('Saved word-counts to %s', count_file)
    return 0
|
def main(argv=None):
    """CLI entry point: parse options and build the vocabulary.

    :param argv: argument list; defaults to ``sys.argv[1:]``.
    :return: 1 on missing collection argument, otherwise the result of
        :func:`process`.
    """
    if argv is None:
        argv = sys.argv[1:]
    from optparse import OptionParser
    opt_parser = OptionParser(usage='usage: %prog [options] collection')
    opt_parser.add_option('--overwrite', default=0, type='int', help='overwrite existing file (default: 0)')
    opt_parser.add_option('--rootpath', default=ROOT_PATH, type='string', help=('rootpath (default: %s)' % ROOT_PATH))
    opt_parser.add_option('--caption_name', default='train_collection.caption.txt', type='string', help='caption_name')
    opt_parser.add_option('--language', default=DEFAULT_LANG, type='string', help=('language (default: %s)' % DEFAULT_LANG))
    opt_parser.add_option('--encoding', default='bow', type='choice', choices=TEXT_ENCODINGS, help=('text encoding strategy. Valid choices are %s. (default: %s)' % (TEXT_ENCODINGS, DEFAULT_TEXT_ENCODING)))
    opt_parser.add_option('--threshold', default=5, type='int', help=('minimum word occurrence (default: %d)' % MIN_WORD_COUNT))
    opt_parser.add_option('--folder_name', default='vocab', type='string', help='The output folder name (default: vocab)')
    (opts, positional) = opt_parser.parse_args(argv)
    if len(positional) < 1:
        opt_parser.print_help()
        return 1
    assert (opts.language in ['en', 'zh']), ('language %s not supported' % opts.language)
    return process(opts, positional[0])
|
class No():
    """Empty placeholder class; carries no state or behavior."""
|
class config(BaseConfig.config):
    """FrameLAFF experiment configuration (frame-level LAFF variant).

    Overrides the base config with frame-feature input, CLIP frame features
    and 8-head attention; ``adjust_parm`` maps an underscore-separated index
    string onto concrete feature/attention/encoding choices.
    """
    model_name = 'FrameLAFF'
    dropout = 0.2
    activation = 'tanh'
    batch_norm = True
    vis_fc_layers = ['0', 4096]
    txt_fc_layers = '0-4096'
    text_encoding = {'bow_encoding': {'name': 'bow_nsw'}, 'w2v_encoding': {'name': 'w2v_nsw'}, 'rnn_encoding': {'name': 'gru_mean'}, 'bert_encoding': {'name': 'noBert', 'dir_name': 'bert-base-uncased'}, 'CLIP_encoding': {'name': 'noCLIP', 'dir_name': 'clip_finetune_8frame_uniform_1103'}, 'NetVLAD_encoding': {'name': 'noNetVLAD'}}
    clip_opt = {'size': 512, 'transform_batch_norm': True, 'transform_dropout': 0.0, 'transform_activation': 'tanh', 'frozen': True}
    float16 = True
    max_frame = 50
    frame_feat_input = True
    vid_frame_feats = ['clip_frame_feat_ViT-B_32,os']
    vis_frame_attention = BaseConfig.config.attention_types[1]
    attention_param_each_head = {'with_ave': False, 'mul': False, 'split_head': True}
    multi_head_attention = {'dropout': 0.0, 'heads': 8, 'embed_dim_qkv': (4096 // 8)}
    vid_feats = ['mean_clip_frame_feat_ViT-B_32,os']
    frame_feat_with_video_feat = True
    vis_attention_global_decay_rate = 0.0
    txt_attention_global_decay_rate = 0.0
    vis_no_transform = ['clip_finetune_8frame_uniform_1103', 'clip_frame_feat_ViT-B_32,os']
    txt_no_transform = ['CLIP_encoder']
    vis_frame_addFC = False
    def adjust_parm(self, value):
        """Apply a hyper-parameter sweep point encoded as "i_j_k_l_m_n".

        Each underscore-separated token is evaluated to an int and used to
        index the candidate lists below (frame feats, frame attention, text
        encodings, text attention, video feats, video attention).
        """
        vid_frame_feats = ['Frame_clip_finetune_8frame_uniform_1103', 'clip_frame_feat_ViT-B_32,os']
        clip_precal_feats = ['clip_finetune_8frame_uniform_1103', 'CLIP_ViT-B32']
        vid_feats_iterlist = [np.array([0]), np.array([1])]
        text_encodings = [['nobow_nsw', 'now2v_nsw', 'nogru_mean', 'noBert', 'ViT-B/32', 'noNetVLAD'], ['bow_nsw', 'w2v_nsw', 'gru_mean', 'noBert', 'ViT-B/32', 'noNetVLAD'], ['bow_nsw', 'w2v_nsw', 'nogru_mean', 'noBert', 'ViT-B/32', 'noNetVLAD']]
        a = []
        # SECURITY: eval() on each config token; only safe for trusted
        # sweep strings (expected to be small integer literals).
        for (i, each) in enumerate(value.split('_')):
            a.append(eval(each))
        self.vid_frame_feats = list(np.array(vid_frame_feats)[vid_feats_iterlist[a[0]]])
        self.vis_no_transform = list(np.array(vid_frame_feats)[vid_feats_iterlist[a[0]]])
        print('vid_frame_feats', self.vid_frame_feats)
        self.text_encoding['CLIP_encoding']['dir_name'] = clip_precal_feats[a[0]]
        self.vis_frame_attention = self.attention_types[a[1]]
        for (i, each) in enumerate(self.text_encoding):
            self.text_encoding[each]['name'] = text_encodings[a[2]][i]
        self.txt_attention = self.txt_attentions[a[3]]
        vid_feats = ['mean_clip_frame_feat_ViT-B_32,os', 'mean_resnext101_resnet152', 'mean_C3d_resneXt101_16f', 'mean_resnext101_32x48d_wsl,avgpool,os', 'mean_pyresnext-101_rbps13k,flatten0_output,os', 'HowTo100M_TimeSformer_divST_96x4_224', 'X3D_L', 'mean_irCSN_152_ig65m_from_scratch', 'random_feat_512', 'full_1_feat_512', 'mean_pyresnet-152_imagenet11k,flatten0_output,os']
        vid_feats_iterlist = [np.array([2, 5, 6, 7]), np.array([4, 2, 3, 7])]
        self.vid_feats = list(np.array(vid_feats)[vid_feats_iterlist[a[4]]])
        print('vid_feats', self.vid_feats)
        self.vis_attention = self.attention_types[a[5]]
|
class config(object):
    """Base experiment configuration for the W2VVPP-style retrieval models.

    All hyper-parameters live as class attributes; experiment subclasses
    override a subset and may implement ``adjust_parm`` to apply sweep
    points. Attribute meanings are grouped by inline comments below.
    """
    def adjust_parm(self, value):
        # Hook for subclasses: apply a sweep-point string. No-op here.
        pass
    def get_txt_encoder_num(self, text_encoding):
        """Count enabled text encoders (those whose name lacks a 'no' marker)."""
        encoder_num = 0
        for name in text_encoding:
            encoder_value = text_encoding[name]['name']
            if ('no' not in encoder_value):
                encoder_num += 1
        return encoder_num
    # --- model / text encoding ---
    model_name = 'w2vpp_mutivis_attention'
    text_encoding = {'bow_encoding': {'name': 'bow_nsw'}, 'w2v_encoding': {'name': 'w2v_nsw'}, 'rnn_encoding': {'name': 'gru_mean'}, 'bert_encoding': {'name': 'noBert', 'dir_name': 'bert-base-uncased'}, 'CLIP_encoding': {'name': 'noCLIP', 'dir_name': 'CLIP_ViT-B32'}, 'NetVLAD_encoding': {'name': 'noNetVLAD'}}
    preprocess_type = 'clip'
    text_encoder_num = 3
    threshold = 5
    bow_norm = 0
    we_dim = 500
    rnn_size = 1024
    rnn_layer = 1
    txt_fc_layers = '0-2048'
    txt_norm = 2
    # --- BERT / CLIP / NetVLAD encoder options ---
    bert_size = 768
    bert_frozen = False
    bert_do_lower_case = True
    bert_transform_batch_norm = True
    bert_transform_dropout = 0
    bert_transform_activation = 'tanh'
    clip_opt = {'size': 512, 'transform_batch_norm': False, 'transform_dropout': 0.0, 'transform_activation': 'tanh', 'frozen': True, 'vocab_size': 49408}
    slip_opt = {'size': 512, 'transform_batch_norm': False, 'transform_dropout': 0.0, 'transform_activation': 'tanh', 'frozen': True}
    NetVLAD_opt = {'num_clusters': 32, 'alpha': 100, 'normalize_pooling': False}
    # --- visual branch / common network options ---
    vis_fc_layers = ['0', 2048]
    vis_norm = 2
    use_abs = False
    batch_norm = False
    batch_norm_momentum = 0.1
    batch_norm_eps = 1e-05
    dropout = 0.2
    last_dropout = 0.2
    activation = 'tanh'
    last_activation = 'tanh'
    # --- loss / optimizer ---
    loss = 'mrl'
    margin = 0.2
    direction = 't2i'
    max_violation = True
    cost_style = 'sum'
    measure = 'cosine'
    optimizer = 'rmsprop'
    lr = 0.0001
    lr_decay_rate = 0.99
    grad_clip = 2
    float16 = False
    # --- attention variants (indexed by subclasses' adjust_parm) ---
    attention_types = ('attention_noAverageMul_Ave', 'average_AverageMul_noAve', 'con_attention', 'fc_attention', 'just_average', 'muti_head_attention', 'attention3', 'attention_noAveNoAverageMul', 'concat', 'attention_averageMul', 'muti_head_attention_official', 'my_self_attention', 'Multi_head_MyApply_Attention', 'Multi_head_MyApply_FusionAttention', 'Multi_head_Attention_layer_norm', 'Multi_head_Attention_distinct_fc', 'Attention_MMT')
    attention_l2norm = False
    muti_head_attention_official = {'agg': 'mean'}
    vis_attentions = attention_types
    vis_no_transform = []
    txt_no_transform = []
    my_self_attention_output_types = ['mean', 'max', 'first', 'last', 'cls_embedding', 'concat', 'max_embedding', 'mean_embedding', 'random', 'second', 'third', 'Attention_1']
    my_self_attention_output_type = my_self_attention_output_types[0]
    txt_attentions = attention_types
    txt_attention = attention_types[1]
    txt_attention_global_decay_rate = 0.8
    txt_expert_embedding = {'expert': False, 'l2norm': False}
    # --- video features ---
    vid_feats = ['mean_resnext101_resnet152', 'irCSN_152_ig65m_16frms', 'mean_pyresnext-101_rbps13k,flatten0_output,os', 'ipcsn_sports1m_32frms', 'mean_C3d_resneXt101_16f', 'mean_resnext101_32x48d_wsl,avgpool,os', 'mean_clip_frame_feat_ViT-B_32,os', 'HowTo100M_TimeSformer_divST_96x4_224', 'X3D_L', 'I3D_NLN_8x8_R50']
    vis_feat_add_concat = False
    vis_attention = attention_types[1]
    vis_attention_global_decay_rate = 0.8
    vis_expert_embedding = {'expert': False, 'l2norm': False}
    multi_head_attention = {'dropout': 0.0, 'heads': 4, 'embed_dim_qkv': (2048 // 4)}
    attention_param_each_head = {'with_ave': True, 'mul': False, 'split_head': True}
    multi_space = True
    # --- frame-level input ---
    max_frame = 200
    frame_feat_input = False
    frame_feat_with_video_feat = False
    vid_frame_feats = ['pyresnext-101_rbps13k,flatten0_output,os+pyresnet-152_imagenet11k,flatten0_output,os']
    vis_frame_attention = attention_types[1]
    vis_frame_addFC = True
    tranformer_encoder_opt = {'nhead': 4, 'num_layers': 4}
    add_vid_feats = False
    csn = False
    # --- SGRAF sub-model options ---
    SGRAF = False
    muti_feat = 'vg_label_feat_36dim_repeat'
    img_dim = 2048
    word_dim = 300
    embed_size = 1024
    sim_dim = 256
    num_layers = 1
    bi_gru = False
    no_imgnorm = True
    no_txtnorm = True
    module_name = 'SGR'
    sgr_step = 3
    # --- auxiliary task 2 (bow prediction) ---
    task2 = False
    txt_feature_task2 = 'bow'
    txt_fc_layers_task2 = '0-0'
    text_encoding_task2 = 'bow_nsw'
    threshold_task2 = 5
    bow_norm_task2 = 0
    batch_norm_task2 = True
    activation_task2 = 'sigmoid'
    dropout_task2 = 0.1
    vis_fc_layers_task2 = '0-0'
    # --- auxiliary task 3 (negative-caption contrastive) ---
    task3_start = (- 1)
    task3_loss_weight = 1
    task3_margin = 0.2
    loss_lambda = 0.2
    measure_task2 = 'hist'
    alpha = 0.2
    negative = False
    kl = False
    mask = False
    origin_vid_feats = None
    origin_text_feats = None
    task3_end = 100
    task3_neg_weight = 1
    task3_neg_retrival_weight = 0.001
    task3_bottommargin = 0.1
    task3_uppermargin = 0.6
    task3_bottommargin_t2t = 0.1
    task3_uppermargin_t2t = 0.3
    # --- loader / misc ---
    max_txtlength = 77
    frame_loader = False
    frame_sample_type_train = 'random'
    frame_sample_type_test = 'uniform'
    sample_frame = 8
    txt_fc_same_with_vis_fc = False
    txt_fc_same_with_vis_fc_dict = {'CLIP_encoder': 'clip2video_global_visual_output_MSVD'}
    skip_feature = {'visual': None, 'text': None}
|
class config(BaseConfig.config):
    """LAFF (video-level) experiment configuration.

    Uses 4096-d joint spaces with 8-head attention over multiple video
    features; ``adjust_parm`` maps an underscore-separated index string onto
    feature/attention/encoding choices and per-head attention flags.
    """
    model_name = 'LAFF'
    dropout = 0.2
    activation = 'tanh'
    vis_fc_layers = ['0', 4096]
    txt_fc_layers = '0-4096'
    text_encoding = {'bow_encoding': {'name': 'bow_nsw'}, 'w2v_encoding': {'name': 'w2v_nsw'}, 'rnn_encoding': {'name': 'gru_mean'}, 'bert_encoding': {'name': 'noBert', 'dir_name': 'bert-base-uncased'}, 'CLIP_encoding': {'name': 'noCLIP', 'dir_name': 'clip_finetune_8frame_uniform_1103'}, 'NetVLAD_encoding': {'name': 'noNetVLAD'}}
    bert_size = 768
    bert_frozen = True
    bert_do_lower_case = True
    bert_transform_batch_norm = True
    bert_transform_dropout = 0
    bert_transform_activation = 'tanh'
    clip_opt = {'size': 512, 'transform_batch_norm': True, 'transform_dropout': 0.0, 'transform_activation': 'tanh', 'frozen': True, 'vocab_size': 49408}
    attention_param_each_head = {'with_ave': True, 'mul': False, 'split_head': True}
    multi_head_attention = {'dropout': 0.0, 'heads': 8, 'embed_dim_qkv': (4096 // 8)}
    vis_attention_global_decay_rate = 0.8
    txt_attention_global_decay_rate = 0.8
    vis_no_transform = ['clip_finetune_8frame_uniform_1103']
    txt_no_transform = ['CLIP_encoder']
    def adjust_parm(self, value):
        """Apply a sweep point encoded as "i_j_k_l_m_n_o".

        Tokens index: video-feature subset, visual attention, text-encoding
        set, text attention, then three 0/1 flags for the per-head
        attention options (with_ave, mul, split_head).
        """
        vid_feats = ['clip_finetune_8frame_uniform_1103', 'mean_resnext101_resnet152', 'mean_C3d_resneXt101_16f', 'mean_resnext101_32x48d_wsl,avgpool,os', 'mean_pyresnext-101_rbps13k,flatten0_output,os', 'HowTo100M_TimeSformer_divST_96x4_224', 'X3D_L', 'mean_irCSN_152_ig65m_from_scratch']
        vid_feats_iterlist = [np.array([0, 5, 6, 7])]
        text_encodings = [['bow_nsw', 'w2v_nsw', 'gru_mean', 'noBert', 'ViT-B/32', 'noNetVLAD']]
        a = []
        # SECURITY: eval() on each config token; only safe for trusted
        # sweep strings (expected to be small integer literals).
        for (i, each) in enumerate(value.split('_')):
            a.append(eval(each))
        print(a)
        self.vid_feats = list(np.array(vid_feats)[vid_feats_iterlist[a[0]]])
        print('vid_feats', self.vid_feats)
        self.vis_attention = self.vis_attentions[a[1]]
        for (i, each) in enumerate(self.text_encoding):
            self.text_encoding[each]['name'] = text_encodings[a[2]][i]
        self.txt_attention = self.txt_attentions[a[3]]
        self.attention_param_each_head['with_ave'] = (True if (a[4] == 1) else False)
        self.attention_param_each_head['mul'] = (True if (a[5] == 1) else False)
        self.attention_param_each_head['split_head'] = (True if (a[6] == 1) else False)
|
def parse_args():
    """Parse command-line arguments for the data-checking script."""
    arg_parser = argparse.ArgumentParser('check data')
    arg_parser.add_argument('--rootpath', type=str, default=ROOT_PATH, help=('path to datasets. (default: %s)' % ROOT_PATH))
    arg_parser.add_argument('dataset', type=str, help='test dataset')
    return arg_parser.parse_args()
|
def parse_args():
    """Parse training arguments, pin CUDA devices, and return the options.

    Must run before ``import torch`` so that setting CUDA_VISIBLE_DEVICES
    takes effect; torch is therefore imported inside this function.
    """
    parser = argparse.ArgumentParser('W2VVPP training script.')
    parser.add_argument('--rootpath', type=str, default=ROOT_PATH, help=('path to datasets. (default: %s)' % ROOT_PATH))
    parser.add_argument('trainCollection', type=str, default='msrvtt10k', help='train collection')
    parser.add_argument('valCollection', type=str, default='tv2016train', help='validation collection')
    parser.add_argument('--trainCollection2', type=str, default='None', help='train collection')
    parser.add_argument('--task2_caption', type=str, default='no_task2_caption', help='the suffix of task2 caption.(It looks like "caption.nouns vocab_nouns") Default is nouns.')
    parser.add_argument('--train_strategy', type=str, default='usual', help='train strategy.("usual, subset") Default is usual.')
    parser.add_argument('--overwrite', type=int, default=0, choices=[0, 1], help='overwrite existed vocabulary file. (default: 0)')
    parser.add_argument('--val_set', type=str, default='setA', help='validation collection set (no, setA, setB). (default: setA)')
    parser.add_argument('--metric', type=str, default='mir', choices=['r1', 'r5', 'medr', 'meanr', 'mir'], help='performance metric on validation set')
    parser.add_argument('--num_epochs', default=80, type=int, help='Number of training epochs.')
    parser.add_argument('--batch_size', default=128, type=int, help='Size of a training mini-batch.')
    parser.add_argument('--workers', default=2, type=int, help='Number of data loader workers.')
    parser.add_argument('--model_prefix', default='runs_0', type=str, help='Path to save the model and Tensorboard log.')
    parser.add_argument('--config_name', type=str, default='w2vvpp_resnext101-resnet152_subspace', help='model configuration file. (default: w2vvpp_resnext101-resnet152_subspace')
    parser.add_argument('--parm_adjust_config', type=str, default='None', help='the config parm you need to set. (default: None')
    # BUG FIX: default was the int 0 although type=str; assigning an int to
    # os.environ below raised TypeError whenever the default was used.
    parser.add_argument('--device', default='0', type=str, help='cuda:n or cpu (default: 0)')
    parser.add_argument('--random_seed', default=2, type=int, help='random_seed of the trainer')
    parser.add_argument('--local_rank', default=0, type=int, help='distributed rank if use muti-gpu')
    parser.add_argument('--pretrained_file_path', default='None', type=str, help='Whether use previous model to train')
    parser.add_argument('--save_mean_last', default=0, type=int, choices=[0, 1], help='Whether save the average of last 10 epoch model')
    parser.add_argument('--task3_caption', type=str, default='no_task3_caption', help='the suffix of task3 caption.(It looks like "caption.false ") Default is false.')
    args = parser.parse_args()
    # Restrict visible GPUs before torch initializes CUDA.
    os.environ['CUDA_VISIBLE_DEVICES'] = args.device
    import torch
    print(torch.cuda.device_count())
    return args
|
class CustomObjectScope(object):
    """Provides a scope inside which changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.

    Within the `with` statement, custom objects are resolvable by name and
    updates to the global registry persist. On exit, the global registry is
    restored to its state at entry.

    # Example

    ```python
    with CustomObjectScope({'MyObject': MyObject}):
        layer = Dense(..., kernel_regularizer='MyObject')
        # save, load, etc. will recognize custom object by name
    ```
    """

    def __init__(self, *args):
        # Each positional argument is a dict mapping names to custom objects.
        self.custom_objects = args
        self.backup = None

    def __enter__(self):
        # Snapshot the registry so __exit__ can restore it exactly.
        self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
        for mapping in self.custom_objects:
            _GLOBAL_CUSTOM_OBJECTS.update(mapping)
        return self

    def __exit__(self, *args, **kwargs):
        # Revert the registry to the entry-time snapshot.
        _GLOBAL_CUSTOM_OBJECTS.clear()
        _GLOBAL_CUSTOM_OBJECTS.update(self.backup)
|
def custom_object_scope(*args):
    """Convenience wrapper around `CustomObjectScope`.

    Inside the returned `with` scope, custom objects are resolvable by name;
    changes to the global custom-object registry are undone when the scope
    exits.

    # Arguments
        *args: Variable length list of dictionaries of name,
            class pairs to add to custom objects.

    # Returns
        Object of type `CustomObjectScope`.
    """
    return CustomObjectScope(*args)
|
def get_custom_objects():
    """Retrieves a live reference to the global dictionary of custom objects.

    Prefer `custom_object_scope` for scoped updates; this accessor exposes
    `_GLOBAL_CUSTOM_OBJECTS` directly for permanent modification.

    # Example

    ```python
    get_custom_objects().clear()
    get_custom_objects()['MyObject'] = MyObject
    ```

    # Returns
        Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
    """
    return _GLOBAL_CUSTOM_OBJECTS
|
def serialize_keras_object(instance):
    """Serializes a Keras-style object for storage.

    Objects exposing `get_config` serialize to a
    `{'class_name': ..., 'config': ...}` dict; callables fall back to their
    `__name__`; `None` passes through unchanged.

    # Raises
        ValueError: if the instance exposes neither `get_config` nor `__name__`.
    """
    if instance is None:
        return None
    if hasattr(instance, 'get_config'):
        return {'class_name': instance.__class__.__name__,
                'config': instance.get_config()}
    if hasattr(instance, '__name__'):
        return instance.__name__
    raise ValueError('Cannot serialize', instance)
|
def deserialize_keras_object(identifier, module_objects=None, custom_objects=None, printable_module_name='object'):
    """Reconstructs an object or function from its serialized identifier.

    # Arguments
        identifier: Either a dict of the form
            `{'class_name': ..., 'config': ...}` (as produced by
            `serialize_keras_object`) or a plain string naming a function.
        module_objects: Dict of built-in names used as the final lookup
            fallback.
        custom_objects: Dict of user-supplied names, checked before the
            global custom-object registry.
        printable_module_name: Label used in error messages (e.g. 'layer').

    # Returns
        The deserialized class instance or function.

    # Raises
        ValueError: If the config dict is malformed, the name cannot be
            resolved, or the identifier is neither a dict nor a string.
    """
    if isinstance(identifier, dict):
        # Dict form: resolve the class, then instantiate it from its config.
        config = identifier
        if (('class_name' not in config) or ('config' not in config)):
            raise ValueError(('Improper config format: ' + str(config)))
        class_name = config['class_name']
        # Lookup precedence: caller-supplied custom_objects, then the
        # global registry, then module_objects.
        if (custom_objects and (class_name in custom_objects)):
            cls = custom_objects[class_name]
        elif (class_name in _GLOBAL_CUSTOM_OBJECTS):
            cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
        else:
            module_objects = (module_objects or {})
            cls = module_objects.get(class_name)
            if (cls is None):
                raise ValueError(((('Unknown ' + printable_module_name) + ': ') + class_name))
        if hasattr(cls, 'from_config'):
            custom_objects = (custom_objects or {})
            if has_arg(cls.from_config, 'custom_objects'):
                # from_config accepts custom_objects directly: merge the
                # global registry with the caller's dict (caller wins).
                return cls.from_config(config['config'], custom_objects=dict((list(_GLOBAL_CUSTOM_OBJECTS.items()) + list(custom_objects.items()))))
            # Otherwise expose custom objects via a temporary scope.
            with CustomObjectScope(custom_objects):
                return cls.from_config(config['config'])
        else:
            # No from_config: treat the config dict as constructor kwargs.
            custom_objects = (custom_objects or {})
            with CustomObjectScope(custom_objects):
                return cls(**config['config'])
    elif isinstance(identifier, six.string_types):
        # String form: resolve a function by name with the same precedence.
        function_name = identifier
        if (custom_objects and (function_name in custom_objects)):
            fn = custom_objects.get(function_name)
        elif (function_name in _GLOBAL_CUSTOM_OBJECTS):
            fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
        else:
            fn = module_objects.get(function_name)
            if (fn is None):
                raise ValueError(((('Unknown ' + printable_module_name) + ':') + function_name))
        return fn
    else:
        raise ValueError(((('Could not interpret serialized ' + printable_module_name) + ': ') + identifier))
|
def func_dump(func):
    """Serializes a user defined function.

    # Arguments
        func: the function to serialize.

    # Returns
        A tuple `(code, defaults, closure)` where `code` is the function's
        bytecode, marshalled and base64-encoded to ASCII text.
    """
    encoded = codecs.encode(marshal.dumps(func.__code__), 'base64').decode('ascii')
    cells = func.__closure__
    # Extract captured values only when a closure actually exists.
    captured = tuple(c.cell_contents for c in cells) if cells else None
    return (encoded, func.__defaults__, captured)
|
def func_load(code, defaults=None, closure=None, globs=None):
    """Deserializes a user defined function.

    # Arguments
        code: bytecode of the function, or the whole
            `(code, defaults, closure)` tuple produced by `func_dump`.
        defaults: defaults of the function.
        closure: closure of the function.
        globs: dictionary of global objects.

    # Returns
        A function object.
    """
    if isinstance(code, (tuple, list)):
        # Accept the packed output of `func_dump` directly.
        code, defaults, closure = code
        if isinstance(defaults, list):
            defaults = tuple(defaults)

    def ensure_value_to_cell(value):
        """Wrap `value` in a closure cell object unless it already is one."""
        def dummy_fn():
            value  # free variable -> forces creation of a closure cell
        cell_value = dummy_fn.__closure__[0]
        return value if isinstance(value, type(cell_value)) else cell_value

    if closure is not None:
        closure = tuple(ensure_value_to_cell(item) for item in closure)
    try:
        # Normal path: base64 text as produced by `func_dump`.
        raw_code = codecs.decode(code.encode('ascii'), 'base64')
        code = marshal.loads(raw_code)
    except (UnicodeEncodeError, binascii.Error, ValueError):
        # Fallback for payloads stored as raw unicode-escaped bytes.
        raw_code = code.encode('raw_unicode_escape')
        code = marshal.loads(raw_code)
    if globs is None:
        globs = globals()
    return python_types.FunctionType(code, globs, name=code.co_name, argdefs=defaults, closure=closure)
|
def has_arg(fn, name, accept_all=False):
    """Checks if a callable accepts a given keyword argument.

    For Python 2, checks if there is an argument with the given name.

    For Python 3, checks if there is an argument with the given name, and
    also whether this argument can be called with a keyword (i.e. if it is
    not a positional-only argument).

    # Arguments
        fn: Callable to inspect.
        name: Check if `fn` can be called with `name` as a keyword argument.
        accept_all: What to return if there is no parameter called `name`
            but the function accepts a `**kwargs` argument.

    # Returns
        bool, whether `fn` accepts a `name` keyword argument.
    """
    if sys.version_info < (3,):
        spec = inspect.getargspec(fn)
        if accept_all and spec.keywords is not None:
            return True
        return name in spec.args
    if sys.version_info < (3, 3):
        spec = inspect.getfullargspec(fn)
        if accept_all and spec.varkw is not None:
            return True
        return name in spec.args or name in spec.kwonlyargs
    sig = inspect.signature(fn)
    param = sig.parameters.get(name)
    if param is None:
        # No named parameter; a **kwargs catch-all may still accept it.
        if accept_all:
            return any(p.kind == inspect.Parameter.VAR_KEYWORD
                       for p in sig.parameters.values())
        return False
    return param.kind in (inspect.Parameter.POSITIONAL_OR_KEYWORD,
                          inspect.Parameter.KEYWORD_ONLY)
|
class Progbar(object):
    """Displays a progress bar.

    # Arguments
        target: Total number of steps expected, None if unknown.
        width: Progress bar width on screen.
        verbose: Verbosity mode, 0 (silent), 1 (verbose), 2 (semi-verbose)
        stateful_metrics: Iterable of string names of metrics that
            should *not* be averaged over time. Metrics in this list
            will be displayed as-is. All others will be averaged
            by the progbar before display.
        interval: Minimum visual progress update interval (in seconds).
    """
    def __init__(self, target, width=30, verbose=1, interval=0.05, stateful_metrics=None):
        self.target = target
        self.width = width
        self.verbose = verbose
        self.interval = interval
        if stateful_metrics:
            self.stateful_metrics = set(stateful_metrics)
        else:
            self.stateful_metrics = set()
        # Carriage-return redraw only works on a real TTY or inside Jupyter.
        self._dynamic_display = ((hasattr(sys.stdout, 'isatty') and sys.stdout.isatty()) or ('ipykernel' in sys.modules))
        # _total_width tracks how many chars the last render used, so the
        # next render can pad/overwrite it cleanly.
        self._total_width = 0
        self._seen_so_far = 0
        # Metric name -> [weighted sum, total weight] for averaged metrics,
        # or the raw last value for stateful metrics.
        self._values = collections.OrderedDict()
        self._start = time.time()
        self._last_update = 0
    def update(self, current, values=None):
        """Updates the progress bar.

        # Arguments
            current: Index of current step.
            values: List of tuples:
                `(name, value_for_last_step)`.
                If `name` is in `stateful_metrics`,
                `value_for_last_step` will be displayed as-is.
                Else, an average of the metric over time will be displayed.
        """
        values = (values or [])
        for (k, v) in values:
            if (k not in self.stateful_metrics):
                # Weight each value by the number of steps it covers so the
                # displayed number is a true running average.
                if (k not in self._values):
                    self._values[k] = [(v * (current - self._seen_so_far)), (current - self._seen_so_far)]
                else:
                    self._values[k][0] += (v * (current - self._seen_so_far))
                    self._values[k][1] += (current - self._seen_so_far)
            else:
                # Stateful metrics are shown as-is, no averaging.
                self._values[k] = v
        self._seen_so_far = current
        now = time.time()
        info = (' - %.0fs' % (now - self._start))
        if (self.verbose == 1):
            # Throttle redraws to self.interval, except for the final step.
            if (((now - self._last_update) < self.interval) and (self.target is not None) and (current < self.target)):
                return
            prev_total_width = self._total_width
            if self._dynamic_display:
                # Backspace over the previous render, then rewind the line.
                sys.stdout.write(('\x08' * prev_total_width))
                sys.stdout.write('\r')
            else:
                sys.stdout.write('\n')
            if (self.target is not None):
                # e.g. " 57/100 [=========>...........]"
                numdigits = (int(np.floor(np.log10(self.target))) + 1)
                barstr = ('%%%dd/%d [' % (numdigits, self.target))
                bar = (barstr % current)
                prog = (float(current) / self.target)
                prog_width = int((self.width * prog))
                if (prog_width > 0):
                    bar += ('=' * (prog_width - 1))
                    if (current < self.target):
                        bar += '>'
                    else:
                        bar += '='
                bar += ('.' * (self.width - prog_width))
                bar += ']'
            else:
                bar = ('%7d/Unknown' % current)
            self._total_width = len(bar)
            sys.stdout.write(bar)
            if current:
                time_per_unit = ((now - self._start) / current)
            else:
                time_per_unit = 0
            if ((self.target is not None) and (current < self.target)):
                # Still running: show an ETA in s / m:ss / h:mm:ss form.
                eta = (time_per_unit * (self.target - current))
                if (eta > 3600):
                    eta_format = ('%d:%02d:%02d' % ((eta // 3600), ((eta % 3600) // 60), (eta % 60)))
                elif (eta > 60):
                    eta_format = ('%d:%02d' % ((eta // 60), (eta % 60)))
                else:
                    eta_format = ('%ds' % eta)
                info = (' - ETA: %s' % eta_format)
            elif (time_per_unit >= 1):
                info += (' %.0fs/step' % time_per_unit)
            elif (time_per_unit >= 0.001):
                info += (' %.0fms/step' % (time_per_unit * 1000.0))
            else:
                info += (' %.0fus/step' % (time_per_unit * 1000000.0))
            for k in self._values:
                info += (' - %s:' % k)
                if isinstance(self._values[k], list):
                    # Averaged metric: weighted sum / weight (guarded vs /0).
                    avg = np.mean((self._values[k][0] / max(1, self._values[k][1])))
                    if (abs(avg) > 0.001):
                        info += (' %.4f' % avg)
                    else:
                        info += (' %.4e' % avg)
                else:
                    info += (' %s' % self._values[k])
            self._total_width += len(info)
            if (prev_total_width > self._total_width):
                # Pad with spaces to fully erase the longer previous render.
                info += (' ' * (prev_total_width - self._total_width))
            if ((self.target is not None) and (current >= self.target)):
                info += '\n'
            sys.stdout.write(info)
            sys.stdout.flush()
        elif (self.verbose == 2):
            # Semi-verbose mode: print one summary line only on completion
            # (or every call when the target is unknown).
            if ((self.target is None) or (current >= self.target)):
                for k in self._values:
                    info += (' - %s:' % k)
                    avg = np.mean((self._values[k][0] / max(1, self._values[k][1])))
                    if (avg > 0.001):
                        info += (' %.4f' % avg)
                    else:
                        info += (' %.4e' % avg)
                info += '\n'
                sys.stdout.write(info)
                sys.stdout.flush()
        self._last_update = now
    def add(self, n, values=None):
        # Convenience: advance the bar by `n` steps instead of setting an
        # absolute position.
        self.update((self._seen_so_far + n), values)
|
def l2norm(X, eps=1e-13, dim=1):
    """L2-normalize entries of X along dimension `dim`.

    A small epsilon keeps the division stable for near-zero rows.
    """
    denom = X.pow(2).sum(dim=dim, keepdim=True).sqrt() + eps + 1e-14
    return X / denom
|
def l1norm(X, eps=1e-13, dim=1):
    """L1-normalize entries of X along dimension `dim`.

    (The original docstring said "L2-normalize" — a copy-paste error; the
    implementation divides by the sum of absolute values, i.e. the L1 norm.)
    A small epsilon keeps the division stable for near-zero rows.
    """
    norm = ((torch.abs(X).sum(dim=dim, keepdim=True) + eps) + 1e-14)
    X = torch.div(X, norm)
    return X
|
def normalization(X, dim=1):
    """Min-max normalize X into [0, 1] using its global min and max.

    NOTE(review): `dim` is accepted for interface consistency with the other
    norm helpers but is unused — the min/max are taken over the whole array.

    # Arguments
        X: array-like of numbers.
        dim: unused (kept for backward compatibility).

    # Returns
        Array of the same shape scaled to [0, 1]; all zeros when X is
        constant (the original code divided by zero in that case).
    """
    lo = np.min(X)
    _range = (np.max(X) - lo)
    if _range == 0:
        # Guard: constant input would otherwise produce nan/inf.
        return np.zeros_like(np.asarray(X, dtype=float))
    return ((X - lo) / _range)
|
def cosine_sim(query, retrio):
    """Cosine similarity matrix between every query/retrio row pair."""
    q = l2norm(query)
    r = l2norm(retrio)
    return q.mm(r.t())
|
def vector_cosine_sim(query, retrio):
    """Row-wise cosine similarity between paired query and retrio rows."""
    q = l2norm(query)
    r = l2norm(retrio)
    return torch.mul(q, r).sum(dim=1).unsqueeze(0)
|
def hist_sim(im, s, eps=1e-14):
    """Pairwise histogram-intersection similarity between rows of im and s.

    # Returns
        Tensor of shape (im.size(0), s.size(0)): sum(min) / sum(max) per pair.
    """
    n = im.size(0)
    left = im.unsqueeze(1).expand(-1, n, -1)
    right = s.unsqueeze(0).expand(n, -1, -1)
    overlap = torch.min(left, right).sum(-1)
    total = torch.max(left, right).sum(-1) + eps
    return overlap / total
|
def jaccard_sim(query, retrieval_base, eps=1e-08):
    """Pairwise (soft) Jaccard similarity between query and retrieval_base rows.

    Replaces the original per-query Python loop with repeated `torch.cat`
    (which re-allocated the score tensor every iteration) by a single
    broadcast computation — identical values, O(1) tensor ops.

    # Arguments
        query: tensor of shape (nq, d).
        retrieval_base: tensor of shape (nb, d).
        eps: stabilizer added to the union to avoid division by zero.

    # Returns
        Tensor of shape (nq, nb) with sum(min) / (sum(max) + eps) per pair,
        or None when `query` is empty (matching the original behavior).
    """
    if query.size(0) == 0:
        # Original loop never ran for an empty query and returned None.
        return None
    q = query.unsqueeze(1)              # (nq, 1, d)
    b = retrieval_base.unsqueeze(0)     # (1, nb, d) -> broadcast to (nq, nb, d)
    intersection = torch.min(q, b).sum(-1)
    union = torch.max(q, b).sum(-1) + eps
    return intersection / union
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.