code stringlengths 17 6.64M |
|---|
def cached_path(url_or_filename, cache_dir=None, force_download=False, proxies=None, resume_download=False, user_agent: Union[(Dict, str, None)]=None, extract_compressed_file=False, force_extract=False, local_files_only=False) -> Optional[str]:
    """
    Given something that might be a URL (or might be a local path), determine which.
    If it's a URL, download the file and cache it, and return the path to the cached
    file. If it's already a local path, make sure the file exists and then return
    the path.

    Args:
        cache_dir: specify a cache directory to save the file to (overwrite the default cache dir).
        force_download: if True, re-download the file even if it's already cached in the cache dir.
        resume_download: if True, resume the download if an incompletely received file is found.
        user_agent: Optional string or dict that will be appended to the user-agent on remote requests.
        extract_compressed_file: if True and the path points to a zip or tar file, extract the
            compressed file in a folder along the archive.
        force_extract: if True when extract_compressed_file is True and the archive was already
            extracted, re-extract the archive and override the folder where it was extracted.

    Return:
        None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
        Local path (string) otherwise
    """
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    if is_remote_url(url_or_filename):
        # URL: fetch through the cache (downloads only when needed).
        output_path = get_from_cache(url_or_filename, cache_dir=cache_dir, force_download=force_download, proxies=proxies, resume_download=resume_download, user_agent=user_agent, local_files_only=local_files_only)
    elif os.path.exists(url_or_filename):
        # Local file that exists: use it as-is.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == '':
        # Looks like a local path (no URL scheme) but the file is missing.
        raise EnvironmentError('file {} not found'.format(url_or_filename))
    else:
        # Has a scheme we do not recognize as remote.
        raise ValueError('unable to parse {} as a URL or as a local path'.format(url_or_filename))
    if extract_compressed_file:
        if (not is_zipfile(output_path)) and (not tarfile.is_tarfile(output_path)):
            return output_path
        # Extraction target: "<dir>/<archive-name-with-dots-dashed>-extracted"
        (output_dir, output_file) = os.path.split(output_path)
        output_extract_dir_name = output_file.replace('.', '-') + '-extracted'
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)
        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and (not force_extract):
            # Already extracted (non-empty) and no forced re-extraction requested.
            return output_path_extracted
        # Serialize concurrent extractions of the same archive.
        lock_path = output_path + '.lock'
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                # FIX: the context manager closes the archive; the explicit
                # close() the original called inside the `with` was redundant.
                with ZipFile(output_path, 'r') as zip_file:
                    zip_file.extractall(output_path_extracted)
            elif tarfile.is_tarfile(output_path):
                # FIX: use a context manager so the tar handle is closed even if
                # extraction raises (the original leaked it on error).
                with tarfile.open(output_path) as tar_file:
                    tar_file.extractall(output_path_extracted)
            else:
                raise EnvironmentError('Archive format of {} could not be identified'.format(output_path))
        return output_path_extracted
    return output_path
|
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent: Union[(Dict, str, None)]=None):
    """Stream ``url`` into ``temp_file``, optionally resuming from byte ``resume_size``."""
    # Build the user-agent: library/python versions plus optional framework versions.
    ua = 'transformers/{}; python/{}'.format(__version__, sys.version.split()[0])
    if is_torch_available():
        ua += '; torch/{}'.format(torch.__version__)
    if is_tf_available():
        ua += '; tensorflow/{}'.format(tf.__version__)
    # Callers can append their own identification, either as a mapping or a string.
    if isinstance(user_agent, dict):
        ua += '; ' + '; '.join('{}/{}'.format(k, v) for (k, v) in user_agent.items())
    elif isinstance(user_agent, str):
        ua += '; ' + user_agent
    headers = {'user-agent': ua}
    if resume_size > 0:
        # Ask the server for only the remaining bytes.
        headers['Range'] = 'bytes=%d-' % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:
        # Range not satisfiable: everything was already downloaded.
        return
    content_length = response.headers.get('Content-Length')
    # Total for the progress bar counts the already-downloaded prefix.
    total = (resume_size + int(content_length)) if content_length is not None else None
    progress = tqdm(unit='B', unit_scale=True, total=total, initial=resume_size, desc='Downloading', disable=bool(logger.getEffectiveLevel() == logging.NOTSET))
    for chunk in response.iter_content(chunk_size=1024):
        if not chunk:
            # Skip keep-alive chunks.
            continue
        progress.update(len(chunk))
        temp_file.write(chunk)
    progress.close()
|
def get_from_cache(url, cache_dir=None, force_download=False, proxies=None, etag_timeout=10, resume_download=False, user_agent: Union[(Dict, str, None)]=None, local_files_only=False) -> Optional[str]:
    """
    Given a URL, look for the corresponding file in the local cache.
    If it's not there, download it. Then return the path to the cached file.

    Return:
        None in case of non-recoverable file (non-existent or inaccessible url + no cache on disk).
        Local path (string) otherwise
    """
    if (cache_dir is None):
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)
    os.makedirs(cache_dir, exist_ok=True)
    etag = None
    if (not local_files_only):
        # Probe the server for an ETag so the cache filename reflects the remote version.
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if (response.status_code == 200):
                etag = response.headers.get('ETag')
        except (EnvironmentError, requests.exceptions.Timeout):
            # Offline or unreachable: fall through and try to serve from cache below.
            pass
    filename = url_to_filename(url, etag)
    cache_path = os.path.join(cache_dir, filename)
    # etag is None means: no connection, HEAD failed, or local_files_only was requested.
    # In that case serve whatever cached version we have, if any.
    if (etag is None):
        if os.path.exists(cache_path):
            return cache_path
        else:
            # Fall back to any cached variant of this URL (any etag suffix),
            # excluding the metadata (.json) and lock (.lock) companion files.
            matching_files = [file for file in fnmatch.filter(os.listdir(cache_dir), (filename + '.*')) if ((not file.endswith('.json')) and (not file.endswith('.lock')))]
            if (len(matching_files) > 0):
                return os.path.join(cache_dir, matching_files[(- 1)])
            else:
                if local_files_only:
                    raise ValueError("Cannot find the requested files in the cached path and outgoing traffic has been disabled. To enable model look-ups and downloads online, set 'local_files_only' to False.")
                return None
    # From here on we have an etag; reuse the exact cached version unless forced.
    if (os.path.exists(cache_path) and (not force_download)):
        return cache_path
    # Serialize concurrent downloads of the same file via a lock next to the cache entry.
    lock_path = (cache_path + '.lock')
    with FileLock(lock_path):
        # Another process may have completed the download while we waited on the lock.
        if (os.path.exists(cache_path) and (not force_download)):
            return cache_path
        if resume_download:
            # Append to a persistent ".incomplete" file so interrupted downloads resume.
            incomplete_path = (cache_path + '.incomplete')
            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, 'a+b') as f:
                    (yield f)
            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            # Fresh temp file in cache_dir; delete=False so os.replace below can move it.
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0
        with temp_file_manager() as temp_file:
            logger.info('%s not found in cache or force_download set to True, downloading to %s', url, temp_file.name)
            http_get(url, temp_file, proxies=proxies, resume_size=resume_size, user_agent=user_agent)
        # Atomically move the finished download into its final cache location.
        logger.info('storing %s in cache at %s', url, cache_path)
        os.replace(temp_file.name, cache_path)
        # Record url/etag metadata alongside the cached file.
        logger.info('creating metadata file for %s', cache_path)
        meta = {'url': url, 'etag': etag}
        meta_path = (cache_path + '.json')
        with open(meta_path, 'w') as meta_file:
            json.dump(meta, meta_file)
    return cache_path
|
class cached_property(property):
    """
    Descriptor that mimics @property but caches output in member variable.
    From tensorflow_datasets
    Built-in in functools from Python 3.8.
    """
    def __get__(self, obj, objtype=None):
        # Accessed on the class itself: behave like `property` and return the descriptor.
        if obj is None:
            return self
        if self.fget is None:
            raise AttributeError('unreadable attribute')
        # The cache slot name is derived from the wrapped function's name.
        cache_name = '__cached_' + self.fget.__name__
        value = getattr(obj, cache_name, None)
        if value is None:
            # First access (a cached None is indistinguishable and recomputed).
            value = self.fget(obj)
            setattr(obj, cache_name, value)
        return value
|
def torch_required(func):
    """Decorator: calling the wrapped function raises ImportError when PyTorch is unavailable."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Guard clause: refuse to run without torch installed.
        if not is_torch_available():
            raise ImportError(f'Method `{func.__name__}` requires PyTorch.')
        return func(*args, **kwargs)
    return wrapper
|
def tf_required(func):
    """Decorator: calling the wrapped function raises ImportError when TensorFlow is unavailable."""
    @wraps(func)
    def wrapper(*args, **kwargs):
        # Guard clause: refuse to run without TF installed.
        if not is_tf_available():
            raise ImportError(f'Method `{func.__name__}` requires TF.')
        return func(*args, **kwargs)
    return wrapper
|
class ModelOutput():
    """
    Base class for all model outputs as dataclass. Has a ``__getitem__`` that allows indexing by
    integer or slice (like a tuple) or strings (like a dictionary), ignoring ``None`` attributes.
    """
    def _non_none_fields(self):
        # Dataclass field names whose current value is not None, in declaration order.
        return [name for name in self.__dataclass_fields__.keys() if getattr(self, name, None) is not None]
    def to_tuple(self):
        """Convert :obj:`self` to a tuple of all non-``None`` attribute values."""
        return tuple(getattr(self, name) for name in self._non_none_fields())
    def to_dict(self):
        """Convert :obj:`self` to a dict of all non-``None`` attributes."""
        return {name: getattr(self, name) for name in self._non_none_fields()}
    def __getitem__(self, i):
        # String keys index like a dict; ints/slices index the value tuple.
        if isinstance(i, str):
            return self.to_dict()[i]
        return self.to_tuple()[i]
    def __len__(self):
        return len(self.to_tuple())
|
class EntityEmbeddingVLayer(nn.Module):
    """
    This embedding layer uses vicinity information to smooth over the continuous variables.
    If normal embedding layer is to be desired, please use EntityEmbeddingLayer (without V).
    """
    def __init__(self, num_level, emdedding_dim, centroid, name):
        # num_level: number of embedding rows (one per discretization level).
        # emdedding_dim: embedding width (param name typo kept to preserve keyword calls).
        # centroid: per-level centroid values; name: identifier for this variable.
        super(EntityEmbeddingVLayer, self).__init__()
        self.embedding = nn.Embedding(num_level, emdedding_dim)
        # Detached (num_level, 1) tensor of centroids. Not a Parameter/buffer, so it is
        # neither trained nor moved by .to()/.cuda() -- NOTE(review): confirm intended.
        self.centroid = torch.tensor(centroid).detach_().unsqueeze(1)
        self.name = name
        self.softmax = nn.Softmax(dim=1)
    def forward(self, x):
        """
        x must be batch_size times 1
        """
        x = x.unsqueeze(1)
        x = x.unsqueeze(2)
        # Inverse absolute distance from each sample to each centroid; EPS (module-level
        # constant, defined outside this chunk) guards the division when x hits a centroid.
        d = (1.0 / ((x - self.centroid).abs() + EPS))
        # Softmax over centroids turns inverse distances into smoothing weights.
        w = self.softmax(d.squeeze(2))
        # Weighted mixture of embedding rows -- presumably (batch, emdedding_dim);
        # TODO confirm the exact input shape against callers.
        v = torch.mm(w, torch.transpose(self.embedding.weight, 0, 1))
        return v
|
class EntityEmbeddingLayer(nn.Module):
    """Plain (non-vicinity) entity embedding for a single categorical variable."""
    def __init__(self, num_level, embedding_dim, name):
        super(EntityEmbeddingLayer, self).__init__()
        # One embedding row per category level.
        self.embedding = nn.Embedding(num_level, embedding_dim)
        self.name = name
    def forward(self, x):
        # Look up the embedding rows for the given level indices.
        out = self.embedding(x)
        return out
|
class EntityDenseLayer(nn.Module):
    """
    Combines per-variable embedding layers into one stacked tensor: vicinity embeddings
    for ``opt.dis_vars_vic`` and plain entity embeddings for ``opt.dis_vars_entity``.
    Continuous variables are declared in the options but not yet handled in ``forward``
    (see the placeholder branch there).
    """
    def __init__(self, data_pd, opt: TabDataOpt, mlp=None):
        super(EntityDenseLayer, self).__init__()
        self.opt = opt
        self.mlp = mlp
        if (len(self.opt.conti_vars) and (mlp is None)):
            logging.error('\n The options have specified the using of continuous variables. Yet no MLP network is specified.\n ')
        if ((self.opt.dis_vars_vic is not None) and (self.opt.centroids is not None)):
            self.entity_v = nn.ModuleDict()
            for name in self.opt.dis_vars_vic:
                # BUGFIX: the original passed `centroid[name]`, referencing an undefined
                # global `centroid` (NameError at construction); the centroids live on
                # the options object.
                self.entity_v[name] = EntityEmbeddingVLayer(len(self.opt.centroids[name]), self.opt.num_dim, self.opt.centroids[name], name)
        if (self.opt.dis_vars_entity is not None):
            self.entity = nn.ModuleDict()
            for name in self.opt.dis_vars_entity:
                # One embedding level per unique value observed in the dataframe column.
                self.entity[name] = EntityEmbeddingLayer(len(data_pd[name].unique()), self.opt.num_dim, name)
    def forward(self, x, **kwargs):
        # Collect (batch, 1, num_dim) slices, one per variable, then stack on dim 1.
        result = list()
        if (self.opt.dis_vars_vic is not None):
            for name in self.opt.dis_vars_vic:
                result.append(torch.unsqueeze(self.entity_v[name](x[name]), 1))
        if (self.opt.dis_vars_entity is not None):
            for name in self.opt.dis_vars_entity:
                result.append(torch.unsqueeze(self.entity[name](x[name]), 1))
        if (self.opt.conti_vars is not None):
            # TODO: continuous variables are declared but not yet wired through self.mlp.
            pass
        return torch.cat(result, dim=1)
|
def get_constant_schedule(optimizer: Optimizer, last_epoch: int=(- 1)):
    """
    Create a schedule with a constant learning rate, using the learning rate set in optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(_):
        # Constant multiplier: the optimizer's configured lr is used unchanged.
        return 1
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
|
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int=(- 1)):
    """
    Create a schedule with a constant learning rate preceded by a warmup period during which the
    learning rate increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(current_step: int):
        # Hold at 1 once warmup is over; otherwise ramp linearly from 0 to 1.
        if current_step >= num_warmup_steps:
            return 1.0
        return float(current_step) / float(max(1.0, num_warmup_steps))
    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
|
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=(- 1)):
    """
    Create a schedule with a learning rate that decreases linearly from the initial lr set in the
    optimizer to 0, after a warmup period during which it increases linearly from 0 to the
    initial lr set in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            # Linear warmup from 0 to 1.
            return float(current_step) / float(max(1, num_warmup_steps))
        # Linear decay from 1 down to 0 at num_training_steps (clamped at 0 afterwards).
        remaining = float(num_training_steps - current_step)
        decay_span = float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, remaining / decay_span)
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_cosine_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float=0.5, last_epoch: int=(- 1)):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine
    function between the initial lr set in the optimizer and 0, after a warmup period during
    which it increases linearly between 0 and the initial lr set in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        num_cycles (:obj:`float`, `optional`, defaults to 0.5):
            The number of waves in the cosine schedule (the default just decreases from the max
            value to 0 following a half-cosine).
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear warmup from 0 to 1.
            return float(current_step) / float(max(1, num_warmup_steps))
        # Fraction of the post-warmup phase completed, in [0, 1].
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        cosine = math.cos(math.pi * float(num_cycles) * 2.0 * progress)
        return max(0.0, 0.5 * (1.0 + cosine))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
def get_cosine_with_hard_restarts_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int=1, last_epoch: int=(- 1)):
    """
    Create a schedule with a learning rate that decreases following the values of the cosine
    function between the initial lr set in the optimizer and 0, with several hard restarts,
    after a warmup period during which it increases linearly between 0 and the initial lr set
    in the optimizer.

    Args:
        optimizer (:class:`~torch.optim.Optimizer`):
            The optimizer for which to schedule the learning rate.
        num_warmup_steps (:obj:`int`):
            The number of steps for the warmup phase.
        num_training_steps (:obj:`int`):
            The total number of training steps.
        num_cycles (:obj:`int`, `optional`, defaults to 1):
            The number of hard restarts to use.
        last_epoch (:obj:`int`, `optional`, defaults to -1):
            The index of the last epoch when resuming training.

    Return:
        :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule.
    """
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            # Linear warmup from 0 to 1.
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        # Position within the current cycle; the modulo produces the hard restart.
        phase = (float(num_cycles) * progress) % 1.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * phase)))
    return LambdaLR(optimizer, lr_lambda, last_epoch)
|
class AdamW(Optimizer):
    """
    Implements Adam algorithm with weight decay fix as introduced in
    `Decoupled Weight Decay Regularization <https://arxiv.org/abs/1711.05101>`__.

    Parameters:
        params (:obj:`Iterable[torch.nn.parameter.Parameter]`):
            Iterable of parameters to optimize or dictionaries defining parameter groups.
        lr (:obj:`float`, `optional`, defaults to 1e-3):
            The learning rate to use.
        betas (:obj:`Tuple[float,float]`, `optional`, defaults to (0.9, 0.999)):
            Adam's betas parameters (b1, b2).
        eps (:obj:`float`, `optional`, defaults to 1e-6):
            Adam's epsilon for numerical stability.
        weight_decay (:obj:`float`, `optional`, defaults to 0):
            Decoupled weight decay to apply.
        correct_bias (:obj:`bool`, `optional`, defaults to `True`):
            Whether or not to correct bias in Adam (for instance, in Bert TF repository
            they use :obj:`False`).
    """
    def __init__(self, params: Iterable[torch.nn.parameter.Parameter], lr: float=0.001, betas: Tuple[(float, float)]=(0.9, 0.999), eps: float=1e-06, weight_decay: float=0.0, correct_bias: bool=True):
        # Validate hyper-parameters up front so misconfiguration fails loudly.
        if lr < 0.0:
            raise ValueError('Invalid learning rate: {} - should be >= 0.0'.format(lr))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter: {} - should be in [0.0, 1.0['.format(betas[1]))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {} - should be >= 0.0'.format(eps))
        super().__init__(params, dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, correct_bias=correct_bias))
    def step(self, closure: Callable=None):
        """
        Performs a single optimization step.

        Arguments:
            closure (:obj:`Callable`, `optional`): A closure that reevaluates the model and returns the loss.
        """
        loss = closure() if closure is not None else None
        for group in self.param_groups:
            for param in group['params']:
                if param.grad is None:
                    continue
                grad = param.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Adam does not support sparse gradients, please consider SparseAdam instead')
                state = self.state[param]
                if not state:
                    # Lazy per-parameter state initialization on the first step.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(param.data)
                    state['exp_avg_sq'] = torch.zeros_like(param.data)
                exp_avg = state['exp_avg']
                exp_avg_sq = state['exp_avg_sq']
                beta1, beta2 = group['betas']
                state['step'] += 1
                # In-place first- and second-moment running averages.
                exp_avg.mul_(beta1).add_(grad, alpha=(1.0 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1.0 - beta2))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                step_size = group['lr']
                if group['correct_bias']:
                    # Standard Adam bias correction (disabled for BERT-style training).
                    bias_correction1 = 1.0 - (beta1 ** state['step'])
                    bias_correction2 = 1.0 - (beta2 ** state['step'])
                    step_size = (step_size * math.sqrt(bias_correction2)) / bias_correction1
                param.data.addcdiv_(exp_avg, denom, value=(- step_size))
                # Decoupled weight decay: applied to the weights directly, not the gradient.
                if group['weight_decay'] > 0.0:
                    param.data.add_(param.data, alpha=((- group['lr']) * group['weight_decay']))
        return loss
|
@dataclass
class OptimizerOptBase():
    """Base options shared by the optimizer configuration dataclasses below."""
    # Learning rate forwarded to the wrapped optimizer.
    lr: float = field(default=0.01, metadata={'help': 'The learning rate of base optimizers.Default value is 1e-2'})
|
@dataclass
class SGDOpt(OptimizerOptBase):
    """Options for the (torch) SGD optimizer."""
    # Momentum factor; 0 disables momentum.
    momentum: float = field(default=0, metadata={'help': 'The momentum parameter for SGD optimizer.Default value is 0'})
    # L2 penalty coefficient.
    weight_decay: float = field(default=0, metadata={'help': 'The weight decay parameter for SGD optimizerDefault is 0.'})
    # Dampening applied to the momentum term.
    dampening: float = field(default=0, metadata={'help': 'The dampening parameter for SGD optimizer.Default value is 0.'})
    # Whether to use Nesterov momentum.
    nesterov: bool = field(default=False, metadata={'help': 'Whether to enable Nesterov momentum in SGD optimizer.Default value is False.'})
|
@dataclass
class AdamWOpt(OptimizerOptBase):
    """Options for the AdamW optimizer."""
    # NOTE(review): annotations corrected -- `betas` was annotated `any` and
    # `eps`/`weight_decay` were annotated `str`, but all defaults are numeric.
    betas: Tuple[(float, float)] = field(default=(0.9, 0.999), metadata={'help': 'The betas for Adam optimizer.Default values are (0.9, 0.999).'})
    eps: float = field(default=1e-08, metadata={'help': '\n A term added to the denominator to improve numerical stability (default: 1e-8)\n '})
    weight_decay: float = field(default=0, metadata={'help': 'The weight decay coefficient for AdamW.Default value is 0.'})
    # Whether to use the AMSGrad variant.
    amsgrad: bool = field(default=False, metadata={'help': 'Whether to use AMSGRAD variation as in the paper https://openreview.net/forum?id=ryQu7f-RZ.Default is False.'})
|
@dataclass
class LookaheadOpt(OptimizerOptBase):
    """Options for the Lookahead wrapper optimizer."""
    # BUGFIX: use default_factory instead of `default=AdamWOpt()` -- a class-instance
    # default is shared by every LookaheadOpt instance and is rejected outright by
    # dataclasses on Python 3.11+ (unhashable/mutable default).
    inner_opt: OptimizerOptBase = field(default_factory=AdamWOpt, metadata={'help': 'The optimizer setup for inner optimizer.Default is AdamW Opt.'})
    # Synchronization period: slow weights update every la_steps inner steps.
    la_steps: int = field(default=5, metadata={'help': 'The number of steps for the lookahead optimizer.Default value is 5.'})
    la_alpha: float = field(default=0.8, metadata={'help': 'The linear interpolation factor. If set to 1.0, it will recover the inner optimizer.The default value is 0.8.'})
    pullback_momentum: str = field(default='none', metadata={'help': "Change to inner optimizer momentum on interpolation update.Options are 'reset', 'pullback' and ''none.Default value is 'none'"})
|
@dataclass
class RAdamWOpt(OptimizerOptBase):
    """Options for the RAdamW optimizer."""
    # NOTE(review): annotations corrected -- `betas` was annotated `any` and
    # `eps`/`weight_decay` were annotated `str`, but all defaults are numeric.
    betas: Tuple[(float, float)] = field(default=(0.9, 0.999), metadata={'help': 'The betas for RAdam optimizer.Default values are (0.9, 0.999).'})
    eps: float = field(default=1e-08, metadata={'help': '\n A term added to the denominator to improve numerical stability (default: 1e-8)\n '})
    weight_decay: float = field(default=0, metadata={'help': 'The weight decay coefficient for RAdamW.Default value is 0.'})
|
class Lookahead(Optimizer):
    """PyTorch implementation of the lookahead wrapper.
    Lookahead Optimizer: https://arxiv.org/abs/1907.08610
    """
    def __init__(self, optimizer, la_steps=5, la_alpha=0.8, pullback_momentum='none'):
        """optimizer: inner optimizer
        la_steps (int): number of lookahead steps
        la_alpha (float): linear interpolation factor. 1.0 recovers the inner optimizer.
        pullback_momentum (str): change to inner optimizer momentum on interpolation update
        """
        # NOTE: Optimizer.__init__ is deliberately not called; this wrapper delegates
        # param_groups/state management to the inner optimizer (original behavior kept).
        self.optimizer = optimizer
        self._la_step = 0  # inner steps taken since the last slow-weight sync
        self.la_alpha = la_alpha
        self._total_la_steps = la_steps
        pullback_momentum = pullback_momentum.lower()
        assert (pullback_momentum in ['reset', 'pullback', 'none'])
        self.pullback_momentum = pullback_momentum
        self.state = defaultdict(dict)
        # Cache the initial ("slow") weights for every parameter.
        for group in optimizer.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                param_state['cached_params'] = torch.zeros_like(p.data)
                param_state['cached_params'].copy_(p.data)
                if (self.pullback_momentum == 'pullback'):
                    param_state['cached_mom'] = torch.zeros_like(p.data)
    def __getstate__(self):
        # Explicit pickling support for the wrapper's own attributes.
        return {'state': self.state, 'optimizer': self.optimizer, 'la_alpha': self.la_alpha, '_la_step': self._la_step, '_total_la_steps': self._total_la_steps, 'pullback_momentum': self.pullback_momentum}
    def zero_grad(self):
        self.optimizer.zero_grad()
    def get_la_step(self):
        """Return the number of inner steps since the last lookahead sync."""
        return self._la_step
    def state_dict(self):
        # NOTE(review): only the inner optimizer's state is (de)serialized; the cached
        # slow weights are not included -- confirm this is acceptable for checkpointing.
        return self.optimizer.state_dict()
    def load_state_dict(self, state_dict):
        self.optimizer.load_state_dict(state_dict)
    def _backup_and_load_cache(self):
        """Useful for performing evaluation on the slow weights (which typically generalize better)
        """
        for group in self.optimizer.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                param_state['backup_params'] = torch.zeros_like(p.data)
                param_state['backup_params'].copy_(p.data)
                p.data.copy_(param_state['cached_params'])
    def _clear_and_load_backup(self):
        """Restore the fast weights saved by ``_backup_and_load_cache``."""
        for group in self.optimizer.param_groups:
            for p in group['params']:
                param_state = self.state[p]
                p.data.copy_(param_state['backup_params'])
                del param_state['backup_params']
    @property
    def param_groups(self):
        return self.optimizer.param_groups
    def step(self, closure=None):
        """Performs a single Lookahead optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = self.optimizer.step(closure)
        self._la_step += 1
        if (self._la_step >= self._total_la_steps):
            self._la_step = 0
            # Lookahead sync: interpolate fast weights toward the cached slow weights.
            for group in self.optimizer.param_groups:
                for p in group['params']:
                    param_state = self.state[p]
                    # BUGFIX: use the keyword `alpha=` overload; the positional
                    # `add_(scalar, tensor)` form was removed from recent PyTorch
                    # (the AdamW class in this file already uses the keyword form).
                    p.data.mul_(self.la_alpha).add_(param_state['cached_params'], alpha=(1.0 - self.la_alpha))
                    param_state['cached_params'].copy_(p.data)
                    if (self.pullback_momentum == 'pullback'):
                        internal_momentum = self.optimizer.state[p]['momentum_buffer']
                        self.optimizer.state[p]['momentum_buffer'] = internal_momentum.mul_(self.la_alpha).add_(param_state['cached_mom'], alpha=(1.0 - self.la_alpha))
                        param_state['cached_mom'] = self.optimizer.state[p]['momentum_buffer']
                    elif (self.pullback_momentum == 'reset'):
                        self.optimizer.state[p]['momentum_buffer'] = torch.zeros_like(p.data)
        return loss
|
class RAdamW(Optimizer):
    """
    Rectified Adam (RAdam) with decoupled weight decay.

    Arguments:
        params: iterable of parameters (or dicts defining parameter groups).
        lr (float): learning rate (default: 1e-3).
        betas (Tuple[float, float]): moment running-average coefficients (default: (0.9, 0.999)).
        eps (float): denominator term for numerical stability (default: 1e-8).
        weight_decay (float): decoupled weight decay coefficient (default: 0).
        degenerated_to_sgd (bool): if True, fall back to an SGD-like update while the
            variance rectification term is not yet usable (default: True).
    """
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-08, weight_decay=0, degenerated_to_sgd=True):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        self.degenerated_to_sgd = degenerated_to_sgd
        # Groups with their own betas get a private rectification cache ("buffer").
        if isinstance(params, (list, tuple)) and (len(params) > 0) and isinstance(params[0], dict):
            for param in params:
                if ('betas' in param) and ((param['betas'][0] != betas[0]) or (param['betas'][1] != betas[1])):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
        super(RAdamW, self).__init__(params, defaults)
    def __setstate__(self, state):
        super(RAdamW, self).__setstate__(state)
    def step(self, closure=None):
        """Perform a single optimization step; `closure` optionally recomputes the loss."""
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                # Work in fp32 regardless of the parameter dtype.
                grad = p.grad.data.float()
                if grad.is_sparse:
                    raise RuntimeError('RAdam does not support sparse gradients')
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if len(state) == 0:
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                # BUGFIX: use keyword `value=`/`alpha=` overloads; the positional
                # `(scalar, tensor)` forms were removed from recent PyTorch (the
                # AdamW class in this file already uses the keyword forms).
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                state['step'] += 1
                # The rectification terms depend only on the step count, so they are
                # cached per (step % 10) slot and reused across parameters.
                buffered = group['buffer'][int(state['step'] % 10)]
                if state['step'] == buffered[0]:
                    (N_sma, step_size) = (buffered[1], buffered[2])
                else:
                    buffered[0] = state['step']
                    beta2_t = beta2 ** state['step']
                    N_sma_max = (2 / (1 - beta2)) - 1
                    # Length of the approximated SMA (simple moving average).
                    N_sma = N_sma_max - (((2 * state['step']) * beta2_t) / (1 - beta2_t))
                    buffered[1] = N_sma
                    if N_sma >= 5:
                        # Variance rectification term from the RAdam paper.
                        step_size = (math.sqrt((((((((1 - beta2_t) * (N_sma - 4)) / (N_sma_max - 4)) * (N_sma - 2)) / N_sma) * N_sma_max) / (N_sma_max - 2))) / (1 - (beta1 ** state['step'])))
                    elif self.degenerated_to_sgd:
                        step_size = 1.0 / (1 - (beta1 ** state['step']))
                    else:
                        # Negative sentinel: skip the update entirely below.
                        step_size = (- 1)
                    buffered[2] = step_size
                if N_sma >= 5:
                    # Adaptive update with rectified variance.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    denom = exp_avg_sq.sqrt().add_(group['eps'])
                    p_data_fp32.addcdiv_(exp_avg, denom, value=((- step_size) * group['lr']))
                    p.data.copy_(p_data_fp32)
                elif step_size > 0:
                    # SGD-like fallback while the variance is not tractable.
                    if group['weight_decay'] != 0:
                        p_data_fp32.add_(p_data_fp32, alpha=((- group['weight_decay']) * group['lr']))
                    p_data_fp32.add_(exp_avg, alpha=((- step_size) * group['lr']))
                    p.data.copy_(p_data_fp32)
        return loss
|
def log_lamb_rs(optimizer: Optimizer, event_writer: SummaryWriter, token_count: int):
    """Log a histogram of trust ratio scalars in across layers."""
    results = collections.defaultdict(list)
    for group in optimizer.param_groups:
        for p in group['params']:
            state = optimizer.state[p]
            # Collect only the LAMB diagnostics that were populated for this parameter.
            for key in ('weight_norm', 'adam_norm', 'trust_ratio'):
                if key in state:
                    results[key].append(state[key])
    for (name, values) in results.items():
        event_writer.add_histogram(f'lamb/{name}', torch.tensor(values), token_count)
|
class Lamb(Optimizer):
    """Implements Lamb algorithm.
    It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.

    Arguments:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam (bool, optional): always use trust ratio = 1, which turns this into
            Adam. Useful for comparison purposes.

    .. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    """
    def __init__(self, params, lr=0.001, betas=(0.9, 0.999), eps=1e-06, weight_decay=0, adam=False):
        if not 0.0 <= lr:
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if not 0.0 <= eps:
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        self.adam = adam
        super(Lamb, self).__init__(params, defaults)
    def step(self, closure=None):
        """Performs a single optimization step.
        Arguments:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        loss = None
        if closure is not None:
            loss = closure()
        for group in self.param_groups:
            for p in group['params']:
                if p.grad is None:
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    raise RuntimeError('Lamb does not support sparse gradients, consider SparseAdam instad.')
                state = self.state[p]
                if len(state) == 0:
                    # Lazy per-parameter state initialization on the first step.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p.data)
                    state['exp_avg_sq'] = torch.zeros_like(p.data)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                (beta1, beta2) = group['betas']
                state['step'] += 1
                # BUGFIX: use keyword `alpha=`/`value=` overloads; the positional
                # `(scalar, tensor)` forms were removed from recent PyTorch (the
                # AdamW class in this file already uses the keyword forms).
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                step_size = group['lr']
                # Layer-wise norms for the trust ratio; weight norm is clamped to [0, 10].
                weight_norm = p.data.pow(2).sum().sqrt().clamp(0, 10)
                adam_step = (exp_avg / exp_avg_sq.sqrt().add(group['eps']))
                if group['weight_decay'] != 0:
                    adam_step.add_(p.data, alpha=group['weight_decay'])
                adam_norm = adam_step.pow(2).sum().sqrt()
                if (weight_norm == 0) or (adam_norm == 0):
                    trust_ratio = 1
                else:
                    trust_ratio = (weight_norm / adam_norm)
                # Stashed for log_lamb_rs diagnostics.
                state['weight_norm'] = weight_norm
                state['adam_norm'] = adam_norm
                state['trust_ratio'] = trust_ratio
                if self.adam:
                    trust_ratio = 1
                # `alpha` must be a Python number, so collapse the (possibly 0-dim
                # tensor) trust ratio to a float.
                p.data.add_(adam_step, alpha=((- step_size) * float(trust_ratio)))
        return loss
|
def is_tensorboard_available():
    # Returns the module-level ``_has_tensorboard`` flag -- presumably set by an
    # import probe at the top of this file (not visible in this chunk); verify there.
    return _has_tensorboard
|
@contextmanager
def torch_distributed_zero_first(local_rank: int):
    """
    Decorator to make all processes in distributed training wait for each local_master to do something.

    Args:
        local_rank (:obj:`int`): The rank of the local process.
    """
    is_master = local_rank in ((- 1), 0)
    if not is_master:
        # Non-master processes block here until rank 0 has finished the guarded work.
        torch.distributed.barrier()
    yield
    if local_rank == 0:
        # The master releases the waiting processes once its work is done.
        torch.distributed.barrier()
|
def get_tpu_sampler(dataset: Dataset):
    """Return a sampler for TPU training: random on a single core, sharded otherwise."""
    world_size = xm.xrt_world_size()
    if world_size <= 1:
        return RandomSampler(dataset)
    # Multi-core: shard the dataset across TPU replicas.
    return DistributedSampler(dataset, num_replicas=world_size, rank=xm.get_ordinal())
|
class SequentialDistributedSampler(Sampler):
    """
    Distributed Sampler that subsamples indices sequentially, making it easier to collate
    all results at the end.
    Even though we only use this sampler for eval and predict (no training), which means
    that the model params won't have to be synced (i.e. will not hang for synchronization
    even if varied number of forward passes), we still add extra samples to the sampler to
    make it evenly divisible (like in `DistributedSampler`) to make it easy to `gather` or
    `reduce` resulting tensors at the end of the loop.
    """
    def __init__(self, dataset, num_replicas=None, rank=None):
        # Default replica count / rank from the torch.distributed process group.
        if num_replicas is None:
            if not torch.distributed.is_available():
                raise RuntimeError('Requires distributed package to be available')
            num_replicas = torch.distributed.get_world_size()
        if rank is None:
            if not torch.distributed.is_available():
                raise RuntimeError('Requires distributed package to be available')
            rank = torch.distributed.get_rank()
        self.dataset = dataset
        self.num_replicas = num_replicas
        self.rank = rank
        # Per-replica sample count, rounded up so every replica yields the same amount.
        self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
        self.total_size = self.num_samples * self.num_replicas
    def __iter__(self):
        indices = list(range(len(self.dataset)))
        # Pad by wrapping around so the index list divides evenly across replicas.
        indices += indices[:(self.total_size - len(indices))]
        assert len(indices) == self.total_size
        # Contiguous shard belonging to this rank.
        start = self.rank * self.num_samples
        shard = indices[start:(start + self.num_samples)]
        assert len(shard) == self.num_samples
        return iter(shard)
    def __len__(self):
        return self.num_samples
|
class Trainer():
'\n Trainer is a simple but feature-complete training and eval loop for PyTorch,\n optimized for 🤗 Transformers.\n Args:\n model (:class:`~transformers.PreTrainedModel`):\n The model to train, evaluate or use for predictions.\n args (:class:`~transformers.TrainingArguments`):\n The arguments to tweak training.\n train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for training.\n eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`):\n The dataset to use for evaluation.\n compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`):\n The function that will be used to compute metrics at evaluation. Must take a\n :class:`~transformers.EvalPrediction` and return a dictionary string to metric values.\n prediction_loss_only (:obj:`bool`, `optional`, defaults to `False`):\n When performing evaluation and predictions, only returns the loss.\n tb_writer (:obj:`SummaryWriter`, `optional`):\n Object to write to TensorBoard.\n optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`):\n A tuple containing the optimizer and the scheduler to use. Will default to an instance of\n :class:`~transformers.AdamW` on your model and a scheduler given by\n :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`.\n '
def __init__(self, model: torch.nn.Module, args: TrainingArguments, train_dataset: Optional[Dataset]=None, eval_dataset: Optional[Dataset]=None, compute_metrics: Optional[Callable[([EvalPrediction], Dict)]]=None, prediction_loss_only=False, tb_writer: Optional['SummaryWriter']=None, optimizers: Tuple[(torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR)]=None):
self.model = model.to(args.device)
self.args = args
self.train_dataset = train_dataset
self.eval_dataset = eval_dataset
self.compute_metrics = compute_metrics
self.prediction_loss_only = prediction_loss_only
self.optimizers = optimizers
if (tb_writer is not None):
self.tb_writer = tb_writer
elif (is_tensorboard_available() and self.is_world_master()):
self.tb_writer = SummaryWriter(log_dir=self.args.logging_dir)
if (not is_tensorboard_available()):
logger.warning('You are instantiating a Trainer but Tensorboard is not installed. You should consider installing it.')
if is_wandb_available():
self._setup_wandb()
else:
logger.info('You are instantiating a Trainer but W&B is not installed. To use wandb logging, run `pip install wandb; wandb login` see https://docs.wandb.com/huggingface.')
set_seed(self.args.seed)
if self.is_world_master():
os.makedirs(self.args.output_dir, exist_ok=True)
if is_torch_tpu_available():
self.xla_device = True
def get_train_dataloader(self) -> DataLoader:
'\n Returns the training :class:`~torch.utils.data.DataLoader`.\n '
if isinstance(self.train_dataset, torch.utils.data.IterableDataset):
train_sampler = None
elif (self.train_dataset is None):
raise ValueError('Trainer: training requires a train_dataset.')
elif is_torch_tpu_available():
train_sampler = get_tpu_sampler(self.train_dataset)
else:
train_sampler = (RandomSampler(self.train_dataset) if (self.args.local_rank == (- 1)) else DistributedSampler(self.train_dataset))
data_loader = DataLoader(self.train_dataset, batch_size=self.args.train_batch_size, sampler=train_sampler, drop_last=self.args.dataloader_drop_last)
return data_loader
def get_eval_dataloader(self, eval_dataset: Optional[Dataset]=None) -> DataLoader:
'\n Returns the evaluation :class:`~torch.utils.data.DataLoader`.\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n If provided, will override `self.eval_dataset`.\n '
if ((eval_dataset is None) and (self.eval_dataset is None)):
raise ValueError('Trainer: evaluation requires an eval_dataset.')
eval_dataset = (eval_dataset if (eval_dataset is not None) else self.eval_dataset)
if isinstance(eval_dataset, torch.utils.data.IterableDataset):
sampler = None
elif is_torch_tpu_available():
sampler = SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal())
elif (self.args.local_rank != (- 1)):
sampler = SequentialDistributedSampler(eval_dataset)
else:
sampler = SequentialSampler(eval_dataset)
data_loader = DataLoader(eval_dataset, sampler=sampler, batch_size=self.args.eval_batch_size, drop_last=self.args.dataloader_drop_last)
return data_loader
def get_optimizers(self, num_training_steps: int) -> Tuple[(torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR)]:
"\n Setup the optimizer and the learning rate scheduler.\n We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the\n Trainer's init through :obj:`optimizers`, or override this method in a subclass.\n "
if (self.optimizers is not None):
return self.optimizers
if isinstance(self.args.optimizer_opt, LookaheadOpt):
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.args.optimizer_opt.inner_opt.weight_decay}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
else:
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [{'params': [p for (n, p) in self.model.named_parameters() if (not any(((nd in n) for nd in no_decay)))], 'weight_decay': self.args.optimizer_opt.weight_decay}, {'params': [p for (n, p) in self.model.named_parameters() if any(((nd in n) for nd in no_decay))], 'weight_decay': 0.0}]
if isinstance(self.args.optimizer_opt, SGDOpt):
optimizer = SGD(optimizer_grouped_parameters, lr=self.args.optimizer_opt.lr, momentum=self.args.optimizer_opt.momentum, dampening=self.args.optimizer_opt.dampening, nesterov=self.args.optimizer_opt.nesterov)
elif isinstance(self.args.optimizer_opt, AdamWOpt):
optimizer = AdamW(optimizer_grouped_parameters, lr=self.args.optimizer_opt.lr, betas=self.args.optimizer_opt.betas, eps=self.args.optimizer_opt.eps, weight_decay=self.args.optimizer_opt.weight_decay, amsgrad=self.args.optimizer_opt.amsgrad)
elif isinstance(self.args.optimizer_opt, RAdamWOpt):
optimizer = RAdamW(optimizer_grouped_parameters, lr=self.args.optimizer_opt.lr, betas=self.args.optimizer_opt.betas, eps=self.args.optimizer_opt.eps, weight_decay=self.args.optimizer_opt.weight_decay)
elif isinstance(self.args.optimizer_opt, LookaheadOpt):
if isinstance(self.args.optimizer_opt.inner_opt, SGDOpt):
optimizer_inner = SGD(optimizer_grouped_parameters, lr=self.args.optimizer_opt.inner.lr, momentum=self.args.optimizer_opt.inner.momentum, dampening=self.args.optimizer_opt.inner.dampening, nesterov=self.args.optimizer_opt.inner.nesterov)
optimizer = Lookahead(optimizer_inner, self.args.optimizer_opt.la_steps, self.args.optimizer_opt.la_alpha, self.args.optimizer_opt.pullback_momentum)
elif isinstance(self.args.optimizer_opt, AdamWOpt):
optimizer_inner = AdamW(optimizer_grouped_parameters, lr=self.args.optimizer_opt.inner.lr, betas=self.args.optimizer_opt.inner.betas, eps=self.args.optimizer_opt.inner.eps, weight_decay=self.args.optimizer_opt.inner.weight_decay, amsgrad=self.args.optimizer_opt.inner.amsgrad)
optimizer = Lookahead(optimizer_inner, self.args.optimizer_opt.la_steps, self.args.optimizer_opt.la_alpha, self.args.optimizer_opt.pullback_momentum)
elif isinstance(self.args.optimizer_opt, RAdamWOpt):
optimizer_inner = RAdamW(optimizer_grouped_parameters, lr=self.args.optimizer_opt.inner.lr, betas=self.args.optimizer_opt.inner.betas, eps=self.args.optimizer_opt.inner.eps, weight_decay=self.args.optimizer_opt.inner.weight_decay)
optimizer = Lookahead(optimizer_inner, self.args.optimizer_opt.la_steps, self.args.optimizer_opt.la_alpha, self.args.optimizer_opt.pullback_momentum)
else:
raise NotImplementedError()
else:
raise NotImplementedError()
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps)
return (optimizer, scheduler)
def _setup_wandb(self):
'\n Setup the optional Weights & Biases (`wandb`) integration.\n One can override this method to customize the setup if needed. Find more information at https://docs.wandb.com/huggingface\n You can also override the following environment variables:\n Environment:\n WANDB_WATCH:\n (Optional, ["gradients", "all", "false"]) "gradients" by default, set to "false" to disable gradient logging\n or "all" to log gradients and parameters\n WANDB_PROJECT:\n (Optional): str - "huggingface" by default, set this to a custom string to store results in a different project\n WANDB_DISABLED:\n (Optional): boolean - defaults to false, set to "true" to disable wandb entirely\n '
if self.is_world_master():
logger.info('Automatic Weights & Biases logging enabled, to disable set os.environ["WANDB_DISABLED"] = "true"')
wandb.init(project=os.getenv('WANDB_PROJECT', 'huggingface'), config=vars(self.args))
if ((not is_torch_tpu_available()) and (os.getenv('WANDB_WATCH') != 'false')):
wandb.watch(self.model, log=os.getenv('WANDB_WATCH', 'gradients'), log_freq=max(100, self.args.logging_steps))
    def num_examples(self, dataloader: DataLoader) -> int:
        """
        Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its Dataset.
        """
        return len(dataloader.dataset)
def train(self, model_path: Optional[str]=None):
'\n Main training entry point.\n Args:\n model_path (:obj:`str`, `optional`):\n Local path to the model if the model to train has been instantiated from a local path. If present,\n training will resume from the optimizer/scheduler states loaded here.\n '
train_dataloader = self.get_train_dataloader()
if (self.args.max_steps > 0):
t_total = self.args.max_steps
num_train_epochs = ((self.args.max_steps // (len(train_dataloader) // self.args.gradient_accumulation_steps)) + 1)
else:
t_total = int(((len(train_dataloader) // self.args.gradient_accumulation_steps) * self.args.num_train_epochs))
num_train_epochs = self.args.num_train_epochs
(optimizer, scheduler) = self.get_optimizers(num_training_steps=t_total)
if ((model_path is not None) and os.path.isfile(os.path.join(model_path, 'optimizer.pt')) and os.path.isfile(os.path.join(model_path, 'scheduler.pt'))):
optimizer.load_state_dict(torch.load(os.path.join(model_path, 'optimizer.pt'), map_location=self.args.device))
scheduler.load_state_dict(torch.load(os.path.join(model_path, 'scheduler.pt')))
model = self.model
if (model_path is not None):
self.global_step = int(model_path.split('-')[(- 1)])
epochs_trained = (self.global_step // (len(train_dataloader) // self.args.gradient_accumulation_steps))
steps_trained_in_current_epoch = (self.global_step % (len(train_dataloader) // self.args.gradient_accumulation_steps))
logger.info(' Continuing training from checkpoint, will skip to saved global_step')
logger.info(' Continuing training from epoch %d', epochs_trained)
logger.info(' Continuing training from global step %d', self.global_step)
logger.info(' Will skip the first %d steps in the first epoch', steps_trained_in_current_epoch)
self.model.load_state_dict(torch.load((model_path + '/model.pt')))
if self.args.fp16:
logger.info('***** Using FP16 to train ******')
if (not is_apex_available()):
raise ImportError('Please install apex from https://www.github.com/nvidia/apex to use fp16 training.')
(model, optimizer) = amp.initialize(model, optimizer, opt_level=self.args.fp16_opt_level)
if (self.args.n_gpu > 1):
logger.info(("***** Using %d GPU's *****" % self.args.n_gpu))
model = torch.nn.DataParallel(model)
if (self.args.local_rank != (- 1)):
logger.info('****** Using distributed training *****')
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[self.args.local_rank], output_device=self.args.local_rank)
if (self.tb_writer is not None):
self.tb_writer.add_text('args', self.args.to_json_string())
self.tb_writer.add_hparams(self.args.to_sanitized_dict(), metric_dict={})
if is_torch_tpu_available():
logger.info('***** Using TPU *****')
total_train_batch_size = (self.args.train_batch_size * xm.xrt_world_size())
else:
total_train_batch_size = ((self.args.train_batch_size * self.args.gradient_accumulation_steps) * (torch.distributed.get_world_size() if (self.args.local_rank != (- 1)) else 1))
logger.info('***** Running training *****')
logger.info(' Num examples = %d', self.num_examples(train_dataloader))
logger.info(' Num Epochs = %d', num_train_epochs)
logger.info(' Instantaneous batch size per device = %d', self.args.per_device_train_batch_size)
logger.info(' Total train batch size (w. parallel, distributed & accumulation) = %d', total_train_batch_size)
logger.info(' Gradient Accumulation steps = %d', self.args.gradient_accumulation_steps)
logger.info(' Total optimization steps = %d', t_total)
self.global_step = 0
self.epoch = 0
epochs_trained = 0
steps_trained_in_current_epoch = 0
tr_loss = 0.0
logging_loss = 0.0
model.zero_grad()
train_iterator = trange(epochs_trained, int(num_train_epochs), desc='Epoch', disable=(not self.is_local_master()))
for epoch in train_iterator:
if (isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler)):
train_dataloader.sampler.set_epoch(epoch)
if is_torch_tpu_available():
parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader(self.args.device)
epoch_iterator = tqdm(parallel_loader, desc='Iteration', disable=(not self.is_local_master()))
else:
epoch_iterator = tqdm(train_dataloader, desc='Iteration', disable=(not self.is_local_master()))
if (self.args.past_index >= 0):
self._past = None
for (step, inputs) in enumerate(epoch_iterator):
if (steps_trained_in_current_epoch > 0):
steps_trained_in_current_epoch -= 1
continue
tr_loss += self._training_step(model, inputs, optimizer)
if ((((step + 1) % self.args.gradient_accumulation_steps) == 0) or ((len(epoch_iterator) <= self.args.gradient_accumulation_steps) and ((step + 1) == len(epoch_iterator)))):
if self.args.fp16:
torch.nn.utils.clip_grad_norm_(amp.master_params(optimizer), self.args.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm)
if is_torch_tpu_available():
xm.optimizer_step(optimizer)
else:
optimizer.step()
scheduler.step()
model.zero_grad()
self.global_step += 1
self.epoch = (epoch + ((step + 1) / len(epoch_iterator)))
if (((self.args.logging_steps > 0) and ((self.global_step % self.args.logging_steps) == 0)) or ((self.global_step == 1) and self.args.logging_first_step)):
logs: Dict[(str, float)] = {}
logs['loss'] = ((tr_loss - logging_loss) / self.args.logging_steps)
logs['learning_rate'] = (scheduler.get_last_lr()[0] if (version.parse(torch.__version__) >= version.parse('1.4')) else scheduler.get_lr()[0])
logging_loss = tr_loss
self._log(logs)
if (self.args.evaluate_during_training and ((self.global_step % self.args.eval_steps) == 0)):
self.evaluate()
if ((self.args.save_steps > 0) and ((self.global_step % self.args.save_steps) == 0)):
if hasattr(model, 'module'):
assert (model.module is self.model)
else:
assert (model is self.model)
output_dir = os.path.join(self.args.output_dir, f'{PREFIX_CHECKPOINT_DIR}-{self.global_step}')
self.save_model(output_dir)
if self.is_world_master():
self._rotate_checkpoints()
if is_torch_tpu_available():
xm.rendezvous('saving_optimizer_states')
xm.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
xm.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
elif self.is_world_master():
torch.save(optimizer.state_dict(), os.path.join(output_dir, 'optimizer.pt'))
torch.save(scheduler.state_dict(), os.path.join(output_dir, 'scheduler.pt'))
if ((self.args.max_steps > 0) and (self.global_step > self.args.max_steps)):
epoch_iterator.close()
break
if ((self.args.max_steps > 0) and (self.global_step > self.args.max_steps)):
train_iterator.close()
break
if (self.args.tpu_metrics_debug or self.args.debug):
xm.master_print(met.metrics_report())
if self.tb_writer:
self.tb_writer.close()
if (self.args.past_index and hasattr(self, '_past')):
delattr(self, '_past')
return TrainOutput(self.global_step, (tr_loss / self.global_step))
    def _log(self, logs: Dict[(str, float)], iterator: Optional[tqdm]=None) -> None:
        """
        Send `logs` to TensorBoard and Weights & Biases (when available) and to
        the console or the given tqdm `iterator`.
        """
        if (self.epoch is not None):
            logs['epoch'] = self.epoch
        if (self.global_step is None):
            # May be called before any training step (e.g. a bare evaluate()).
            self.global_step = 0
        if self.tb_writer:
            for (k, v) in logs.items():
                # TensorBoard scalars only accept numeric values.
                if isinstance(v, (int, float)):
                    self.tb_writer.add_scalar(k, v, self.global_step)
                else:
                    logger.warning('Trainer is attempting to log a value of "%s" of type %s for key "%s" as a scalar. This invocation of Tensorboard\'s writer.add_scalar() is incorrect so we dropped this attribute.', v, type(v), k)
            self.tb_writer.flush()
        if is_wandb_available():
            if self.is_world_master():
                wandb.log(logs, step=self.global_step)
        output = {**logs, **{'step': self.global_step}}
        if (iterator is not None):
            # NOTE(review): tqdm.write expects a string; a dict is passed here
            # — confirm this is intended (it mirrors the logger.info branch).
            iterator.write(output)
        else:
            logger.info(output)
def _get_adv_noise(self, gradient, type='fgsm'):
if (type == 'fgsm'):
return (self.args.adv_opt.eps * torch.sign(gradient))
elif (type == 'fgm'):
return ((self.args.adv_opt.eps * torch.sign(gradient)) / torch.norm(gradient, p=2))
else:
logging.error('Not Implemented Yet')
def _training_step(self, model: nn.Module, inputs: Dict[(str, Union[(torch.Tensor, Any)])], optimizer: torch.optim.Optimizer) -> float:
model.train()
for (k, v) in inputs.items():
if isinstance(v, torch.Tensor):
inputs[k] = v.to(self.args.device)
if ((self.args.past_index >= 0) and (self._past is not None)):
inputs['mems'] = self._past
if isinstance(model, nn.DataParallel):
inputs['return_tuple'] = True
if (self.args.adv_opt is not None):
try:
if (isinstance(self.args.adv_opt, FGSMOpt) or isinstance(self.args.adv_opt, FGMOpt)):
loss = self._train_fgm(inputs, model, optimizer)
elif isinstance(self.args.adv_opt, FreeLBOp):
loss = self._train_free_lb(inputs, model, optimizer)
except Exception as e:
raise e
finally:
pass
else:
outputs = model(**inputs)
loss = outputs[0]
loss = self._get_loss(loss, optimizer, outputs)
return loss.item()
def _train_fgm(self, inputs, model, optimizer):
type = ('fgsm' if isinstance(self.args.adv_opt, FGSMOpt) else 'fgm')
outputs = model(**inputs)
loss = outputs[0]
loss = self._get_loss(loss, optimizer, outputs)
gradient = model.get_gradient()
noise = self._get_adv_noise(gradient, type=type)
inputs['noise'] = noise
optimizer.zero_grad()
model.zero_grad()
outputs = model(**inputs)
loss = outputs(0)
loss = self._get_loss(loss, optimizer, outputs)
return loss
def _train_free_lb(self, inputs, model, optimizer):
if isinstance(model, torch.nn.DataParallel):
embeds_init = model.module.get_embed(**inputs)
else:
embeds_init = model.get_embed(**inputs)
delta = torch.zeros_like(embeds_init)
for astep in range(self.args.adv_opt.adv_steps):
delta.requires_grad_()
inputs['inputs_embeds'] = (delta + embeds_init)
inputs['dp_masks'] = dp_masks
outputs = model(**inputs)
loss = outputs[0]
self._get_loss(loss, optimizer, outputs)
if (astep == (args.adv_steps - 1)):
break
delta_grad = delta.grad.clone().detach()
if (self.args.adv_opt.norm_type == 'l2'):
denorm = torch.norm(delta_grad.view(delta_grad.size(0), (- 1)), dim=1).view((- 1), 1, 1)
denorm = torch.clamp(denorm, min=1e-08)
delta = (delta + ((self.opt.adv_opt.adv_lr * delta_grad) / denorm)).detach()
if (self.arsg.adv_opt.adv_max_norm > 0):
delta_norm = torch.norm(delta.view(delta.size(0), (- 1)).float(), p=2, dim=1).detach()
exceed_mask = (delta_norm > args.adv_max_norm).to(embeds_init)
reweights = (((args.adv_max_norm / delta_norm) * exceed_mask) + (1 - exceed_mask)).view((- 1), 1, 1)
delta = (delta * reweights).detach()
elif (self.args.adv_opt.norm_type == 'linf'):
denorm = torch.norm(delta_grad.view(delta_grad.size(0), (- 1)), dim=1, p=float('inf')).view((- 1), 1, 1)
denorm = torch.clamp(denorm, min=1e-08)
delta = (delta + ((args.adv_lr * delta_grad) / denorm)).detach()
if (args.adv_max_norm > 0):
delta = torch.clamp(delta, (- args.adv_max_norm), args.adv_max_norm).detach()
else:
print('Norm type {} not specified.'.format(args.norm_type))
exit()
if isinstance(model, torch.nn.DataParallel):
embeds_init = model.module.get_embed(**inputs)
else:
embeds_init = model.get_embed(**inputs)
return loss
def _get_loss(self, loss, optimizer, outputs):
if (self.args.past_index >= 0):
self._past = outputs[self.args.past_index]
if (self.args.n_gpu > 1):
loss = loss.mean()
if (self.args.gradient_accumulation_steps > 1):
loss = (loss / self.args.gradient_accumulation_steps)
if self.args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
else:
loss.backward()
return loss
def is_local_master(self) -> bool:
if is_torch_tpu_available():
return xm.is_master_ordinal(local=True)
else:
return (self.args.local_rank in [(- 1), 0])
def is_world_master(self) -> bool:
'\n This will be True only in one process, even in distributed mode,\n even when training on multiple machines.\n '
if is_torch_tpu_available():
return xm.is_master_ordinal(local=False)
else:
return ((self.args.local_rank == (- 1)) or (torch.distributed.get_rank() == 0))
def save_model(self, output_dir: Optional[str]=None):
'\n Will save the model, so you can reload it using :obj:`from_pretrained()`.\n Will only save from the world_master process (unless in TPUs).\n '
if is_torch_tpu_available():
self._save_tpu(output_dir)
elif self.is_world_master():
self._save(output_dir)
def _save_tpu(self, output_dir: Optional[str]=None):
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
logger.info('Saving model checkpoint to %s', output_dir)
if xm.is_master_ordinal():
os.makedirs(output_dir, exist_ok=True)
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
if (not isinstance(self.model, torch.nn.Module)):
raise ValueError('Trainer.model appears to not be a PreTrainedModel')
xm.rendezvous('saving_checkpoint')
torch.save(self.model.state_dict(), ((output_dir + '/') + 'model.pt'))
def _save(self, output_dir: Optional[str]=None):
output_dir = (output_dir if (output_dir is not None) else self.args.output_dir)
os.makedirs(output_dir, exist_ok=True)
logger.info('Saving model checkpoint to %s', output_dir)
if (not isinstance(self.model, torch.nn.Module)):
raise ValueError('Trainer.model appears to not be a Torch Model')
torch.save(self.model.state_dict(), ((output_dir + '/') + 'model.pt'))
torch.save(self.args, os.path.join(output_dir, 'training_args.bin'))
def _sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]:
ordering_and_checkpoint_path = []
glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f'{checkpoint_prefix}-*')]
for path in glob_checkpoints:
if use_mtime:
ordering_and_checkpoint_path.append((os.path.getmtime(path), path))
else:
regex_match = re.match(f'.*{checkpoint_prefix}-([0-9]+)', path)
if (regex_match and regex_match.groups()):
ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path))
checkpoints_sorted = sorted(ordering_and_checkpoint_path)
checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted]
return checkpoints_sorted
def _rotate_checkpoints(self, use_mtime=False) -> None:
if ((self.args.save_total_limit is None) or (self.args.save_total_limit <= 0)):
return
checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime)
if (len(checkpoints_sorted) <= self.args.save_total_limit):
return
number_of_checkpoints_to_delete = max(0, (len(checkpoints_sorted) - self.args.save_total_limit))
checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete]
for checkpoint in checkpoints_to_be_deleted:
logger.info('Deleting older checkpoint [{}] due to args.save_total_limit'.format(checkpoint))
shutil.rmtree(checkpoint)
def evaluate(self, eval_dataset: Optional[Dataset]=None) -> Dict[(str, float)]:
'\n Run evaluation and returns metrics.\n The calling script will be responsible for providing a method to compute metrics, as they are\n task-dependent (pass it to the init :obj:`compute_metrics` argument).\n Args:\n eval_dataset (:obj:`Dataset`, `optional`):\n Pass a dataset if you wish to override :obj:`self.eval_dataset`.\n Returns:\n A dictionary containing the evaluation loss and the potential metrics computed from the predictions.\n '
eval_dataloader = self.get_eval_dataloader(eval_dataset)
output = self._prediction_loop(eval_dataloader, description='Evaluation')
self._log(output.metrics)
logger.info('Evaluate results')
logger.info(output.metrics)
if (self.args.tpu_metrics_debug or self.args.debug):
xm.master_print(met.metrics_report())
return output.metrics
    def predict(self, test_dataset: Dataset) -> PredictionOutput:
        """
        Run prediction and returns predictions and potential metrics.

        Depending on the dataset and your use case, your test dataset may
        contain labels. In that case, this method will also return metrics,
        like in :obj:`evaluate()`.

        Args:
            test_dataset (:obj:`Dataset`): Dataset to run the predictions on.

        Returns:
            `NamedTuple`:
                predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`.
                label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some).
                metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset contained labels).
        """
        # NOTE(review): `get_test_dataloader` is not defined anywhere in this
        # class — confirm a subclass provides it, otherwise this raises
        # AttributeError at runtime.
        test_dataloader = self.get_test_dataloader(test_dataset)
        return self._prediction_loop(test_dataloader, description='Prediction')
    def _prediction_loop(self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool]=None) -> PredictionOutput:
        """
        Prediction/evaluation loop, shared by `evaluate()` and `predict()`.

        Works both with or without labels.
        """
        prediction_loss_only = (prediction_loss_only if (prediction_loss_only is not None) else self.prediction_loss_only)
        model = self.model
        if (self.args.n_gpu > 1):
            # Wrap for multi-GPU inference; self.model stays unwrapped.
            model = torch.nn.DataParallel(model)
        else:
            model = self.model
        batch_size = dataloader.batch_size
        logger.info('***** Running %s *****', description)
        logger.info(' Num examples = %d', self.num_examples(dataloader))
        logger.info(' Batch size = %d', batch_size)
        eval_losses: List[float] = []
        # Accumulated across batches; stay None until the first batch arrives.
        preds: torch.Tensor = None
        label_ids: torch.Tensor = None
        model.eval()
        if is_torch_tpu_available():
            dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device)
        if (self.args.past_index >= 0):
            past = None
        for inputs in tqdm(dataloader, desc=description):
            # A batch "has labels" if any recognized label key is present.
            has_labels = any(((inputs.get(k) is not None) for k in ['labels', 'lm_labels', 'masked_lm_labels']))
            for (k, v) in inputs.items():
                if isinstance(v, torch.Tensor):
                    inputs[k] = v.to(self.args.device)
            if (self.args.past_index >= 0):
                inputs['mems'] = past
            if isinstance(model, nn.DataParallel):
                # DataParallel can only scatter/gather plain tuples.
                inputs['return_tuple'] = True
            with torch.no_grad():
                outputs = model(**inputs)
                if has_labels:
                    # With labels the model returns (loss, logits, ...).
                    (step_eval_loss, logits) = outputs[:2]
                    eval_losses += [step_eval_loss.mean().item()]
                else:
                    logits = outputs[0]
                if (self.args.past_index >= 0):
                    # Without labels there is no loss, so outputs are shifted by one.
                    past = outputs[(self.args.past_index if has_labels else (self.args.past_index - 1))]
            if (not prediction_loss_only):
                if (preds is None):
                    preds = logits.detach()
                else:
                    preds = torch.cat((preds, logits.detach()), dim=0)
                if (inputs.get('labels') is not None):
                    if (label_ids is None):
                        label_ids = inputs['labels'].detach()
                    else:
                        label_ids = torch.cat((label_ids, inputs['labels'].detach()), dim=0)
        if (self.args.local_rank != (- 1)):
            # Distributed eval: gather results from all processes and drop the
            # padding added by SequentialDistributedSampler.
            if (preds is not None):
                preds = self.distributed_concat(preds, num_total_examples=self.num_examples(dataloader))
            if (label_ids is not None):
                label_ids = self.distributed_concat(label_ids, num_total_examples=self.num_examples(dataloader))
        elif is_torch_tpu_available():
            # TPU eval: reduce across XLA processes.
            if (preds is not None):
                preds = xm.mesh_reduce('eval_preds', preds, torch.cat)
            if (label_ids is not None):
                label_ids = xm.mesh_reduce('eval_label_ids', label_ids, torch.cat)
        if (preds is not None):
            preds = preds.cpu().numpy()
        if (label_ids is not None):
            label_ids = label_ids.cpu().numpy()
        if ((self.compute_metrics is not None) and (preds is not None) and (label_ids is not None)):
            metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids))
        else:
            metrics = {}
        if (len(eval_losses) > 0):
            metrics['eval_loss'] = np.mean(eval_losses)
        # Normalize metric names so they all carry the `eval_` prefix.
        for key in list(metrics.keys()):
            if (not key.startswith('eval_')):
                metrics[f'eval_{key}'] = metrics.pop(key)
        return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics)
def distributed_concat(self, tensor: torch.Tensor, num_total_examples: int) -> torch.Tensor:
assert (self.args.local_rank != (- 1))
output_tensors = [tensor.clone() for _ in range(torch.distributed.get_world_size())]
torch.distributed.all_gather(output_tensors, tensor)
concat = torch.cat(output_tensors, dim=0)
output = concat[:num_total_examples]
return output
|
def is_wandb_available():
    """Return whether the Weights & Biases client was importable (module-level probe)."""
    return _has_wandb
|
def set_seed(seed: int):
    """
    Helper function for reproducible behavior to set the seed in ``random``, ``numpy``, ``torch`` and/or ``tf``
    (if installed).

    Args:
        seed (:obj:`int`): The seed to set.
    """
    random.seed(seed)
    np.random.seed(seed)
    if is_torch_available():
        import torch
        torch.manual_seed(seed)
        # Seed every CUDA device, not just the current one.
        torch.cuda.manual_seed_all(seed)
    if is_tf_available():
        import tensorflow as tf
        tf.random.set_seed(seed)
|
class EvalPrediction(NamedTuple):
    """
    Evaluation output (always contains labels), to be used to compute metrics.

    Parameters:
        predictions (:obj:`np.ndarray`): Predictions of the model.
        label_ids (:obj:`np.ndarray`): Targets to be matched.
    """
    predictions: np.ndarray
    label_ids: np.ndarray
|
class PredictionOutput(NamedTuple):
    """
    Output of `Trainer._prediction_loop`: raw model predictions plus, when the
    dataset carried labels, the label ids and the computed metrics.
    """
    predictions: np.ndarray
    # None when the dataset carried no labels.
    label_ids: Optional[np.ndarray]
    # Empty/None when no metrics could be computed.
    metrics: Optional[Dict[(str, float)]]
|
class TrainOutput(NamedTuple):
    """Result of `Trainer.train`: the final global step and the average training loss."""
    global_step: int
    training_loss: float
|
def default_logdir() -> str:
    """
    Same default TensorBoard log directory as PyTorch:
    ``runs/<MonDD_HH-MM-SS>_<hostname>``.
    """
    import socket
    from datetime import datetime
    timestamp = datetime.now().strftime('%b%d_%H-%M-%S')
    return os.path.join('runs', f'{timestamp}_{socket.gethostname()}')
|
@dataclass
class TrainingArguments():
    """
    Arguments controlling the training loop: paths, batch sizes, schedule,
    logging/checkpoint cadence and device/distributed settings.

    Each field's ``metadata['help']`` doubles as its command-line help text
    when the dataclass is parsed by an argument parser.
    """

    output_dir: str = field(metadata={'help': 'The output directory where the model predictions and checkpoints will be written.'})
    overwrite_output_dir: bool = field(default=False, metadata={'help': 'Overwrite the content of the output directory.Use this to continue training if output_dir points to a checkpoint directory.'})
    do_train: bool = field(default=True, metadata={'help': 'Whether to run training.'})
    do_eval: bool = field(default=True, metadata={'help': 'Whether to run eval on the dev set.'})
    do_predict: bool = field(default=False, metadata={'help': 'Whether to run predictions on the test set.'})
    evaluate_during_training: bool = field(default=True, metadata={'help': 'Run evaluation during training at each logging step.'})
    per_device_train_batch_size: int = field(default=8, metadata={'help': 'Batch size per GPU/TPU core/CPU for training.'})
    per_device_eval_batch_size: int = field(default=8, metadata={'help': 'Batch size per GPU/TPU core/CPU for evaluation.'})
    per_gpu_train_batch_size: Optional[int] = field(default=None, metadata={'help': 'Deprecated, the use of `--per_device_train_batch_size` is preferred. Batch size per GPU/TPU core/CPU for training.'})
    per_gpu_eval_batch_size: Optional[int] = field(default=None, metadata={'help': 'Deprecated, the use of `--per_device_eval_batch_size` is preferred.Batch size per GPU/TPU core/CPU for evaluation.'})
    gradient_accumulation_steps: int = field(default=1, metadata={'help': 'Number of updates steps to accumulate before performing a backward/update pass.'})
    # Fix: use default_factory so every TrainingArguments instance gets its own
    # options object. The previous `default=AdamWOpt()` created ONE shared
    # instance -- any mutation of it leaked into all other instances.
    optimizer_opt: OptimizerOptBase = field(default_factory=AdamWOpt, metadata={'help': '\n The configurations of optimizers. Must be a subclass of OptimizerOptBase.\n Default value is AdamWOpt()\n '})
    max_grad_norm: float = field(default=1.0, metadata={'help': 'Max gradient norm.'})
    num_train_epochs: float = field(default=3.0, metadata={'help': 'Total number of training epochs to perform.'})
    max_steps: int = field(default=-1, metadata={'help': 'If > 0: set total number of training steps to perform. Override num_train_epochs.'})
    warmup_steps: int = field(default=0, metadata={'help': 'Linear warmup over warmup_steps.'})
    logging_dir: Optional[str] = field(default_factory=default_logdir, metadata={'help': 'Tensorboard log dir.'})
    logging_first_step: bool = field(default=False, metadata={'help': 'Log and eval the first global_step'})
    logging_steps: int = field(default=500, metadata={'help': 'Log every X updates steps.'})
    save_steps: int = field(default=500, metadata={'help': 'Save checkpoint every X updates steps.'})
    save_total_limit: Optional[int] = field(default=None, metadata={'help': 'Limit the total amount of checkpoints.Deletes the older checkpoints in the output_dir. Default is unlimited checkpoints'})
    no_cuda: bool = field(default=False, metadata={'help': 'Do not use CUDA even when it is available'})
    seed: int = field(default=42, metadata={'help': 'random seed for initialization'})
    fp16: bool = field(default=False, metadata={'help': 'Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit'})
    fp16_opt_level: str = field(default='O1', metadata={'help': "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3'].See details at https://nvidia.github.io/apex/amp.html"})
    local_rank: int = field(default=-1, metadata={'help': 'For distributed training: local_rank'})
    tpu_num_cores: Optional[int] = field(default=None, metadata={'help': 'TPU: Number of TPU cores (automatically passed by launcher script)'})
    tpu_metrics_debug: bool = field(default=False, metadata={'help': 'Deprecated, the use of `--debug` is preferred. TPU: Whether to print debug metrics'})
    debug: bool = field(default=False, metadata={'help': 'Whether to print debug metrics on TPU'})
    dataloader_drop_last: bool = field(default=False, metadata={'help': 'Drop the last incomplete batch if it is not divisible by the batch size.'})
    eval_steps: int = field(default=1000, metadata={'help': 'Run an evaluation every X steps.'})
    past_index: int = field(default=-1, metadata={'help': 'If >=0, uses the corresponding part of the output as the past state for next step.'})
    # Fix: annotation made Optional to match the documented None default.
    adv_opt: Optional[AdversarialOptBase] = field(default=None, metadata={'help': 'The options that we use to run adversarial training based on gradient.If None, no adversarial training is performed. Default is None.'})

    @property
    def train_batch_size(self) -> int:
        """
        The actual batch size for training (may differ from :obj:`per_gpu_train_batch_size` in distributed training).
        """
        if self.per_gpu_train_batch_size:
            logger.warning('Using deprecated `--per_gpu_train_batch_size` argument which will be removed in a future version. Using `--per_device_train_batch_size` is preferred.')
        per_device_batch_size = self.per_gpu_train_batch_size or self.per_device_train_batch_size
        # max(1, n_gpu) keeps the CPU-only case at the per-device size.
        return per_device_batch_size * max(1, self.n_gpu)

    @property
    def eval_batch_size(self) -> int:
        """
        The actual batch size for evaluation (may differ from :obj:`per_gpu_eval_batch_size` in distributed training).
        """
        if self.per_gpu_eval_batch_size:
            logger.warning('Using deprecated `--per_gpu_eval_batch_size` argument which will be removed in a future version. Using `--per_device_eval_batch_size` is preferred.')
        per_device_batch_size = self.per_gpu_eval_batch_size or self.per_device_eval_batch_size
        return per_device_batch_size * max(1, self.n_gpu)

    @cached_property
    @torch_required
    def _setup_devices(self) -> Tuple['torch.device', int]:
        """Pick the torch device and GPU count once; cached for later accesses."""
        logger.info('PyTorch: setting up devices')
        if self.no_cuda:
            device = torch.device('cpu')
            n_gpu = 0
        elif is_torch_tpu_available():
            device = xm.xla_device()
            n_gpu = 0
        elif self.local_rank == -1:
            # Single-process mode: one process drives every visible GPU.
            device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
            n_gpu = torch.cuda.device_count()
        else:
            # Distributed mode: one process per GPU, selected by local_rank.
            torch.distributed.init_process_group(backend='nccl')
            device = torch.device('cuda', self.local_rank)
            n_gpu = 1
        if device.type == 'cuda':
            torch.cuda.set_device(device)
        return device, n_gpu

    @property
    @torch_required
    def device(self) -> 'torch.device':
        """
        The device used by this process.
        """
        return self._setup_devices[0]

    @property
    @torch_required
    def n_gpu(self):
        """
        The number of GPUs used by this process.

        Note:
            This will only be greater than one when you have multiple GPUs available but are not using distributed
            training. For distributed training, it will always be 1.
        """
        return self._setup_devices[1]

    def to_json_string(self):
        """
        Serializes this instance to a JSON string.
        """
        return json.dumps(dataclasses.asdict(self), indent=2)

    def to_sanitized_dict(self) -> Dict[str, Any]:
        """
        Sanitized serialization to use with TensorBoard's hparams.
        """
        d = dataclasses.asdict(self)
        valid_types = [bool, int, float, str]
        if is_torch_available():
            valid_types.append(torch.Tensor)
        # Anything TensorBoard cannot display natively is stringified.
        return {k: (v if type(v) in valid_types else str(v)) for (k, v) in d.items()}
|
def a2c_step(policy_net, value_net, optimizer_policy, optimizer_value, states, actions, returns, advantages, l2_reg):
    """One synchronous actor-critic update: fit the critic to the empirical
    returns, then take one policy-gradient step weighted by the advantages."""
    # --- critic update: MSE to returns plus L2 penalty on the weights ---
    critic_loss = (value_net(states) - returns).pow(2).mean()
    for weight in value_net.parameters():
        critic_loss = critic_loss + weight.pow(2).sum() * l2_reg
    optimizer_value.zero_grad()
    critic_loss.backward()
    optimizer_value.step()
    # --- actor update: REINFORCE-style loss with estimated advantages ---
    actor_loss = -(policy_net.get_log_prob(states, actions) * advantages).mean()
    optimizer_policy.zero_grad()
    actor_loss.backward()
    torch.nn.utils.clip_grad_norm_(policy_net.parameters(), 40)
    optimizer_policy.step()
|
def collect_samples(pid, queue, env, policy, custom_reward, mean_action, render, running_state, min_batch_size):
    """Roll out episodes of ``env`` under ``policy`` until at least
    ``min_batch_size`` environment steps have been collected.

    Transitions are pushed into a ``Memory``; summary statistics go into a
    dict. When ``queue`` is given (worker-process mode) the result is put on
    the queue as ``[pid, memory, log]``; otherwise ``(memory, log)`` is
    returned directly.
    """
    # NOTE(review): draws (and discards) `pid` samples from the torch RNG,
    # presumably to desynchronize random streams across workers -- confirm.
    torch.randn(pid)
    log = dict()
    memory = Memory()
    num_steps = 0
    total_reward = 0
    min_reward = 1000000.0
    max_reward = (- 1000000.0)
    # Separate accumulators for the custom (surrogate) reward, if any.
    total_c_reward = 0
    min_c_reward = 1000000.0
    max_c_reward = (- 1000000.0)
    num_episodes = 0
    while (num_steps < min_batch_size):
        state = env.reset()
        if (running_state is not None):
            # Online state normalization.
            state = running_state(state)
        reward_episode = 0
        for t in range(10000):  # hard cap on episode length
            state_var = tensor(state).unsqueeze(0)
            with torch.no_grad():
                if mean_action:
                    # Deterministic rollout: take the policy mean output.
                    action = policy(state_var)[0][0].numpy()
                else:
                    action = policy.select_action(state_var)[0].numpy()
            action = (int(action) if policy.is_disc_action else action.astype(np.float64))
            (next_state, reward, done, _) = env.step(action)
            reward_episode += reward
            if (running_state is not None):
                next_state = running_state(next_state)
            if (custom_reward is not None):
                # Replace the environment reward (e.g. a learned GAIL reward).
                reward = custom_reward(state, action)
                total_c_reward += reward
                min_c_reward = min(min_c_reward, reward)
                max_c_reward = max(max_c_reward, reward)
            mask = (0 if done else 1)  # 0 marks the episode boundary
            memory.push(state, action, mask, next_state, reward)
            if render:
                env.render()
            if done:
                break
            state = next_state
        num_steps += (t + 1)
        num_episodes += 1
        total_reward += reward_episode
        min_reward = min(min_reward, reward_episode)
        max_reward = max(max_reward, reward_episode)
    log['num_steps'] = num_steps
    log['num_episodes'] = num_episodes
    log['total_reward'] = total_reward
    log['avg_reward'] = (total_reward / num_episodes)
    log['max_reward'] = max_reward
    log['min_reward'] = min_reward
    if (custom_reward is not None):
        log['total_c_reward'] = total_c_reward
        log['avg_c_reward'] = (total_c_reward / num_steps)
        log['max_c_reward'] = max_c_reward
        log['min_c_reward'] = min_c_reward
    if (queue is not None):
        queue.put([pid, memory, log])
    else:
        return (memory, log)
|
def merge_log(log_list):
    """Combine per-worker rollout logs into one aggregate log dict."""
    merged = dict()
    # Additive counters first, then derived averages and extrema.
    for key in ('total_reward', 'num_episodes', 'num_steps'):
        merged[key] = sum(entry[key] for entry in log_list)
    merged['avg_reward'] = merged['total_reward'] / merged['num_episodes']
    merged['max_reward'] = max(entry['max_reward'] for entry in log_list)
    merged['min_reward'] = min(entry['min_reward'] for entry in log_list)
    # Custom-reward stats are present either in every log or in none.
    if 'total_c_reward' in log_list[0]:
        merged['total_c_reward'] = sum(entry['total_c_reward'] for entry in log_list)
        merged['avg_c_reward'] = merged['total_c_reward'] / merged['num_steps']
        merged['max_c_reward'] = max(entry['max_c_reward'] for entry in log_list)
        merged['min_c_reward'] = min(entry['min_c_reward'] for entry in log_list)
    return merged
|
class Agent():
    """Drives sample collection for RL training, optionally spreading
    rollouts across several worker processes."""

    def __init__(self, env, policy, device, custom_reward=None, mean_action=False, render=False, running_state=None, num_threads=1):
        self.env = env
        self.policy = policy
        self.device = device
        self.custom_reward = custom_reward      # optional surrogate reward fn
        self.mean_action = mean_action          # use the deterministic policy mean
        self.running_state = running_state      # optional state normalizer
        self.render = render
        self.num_threads = num_threads          # worker processes (1 = inline only)

    def collect_samples(self, min_batch_size):
        """Gather at least ``min_batch_size`` transition steps.

        Returns ``(batch, log)`` where ``batch`` is the transposed transition
        tuple from ``Memory.sample()`` and ``log`` aggregates rollout stats.
        """
        t_start = time.time()
        # Sampling happens on CPU so worker processes can use the policy.
        to_device(torch.device('cpu'), self.policy)
        thread_batch_size = int(math.floor((min_batch_size / self.num_threads)))
        queue = multiprocessing.Queue()
        workers = []
        # Workers 1..n-1 run in subprocesses; pid 0 runs in this process
        # (and is the only one allowed to render).
        for i in range((self.num_threads - 1)):
            worker_args = ((i + 1), queue, self.env, self.policy, self.custom_reward, self.mean_action, False, self.running_state, thread_batch_size)
            workers.append(multiprocessing.Process(target=collect_samples, args=worker_args))
        for worker in workers:
            worker.start()
        (memory, log) = collect_samples(0, None, self.env, self.policy, self.custom_reward, self.mean_action, self.render, self.running_state, thread_batch_size)
        worker_logs = ([None] * len(workers))
        worker_memories = ([None] * len(workers))
        # Results arrive in arbitrary order; slot them by pid.
        for _ in workers:
            (pid, worker_memory, worker_log) = queue.get()
            worker_memories[(pid - 1)] = worker_memory
            worker_logs[(pid - 1)] = worker_log
        for worker_memory in worker_memories:
            memory.append(worker_memory)
        batch = memory.sample()
        if (self.num_threads > 1):
            log_list = ([log] + worker_logs)
            log = merge_log(log_list)
        # Move the policy back to the training device.
        to_device(self.device, self.policy)
        t_end = time.time()
        log['sample_time'] = (t_end - t_start)
        # Per-dimension action statistics for diagnostics.
        log['action_mean'] = np.mean(np.vstack(batch.action), axis=0)
        log['action_min'] = np.min(np.vstack(batch.action), axis=0)
        log['action_max'] = np.max(np.vstack(batch.action), axis=0)
        return (batch, log)
|
def estimate_advantages(rewards, masks, values, gamma, tau, device):
    """Compute GAE(gamma, tau) advantages and the corresponding returns.

    ``masks[i]`` is 0 at an episode boundary, which resets the recursion.
    The computation runs on CPU; results are moved back to ``device``.
    """
    rewards, masks, values = to_device(torch.device('cpu'), rewards, masks, values)
    tensor_type = type(rewards)
    n = rewards.size(0)
    deltas = tensor_type(n, 1)
    advantages = tensor_type(n, 1)
    prev_value = 0
    prev_advantage = 0
    # Backward recursion: delta_t = r_t + gamma*V(s_{t+1}) - V(s_t),
    # A_t = delta_t + gamma*tau*A_{t+1}, both cut off by the episode mask.
    for i in reversed(range(n)):
        deltas[i] = rewards[i] + gamma * prev_value * masks[i] - values[i]
        advantages[i] = deltas[i] + gamma * tau * prev_advantage * masks[i]
        prev_value = values[i, 0]
        prev_advantage = advantages[i, 0]
    returns = values + advantages
    # Standardize advantages (no epsilon: assumes nonzero spread across steps).
    advantages = (advantages - advantages.mean()) / advantages.std()
    advantages, returns = to_device(device, advantages, returns)
    return advantages, returns
|
def ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, optim_value_iternum, states, actions, returns, advantages, fixed_log_probs, clip_epsilon, l2_reg):
    """One PPO update: ``optim_value_iternum`` critic regression steps,
    then a single clipped-surrogate policy step."""
    # --- critic: regress V(s) onto empirical returns, with L2 regularization ---
    for _ in range(optim_value_iternum):
        critic_loss = (value_net(states) - returns).pow(2).mean()
        for weight in value_net.parameters():
            critic_loss = critic_loss + weight.pow(2).sum() * l2_reg
        optimizer_value.zero_grad()
        critic_loss.backward()
        optimizer_value.step()
    # --- actor: PPO clipped surrogate objective ---
    log_probs = policy_net.get_log_prob(states, actions)
    ratio = torch.exp(log_probs - fixed_log_probs)
    unclipped = ratio * advantages
    clipped = torch.clamp(ratio, 1.0 - clip_epsilon, 1.0 + clip_epsilon) * advantages
    policy_loss = -torch.min(unclipped, clipped).mean()
    optimizer_policy.zero_grad()
    policy_loss.backward()
    torch.nn.utils.clip_grad_norm_(policy_net.parameters(), 40)
    optimizer_policy.step()
|
def conjugate_gradients(Avp_f, b, nsteps, rdotr_tol=1e-10):
    """Solve ``A x = b`` with the conjugate-gradient method, where the matrix
    ``A`` is available only through the matrix-vector product ``Avp_f``.

    Stops after ``nsteps`` iterations or once the squared residual norm falls
    below ``rdotr_tol``.
    """
    x = zeros(b.size(), device=b.device)
    residual = b.clone()
    direction = b.clone()
    rdotr = torch.dot(residual, residual)
    for _ in range(nsteps):
        Avp = Avp_f(direction)
        step = rdotr / torch.dot(direction, Avp)
        x += step * direction
        residual -= step * Avp
        new_rdotr = torch.dot(residual, residual)
        # Fletcher-Reeves update of the search direction.
        direction = residual + (new_rdotr / rdotr) * direction
        rdotr = new_rdotr
        if rdotr < rdotr_tol:
            break
    return x
|
def line_search(model, f, x, fullstep, expected_improve_full, max_backtracks=10, accept_ratio=0.1):
    """Backtracking line search along ``fullstep`` starting from parameters ``x``.

    Accepts the first step whose actual improvement of ``f`` is at least
    ``accept_ratio`` times the expected (linearized) improvement.
    Returns ``(accepted, params)``.
    """
    baseline = f(True).item()
    for backtrack in range(max_backtracks):
        stepfrac = 0.5 ** backtrack
        candidate = x + stepfrac * fullstep
        set_flat_params_to(model, candidate)
        trial = f(True).item()
        actual_improve = baseline - trial
        expected_improve = expected_improve_full * stepfrac
        if actual_improve / expected_improve > accept_ratio:
            return True, candidate
    # No acceptable step found; report the original parameters.
    return False, x
|
def trpo_step(policy_net, value_net, states, actions, returns, advantages, max_kl, damping, l2_reg, use_fim=True):
    """One TRPO update.

    The critic is fit with L-BFGS; the policy takes a natural-gradient step
    (conjugate gradient + backtracking line search) constrained by ``max_kl``.
    Returns True if the line search accepted a step.
    """
    'update critic'

    def get_value_loss(flat_params):
        # Objective/gradient callback for scipy's L-BFGS: MSE to returns + L2.
        set_flat_params_to(value_net, tensor(flat_params))
        for param in value_net.parameters():
            if (param.grad is not None):
                param.grad.data.fill_(0)
        values_pred = value_net(states)
        value_loss = (values_pred - returns).pow(2).mean()
        for param in value_net.parameters():
            value_loss += (param.pow(2).sum() * l2_reg)
        value_loss.backward()
        return (value_loss.item(), get_flat_grad_from(value_net.parameters()).cpu().numpy())
    (flat_params, _, opt_info) = scipy.optimize.fmin_l_bfgs_b(get_value_loss, get_flat_params_from(value_net).detach().cpu().numpy(), maxiter=25)
    set_flat_params_to(value_net, tensor(flat_params))
    'update policy'
    with torch.no_grad():
        # Log-probs under the pre-update policy, frozen for the surrogate loss.
        fixed_log_probs = policy_net.get_log_prob(states, actions)
    'define the loss function for TRPO'

    def get_loss(volatile=False):
        # Importance-weighted surrogate loss around the frozen log-probs.
        with torch.set_grad_enabled((not volatile)):
            log_probs = policy_net.get_log_prob(states, actions)
            action_loss = ((- advantages) * torch.exp((log_probs - fixed_log_probs)))
            return action_loss.mean()
    'use fisher information matrix for Hessian*vector'

    def Fvp_fim(v):
        # Fisher-vector product built from the FIM of the policy's output
        # distribution, avoiding a second differentiation of the KL.
        (M, mu, info) = policy_net.get_fim(states)
        mu = mu.view((- 1))
        # For continuous policies the log-std parameter is handled analytically
        # below, so exclude it from the Jacobian computation.
        filter_input_ids = (set() if policy_net.is_disc_action else set([info['std_id']]))
        t = ones(mu.size(), requires_grad=True, device=mu.device)
        mu_t = (mu * t).sum()
        Jt = compute_flat_grad(mu_t, policy_net.parameters(), filter_input_ids=filter_input_ids, create_graph=True)
        Jtv = (Jt * v).sum()
        Jv = torch.autograd.grad(Jtv, t)[0]
        MJv = (M * Jv.detach())
        mu_MJv = (MJv * mu).sum()
        JTMJv = compute_flat_grad(mu_MJv, policy_net.parameters(), filter_input_ids=filter_input_ids).detach()
        JTMJv /= states.shape[0]
        if (not policy_net.is_disc_action):
            # Analytic FIM block for the log-std parameter (constant factor 2).
            std_index = info['std_index']
            JTMJv[std_index:(std_index + M.shape[0])] += (2 * v[std_index:(std_index + M.shape[0])])
        return (JTMJv + (v * damping))
    'directly compute Hessian*vector from KL'

    def Fvp_direct(v):
        # Hessian-vector product via double backprop through the mean KL.
        kl = policy_net.get_kl(states)
        kl = kl.mean()
        grads = torch.autograd.grad(kl, policy_net.parameters(), create_graph=True)
        flat_grad_kl = torch.cat([grad.view((- 1)) for grad in grads])
        kl_v = (flat_grad_kl * v).sum()
        grads = torch.autograd.grad(kl_v, policy_net.parameters())
        flat_grad_grad_kl = torch.cat([grad.contiguous().view((- 1)) for grad in grads]).detach()
        return (flat_grad_grad_kl + (v * damping))
    Fvp = (Fvp_fim if use_fim else Fvp_direct)
    loss = get_loss()
    grads = torch.autograd.grad(loss, policy_net.parameters())
    loss_grad = torch.cat([grad.view((- 1)) for grad in grads]).detach()
    # Natural-gradient direction: approximately solve F * stepdir = -g.
    stepdir = conjugate_gradients(Fvp, (- loss_grad), 10)
    # Scale the step so the quadratic KL estimate equals max_kl.
    shs = (0.5 * stepdir.dot(Fvp(stepdir)))
    lm = math.sqrt((max_kl / shs))
    fullstep = (stepdir * lm)
    expected_improve = (- loss_grad.dot(fullstep))
    prev_params = get_flat_params_from(policy_net)
    (success, new_params) = line_search(policy_net, get_loss, prev_params, fullstep, expected_improve)
    set_flat_params_to(policy_net, new_params)
    return success
|
def expert_reward(state, action):
    """Surrogate reward from the GAIL discriminator: ``-log(D(s, a))``.

    Relies on the module-level ``discrim_net``, ``tensor`` and ``dtype``.
    """
    pair = tensor(np.hstack([state, action]), dtype=dtype)
    with torch.no_grad():
        return -math.log(discrim_net(pair)[0].item())
|
def update_params(batch, i_iter):
    """One GAIL iteration on a sampled ``batch``: refresh the discriminator,
    then run mini-batch PPO on the policy/value networks.

    Relies on module-level globals (networks, optimizers, ``args``, ``device``,
    ``expert_traj``, ``optim_epochs``, ``optim_batch_size``, ...).
    """
    states = torch.from_numpy(np.stack(batch.state)).to(dtype).to(device)
    actions = torch.from_numpy(np.stack(batch.action)).to(dtype).to(device)
    rewards = torch.from_numpy(np.stack(batch.reward)).to(dtype).to(device)
    masks = torch.from_numpy(np.stack(batch.mask)).to(dtype).to(device)
    with torch.no_grad():
        values = value_net(states)
        # Log-probs under the pre-update policy, frozen for the PPO ratio.
        fixed_log_probs = policy_net.get_log_prob(states, actions)
    'get advantage estimation from the trajectories'
    (advantages, returns) = estimate_advantages(rewards, masks, values, args.gamma, args.tau, device)
    'update discriminator'
    for _ in range(1):
        expert_state_actions = torch.from_numpy(expert_traj).to(dtype).to(device)
        g_o = discrim_net(torch.cat([states, actions], 1))
        e_o = discrim_net(expert_state_actions)
        optimizer_discrim.zero_grad()
        # Discriminator targets: 1 for policy samples, 0 for expert samples.
        discrim_loss = (discrim_criterion(g_o, ones((states.shape[0], 1), device=device)) + discrim_criterion(e_o, zeros((expert_traj.shape[0], 1), device=device)))
        discrim_loss.backward()
        optimizer_discrim.step()
    'perform mini-batch PPO update'
    optim_iter_num = int(math.ceil((states.shape[0] / optim_batch_size)))
    for _ in range(optim_epochs):
        # Re-shuffle the rollout each epoch, then sweep it in mini-batches.
        perm = np.arange(states.shape[0])
        np.random.shuffle(perm)
        perm = LongTensor(perm).to(device)
        (states, actions, returns, advantages, fixed_log_probs) = (states[perm].clone(), actions[perm].clone(), returns[perm].clone(), advantages[perm].clone(), fixed_log_probs[perm].clone())
        for i in range(optim_iter_num):
            ind = slice((i * optim_batch_size), min(((i + 1) * optim_batch_size), states.shape[0]))
            (states_b, actions_b, advantages_b, returns_b, fixed_log_probs_b) = (states[ind], actions[ind], advantages[ind], returns[ind], fixed_log_probs[ind])
            ppo_step(policy_net, value_net, optimizer_policy, optimizer_value, 1, states_b, actions_b, returns_b, advantages_b, fixed_log_probs_b, args.clip_epsilon, args.l2_reg)
|
def main_loop():
    """GAIL training loop: alternate rollout collection with parameter
    updates, with periodic logging and checkpointing.

    Relies on module-level globals (``args``, ``agent``, ``device`` and the
    networks/optimizers). NOTE(review): another ``main_loop`` defined later in
    this module shadows this one -- they appear to come from different scripts.
    """
    for i_iter in range(args.max_iter_num):
        'generate multiple trajectories that reach the minimum batch_size'
        # Rollouts run on CPU workers; move the discriminator off-device first.
        discrim_net.to(torch.device('cpu'))
        batch, log = agent.collect_samples(args.min_batch_size)
        discrim_net.to(device)
        t0 = time.time()
        update_params(batch, i_iter)
        t1 = time.time()
        if i_iter % args.log_interval == 0:
            print('{}\tT_sample {:.4f}\tT_update {:.4f}\texpert_R_avg {:.2f}\tR_avg {:.2f}'.format(i_iter, log['sample_time'], (t1 - t0), log['avg_c_reward'], log['avg_reward']))
        if (args.save_model_interval > 0) and ((i_iter + 1) % args.save_model_interval == 0):
            to_device(torch.device('cpu'), policy_net, value_net, discrim_net)
            # Fix: close the checkpoint file deterministically -- the previous
            # code passed a bare open() to pickle.dump and leaked the handle.
            with open(os.path.join(assets_dir(), 'learned_models/{}_gail.p'.format(args.env_name)), 'wb') as checkpoint_file:
                pickle.dump((policy_net, value_net, discrim_net), checkpoint_file)
            to_device(device, policy_net, value_net, discrim_net)
        'clean up gpu memory'
        torch.cuda.empty_cache()
|
def main_loop():
    """Collect expert trajectories by rolling out the (deterministic mean of)
    ``policy_net`` until ``args.max_expert_state_num`` steps are recorded.

    Relies on module-level globals (``env``, ``policy_net``, ``running_state``,
    ``expert_traj``, ``args``, ``dtype``, ``is_disc_action``).
    NOTE(review): this redefines the ``main_loop`` above -- the two appear to
    belong to different scripts merged into one file.
    """
    num_steps = 0
    for i_episode in count():
        state = env.reset()
        state = running_state(state)
        reward_episode = 0
        for t in range(10000):  # hard cap on episode length
            state_var = tensor(state).unsqueeze(0).to(dtype)
            # Deterministic expert behavior: take the policy mean output.
            action = policy_net(state_var)[0][0].detach().numpy()
            action = (int(action) if is_disc_action else action.astype(np.float64))
            (next_state, reward, done, _) = env.step(action)
            next_state = running_state(next_state)
            reward_episode += reward
            num_steps += 1
            # Record the (state, action) pair for later discriminator training.
            expert_traj.append(np.hstack([state, action]))
            if args.render:
                env.render()
            if (done or (num_steps >= args.max_expert_state_num)):
                break
            state = next_state
        print('Episode {}\t reward: {:.2f}'.format(i_episode, reward_episode))
        if (num_steps >= args.max_expert_state_num):
            break
|
class Value(nn.Module):
    """State-value network V(s): an MLP with a scalar output head."""

    def __init__(self, state_dim, hidden_size=(128, 128), activation='tanh'):
        """
        Args:
            state_dim: dimensionality of the state input.
            hidden_size: widths of the hidden layers.
            activation: one of 'tanh', 'relu' or 'sigmoid'.

        Raises:
            ValueError: if ``activation`` is not a recognized name.
        """
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid
        else:
            # Fix: an unknown name previously left self.activation unset and
            # only failed later, inside forward(); fail fast instead.
            raise ValueError('unknown activation: {}'.format(activation))
        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh
        self.value_head = nn.Linear(last_dim, 1)
        # Shrink the head's initial weights (and zero the bias) so initial
        # value estimates start near zero.
        self.value_head.weight.data.mul_(0.1)
        self.value_head.bias.data.mul_(0.0)

    def forward(self, x):
        """Return V(x) with shape (batch, 1)."""
        for affine in self.affine_layers:
            x = self.activation(affine(x))
        return self.value_head(x)
|
class Discriminator(nn.Module):
    """MLP discriminator mapping an input vector to a probability in (0, 1)."""

    def __init__(self, num_inputs, hidden_size=(128, 128), activation='tanh'):
        """
        Args:
            num_inputs: dimensionality of the input vector.
            hidden_size: widths of the hidden layers.
            activation: one of 'tanh', 'relu' or 'sigmoid'.

        Raises:
            ValueError: if ``activation`` is not a recognized name.
        """
        super().__init__()
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid
        else:
            # Fix: an unknown name previously left self.activation unset and
            # only failed later, inside forward(); fail fast instead.
            raise ValueError('unknown activation: {}'.format(activation))
        self.affine_layers = nn.ModuleList()
        last_dim = num_inputs
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh
        self.logic = nn.Linear(last_dim, 1)
        # Shrink the head's initial weights (and zero the bias) so initial
        # outputs stay near 0.5 after the sigmoid.
        self.logic.weight.data.mul_(0.1)
        self.logic.bias.data.mul_(0.0)

    def forward(self, x):
        """Return sigmoid probabilities with shape (batch, 1)."""
        for affine in self.affine_layers:
            x = self.activation(affine(x))
        return torch.sigmoid(self.logic(x))
|
class Policy(nn.Module):
    """Gaussian policy: an MLP outputs the action mean, while a learned,
    state-independent log-std parameterizes the diagonal covariance."""

    def __init__(self, state_dim, action_dim, hidden_size=(128, 128), activation='tanh', log_std=0):
        """
        Args:
            state_dim: dimensionality of the state input.
            action_dim: dimensionality of the action output.
            hidden_size: widths of the hidden layers.
            activation: one of 'tanh', 'relu' or 'sigmoid'.
            log_std: initial value of the shared log standard deviation.

        Raises:
            ValueError: if ``activation`` is not a recognized name.
        """
        super().__init__()
        self.is_disc_action = False
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid
        else:
            # Fix: an unknown name previously left self.activation unset and
            # only failed later, inside forward(); fail fast instead.
            raise ValueError('unknown activation: {}'.format(activation))
        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh
        self.action_mean = nn.Linear(last_dim, action_dim)
        # Shrink the head's initial weights so initial actions stay near zero.
        self.action_mean.weight.data.mul_(0.1)
        self.action_mean.bias.data.mul_(0.0)
        self.action_log_std = nn.Parameter(torch.ones(1, action_dim) * log_std)

    def forward(self, x):
        """Return (action_mean, action_log_std, action_std) for states ``x``."""
        for affine in self.affine_layers:
            x = self.activation(affine(x))
        action_mean = self.action_mean(x)
        action_log_std = self.action_log_std.expand_as(action_mean)
        action_std = torch.exp(action_log_std)
        return action_mean, action_log_std, action_std

    def select_action(self, x):
        """Sample an action from the policy's Gaussian at states ``x``."""
        action_mean, _, action_std = self.forward(x)
        return torch.normal(action_mean, action_std)

    def get_kl(self, x):
        """Per-sample KL(old || new) where 'old' is the current distribution
        detached; the value is zero but carries gradients w.r.t. 'new'."""
        mean1, log_std1, std1 = self.forward(x)
        mean0 = mean1.detach()
        log_std0 = log_std1.detach()
        std0 = std1.detach()
        kl = log_std1 - log_std0 + (std0.pow(2) + (mean0 - mean1).pow(2)) / (2.0 * std1.pow(2)) - 0.5
        return kl.sum(1, keepdim=True)

    def get_log_prob(self, x, actions):
        """Log-probability of ``actions`` under the policy at states ``x``."""
        action_mean, action_log_std, action_std = self.forward(x)
        return normal_log_density(actions, action_mean, action_log_std, action_std)

    def get_fim(self, x):
        """Pieces for Fisher-vector products: the inverse covariance (tiled
        per sample), the mean outputs, and the index/offset of the
        ``action_log_std`` parameter in the flattened parameter vector."""
        mean, _, _ = self.forward(x)
        cov_inv = self.action_log_std.exp().pow(-2).squeeze(0).repeat(x.size(0))
        param_count = 0
        std_index = 0
        param_id = 0  # renamed from `id` to avoid shadowing the builtin
        for name, param in self.named_parameters():
            if name == 'action_log_std':
                std_id = param_id
                std_index = param_count
            param_count += param.view(-1).shape[0]
            param_id += 1
        return cov_inv.detach(), mean, {'std_id': std_id, 'std_index': std_index}
|
class DiscretePolicy(nn.Module):
    """Categorical policy: an MLP followed by a softmax over ``action_num``
    discrete actions."""

    def __init__(self, state_dim, action_num, hidden_size=(128, 128), activation='tanh'):
        """
        Args:
            state_dim: dimensionality of the state input.
            action_num: number of discrete actions.
            hidden_size: widths of the hidden layers.
            activation: one of 'tanh', 'relu' or 'sigmoid'.

        Raises:
            ValueError: if ``activation`` is not a recognized name.
        """
        super().__init__()
        self.is_disc_action = True
        if activation == 'tanh':
            self.activation = torch.tanh
        elif activation == 'relu':
            self.activation = torch.relu
        elif activation == 'sigmoid':
            self.activation = torch.sigmoid
        else:
            # Fix: an unknown name previously left self.activation unset and
            # only failed later, inside forward(); fail fast instead.
            raise ValueError('unknown activation: {}'.format(activation))
        self.affine_layers = nn.ModuleList()
        last_dim = state_dim
        for nh in hidden_size:
            self.affine_layers.append(nn.Linear(last_dim, nh))
            last_dim = nh
        self.action_head = nn.Linear(last_dim, action_num)
        # Shrink the head's initial weights so initial action probabilities
        # start near uniform.
        self.action_head.weight.data.mul_(0.1)
        self.action_head.bias.data.mul_(0.0)

    def forward(self, x):
        """Return per-action probabilities with shape (batch, action_num)."""
        for affine in self.affine_layers:
            x = self.activation(affine(x))
        return torch.softmax(self.action_head(x), dim=1)

    def select_action(self, x):
        """Sample one action index per row from the categorical distribution."""
        action_prob = self.forward(x)
        return action_prob.multinomial(1)

    def get_kl(self, x):
        """Per-sample KL(old || new) with 'old' detached; evaluates to zero
        but carries gradients w.r.t. the current parameters."""
        action_prob1 = self.forward(x)
        action_prob0 = action_prob1.detach()
        kl = action_prob0 * (torch.log(action_prob0) - torch.log(action_prob1))
        return kl.sum(1, keepdim=True)

    def get_log_prob(self, x, actions):
        """Log-probability of the given action indices at states ``x``."""
        action_prob = self.forward(x)
        return torch.log(action_prob.gather(1, actions.long().unsqueeze(1)))

    def get_fim(self, x):
        """FIM pieces for a categorical distribution: 1/p flattened, the
        probabilities themselves, and no extra parameter info."""
        action_prob = self.forward(x)
        M = action_prob.pow(-1).view(-1).detach()
        return M, action_prob, {}
|
def normal_entropy(std):
    """Entropy of a diagonal Gaussian with per-dimension std ``std``,
    summed over dimension 1 (one value per row)."""
    variance = std.pow(2)
    per_dim = 0.5 + 0.5 * torch.log(2 * variance * math.pi)
    return per_dim.sum(1, keepdim=True)
|
def normal_log_density(x, mean, log_std, std):
    """Log-density of ``x`` under a diagonal Gaussian, summed over
    dimension 1 (one value per row)."""
    variance = std.pow(2)
    per_dim = -(x - mean).pow(2) / (2 * variance) - 0.5 * math.log(2 * math.pi) - log_std
    return per_dim.sum(1, keepdim=True)
|
class Memory(object):
    """Flat buffer of ``Transition`` tuples collected during rollouts."""

    def __init__(self):
        self.memory = []

    def push(self, *args):
        """Saves a transition."""
        self.memory.append(Transition(*args))

    def sample(self, batch_size=None):
        """Return transposed transitions: the whole buffer when
        ``batch_size`` is None, otherwise a uniform random subset."""
        batch = self.memory if batch_size is None else random.sample(self.memory, batch_size)
        return Transition(*zip(*batch))

    def append(self, new_memory):
        """Absorb the contents of another Memory."""
        self.memory += new_memory.memory

    def __len__(self):
        return len(self.memory)
|
def assets_dir():
    """Absolute path of the repository's ``assets`` directory, resolved
    relative to this file's location."""
    here = path.dirname(path.abspath(__file__))
    return path.abspath(path.join(here, '../assets'))
|
def to_device(device, *args):
    """Move every item in ``args`` to ``device``; returns the moved items
    as a list (in the same order)."""
    return [item.to(device) for item in args]
|
def get_flat_params_from(model):
    """Concatenate all of the model's parameters into one flat 1-D tensor."""
    return torch.cat([p.view(-1) for p in model.parameters()])
|
def set_flat_params_to(model, flat_params):
    """Inverse of ``get_flat_params_from``: copy consecutive slices of the
    flat vector back into the model's parameters, in iteration order."""
    offset = 0
    for param in model.parameters():
        count = param.numel()
        param.data.copy_(flat_params[offset:offset + count].view(param.size()))
        offset += count
|
def get_flat_grad_from(inputs, grad_grad=False):
    """Flatten the gradients of ``inputs`` into one 1-D tensor.

    With ``grad_grad=True`` the second-order gradients (``param.grad.grad``)
    are taken instead; parameters with no gradient contribute zeros of
    matching size, so the result length is always the total parameter count.
    """
    pieces = []
    for param in inputs:
        if grad_grad:
            pieces.append(param.grad.grad.view(-1))
        elif param.grad is None:
            pieces.append(zeros(param.view(-1).shape))
        else:
            pieces.append(param.grad.view(-1))
    return torch.cat(pieces)
|
def compute_flat_grad(output, inputs, filter_input_ids=set(), retain_graph=False, create_graph=False):
    """Gradient of ``output`` w.r.t. ``inputs`` as one flat 1-D tensor.

    Inputs whose position appears in ``filter_input_ids`` are excluded from
    differentiation and contribute zero blocks of matching size in the result,
    so the output length always equals the total parameter count.

    NOTE(review): the mutable default ``filter_input_ids=set()`` is shared
    across calls; it is only read here, so this is safe as written.
    """
    if create_graph:
        # autograd requires the graph to be retained when building a new one.
        retain_graph = True
    inputs = list(inputs)
    params = []
    for (i, param) in enumerate(inputs):
        if (i not in filter_input_ids):
            params.append(param)
    grads = torch.autograd.grad(output, params, retain_graph=retain_graph, create_graph=create_graph)
    j = 0
    out_grads = []
    # Re-interleave computed grads with zero blocks for the filtered inputs.
    for (i, param) in enumerate(inputs):
        if (i in filter_input_ids):
            out_grads.append(zeros(param.view((- 1)).shape, device=param.device, dtype=param.dtype))
        else:
            out_grads.append(grads[j].view((- 1)))
            j += 1
    grads = torch.cat(out_grads)
    # Clear any .grad side effects so later backward() calls start clean.
    for param in params:
        param.grad = None
    return grads
|
class RunningStat(object):
    """Streaming mean/variance tracker (Welford's online algorithm)."""

    def __init__(self, shape):
        self._n = 0                 # number of samples seen
        self._M = np.zeros(shape)   # running mean
        self._S = np.zeros(shape)   # running sum of squared deviations

    def push(self, x):
        """Incorporate one sample of the configured shape."""
        x = np.asarray(x)
        assert x.shape == self._M.shape
        self._n += 1
        if self._n == 1:
            self._M[...] = x
        else:
            old_mean = self._M.copy()
            self._M[...] = old_mean + (x - old_mean) / self._n
            self._S[...] = self._S + (x - old_mean) * (x - self._M)

    @property
    def n(self):
        return self._n

    @property
    def mean(self):
        return self._M

    @property
    def var(self):
        # Sample variance; before two samples exist fall back to mean**2.
        return self._S / (self._n - 1) if self._n > 1 else np.square(self._M)

    @property
    def std(self):
        return np.sqrt(self.var)

    @property
    def shape(self):
        return self._M.shape
|
class ZFilter():
    """
    y = (x - mean) / std, using running estimates of mean and std.
    """

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)
        self.fix = False  # when True, the running statistics stop updating

    def __call__(self, x, update=True):
        if update and not self.fix:
            self.rs.push(x)
        if self.demean:
            x = x - self.rs.mean
        if self.destd:
            # Small epsilon keeps the division finite before any spread exists.
            x = x / (self.rs.std + 1e-08)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x
|
class RAdam(Optimizer):
    """Implements the RAdam (rectified Adam) optimization algorithm.

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-3)
        betas: coefficients used for computing running averages of gradient
            and its square (default: (0.9, 0.999))
        eps: term added to the denominator to improve numerical stability
            (default: 1e-8)
        weight_decay: weight decay (L2 penalty) (default: 0)

    Example:
        >>> optimizer = optim.RAdam(model.parameters(), lr=0.1)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    Note:
        Reference code: https://github.com/LiyuanLucasLiu/RAdam
    """

    def __init__(self, params: Params, lr: float=0.001, betas: Betas2=(0.9, 0.999), eps: float=1e-08, weight_decay: float=0) -> None:
        # Validate hyper-parameters up front.
        if (lr <= 0.0):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (eps < 0.0):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (weight_decay < 0):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        # Param groups with their own betas get a private rectification buffer,
        # since the cached N_sma/step_size values depend on the betas.
        if (isinstance(params, (list, tuple)) and (len(params) > 0) and isinstance(params[0], dict)):
            for param in params:
                if (('betas' in param) and ((param['betas'][0] != betas[0]) or (param['betas'][1] != betas[1]))):
                    param['buffer'] = [[None, None, None] for _ in range(10)]
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, buffer=[[None, None, None] for _ in range(10)])
        super(RAdam, self).__init__(params, defaults)

    def __setstate__(self, state):
        super(RAdam, self).__setstate__(state)

    def step(self, closure: OptLossClosure=None) -> OptFloat:
        """Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            lr = group['lr']
            weight_decay = group['weight_decay']
            (beta1, beta2) = group['betas']
            eps = group['eps']
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data.float()
                if grad.is_sparse:
                    msg = 'RAdam does not support sparse gradients, please consider SparseAdam instead'
                    raise RuntimeError(msg)
                # All update math is done in fp32, even for lower-precision params.
                p_data_fp32 = p.data.float()
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy per-parameter state initialization on first use.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p_data_fp32)
                    state['exp_avg_sq'] = torch.zeros_like(p_data_fp32)
                else:
                    state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
                    state['exp_avg_sq'] = state['exp_avg_sq'].type_as(p_data_fp32)
                (exp_avg, exp_avg_sq) = (state['exp_avg'], state['exp_avg_sq'])
                # Standard Adam first/second moment updates.
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                state['step'] += 1
                # N_sma and step_size depend only on the step count and betas,
                # so they are cached in a 10-slot ring buffer keyed by step % 10.
                buffered = group['buffer'][int((state['step'] % 10))]
                if (state['step'] == buffered[0]):
                    (N_sma, step_size) = (buffered[1], buffered[2])
                else:
                    buffered[0] = state['step']
                    beta2_t = (beta2 ** state['step'])
                    N_sma_max = ((2 / (1 - beta2)) - 1)
                    N_sma = (N_sma_max - (((2 * state['step']) * beta2_t) / (1 - beta2_t)))
                    buffered[1] = N_sma
                    # Variance rectification: use the adaptive step only once
                    # the SMA length exceeds the threshold of 5.
                    if (N_sma >= 5):
                        step_size = ((lr * math.sqrt((((((((1 - beta2_t) * (N_sma - 4)) / (N_sma_max - 4)) * (N_sma - 2)) / N_sma) * N_sma_max) / (N_sma_max - 2)))) / (1 - (beta1 ** state['step'])))
                    else:
                        step_size = (lr / (1 - (beta1 ** state['step'])))
                    buffered[2] = step_size
                if (weight_decay != 0):
                    # L2 penalty applied directly to the fp32 weights.
                    p_data_fp32.add_(p_data_fp32, alpha=((- weight_decay) * lr))
                if (N_sma >= 5):
                    # Rectified adaptive update.
                    denom = exp_avg_sq.sqrt().add_(eps)
                    p_data_fp32.addcdiv_(exp_avg, denom, value=(- step_size))
                else:
                    # Unrectified fallback: momentum-only update.
                    p_data_fp32.add_(exp_avg, alpha=(- step_size))
                p.data.copy_(p_data_fp32)
        return loss
|
class DiffGrad(Optimizer):
    """Implements the DiffGrad algorithm (Adam with gradient-difference friction).

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-3)
        betas: coefficients used for computing running averages of gradient
            and its square (default: (0.9, 0.999))
        eps: term added to the denominator to improve numerical stability
            (default: 1e-8)
        weight_decay: weight decay (L2 penalty) (default: 0)

    Example:
        >>> optimizer = optim.DiffGrad(model.parameters(), lr=0.1)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    https://arxiv.org/abs/1909.11015

    Note:
        Reference code: https://github.com/shivram1987/diffGrad
    """

    def __init__(self, params: Params, lr: float=0.001, betas: Betas2=(0.9, 0.999), eps: float=1e-08, weight_decay: float=0.0) -> None:
        # Validate hyper-parameters before handing them to the base Optimizer.
        if (lr <= 0.0):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (eps < 0.0):
            raise ValueError('Invalid epsilon value: {}'.format(eps))
        if (not (0.0 <= betas[0] < 1.0)):
            raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
        if (not (0.0 <= betas[1] < 1.0)):
            raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
        if (weight_decay < 0.0):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay)
        super(DiffGrad, self).__init__(params, defaults)

    def step(self, closure: OptLossClosure=None) -> OptFloat:
        """Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure given.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            (beta1, beta2) = group['betas']
            for p in group['params']:
                if (p.grad is None):
                    continue
                grad = p.grad.data
                if grad.is_sparse:
                    msg = 'DiffGrad does not support sparse gradients, please consider SparseAdam instead'
                    raise RuntimeError(msg)
                state = self.state[p]
                if (len(state) == 0):
                    # Lazy state init: Adam moments plus the previous gradient
                    # needed for the friction coefficient.
                    state['step'] = 0
                    state['exp_avg'] = torch.zeros_like(p)
                    state['exp_avg_sq'] = torch.zeros_like(p)
                    state['previous_grad'] = torch.zeros_like(p)
                (exp_avg, exp_avg_sq, previous_grad) = (state['exp_avg'], state['exp_avg_sq'], state['previous_grad'])
                state['step'] += 1
                if (group['weight_decay'] != 0):
                    # Classic (coupled) L2 regularisation folded into the gradient.
                    grad.add_(p.data, alpha=group['weight_decay'])
                # Adam moment updates (in place).
                exp_avg.mul_(beta1).add_(grad, alpha=(1 - beta1))
                exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=(1 - beta2))
                denom = exp_avg_sq.sqrt().add_(group['eps'])
                bias_correction1 = (1 - (beta1 ** state['step']))
                bias_correction2 = (1 - (beta2 ** state['step']))
                # diffGrad friction coefficient: sigmoid of |g_{t-1} - g_t|.
                # Small gradient change -> dfc near 0.5 (damped update);
                # large change -> dfc near 1 (Adam-like update).
                diff = torch.abs((previous_grad - grad))
                dfc = torch.div(1.0, (1.0 + torch.exp((- diff))))
                # clone() so the stored copy is not aliased to p.grad.data.
                state['previous_grad'] = grad.clone()
                exp_avg1 = (exp_avg * dfc)
                step_size = ((group['lr'] * math.sqrt(bias_correction2)) / bias_correction1)
                p.data.addcdiv_(exp_avg1, denom, value=(- step_size))
        return loss
|
class SGDW(Optimizer):
    """Implements SGDW (SGD with decoupled weight decay).
    https://arxiv.org/abs/1711.05101

    Arguments:
        params: iterable of parameters to optimize or dicts defining
            parameter groups
        lr: learning rate (default: 1e-3)
        momentum: momentum factor (default: 0)
        dampening: dampening for momentum (default: 0)
        weight_decay: decoupled weight decay factor (default: 0)
        nesterov: enables Nesterov momentum (default: False)

    Example:
        >>> optimizer = optim.SGDW(model.parameters(), lr=0.1, momentum=0.9)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()

    Note:
        Reference code: https://github.com/pytorch/pytorch/pull/22466
    """

    def __init__(self, params: Params, lr: float=0.001, momentum: float=0.0, dampening: float=0.0, weight_decay: float=0.0, nesterov: bool=False) -> None:
        # Validate hyper-parameters before handing them to the base Optimizer.
        if (lr <= 0.0):
            raise ValueError('Invalid learning rate: {}'.format(lr))
        if (momentum < 0.0):
            raise ValueError('Invalid momentum value: {}'.format(momentum))
        if (dampening < 0.0):
            raise ValueError('Invalid dampening value: {}'.format(dampening))
        if (weight_decay < 0.0):
            raise ValueError('Invalid weight_decay value: {}'.format(weight_decay))
        if (nesterov and ((momentum <= 0) or (dampening != 0))):
            raise ValueError('Nesterov momentum requires a momentum and zero dampening')
        defaults = dict(lr=lr, momentum=momentum, dampening=dampening, weight_decay=weight_decay, nesterov=nesterov)
        super(SGDW, self).__init__(params, defaults)

    def __setstate__(self, state: State) -> None:
        super(SGDW, self).__setstate__(state)
        for group in self.param_groups:
            # Older checkpoints may predate the `nesterov` option.
            group.setdefault('nesterov', False)

    def step(self, closure: OptLossClosure=None) -> OptFloat:
        """Performs a single optimization step.

        Arguments:
            closure: A closure that reevaluates the model and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` if no closure given.
        """
        loss = None
        if (closure is not None):
            loss = closure()
        for group in self.param_groups:
            weight_decay = group['weight_decay']
            momentum = group['momentum']
            dampening = group['dampening']
            nesterov = group['nesterov']
            for p in group['params']:
                if (p.grad is None):
                    continue
                d_p = p.grad.data
                if p.grad.is_sparse:
                    msg = 'SGDW does not support sparse gradients, please consider SparseAdam instead'
                    raise RuntimeError(msg)
                if (momentum != 0):
                    param_state = self.state[p]
                    if ('momentum_buffer' not in param_state):
                        # First step: seed the buffer with the raw gradient.
                        buf = param_state['momentum_buffer'] = torch.clone(d_p).detach()
                    else:
                        buf = param_state['momentum_buffer']
                        buf.mul_(momentum).add_(d_p, alpha=(1 - dampening))
                    if nesterov:
                        # Keyword `alpha` form (the positional-alpha overload
                        # `d_p.add(momentum, buf)` is deprecated).
                        d_p = d_p.add(buf, alpha=momentum)
                    else:
                        d_p = buf
                p.data.add_(d_p, alpha=(- group['lr']))
                if (weight_decay != 0):
                    # Decoupled weight decay: w <- w - lr * wd * w.
                    # (Previously `p.data.add_(weight_decay, alpha=-lr)` added
                    # the *scalar* weight_decay to every weight, shifting all
                    # parameters by a constant instead of decaying them.)
                    p.data.add_(p.data, alpha=(- group['lr'] * weight_decay))
        return loss
|
class Dice(nn.Module):
    """The Data Adaptive Activation Function in DIN, which can be viewed as a
    generalization of PReLU and can adaptively adjust the rectified point
    according to the distribution of input data.

    Input shape:
        - 2 dims: [batch_size, embedding_size(features)]
        - 3 dims: [batch_size, num_features, embedding_size(features)]

    Output shape:
        - Same shape as input.

    References
        - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through
          rate prediction[C]//KDD 2018: 1059-1068.](https://arxiv.org/pdf/1706.06978.pdf)
        - https://github.com/zhougr1993/DeepInterestNetwork,
          https://github.com/fanoping/DIN-pytorch
    """

    def __init__(self, emb_size, dim=2, epsilon=1e-08, device='cpu'):
        super(Dice, self).__init__()
        assert ((dim == 2) or (dim == 3))
        self.bn = nn.BatchNorm1d(emb_size, eps=epsilon)
        self.sigmoid = nn.Sigmoid()
        self.dim = dim
        # `alpha` is the learnable slope for the negative part (the adaptive
        # rectified point of the paper). It must be registered as an
        # nn.Parameter so it is actually trained and serialized; previously it
        # was a plain tensor and stayed at zero forever.
        if (self.dim == 2):
            self.alpha = nn.Parameter(torch.zeros((emb_size,)).to(device))
        else:
            self.alpha = nn.Parameter(torch.zeros((emb_size, 1)).to(device))

    def forward(self, x):
        assert (x.dim() == self.dim)
        if (self.dim == 2):
            # p(x) from batch-normalized input; blend identity and alpha-scaled
            # branches: out = alpha*(1-p)*x + p*x.
            x_p = self.sigmoid(self.bn(x))
            out = (((self.alpha * (1 - x_p)) * x) + (x_p * x))
        else:
            # BatchNorm1d expects channels at dim 1, so swap the feature and
            # embedding axes around the normalization.
            x = torch.transpose(x, 1, 2)
            x_p = self.sigmoid(self.bn(x))
            out = (((self.alpha * (1 - x_p)) * x) + (x_p * x))
            out = torch.transpose(out, 1, 2)
        return out
|
class Identity(nn.Module):
    """Pass-through layer: returns its input unchanged.

    Used as the 'linear' activation; accepts and ignores arbitrary keyword
    arguments for interface compatibility with other activation layers.
    """

    def __init__(self, **kwargs):
        super(Identity, self).__init__()

    def forward(self, X):
        # Identity mapping — no computation, no copy.
        return X
|
def activation_layer(act_name, hidden_size=None, dice_dim=2):
    """Construct an activation layer.

    Args:
        act_name: str name of the activation ('sigmoid', 'linear', 'relu',
            'dice', 'prelu') or an nn.Module subclass to instantiate.
        hidden_size: int, embedding size forwarded to Dice.
        dice_dim: int, input rank forwarded to Dice.

    Returns:
        act_layer: the instantiated activation layer (nn.Module).

    Raises:
        NotImplementedError: if the name/class is not recognized.
    """
    if isinstance(act_name, str):
        name = act_name.lower()
        if (name == 'sigmoid'):
            act_layer = nn.Sigmoid()
        elif (name == 'linear'):
            act_layer = Identity()
        elif (name == 'relu'):
            act_layer = nn.ReLU(inplace=True)
        elif (name == 'dice'):
            assert dice_dim
            act_layer = Dice(hidden_size, dice_dim)
        elif (name == 'prelu'):
            act_layer = nn.PReLU()
        else:
            # Previously an unrecognized string fell through the chain and the
            # function crashed with UnboundLocalError at `return`; fail with
            # the intended, explicit error instead.
            raise NotImplementedError
    elif issubclass(act_name, nn.Module):
        act_layer = act_name()
    else:
        raise NotImplementedError
    return act_layer
|
class LocalActivationUnit(nn.Module):
    """The LocalActivationUnit used in DIN: scores each behavior embedding
    against a candidate item so user-interest representations adapt to the
    candidate.

    Input shape
        - A list of two 3D tensors with shapes ``(batch_size, 1, embedding_size)``
          and ``(batch_size, T, embedding_size)``.

    Output shape
        - 3D tensor with shape ``(batch_size, T, 1)``.

    Arguments
        - **hidden_units**: list of positive integers, the attention net layer
          number and units in each layer.
        - **activation**: activation function used in the attention net.
        - **l2_reg**: float in [0, 1]. L2 regularizer strength for the
          attention net kernels.
        - **dropout_rate**: float in [0, 1). Dropout fraction in the attention net.
        - **use_bn**: bool, whether to BatchNormalize before activation.

    References
        - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through
          rate prediction, KDD 2018.](https://arxiv.org/pdf/1706.06978.pdf)
    """

    def __init__(self, hidden_units=(64, 32), embedding_dim=4, activation='sigmoid', dropout_rate=0, dice_dim=3, l2_reg=0, use_bn=False):
        super(LocalActivationUnit, self).__init__()
        # The attention MLP consumes [q, k, q-k, q*k], hence 4x embedding_dim.
        self.dnn = DNN(inputs_dim=(4 * embedding_dim), hidden_units=hidden_units, activation=activation, l2_reg=l2_reg, dropout_rate=dropout_rate, dice_dim=dice_dim, use_bn=use_bn)
        self.dense = nn.Linear(hidden_units[-1], 1)

    def forward(self, query, user_behavior):
        # Tile the single candidate embedding along the behavior axis so each
        # behavior is compared against the same query.
        seq_len = user_behavior.size(1)
        tiled_query = query.expand(-1, seq_len, -1)
        din_input = torch.cat(
            [tiled_query, user_behavior, (tiled_query - user_behavior), (tiled_query * user_behavior)],
            dim=-1,
        )
        hidden = self.dnn(din_input)
        return self.dense(hidden)
|
class DNN(nn.Module):
    """Plain multi-layer perceptron.

    Input shape
        - nD tensor ``(batch_size, ..., input_dim)``; most commonly 2D
          ``(batch_size, input_dim)``.

    Output shape
        - nD tensor ``(batch_size, ..., hidden_units[-1])``.

    Arguments
        - **inputs_dim**: input feature dimension.
        - **hidden_units**: list of positive integers, the layer number and
          units in each layer.
        - **activation**: activation function to use.
        - **l2_reg**: float in [0, 1]. L2 regularizer strength for the kernels.
        - **dropout_rate**: float in [0, 1). Fraction of units to drop.
        - **use_bn**: bool, whether to BatchNormalize before activation.
        - **seed**: a Python integer to use as random seed.
    """

    def __init__(self, inputs_dim, hidden_units, activation='relu', l2_reg=0, dropout_rate=0, use_bn=False, init_std=0.0001, dice_dim=3, seed=1024, device='cpu'):
        super(DNN, self).__init__()
        self.dropout_rate = dropout_rate
        self.dropout = nn.Dropout(dropout_rate)
        self.seed = seed
        self.l2_reg = l2_reg
        self.use_bn = use_bn
        if (len(hidden_units) == 0):
            raise ValueError('hidden_units is empty!!')
        dims = ([inputs_dim] + list(hidden_units))
        # One Linear per consecutive (d_in, d_out) pair.
        self.linears = nn.ModuleList(
            nn.Linear(d_in, d_out) for (d_in, d_out) in zip(dims[:-1], dims[1:])
        )
        if self.use_bn:
            self.bn = nn.ModuleList(nn.BatchNorm1d(d) for d in dims[1:])
        self.activation_layers = nn.ModuleList(
            activation_layer(activation, d, dice_dim) for d in dims[1:]
        )
        # Small-std normal init for the kernels only (biases keep defaults).
        for (name, tensor) in self.linears.named_parameters():
            if ('weight' in name):
                nn.init.normal_(tensor, mean=0, std=init_std)
        self.to(device)

    def forward(self, inputs):
        x = inputs
        for idx, linear in enumerate(self.linears):
            x = linear(x)
            if self.use_bn:
                x = self.bn[idx](x)
            x = self.activation_layers[idx](x)
            x = self.dropout(x)
        return x
|
class PredictionLayer(nn.Module):
    """Final output transform.

    Arguments
        - **task**: str, ``"binary"`` (sigmoid output), ``"multiclass"`` or
          ``"regression"`` (raw output).
        - **use_bias**: bool, whether to add a learnable scalar bias.

    Raises:
        ValueError: if ``task`` is not one of the supported names.
    """

    def __init__(self, task='binary', use_bias=True, **kwargs):
        if (task not in ['binary', 'multiclass', 'regression']):
            raise ValueError('task must be binary,multiclass or regression')
        super(PredictionLayer, self).__init__()
        self.use_bias = use_bias
        self.task = task
        if self.use_bias:
            self.bias = nn.Parameter(torch.zeros((1,)))

    def forward(self, X):
        output = X
        if self.use_bias:
            # Out-of-place add: the previous in-place `output += self.bias`
            # mutated the caller's tensor (output aliases X) and can raise on
            # autograd non-leaf inputs.
            output = output + self.bias
        if (self.task == 'binary'):
            output = torch.sigmoid(output)
        return output
|
class Conv2dSame(nn.Conv2d):
    """TensorFlow-style 'SAME' padding wrapper for 2D convolutions.

    The ``padding`` constructor argument is accepted for signature
    compatibility but ignored: effective padding is computed dynamically in
    ``forward`` from the input size, kernel, stride and dilation.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
        # Base conv is constructed with zero padding; we pad manually below.
        super(Conv2dSame, self).__init__(in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias)
        nn.init.xavier_uniform_(self.weight)

    def forward(self, x):
        in_h, in_w = x.size()[-2:]
        k_h, k_w = self.weight.size()[-2:]
        s_h, s_w = self.stride
        d_h, d_w = self.dilation
        # Output size under SAME semantics: ceil(input / stride).
        out_h = math.ceil(in_h / s_h)
        out_w = math.ceil(in_w / s_w)
        pad_h = max((out_h - 1) * s_h + (k_h - 1) * d_h + 1 - in_h, 0)
        pad_w = max((out_w - 1) * s_w + (k_w - 1) * d_w + 1 - in_w, 0)
        if pad_h > 0 or pad_w > 0:
            # Split asymmetric padding TF-style: extra pixel goes to the
            # right/bottom side.
            left = pad_w // 2
            top = pad_h // 2
            x = F.pad(x, [left, pad_w - left, top, pad_h - top])
        return F.conv2d(x, self.weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
|
class FM(nn.Module):
    """Factorization Machine pairwise (order-2) interaction term, without the
    linear term and bias.

    Input shape
        - 3D tensor ``(batch_size, field_size, embedding_size)``.
    Output shape
        - 2D tensor ``(batch_size, 1)``.
    References
        - [Factorization Machines](https://www.csie.ntu.edu.tw/~b97053/paper/Rendle2010FM.pdf)
    """

    def __init__(self):
        super(FM, self).__init__()

    def forward(self, inputs):
        # Uses the identity sum_{i<j} <v_i, v_j> = 0.5 * ((sum v)^2 - sum v^2),
        # reduced over the embedding dimension.
        field_sum = torch.sum(inputs, dim=1, keepdim=True)
        square_of_sum = field_sum * field_sum
        sum_of_square = torch.sum(inputs * inputs, dim=1, keepdim=True)
        return 0.5 * torch.sum(square_of_sum - sum_of_square, dim=2, keepdim=False)
|
class BiInteractionPooling(nn.Module):
    """Bi-Interaction Layer used in Neural FM: compresses the pairwise
    element-wise products of the field embeddings into a single vector.

    Input shape
        - 3D tensor ``(batch_size, field_size, embedding_size)``.
    Output shape
        - 3D tensor ``(batch_size, 1, embedding_size)``.
    References
        - [He X, Chua T S. Neural factorization machines for sparse predictive
          analytics, SIGIR 2017.](http://arxiv.org/abs/1708.05027)
    """

    def __init__(self):
        super(BiInteractionPooling, self).__init__()

    def forward(self, inputs):
        # 0.5 * ((sum_i v_i)^2 - sum_i v_i^2), element-wise per embedding dim —
        # same trick as FM but without the final reduction.
        field_sum = torch.sum(inputs, dim=1, keepdim=True)
        square_of_sum = field_sum * field_sum
        sum_of_square = torch.sum(inputs * inputs, dim=1, keepdim=True)
        return 0.5 * (square_of_sum - sum_of_square)
|
class SENETLayer(nn.Module):
    """SENET layer used in FiBiNET: re-weights each feature field with a
    squeeze-and-excitation attention score.

    Input shape
        - 3D tensor ``(batch_size, filed_size, embedding_size)``.
    Output shape
        - 3D tensor ``(batch_size, filed_size, embedding_size)``.
    Arguments
        - **filed_size**: positive integer, number of feature groups.
        - **reduction_ratio**: positive integer, bottleneck reduction factor of
          the excitation network.
        - **seed**: a Python integer to use as random seed.
    References
        - [FiBiNET: Combining Feature Importance and Bilinear feature
          Interaction for CTR Prediction](https://arxiv.org/pdf/1905.09433.pdf)
    """

    def __init__(self, filed_size, reduction_ratio=3, seed=1024, device='cpu'):
        super(SENETLayer, self).__init__()
        self.seed = seed
        self.filed_size = filed_size
        # Bottleneck width, never smaller than one unit.
        self.reduction_size = max(1, filed_size // reduction_ratio)
        self.excitation = nn.Sequential(
            nn.Linear(self.filed_size, self.reduction_size, bias=False),
            nn.ReLU(),
            nn.Linear(self.reduction_size, self.filed_size, bias=False),
            nn.ReLU(),
        )
        self.to(device)

    def forward(self, inputs):
        if (inputs.dim() != 3):
            raise ValueError(('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape)))
        # Squeeze: mean over the embedding axis -> one scalar per field.
        squeezed = torch.mean(inputs, dim=-1)
        # Excite: per-field attention weights from the bottleneck MLP.
        field_weights = self.excitation(squeezed)
        return inputs * field_weights.unsqueeze(2)
|
class BilinearInteraction(nn.Module):
    """Bilinear interaction layer used in FiBiNET: combines every pair of
    fields with a (shared, per-field, or per-pair) bilinear product.

    Input shape
        - 3D tensor ``(batch_size, filed_size, embedding_size)``.
    Output shape
        - 3D tensor ``(batch_size, filed_size*(filed_size-1)/2, embedding_size)``.
    Arguments
        - **filed_size**: positive integer, number of feature groups.
        - **bilinear_type**: str, one of ``'all'`` (one shared weight),
          ``'each'`` (one weight per field) or ``'interaction'`` (one weight
          per field pair).
        - **seed**: a Python integer to use as random seed.
    References
        - [FiBiNET: Combining Feature Importance and Bilinear feature
          Interaction for CTR Prediction](https://arxiv.org/pdf/1905.09433.pdf)
    """

    def __init__(self, filed_size, embedding_size, bilinear_type='interaction', seed=1024, device='cpu'):
        super(BilinearInteraction, self).__init__()
        self.bilinear_type = bilinear_type
        self.seed = seed
        self.bilinear = nn.ModuleList()
        if (self.bilinear_type == 'all'):
            # Single weight shared across every pair.
            self.bilinear = nn.Linear(embedding_size, embedding_size, bias=False)
        elif (self.bilinear_type == 'each'):
            # One weight per field (applied to the first field of each pair).
            self.bilinear = nn.ModuleList(
                nn.Linear(embedding_size, embedding_size, bias=False)
                for _ in range(filed_size)
            )
        elif (self.bilinear_type == 'interaction'):
            # One weight per unordered field pair.
            self.bilinear = nn.ModuleList(
                nn.Linear(embedding_size, embedding_size, bias=False)
                for _ in itertools.combinations(range(filed_size), 2)
            )
        else:
            raise NotImplementedError
        self.to(device)

    def forward(self, inputs):
        if (inputs.dim() != 3):
            raise ValueError(('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape)))
        fields = torch.split(inputs, 1, dim=1)
        if (self.bilinear_type == 'all'):
            products = [self.bilinear(a) * b for (a, b) in itertools.combinations(fields, 2)]
        elif (self.bilinear_type == 'each'):
            products = [
                self.bilinear[i](fields[i]) * fields[j]
                for (i, j) in itertools.combinations(range(len(fields)), 2)
            ]
        elif (self.bilinear_type == 'interaction'):
            products = [
                layer(pair[0]) * pair[1]
                for (pair, layer) in zip(itertools.combinations(fields, 2), self.bilinear)
            ]
        else:
            raise NotImplementedError
        return torch.cat(products, dim=1)
|
class CIN(nn.Module):
    """Compressed Interaction Network used in xDeepFM.

    Input shape
        - 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, featuremap_num)``,
          ``featuremap_num = sum(layer_size[:-1]) // 2 + layer_size[-1]`` if
          ``split_half=True``, else ``sum(layer_size)``.
    Arguments
        - **field_size**: positive integer, number of feature groups.
        - **layer_size**: list of int, feature maps in each layer.
        - **activation**: activation function name used on feature maps.
        - **split_half**: bool; if True only half of each hidden layer's
          feature maps connect to the output unit.
        - **seed**: a Python integer to use as random seed.
    References
        - [Lian J, et al. xDeepFM: Combining Explicit and Implicit Feature
          Interactions for Recommender Systems.](https://arxiv.org/pdf/1803.05170.pdf)
    """

    def __init__(self, field_size, layer_size=(128, 128), activation='relu', split_half=True, l2_reg=1e-05, seed=1024, device='cpu'):
        super(CIN, self).__init__()
        if (len(layer_size) == 0):
            raise ValueError('layer_size must be a list(tuple) of length greater than 1')
        self.layer_size = layer_size
        # field_nums[k] = number of feature maps feeding layer k.
        self.field_nums = [field_size]
        self.split_half = split_half
        self.activation = activation_layer(activation)
        self.l2_reg = l2_reg
        self.seed = seed
        self.conv1ds = nn.ModuleList()
        for (i, size) in enumerate(self.layer_size):
            # A 1x1 Conv1d implements the "compression": it mixes the
            # field_nums[-1] * field_nums[0] outer-product channels down to
            # `size` feature maps.
            self.conv1ds.append(nn.Conv1d((self.field_nums[(- 1)] * self.field_nums[0]), size, 1))
            if self.split_half:
                # With split_half, half of each intermediate layer's maps feed
                # the next layer and the other half go straight to the output,
                # so intermediate sizes must be even.
                if ((i != (len(self.layer_size) - 1)) and ((size % 2) > 0)):
                    raise ValueError('layer_size must be even number except for the last layer when split_half=True')
                self.field_nums.append((size // 2))
            else:
                self.field_nums.append(size)
        self.to(device)

    def forward(self, inputs):
        if (len(inputs.shape) != 3):
            raise ValueError(('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape)))
        batch_size = inputs.shape[0]
        dim = inputs.shape[(- 1)]
        hidden_nn_layers = [inputs]
        final_result = []
        for (i, size) in enumerate(self.layer_size):
            # Outer product along the embedding axis between the previous
            # hidden state and the raw input:
            # (b, h, d) x (b, m, d) -> (b, h, m, d).
            x = torch.einsum('bhd,bmd->bhmd', hidden_nn_layers[(- 1)], hidden_nn_layers[0])
            # Flatten the (h, m) pair axes into channels for the 1x1 conv.
            x = x.reshape(batch_size, (hidden_nn_layers[(- 1)].shape[1] * hidden_nn_layers[0].shape[1]), dim)
            x = self.conv1ds[i](x)
            # NOTE(review): self.activation is a module, so the == 'linear'
            # comparison is always False here; the string case is effectively
            # handled by activation_layer at construction time.
            if ((self.activation is None) or (self.activation == 'linear')):
                curr_out = x
            else:
                curr_out = self.activation(x)
            if self.split_half:
                if (i != (len(self.layer_size) - 1)):
                    # Route half the maps onward, half directly to the output.
                    (next_hidden, direct_connect) = torch.split(curr_out, (2 * [(size // 2)]), 1)
                else:
                    direct_connect = curr_out
                    next_hidden = 0
            else:
                direct_connect = curr_out
                next_hidden = curr_out
            final_result.append(direct_connect)
            hidden_nn_layers.append(next_hidden)
        result = torch.cat(final_result, dim=1)
        # Sum-pool over the embedding dimension -> (batch, featuremap_num).
        result = torch.sum(result, (- 1))
        return result
|
class AFMLayer(nn.Module):
    """Attentional Factorization Machine: models pairwise (order-2) feature
    interactions without the linear term and bias, weighting each interaction
    with a learned attention score.

    Input shape
        - A list of 3D tensors with shape: ``(batch_size, 1, embedding_size)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, 1)``.
    Arguments
        - **in_features**: positive integer, dimensionality of input features.
        - **attention_factor**: positive integer, dimensionality of the
          attention network output space.
        - **l2_reg_w**: float in [0, 1]. L2 regularizer strength applied to
          the attention network.
        - **dropout_rate**: float in [0, 1). Fraction of the attention net
          output units to drop.
        - **seed**: a Python integer to use as random seed.
    References
        - [Attentional Factorization Machines: Learning the Weight of Feature
          Interactions via Attention Networks](https://arxiv.org/pdf/1708.04617.pdf)
    """

    def __init__(self, in_features, attention_factor=4, l2_reg_w=0, dropout_rate=0, seed=1024, device='cpu'):
        super(AFMLayer, self).__init__()
        self.attention_factor = attention_factor
        self.l2_reg_w = l2_reg_w
        self.dropout_rate = dropout_rate
        self.seed = seed
        embedding_size = in_features
        # Attention network parameters: score each pairwise interaction vector.
        self.attention_W = nn.Parameter(torch.Tensor(embedding_size, self.attention_factor))
        self.attention_b = nn.Parameter(torch.Tensor(self.attention_factor))
        self.projection_h = nn.Parameter(torch.Tensor(self.attention_factor, 1))
        # Projects the attended interaction vector to the scalar output.
        self.projection_p = nn.Parameter(torch.Tensor(embedding_size, 1))
        for tensor in [self.attention_W, self.projection_h, self.projection_p]:
            nn.init.xavier_normal_(tensor)
        for tensor in [self.attention_b]:
            nn.init.zeros_(tensor)
        self.dropout = nn.Dropout(dropout_rate)
        self.to(device)

    def forward(self, inputs):
        """Maps a list of (batch, 1, embedding_size) field embeddings to a
        (batch, 1) attended interaction score."""
        embeds_vec_list = inputs
        row = []
        col = []
        # Enumerate every unordered pair of fields once.
        for (r, c) in itertools.combinations(embeds_vec_list, 2):
            row.append(r)
            col.append(c)
        p = torch.cat(row, dim=1)
        q = torch.cat(col, dim=1)
        # Element-wise product of each pair -> (batch, n_pairs, embedding_size).
        inner_product = (p * q)
        bi_interaction = inner_product
        attention_temp = F.relu((torch.tensordot(bi_interaction, self.attention_W, dims=([(- 1)], [0])) + self.attention_b))
        # Softmax over the pair axis; kept on self for later inspection.
        self.normalized_att_score = F.softmax(torch.tensordot(attention_temp, self.projection_h, dims=([(- 1)], [0])), dim=1)
        # Attention-weighted sum of the pairwise interactions.
        attention_output = torch.sum((self.normalized_att_score * bi_interaction), dim=1)
        attention_output = self.dropout(attention_output)
        afm_out = torch.tensordot(attention_output, self.projection_p, dims=([(- 1)], [0]))
        return afm_out
|
class InteractingLayer(nn.Module):
    """A layer used in AutoInt that models correlations between different
    feature fields via multi-head self-attention.

    Input shape
        - 3D tensor with shape: ``(batch_size, field_size, embedding_size)``.
    Output shape
        - 3D tensor with shape:
          ``(batch_size, field_size, att_embedding_size * head_num)``.
    Arguments
        - **in_features**: positive integer, dimensionality of input features.
        - **att_embedding_size**: int, embedding size per attention head.
        - **head_num**: int, number of attention heads.
        - **use_res**: bool, whether to add a residual projection before output.
        - **seed**: a Python integer to use as random seed.
    References
        - [Song W, et al. AutoInt: Automatic Feature Interaction Learning via
          Self-Attentive Neural Networks.](https://arxiv.org/abs/1810.11921)
    """

    def __init__(self, in_features, att_embedding_size=8, head_num=2, use_res=True, seed=1024, device='cpu'):
        super(InteractingLayer, self).__init__()
        if (head_num <= 0):
            raise ValueError('head_num must be a int > 0')
        self.att_embedding_size = att_embedding_size
        self.head_num = head_num
        self.use_res = use_res
        self.seed = seed
        embedding_size = in_features
        # Q/K/V projections for all heads fused into single weight matrices.
        self.W_Query = nn.Parameter(torch.Tensor(embedding_size, (self.att_embedding_size * self.head_num)))
        self.W_key = nn.Parameter(torch.Tensor(embedding_size, (self.att_embedding_size * self.head_num)))
        self.W_Value = nn.Parameter(torch.Tensor(embedding_size, (self.att_embedding_size * self.head_num)))
        if self.use_res:
            # Residual path gets its own projection to the output width.
            self.W_Res = nn.Parameter(torch.Tensor(embedding_size, (self.att_embedding_size * self.head_num)))
        for tensor in self.parameters():
            nn.init.normal_(tensor, mean=0.0, std=0.05)
        self.to(device)

    def forward(self, inputs):
        if (len(inputs.shape) != 3):
            raise ValueError(('Unexpected inputs dimensions %d, expect to be 3 dimensions' % len(inputs.shape)))
        # Project to fused multi-head Q/K/V: (b, field, head*att).
        querys = torch.tensordot(inputs, self.W_Query, dims=([(- 1)], [0]))
        keys = torch.tensordot(inputs, self.W_key, dims=([(- 1)], [0]))
        values = torch.tensordot(inputs, self.W_Value, dims=([(- 1)], [0]))
        # Split heads out of the channel axis and stack them in front:
        # (head, b, field, att).
        querys = torch.stack(torch.split(querys, self.att_embedding_size, dim=2))
        keys = torch.stack(torch.split(keys, self.att_embedding_size, dim=2))
        values = torch.stack(torch.split(values, self.att_embedding_size, dim=2))
        # Dot-product attention scores per head: (head, b, field, field).
        # NOTE(review): no 1/sqrt(d) scaling here, matching the reference code.
        inner_product = torch.einsum('bnik,bnjk->bnij', querys, keys)
        self.normalized_att_scores = F.softmax(inner_product, dim=(- 1))
        result = torch.matmul(self.normalized_att_scores, values)
        # Re-fuse the heads back into the channel axis, then drop the leading
        # singleton head axis: (b, field, head*att).
        result = torch.cat(torch.split(result, 1), dim=(- 1))
        result = torch.squeeze(result, dim=0)
        if self.use_res:
            result += torch.tensordot(inputs, self.W_Res, dims=([(- 1)], [0]))
        result = F.relu(result)
        return result
|
class CrossNet(nn.Module):
    """The Cross Network part of Deep&Cross Network, which learns both low and
    high degree cross features.

    Input shape
        - 2D tensor with shape: ``(batch_size, units)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, units)``.
    Arguments
        - **in_features**: positive integer, dimensionality of input features.
        - **layer_num**: positive integer, the number of cross layers.
        - **parameterization**: string, ``"vector"`` (DCN) or ``"matrix"``
          (DCN-M), way to parameterize the cross network.
        - **seed**: a Python integer to use as random seed.
    References
        - [Wang R, et al. Deep & cross network for ad click predictions,
          ADKDD'17.](https://arxiv.org/abs/1708.05123)
        - [Wang R, et al. DCN-M: Improved Deep & Cross Network, 2020.]
          (https://arxiv.org/abs/2008.13535)
    """

    def __init__(self, in_features, layer_num=2, parameterization='vector', seed=1024, device='cpu'):
        super(CrossNet, self).__init__()
        self.layer_num = layer_num
        self.parameterization = parameterization
        if (self.parameterization == 'vector'):
            # One weight vector per layer (original DCN).
            self.kernels = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        elif (self.parameterization == 'matrix'):
            # One full weight matrix per layer (DCN-M).
            self.kernels = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(torch.empty(in_features, in_features))) for i in range(self.layer_num)])
        else:
            raise ValueError("parameterization should be 'vector' or 'matrix'")
        self.bias = torch.nn.ParameterList([nn.Parameter(nn.init.zeros_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        self.to(device)

    def forward(self, inputs):
        # Work with column vectors: (batch, in_features, 1).
        x_0 = inputs.unsqueeze(2)
        x_l = x_0
        for i in range(self.layer_num):
            if (self.parameterization == 'vector'):
                # x_{l+1} = x_0 * (x_l . w) + b + x_l
                xl_w = torch.tensordot(x_l, self.kernels[i], dims=([1], [0]))
                dot_ = torch.matmul(x_0, xl_w)
                # BUGFIX: previously this assigned `x_l = dot_ + bias` here,
                # so the shared residual below produced 2*dot_ + bias and
                # dropped the x_l residual term entirely.
                dot_ = (dot_ + self.bias[i])
            elif (self.parameterization == 'matrix'):
                # x_{l+1} = x_0 * (W x_l + b) + x_l
                dot_ = torch.matmul(self.kernels[i], x_l)
                dot_ = (dot_ + self.bias[i])
                dot_ = (x_0 * dot_)
            else:
                # Unreachable: __init__ already validates parameterization.
                raise ValueError("parameterization should be 'vector' or 'matrix'")
            # Shared residual connection.
            x_l = (dot_ + x_l)
        x_l = torch.squeeze(x_l, dim=2)
        return x_l
|
class CrossNetMix(nn.Module):
    """The Cross Network part of the DCN-Mix model, which improves DCN-M by:
      1. adding a mixture of experts (MOE) to learn feature interactions in
         different subspaces, and
      2. adding nonlinear transformations in the low-dimensional space.

    Input shape
        - 2D tensor with shape: ``(batch_size, units)``.
    Output shape
        - 2D tensor with shape: ``(batch_size, units)``.
    Arguments
        - **in_features**: positive integer, dimensionality of input features.
        - **low_rank**: positive integer, dimensionality of the low-rank space.
        - **num_experts**: positive integer, number of experts.
        - **layer_num**: positive integer, the number of cross layers.
        - **device**: str, e.g. ``"cpu"`` or ``"cuda:0"``.
    References
        - [Wang R, et al. DCN-M: Improved Deep & Cross Network, 2020.]
          (https://arxiv.org/abs/2008.13535)
    """

    def __init__(self, in_features, low_rank=32, num_experts=4, layer_num=2, device='cpu'):
        super(CrossNetMix, self).__init__()
        self.layer_num = layer_num
        self.num_experts = num_experts
        # Per layer, per expert low-rank factors: W ~= U C V^T with
        # U, V of shape (in_features, low_rank) and C of (low_rank, low_rank).
        self.U_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(torch.empty(num_experts, in_features, low_rank))) for i in range(self.layer_num)])
        self.V_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(torch.empty(num_experts, in_features, low_rank))) for i in range(self.layer_num)])
        self.C_list = torch.nn.ParameterList([nn.Parameter(nn.init.xavier_normal_(torch.empty(num_experts, low_rank, low_rank))) for i in range(self.layer_num)])
        # One scalar gate per expert (shared across layers).
        self.gating = nn.ModuleList([nn.Linear(in_features, 1, bias=False) for i in range(self.num_experts)])
        self.bias = torch.nn.ParameterList([nn.Parameter(nn.init.zeros_(torch.empty(in_features, 1))) for i in range(self.layer_num)])
        self.to(device)

    def forward(self, inputs):
        # Work with column vectors: (batch, in_features, 1).
        x_0 = inputs.unsqueeze(2)
        x_l = x_0
        for i in range(self.layer_num):
            output_of_experts = []
            gating_score_of_experts = []
            for expert_id in range(self.num_experts):
                # Gate score from the current state (flattened back to 2D).
                gating_score_of_experts.append(self.gating[expert_id](x_l.squeeze(2)))
                # Project into the low-rank space: V^T x_l.
                v_x = torch.matmul(self.V_list[i][expert_id].T, x_l)
                # Nonlinearities sandwiching the low-rank mixing matrix C.
                v_x = torch.tanh(v_x)
                v_x = torch.matmul(self.C_list[i][expert_id], v_x)
                v_x = torch.tanh(v_x)
                # Project back up: U (tanh(C tanh(V^T x_l))).
                uv_x = torch.matmul(self.U_list[i][expert_id], v_x)
                dot_ = (uv_x + self.bias[i])
                # Hadamard interaction with the original input, as in DCN.
                dot_ = (x_0 * dot_)
                output_of_experts.append(dot_.squeeze(2))
            # (batch, in_features, num_experts)
            output_of_experts = torch.stack(output_of_experts, 2)
            # (batch, num_experts, 1)
            gating_score_of_experts = torch.stack(gating_score_of_experts, 1)
            # Softmax-weighted mixture over experts.
            moe_out = torch.matmul(output_of_experts, gating_score_of_experts.softmax(1))
            # Residual connection.
            x_l = (moe_out + x_l)
        # NOTE(review): squeeze() with no dim also drops a batch dimension of
        # size 1; presumably batch_size > 1 is assumed here.
        x_l = x_l.squeeze()
        return x_l
|
class InnerProductLayer(nn.Module):
    """InnerProduct layer used in PNN: computes the element-wise product or
    the inner product between every pair of feature vectors.

    Input shape
        - a list of 3D tensors with shape: ``(batch_size, 1, embedding_size)``.
    Output shape
        - 3D tensor ``(batch_size, N*(N-1)/2, 1)`` when ``reduce_sum`` is
          True, otherwise ``(batch_size, N*(N-1)/2, embedding_size)``.
    Arguments
        - **reduce_sum**: bool, whether to return inner products (summed over
          the embedding axis) or element-wise products.
    References
        - [Qu Y, et al. Product-based neural networks for user response
          prediction, ICDM 2016.](https://arxiv.org/pdf/1611.00144.pdf)
    """

    def __init__(self, reduce_sum=True, device='cpu'):
        super(InnerProductLayer, self).__init__()
        self.reduce_sum = reduce_sum
        self.to(device)

    def forward(self, inputs):
        embeds = inputs
        n = len(embeds)
        # All unordered index pairs (i < j).
        pairs = [(i, j) for i in range(n - 1) for j in range(i + 1, n)]
        left = torch.cat([embeds[i] for (i, _) in pairs], dim=1)
        right = torch.cat([embeds[j] for (_, j) in pairs], dim=1)
        product = left * right
        if self.reduce_sum:
            product = torch.sum(product, dim=2, keepdim=True)
        return product
|
class OutterProductLayer(nn.Module):
    """OutterProduct Layer used in PNN. This implementation is adapted from the
    code the paper's author published at https://github.com/Atomu2014/product-nets.

    Input shape
        - A list of N 3D tensors with shape ``(batch_size, 1, embedding_size)``.
    Output shape
        - 2D tensor with shape ``(batch_size, N*(N-1)/2)``.
    Arguments
        - **field_size**: Positive integer, number of feature groups.
        - **embedding_size**: Positive integer, size of each field embedding.
        - **kernel_type**: str. The kernel weight matrix type to use; one of
          ``'mat'``, ``'vec'`` or ``'num'``.
        - **seed**: A Python integer to use as random seed.
    References
        - [Qu Y, Cai H, Ren K, et al. Product-based neural networks for user
          response prediction[C]// ICDM 2016: 1149-1154.]
          (https://arxiv.org/pdf/1611.00144.pdf)
    """

    def __init__(self, field_size, embedding_size, kernel_type='mat', seed=1024, device='cpu'):
        super(OutterProductLayer, self).__init__()
        if kernel_type not in ('mat', 'vec', 'num'):
            # BUG FIX: previously an unknown kernel_type left self.kernel
            # undefined and crashed below with an opaque AttributeError from
            # nn.init.xavier_uniform_. Fail fast with a clear message instead.
            raise ValueError('kernel_type must be mat, vec or num')
        self.kernel_type = kernel_type
        num_inputs = field_size
        num_pairs = int(((num_inputs * (num_inputs - 1)) / 2))
        embed_size = embedding_size
        if (self.kernel_type == 'mat'):
            # One (embed_size x embed_size) bilinear kernel per pair.
            self.kernel = nn.Parameter(torch.Tensor(embed_size, num_pairs, embed_size))
        elif (self.kernel_type == 'vec'):
            # One embedding-sized weight vector per pair.
            self.kernel = nn.Parameter(torch.Tensor(num_pairs, embed_size))
        else:  # 'num'
            # One scalar weight per pair.
            self.kernel = nn.Parameter(torch.Tensor(num_pairs, 1))
        nn.init.xavier_uniform_(self.kernel)
        self.to(device)

    def forward(self, inputs):
        """Compute kernel-weighted products for every feature pair.

        :param inputs: list of N 3D tensors ``(batch_size, 1, embedding_size)``.
        :return: 2D tensor ``(batch_size, N*(N-1)/2)``.
        """
        embed_list = inputs
        row = []
        col = []
        num_inputs = len(embed_list)
        for i in range((num_inputs - 1)):
            for j in range((i + 1), num_inputs):
                row.append(i)
                col.append(j)
        # p/q hold the left/right member of every pair: (batch, num_pairs, emb).
        p = torch.cat([embed_list[idx] for idx in row], dim=1)
        q = torch.cat([embed_list[idx] for idx in col], dim=1)
        if (self.kernel_type == 'mat'):
            # Per-pair bilinear form p^T W q, computed by broadcasting p against
            # the (emb, num_pairs, emb) kernel, reducing, and re-aligning with q.
            p.unsqueeze_(dim=1)
            kp = torch.sum(torch.mul(torch.transpose(torch.sum(torch.mul(p, self.kernel), dim=(- 1)), 2, 1), q), dim=(- 1))
        else:
            # 'vec' and 'num' share one path: the element-wise product is
            # reweighted by the kernel and summed over the embedding axis.
            k = torch.unsqueeze(self.kernel, 0)
            kp = torch.sum(((p * q) * k), dim=(- 1))
        return kp
|
class ConvLayer(nn.Module):
    """Conv Layer used in CCPM.

    Input shape
        - A list of N 3D tensors with shape ``(batch_size, 1, field_size, embedding_size)``.
    Output shape
        - A list of N 3D tensors with shape ``(batch_size, last_filters, pooling_size, embedding_size)``.
    Arguments
        - **field_size**: Positive integer, number of feature groups.
        - **conv_kernel_width**: list of positive integers or empty list, the
          width of the filter in each conv layer.
        - **conv_filters**: list of positive integers or empty list, the number
          of filters in each conv layer.
    Reference:
        - Liu Q, Yu F, Wu S, et al. A convolutional click prediction model[C]//
          CIKM 2015: 1743-1746.
    """

    def __init__(self, field_size, conv_kernel_width, conv_filters, device='cpu'):
        super(ConvLayer, self).__init__()
        self.device = device
        layers = []
        field_count = int(field_size)
        depth = len(conv_filters)
        pooled_size = field_count
        for layer_idx in range(1, depth + 1):
            in_ch = 1 if layer_idx == 1 else conv_filters[layer_idx - 2]
            out_ch = conv_filters[layer_idx - 1]
            kernel_w = conv_kernel_width[layer_idx - 1]
            # Flexible p-max-pooling schedule from the CCPM paper: gradually
            # shrink the field axis, keeping at least 1, and end at 3.
            if layer_idx < depth:
                k = max(1, int((1 - pow(layer_idx / depth, depth - layer_idx)) * field_count))
            else:
                k = 3
            layers.append(Conv2dSame(in_channels=in_ch, out_channels=out_ch, kernel_size=(kernel_w, 1), stride=1).to(self.device))
            layers.append(torch.nn.Tanh().to(self.device))
            layers.append(KMaxPooling(k=min(k, pooled_size), axis=2, device=self.device).to(self.device))
            pooled_size = min(k, pooled_size)
        self.conv_layer = nn.Sequential(*layers)
        self.to(device)
        self.filed_shape = pooled_size

    def forward(self, inputs):
        return self.conv_layer(inputs)
|
class SequencePoolingLayer(nn.Module):
    """The SequencePoolingLayer is used to apply a pooling operation
    (sum, mean, max) on a variable-length sequence feature/multi-value feature.

    Input shape
        - A list of two tensors [seq_value, seq_len]
        - seq_value is a 3D tensor with shape ``(batch_size, T, embedding_size)``
        - seq_len is a 2D tensor with shape ``(batch_size, 1)``, indicating the
          valid length of each sequence.
    Output shape
        - 3D tensor with shape ``(batch_size, 1, embedding_size)``.
    Arguments
        - **mode**: str. Pooling operation to be used; can be sum, mean or max.
    """

    def __init__(self, mode='mean', supports_masking=False, device='cpu'):
        super(SequencePoolingLayer, self).__init__()
        if (mode not in ['sum', 'mean', 'max']):
            raise ValueError('parameter mode should in [sum, mean, max]')
        self.supports_masking = supports_masking
        self.mode = mode
        self.device = device
        # Small epsilon guards the mean against division by zero for
        # zero-length sequences.
        self.eps = torch.FloatTensor([1e-08]).to(device)
        self.to(device)

    def _sequence_mask(self, lengths, maxlen=None, dtype=torch.bool):
        # Build a mask of shape lengths.shape + (maxlen,) where position t is
        # valid (1) iff t < length, mirroring tf.sequence_mask.
        if (maxlen is None):
            maxlen = lengths.max()
        row_vector = torch.arange(0, maxlen, 1).to(self.device)
        matrix = torch.unsqueeze(lengths, dim=(- 1))
        mask = (row_vector < matrix)
        # BUG FIX: the original called ``mask.type(dtype)`` and discarded the
        # result (Tensor.type returns a new tensor; it is not in-place), so the
        # mask stayed boolean and ``1 - mask`` in max pooling fails on modern
        # PyTorch. Assign the cast result.
        mask = mask.type(dtype)
        return mask

    def forward(self, seq_value_len_list):
        """Pool a padded sequence according to ``self.mode``.

        :param seq_value_len_list: [seq_value, seq_len] (or [seq_value, mask]
            when ``supports_masking`` is True).
        :return: 3D tensor ``(batch_size, 1, embedding_size)``.
        """
        if self.supports_masking:
            (uiseq_embed_list, mask) = seq_value_len_list
            mask = mask.float()
            user_behavior_length = torch.sum(mask, dim=(- 1), keepdim=True)
            mask = mask.unsqueeze(2)
        else:
            (uiseq_embed_list, user_behavior_length) = seq_value_len_list
            mask = self._sequence_mask(user_behavior_length, maxlen=uiseq_embed_list.shape[1], dtype=torch.float32)
            mask = torch.transpose(mask, 1, 2)  # (batch, maxlen, 1)
        embedding_size = uiseq_embed_list.shape[(- 1)]
        # Broadcast the mask across the embedding axis.
        mask = torch.repeat_interleave(mask, embedding_size, dim=2)
        if (self.mode == 'max'):
            # Push padded positions far negative so they never win the max.
            hist = (uiseq_embed_list - ((1 - mask) * 1000000000.0))
            hist = torch.max(hist, dim=1, keepdim=True)[0]
            return hist
        hist = (uiseq_embed_list * mask.float())
        hist = torch.sum(hist, dim=1, keepdim=False)
        if (self.mode == 'mean'):
            hist = torch.div(hist, (user_behavior_length.type(torch.float32) + self.eps))
        hist = torch.unsqueeze(hist, dim=1)
        return hist
|
class AttentionSequencePoolingLayer(nn.Module):
    """The Attentional sequence pooling operation used in DIN & DIEN.

    Arguments
        - **att_hidden_units**: list of positive integers, the attention net
          layer number and units in each layer.
        - **att_activation**: Activation function to use in the attention net.
        - **weight_normalization**: bool. Whether to softmax-normalize the
          attention scores of the local activation unit.
        - **supports_masking**: If True, the input needs to support masking.

    References
        - [Zhou G, Zhu X, Song C, et al. Deep interest network for click-through
          rate prediction[C]// KDD 2018: 1059-1068.]
          (https://arxiv.org/pdf/1706.06978.pdf)
    """

    def __init__(self, att_hidden_units=(80, 40), att_activation='sigmoid', weight_normalization=False, return_score=False, supports_masking=False, embedding_dim=4, **kwargs):
        super(AttentionSequencePoolingLayer, self).__init__()
        self.return_score = return_score
        self.weight_normalization = weight_normalization
        self.supports_masking = supports_masking
        # Scores each key against the query (DIN's local activation unit).
        self.local_att = LocalActivationUnit(hidden_units=att_hidden_units, embedding_dim=embedding_dim, activation=att_activation, dropout_rate=0, use_bn=False)

    def forward(self, query, keys, keys_length, mask=None):
        """
        Input shape
            - A list of three tensors: [query, keys, keys_length]
            - query is a 3D tensor with shape ``(batch_size, 1, embedding_size)``
            - keys is a 3D tensor with shape ``(batch_size, T, embedding_size)``
            - keys_length is a 2D tensor with shape ``(batch_size, 1)``
        Output shape
            - 3D tensor with shape ``(batch_size, 1, embedding_size)``
              (or attention scores when ``return_score`` is True).
        """
        (batch_size, max_length, dim) = keys.size()
        if self.supports_masking:
            if (mask is None):
                raise ValueError('When supports_masking=True,input must support masking')
            keys_masks = mask.unsqueeze(1)
        else:
            # Build the validity mask from keys_length: position t valid iff t < length.
            keys_masks = torch.arange(max_length, device=keys_length.device, dtype=keys_length.dtype).repeat(batch_size, 1)
            keys_masks = (keys_masks < keys_length.view((- 1), 1))
            keys_masks = keys_masks.unsqueeze(1)
        attention_score = self.local_att(query, keys)
        outputs = torch.transpose(attention_score, 1, 2)
        if self.weight_normalization:
            # Pad masked positions with a huge negative value so the softmax
            # below drives their weights to ~0.
            paddings = (torch.ones_like(outputs) * ((- (2 ** 32)) + 1))
        else:
            # Without normalization, masked positions simply contribute 0.
            paddings = torch.zeros_like(outputs)
        outputs = torch.where(keys_masks, outputs, paddings)
        if self.weight_normalization:
            outputs = F.softmax(outputs, dim=(- 1))
        if (not self.return_score):
            # Attention-weighted sum over keys: (batch_size, 1, embedding_size).
            outputs = torch.matmul(outputs, keys)
        return outputs
|
class KMaxPooling(nn.Module):
    """K-max pooling that selects the k biggest values along a specific axis.

    Input shape
        - nD tensor with shape ``(batch_size, ..., input_dim)``.
    Output shape
        - nD tensor with shape ``(batch_size, ..., output_dim)``.
    Arguments
        - **k**: positive integer, number of top elements to keep along ``axis``.
        - **axis**: positive integer, the dimension to look for elements.
    """

    def __init__(self, k, axis, device='cpu'):
        super(KMaxPooling, self).__init__()
        self.k = k
        self.axis = axis
        self.to(device)

    def forward(self, input):
        rank = len(input.shape)
        # Validate the configured axis against the actual input rank.
        if not (0 <= self.axis < rank):
            raise ValueError(('axis must be 0~%d,now is %d' % ((rank - 1), self.axis)))
        axis_size = input.shape[self.axis]
        # k must select at least one and at most all elements along the axis.
        if not (1 <= self.k <= axis_size):
            raise ValueError(('k must be in 1 ~ %d,now k is %d' % (axis_size, self.k)))
        return torch.topk(input, k=self.k, dim=self.axis, sorted=True)[0]
|
class AGRUCell(nn.Module):
    """Attention-based GRU (AGRU): the attention score replaces the GRU update
    gate, so the ``z`` chunks of the gate projections are computed but unused.

    Reference:
        - Deep Interest Evolution Network for Click-Through Rate Prediction[J].
          arXiv preprint arXiv:1809.03672, 2018.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Stacked reset/update/new projections, laid out as in nn.GRUCell.
        self.weight_ih = nn.Parameter(torch.Tensor(3 * hidden_size, input_size))
        self.register_parameter('weight_ih', self.weight_ih)
        self.weight_hh = nn.Parameter(torch.Tensor(3 * hidden_size, hidden_size))
        self.register_parameter('weight_hh', self.weight_hh)
        if not bias:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)
        else:
            self.bias_ih = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_ih', self.bias_ih)
            self.bias_hh = nn.Parameter(torch.Tensor(3 * hidden_size))
            self.register_parameter('bias_hh', self.bias_hh)
            # Biases start at zero; weights keep their uninitialized values
            # (as in the original) and are expected to be initialized upstream.
            nn.init.zeros_(self.bias_ih)
            nn.init.zeros_(self.bias_hh)

    def forward(self, input, hx, att_score):
        """One recurrent step.

        :param input: 2D tensor ``(batch, input_size)``.
        :param hx: 2D tensor ``(batch, hidden_size)``, previous hidden state.
        :param att_score: tensor reshapeable to ``(batch, 1)``.
        :return: next hidden state, ``(batch, hidden_size)``.
        """
        input_gates = F.linear(input, self.weight_ih, self.bias_ih)
        hidden_gates = F.linear(hx, self.weight_hh, self.bias_hh)
        i_r, _, i_n = input_gates.chunk(3, 1)
        h_r, _, h_n = hidden_gates.chunk(3, 1)
        reset_gate = torch.sigmoid(i_r + h_r)
        candidate = torch.tanh(i_n + reset_gate * h_n)
        # The attention score plays the role of the GRU update gate.
        attn = att_score.view(-1, 1)
        return (1.0 - attn) * hx + attn * candidate
|
class AUGRUCell(nn.Module):
    """GRU with attentional update gate (AUGRU): the update gate is scaled by
    the attention score before interpolating old and new hidden states.

    Reference:
        - Deep Interest Evolution Network for Click-Through Rate Prediction[J].
          arXiv preprint arXiv:1809.03672, 2018.
    """

    def __init__(self, input_size, hidden_size, bias=True):
        super(AUGRUCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        # Stacked reset/update/new projections, laid out as in nn.GRUCell.
        self.weight_ih = nn.Parameter(torch.Tensor((3 * hidden_size), input_size))
        self.register_parameter('weight_ih', self.weight_ih)
        self.weight_hh = nn.Parameter(torch.Tensor((3 * hidden_size), hidden_size))
        self.register_parameter('weight_hh', self.weight_hh)
        if bias:
            self.bias_ih = nn.Parameter(torch.Tensor((3 * hidden_size)))
            self.register_parameter('bias_ih', self.bias_ih)
            self.bias_hh = nn.Parameter(torch.Tensor((3 * hidden_size)))
            # BUG FIX: the original registered bias_hh under the name
            # 'bias_ih', silently overwriting bias_ih and tying both bias
            # attributes to the same Parameter (compare AGRUCell, which
            # registers 'bias_hh' correctly). Register under 'bias_hh'.
            self.register_parameter('bias_hh', self.bias_hh)
            for tensor in [self.bias_ih, self.bias_hh]:
                nn.init.zeros_(tensor)
        else:
            self.register_parameter('bias_ih', None)
            self.register_parameter('bias_hh', None)

    def forward(self, input, hx, att_score):
        """One recurrent step.

        :param input: 2D tensor ``(batch, input_size)``.
        :param hx: 2D tensor ``(batch, hidden_size)``, previous hidden state.
        :param att_score: tensor reshapeable to ``(batch, 1)``.
        :return: next hidden state, ``(batch, hidden_size)``.
        """
        gi = F.linear(input, self.weight_ih, self.bias_ih)
        gh = F.linear(hx, self.weight_hh, self.bias_hh)
        (i_r, i_z, i_n) = gi.chunk(3, 1)
        (h_r, h_z, h_n) = gh.chunk(3, 1)
        reset_gate = torch.sigmoid((i_r + h_r))
        update_gate = torch.sigmoid((i_z + h_z))
        new_state = torch.tanh((i_n + (reset_gate * h_n)))
        # Scale the update gate by the attention score — the AUGRU mechanism.
        att_score = att_score.view((- 1), 1)
        update_gate = (att_score * update_gate)
        hy = (((1.0 - update_gate) * hx) + (update_gate * new_state))
        return hy
|
class DynamicGRU(nn.Module):
    """Runs an AGRU/AUGRU cell over a PackedSequence, consuming one attention
    score per timestep (used by DIEN's interest-evolution layer).
    """

    def __init__(self, input_size, hidden_size, bias=True, gru_type='AGRU'):
        super(DynamicGRU, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # NOTE(review): any other gru_type silently leaves self.rnn undefined,
        # so forward would fail with AttributeError — confirm callers only ever
        # pass 'AGRU' or 'AUGRU'.
        if (gru_type == 'AGRU'):
            self.rnn = AGRUCell(input_size, hidden_size, bias)
        elif (gru_type == 'AUGRU'):
            self.rnn = AUGRUCell(input_size, hidden_size, bias)

    def forward(self, input, att_scores=None, hx=None):
        """Step the recurrent cell over packed, time-major batches.

        :param input: PackedSequence of input vectors.
        :param att_scores: PackedSequence of attention scores aligned with input.
        :param hx: optional initial hidden state ``(max_batch_size, hidden_size)``.
        :return: PackedSequence of hidden states for every timestep.
        :raises NotImplementedError: if input/att_scores are not PackedSequence.
        """
        if ((not isinstance(input, PackedSequence)) or (not isinstance(att_scores, PackedSequence))):
            raise NotImplementedError('DynamicGRU only supports packed input and att_scores')
        (input, batch_sizes, sorted_indices, unsorted_indices) = input
        (att_scores, _, _, _) = att_scores
        max_batch_size = int(batch_sizes[0])
        if (hx is None):
            hx = torch.zeros(max_batch_size, self.hidden_size, dtype=input.dtype, device=input.device)
        outputs = torch.zeros(input.size(0), self.hidden_size, dtype=input.dtype, device=input.device)
        begin = 0
        # A PackedSequence stores timesteps contiguously: batch_sizes[t] rows
        # belong to timestep t, and sequences are sorted longest-first, so the
        # first `batch` rows of hx are exactly the still-active sequences.
        for batch in batch_sizes:
            new_hx = self.rnn(input[begin:(begin + batch)], hx[0:batch], att_scores[begin:(begin + batch)])
            outputs[begin:(begin + batch)] = new_hx
            hx = new_hx
            begin += batch
        return PackedSequence(outputs, batch_sizes, sorted_indices, unsorted_indices)
|
def concat_fun(inputs, axis=-1):
    """Concatenate a list of tensors along ``axis``.

    A singleton list is returned as its sole element (no copy is made).
    """
    if len(inputs) != 1:
        return torch.cat(inputs, dim=axis)
    return inputs[0]
|
def slice_arrays(arrays, start=None, stop=None):
    """Slice an array or a list of arrays.

    Given an array-like, or a list of array-likes, returns:
        - ``arrays[start:stop]`` if ``arrays`` is a single array-like
        - ``[x[start:stop] for x in arrays]`` if ``arrays`` is a list

    Also works with a list/array of indices: ``slice_arrays(x, indices)``.

    Arguments:
        arrays: Single array or list of arrays.
        start: integer start index, or a list/array of indices.
        stop: integer stop index; must be None when ``start`` is a list.

    Returns:
        A slice of the array(s).

    Raises:
        ValueError: If ``start`` is a list and ``stop`` is not None.
    """
    if arrays is None:
        return [None]
    if isinstance(arrays, np.ndarray):
        # Normalize a bare ndarray to the list form.
        arrays = [arrays]
    if isinstance(start, list) and stop is not None:
        raise ValueError('The stop argument has to be None if the value of start is a list.')
    if isinstance(arrays, list):
        if hasattr(start, '__len__'):
            # Fancy indexing with a list/array of indices.
            if hasattr(start, 'shape'):
                start = start.tolist()
            return [None if x is None else x[start] for x in arrays]
        if len(arrays) == 1:
            return arrays[0][start:stop]
        return [None if x is None else x[start:stop] for x in arrays]
    if hasattr(start, '__len__'):
        if hasattr(start, 'shape'):
            start = start.tolist()
        return arrays[start]
    if hasattr(start, '__getitem__'):
        return arrays[start:stop]
    return [None]
|
class AFM(BaseModel):
    """Instantiates the Attentional Factorization Machine architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param use_attention: bool, whether to use attention or not. If ``False``, it is the same as a **standard Factorization Machine**.
    :param attention_factor: positive integer, units in the attention net.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_att: float. L2 regularizer strength applied to the attention net.
    :param afm_dropout: float in [0,1), fraction of the attention net output units to drop out.
    :param init_std: float, to use as the initial std of embedding vectors.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, use_attention=True, attention_factor=8, l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_att=1e-05, afm_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu'):
        super(AFM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        self.use_attention = use_attention
        if use_attention:
            # Attention-based pairwise interaction pooling (the AFM layer).
            self.fm = AFMLayer(self.embedding_size, attention_factor, l2_reg_att, afm_dropout, seed, device)
            self.add_regularization_weight(self.fm.attention_W, l2_reg_att)
        else:
            # Plain FM second-order term when attention is disabled.
            self.fm = FM()
        self.to(device)

    def forward(self, X):
        # AFM consumes only sparse embeddings; dense features are unsupported.
        (sparse_embedding_list, _) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict, support_dense=False)
        logit = self.linear_model(X)
        if (len(sparse_embedding_list) > 0):
            if self.use_attention:
                # AFMLayer consumes the list of per-field embeddings.
                logit += self.fm(sparse_embedding_list)
            else:
                # FM consumes one stacked (batch, field, embedding) tensor.
                logit += self.fm(torch.cat(sparse_embedding_list, dim=1))
        y_pred = self.out(logit)
        return y_pred
|
class AutoInt(BaseModel):
    """Instantiates the AutoInt Network architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param att_layer_num: int. The number of InteractingLayers to use.
    :param att_embedding_size: int. The embedding size in the multi-head self-attention network.
    :param att_head_num: int. The head number in the multi-head self-attention network.
    :param att_res: bool. Whether or not to use standard residual connections before output.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param dnn_activation: Activation function to use in the DNN.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param init_std: float, to use as the initial std of embedding vectors.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, att_layer_num=3, att_embedding_size=8, att_head_num=2, att_res=True, dnn_hidden_units=(256, 128), dnn_activation='relu', l2_reg_dnn=0, l2_reg_embedding=1e-05, dnn_use_bn=False, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu'):
        super(AutoInt, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=0, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        if ((len(dnn_hidden_units) <= 0) and (att_layer_num <= 0)):
            raise ValueError('Either hidden_layer or att_layer_num must > 0')
        self.use_dnn = ((len(dnn_feature_columns) > 0) and (len(dnn_hidden_units) > 0))
        field_num = len(self.embedding_dict)
        # The final linear layer's input width depends on which towers exist.
        if (len(dnn_hidden_units) and (att_layer_num > 0)):
            dnn_linear_in_feature = (dnn_hidden_units[(- 1)] + ((field_num * att_embedding_size) * att_head_num))
        elif (len(dnn_hidden_units) > 0):
            dnn_linear_in_feature = dnn_hidden_units[(- 1)]
        elif (att_layer_num > 0):
            dnn_linear_in_feature = ((field_num * att_embedding_size) * att_head_num)
        else:
            raise NotImplementedError
        self.dnn_linear = nn.Linear(dnn_linear_in_feature, 1, bias=False).to(device)
        self.dnn_hidden_units = dnn_hidden_units
        self.att_layer_num = att_layer_num
        if self.use_dnn:
            self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
            # Regularize DNN weights only (skip biases and batch-norm params).
            self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
        # Stacked self-attention layers: the first consumes the raw embedding
        # size, later ones consume the previous layer's output width.
        self.int_layers = nn.ModuleList([InteractingLayer((self.embedding_size if (i == 0) else (att_embedding_size * att_head_num)), att_embedding_size, att_head_num, att_res, device=device) for i in range(att_layer_num)])
        self.to(device)

    def forward(self, X):
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        logit = self.linear_model(X)
        # Self-attention tower over the stacked field embeddings.
        att_input = concat_fun(sparse_embedding_list, axis=1)
        for layer in self.int_layers:
            att_input = layer(att_input)
        att_output = torch.flatten(att_input, start_dim=1)
        dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
        if ((len(self.dnn_hidden_units) > 0) and (self.att_layer_num > 0)):
            # Both towers: concatenate attention and DNN outputs.
            deep_out = self.dnn(dnn_input)
            stack_out = concat_fun([att_output, deep_out])
            logit += self.dnn_linear(stack_out)
        elif (len(self.dnn_hidden_units) > 0):
            deep_out = self.dnn(dnn_input)
            logit += self.dnn_linear(deep_out)
        elif (self.att_layer_num > 0):
            logit += self.dnn_linear(att_output)
        else:
            pass
        y_pred = self.out(logit)
        return y_pred
|
class CCPM(BaseModel):
    """Instantiates the Convolutional Click Prediction Model architecture.

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param conv_kernel_width: list of positive integers or empty list, the width of the filter in each conv layer.
    :param conv_filters: list of positive integers or empty list, the number of filters in each conv layer.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the linear part.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param init_std: float, to use as the initial std of embedding vectors.
    :param seed: integer, to use as random seed.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, conv_kernel_width=(6, 5), conv_filters=(4, 4), dnn_hidden_units=(256,), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_dnn=0, dnn_dropout=0, init_std=0.0001, seed=1024, task='binary', device='cpu', dnn_use_bn=False, dnn_activation='relu'):
        super(CCPM, self).__init__(linear_feature_columns, dnn_feature_columns, l2_reg_linear=l2_reg_linear, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        if (len(conv_kernel_width) != len(conv_filters)):
            raise ValueError('conv_kernel_width must have same element with conv_filters')
        # Number of feature groups feeding the conv tower (sparse fields only).
        filed_size = self.compute_input_dim(dnn_feature_columns, include_dense=False, feature_group=True)
        self.conv_layer = ConvLayer(field_size=filed_size, conv_kernel_width=conv_kernel_width, conv_filters=conv_filters, device=device)
        # DNN input width: pooled field axis * embedding size * last filter count.
        self.dnn_input_dim = ((self.conv_layer.filed_shape * self.embedding_size) * conv_filters[(- 1)])
        self.dnn = DNN(self.dnn_input_dim, dnn_hidden_units, activation=dnn_activation, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, use_bn=dnn_use_bn, init_std=init_std, device=device)
        self.dnn_linear = nn.Linear(dnn_hidden_units[(- 1)], 1, bias=False).to(device)
        # Regularize DNN weights only (skip biases and batch-norm params).
        self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_dnn)
        self.to(device)

    def forward(self, X):
        linear_logit = self.linear_model(X)
        # CCPM consumes only sparse embeddings; dense features are unsupported.
        (sparse_embedding_list, _) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict, support_dense=False)
        if (len(sparse_embedding_list) == 0):
            raise ValueError('must have the embedding feature,now the embedding feature is None!')
        conv_input = concat_fun(sparse_embedding_list, axis=1)
        # Add a channel axis: (batch, 1, field, embedding) for the 2D convs.
        conv_input_concact = torch.unsqueeze(conv_input, 1)
        pooling_result = self.conv_layer(conv_input_concact)
        flatten_result = pooling_result.view(pooling_result.size(0), (- 1))
        dnn_output = self.dnn(flatten_result)
        dnn_logit = self.dnn_linear(dnn_output)
        logit = (linear_logit + dnn_logit)
        y_pred = self.out(logit)
        return y_pred
|
class DCN(BaseModel):
    """Instantiates the Deep&Cross Network architecture, covering DCN-V
    (parameterization='vector') and DCN-M (parameterization='matrix').

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param cross_num: positive integer, cross layer number.
    :param cross_parameterization: str, ``"vector"`` or ``"matrix"``, how to parameterize the cross network.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the final linear layer.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_cross: float. L2 regularizer strength applied to the cross net.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initial std of embedding vectors.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    :raises ValueError: if both ``dnn_hidden_units`` is empty and ``cross_num`` is 0.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, cross_num=2, cross_parameterization='vector', dnn_hidden_units=(128, 128), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_cross=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        super(DCN, self).__init__(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        if (len(dnn_hidden_units) == 0) and (cross_num == 0):
            # BUG FIX: without this guard, dnn_linear_in_feature below would be
            # unbound and nn.Linear(...) would fail with an opaque
            # UnboundLocalError (mirrors AutoInt's explicit check).
            raise ValueError('Either dnn_hidden_units or cross_num must > 0')
        self.dnn_hidden_units = dnn_hidden_units
        self.cross_num = cross_num
        self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, use_bn=dnn_use_bn, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, init_std=init_std, device=device)
        # The final linear layer's input width depends on which towers exist.
        if ((len(self.dnn_hidden_units) > 0) and (self.cross_num > 0)):
            dnn_linear_in_feature = (self.compute_input_dim(dnn_feature_columns) + dnn_hidden_units[(- 1)])
        elif (len(self.dnn_hidden_units) > 0):
            dnn_linear_in_feature = dnn_hidden_units[(- 1)]
        else:
            dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns)
        self.dnn_linear = nn.Linear(dnn_linear_in_feature, 1, bias=False).to(device)
        self.crossnet = CrossNet(in_features=self.compute_input_dim(dnn_feature_columns), layer_num=cross_num, parameterization=cross_parameterization, device=device)
        # Regularize DNN weights only (skip biases and batch-norm params).
        self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_linear)
        self.add_regularization_weight(self.crossnet.kernels, l2_reg_cross)
        self.to(device)

    def forward(self, X):
        logit = self.linear_model(X)
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
        if ((len(self.dnn_hidden_units) > 0) and (self.cross_num > 0)):
            # Both towers: concatenate cross and deep outputs.
            deep_out = self.dnn(dnn_input)
            cross_out = self.crossnet(dnn_input)
            stack_out = torch.cat((cross_out, deep_out), dim=(- 1))
            logit += self.dnn_linear(stack_out)
        elif (len(self.dnn_hidden_units) > 0):
            deep_out = self.dnn(dnn_input)
            logit += self.dnn_linear(deep_out)
        elif (self.cross_num > 0):
            cross_out = self.crossnet(dnn_input)
            logit += self.dnn_linear(cross_out)
        else:
            pass
        y_pred = self.out(logit)
        return y_pred
|
class DCNMix(BaseModel):
    """Instantiates the DCN-Mix model (mixture of low-rank cross experts).

    :param linear_feature_columns: An iterable containing all the features used by the linear part of the model.
    :param dnn_feature_columns: An iterable containing all the features used by the deep part of the model.
    :param cross_num: positive integer, cross layer number.
    :param dnn_hidden_units: list of positive integers or empty list, the layer number and units in each layer of the DNN.
    :param l2_reg_linear: float. L2 regularizer strength applied to the final linear layer.
    :param l2_reg_embedding: float. L2 regularizer strength applied to embedding vectors.
    :param l2_reg_cross: float. L2 regularizer strength applied to the cross net.
    :param l2_reg_dnn: float. L2 regularizer strength applied to the DNN.
    :param init_std: float, to use as the initial std of embedding vectors.
    :param seed: integer, to use as random seed.
    :param dnn_dropout: float in [0,1), the probability we will drop out a given DNN coordinate.
    :param low_rank: Positive integer, dimensionality of the low-rank space.
    :param num_experts: Positive integer, number of experts.
    :param dnn_activation: Activation function to use in the DNN.
    :param dnn_use_bn: bool. Whether to use BatchNormalization before activation in the DNN.
    :param task: str, ``"binary"`` for binary logloss or ``"regression"`` for regression loss.
    :param device: str, ``"cpu"`` or ``"cuda:0"``.
    :return: A PyTorch model instance.
    :raises ValueError: if both ``dnn_hidden_units`` is empty and ``cross_num`` is 0.
    """

    def __init__(self, linear_feature_columns, dnn_feature_columns, cross_num=2, dnn_hidden_units=(128, 128), l2_reg_linear=1e-05, l2_reg_embedding=1e-05, l2_reg_cross=1e-05, l2_reg_dnn=0, init_std=0.0001, seed=1024, dnn_dropout=0, low_rank=32, num_experts=4, dnn_activation='relu', dnn_use_bn=False, task='binary', device='cpu'):
        super(DCNMix, self).__init__(linear_feature_columns=linear_feature_columns, dnn_feature_columns=dnn_feature_columns, l2_reg_embedding=l2_reg_embedding, init_std=init_std, seed=seed, task=task, device=device)
        if (len(dnn_hidden_units) == 0) and (cross_num == 0):
            # BUG FIX: without this guard, dnn_linear_in_feature below would be
            # unbound and nn.Linear(...) would fail with an opaque
            # UnboundLocalError (mirrors AutoInt's explicit check).
            raise ValueError('Either dnn_hidden_units or cross_num must > 0')
        self.dnn_hidden_units = dnn_hidden_units
        self.cross_num = cross_num
        self.dnn = DNN(self.compute_input_dim(dnn_feature_columns), dnn_hidden_units, activation=dnn_activation, use_bn=dnn_use_bn, l2_reg=l2_reg_dnn, dropout_rate=dnn_dropout, init_std=init_std, device=device)
        # The final linear layer's input width depends on which towers exist.
        if ((len(self.dnn_hidden_units) > 0) and (self.cross_num > 0)):
            dnn_linear_in_feature = (self.compute_input_dim(dnn_feature_columns) + dnn_hidden_units[(- 1)])
        elif (len(self.dnn_hidden_units) > 0):
            dnn_linear_in_feature = dnn_hidden_units[(- 1)]
        else:
            dnn_linear_in_feature = self.compute_input_dim(dnn_feature_columns)
        self.dnn_linear = nn.Linear(dnn_linear_in_feature, 1, bias=False).to(device)
        self.crossnet = CrossNetMix(in_features=self.compute_input_dim(dnn_feature_columns), low_rank=low_rank, num_experts=num_experts, layer_num=cross_num, device=device)
        # Regularize DNN weights only (skip biases and batch-norm params).
        self.add_regularization_weight(filter((lambda x: (('weight' in x[0]) and ('bn' not in x[0]))), self.dnn.named_parameters()), l2_reg_dnn)
        self.add_regularization_weight(self.dnn_linear.weight, l2_reg_linear)
        # Regularize all three expert factor lists of the mixture cross net.
        self.add_regularization_weight(self.crossnet.U_list, l2_reg_cross)
        self.add_regularization_weight(self.crossnet.V_list, l2_reg_cross)
        self.add_regularization_weight(self.crossnet.C_list, l2_reg_cross)
        self.to(device)

    def forward(self, X):
        logit = self.linear_model(X)
        (sparse_embedding_list, dense_value_list) = self.input_from_feature_columns(X, self.dnn_feature_columns, self.embedding_dict)
        dnn_input = combined_dnn_input(sparse_embedding_list, dense_value_list)
        if ((len(self.dnn_hidden_units) > 0) and (self.cross_num > 0)):
            # Both towers: concatenate cross and deep outputs.
            deep_out = self.dnn(dnn_input)
            cross_out = self.crossnet(dnn_input)
            stack_out = torch.cat((cross_out, deep_out), dim=(- 1))
            logit += self.dnn_linear(stack_out)
        elif (len(self.dnn_hidden_units) > 0):
            deep_out = self.dnn(dnn_input)
            logit += self.dnn_linear(deep_out)
        elif (self.cross_num > 0):
            cross_out = self.crossnet(dnn_input)
            logit += self.dnn_linear(cross_out)
        else:
            pass
        y_pred = self.out(logit)
        return y_pred
# NOTE(review): the following lines are scraping residue from a dataset-viewer
# page, not Python code; commented out so the module stays importable.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.